x86: unify mask_IO_APIC_irq
arch/x86/kernel/io_apic.c
1 /*
2 * Intel IO-APIC support for multi-Pentium hosts.
3 *
4 * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
5 *
6 * Many thanks to Stig Venaas for trying out countless experimental
7 * patches and reporting/debugging problems patiently!
8 *
9 * (c) 1999, Multiple IO-APIC support, developed by
10 * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
11 * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
12 * further tested and cleaned up by Zach Brown <zab@redhat.com>
13 * and Ingo Molnar <mingo@redhat.com>
14 *
15 * Fixes
16 * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
17 * thanks to Eric Gilmore
18 * and Rolf G. Tews
19 * for testing these extensively
20 * Paul Diefenbaugh : Added full ACPI support
21 */
22
23 #include <linux/mm.h>
24 #include <linux/interrupt.h>
25 #include <linux/init.h>
26 #include <linux/delay.h>
27 #include <linux/sched.h>
28 #include <linux/pci.h>
29 #include <linux/mc146818rtc.h>
30 #include <linux/compiler.h>
31 #include <linux/acpi.h>
32 #include <linux/module.h>
33 #include <linux/sysdev.h>
34 #include <linux/msi.h>
35 #include <linux/htirq.h>
36 #include <linux/freezer.h>
37 #include <linux/kthread.h>
38 #include <linux/jiffies.h> /* time_after() */
39 #ifdef CONFIG_ACPI
40 #include <acpi/acpi_bus.h>
41 #endif
42 #include <linux/bootmem.h>
43 #include <linux/dmar.h>
44
45 #include <asm/idle.h>
46 #include <asm/io.h>
47 #include <asm/smp.h>
48 #include <asm/desc.h>
49 #include <asm/proto.h>
50 #include <asm/acpi.h>
51 #include <asm/dma.h>
52 #include <asm/timer.h>
53 #include <asm/i8259.h>
54 #include <asm/nmi.h>
55 #include <asm/msidef.h>
56 #include <asm/hypertransport.h>
57 #include <asm/setup.h>
58 #include <asm/irq_remapping.h>
59
60 #include <mach_ipi.h>
61 #include <mach_apic.h>
62 #include <mach_apicdef.h>
63
64 #define __apicdebuginit(type) static type __init
65
66 /*
67 * Is the SiS APIC rmw bug present ?
68 * -1 = don't know, 0 = no, 1 = yes
69 */
70 int sis_apic_bug = -1;
71
72 static DEFINE_SPINLOCK(ioapic_lock);
73 static DEFINE_SPINLOCK(vector_lock);
74
75 int first_free_entry;
76 /*
 77  * Rough estimation of how many shared IRQs there are; can
 78  * be changed anytime.
79 */
80 int pin_map_size;
81
82 /*
83 * # of IRQ routing registers
84 */
85 int nr_ioapic_registers[MAX_IO_APICS];
86
87 /* I/O APIC entries */
88 struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];
89 int nr_ioapics;
90
91 /* MP IRQ source entries */
92 struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
93
94 /* # of MP IRQ source entries */
95 int mp_irq_entries;
96
97 #if defined (CONFIG_MCA) || defined (CONFIG_EISA)
98 int mp_bus_id_to_type[MAX_MP_BUSSES];
99 #endif
100
101 DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
102
103 int skip_ioapic_setup;
104
105 static int __init parse_noapic(char *str)
106 {
107 /* disable IO-APIC */
108 disable_ioapic_setup();
109 return 0;
110 }
111 early_param("noapic", parse_noapic);
112
113 struct irq_cfg;
114 struct irq_pin_list;
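/*
 * Per-irq state for the sparse irq_cfg list:
 *   next              - links all allocated irq_cfg entries
 *   irq_2_pin         - IO-APIC (apic, pin) pairs routed to this irq
 *   domain            - cpus on which this irq's vector is allocated
 *   old_domain        - previous cpu set, kept while a migration is pending
 *   move_in_progress / move_cleanup_count - track a pending vector migration
 */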
115 struct irq_cfg {
116 unsigned int irq;
117 struct irq_cfg *next;
118 struct irq_pin_list *irq_2_pin;
119 cpumask_t domain;
120 cpumask_t old_domain;
121 unsigned move_cleanup_count;
122 u8 vector;
123 u8 move_in_progress : 1;
124 };
125
126 /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
127 static struct irq_cfg irq_cfg_legacy[] __initdata = {
128 [0] = { .irq = 0, .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR, },
129 [1] = { .irq = 1, .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR, },
130 [2] = { .irq = 2, .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR, },
131 [3] = { .irq = 3, .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR, },
132 [4] = { .irq = 4, .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR, },
133 [5] = { .irq = 5, .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR, },
134 [6] = { .irq = 6, .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR, },
135 [7] = { .irq = 7, .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR, },
136 [8] = { .irq = 8, .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR, },
137 [9] = { .irq = 9, .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR, },
138 [10] = { .irq = 10, .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, },
139 [11] = { .irq = 11, .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, },
140 [12] = { .irq = 12, .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, },
141 [13] = { .irq = 13, .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, },
142 [14] = { .irq = 14, .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, },
143 [15] = { .irq = 15, .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
144 };
145
146 static struct irq_cfg irq_cfg_init = { .irq = -1U, };
147 /* needs to be bigger than the size of irq_cfg_legacy */
148 static int nr_irq_cfg = 32;
149
150 static int __init parse_nr_irq_cfg(char *arg)
151 {
152 if (arg) {
153 nr_irq_cfg = simple_strtoul(arg, NULL, 0);
154 if (nr_irq_cfg < 32)
155 nr_irq_cfg = 32;
156 }
157 return 0;
158 }
159
160 early_param("nr_irq_cfg", parse_nr_irq_cfg);
161
162 static void init_one_irq_cfg(struct irq_cfg *cfg)
163 {
164 memcpy(cfg, &irq_cfg_init, sizeof(struct irq_cfg));
165 }
166
167 static struct irq_cfg *irq_cfgx;
168 static struct irq_cfg *irq_cfgx_free;
169 static void __init init_work(void *data)
170 {
171 struct dyn_array *da = data;
172 struct irq_cfg *cfg;
173 int legacy_count;
174 int i;
175
176 cfg = *da->name;
177
178 memcpy(cfg, irq_cfg_legacy, sizeof(irq_cfg_legacy));
179
180 legacy_count = sizeof(irq_cfg_legacy)/sizeof(irq_cfg_legacy[0]);
181 for (i = legacy_count; i < *da->nr; i++)
182 init_one_irq_cfg(&cfg[i]);
183
184 for (i = 1; i < *da->nr; i++)
185 cfg[i-1].next = &cfg[i];
186
187 irq_cfgx_free = &irq_cfgx[legacy_count];
188 irq_cfgx[legacy_count - 1].next = NULL;
189 }
190
191 #define for_each_irq_cfg(cfg) \
192 for (cfg = irq_cfgx; cfg; cfg = cfg->next)
193
194 DEFINE_DYN_ARRAY(irq_cfgx, sizeof(struct irq_cfg), nr_irq_cfg, PAGE_SIZE, init_work);
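/*
 * DEFINE_DYN_ARRAY() registers irq_cfgx as a boot-time dynamic array of
 * nr_irq_cfg entries with the size and alignment given above; once the
 * array is allocated, the init_work() callback copies in the legacy
 * entries and chains the remainder onto irq_cfgx_free.
 */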
195
196 static struct irq_cfg *irq_cfg(unsigned int irq)
197 {
198 struct irq_cfg *cfg;
199
200 cfg = irq_cfgx;
201 while (cfg) {
202 if (cfg->irq == irq)
203 return cfg;
204
205 cfg = cfg->next;
206 }
207
208 return NULL;
209 }
210
211 static struct irq_cfg *irq_cfg_alloc(unsigned int irq)
212 {
213 struct irq_cfg *cfg, *cfg_pri;
214 int i;
215 int count = 0;
216
217 cfg_pri = cfg = irq_cfgx;
218 while (cfg) {
219 if (cfg->irq == irq)
220 return cfg;
221
222 cfg_pri = cfg;
223 cfg = cfg->next;
224 count++;
225 }
226
227 if (!irq_cfgx_free) {
228 unsigned long phys;
229 unsigned long total_bytes;
230 /*
231  * we ran out of pre-allocated ones, allocate more
232 */
233 printk(KERN_DEBUG "try to get more irq_cfg %d\n", nr_irq_cfg);
234
235 total_bytes = sizeof(struct irq_cfg) * nr_irq_cfg;
236 if (after_bootmem)
237 cfg = kzalloc(total_bytes, GFP_ATOMIC);
238 else
239 cfg = __alloc_bootmem_nopanic(total_bytes, PAGE_SIZE, 0);
240
241 if (!cfg)
242 panic("please boot with nr_irq_cfg= %d\n", count * 2);
243
244 phys = __pa(cfg);
245 printk(KERN_DEBUG "irq_irq ==> [%#lx - %#lx]\n", phys, phys + total_bytes);
246
247 for (i = 0; i < nr_irq_cfg; i++)
248 init_one_irq_cfg(&cfg[i]);
249
250 for (i = 1; i < nr_irq_cfg; i++)
251 cfg[i-1].next = &cfg[i];
252
253 irq_cfgx_free = cfg;
254 }
255
256 cfg = irq_cfgx_free;
257 irq_cfgx_free = irq_cfgx_free->next;
258 cfg->next = NULL;
259 if (cfg_pri)
260 cfg_pri->next = cfg;
261 else
262 irq_cfgx = cfg;
263 cfg->irq = irq;
264 printk(KERN_DEBUG "found new irq_cfg for irq %d\n", cfg->irq);
265 #ifdef CONFIG_HAVE_SPARSE_IRQ_DEBUG
266 {
267 /* dump the results */
268 struct irq_cfg *cfg;
269 unsigned long phys;
270 unsigned long bytes = sizeof(struct irq_cfg);
271
272 printk(KERN_DEBUG "=========================== %d\n", irq);
273 printk(KERN_DEBUG "irq_cfg dump after get that for %d\n", irq);
274 for_each_irq_cfg(cfg) {
275 phys = __pa(cfg);
276 printk(KERN_DEBUG "irq_cfg %d ==> [%#lx - %#lx]\n", cfg->irq, phys, phys + bytes);
277 }
278 printk(KERN_DEBUG "===========================\n");
279 }
280 #endif
281 return cfg;
282 }
283
284 /*
285 * This is performance-critical, we want to do it O(1)
286 *
287 * the indexing order of this array favors 1:1 mappings
288 * between pins and IRQs.
289 */
290
291 struct irq_pin_list {
292 int apic, pin;
293 struct irq_pin_list *next;
294 };
295
296 static struct irq_pin_list *irq_2_pin_head;
297 /* fill one page ? */
298 static int nr_irq_2_pin = 0x100;
299 static struct irq_pin_list *irq_2_pin_ptr;
300 static void __init irq_2_pin_init_work(void *data)
301 {
302 struct dyn_array *da = data;
303 struct irq_pin_list *pin;
304 int i;
305
306 pin = *da->name;
307
308 for (i = 1; i < *da->nr; i++)
309 pin[i-1].next = &pin[i];
310
311 irq_2_pin_ptr = &pin[0];
312 }
313 DEFINE_DYN_ARRAY(irq_2_pin_head, sizeof(struct irq_pin_list), nr_irq_2_pin, PAGE_SIZE, irq_2_pin_init_work);
314
315 static struct irq_pin_list *get_one_free_irq_2_pin(void)
316 {
317 struct irq_pin_list *pin;
318 int i;
319
320 pin = irq_2_pin_ptr;
321
322 if (pin) {
323 irq_2_pin_ptr = pin->next;
324 pin->next = NULL;
325 return pin;
326 }
327
328 /*
329  * we ran out of pre-allocated ones, allocate more
330 */
331 printk(KERN_DEBUG "try to get more irq_2_pin %d\n", nr_irq_2_pin);
332
333 if (after_bootmem)
334 pin = kzalloc(sizeof(struct irq_pin_list)*nr_irq_2_pin,
335 GFP_ATOMIC);
336 else
337 pin = __alloc_bootmem_nopanic(sizeof(struct irq_pin_list) *
338 nr_irq_2_pin, PAGE_SIZE, 0);
339
340 if (!pin)
341 panic("can not get more irq_2_pin\n");
342
343 for (i = 1; i < nr_irq_2_pin; i++)
344 pin[i-1].next = &pin[i];
345
346 irq_2_pin_ptr = pin->next;
347 pin->next = NULL;
348
349 return pin;
350 }
351
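/*
 * Memory-mapped IO-APIC register window: the index (register select)
 * register is at offset 0x00 and the data window at offset 0x10; the
 * three unused words pad the gap between them.
 */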
352 struct io_apic {
353 unsigned int index;
354 unsigned int unused[3];
355 unsigned int data;
356 };
357
358 static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
359 {
360 return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
361 + (mp_ioapics[idx].mp_apicaddr & ~PAGE_MASK);
362 }
363
364 static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
365 {
366 struct io_apic __iomem *io_apic = io_apic_base(apic);
367 writel(reg, &io_apic->index);
368 return readl(&io_apic->data);
369 }
370
371 static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
372 {
373 struct io_apic __iomem *io_apic = io_apic_base(apic);
374 writel(reg, &io_apic->index);
375 writel(value, &io_apic->data);
376 }
377
378 /*
379 * Re-write a value: to be used for read-modify-write
380 * cycles where the read already set up the index register.
381 *
382  * Older SiS APICs require that we rewrite the index register
383 */
384 static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
385 {
386 struct io_apic __iomem *io_apic = io_apic_base(apic);
387 if (sis_apic_bug)
388 writel(reg, &io_apic->index);
389 writel(value, &io_apic->data);
390 }
391
392 #ifdef CONFIG_X86_64
393 static bool io_apic_level_ack_pending(unsigned int irq)
394 {
395 struct irq_pin_list *entry;
396 unsigned long flags;
397 struct irq_cfg *cfg = irq_cfg(irq);
398
399 spin_lock_irqsave(&ioapic_lock, flags);
400 entry = cfg->irq_2_pin;
401 for (;;) {
402 unsigned int reg;
403 int pin;
404
405 if (!entry)
406 break;
407 pin = entry->pin;
408 reg = io_apic_read(entry->apic, 0x10 + pin*2);
409 /* Is the remote IRR bit set? */
410 if (reg & IO_APIC_REDIR_REMOTE_IRR) {
411 spin_unlock_irqrestore(&ioapic_lock, flags);
412 return true;
413 }
414 if (!entry->next)
415 break;
416 entry = entry->next;
417 }
418 spin_unlock_irqrestore(&ioapic_lock, flags);
419
420 return false;
421 }
422 #endif
423
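/*
 * Overlay of one 64-bit redirection table entry: w1 and w2 are the low
 * and high 32-bit words, so an entry can be moved as two register
 * accesses while still being handled as a struct IO_APIC_route_entry.
 */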
424 union entry_union {
425 struct { u32 w1, w2; };
426 struct IO_APIC_route_entry entry;
427 };
428
429 static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
430 {
431 union entry_union eu;
432 unsigned long flags;
433 spin_lock_irqsave(&ioapic_lock, flags);
434 eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
435 eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
436 spin_unlock_irqrestore(&ioapic_lock, flags);
437 return eu.entry;
438 }
439
440 /*
441 * When we write a new IO APIC routing entry, we need to write the high
442 * word first! If the mask bit in the low word is clear, we will enable
443 * the interrupt, and we need to make sure the entry is fully populated
444 * before that happens.
445 */
446 static void
447 __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
448 {
449 union entry_union eu;
450 eu.entry = e;
451 io_apic_write(apic, 0x11 + 2*pin, eu.w2);
452 io_apic_write(apic, 0x10 + 2*pin, eu.w1);
453 }
454
455 static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
456 {
457 unsigned long flags;
458 spin_lock_irqsave(&ioapic_lock, flags);
459 __ioapic_write_entry(apic, pin, e);
460 spin_unlock_irqrestore(&ioapic_lock, flags);
461 }
462
463 /*
464 * When we mask an IO APIC routing entry, we need to write the low
465 * word first, in order to set the mask bit before we change the
466 * high bits!
467 */
468 static void ioapic_mask_entry(int apic, int pin)
469 {
470 unsigned long flags;
471 union entry_union eu = { .entry.mask = 1 };
472
473 spin_lock_irqsave(&ioapic_lock, flags);
474 io_apic_write(apic, 0x10 + 2*pin, eu.w1);
475 io_apic_write(apic, 0x11 + 2*pin, eu.w2);
476 spin_unlock_irqrestore(&ioapic_lock, flags);
477 }
478
479 #ifdef CONFIG_SMP
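/*
 * Rewrite every redirection entry routed to this irq: the destination
 * is written into the high word (0x11 + pin*2) and the new vector is
 * read-modify-written into the low word (0x10 + pin*2), walking the
 * irq's irq_2_pin list.
 */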
480 static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector)
481 {
482 int apic, pin;
483 struct irq_cfg *cfg;
484 struct irq_pin_list *entry;
485
486 cfg = irq_cfg(irq);
487 entry = cfg->irq_2_pin;
488 for (;;) {
489 unsigned int reg;
490
491 if (!entry)
492 break;
493
494 apic = entry->apic;
495 pin = entry->pin;
496 #ifdef CONFIG_INTR_REMAP
497 /*
498 * With interrupt-remapping, destination information comes
499 * from interrupt-remapping table entry.
500 */
501 if (!irq_remapped(irq))
502 io_apic_write(apic, 0x11 + pin*2, dest);
503 #else
504 io_apic_write(apic, 0x11 + pin*2, dest);
505 #endif
506 reg = io_apic_read(apic, 0x10 + pin*2);
507 reg &= ~IO_APIC_REDIR_VECTOR_MASK;
508 reg |= vector;
509 io_apic_modify(apic, 0x10 + pin*2, reg);
510 if (!entry->next)
511 break;
512 entry = entry->next;
513 }
514 }
515
516 static int assign_irq_vector(int irq, cpumask_t mask);
517
518 static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
519 {
520 struct irq_cfg *cfg;
521 unsigned long flags;
522 unsigned int dest;
523 cpumask_t tmp;
524 struct irq_desc *desc;
525
526 cpus_and(tmp, mask, cpu_online_map);
527 if (cpus_empty(tmp))
528 return;
529
530 cfg = irq_cfg(irq);
531 if (assign_irq_vector(irq, mask))
532 return;
533
534 cpus_and(tmp, cfg->domain, mask);
535 dest = cpu_mask_to_apicid(tmp);
536 /*
537 * Only the high 8 bits are valid.
538 */
539 dest = SET_APIC_LOGICAL_ID(dest);
540
541 desc = irq_to_desc(irq);
542 spin_lock_irqsave(&ioapic_lock, flags);
543 __target_IO_APIC_irq(irq, dest, cfg->vector);
544 desc->affinity = mask;
545 spin_unlock_irqrestore(&ioapic_lock, flags);
546 }
547 #endif /* CONFIG_SMP */
548
549 /*
550 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
551 * shared ISA-space IRQs, so we have to support them. We are super
552 * fast in the common case, and fast for shared ISA-space IRQs.
553 */
554 static void add_pin_to_irq(unsigned int irq, int apic, int pin)
555 {
556 struct irq_cfg *cfg;
557 struct irq_pin_list *entry;
558
559 /* first reference to this irq's irq_cfg, so allocate a new one */
560 cfg = irq_cfg_alloc(irq);
561 entry = cfg->irq_2_pin;
562 if (!entry) {
563 entry = get_one_free_irq_2_pin();
564 cfg->irq_2_pin = entry;
565 entry->apic = apic;
566 entry->pin = pin;
567 printk(KERN_DEBUG " 0 add_pin_to_irq: irq %d --> apic %d pin %d\n", irq, apic, pin);
568 return;
569 }
570
571 while (entry->next) {
572 /* not again, please */
573 if (entry->apic == apic && entry->pin == pin)
574 return;
575
576 entry = entry->next;
577 }
578
579 entry->next = get_one_free_irq_2_pin();
580 entry = entry->next;
581 entry->apic = apic;
582 entry->pin = pin;
583 printk(KERN_DEBUG " x add_pin_to_irq: irq %d --> apic %d pin %d\n", irq, apic, pin);
584 }
585
586 /*
587 * Reroute an IRQ to a different pin.
588 */
589 static void __init replace_pin_at_irq(unsigned int irq,
590 int oldapic, int oldpin,
591 int newapic, int newpin)
592 {
593 struct irq_cfg *cfg = irq_cfg(irq);
594 struct irq_pin_list *entry = cfg->irq_2_pin;
595 int replaced = 0;
596
597 while (entry) {
598 if (entry->apic == oldapic && entry->pin == oldpin) {
599 entry->apic = newapic;
600 entry->pin = newpin;
601 replaced = 1;
602 /* every one is different, right? */
603 break;
604 }
605 entry = entry->next;
606 }
607
608 /* why? call replace before add? */
609 if (!replaced)
610 add_pin_to_irq(irq, newapic, newpin);
611 }
612
613 #define __DO_ACTION(R, ACTION_ENABLE, ACTION_DISABLE, FINAL) \
614 \
615 { \
616 int pin; \
617 struct irq_cfg *cfg; \
618 struct irq_pin_list *entry; \
619 \
620 cfg = irq_cfg(irq); \
621 entry = cfg->irq_2_pin; \
622 for (;;) { \
623 unsigned int reg; \
624 if (!entry) \
625 break; \
626 pin = entry->pin; \
627 reg = io_apic_read(entry->apic, 0x10 + R + pin*2); \
628 reg ACTION_DISABLE; \
629 reg ACTION_ENABLE; \
630 io_apic_modify(entry->apic, 0x10 + R + pin*2, reg); \
631 FINAL; \
632 if (!entry->next) \
633 break; \
634 entry = entry->next; \
635 } \
636 }
637
638 #define DO_ACTION(name,R, ACTION_ENABLE, ACTION_DISABLE, FINAL) \
639 \
640 static void name##_IO_APIC_irq (unsigned int irq) \
641 __DO_ACTION(R, ACTION_ENABLE, ACTION_DISABLE, FINAL)
642
643 /* mask = 0 */
644 DO_ACTION(__unmask, 0, |= 0, &= ~IO_APIC_REDIR_MASKED, )
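/*
 * Roughly, the DO_ACTION() invocation above expands to:
 *
 *	static void __unmask_IO_APIC_irq(unsigned int irq)
 *	{
 *		for each (apic, pin) on the irq's irq_2_pin list {
 *			reg = io_apic_read(apic, 0x10 + pin*2);
 *			reg &= ~IO_APIC_REDIR_MASKED;	// ACTION_DISABLE
 *			reg |= 0;			// ACTION_ENABLE (no-op here)
 *			io_apic_modify(apic, 0x10 + pin*2, reg);
 *		}
 *	}
 */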
645
646 #ifdef CONFIG_X86_64
647 /*
648 * Synchronize the IO-APIC and the CPU by doing
649 * a dummy read from the IO-APIC
650 */
651 static inline void io_apic_sync(unsigned int apic)
652 {
653 struct io_apic __iomem *io_apic = io_apic_base(apic);
654 readl(&io_apic->data);
655 }
656
657 /* mask = 1 */
658 DO_ACTION(__mask, 0, |= IO_APIC_REDIR_MASKED, &= ~0, io_apic_sync(entry->apic))
659
660 #else
661
662 /* mask = 1 */
663 DO_ACTION(__mask, 0, |= IO_APIC_REDIR_MASKED, &= ~0, )
664
665 /* mask = 1, trigger = 0 */
666 DO_ACTION(__mask_and_edge, 0, |= IO_APIC_REDIR_MASKED, &= ~IO_APIC_REDIR_LEVEL_TRIGGER, )
667
668 /* mask = 0, trigger = 1 */
669 DO_ACTION(__unmask_and_level, 0, |= IO_APIC_REDIR_LEVEL_TRIGGER, &= ~IO_APIC_REDIR_MASKED, )
670
671 #endif
672
673 static void mask_IO_APIC_irq (unsigned int irq)
674 {
675 unsigned long flags;
676
677 spin_lock_irqsave(&ioapic_lock, flags);
678 __mask_IO_APIC_irq(irq);
679 spin_unlock_irqrestore(&ioapic_lock, flags);
680 }
681
682 static void unmask_IO_APIC_irq (unsigned int irq)
683 {
684 unsigned long flags;
685
686 spin_lock_irqsave(&ioapic_lock, flags);
687 __unmask_IO_APIC_irq(irq);
688 spin_unlock_irqrestore(&ioapic_lock, flags);
689 }
690
691 static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
692 {
693 struct IO_APIC_route_entry entry;
694
695 /* Check delivery_mode to be sure we're not clearing an SMI pin */
696 entry = ioapic_read_entry(apic, pin);
697 if (entry.delivery_mode == dest_SMI)
698 return;
699 /*
700 * Disable it in the IO-APIC irq-routing table:
701 */
702 ioapic_mask_entry(apic, pin);
703 }
704
705 static void clear_IO_APIC (void)
706 {
707 int apic, pin;
708
709 for (apic = 0; apic < nr_ioapics; apic++)
710 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
711 clear_IO_APIC_pin(apic, pin);
712 }
713
714 #if !defined(CONFIG_SMP) && defined(CONFIG_X86_32)
715 void send_IPI_self(int vector)
716 {
717 unsigned int cfg;
718
719 /*
720 * Wait for idle.
721 */
722 apic_wait_icr_idle();
723 cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | APIC_DEST_LOGICAL;
724 /*
725 * Send the IPI. The write to APIC_ICR fires this off.
726 */
727 apic_write(APIC_ICR, cfg);
728 }
729 #endif /* !CONFIG_SMP && CONFIG_X86_32*/
730
731 #ifdef CONFIG_X86_32
732 /*
733 * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
734 * specific CPU-side IRQs.
735 */
736
737 #define MAX_PIRQS 8
738 static int pirq_entries [MAX_PIRQS];
739 static int pirqs_enabled;
740
741 static int __init ioapic_pirq_setup(char *str)
742 {
743 int i, max;
744 int ints[MAX_PIRQS+1];
745
746 get_options(str, ARRAY_SIZE(ints), ints);
747
748 for (i = 0; i < MAX_PIRQS; i++)
749 pirq_entries[i] = -1;
750
751 pirqs_enabled = 1;
752 apic_printk(APIC_VERBOSE, KERN_INFO
753 "PIRQ redirection, working around broken MP-BIOS.\n");
754 max = MAX_PIRQS;
755 if (ints[0] < MAX_PIRQS)
756 max = ints[0];
757
758 for (i = 0; i < max; i++) {
759 apic_printk(APIC_VERBOSE, KERN_DEBUG
760 "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
761 /*
762 * PIRQs are mapped upside down, usually.
763 */
764 pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
765 }
766 return 1;
767 }
768
769 __setup("pirq=", ioapic_pirq_setup);
770 #endif /* CONFIG_X86_32 */
771
772 #ifdef CONFIG_INTR_REMAP
773 /* I/O APIC RTE contents at the OS boot up */
774 static struct IO_APIC_route_entry *early_ioapic_entries[MAX_IO_APICS];
775
776 /*
777 * Saves and masks all the unmasked IO-APIC RTE's
778 */
779 int save_mask_IO_APIC_setup(void)
780 {
781 union IO_APIC_reg_01 reg_01;
782 unsigned long flags;
783 int apic, pin;
784
785 /*
786 * The number of IO-APIC IRQ registers (== #pins):
787 */
788 for (apic = 0; apic < nr_ioapics; apic++) {
789 spin_lock_irqsave(&ioapic_lock, flags);
790 reg_01.raw = io_apic_read(apic, 1);
791 spin_unlock_irqrestore(&ioapic_lock, flags);
792 nr_ioapic_registers[apic] = reg_01.bits.entries+1;
793 }
794
795 for (apic = 0; apic < nr_ioapics; apic++) {
796 early_ioapic_entries[apic] =
797 kzalloc(sizeof(struct IO_APIC_route_entry) *
798 nr_ioapic_registers[apic], GFP_KERNEL);
799 if (!early_ioapic_entries[apic])
800 return -ENOMEM;
801 }
802
803 for (apic = 0; apic < nr_ioapics; apic++)
804 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
805 struct IO_APIC_route_entry entry;
806
807 entry = early_ioapic_entries[apic][pin] =
808 ioapic_read_entry(apic, pin);
809 if (!entry.mask) {
810 entry.mask = 1;
811 ioapic_write_entry(apic, pin, entry);
812 }
813 }
814 return 0;
815 }
816
817 void restore_IO_APIC_setup(void)
818 {
819 int apic, pin;
820
821 for (apic = 0; apic < nr_ioapics; apic++)
822 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
823 ioapic_write_entry(apic, pin,
824 early_ioapic_entries[apic][pin]);
825 }
826
827 void reinit_intr_remapped_IO_APIC(int intr_remapping)
828 {
829 /*
830  * For now, do a plain restore of the previous settings.
831  * TBD: when the OS enables interrupt-remapping, the IO-APIC RTEs
832  * need to be set up to point to interrupt-remapping table entries.
833  * Until then, do a plain restore and wait for setup_IO_APIC_irqs()
834  * to do the proper initialization.
835 */
836 restore_IO_APIC_setup();
837 }
838 #endif
839
840 /*
841 * Find the IRQ entry number of a certain pin.
842 */
843 static int find_irq_entry(int apic, int pin, int type)
844 {
845 int i;
846
847 for (i = 0; i < mp_irq_entries; i++)
848 if (mp_irqs[i].mp_irqtype == type &&
849 (mp_irqs[i].mp_dstapic == mp_ioapics[apic].mp_apicid ||
850 mp_irqs[i].mp_dstapic == MP_APIC_ALL) &&
851 mp_irqs[i].mp_dstirq == pin)
852 return i;
853
854 return -1;
855 }
856
857 /*
858 * Find the pin to which IRQ[irq] (ISA) is connected
859 */
860 static int __init find_isa_irq_pin(int irq, int type)
861 {
862 int i;
863
864 for (i = 0; i < mp_irq_entries; i++) {
865 int lbus = mp_irqs[i].mp_srcbus;
866
867 if (test_bit(lbus, mp_bus_not_pci) &&
868 (mp_irqs[i].mp_irqtype == type) &&
869 (mp_irqs[i].mp_srcbusirq == irq))
870
871 return mp_irqs[i].mp_dstirq;
872 }
873 return -1;
874 }
875
876 static int __init find_isa_irq_apic(int irq, int type)
877 {
878 int i;
879
880 for (i = 0; i < mp_irq_entries; i++) {
881 int lbus = mp_irqs[i].mp_srcbus;
882
883 if (test_bit(lbus, mp_bus_not_pci) &&
884 (mp_irqs[i].mp_irqtype == type) &&
885 (mp_irqs[i].mp_srcbusirq == irq))
886 break;
887 }
888 if (i < mp_irq_entries) {
889 int apic;
890 for(apic = 0; apic < nr_ioapics; apic++) {
891 if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic)
892 return apic;
893 }
894 }
895
896 return -1;
897 }
898
899 /*
900 * Find a specific PCI IRQ entry.
901 * Not an __init, possibly needed by modules
902 */
903 static int pin_2_irq(int idx, int apic, int pin);
904
905 int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
906 {
907 int apic, i, best_guess = -1;
908
909 apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
910 bus, slot, pin);
911 if (test_bit(bus, mp_bus_not_pci)) {
912 apic_printk(APIC_VERBOSE, "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
913 return -1;
914 }
915 for (i = 0; i < mp_irq_entries; i++) {
916 int lbus = mp_irqs[i].mp_srcbus;
917
918 for (apic = 0; apic < nr_ioapics; apic++)
919 if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic ||
920 mp_irqs[i].mp_dstapic == MP_APIC_ALL)
921 break;
922
923 if (!test_bit(lbus, mp_bus_not_pci) &&
924 !mp_irqs[i].mp_irqtype &&
925 (bus == lbus) &&
926 (slot == ((mp_irqs[i].mp_srcbusirq >> 2) & 0x1f))) {
927 int irq = pin_2_irq(i,apic,mp_irqs[i].mp_dstirq);
928
929 if (!(apic || IO_APIC_IRQ(irq)))
930 continue;
931
932 if (pin == (mp_irqs[i].mp_srcbusirq & 3))
933 return irq;
934 /*
935 * Use the first all-but-pin matching entry as a
936 * best-guess fuzzy result for broken mptables.
937 */
938 if (best_guess < 0)
939 best_guess = irq;
940 }
941 }
942 return best_guess;
943 }
944
945 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
946
947 #if defined(CONFIG_EISA) || defined(CONFIG_MCA)
948 /*
949 * EISA Edge/Level control register, ELCR
950 */
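/*
 * The ELCR occupies I/O ports 0x4d0 (IRQ0-7) and 0x4d1 (IRQ8-15); a set
 * bit marks the corresponding IRQ as level-triggered, a clear bit as
 * edge-triggered.
 */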
951 static int EISA_ELCR(unsigned int irq)
952 {
953 if (irq < 16) {
954 unsigned int port = 0x4d0 + (irq >> 3);
955 return (inb(port) >> (irq & 7)) & 1;
956 }
957 apic_printk(APIC_VERBOSE, KERN_INFO
958 "Broken MPtable reports ISA irq %d\n", irq);
959 return 0;
960 }
961
962 #endif
963
964 /* ISA interrupts are always polarity zero edge triggered,
965 * when listed as conforming in the MP table. */
966
967 #define default_ISA_trigger(idx) (0)
968 #define default_ISA_polarity(idx) (0)
969
970 /* EISA interrupts are always polarity zero and can be edge or level
971 * trigger depending on the ELCR value. If an interrupt is listed as
972 * EISA conforming in the MP table, that means its trigger type must
973 * be read in from the ELCR */
974
975 #define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mp_srcbusirq))
976 #define default_EISA_polarity(idx) default_ISA_polarity(idx)
977
978 /* PCI interrupts are always polarity one level triggered,
979 * when listed as conforming in the MP table. */
980
981 #define default_PCI_trigger(idx) (1)
982 #define default_PCI_polarity(idx) (1)
983
984 /* MCA interrupts are always polarity zero level triggered,
985 * when listed as conforming in the MP table. */
986
987 #define default_MCA_trigger(idx) (1)
988 #define default_MCA_polarity(idx) default_ISA_polarity(idx)
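/*
 * Summary of the defaults above (trigger: 0 = edge, 1 = level;
 * polarity: 0 = active high, 1 = active low):
 *
 *	ISA  - edge,     active high
 *	EISA - per ELCR, active high
 *	PCI  - level,    active low
 *	MCA  - level,    active high
 */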
989
990 static int MPBIOS_polarity(int idx)
991 {
992 int bus = mp_irqs[idx].mp_srcbus;
993 int polarity;
994
995 /*
996 * Determine IRQ line polarity (high active or low active):
997 */
998 switch (mp_irqs[idx].mp_irqflag & 3)
999 {
1000 case 0: /* conforms, ie. bus-type dependent polarity */
1001 if (test_bit(bus, mp_bus_not_pci))
1002 polarity = default_ISA_polarity(idx);
1003 else
1004 polarity = default_PCI_polarity(idx);
1005 break;
1006 case 1: /* high active */
1007 {
1008 polarity = 0;
1009 break;
1010 }
1011 case 2: /* reserved */
1012 {
1013 printk(KERN_WARNING "broken BIOS!!\n");
1014 polarity = 1;
1015 break;
1016 }
1017 case 3: /* low active */
1018 {
1019 polarity = 1;
1020 break;
1021 }
1022 default: /* invalid */
1023 {
1024 printk(KERN_WARNING "broken BIOS!!\n");
1025 polarity = 1;
1026 break;
1027 }
1028 }
1029 return polarity;
1030 }
1031
1032 static int MPBIOS_trigger(int idx)
1033 {
1034 int bus = mp_irqs[idx].mp_srcbus;
1035 int trigger;
1036
1037 /*
1038 * Determine IRQ trigger mode (edge or level sensitive):
1039 */
1040 switch ((mp_irqs[idx].mp_irqflag>>2) & 3)
1041 {
1042 case 0: /* conforms, ie. bus-type dependent */
1043 if (test_bit(bus, mp_bus_not_pci))
1044 trigger = default_ISA_trigger(idx);
1045 else
1046 trigger = default_PCI_trigger(idx);
1047 #if defined(CONFIG_EISA) || defined(CONFIG_MCA)
1048 switch (mp_bus_id_to_type[bus]) {
1049 case MP_BUS_ISA: /* ISA pin */
1050 {
1051 /* set before the switch */
1052 break;
1053 }
1054 case MP_BUS_EISA: /* EISA pin */
1055 {
1056 trigger = default_EISA_trigger(idx);
1057 break;
1058 }
1059 case MP_BUS_PCI: /* PCI pin */
1060 {
1061 /* set before the switch */
1062 break;
1063 }
1064 case MP_BUS_MCA: /* MCA pin */
1065 {
1066 trigger = default_MCA_trigger(idx);
1067 break;
1068 }
1069 default:
1070 {
1071 printk(KERN_WARNING "broken BIOS!!\n");
1072 trigger = 1;
1073 break;
1074 }
1075 }
1076 #endif
1077 break;
1078 case 1: /* edge */
1079 {
1080 trigger = 0;
1081 break;
1082 }
1083 case 2: /* reserved */
1084 {
1085 printk(KERN_WARNING "broken BIOS!!\n");
1086 trigger = 1;
1087 break;
1088 }
1089 case 3: /* level */
1090 {
1091 trigger = 1;
1092 break;
1093 }
1094 default: /* invalid */
1095 {
1096 printk(KERN_WARNING "broken BIOS!!\n");
1097 trigger = 0;
1098 break;
1099 }
1100 }
1101 return trigger;
1102 }
1103
1104 static inline int irq_polarity(int idx)
1105 {
1106 return MPBIOS_polarity(idx);
1107 }
1108
1109 static inline int irq_trigger(int idx)
1110 {
1111 return MPBIOS_trigger(idx);
1112 }
1113
1114 int (*ioapic_renumber_irq)(int ioapic, int irq);
1115 static int pin_2_irq(int idx, int apic, int pin)
1116 {
1117 int irq, i;
1118 int bus = mp_irqs[idx].mp_srcbus;
1119
1120 /*
1121 * Debugging check, we are in big trouble if this message pops up!
1122 */
1123 if (mp_irqs[idx].mp_dstirq != pin)
1124 printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
1125
1126 if (test_bit(bus, mp_bus_not_pci)) {
1127 irq = mp_irqs[idx].mp_srcbusirq;
1128 } else {
1129 /*
1130 * PCI IRQs are mapped in order
1131 */
1132 i = irq = 0;
1133 while (i < apic)
1134 irq += nr_ioapic_registers[i++];
1135 irq += pin;
1136 /*
1137 * For MPS mode, so far only needed by ES7000 platform
1138 */
1139 if (ioapic_renumber_irq)
1140 irq = ioapic_renumber_irq(apic, irq);
1141 }
1142
1143 #ifdef CONFIG_X86_32
1144 /*
1145 * PCI IRQ command line redirection. Yes, limits are hardcoded.
1146 */
1147 if ((pin >= 16) && (pin <= 23)) {
1148 if (pirq_entries[pin-16] != -1) {
1149 if (!pirq_entries[pin-16]) {
1150 apic_printk(APIC_VERBOSE, KERN_DEBUG
1151 "disabling PIRQ%d\n", pin-16);
1152 } else {
1153 irq = pirq_entries[pin-16];
1154 apic_printk(APIC_VERBOSE, KERN_DEBUG
1155 "using PIRQ%d -> IRQ %d\n",
1156 pin-16, irq);
1157 }
1158 }
1159 }
1160 #endif
1161
1162 return irq;
1163 }
1164
1165 void lock_vector_lock(void)
1166 {
1167 /* Used to ensure that the online set of cpus does not change
1168  * during assign_irq_vector.
1169 */
1170 spin_lock(&vector_lock);
1171 }
1172
1173 void unlock_vector_lock(void)
1174 {
1175 spin_unlock(&vector_lock);
1176 }
1177
1178 static int __assign_irq_vector(int irq, cpumask_t mask)
1179 {
1180 /*
1181 * NOTE! The local APIC isn't very good at handling
1182 * multiple interrupts at the same interrupt level.
1183 * As the interrupt level is determined by taking the
1184 * vector number and shifting that right by 4, we
1185 * want to spread these out a bit so that they don't
1186 * all fall in the same interrupt level.
1187 *
1188 * Also, we've got to be careful not to trash gate
1189 * 0x80, because int 0x80 is hm, kind of importantish. ;)
1190 */
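	/*
	 * Vectors are handed out in steps of 8 so that allocations are
	 * spread across priority levels (level = vector >> 4) instead of
	 * filling one level first; when the search reaches
	 * first_system_vector it wraps back to FIRST_DEVICE_VECTOR with
	 * the next offset and starts sharing levels.
	 */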
1191 static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
1192 unsigned int old_vector;
1193 int cpu;
1194 struct irq_cfg *cfg;
1195
1196 cfg = irq_cfg(irq);
1197
1198 /* Only try and allocate irqs on cpus that are present */
1199 cpus_and(mask, mask, cpu_online_map);
1200
1201 if ((cfg->move_in_progress) || cfg->move_cleanup_count)
1202 return -EBUSY;
1203
1204 old_vector = cfg->vector;
1205 if (old_vector) {
1206 cpumask_t tmp;
1207 cpus_and(tmp, cfg->domain, mask);
1208 if (!cpus_empty(tmp))
1209 return 0;
1210 }
1211
1212 for_each_cpu_mask_nr(cpu, mask) {
1213 cpumask_t domain, new_mask;
1214 int new_cpu;
1215 int vector, offset;
1216
1217 domain = vector_allocation_domain(cpu);
1218 cpus_and(new_mask, domain, cpu_online_map);
1219
1220 vector = current_vector;
1221 offset = current_offset;
1222 next:
1223 vector += 8;
1224 if (vector >= first_system_vector) {
1225 /* If we run out of vectors on large boxen, must share them. */
1226 offset = (offset + 1) % 8;
1227 vector = FIRST_DEVICE_VECTOR + offset;
1228 }
1229 if (unlikely(current_vector == vector))
1230 continue;
1231 #ifdef CONFIG_X86_64
1232 if (vector == IA32_SYSCALL_VECTOR)
1233 goto next;
1234 #else
1235 if (vector == SYSCALL_VECTOR)
1236 goto next;
1237 #endif
1238 for_each_cpu_mask_nr(new_cpu, new_mask)
1239 if (per_cpu(vector_irq, new_cpu)[vector] != -1)
1240 goto next;
1241 /* Found one! */
1242 current_vector = vector;
1243 current_offset = offset;
1244 if (old_vector) {
1245 cfg->move_in_progress = 1;
1246 cfg->old_domain = cfg->domain;
1247 }
1248 for_each_cpu_mask_nr(new_cpu, new_mask)
1249 per_cpu(vector_irq, new_cpu)[vector] = irq;
1250 cfg->vector = vector;
1251 cfg->domain = domain;
1252 return 0;
1253 }
1254 return -ENOSPC;
1255 }
1256
1257 static int assign_irq_vector(int irq, cpumask_t mask)
1258 {
1259 int err;
1260 unsigned long flags;
1261
1262 spin_lock_irqsave(&vector_lock, flags);
1263 err = __assign_irq_vector(irq, mask);
1264 spin_unlock_irqrestore(&vector_lock, flags);
1265 return err;
1266 }
1267
1268 static void __clear_irq_vector(int irq)
1269 {
1270 struct irq_cfg *cfg;
1271 cpumask_t mask;
1272 int cpu, vector;
1273
1274 cfg = irq_cfg(irq);
1275 BUG_ON(!cfg->vector);
1276
1277 vector = cfg->vector;
1278 cpus_and(mask, cfg->domain, cpu_online_map);
1279 for_each_cpu_mask_nr(cpu, mask)
1280 per_cpu(vector_irq, cpu)[vector] = -1;
1281
1282 cfg->vector = 0;
1283 cpus_clear(cfg->domain);
1284 }
1285
1286 void __setup_vector_irq(int cpu)
1287 {
1288 /* Initialize vector_irq on a new cpu */
1289 /* This function must be called with vector_lock held */
1290 int irq, vector;
1291 struct irq_cfg *cfg;
1292
1293 /* Mark the inuse vectors */
1294 for_each_irq_cfg(cfg) {
1295 if (!cpu_isset(cpu, cfg->domain))
1296 continue;
1297 vector = cfg->vector;
1298 irq = cfg->irq;
1299 per_cpu(vector_irq, cpu)[vector] = irq;
1300 }
1301 /* Mark the free vectors */
1302 for (vector = 0; vector < NR_VECTORS; ++vector) {
1303 irq = per_cpu(vector_irq, cpu)[vector];
1304 if (irq < 0)
1305 continue;
1306
1307 cfg = irq_cfg(irq);
1308 if (!cpu_isset(cpu, cfg->domain))
1309 per_cpu(vector_irq, cpu)[vector] = -1;
1310 }
1311 }
1312
1313 static struct irq_chip ioapic_chip;
1314 #ifdef CONFIG_INTR_REMAP
1315 static struct irq_chip ir_ioapic_chip;
1316 #endif
1317
1318 #define IOAPIC_AUTO -1
1319 #define IOAPIC_EDGE 0
1320 #define IOAPIC_LEVEL 1
1321
1322 #ifdef CONFIG_X86_32
1323 static inline int IO_APIC_irq_trigger(int irq)
1324 {
1325 int apic, idx, pin;
1326
1327 for (apic = 0; apic < nr_ioapics; apic++) {
1328 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
1329 idx = find_irq_entry(apic, pin, mp_INT);
1330 if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
1331 return irq_trigger(idx);
1332 }
1333 }
1334 /*
1335 * nonexistent IRQs are edge default
1336 */
1337 return 0;
1338 }
1339 #else
1340 static inline int IO_APIC_irq_trigger(int irq)
1341 {
1342 return 1;
1343 }
1344 #endif
1345
1346 static void ioapic_register_intr(int irq, unsigned long trigger)
1347 {
1348 struct irq_desc *desc;
1349
1350 /* first time to use this irq_desc */
1351 if (irq < 16)
1352 desc = irq_to_desc(irq);
1353 else
1354 desc = irq_to_desc_alloc(irq);
1355
1356 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
1357 trigger == IOAPIC_LEVEL)
1358 desc->status |= IRQ_LEVEL;
1359 else
1360 desc->status &= ~IRQ_LEVEL;
1361
1362 #ifdef CONFIG_INTR_REMAP
1363 if (irq_remapped(irq)) {
1364 desc->status |= IRQ_MOVE_PCNTXT;
1365 if (trigger)
1366 set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
1367 handle_fasteoi_irq,
1368 "fasteoi");
1369 else
1370 set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
1371 handle_edge_irq, "edge");
1372 return;
1373 }
1374 #endif
1375 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
1376 trigger == IOAPIC_LEVEL)
1377 set_irq_chip_and_handler_name(irq, &ioapic_chip,
1378 handle_fasteoi_irq,
1379 "fasteoi");
1380 else
1381 set_irq_chip_and_handler_name(irq, &ioapic_chip,
1382 handle_edge_irq, "edge");
1383 }
1384
1385 static int setup_ioapic_entry(int apic, int irq,
1386 struct IO_APIC_route_entry *entry,
1387 unsigned int destination, int trigger,
1388 int polarity, int vector)
1389 {
1390 /*
1391 * add it to the IO-APIC irq-routing table:
1392 */
1393 memset(entry,0,sizeof(*entry));
1394
1395 #ifdef CONFIG_INTR_REMAP
1396 if (intr_remapping_enabled) {
1397 struct intel_iommu *iommu = map_ioapic_to_ir(apic);
1398 struct irte irte;
1399 struct IR_IO_APIC_route_entry *ir_entry =
1400 (struct IR_IO_APIC_route_entry *) entry;
1401 int index;
1402
1403 if (!iommu)
1404 panic("No mapping iommu for ioapic %d\n", apic);
1405
1406 index = alloc_irte(iommu, irq, 1);
1407 if (index < 0)
1408 panic("Failed to allocate IRTE for ioapic %d\n", apic);
1409
1410 memset(&irte, 0, sizeof(irte));
1411
1412 irte.present = 1;
1413 irte.dst_mode = INT_DEST_MODE;
1414 irte.trigger_mode = trigger;
1415 irte.dlvry_mode = INT_DELIVERY_MODE;
1416 irte.vector = vector;
1417 irte.dest_id = IRTE_DEST(destination);
1418
1419 modify_irte(irq, &irte);
1420
1421 ir_entry->index2 = (index >> 15) & 0x1;
1422 ir_entry->zero = 0;
1423 ir_entry->format = 1;
1424 ir_entry->index = (index & 0x7fff);
1425 } else
1426 #endif
1427 {
1428 entry->delivery_mode = INT_DELIVERY_MODE;
1429 entry->dest_mode = INT_DEST_MODE;
1430 entry->dest = destination;
1431 }
1432
1433 entry->mask = 0; /* enable IRQ */
1434 entry->trigger = trigger;
1435 entry->polarity = polarity;
1436 entry->vector = vector;
1437
1438 /* Mask level triggered irqs.
1439 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
1440 */
1441 if (trigger)
1442 entry->mask = 1;
1443 return 0;
1444 }
1445
1446 static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq,
1447 int trigger, int polarity)
1448 {
1449 struct irq_cfg *cfg;
1450 struct IO_APIC_route_entry entry;
1451 cpumask_t mask;
1452
1453 if (!IO_APIC_IRQ(irq))
1454 return;
1455
1456 cfg = irq_cfg(irq);
1457
1458 mask = TARGET_CPUS;
1459 if (assign_irq_vector(irq, mask))
1460 return;
1461
1462 cpus_and(mask, cfg->domain, mask);
1463
1464 apic_printk(APIC_VERBOSE,KERN_DEBUG
1465 "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
1466 "IRQ %d Mode:%i Active:%i)\n",
1467 apic, mp_ioapics[apic].mp_apicid, pin, cfg->vector,
1468 irq, trigger, polarity);
1469
1470
1471 if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry,
1472 cpu_mask_to_apicid(mask), trigger, polarity,
1473 cfg->vector)) {
1474 printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
1475 mp_ioapics[apic].mp_apicid, pin);
1476 __clear_irq_vector(irq);
1477 return;
1478 }
1479
1480 ioapic_register_intr(irq, trigger);
1481 if (irq < 16)
1482 disable_8259A_irq(irq);
1483
1484 ioapic_write_entry(apic, pin, entry);
1485 }
1486
1487 static void __init setup_IO_APIC_irqs(void)
1488 {
1489 int apic, pin, idx, irq, first_notcon = 1;
1490
1491 apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
1492
1493 for (apic = 0; apic < nr_ioapics; apic++) {
1494 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
1495
1496 idx = find_irq_entry(apic,pin,mp_INT);
1497 if (idx == -1) {
1498 if (first_notcon) {
1499 apic_printk(APIC_VERBOSE, KERN_DEBUG " IO-APIC (apicid-pin) %d-%d", mp_ioapics[apic].mp_apicid, pin);
1500 first_notcon = 0;
1501 } else
1502 apic_printk(APIC_VERBOSE, ", %d-%d", mp_ioapics[apic].mp_apicid, pin);
1503 continue;
1504 }
1505 if (!first_notcon) {
1506 apic_printk(APIC_VERBOSE, " not connected.\n");
1507 first_notcon = 1;
1508 }
1509
1510 irq = pin_2_irq(idx, apic, pin);
1511 #ifdef CONFIG_X86_32
1512 if (multi_timer_check(apic, irq))
1513 continue;
1514 #endif
1515 add_pin_to_irq(irq, apic, pin);
1516
1517 setup_IO_APIC_irq(apic, pin, irq,
1518 irq_trigger(idx), irq_polarity(idx));
1519 }
1520 }
1521
1522 if (!first_notcon)
1523 apic_printk(APIC_VERBOSE, " not connected.\n");
1524 }
1525
1526 /*
1527 * Set up the timer pin, possibly with the 8259A-master behind.
1528 */
1529 static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
1530 int vector)
1531 {
1532 struct IO_APIC_route_entry entry;
1533
1534 #ifdef CONFIG_INTR_REMAP
1535 if (intr_remapping_enabled)
1536 return;
1537 #endif
1538
1539 memset(&entry, 0, sizeof(entry));
1540
1541 /*
1542 * We use logical delivery to get the timer IRQ
1543 * to the first CPU.
1544 */
1545 entry.dest_mode = INT_DEST_MODE;
1546 entry.mask = 1; /* mask IRQ now */
1547 entry.dest = cpu_mask_to_apicid(TARGET_CPUS);
1548 entry.delivery_mode = INT_DELIVERY_MODE;
1549 entry.polarity = 0;
1550 entry.trigger = 0;
1551 entry.vector = vector;
1552
1553 /*
1554 * The timer IRQ doesn't have to know that behind the
1555 * scene we may have a 8259A-master in AEOI mode ...
1556 */
1557 set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");
1558
1559 /*
1560 * Add it to the IO-APIC irq-routing table:
1561 */
1562 ioapic_write_entry(apic, pin, entry);
1563 }
1564
1565
1566 __apicdebuginit(void) print_IO_APIC(void)
1567 {
1568 int apic, i;
1569 union IO_APIC_reg_00 reg_00;
1570 union IO_APIC_reg_01 reg_01;
1571 union IO_APIC_reg_02 reg_02;
1572 union IO_APIC_reg_03 reg_03;
1573 unsigned long flags;
1574 struct irq_cfg *cfg;
1575
1576 if (apic_verbosity == APIC_QUIET)
1577 return;
1578
1579 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
1580 for (i = 0; i < nr_ioapics; i++)
1581 printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
1582 mp_ioapics[i].mp_apicid, nr_ioapic_registers[i]);
1583
1584 /*
1585 * We are a bit conservative about what we expect. We have to
1586 * know about every hardware change ASAP.
1587 */
1588 printk(KERN_INFO "testing the IO APIC.......................\n");
1589
1590 for (apic = 0; apic < nr_ioapics; apic++) {
1591
1592 spin_lock_irqsave(&ioapic_lock, flags);
1593 reg_00.raw = io_apic_read(apic, 0);
1594 reg_01.raw = io_apic_read(apic, 1);
1595 if (reg_01.bits.version >= 0x10)
1596 reg_02.raw = io_apic_read(apic, 2);
1597 if (reg_01.bits.version >= 0x20)
1598 reg_03.raw = io_apic_read(apic, 3);
1599 spin_unlock_irqrestore(&ioapic_lock, flags);
1600
1601 printk("\n");
1602 printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mp_apicid);
1603 printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
1604 printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
1605 printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
1606 printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS);
1607
1608 printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
1609 printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
1610
1611 printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
1612 printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);
1613
1614 /*
1615 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
1616 * but the value of reg_02 is read as the previous read register
1617 * value, so ignore it if reg_02 == reg_01.
1618 */
1619 if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
1620 printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
1621 printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
1622 }
1623
1624 /*
1625 * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
1626 * or reg_03, but the value of reg_0[23] is read as the previous read
1627 * register value, so ignore it if reg_03 == reg_0[12].
1628 */
1629 if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
1630 reg_03.raw != reg_01.raw) {
1631 printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
1632 printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT);
1633 }
1634
1635 printk(KERN_DEBUG ".... IRQ redirection table:\n");
1636
1637 printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
1638 " Stat Dmod Deli Vect: \n");
1639
1640 for (i = 0; i <= reg_01.bits.entries; i++) {
1641 struct IO_APIC_route_entry entry;
1642
1643 entry = ioapic_read_entry(apic, i);
1644
1645 printk(KERN_DEBUG " %02x %03X ",
1646 i,
1647 entry.dest
1648 );
1649
1650 printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
1651 entry.mask,
1652 entry.trigger,
1653 entry.irr,
1654 entry.polarity,
1655 entry.delivery_status,
1656 entry.dest_mode,
1657 entry.delivery_mode,
1658 entry.vector
1659 );
1660 }
1661 }
1662 printk(KERN_DEBUG "IRQ to pin mappings:\n");
1663 for_each_irq_cfg(cfg) {
1664 struct irq_pin_list *entry = cfg->irq_2_pin;
1665 if (!entry)
1666 continue;
1667 printk(KERN_DEBUG "IRQ%d ", cfg->irq);
1668 for (;;) {
1669 printk("-> %d:%d", entry->apic, entry->pin);
1670 if (!entry->next)
1671 break;
1672 entry = entry->next;
1673 }
1674 printk("\n");
1675 }
1676
1677 printk(KERN_INFO ".................................... done.\n");
1678
1679 return;
1680 }
1681
1682 __apicdebuginit(void) print_APIC_bitfield(int base)
1683 {
1684 unsigned int v;
1685 int i, j;
1686
1687 if (apic_verbosity == APIC_QUIET)
1688 return;
1689
1690 printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
1691 for (i = 0; i < 8; i++) {
1692 v = apic_read(base + i*0x10);
1693 for (j = 0; j < 32; j++) {
1694 if (v & (1<<j))
1695 printk("1");
1696 else
1697 printk("0");
1698 }
1699 printk("\n");
1700 }
1701 }
1702
1703 __apicdebuginit(void) print_local_APIC(void *dummy)
1704 {
1705 unsigned int v, ver, maxlvt;
1706 u64 icr;
1707
1708 if (apic_verbosity == APIC_QUIET)
1709 return;
1710
1711 printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
1712 smp_processor_id(), hard_smp_processor_id());
1713 v = apic_read(APIC_ID);
1714 printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, read_apic_id());
1715 v = apic_read(APIC_LVR);
1716 printk(KERN_INFO "... APIC VERSION: %08x\n", v);
1717 ver = GET_APIC_VERSION(v);
1718 maxlvt = lapic_get_maxlvt();
1719
1720 v = apic_read(APIC_TASKPRI);
1721 printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
1722
1723 if (APIC_INTEGRATED(ver)) { /* !82489DX */
1724 v = apic_read(APIC_ARBPRI);
1725 printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
1726 v & APIC_ARBPRI_MASK);
1727 v = apic_read(APIC_PROCPRI);
1728 printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
1729 }
1730
1731 v = apic_read(APIC_EOI);
1732 printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
1733 v = apic_read(APIC_RRR);
1734 printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
1735 v = apic_read(APIC_LDR);
1736 printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
1737 v = apic_read(APIC_DFR);
1738 printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
1739 v = apic_read(APIC_SPIV);
1740 printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
1741
1742 printk(KERN_DEBUG "... APIC ISR field:\n");
1743 print_APIC_bitfield(APIC_ISR);
1744 printk(KERN_DEBUG "... APIC TMR field:\n");
1745 print_APIC_bitfield(APIC_TMR);
1746 printk(KERN_DEBUG "... APIC IRR field:\n");
1747 print_APIC_bitfield(APIC_IRR);
1748
1749 if (APIC_INTEGRATED(ver)) { /* !82489DX */
1750 if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
1751 apic_write(APIC_ESR, 0);
1752
1753 v = apic_read(APIC_ESR);
1754 printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
1755 }
1756
1757 icr = apic_icr_read();
1758 printk(KERN_DEBUG "... APIC ICR: %08x\n", icr);
1759 printk(KERN_DEBUG "... APIC ICR2: %08x\n", icr >> 32);
1760
1761 v = apic_read(APIC_LVTT);
1762 printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
1763
1764 if (maxlvt > 3) { /* PC is LVT#4. */
1765 v = apic_read(APIC_LVTPC);
1766 printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
1767 }
1768 v = apic_read(APIC_LVT0);
1769 printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
1770 v = apic_read(APIC_LVT1);
1771 printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
1772
1773 if (maxlvt > 2) { /* ERR is LVT#3. */
1774 v = apic_read(APIC_LVTERR);
1775 printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
1776 }
1777
1778 v = apic_read(APIC_TMICT);
1779 printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
1780 v = apic_read(APIC_TMCCT);
1781 printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
1782 v = apic_read(APIC_TDCR);
1783 printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
1784 printk("\n");
1785 }
1786
1787 __apicdebuginit(void) print_all_local_APICs(void)
1788 {
1789 on_each_cpu(print_local_APIC, NULL, 1);
1790 }
1791
1792 __apicdebuginit(void) print_PIC(void)
1793 {
1794 unsigned int v;
1795 unsigned long flags;
1796
1797 if (apic_verbosity == APIC_QUIET)
1798 return;
1799
1800 printk(KERN_DEBUG "\nprinting PIC contents\n");
1801
1802 spin_lock_irqsave(&i8259A_lock, flags);
1803
1804 v = inb(0xa1) << 8 | inb(0x21);
1805 printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
1806
1807 v = inb(0xa0) << 8 | inb(0x20);
1808 printk(KERN_DEBUG "... PIC IRR: %04x\n", v);
1809
1810 outb(0x0b,0xa0);
1811 outb(0x0b,0x20);
1812 v = inb(0xa0) << 8 | inb(0x20);
1813 outb(0x0a,0xa0);
1814 outb(0x0a,0x20);
1815
1816 spin_unlock_irqrestore(&i8259A_lock, flags);
1817
1818 printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
1819
1820 v = inb(0x4d1) << 8 | inb(0x4d0);
1821 printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
1822 }
1823
1824 __apicdebuginit(int) print_all_ICs(void)
1825 {
1826 print_PIC();
1827 print_all_local_APICs();
1828 print_IO_APIC();
1829
1830 return 0;
1831 }
1832
1833 fs_initcall(print_all_ICs);
1834
1835
1836 /* Where, if anywhere, the i8259 is connected in external int mode */
1837 static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
1838
1839 void __init enable_IO_APIC(void)
1840 {
1841 union IO_APIC_reg_01 reg_01;
1842 int i8259_apic, i8259_pin;
1843 int apic;
1844 unsigned long flags;
1845
1846 #ifdef CONFIG_X86_32
1847 int i;
1848 if (!pirqs_enabled)
1849 for (i = 0; i < MAX_PIRQS; i++)
1850 pirq_entries[i] = -1;
1851 #endif
1852
1853 /*
1854 * The number of IO-APIC IRQ registers (== #pins):
1855 */
1856 for (apic = 0; apic < nr_ioapics; apic++) {
1857 spin_lock_irqsave(&ioapic_lock, flags);
1858 reg_01.raw = io_apic_read(apic, 1);
1859 spin_unlock_irqrestore(&ioapic_lock, flags);
1860 nr_ioapic_registers[apic] = reg_01.bits.entries+1;
1861 }
1862 for(apic = 0; apic < nr_ioapics; apic++) {
1863 int pin;
1864 /* See if any of the pins is in ExtINT mode */
1865 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
1866 struct IO_APIC_route_entry entry;
1867 entry = ioapic_read_entry(apic, pin);
1868
1869 /* If the interrupt line is enabled and in ExtInt mode
1870 * I have found the pin where the i8259 is connected.
1871 */
1872 if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
1873 ioapic_i8259.apic = apic;
1874 ioapic_i8259.pin = pin;
1875 goto found_i8259;
1876 }
1877 }
1878 }
1879 found_i8259:
1880 /* Look to see if the MP table has reported the ExtINT */
1881 /* If we could not find the appropriate pin by looking at the ioapic,
1882  * the i8259 probably is not connected to the ioapic, but give the
1883  * mptable a chance anyway.
1884 */
1885 i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
1886 i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
1887 /* Trust the MP table if nothing is setup in the hardware */
1888 if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
1889 printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
1890 ioapic_i8259.pin = i8259_pin;
1891 ioapic_i8259.apic = i8259_apic;
1892 }
1893 /* Complain if the MP table and the hardware disagree */
1894 if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
1895 (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
1896 {
1897 printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
1898 }
1899
1900 /*
1901 * Do not trust the IO-APIC being empty at bootup
1902 */
1903 clear_IO_APIC();
1904 }
1905
1906 /*
1907 * Not an __init, needed by the reboot code
1908 */
1909 void disable_IO_APIC(void)
1910 {
1911 /*
1912 * Clear the IO-APIC before rebooting:
1913 */
1914 clear_IO_APIC();
1915
1916 /*
1917 * If the i8259 is routed through an IOAPIC
1918 * Put that IOAPIC in virtual wire mode
1919 * so legacy interrupts can be delivered.
1920 */
1921 if (ioapic_i8259.pin != -1) {
1922 struct IO_APIC_route_entry entry;
1923
1924 memset(&entry, 0, sizeof(entry));
1925 entry.mask = 0; /* Enabled */
1926 entry.trigger = 0; /* Edge */
1927 entry.irr = 0;
1928 entry.polarity = 0; /* High */
1929 entry.delivery_status = 0;
1930 entry.dest_mode = 0; /* Physical */
1931 entry.delivery_mode = dest_ExtINT; /* ExtInt */
1932 entry.vector = 0;
1933 entry.dest = read_apic_id();
1934
1935 /*
1936 * Add it to the IO-APIC irq-routing table:
1937 */
1938 ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
1939 }
1940
1941 disconnect_bsp_APIC(ioapic_i8259.pin != -1);
1942 }
1943
1944 #ifdef CONFIG_X86_32
1945 /*
1946 * function to set the IO-APIC physical IDs based on the
1947 * values stored in the MPC table.
1948 *
1949 * by Matt Domsch <Matt_Domsch@dell.com> Tue Dec 21 12:25:05 CST 1999
1950 */
1951
1952 static void __init setup_ioapic_ids_from_mpc(void)
1953 {
1954 union IO_APIC_reg_00 reg_00;
1955 physid_mask_t phys_id_present_map;
1956 int apic;
1957 int i;
1958 unsigned char old_id;
1959 unsigned long flags;
1960
1961 if (x86_quirks->setup_ioapic_ids && x86_quirks->setup_ioapic_ids())
1962 return;
1963
1964 /*
1965 * Don't check I/O APIC IDs for xAPIC systems. They have
1966 * no meaning without the serial APIC bus.
1967 */
1968 if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
1969 || APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
1970 return;
1971 /*
1972 * This is broken; anything with a real cpu count has to
1973 * circumvent this idiocy regardless.
1974 */
1975 phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map);
1976
1977 /*
1978 * Set the IOAPIC ID to the value stored in the MPC table.
1979 */
1980 for (apic = 0; apic < nr_ioapics; apic++) {
1981
1982 /* Read the register 0 value */
1983 spin_lock_irqsave(&ioapic_lock, flags);
1984 reg_00.raw = io_apic_read(apic, 0);
1985 spin_unlock_irqrestore(&ioapic_lock, flags);
1986
1987 old_id = mp_ioapics[apic].mp_apicid;
1988
1989 if (mp_ioapics[apic].mp_apicid >= get_physical_broadcast()) {
1990 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
1991 apic, mp_ioapics[apic].mp_apicid);
1992 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
1993 reg_00.bits.ID);
1994 mp_ioapics[apic].mp_apicid = reg_00.bits.ID;
1995 }
1996
1997 /*
1998 * Sanity check, is the ID really free? Every APIC in a
1999 * system must have a unique ID or we get lots of nice
2000 * 'stuck on smp_invalidate_needed IPI wait' messages.
2001 */
2002 if (check_apicid_used(phys_id_present_map,
2003 mp_ioapics[apic].mp_apicid)) {
2004 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
2005 apic, mp_ioapics[apic].mp_apicid);
2006 for (i = 0; i < get_physical_broadcast(); i++)
2007 if (!physid_isset(i, phys_id_present_map))
2008 break;
2009 if (i >= get_physical_broadcast())
2010 panic("Max APIC ID exceeded!\n");
2011 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
2012 i);
2013 physid_set(i, phys_id_present_map);
2014 mp_ioapics[apic].mp_apicid = i;
2015 } else {
2016 physid_mask_t tmp;
2017 tmp = apicid_to_cpu_present(mp_ioapics[apic].mp_apicid);
2018 apic_printk(APIC_VERBOSE, "Setting %d in the "
2019 "phys_id_present_map\n",
2020 mp_ioapics[apic].mp_apicid);
2021 physids_or(phys_id_present_map, phys_id_present_map, tmp);
2022 }
2023
2024
2025 /*
2026 * We need to adjust the IRQ routing table
2027 * if the ID changed.
2028 */
2029 if (old_id != mp_ioapics[apic].mp_apicid)
2030 for (i = 0; i < mp_irq_entries; i++)
2031 if (mp_irqs[i].mp_dstapic == old_id)
2032 mp_irqs[i].mp_dstapic
2033 = mp_ioapics[apic].mp_apicid;
2034
2035 /*
2036 * Read the right value from the MPC table and
2037 * write it into the ID register.
2038 */
2039 apic_printk(APIC_VERBOSE, KERN_INFO
2040 "...changing IO-APIC physical APIC ID to %d ...",
2041 mp_ioapics[apic].mp_apicid);
2042
2043 reg_00.bits.ID = mp_ioapics[apic].mp_apicid;
2044 spin_lock_irqsave(&ioapic_lock, flags);
2045
2046 /*
2047 * Sanity check
2048 */
2049 spin_lock_irqsave(&ioapic_lock, flags);
2050 reg_00.raw = io_apic_read(apic, 0);
2051 spin_unlock_irqrestore(&ioapic_lock, flags);
2052 if (reg_00.bits.ID != mp_ioapics[apic].mp_apicid)
2053 printk("could not set ID!\n");
2054 else
2055 apic_printk(APIC_VERBOSE, " ok.\n");
2056 }
2057 }
2058 #endif
2059
2060 int no_timer_check __initdata;
2061
2062 static int __init notimercheck(char *s)
2063 {
2064 no_timer_check = 1;
2065 return 1;
2066 }
2067 __setup("no_timer_check", notimercheck);
2068
2069 /*
2070 * There is a nasty bug in some older SMP boards, their mptable lies
2071 * about the timer IRQ. We do the following to work around the situation:
2072 *
2073 * - timer IRQ defaults to IO-APIC IRQ
2074 * - if this function detects that timer IRQs are defunct, then we fall
2075 * back to ISA timer IRQs
2076 */
2077 static int __init timer_irq_works(void)
2078 {
2079 unsigned long t1 = jiffies;
2080 unsigned long flags;
2081
2082 if (no_timer_check)
2083 return 1;
2084
2085 local_save_flags(flags);
2086 local_irq_enable();
2087 /* Let ten ticks pass... */
2088 mdelay((10 * 1000) / HZ);
2089 local_irq_restore(flags);
2090
2091 /*
2092 * Expect a few ticks at least, to be sure some possible
2093 * glue logic does not lock up after the first one or
2094 * two ticks in a non-ExtINT mode. Also the local APIC
2095 * might have cached one ExtINT interrupt. Finally, at
2096 * least one tick may be lost due to delays.
2097 */
2098
2099 /* jiffies wrap? */
2100 if (time_after(jiffies, t1 + 4))
2101 return 1;
2102 return 0;
2103 }
2104
2105 /*
2106 * In the SMP+IOAPIC case it might happen that there is an unspecified
2107 * number of pending IRQ events left unhandled. These cases are very rare,
2108 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
2109 * better to do it this way, as we then do not have to be aware of
2110 * 'pending' interrupts in the IRQ path, except at this point.
2111 */
2112 /*
2113 * Edge triggered needs to resend any interrupt
2114 * that was delayed, but this is now handled in the
2115 * device-independent code.
2116 */
2117
2118 /*
2119 * Starting up an edge-triggered IO-APIC interrupt is
2120 * nasty - we need to make sure that we get the edge.
2121 * If it is already asserted for some reason, we need
2122 * to return 1 to indicate that it was pending.
2123 *
2124 * This is not complete - we should be able to fake
2125 * an edge even if it isn't on the 8259A...
2126 */
2127
2128 static unsigned int startup_ioapic_irq(unsigned int irq)
2129 {
2130 int was_pending = 0;
2131 unsigned long flags;
2132
2133 spin_lock_irqsave(&ioapic_lock, flags);
2134 if (irq < 16) {
2135 disable_8259A_irq(irq);
2136 if (i8259A_irq_pending(irq))
2137 was_pending = 1;
2138 }
2139 __unmask_IO_APIC_irq(irq);
2140 spin_unlock_irqrestore(&ioapic_lock, flags);
2141
2142 return was_pending;
2143 }
2144
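/*
 * Retrigger a lost edge by re-sending the irq's vector as an IPI:
 * to the first CPU in the irq's domain on 64-bit, as a self-IPI on 32-bit.
 */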
2145 #ifdef CONFIG_X86_64
2146 static int ioapic_retrigger_irq(unsigned int irq)
2147 {
2148
2149 struct irq_cfg *cfg = irq_cfg(irq);
2150 unsigned long flags;
2151
2152 spin_lock_irqsave(&vector_lock, flags);
2153 send_IPI_mask(cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector);
2154 spin_unlock_irqrestore(&vector_lock, flags);
2155
2156 return 1;
2157 }
2158 #else
2159 static int ioapic_retrigger_irq(unsigned int irq)
2160 {
2161 send_IPI_self(irq_cfg(irq)->vector);
2162
2163 return 1;
2164 }
2165 #endif
2166
2167 /*
2168 * Level and edge triggered IO-APIC interrupts need different handling,
2169 * so we use two separate IRQ descriptors. Edge triggered IRQs can be
2170 * handled with the level-triggered descriptor, but that one has slightly
2171 * more overhead. Level-triggered interrupts cannot be handled with the
2172 * edge-triggered handler, without risking IRQ storms and other ugly
2173 * races.
2174 */
2175
2176 #ifdef CONFIG_SMP
2177
2178 #ifdef CONFIG_INTR_REMAP
2179 static void ir_irq_migration(struct work_struct *work);
2180
2181 static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration);
2182
2183 /*
2184 * Migrate the IO-APIC irq in the presence of intr-remapping.
2185 *
2186 * For edge triggered, irq migration is a simple atomic update (of vector
2187 * and cpu destination) of the IRTE, plus a flush of the hardware cache.
2188 *
2189 * For level triggered, we also need to modify the io-apic RTE with the updated
2190 * vector information, along with modifying the IRTE with vector and destination.
2191 * So irq migration for level triggered is a little more complex compared to
2192 * edge triggered migration. But the good news is, we use the same algorithm
2193 * for level triggered migration as we have today, the only difference being
2194 * that we now initiate the irq migration from process context instead of
2195 * interrupt context.
2196 *
2197 * In the future, when we do a directed EOI (combined with cpu EOI broadcast
2198 * suppression) to the IO-APIC, level triggered irq migration will also be
2199 * as simple as edge triggered migration and we can do the irq migration
2200 * with a simple atomic update to IO-APIC RTE.
2201 */
2202 static void migrate_ioapic_irq(int irq, cpumask_t mask)
2203 {
2204 struct irq_cfg *cfg;
2205 struct irq_desc *desc;
2206 cpumask_t tmp, cleanup_mask;
2207 struct irte irte;
2208 int modify_ioapic_rte;
2209 unsigned int dest;
2210 unsigned long flags;
2211
2212 cpus_and(tmp, mask, cpu_online_map);
2213 if (cpus_empty(tmp))
2214 return;
2215
2216 if (get_irte(irq, &irte))
2217 return;
2218
2219 if (assign_irq_vector(irq, mask))
2220 return;
2221
2222 cfg = irq_cfg(irq);
2223 cpus_and(tmp, cfg->domain, mask);
2224 dest = cpu_mask_to_apicid(tmp);
2225
2226 desc = irq_to_desc(irq);
2227 modify_ioapic_rte = desc->status & IRQ_LEVEL;
2228 if (modify_ioapic_rte) {
2229 spin_lock_irqsave(&ioapic_lock, flags);
2230 __target_IO_APIC_irq(irq, dest, cfg->vector);
2231 spin_unlock_irqrestore(&ioapic_lock, flags);
2232 }
2233
2234 irte.vector = cfg->vector;
2235 irte.dest_id = IRTE_DEST(dest);
2236
2237 /*
2238 * Modify the IRTE and flush the interrupt entry cache.
2239 */
2240 modify_irte(irq, &irte);
2241
2242 if (cfg->move_in_progress) {
2243 cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
2244 cfg->move_cleanup_count = cpus_weight(cleanup_mask);
2245 send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
2246 cfg->move_in_progress = 0;
2247 }
2248
2249 desc->affinity = mask;
2250 }
2251
2252 static int migrate_irq_remapped_level(int irq)
2253 {
2254 int ret = -1;
2255 struct irq_desc *desc = irq_to_desc(irq);
2256
2257 mask_IO_APIC_irq(irq);
2258
2259 if (io_apic_level_ack_pending(irq)) {
2260 /*
2261 * Interrupt in progress. Migrating irq now will change the
2262 * vector information in the IO-APIC RTE and that will confuse
2263 * the EOI broadcast performed by the cpu.
2264 * So, delay the irq migration to the next instance.
2265 */
2266 schedule_delayed_work(&ir_migration_work, 1);
2267 goto unmask;
2268 }
2269
2270 /* everything is clear, we have right of way */
2271 migrate_ioapic_irq(irq, desc->pending_mask);
2272
2273 ret = 0;
2274 desc->status &= ~IRQ_MOVE_PENDING;
2275 cpus_clear(desc->pending_mask);
2276
2277 unmask:
2278 unmask_IO_APIC_irq(irq);
2279 return ret;
2280 }
2281
2282 static void ir_irq_migration(struct work_struct *work)
2283 {
2284 unsigned int irq;
2285 struct irq_desc *desc;
2286
2287 for_each_irq_desc(irq, desc) {
2288 if (desc->status & IRQ_MOVE_PENDING) {
2289 unsigned long flags;
2290
2291 spin_lock_irqsave(&desc->lock, flags);
2292 if (!desc->chip->set_affinity ||
2293 !(desc->status & IRQ_MOVE_PENDING)) {
2294 desc->status &= ~IRQ_MOVE_PENDING;
2295 spin_unlock_irqrestore(&desc->lock, flags);
2296 continue;
2297 }
2298
2299 desc->chip->set_affinity(irq, desc->pending_mask);
2300 spin_unlock_irqrestore(&desc->lock, flags);
2301 }
2302 }
2303 }
2304
2305 /*
2306 * Migrates the IRQ destination in the process context.
2307 */
2308 static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
2309 {
2310 struct irq_desc *desc = irq_to_desc(irq);
2311
2312 if (desc->status & IRQ_LEVEL) {
2313 desc->status |= IRQ_MOVE_PENDING;
2314 desc->pending_mask = mask;
2315 migrate_irq_remapped_level(irq);
2316 return;
2317 }
2318
2319 migrate_ioapic_irq(irq, mask);
2320 }
2321 #endif
2322
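/*
 * IRQ_MOVE_CLEANUP_VECTOR handler: after an irq has been migrated away
 * from this CPU, walk this CPU's vector_irq[] table and release the
 * vectors that the irq no longer uses here.
 */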
2323 asmlinkage void smp_irq_move_cleanup_interrupt(void)
2324 {
2325 unsigned vector, me;
2326 ack_APIC_irq();
2327 #ifdef CONFIG_X86_64
2328 exit_idle();
2329 #endif
2330 irq_enter();
2331
2332 me = smp_processor_id();
2333 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
2334 unsigned int irq;
2335 struct irq_desc *desc;
2336 struct irq_cfg *cfg;
2337 irq = __get_cpu_var(vector_irq)[vector];
2338
2339 desc = irq_to_desc(irq);
2340 if (!desc)
2341 continue;
2342
2343 cfg = irq_cfg(irq);
2344 spin_lock(&desc->lock);
2345 if (!cfg->move_cleanup_count)
2346 goto unlock;
2347
2348 if ((vector == cfg->vector) && cpu_isset(me, cfg->domain))
2349 goto unlock;
2350
2351 __get_cpu_var(vector_irq)[vector] = -1;
2352 cfg->move_cleanup_count--;
2353 unlock:
2354 spin_unlock(&desc->lock);
2355 }
2356
2357 irq_exit();
2358 }
2359
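/*
 * Called from the interrupt ack paths: once the first interrupt arrives
 * on the new vector/CPU after a migration, send the cleanup IPI to the
 * CPUs of the old domain.
 */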
2360 static void irq_complete_move(unsigned int irq)
2361 {
2362 struct irq_cfg *cfg = irq_cfg(irq);
2363 unsigned vector, me;
2364
2365 if (likely(!cfg->move_in_progress))
2366 return;
2367
2368 vector = ~get_irq_regs()->orig_ax;
2369 me = smp_processor_id();
2370 if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) {
2371 cpumask_t cleanup_mask;
2372
2373 cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
2374 cfg->move_cleanup_count = cpus_weight(cleanup_mask);
2375 send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
2376 cfg->move_in_progress = 0;
2377 }
2378 }
2379 #else
2380 static inline void irq_complete_move(unsigned int irq) {}
2381 #endif
2382 #ifdef CONFIG_INTR_REMAP
2383 static void ack_x2apic_level(unsigned int irq)
2384 {
2385 ack_x2APIC_irq();
2386 }
2387
2388 static void ack_x2apic_edge(unsigned int irq)
2389 {
2390 ack_x2APIC_irq();
2391 }
2392 #endif
2393
2394 static void ack_apic_edge(unsigned int irq)
2395 {
2396 irq_complete_move(irq);
2397 move_native_irq(irq);
2398 ack_APIC_irq();
2399 }
2400
2401 #ifdef CONFIG_X86_64
2402 static void ack_apic_level(unsigned int irq)
2403 {
2404 int do_unmask_irq = 0;
2405
2406 irq_complete_move(irq);
2407 #ifdef CONFIG_GENERIC_PENDING_IRQ
2408 /* If we are moving the irq we need to mask it */
2409 if (unlikely(irq_to_desc(irq)->status & IRQ_MOVE_PENDING)) {
2410 do_unmask_irq = 1;
2411 mask_IO_APIC_irq(irq);
2412 }
2413 #endif
2414
2415 /*
2416 * We must acknowledge the irq before we move it or the acknowledge will
2417 * not propagate properly.
2418 */
2419 ack_APIC_irq();
2420
2421 /* Now we can move and re-enable the irq */
2422 if (unlikely(do_unmask_irq)) {
2423 /* Only migrate the irq if the ack has been received.
2424 *
2425 * On rare occasions the broadcast level triggered ack gets
2426 * delayed going to ioapics, and if we reprogram the
2427 * vector while Remote IRR is still set the irq will never
2428 * fire again.
2429 *
2430 * To prevent this scenario we read the Remote IRR bit
2431 * of the ioapic. This has two effects.
2432 * - On any sane system the read of the ioapic will
2433 * flush writes (and acks) going to the ioapic from
2434 * this cpu.
2435 * - We get to see if the ACK has actually been delivered.
2436 *
2437 * Based on failed experiments reprogramming the
2438 * ioapic entry from outside of irq context (starting
2439 * with masking the ioapic entry and then polling until
2440 * Remote IRR was clear before reprogramming the
2441 * ioapic), I don't trust the Remote IRR bit to be
2442 * completely accurate.
2443 *
2444 * However there appears to be no other way to plug
2445 * this race, so if the Remote IRR bit is not
2446 * accurate and is causing problems then it is a hardware bug
2447 * and you can go talk to the chipset vendor about it.
2448 */
2449 if (!io_apic_level_ack_pending(irq))
2450 move_masked_irq(irq);
2451 unmask_IO_APIC_irq(irq);
2452 }
2453 }
2454 #else
2455 atomic_t irq_mis_count;
2456 static void ack_apic_level(unsigned int irq)
2457 {
2458 unsigned long v;
2459 int i;
2460
2461 irq_complete_move(irq);
2462 move_native_irq(irq);
2463 /*
2464 * It appears there is an erratum which affects at least version 0x11
2465 * of I/O APIC (that's the 82093AA and cores integrated into various
2466 * chipsets). Under certain conditions a level-triggered interrupt is
2467 * erroneously delivered as edge-triggered one but the respective IRR
2468 * bit gets set nevertheless. As a result the I/O unit expects an EOI
2469 * message but it will never arrive and further interrupts are blocked
2470 * from the source. The exact reason is so far unknown, but the
2471 * phenomenon was observed when two consecutive interrupt requests
2472 * from a given source get delivered to the same CPU and the source is
2473 * temporarily disabled in between.
2474 *
2475 * A workaround is to simulate an EOI message manually. We achieve it
2476 * by setting the trigger mode to edge and then to level when the edge
2477 * trigger mode gets detected in the TMR of a local APIC for a
2478 * level-triggered interrupt. We mask the source for the time of the
2479 * operation to prevent an edge-triggered interrupt escaping meanwhile.
2480 * The idea is from Manfred Spraul. --macro
2481 */
2482 i = irq_cfg(irq)->vector;
2483
2484 v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
2485
2486 ack_APIC_irq();
2487
2488 if (!(v & (1 << (i & 0x1f)))) {
2489 atomic_inc(&irq_mis_count);
2490 spin_lock(&ioapic_lock);
2491 __mask_and_edge_IO_APIC_irq(irq);
2492 __unmask_and_level_IO_APIC_irq(irq);
2493 spin_unlock(&ioapic_lock);
2494 }
2495 }
2496 #endif
2497
2498 static struct irq_chip ioapic_chip __read_mostly = {
2499 .name = "IO-APIC",
2500 .startup = startup_ioapic_irq,
2501 .mask = mask_IO_APIC_irq,
2502 .unmask = unmask_IO_APIC_irq,
2503 .ack = ack_apic_edge,
2504 .eoi = ack_apic_level,
2505 #ifdef CONFIG_SMP
2506 .set_affinity = set_ioapic_affinity_irq,
2507 #endif
2508 .retrigger = ioapic_retrigger_irq,
2509 };
2510
2511 #ifdef CONFIG_INTR_REMAP
2512 static struct irq_chip ir_ioapic_chip __read_mostly = {
2513 .name = "IR-IO-APIC",
2514 .startup = startup_ioapic_irq,
2515 .mask = mask_IO_APIC_irq,
2516 .unmask = unmask_IO_APIC_irq,
2517 .ack = ack_x2apic_edge,
2518 .eoi = ack_x2apic_level,
2519 #ifdef CONFIG_SMP
2520 .set_affinity = set_ir_ioapic_affinity_irq,
2521 #endif
2522 .retrigger = ioapic_retrigger_irq,
2523 };
2524 #endif
2525
2526 static inline void init_IO_APIC_traps(void)
2527 {
2528 int irq;
2529 struct irq_desc *desc;
2530 struct irq_cfg *cfg;
2531
2532 /*
2533 * NOTE! The local APIC isn't very good at handling
2534 * multiple interrupts at the same interrupt level.
2535 * As the interrupt level is determined by taking the
2536 * vector number and shifting that right by 4, we
2537 * want to spread these out a bit so that they don't
2538 * all fall in the same interrupt level.
2539 *
2540 * Also, we've got to be careful not to trash gate
2541 * 0x80, because int 0x80 is hm, kind of importantish. ;)
2542 */
2543 for_each_irq_cfg(cfg) {
2544 irq = cfg->irq;
2545 if (IO_APIC_IRQ(irq) && !cfg->vector) {
2546 /*
2547 * Hmm.. We don't have an entry for this,
2548 * so default to an old-fashioned 8259
2549 * interrupt if we can..
2550 */
2551 if (irq < 16)
2552 make_8259A_irq(irq);
2553 else {
2554 desc = irq_to_desc(irq);
2555 /* Strange. Oh, well.. */
2556 desc->chip = &no_irq_chip;
2557 }
2558 }
2559 }
2560 }
2561
2562 /*
2563 * The local APIC irq-chip implementation:
2564 */
2565
2566 static void mask_lapic_irq(unsigned int irq)
2567 {
2568 unsigned long v;
2569
2570 v = apic_read(APIC_LVT0);
2571 apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
2572 }
2573
2574 static void unmask_lapic_irq(unsigned int irq)
2575 {
2576 unsigned long v;
2577
2578 v = apic_read(APIC_LVT0);
2579 apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
2580 }
2581
2582 static void ack_lapic_irq (unsigned int irq)
2583 {
2584 ack_APIC_irq();
2585 }
2586
2587 static struct irq_chip lapic_chip __read_mostly = {
2588 .name = "local-APIC",
2589 .mask = mask_lapic_irq,
2590 .unmask = unmask_lapic_irq,
2591 .ack = ack_lapic_irq,
2592 };
2593
2594 static void lapic_register_intr(int irq)
2595 {
2596 struct irq_desc *desc;
2597
2598 desc = irq_to_desc(irq);
2599 desc->status &= ~IRQ_LEVEL;
2600 set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
2601 "edge");
2602 }
2603
2604 static void __init setup_nmi(void)
2605 {
2606 /*
2607 * Dirty trick to enable the NMI watchdog ...
2608 * We put the 8259A master into AEOI mode and
2609 * unmask LVT0 as NMI on all local APICs.
2610 *
2611 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
2612 * is from Maciej W. Rozycki - so we do not have to EOI from
2613 * the NMI handler or the timer interrupt.
2614 */
2615 apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");
2616
2617 enable_NMI_through_LVT0();
2618
2619 apic_printk(APIC_VERBOSE, " done.\n");
2620 }
2621
2622 /*
2623 * This looks a bit hackish but it's about the only way of sending
2624 * a few INTA cycles to 8259As and any associated glue logic. ICR does
2625 * not support the ExtINT mode, unfortunately. We need to send these
2626 * cycles as some i82489DX-based boards have glue logic that keeps the
2627 * 8259A interrupt line asserted until INTA. --macro
2628 */
2629 static inline void __init unlock_ExtINT_logic(void)
2630 {
2631 int apic, pin, i;
2632 struct IO_APIC_route_entry entry0, entry1;
2633 unsigned char save_control, save_freq_select;
2634
2635 pin = find_isa_irq_pin(8, mp_INT);
2636 if (pin == -1) {
2637 WARN_ON_ONCE(1);
2638 return;
2639 }
2640 apic = find_isa_irq_apic(8, mp_INT);
2641 if (apic == -1) {
2642 WARN_ON_ONCE(1);
2643 return;
2644 }
2645
2646 entry0 = ioapic_read_entry(apic, pin);
2647 clear_IO_APIC_pin(apic, pin);
2648
2649 memset(&entry1, 0, sizeof(entry1));
2650
2651 entry1.dest_mode = 0; /* physical delivery */
2652 entry1.mask = 0; /* unmask IRQ now */
2653 entry1.dest = hard_smp_processor_id();
2654 entry1.delivery_mode = dest_ExtINT;
2655 entry1.polarity = entry0.polarity;
2656 entry1.trigger = 0;
2657 entry1.vector = 0;
2658
2659 ioapic_write_entry(apic, pin, entry1);
2660
2661 save_control = CMOS_READ(RTC_CONTROL);
2662 save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
2663 CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
2664 RTC_FREQ_SELECT);
2665 CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
2666
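/*
 * The RTC is now programmed for a ~1 kHz periodic interrupt (rate
 * select 6, PIE set); wait up to a second, leaving early once RTC_PF
 * shows the periodic interrupts - and hence the INTA cycles - are flowing.
 */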
2667 i = 100;
2668 while (i-- > 0) {
2669 mdelay(10);
2670 if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
2671 i -= 10;
2672 }
2673
2674 CMOS_WRITE(save_control, RTC_CONTROL);
2675 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
2676 clear_IO_APIC_pin(apic, pin);
2677
2678 ioapic_write_entry(apic, pin, entry0);
2679 }
2680
2681 static int disable_timer_pin_1 __initdata;
2682 /* Actually the next is obsolete, but keep it for paranoid reasons -AK */
2683 static int __init disable_timer_pin_setup(char *arg)
2684 {
2685 disable_timer_pin_1 = 1;
2686 return 0;
2687 }
2688 early_param("disable_timer_pin_1", disable_timer_pin_setup);
2689
2690 int timer_through_8259 __initdata;
2691
2692 /*
2693 * This code may look a bit paranoid, but it's supposed to cooperate with
2694 * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
2695 * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
2696 * fanatically on his truly buggy board.
2697 *
2698 * FIXME: really need to revamp this for all platforms.
2699 */
2700 static inline void __init check_timer(void)
2701 {
2702 struct irq_cfg *cfg = irq_cfg(0);
2703 int apic1, pin1, apic2, pin2;
2704 unsigned long flags;
2705 unsigned int ver;
2706 int no_pin1 = 0;
2707
2708 local_irq_save(flags);
2709
2710 ver = apic_read(APIC_LVR);
2711 ver = GET_APIC_VERSION(ver);
2712
2713 /*
2714 * get/set the timer IRQ vector:
2715 */
2716 disable_8259A_irq(0);
2717 assign_irq_vector(0, TARGET_CPUS);
2718
2719 /*
2720 * As IRQ0 is to be enabled in the 8259A, the virtual
2721 * wire has to be disabled in the local APIC. Also
2722 * timer interrupts need to be acknowledged manually in
2723 * the 8259A for the i82489DX when using the NMI
2724 * watchdog as that APIC treats NMIs as level-triggered.
2725 * The AEOI mode will finish them in the 8259A
2726 * automatically.
2727 */
2728 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
2729 init_8259A(1);
2730 #ifdef CONFIG_X86_32
2731 timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver));
2732 #endif
2733
2734 pin1 = find_isa_irq_pin(0, mp_INT);
2735 apic1 = find_isa_irq_apic(0, mp_INT);
2736 pin2 = ioapic_i8259.pin;
2737 apic2 = ioapic_i8259.apic;
2738
2739 apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
2740 "apic1=%d pin1=%d apic2=%d pin2=%d\n",
2741 cfg->vector, apic1, pin1, apic2, pin2);
2742
2743 /*
2744 * Some BIOS writers are clueless and report the ExtINTA
2745 * I/O APIC input from the cascaded 8259A as the timer
2746 * interrupt input. So just in case, if only one pin
2747 * was found above, try it both directly and through the
2748 * 8259A.
2749 */
2750 if (pin1 == -1) {
2751 #ifdef CONFIG_INTR_REMAP
2752 if (intr_remapping_enabled)
2753 panic("BIOS bug: timer not connected to IO-APIC");
2754 #endif
2755 pin1 = pin2;
2756 apic1 = apic2;
2757 no_pin1 = 1;
2758 } else if (pin2 == -1) {
2759 pin2 = pin1;
2760 apic2 = apic1;
2761 }
2762
2763 if (pin1 != -1) {
2764 /*
2765 * Ok, does IRQ0 through the IOAPIC work?
2766 */
2767 if (no_pin1) {
2768 add_pin_to_irq(0, apic1, pin1);
2769 setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
2770 }
2771 unmask_IO_APIC_irq(0);
2772 if (timer_irq_works()) {
2773 if (nmi_watchdog == NMI_IO_APIC) {
2774 setup_nmi();
2775 enable_8259A_irq(0);
2776 }
2777 if (disable_timer_pin_1 > 0)
2778 clear_IO_APIC_pin(0, pin1);
2779 goto out;
2780 }
2781 #ifdef CONFIG_INTR_REMAP
2782 if (intr_remapping_enabled)
2783 panic("timer doesn't work through Interrupt-remapped IO-APIC");
2784 #endif
2785 clear_IO_APIC_pin(apic1, pin1);
2786 if (!no_pin1)
2787 apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
2788 "8254 timer not connected to IO-APIC\n");
2789
2790 apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
2791 "(IRQ0) through the 8259A ...\n");
2792 apic_printk(APIC_QUIET, KERN_INFO
2793 "..... (found apic %d pin %d) ...\n", apic2, pin2);
2794 /*
2795 * legacy devices should be connected to IO APIC #0
2796 */
2797 replace_pin_at_irq(0, apic1, pin1, apic2, pin2);
2798 setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
2799 unmask_IO_APIC_irq(0);
2800 enable_8259A_irq(0);
2801 if (timer_irq_works()) {
2802 apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
2803 timer_through_8259 = 1;
2804 if (nmi_watchdog == NMI_IO_APIC) {
2805 disable_8259A_irq(0);
2806 setup_nmi();
2807 enable_8259A_irq(0);
2808 }
2809 goto out;
2810 }
2811 /*
2812 * Cleanup, just in case ...
2813 */
2814 disable_8259A_irq(0);
2815 clear_IO_APIC_pin(apic2, pin2);
2816 apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
2817 }
2818
2819 if (nmi_watchdog == NMI_IO_APIC) {
2820 apic_printk(APIC_QUIET, KERN_WARNING "timer doesn't work "
2821 "through the IO-APIC - disabling NMI Watchdog!\n");
2822 nmi_watchdog = NMI_NONE;
2823 }
2824 #ifdef CONFIG_X86_32
2825 timer_ack = 0;
2826 #endif
2827
2828 apic_printk(APIC_QUIET, KERN_INFO
2829 "...trying to set up timer as Virtual Wire IRQ...\n");
2830
2831 lapic_register_intr(0);
2832 apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */
2833 enable_8259A_irq(0);
2834
2835 if (timer_irq_works()) {
2836 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
2837 goto out;
2838 }
2839 disable_8259A_irq(0);
2840 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
2841 apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");
2842
2843 apic_printk(APIC_QUIET, KERN_INFO
2844 "...trying to set up timer as ExtINT IRQ...\n");
2845
2846 init_8259A(0);
2847 make_8259A_irq(0);
2848 apic_write(APIC_LVT0, APIC_DM_EXTINT);
2849
2850 unlock_ExtINT_logic();
2851
2852 if (timer_irq_works()) {
2853 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
2854 goto out;
2855 }
2856 apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
2857 panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
2858 "report. Then try booting with the 'noapic' option.\n");
2859 out:
2860 local_irq_restore(flags);
2861 }
2862
2863 /*
2864 * Traditionally ISA IRQ2 is the cascade IRQ, and is not available
2865 * to devices. However there may be an I/O APIC pin available for
2866 * this interrupt regardless. The pin may be left unconnected, but
2867 * typically it will be reused as an ExtINT cascade interrupt for
2868 * the master 8259A. In the MPS case such a pin will normally be
2869 * reported as an ExtINT interrupt in the MP table. With ACPI
2870 * there is no provision for ExtINT interrupts, and in the absence
2871 * of an override it would be treated as an ordinary ISA I/O APIC
2872 * interrupt, that is edge-triggered and unmasked by default. We
2873 * used to do this, but it caused problems on some systems because
2874 * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using
2875 * the same ExtINT cascade interrupt to drive the local APIC of the
2876 * bootstrap processor. Therefore we refrain from routing IRQ2 to
2877 * the I/O APIC in all cases now. No actual device should request
2878 * it anyway. --macro
2879 */
2880 #define PIC_IRQS (1 << PIC_CASCADE_IR)
2881
2882 void __init setup_IO_APIC(void)
2883 {
2884
2885 #ifdef CONFIG_X86_32
2886 enable_IO_APIC();
2887 #else
2888 /*
2889 * calling enable_IO_APIC() is moved to setup_local_APIC for BP
2890 */
2891 #endif
2892
2893 io_apic_irqs = ~PIC_IRQS;
2894
2895 apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
2896 /*
2897 * Set up IO-APIC IRQ routing.
2898 */
2899 #ifdef CONFIG_X86_32
2900 if (!acpi_ioapic)
2901 setup_ioapic_ids_from_mpc();
2902 #endif
2903 sync_Arb_IDs();
2904 setup_IO_APIC_irqs();
2905 init_IO_APIC_traps();
2906 check_timer();
2907 }
2908
2909 /*
2910 * Called after all the initialization is done. If we didn't find any
2911 * APIC bugs then we can allow the modify fast path
2912 */
2913
2914 static int __init io_apic_bug_finalize(void)
2915 {
2916 if (sis_apic_bug == -1)
2917 sis_apic_bug = 0;
2918 return 0;
2919 }
2920
2921 late_initcall(io_apic_bug_finalize);
2922
2923 struct sysfs_ioapic_data {
2924 struct sys_device dev;
2925 struct IO_APIC_route_entry entry[0];
2926 };
2927 static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS];
2928
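/*
 * Suspend saves every redirection table entry of each IO-APIC into
 * mp_ioapic_data[]; resume restores the APIC ID (in case the firmware
 * changed it) and rewrites the saved entries.
 */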
2929 static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
2930 {
2931 struct IO_APIC_route_entry *entry;
2932 struct sysfs_ioapic_data *data;
2933 int i;
2934
2935 data = container_of(dev, struct sysfs_ioapic_data, dev);
2936 entry = data->entry;
2937 for (i = 0; i < nr_ioapic_registers[dev->id]; i++, entry++)
2938 *entry = ioapic_read_entry(dev->id, i);
2939
2940 return 0;
2941 }
2942
2943 static int ioapic_resume(struct sys_device *dev)
2944 {
2945 struct IO_APIC_route_entry *entry;
2946 struct sysfs_ioapic_data *data;
2947 unsigned long flags;
2948 union IO_APIC_reg_00 reg_00;
2949 int i;
2950
2951 data = container_of(dev, struct sysfs_ioapic_data, dev);
2952 entry = data->entry;
2953
2954 spin_lock_irqsave(&ioapic_lock, flags);
2955 reg_00.raw = io_apic_read(dev->id, 0);
2956 if (reg_00.bits.ID != mp_ioapics[dev->id].mp_apicid) {
2957 reg_00.bits.ID = mp_ioapics[dev->id].mp_apicid;
2958 io_apic_write(dev->id, 0, reg_00.raw);
2959 }
2960 spin_unlock_irqrestore(&ioapic_lock, flags);
2961 for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
2962 ioapic_write_entry(dev->id, i, entry[i]);
2963
2964 return 0;
2965 }
2966
2967 static struct sysdev_class ioapic_sysdev_class = {
2968 .name = "ioapic",
2969 .suspend = ioapic_suspend,
2970 .resume = ioapic_resume,
2971 };
2972
2973 static int __init ioapic_init_sysfs(void)
2974 {
2975 struct sys_device * dev;
2976 int i, size, error;
2977
2978 error = sysdev_class_register(&ioapic_sysdev_class);
2979 if (error)
2980 return error;
2981
2982 for (i = 0; i < nr_ioapics; i++ ) {
2983 size = sizeof(struct sys_device) + nr_ioapic_registers[i]
2984 * sizeof(struct IO_APIC_route_entry);
2985 mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL);
2986 if (!mp_ioapic_data[i]) {
2987 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
2988 continue;
2989 }
2990 dev = &mp_ioapic_data[i]->dev;
2991 dev->id = i;
2992 dev->cls = &ioapic_sysdev_class;
2993 error = sysdev_register(dev);
2994 if (error) {
2995 kfree(mp_ioapic_data[i]);
2996 mp_ioapic_data[i] = NULL;
2997 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
2998 continue;
2999 }
3000 }
3001
3002 return 0;
3003 }
3004
3005 device_initcall(ioapic_init_sysfs);
3006
3007 /*
3008 * Dynamic irq allocate and deallocation
3009 */
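/*
 * create_irq_nr() scans downwards from 'irq_want' for the first
 * non-legacy irq with no vector assigned, allocating an irq_cfg for it
 * if needed and trying to assign it a vector; it returns 0 on failure.
 */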
3010 unsigned int create_irq_nr(unsigned int irq_want)
3011 {
3012 /* Allocate an unused irq */
3013 unsigned int irq;
3014 unsigned int new;
3015 unsigned long flags;
3016 struct irq_cfg *cfg_new;
3017
3018 #ifndef CONFIG_HAVE_SPARSE_IRQ
3019 irq_want = nr_irqs - 1;
3020 #endif
3021
3022 irq = 0;
3023 spin_lock_irqsave(&vector_lock, flags);
3024 for (new = irq_want; new > 0; new--) {
3025 if (platform_legacy_irq(new))
3026 continue;
3027 cfg_new = irq_cfg(new);
3028 if (cfg_new && cfg_new->vector != 0)
3029 continue;
3030 /* check if need to create one */
3031 if (!cfg_new)
3032 cfg_new = irq_cfg_alloc(new);
3033 if (__assign_irq_vector(new, TARGET_CPUS) == 0)
3034 irq = new;
3035 break;
3036 }
3037 spin_unlock_irqrestore(&vector_lock, flags);
3038
3039 if (irq > 0) {
3040 dynamic_irq_init(irq);
3041 }
3042 return irq;
3043 }
3044
3045 int create_irq(void)
3046 {
3047 int irq;
3048
3049 irq = create_irq_nr(nr_irqs - 1);
3050
3051 if (irq == 0)
3052 irq = -1;
3053
3054 return irq;
3055 }
3056
3057 void destroy_irq(unsigned int irq)
3058 {
3059 unsigned long flags;
3060
3061 dynamic_irq_cleanup(irq);
3062
3063 #ifdef CONFIG_INTR_REMAP
3064 free_irte(irq);
3065 #endif
3066 spin_lock_irqsave(&vector_lock, flags);
3067 __clear_irq_vector(irq);
3068 spin_unlock_irqrestore(&vector_lock, flags);
3069 }
3070
3071 /*
3072 * MSI message composition
3073 */
3074 #ifdef CONFIG_PCI_MSI
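/*
 * Compose the MSI address/data pair for 'irq': assign a vector, derive
 * the destination APIC ID from the resulting domain, and encode both
 * either via the IRTE (when the irq is remapped) or directly in the
 * message.
 */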
3075 static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
3076 {
3077 struct irq_cfg *cfg;
3078 int err;
3079 unsigned dest;
3080 cpumask_t tmp;
3081
3082 tmp = TARGET_CPUS;
3083 err = assign_irq_vector(irq, tmp);
3084 if (err)
3085 return err;
3086
3087 cfg = irq_cfg(irq);
3088 cpus_and(tmp, cfg->domain, tmp);
3089 dest = cpu_mask_to_apicid(tmp);
3090
3091 #ifdef CONFIG_INTR_REMAP
3092 if (irq_remapped(irq)) {
3093 struct irte irte;
3094 int ir_index;
3095 u16 sub_handle;
3096
3097 ir_index = map_irq_to_irte_handle(irq, &sub_handle);
3098 BUG_ON(ir_index == -1);
3099
3100 memset (&irte, 0, sizeof(irte));
3101
3102 irte.present = 1;
3103 irte.dst_mode = INT_DEST_MODE;
3104 irte.trigger_mode = 0; /* edge */
3105 irte.dlvry_mode = INT_DELIVERY_MODE;
3106 irte.vector = cfg->vector;
3107 irte.dest_id = IRTE_DEST(dest);
3108
3109 modify_irte(irq, &irte);
3110
3111 msg->address_hi = MSI_ADDR_BASE_HI;
3112 msg->data = sub_handle;
3113 msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
3114 MSI_ADDR_IR_SHV |
3115 MSI_ADDR_IR_INDEX1(ir_index) |
3116 MSI_ADDR_IR_INDEX2(ir_index);
3117 } else
3118 #endif
3119 {
3120 msg->address_hi = MSI_ADDR_BASE_HI;
3121 msg->address_lo =
3122 MSI_ADDR_BASE_LO |
3123 ((INT_DEST_MODE == 0) ?
3124 MSI_ADDR_DEST_MODE_PHYSICAL:
3125 MSI_ADDR_DEST_MODE_LOGICAL) |
3126 ((INT_DELIVERY_MODE != dest_LowestPrio) ?
3127 MSI_ADDR_REDIRECTION_CPU:
3128 MSI_ADDR_REDIRECTION_LOWPRI) |
3129 MSI_ADDR_DEST_ID(dest);
3130
3131 msg->data =
3132 MSI_DATA_TRIGGER_EDGE |
3133 MSI_DATA_LEVEL_ASSERT |
3134 ((INT_DELIVERY_MODE != dest_LowestPrio) ?
3135 MSI_DATA_DELIVERY_FIXED:
3136 MSI_DATA_DELIVERY_LOWPRI) |
3137 MSI_DATA_VECTOR(cfg->vector);
3138 }
3139 return err;
3140 }
3141
3142 #ifdef CONFIG_SMP
3143 static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
3144 {
3145 struct irq_cfg *cfg;
3146 struct msi_msg msg;
3147 unsigned int dest;
3148 cpumask_t tmp;
3149 struct irq_desc *desc;
3150
3151 cpus_and(tmp, mask, cpu_online_map);
3152 if (cpus_empty(tmp))
3153 return;
3154
3155 if (assign_irq_vector(irq, mask))
3156 return;
3157
3158 cfg = irq_cfg(irq);
3159 cpus_and(tmp, cfg->domain, mask);
3160 dest = cpu_mask_to_apicid(tmp);
3161
3162 read_msi_msg(irq, &msg);
3163
3164 msg.data &= ~MSI_DATA_VECTOR_MASK;
3165 msg.data |= MSI_DATA_VECTOR(cfg->vector);
3166 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
3167 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3168
3169 write_msi_msg(irq, &msg);
3170 desc = irq_to_desc(irq);
3171 desc->affinity = mask;
3172 }
3173
3174 #ifdef CONFIG_INTR_REMAP
3175 /*
3176 * Migrate the MSI irq to another cpumask. This migration is
3177 * done in the process context using interrupt-remapping hardware.
3178 */
3179 static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
3180 {
3181 struct irq_cfg *cfg;
3182 unsigned int dest;
3183 cpumask_t tmp, cleanup_mask;
3184 struct irte irte;
3185 struct irq_desc *desc;
3186
3187 cpus_and(tmp, mask, cpu_online_map);
3188 if (cpus_empty(tmp))
3189 return;
3190
3191 if (get_irte(irq, &irte))
3192 return;
3193
3194 if (assign_irq_vector(irq, mask))
3195 return;
3196
3197 cfg = irq_cfg(irq);
3198 cpus_and(tmp, cfg->domain, mask);
3199 dest = cpu_mask_to_apicid(tmp);
3200
3201 irte.vector = cfg->vector;
3202 irte.dest_id = IRTE_DEST(dest);
3203
3204 /*
3205 * atomically update the IRTE with the new destination and vector.
3206 */
3207 modify_irte(irq, &irte);
3208
3209 /*
3210 * After this point, all the interrupts will start arriving
3211 * at the new destination. So, time to cleanup the previous
3212 * vector allocation.
3213 */
3214 if (cfg->move_in_progress) {
3215 cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
3216 cfg->move_cleanup_count = cpus_weight(cleanup_mask);
3217 send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
3218 cfg->move_in_progress = 0;
3219 }
3220
3221 desc = irq_to_desc(irq);
3222 desc->affinity = mask;
3223 }
3224 #endif
3225 #endif /* CONFIG_SMP */
3226
3227 /*
3228 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
3229 * which implement the MSI or MSI-X Capability Structure.
3230 */
3231 static struct irq_chip msi_chip = {
3232 .name = "PCI-MSI",
3233 .unmask = unmask_msi_irq,
3234 .mask = mask_msi_irq,
3235 .ack = ack_apic_edge,
3236 #ifdef CONFIG_SMP
3237 .set_affinity = set_msi_irq_affinity,
3238 #endif
3239 .retrigger = ioapic_retrigger_irq,
3240 };
3241
3242 #ifdef CONFIG_INTR_REMAP
3243 static struct irq_chip msi_ir_chip = {
3244 .name = "IR-PCI-MSI",
3245 .unmask = unmask_msi_irq,
3246 .mask = mask_msi_irq,
3247 .ack = ack_x2apic_edge,
3248 #ifdef CONFIG_SMP
3249 .set_affinity = ir_set_msi_irq_affinity,
3250 #endif
3251 .retrigger = ioapic_retrigger_irq,
3252 };
3253
3254 /*
3255 * Map the PCI dev to the corresponding remapping hardware unit
3256 * and allocate 'nvec' consecutive interrupt-remapping table entries
3257 * in it.
3258 */
3259 static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
3260 {
3261 struct intel_iommu *iommu;
3262 int index;
3263
3264 iommu = map_dev_to_ir(dev);
3265 if (!iommu) {
3266 printk(KERN_ERR
3267 "Unable to map PCI %s to iommu\n", pci_name(dev));
3268 return -ENOENT;
3269 }
3270
3271 index = alloc_irte(iommu, irq, nvec);
3272 if (index < 0) {
3273 printk(KERN_ERR
3274 "Unable to allocate %d IRTE for PCI %s\n", nvec,
3275 pci_name(dev));
3276 return -ENOSPC;
3277 }
3278 return index;
3279 }
3280 #endif
3281
3282 static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc, int irq)
3283 {
3284 int ret;
3285 struct msi_msg msg;
3286
3287 ret = msi_compose_msg(dev, irq, &msg);
3288 if (ret < 0)
3289 return ret;
3290
3291 set_irq_msi(irq, desc);
3292 write_msi_msg(irq, &msg);
3293
3294 #ifdef CONFIG_INTR_REMAP
3295 if (irq_remapped(irq)) {
3296 struct irq_desc *desc = irq_to_desc(irq);
3297 /*
3298 * irq migration in process context
3299 */
3300 desc->status |= IRQ_MOVE_PCNTXT;
3301 set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge");
3302 } else
3303 #endif
3304 set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");
3305
3306 return 0;
3307 }
3308
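/*
 * Derive a per-device irq number hint for create_irq_nr(): the bus
 * number lands in bits 20-27, the devfn in bits 12-19, and the low 12
 * bits are left clear.
 */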
3309 static unsigned int build_irq_for_pci_dev(struct pci_dev *dev)
3310 {
3311 unsigned int irq;
3312
3313 irq = dev->bus->number;
3314 irq <<= 8;
3315 irq |= dev->devfn;
3316 irq <<= 12;
3317
3318 return irq;
3319 }
3320
3321 int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
3322 {
3323 unsigned int irq;
3324 int ret;
3325 unsigned int irq_want;
3326
3327 irq_want = build_irq_for_pci_dev(dev) + 0x100;
3328
3329 irq = create_irq_nr(irq_want);
3330 if (irq == 0)
3331 return -1;
3332
3333 #ifdef CONFIG_INTR_REMAP
3334 if (!intr_remapping_enabled)
3335 goto no_ir;
3336
3337 ret = msi_alloc_irte(dev, irq, 1);
3338 if (ret < 0)
3339 goto error;
3340 no_ir:
3341 #endif
3342 ret = setup_msi_irq(dev, desc, irq);
3343 if (ret < 0) {
3344 destroy_irq(irq);
3345 return ret;
3346 }
3347 return 0;
3348
3349 #ifdef CONFIG_INTR_REMAP
3350 error:
3351 destroy_irq(irq);
3352 return ret;
3353 #endif
3354 }
3355
3356 int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
3357 {
3358 unsigned int irq;
3359 int ret, sub_handle;
3360 struct msi_desc *desc;
3361 unsigned int irq_want;
3362
3363 #ifdef CONFIG_INTR_REMAP
3364 struct intel_iommu *iommu = NULL;
3365 int index = 0;
3366 #endif
3367
3368 irq_want = build_irq_for_pci_dev(dev) + 0x100;
3369 sub_handle = 0;
3370 list_for_each_entry(desc, &dev->msi_list, list) {
3371 irq = create_irq_nr(irq_want--);
3372 if (irq == 0)
3373 return -1;
3374 #ifdef CONFIG_INTR_REMAP
3375 if (!intr_remapping_enabled)
3376 goto no_ir;
3377
3378 if (!sub_handle) {
3379 /*
3380 * allocate the consecutive block of IRTE's
3381 * for 'nvec'
3382 */
3383 index = msi_alloc_irte(dev, irq, nvec);
3384 if (index < 0) {
3385 ret = index;
3386 goto error;
3387 }
3388 } else {
3389 iommu = map_dev_to_ir(dev);
3390 if (!iommu) {
3391 ret = -ENOENT;
3392 goto error;
3393 }
3394 /*
3395 * setup the mapping between the irq and the IRTE
3396 * base index, the sub_handle pointing to the
3397 * appropriate interrupt remap table entry.
3398 */
3399 set_irte_irq(irq, iommu, index, sub_handle);
3400 }
3401 no_ir:
3402 #endif
3403 ret = setup_msi_irq(dev, desc, irq);
3404 if (ret < 0)
3405 goto error;
3406 sub_handle++;
3407 }
3408 return 0;
3409
3410 error:
3411 destroy_irq(irq);
3412 return ret;
3413 }
3414
3415 void arch_teardown_msi_irq(unsigned int irq)
3416 {
3417 destroy_irq(irq);
3418 }
3419
3420 #ifdef CONFIG_DMAR
3421 #ifdef CONFIG_SMP
3422 static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
3423 {
3424 struct irq_cfg *cfg;
3425 struct msi_msg msg;
3426 unsigned int dest;
3427 cpumask_t tmp;
3428 struct irq_desc *desc;
3429
3430 cpus_and(tmp, mask, cpu_online_map);
3431 if (cpus_empty(tmp))
3432 return;
3433
3434 if (assign_irq_vector(irq, mask))
3435 return;
3436
3437 cfg = irq_cfg(irq);
3438 cpus_and(tmp, cfg->domain, mask);
3439 dest = cpu_mask_to_apicid(tmp);
3440
3441 dmar_msi_read(irq, &msg);
3442
3443 msg.data &= ~MSI_DATA_VECTOR_MASK;
3444 msg.data |= MSI_DATA_VECTOR(cfg->vector);
3445 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
3446 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3447
3448 dmar_msi_write(irq, &msg);
3449 desc = irq_to_desc(irq);
3450 desc->affinity = mask;
3451 }
3452 #endif /* CONFIG_SMP */
3453
3454 struct irq_chip dmar_msi_type = {
3455 .name = "DMAR_MSI",
3456 .unmask = dmar_msi_unmask,
3457 .mask = dmar_msi_mask,
3458 .ack = ack_apic_edge,
3459 #ifdef CONFIG_SMP
3460 .set_affinity = dmar_msi_set_affinity,
3461 #endif
3462 .retrigger = ioapic_retrigger_irq,
3463 };
3464
3465 int arch_setup_dmar_msi(unsigned int irq)
3466 {
3467 int ret;
3468 struct msi_msg msg;
3469
3470 ret = msi_compose_msg(NULL, irq, &msg);
3471 if (ret < 0)
3472 return ret;
3473 dmar_msi_write(irq, &msg);
3474 set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
3475 "edge");
3476 return 0;
3477 }
3478 #endif
3479
3480 #endif /* CONFIG_PCI_MSI */
3481 /*
3482 * Hypertransport interrupt support
3483 */
3484 #ifdef CONFIG_HT_IRQ
3485
3486 #ifdef CONFIG_SMP
3487
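/*
 * Rewrite the HT irq message in place with the new vector and
 * destination APIC ID; set_ht_irq_affinity() below relies on this.
 */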
3488 static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
3489 {
3490 struct ht_irq_msg msg;
3491 fetch_ht_irq_msg(irq, &msg);
3492
3493 msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
3494 msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
3495
3496 msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
3497 msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
3498
3499 write_ht_irq_msg(irq, &msg);
3500 }
3501
3502 static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
3503 {
3504 struct irq_cfg *cfg;
3505 unsigned int dest;
3506 cpumask_t tmp;
3507 struct irq_desc *desc;
3508
3509 cpus_and(tmp, mask, cpu_online_map);
3510 if (cpus_empty(tmp))
3511 return;
3512
3513 if (assign_irq_vector(irq, mask))
3514 return;
3515
3516 cfg = irq_cfg(irq);
3517 cpus_and(tmp, cfg->domain, mask);
3518 dest = cpu_mask_to_apicid(tmp);
3519
3520 target_ht_irq(irq, dest, cfg->vector);
3521 desc = irq_to_desc(irq);
3522 desc->affinity = mask;
3523 }
3524 #endif
3525
3526 static struct irq_chip ht_irq_chip = {
3527 .name = "PCI-HT",
3528 .mask = mask_ht_irq,
3529 .unmask = unmask_ht_irq,
3530 .ack = ack_apic_edge,
3531 #ifdef CONFIG_SMP
3532 .set_affinity = set_ht_irq_affinity,
3533 #endif
3534 .retrigger = ioapic_retrigger_irq,
3535 };
3536
3537 int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
3538 {
3539 struct irq_cfg *cfg;
3540 int err;
3541 cpumask_t tmp;
3542
3543 tmp = TARGET_CPUS;
3544 err = assign_irq_vector(irq, tmp);
3545 if (!err) {
3546 struct ht_irq_msg msg;
3547 unsigned dest;
3548
3549 cfg = irq_cfg(irq);
3550 cpus_and(tmp, cfg->domain, tmp);
3551 dest = cpu_mask_to_apicid(tmp);
3552
3553 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
3554
3555 msg.address_lo =
3556 HT_IRQ_LOW_BASE |
3557 HT_IRQ_LOW_DEST_ID(dest) |
3558 HT_IRQ_LOW_VECTOR(cfg->vector) |
3559 ((INT_DEST_MODE == 0) ?
3560 HT_IRQ_LOW_DM_PHYSICAL :
3561 HT_IRQ_LOW_DM_LOGICAL) |
3562 HT_IRQ_LOW_RQEOI_EDGE |
3563 ((INT_DELIVERY_MODE != dest_LowestPrio) ?
3564 HT_IRQ_LOW_MT_FIXED :
3565 HT_IRQ_LOW_MT_ARBITRATED) |
3566 HT_IRQ_LOW_IRQ_MASKED;
3567
3568 write_ht_irq_msg(irq, &msg);
3569
3570 set_irq_chip_and_handler_name(irq, &ht_irq_chip,
3571 handle_edge_irq, "edge");
3572 }
3573 return err;
3574 }
3575 #endif /* CONFIG_HT_IRQ */
3576
3577 /* --------------------------------------------------------------------------
3578 ACPI-based IOAPIC Configuration
3579 -------------------------------------------------------------------------- */
3580
3581 #ifdef CONFIG_ACPI
3582
3583 #ifdef CONFIG_X86_32
3584 int __init io_apic_get_unique_id(int ioapic, int apic_id)
3585 {
3586 union IO_APIC_reg_00 reg_00;
3587 static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
3588 physid_mask_t tmp;
3589 unsigned long flags;
3590 int i = 0;
3591
3592 /*
3593 * The P4 platform supports up to 256 APIC IDs on two separate APIC
3594 * buses (one for LAPICs, one for IOAPICs), where predecessors only
3595 * support up to 16 on one shared APIC bus.
3596 *
3597 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
3598 * advantage of new APIC bus architecture.
3599 */
3600
3601 if (physids_empty(apic_id_map))
3602 apic_id_map = ioapic_phys_id_map(phys_cpu_present_map);
3603
3604 spin_lock_irqsave(&ioapic_lock, flags);
3605 reg_00.raw = io_apic_read(ioapic, 0);
3606 spin_unlock_irqrestore(&ioapic_lock, flags);
3607
3608 if (apic_id >= get_physical_broadcast()) {
3609 printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
3610 "%d\n", ioapic, apic_id, reg_00.bits.ID);
3611 apic_id = reg_00.bits.ID;
3612 }
3613
3614 /*
3615 * Every APIC in a system must have a unique ID or we get lots of nice
3616 * 'stuck on smp_invalidate_needed IPI wait' messages.
3617 */
3618 if (check_apicid_used(apic_id_map, apic_id)) {
3619
3620 for (i = 0; i < get_physical_broadcast(); i++) {
3621 if (!check_apicid_used(apic_id_map, i))
3622 break;
3623 }
3624
3625 if (i == get_physical_broadcast())
3626 panic("Max apic_id exceeded!\n");
3627
3628 printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
3629 "trying %d\n", ioapic, apic_id, i);
3630
3631 apic_id = i;
3632 }
3633
3634 tmp = apicid_to_cpu_present(apic_id);
3635 physids_or(apic_id_map, apic_id_map, tmp);
3636
3637 if (reg_00.bits.ID != apic_id) {
3638 reg_00.bits.ID = apic_id;
3639
3640 spin_lock_irqsave(&ioapic_lock, flags);
3641 io_apic_write(ioapic, 0, reg_00.raw);
3642 reg_00.raw = io_apic_read(ioapic, 0);
3643 spin_unlock_irqrestore(&ioapic_lock, flags);
3644
3645 /* Sanity check */
3646 if (reg_00.bits.ID != apic_id) {
3647 printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
3648 return -1;
3649 }
3650 }
3651
3652 apic_printk(APIC_VERBOSE, KERN_INFO
3653 "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);
3654
3655 return apic_id;
3656 }
3657
3658 int __init io_apic_get_version(int ioapic)
3659 {
3660 union IO_APIC_reg_01 reg_01;
3661 unsigned long flags;
3662
3663 spin_lock_irqsave(&ioapic_lock, flags);
3664 reg_01.raw = io_apic_read(ioapic, 1);
3665 spin_unlock_irqrestore(&ioapic_lock, flags);
3666
3667 return reg_01.bits.version;
3668 }
3669 #endif
3670
3671 int __init io_apic_get_redir_entries (int ioapic)
3672 {
3673 union IO_APIC_reg_01 reg_01;
3674 unsigned long flags;
3675
3676 spin_lock_irqsave(&ioapic_lock, flags);
3677 reg_01.raw = io_apic_read(ioapic, 1);
3678 spin_unlock_irqrestore(&ioapic_lock, flags);
3679
3680 return reg_01.bits.entries;
3681 }
3682
3683
3684 int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity)
3685 {
3686 if (!IO_APIC_IRQ(irq)) {
3687 apic_printk(APIC_QUIET, KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
3688 ioapic);
3689 return -EINVAL;
3690 }
3691
3692 /*
3693 * IRQs < 16 are already in the irq_2_pin[] map
3694 */
3695 if (irq >= 16)
3696 add_pin_to_irq(irq, ioapic, pin);
3697
3698 setup_IO_APIC_irq(ioapic, pin, irq, triggering, polarity);
3699
3700 return 0;
3701 }
3702
3703
3704 int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
3705 {
3706 int i;
3707
3708 if (skip_ioapic_setup)
3709 return -1;
3710
3711 for (i = 0; i < mp_irq_entries; i++)
3712 if (mp_irqs[i].mp_irqtype == mp_INT &&
3713 mp_irqs[i].mp_srcbusirq == bus_irq)
3714 break;
3715 if (i >= mp_irq_entries)
3716 return -1;
3717
3718 *trigger = irq_trigger(i);
3719 *polarity = irq_polarity(i);
3720 return 0;
3721 }
3722
3723 #endif /* CONFIG_ACPI */
3724
3725 /*
3726 * This function currently is only a helper for the i386 smp boot process where
3727 * we need to reprogram the ioredtbls to cater for the cpus which have come online,
3728 * so the mask in all cases should simply be TARGET_CPUS.
3729 */
3730 #ifdef CONFIG_SMP
3731 void __init setup_ioapic_dest(void)
3732 {
3733 int pin, ioapic, irq, irq_entry;
3734 struct irq_cfg *cfg;
3735
3736 if (skip_ioapic_setup == 1)
3737 return;
3738
3739 for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
3740 for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
3741 irq_entry = find_irq_entry(ioapic, pin, mp_INT);
3742 if (irq_entry == -1)
3743 continue;
3744 irq = pin_2_irq(irq_entry, ioapic, pin);
3745
3746 /* setup_IO_APIC_irqs() could fail to get a vector for some device
3747 * when you have too many devices, because at that time only the boot
3748 * cpu is online.
3749 */
3750 cfg = irq_cfg(irq);
3751 if (!cfg->vector)
3752 setup_IO_APIC_irq(ioapic, pin, irq,
3753 irq_trigger(irq_entry),
3754 irq_polarity(irq_entry));
3755 #ifdef CONFIG_INTR_REMAP
3756 else if (intr_remapping_enabled)
3757 set_ir_ioapic_affinity_irq(irq, TARGET_CPUS);
3758 #endif
3759 else
3760 set_ioapic_affinity_irq(irq, TARGET_CPUS);
3761 }
3762
3763 }
3764 }
3765 #endif
3766
3767 #define IOAPIC_RESOURCE_NAME_SIZE 11
3768
3769 static struct resource *ioapic_resources;
3770
3771 static struct resource * __init ioapic_setup_resources(void)
3772 {
3773 unsigned long n;
3774 struct resource *res;
3775 char *mem;
3776 int i;
3777
3778 if (nr_ioapics <= 0)
3779 return NULL;
3780
3781 n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
3782 n *= nr_ioapics;
3783
3784 mem = alloc_bootmem(n);
3785 res = (void *)mem;
3786
3787 if (mem != NULL) {
3788 mem += sizeof(struct resource) * nr_ioapics;
3789
3790 for (i = 0; i < nr_ioapics; i++) {
3791 res[i].name = mem;
3792 res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
3793 sprintf(mem, "IOAPIC %u", i);
3794 mem += IOAPIC_RESOURCE_NAME_SIZE;
3795 }
3796 }
3797
3798 ioapic_resources = res;
3799
3800 return res;
3801 }
3802
3803 void __init ioapic_init_mappings(void)
3804 {
3805 unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
3806 int i;
3807 struct resource *ioapic_res;
3808
3809 ioapic_res = ioapic_setup_resources();
3810 for (i = 0; i < nr_ioapics; i++) {
3811 if (smp_found_config) {
3812 ioapic_phys = mp_ioapics[i].mp_apicaddr;
3813 #ifdef CONFIG_X86_32
3814 if (!ioapic_phys) {
3815 printk(KERN_ERR
3816 "WARNING: bogus zero IO-APIC "
3817 "address found in MPTABLE, "
3818 "disabling IO/APIC support!\n");
3819 smp_found_config = 0;
3820 skip_ioapic_setup = 1;
3821 goto fake_ioapic_page;
3822 }
3823 #endif
3824 } else {
3825 #ifdef CONFIG_X86_32
3826 fake_ioapic_page:
3827 #endif
3828 ioapic_phys = (unsigned long)
3829 alloc_bootmem_pages(PAGE_SIZE);
3830 ioapic_phys = __pa(ioapic_phys);
3831 }
3832 set_fixmap_nocache(idx, ioapic_phys);
3833 apic_printk(APIC_VERBOSE,
3834 "mapped IOAPIC to %08lx (%08lx)\n",
3835 __fix_to_virt(idx), ioapic_phys);
3836 idx++;
3837
3838 if (ioapic_res != NULL) {
3839 ioapic_res->start = ioapic_phys;
3840 ioapic_res->end = ioapic_phys + (4 * 1024) - 1;
3841 ioapic_res++;
3842 }
3843 }
3844 }
3845
3846 static int __init ioapic_insert_resources(void)
3847 {
3848 int i;
3849 struct resource *r = ioapic_resources;
3850
3851 if (!r) {
3852 printk(KERN_ERR
3853 "IO APIC resources could be not be allocated.\n");
3854 return -1;
3855 }
3856
3857 for (i = 0; i < nr_ioapics; i++) {
3858 insert_resource(&iomem_resource, r);
3859 r++;
3860 }
3861
3862 return 0;
3863 }
3864
3865 /* Insert the IO APIC resources after PCI initialization has occurred to handle
3866 * IO APICS that are mapped in on a BAR in PCI space. */
3867 late_initcall(ioapic_insert_resources);