/*
 *	Intel IO-APIC support for multi-Pentium hosts.
 *
 *	Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
 *
 *	Many thanks to Stig Venaas for trying out countless experimental
 *	patches and reporting/debugging problems patiently!
 *
 *	(c) 1999, Multiple IO-APIC support, developed by
 *	Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
 *	Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
 *	further tested and cleaned up by Zach Brown <zab@redhat.com>
 *	and Ingo Molnar <mingo@redhat.com>
 *
 *	Fixes
 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
 *					thanks to Eric Gilmore
 *					and Rolf G. Tews
 *					for testing these extensively
 *	Paul Diefenbaugh	:	Added full ACPI support
 */

#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/mc146818rtc.h>
#include <linux/acpi.h>
#include <linux/sysdev.h>
#include <linux/msi.h>
#include <linux/htirq.h>
#include <linux/dmar.h>
#include <linux/jiffies.h>
#ifdef CONFIG_ACPI
#include <acpi/acpi_bus.h>
#endif
#include <linux/bootmem.h>

#include <asm/idle.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/acpi.h>
#include <asm/dma.h>
#include <asm/i8259.h>
#include <asm/nmi.h>
#include <asm/msidef.h>
#include <asm/hypertransport.h>
#include <asm/irq_remapping.h>

#include <mach_ipi.h>
#include <mach_apic.h>

#define __apicdebuginit(type) static type __init

struct irq_cfg;
struct irq_pin_list;
struct irq_cfg {
	unsigned int irq;
	struct irq_cfg *next;
	struct irq_pin_list *irq_2_pin;
	cpumask_t domain;
	cpumask_t old_domain;
	unsigned move_cleanup_count;
	u8 vector;
	u8 move_in_progress : 1;
};

/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
static struct irq_cfg irq_cfg_legacy[] __initdata = {
	[0]  = { .irq =  0, .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR,  },
	[1]  = { .irq =  1, .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR,  },
	[2]  = { .irq =  2, .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR,  },
	[3]  = { .irq =  3, .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR,  },
	[4]  = { .irq =  4, .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR,  },
	[5]  = { .irq =  5, .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR,  },
	[6]  = { .irq =  6, .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR,  },
	[7]  = { .irq =  7, .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR,  },
	[8]  = { .irq =  8, .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR,  },
	[9]  = { .irq =  9, .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR,  },
	[10] = { .irq = 10, .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, },
	[11] = { .irq = 11, .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, },
	[12] = { .irq = 12, .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, },
	[13] = { .irq = 13, .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, },
	[14] = { .irq = 14, .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, },
	[15] = { .irq = 15, .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
};

static struct irq_cfg irq_cfg_init = { .irq = -1U, };
/* needs to be bigger than the size of irq_cfg_legacy */
static int nr_irq_cfg = 32;

static int __init parse_nr_irq_cfg(char *arg)
{
	if (arg) {
		nr_irq_cfg = simple_strtoul(arg, NULL, 0);
		if (nr_irq_cfg < 32)
			nr_irq_cfg = 32;
	}
	return 0;
}

early_param("nr_irq_cfg", parse_nr_irq_cfg);
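
/*
 * Usage note (illustrative): booting with "nr_irq_cfg=128" on the kernel
 * command line pre-allocates 128 irq_cfg entries instead of the default 32;
 * values below 32 are clamped back up to 32 by the parser above.
 */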

static void init_one_irq_cfg(struct irq_cfg *cfg)
{
	memcpy(cfg, &irq_cfg_init, sizeof(struct irq_cfg));
}

static struct irq_cfg *irq_cfgx;
static struct irq_cfg *irq_cfgx_free;
static void __init init_work(void *data)
{
	struct dyn_array *da = data;
	struct irq_cfg *cfg;
	int legacy_count;
	int i;

	cfg = *da->name;

	memcpy(cfg, irq_cfg_legacy, sizeof(irq_cfg_legacy));

	legacy_count = sizeof(irq_cfg_legacy)/sizeof(irq_cfg_legacy[0]);
	for (i = legacy_count; i < *da->nr; i++)
		init_one_irq_cfg(&cfg[i]);

	for (i = 1; i < *da->nr; i++)
		cfg[i-1].next = &cfg[i];

	irq_cfgx_free = &irq_cfgx[legacy_count];
	irq_cfgx[legacy_count - 1].next = NULL;
}

#define for_each_irq_cfg(cfg)		\
	for (cfg = irq_cfgx; cfg; cfg = cfg->next)

DEFINE_DYN_ARRAY(irq_cfgx, sizeof(struct irq_cfg), nr_irq_cfg, PAGE_SIZE, init_work);

static struct irq_cfg *irq_cfg(unsigned int irq)
{
	struct irq_cfg *cfg;

	cfg = irq_cfgx;
	while (cfg) {
		if (cfg->irq == irq)
			return cfg;

		cfg = cfg->next;
	}

	return NULL;
}

static struct irq_cfg *irq_cfg_alloc(unsigned int irq)
{
	struct irq_cfg *cfg, *cfg_pri;
	int i;
	int count = 0;

	cfg_pri = cfg = irq_cfgx;
	while (cfg) {
		if (cfg->irq == irq)
			return cfg;

		cfg_pri = cfg;
		cfg = cfg->next;
		count++;
	}

	if (!irq_cfgx_free) {
		unsigned long phys;
		unsigned long total_bytes;
		/*
		 * we ran out of pre-allocated ones, allocate more
		 */
		printk(KERN_DEBUG "try to get more irq_cfg %d\n", nr_irq_cfg);

		total_bytes = sizeof(struct irq_cfg) * nr_irq_cfg;
		if (after_bootmem)
			cfg = kzalloc(total_bytes, GFP_ATOMIC);
		else
			cfg = __alloc_bootmem_nopanic(total_bytes, PAGE_SIZE, 0);

		if (!cfg)
			panic("please boot with nr_irq_cfg=%d\n", count * 2);

		phys = __pa(cfg);
		printk(KERN_DEBUG "irq_cfg ==> [%#lx - %#lx]\n", phys, phys + total_bytes);

		for (i = 0; i < nr_irq_cfg; i++)
			init_one_irq_cfg(&cfg[i]);

		for (i = 1; i < nr_irq_cfg; i++)
			cfg[i-1].next = &cfg[i];

		irq_cfgx_free = cfg;
	}

	cfg = irq_cfgx_free;
	irq_cfgx_free = irq_cfgx_free->next;
	cfg->next = NULL;
	if (cfg_pri)
		cfg_pri->next = cfg;
	else
		irq_cfgx = cfg;
	cfg->irq = irq;
	printk(KERN_DEBUG "found new irq_cfg for irq %d\n", cfg->irq);
#ifdef CONFIG_HAVE_SPARSE_IRQ_DEBUG
	{
		/* dump the results */
		struct irq_cfg *cfg;
		unsigned long phys;
		unsigned long bytes = sizeof(struct irq_cfg);

		printk(KERN_DEBUG "=========================== %d\n", irq);
		printk(KERN_DEBUG "irq_cfg dump after get that for %d\n", irq);
		for_each_irq_cfg(cfg) {
			phys = __pa(cfg);
			printk(KERN_DEBUG "irq_cfg %d ==> [%#lx - %#lx]\n", cfg->irq, phys, phys + bytes);
		}
		printk(KERN_DEBUG "===========================\n");
	}
#endif
	return cfg;
}
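
/*
 * The allocator above is a simple singly linked free list: entries are
 * carved out of page-sized arrays, chained through ->next, and handed out
 * one at a time. A minimal userspace sketch of the same pattern
 * (illustrative only; the names here are made up, not kernel API):
 *
 *	struct node { int val; struct node *next; };
 *	static struct node pool[32], *free_head;
 *
 *	static void pool_init(void)
 *	{
 *		int i;
 *		for (i = 1; i < 32; i++)
 *			pool[i - 1].next = &pool[i];
 *		free_head = &pool[0];	// pool[31].next is NULL from BSS
 *	}
 *
 *	static struct node *pool_get(void)
 *	{
 *		struct node *n = free_head;
 *		if (n) {
 *			free_head = n->next;
 *			n->next = NULL;
 *		}
 *		return n;		// caller refills the pool on NULL
 *	}
 *
 * irq_cfg_alloc() does the same, except that on exhaustion it allocates a
 * fresh batch (kzalloc() once bootmem is retired, bootmem before that)
 * rather than failing.
 */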

static int assign_irq_vector(int irq, cpumask_t mask);

int first_system_vector = 0xfe;

char system_vectors[NR_VECTORS] = { [0 ... NR_VECTORS-1] = SYS_VECTOR_FREE};

int sis_apic_bug; /* not actually supported, dummy for compile */

static int no_timer_check;

static int disable_timer_pin_1 __initdata;

int timer_through_8259 __initdata;

/* Where, if anywhere, the i8259 is connected in external int mode */
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };

static DEFINE_SPINLOCK(ioapic_lock);
static DEFINE_SPINLOCK(vector_lock);

/*
 * # of IRQ routing registers
 */
int nr_ioapic_registers[MAX_IO_APICS];

/* I/O APIC RTE contents at OS boot */
struct IO_APIC_route_entry *early_ioapic_entries[MAX_IO_APICS];

/* I/O APIC entries */
struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];
int nr_ioapics;

/* MP IRQ source entries */
struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];

/* # of MP IRQ source entries */
int mp_irq_entries;

DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);

/*
 * Rough estimate of how many shared IRQs there are; can
 * be changed anytime.
 */

int pin_map_size;

/*
 * This is performance-critical, we want to do it O(1)
 *
 * the indexing order of this array favors 1:1 mappings
 * between pins and IRQs.
 */

struct irq_pin_list {
	int apic, pin;
	struct irq_pin_list *next;
};

static struct irq_pin_list *irq_2_pin_head;
/* fill one page? */
static int nr_irq_2_pin = 0x100;
static struct irq_pin_list *irq_2_pin_ptr;
static void __init irq_2_pin_init_work(void *data)
{
	struct dyn_array *da = data;
	struct irq_pin_list *pin;
	int i;

	pin = *da->name;

	for (i = 1; i < *da->nr; i++)
		pin[i-1].next = &pin[i];

	irq_2_pin_ptr = &pin[0];
}
DEFINE_DYN_ARRAY(irq_2_pin_head, sizeof(struct irq_pin_list), nr_irq_2_pin, PAGE_SIZE, irq_2_pin_init_work);

static struct irq_pin_list *get_one_free_irq_2_pin(void)
{
	struct irq_pin_list *pin;
	int i;

	pin = irq_2_pin_ptr;

	if (pin) {
		irq_2_pin_ptr = pin->next;
		pin->next = NULL;
		return pin;
	}

	/*
	 * we ran out of pre-allocated ones, allocate more
	 */
	printk(KERN_DEBUG "try to get more irq_2_pin %d\n", nr_irq_2_pin);

	if (after_bootmem)
		pin = kzalloc(sizeof(struct irq_pin_list)*nr_irq_2_pin,
			      GFP_ATOMIC);
	else
		pin = __alloc_bootmem_nopanic(sizeof(struct irq_pin_list) *
					      nr_irq_2_pin, PAGE_SIZE, 0);

	if (!pin)
		panic("cannot get more irq_2_pin\n");

	for (i = 1; i < nr_irq_2_pin; i++)
		pin[i-1].next = &pin[i];

	irq_2_pin_ptr = pin->next;
	pin->next = NULL;

	return pin;
}

struct io_apic {
	unsigned int index;
	unsigned int unused[3];
	unsigned int data;
};

static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
{
	return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
		+ (mp_ioapics[idx].mp_apicaddr & ~PAGE_MASK);
}

static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	return readl(&io_apic->data);
}

static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}

/*
 * Re-write a value: to be used for read-modify-write
 * cycles where the read already set up the index register.
 */
static inline void io_apic_modify(unsigned int apic, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(value, &io_apic->data);
}
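
/*
 * The three accessors above implement the classic indexed register window:
 * the IO-APIC exposes only two 32-bit memory-mapped registers, an index
 * register and a data register. Reading register N is therefore a two-step
 * sequence (illustrative, simplified from io_apic_read() above):
 *
 *	writel(N, &io_apic->index);	// select register N
 *	val = readl(&io_apic->data);	// read it through the window
 *
 * io_apic_modify() skips the index write, which is only safe in a
 * read-modify-write cycle where the preceding read already selected the
 * register and ioapic_lock is held across both halves.
 */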

static bool io_apic_level_ack_pending(unsigned int irq)
{
	struct irq_pin_list *entry;
	unsigned long flags;
	struct irq_cfg *cfg = irq_cfg(irq);

	spin_lock_irqsave(&ioapic_lock, flags);
	entry = cfg->irq_2_pin;
	for (;;) {
		unsigned int reg;
		int pin;

		if (!entry)
			break;
		pin = entry->pin;
		reg = io_apic_read(entry->apic, 0x10 + pin*2);
		/* Is the remote IRR bit set? */
		if (reg & IO_APIC_REDIR_REMOTE_IRR) {
			spin_unlock_irqrestore(&ioapic_lock, flags);
			return true;
		}
		if (!entry->next)
			break;
		entry = entry->next;
	}
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return false;
}

union entry_union {
	struct { u32 w1, w2; };
	struct IO_APIC_route_entry entry;
};

static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
{
	union entry_union eu;
	unsigned long flags;
	spin_lock_irqsave(&ioapic_lock, flags);
	eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
	eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
	spin_unlock_irqrestore(&ioapic_lock, flags);
	return eu.entry;
}

/*
 * When we write a new IO APIC routing entry, we need to write the high
 * word first! If the mask bit in the low word is clear, we will enable
 * the interrupt, and we need to make sure the entry is fully populated
 * before that happens.
 */
static void
__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	union entry_union eu;
	eu.entry = e;
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
}

static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	unsigned long flags;
	spin_lock_irqsave(&ioapic_lock, flags);
	__ioapic_write_entry(apic, pin, e);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}

/*
 * When we mask an IO APIC routing entry, we need to write the low
 * word first, in order to set the mask bit before we change the
 * high bits!
 */
static void ioapic_mask_entry(int apic, int pin)
{
	unsigned long flags;
	union entry_union eu = { .entry.mask = 1 };

	spin_lock_irqsave(&ioapic_lock, flags);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}
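
/*
 * Why the ordering above matters (per the standard IO-APIC redirection
 * entry layout; treat the exact bit positions as a datasheet detail): the
 * low dword (w1, register 0x10 + 2*pin) holds the vector, delivery mode,
 * trigger mode and the mask bit, while the high dword (w2, register
 * 0x11 + 2*pin) holds the destination. Illustrative sequences for an
 * entry that is currently unmasked:
 *
 *	// writing: high word first, so the entry is complete before the
 *	// low-word write can clear the mask bit and enable delivery
 *	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
 *	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
 *
 *	// masking: low word first, so the mask bit is set before the
 *	// destination half is touched
 *	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
 *	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
 */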

#ifdef CONFIG_SMP
static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector)
{
	int apic, pin;
	struct irq_cfg *cfg;
	struct irq_pin_list *entry;

	cfg = irq_cfg(irq);
	entry = cfg->irq_2_pin;
	for (;;) {
		unsigned int reg;

		if (!entry)
			break;

		apic = entry->apic;
		pin = entry->pin;
		/*
		 * With interrupt-remapping, destination information comes
		 * from the interrupt-remapping table entry.
		 */
		if (!irq_remapped(irq))
			io_apic_write(apic, 0x11 + pin*2, dest);
		reg = io_apic_read(apic, 0x10 + pin*2);
		reg &= ~IO_APIC_REDIR_VECTOR_MASK;
		reg |= vector;
		io_apic_modify(apic, reg);
		if (!entry->next)
			break;
		entry = entry->next;
	}
}

static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
{
	struct irq_cfg *cfg = irq_cfg(irq);
	unsigned long flags;
	unsigned int dest;
	cpumask_t tmp;
	struct irq_desc *desc;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	if (assign_irq_vector(irq, mask))
		return;

	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	/*
	 * Only the high 8 bits are valid.
	 */
	dest = SET_APIC_LOGICAL_ID(dest);

	desc = irq_to_desc(irq);
	spin_lock_irqsave(&ioapic_lock, flags);
	__target_IO_APIC_irq(irq, dest, cfg->vector);
	desc->affinity = mask;
	spin_unlock_irqrestore(&ioapic_lock, flags);
}
#endif

/*
 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
 * shared ISA-space IRQs, so we have to support them. We are super
 * fast in the common case, and fast for shared ISA-space IRQs.
 */
int first_free_entry;
static void add_pin_to_irq(unsigned int irq, int apic, int pin)
{
	struct irq_cfg *cfg;
	struct irq_pin_list *entry;

	/* first time this irq_cfg is referenced, so allocate it */
	cfg = irq_cfg_alloc(irq);
	entry = cfg->irq_2_pin;
	if (!entry) {
		entry = get_one_free_irq_2_pin();
		cfg->irq_2_pin = entry;
		entry->apic = apic;
		entry->pin = pin;
		printk(KERN_DEBUG " 0 add_pin_to_irq: irq %d --> apic %d pin %d\n", irq, apic, pin);
		return;
	}

	while (entry->next) {
		/* not again, please */
		if (entry->apic == apic && entry->pin == pin)
			return;

		entry = entry->next;
	}

	entry->next = get_one_free_irq_2_pin();
	entry = entry->next;
	entry->apic = apic;
	entry->pin = pin;
	printk(KERN_DEBUG " x add_pin_to_irq: irq %d --> apic %d pin %d\n", irq, apic, pin);
}

/*
 * Reroute an IRQ to a different pin.
 */
static void __init replace_pin_at_irq(unsigned int irq,
				      int oldapic, int oldpin,
				      int newapic, int newpin)
{
	struct irq_cfg *cfg = irq_cfg(irq);
	struct irq_pin_list *entry = cfg->irq_2_pin;
	int replaced = 0;

	while (entry) {
		if (entry->apic == oldapic && entry->pin == oldpin) {
			entry->apic = newapic;
			entry->pin = newpin;
			replaced = 1;
			/* every one is different, right? */
			break;
		}
		entry = entry->next;
	}

	/* why? call replace before add? */
	if (!replaced)
		add_pin_to_irq(irq, newapic, newpin);
}

/*
 * Synchronize the IO-APIC and the CPU by doing
 * a dummy read from the IO-APIC
 */
static inline void io_apic_sync(unsigned int apic)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	readl(&io_apic->data);
}

#define __DO_ACTION(R, ACTION, FINAL)					\
									\
{									\
	int pin;							\
	struct irq_cfg *cfg;						\
	struct irq_pin_list *entry;					\
									\
	cfg = irq_cfg(irq);						\
	entry = cfg->irq_2_pin;						\
	for (;;) {							\
		unsigned int reg;					\
		if (!entry)						\
			break;						\
		pin = entry->pin;					\
		reg = io_apic_read(entry->apic, 0x10 + R + pin*2);	\
		reg ACTION;						\
		io_apic_modify(entry->apic, reg);			\
		FINAL;							\
		if (!entry->next)					\
			break;						\
		entry = entry->next;					\
	}								\
}

#define DO_ACTION(name, R, ACTION, FINAL)				\
									\
	static void name##_IO_APIC_irq (unsigned int irq)		\
	__DO_ACTION(R, ACTION, FINAL)

/* mask = 1 */
DO_ACTION(__mask, 0, |= IO_APIC_REDIR_MASKED, io_apic_sync(entry->apic))

/* mask = 0 */
DO_ACTION(__unmask, 0, &= ~IO_APIC_REDIR_MASKED, )
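
/*
 * For reference, the DO_ACTION() invocations above expand (roughly) to:
 *
 *	static void __mask_IO_APIC_irq(unsigned int irq)
 *	{
 *		... for each (apic, pin) of the irq ...
 *		reg = io_apic_read(entry->apic, 0x10 + pin*2);
 *		reg |= IO_APIC_REDIR_MASKED;
 *		io_apic_modify(entry->apic, reg);
 *		io_apic_sync(entry->apic);	// FINAL: flush the write
 *	}
 *
 * and the same with "&= ~IO_APIC_REDIR_MASKED" and an empty FINAL step for
 * __unmask_IO_APIC_irq(). Only the mask path needs the dummy read, since
 * an unmask can tolerate being posted.
 */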

static void mask_IO_APIC_irq (unsigned int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	__mask_IO_APIC_irq(irq);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void unmask_IO_APIC_irq (unsigned int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	__unmask_IO_APIC_irq(irq);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
{
	struct IO_APIC_route_entry entry;

	/* Check delivery_mode to be sure we're not clearing an SMI pin */
	entry = ioapic_read_entry(apic, pin);
	if (entry.delivery_mode == dest_SMI)
		return;
	/*
	 * Disable it in the IO-APIC irq-routing table:
	 */
	ioapic_mask_entry(apic, pin);
}

static void clear_IO_APIC (void)
{
	int apic, pin;

	for (apic = 0; apic < nr_ioapics; apic++)
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
			clear_IO_APIC_pin(apic, pin);
}

/*
 * Saves and masks all the unmasked IO-APIC RTEs
 */
int save_mask_IO_APIC_setup(void)
{
	union IO_APIC_reg_01 reg_01;
	unsigned long flags;
	int apic, pin;

	/*
	 * The number of IO-APIC IRQ registers (== #pins):
	 */
	for (apic = 0; apic < nr_ioapics; apic++) {
		spin_lock_irqsave(&ioapic_lock, flags);
		reg_01.raw = io_apic_read(apic, 1);
		spin_unlock_irqrestore(&ioapic_lock, flags);
		nr_ioapic_registers[apic] = reg_01.bits.entries+1;
	}

	for (apic = 0; apic < nr_ioapics; apic++) {
		early_ioapic_entries[apic] =
			kzalloc(sizeof(struct IO_APIC_route_entry) *
				nr_ioapic_registers[apic], GFP_KERNEL);
		if (!early_ioapic_entries[apic])
			return -ENOMEM;
	}

	for (apic = 0; apic < nr_ioapics; apic++)
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
			struct IO_APIC_route_entry entry;

			entry = early_ioapic_entries[apic][pin] =
				ioapic_read_entry(apic, pin);
			if (!entry.mask) {
				entry.mask = 1;
				ioapic_write_entry(apic, pin, entry);
			}
		}
	return 0;
}

void restore_IO_APIC_setup(void)
{
	int apic, pin;

	for (apic = 0; apic < nr_ioapics; apic++)
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
			ioapic_write_entry(apic, pin,
					   early_ioapic_entries[apic][pin]);
}

void reinit_intr_remapped_IO_APIC(int intr_remapping)
{
	/*
	 * For now, a plain restore of the previous settings.
	 * TBD: In the case of the OS enabling interrupt-remapping,
	 * IO-APIC RTEs need to be set up to point to interrupt-remapping
	 * table entries. For now, do a plain restore, and wait for
	 * setup_IO_APIC_irqs() to do proper initialization.
	 */
	restore_IO_APIC_setup();
}

int skip_ioapic_setup;
int ioapic_force;

static int __init parse_noapic(char *str)
{
	disable_ioapic_setup();
	return 0;
}
early_param("noapic", parse_noapic);

/* Actually the next is obsolete, but keep it for paranoid reasons -AK */
static int __init disable_timer_pin_setup(char *arg)
{
	disable_timer_pin_1 = 1;
	return 1;
}
__setup("disable_timer_pin_1", disable_timer_pin_setup);


/*
 * Find the IRQ entry number of a certain pin.
 */
static int find_irq_entry(int apic, int pin, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++)
		if (mp_irqs[i].mp_irqtype == type &&
		    (mp_irqs[i].mp_dstapic == mp_ioapics[apic].mp_apicid ||
		     mp_irqs[i].mp_dstapic == MP_APIC_ALL) &&
		    mp_irqs[i].mp_dstirq == pin)
			return i;

	return -1;
}

/*
 * Find the pin to which IRQ[irq] (ISA) is connected
 */
static int __init find_isa_irq_pin(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].mp_srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].mp_irqtype == type) &&
		    (mp_irqs[i].mp_srcbusirq == irq))

			return mp_irqs[i].mp_dstirq;
	}
	return -1;
}

static int __init find_isa_irq_apic(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].mp_srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].mp_irqtype == type) &&
		    (mp_irqs[i].mp_srcbusirq == irq))
			break;
	}
	if (i < mp_irq_entries) {
		int apic;
		for (apic = 0; apic < nr_ioapics; apic++) {
			if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic)
				return apic;
		}
	}

	return -1;
}

/*
 * Find a specific PCI IRQ entry.
 * Not an __init, possibly needed by modules
 */
static int pin_2_irq(int idx, int apic, int pin);

int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
{
	int apic, i, best_guess = -1;

	apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
		    bus, slot, pin);
	if (test_bit(bus, mp_bus_not_pci)) {
		apic_printk(APIC_VERBOSE, "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
		return -1;
	}
	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].mp_srcbus;

		for (apic = 0; apic < nr_ioapics; apic++)
			if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic ||
			    mp_irqs[i].mp_dstapic == MP_APIC_ALL)
				break;

		if (!test_bit(lbus, mp_bus_not_pci) &&
		    !mp_irqs[i].mp_irqtype &&
		    (bus == lbus) &&
		    (slot == ((mp_irqs[i].mp_srcbusirq >> 2) & 0x1f))) {
			int irq = pin_2_irq(i, apic, mp_irqs[i].mp_dstirq);

			if (!(apic || IO_APIC_IRQ(irq)))
				continue;

			if (pin == (mp_irqs[i].mp_srcbusirq & 3))
				return irq;
			/*
			 * Use the first all-but-pin matching entry as a
			 * best-guess fuzzy result for broken mptables.
			 */
			if (best_guess < 0)
				best_guess = irq;
		}
	}
	return best_guess;
}

/* ISA interrupts are always polarity zero edge triggered,
 * when listed as conforming in the MP table. */

#define default_ISA_trigger(idx)	(0)
#define default_ISA_polarity(idx)	(0)

/* PCI interrupts are always polarity one level triggered,
 * when listed as conforming in the MP table. */

#define default_PCI_trigger(idx)	(1)
#define default_PCI_polarity(idx)	(1)

static int MPBIOS_polarity(int idx)
{
	int bus = mp_irqs[idx].mp_srcbus;
	int polarity;

	/*
	 * Determine IRQ line polarity (high active or low active):
	 */
	switch (mp_irqs[idx].mp_irqflag & 3)
	{
		case 0: /* conforms, i.e. bus-type dependent polarity */
			if (test_bit(bus, mp_bus_not_pci))
				polarity = default_ISA_polarity(idx);
			else
				polarity = default_PCI_polarity(idx);
			break;
		case 1: /* high active */
		{
			polarity = 0;
			break;
		}
		case 2: /* reserved */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			polarity = 1;
			break;
		}
		case 3: /* low active */
		{
			polarity = 1;
			break;
		}
		default: /* invalid */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			polarity = 1;
			break;
		}
	}
	return polarity;
}

static int MPBIOS_trigger(int idx)
{
	int bus = mp_irqs[idx].mp_srcbus;
	int trigger;

	/*
	 * Determine IRQ trigger mode (edge or level sensitive):
	 */
	switch ((mp_irqs[idx].mp_irqflag>>2) & 3)
	{
		case 0: /* conforms, i.e. bus-type dependent */
			if (test_bit(bus, mp_bus_not_pci))
				trigger = default_ISA_trigger(idx);
			else
				trigger = default_PCI_trigger(idx);
			break;
		case 1: /* edge */
		{
			trigger = 0;
			break;
		}
		case 2: /* reserved */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			trigger = 1;
			break;
		}
		case 3: /* level */
		{
			trigger = 1;
			break;
		}
		default: /* invalid */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			trigger = 0;
			break;
		}
	}
	return trigger;
}

static inline int irq_polarity(int idx)
{
	return MPBIOS_polarity(idx);
}

static inline int irq_trigger(int idx)
{
	return MPBIOS_trigger(idx);
}
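
/*
 * Summary of the MP-table irqflag decoding used above (bits 0-1 select
 * polarity, bits 2-3 select trigger mode):
 *
 *	bits	polarity		bits	trigger
 *	 00	conforms to bus		 00	conforms to bus
 *	 01	active high		 01	edge
 *	 10	reserved (warn)		 10	reserved (warn)
 *	 11	active low		 11	level
 *
 * "Conforms" means the ISA defaults (active high, edge) for non-PCI buses
 * and the PCI defaults (active low, level) for PCI buses, per the
 * #defines above.
 */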

static int pin_2_irq(int idx, int apic, int pin)
{
	int irq, i;
	int bus = mp_irqs[idx].mp_srcbus;

	/*
	 * Debugging check, we are in big trouble if this message pops up!
	 */
	if (mp_irqs[idx].mp_dstirq != pin)
		printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");

	if (test_bit(bus, mp_bus_not_pci)) {
		irq = mp_irqs[idx].mp_srcbusirq;
	} else {
		/*
		 * PCI IRQs are mapped in order
		 */
		i = irq = 0;
		while (i < apic)
			irq += nr_ioapic_registers[i++];
		irq += pin;
	}
	return irq;
}
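
/*
 * Worked example: with two IO-APICs of 24 pins each
 * (nr_ioapic_registers[0] == nr_ioapic_registers[1] == 24), a PCI
 * interrupt on apic 1, pin 5 maps to irq = 24 + 5 = 29, while an ISA
 * interrupt simply keeps its source-bus irq number.
 */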

void lock_vector_lock(void)
{
	/* Used so that the set of online cpus does not change
	 * during assign_irq_vector.
	 */
	spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
	spin_unlock(&vector_lock);
}

static int __assign_irq_vector(int irq, cpumask_t mask)
{
	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
	unsigned int old_vector;
	int cpu;
	struct irq_cfg *cfg;

	cfg = irq_cfg(irq);

	/* Only try and allocate irqs on cpus that are present */
	cpus_and(mask, mask, cpu_online_map);

	if ((cfg->move_in_progress) || cfg->move_cleanup_count)
		return -EBUSY;

	old_vector = cfg->vector;
	if (old_vector) {
		cpumask_t tmp;
		cpus_and(tmp, cfg->domain, mask);
		if (!cpus_empty(tmp))
			return 0;
	}

	for_each_cpu_mask_nr(cpu, mask) {
		cpumask_t domain, new_mask;
		int new_cpu;
		int vector, offset;

		domain = vector_allocation_domain(cpu);
		cpus_and(new_mask, domain, cpu_online_map);

		vector = current_vector;
		offset = current_offset;
next:
		vector += 8;
		if (vector >= first_system_vector) {
			/* If we run out of vectors on large boxen, must share them. */
			offset = (offset + 1) % 8;
			vector = FIRST_DEVICE_VECTOR + offset;
		}
		if (unlikely(current_vector == vector))
			continue;
		if (vector == IA32_SYSCALL_VECTOR)
			goto next;
		for_each_cpu_mask_nr(new_cpu, new_mask)
			if (per_cpu(vector_irq, new_cpu)[vector] != -1)
				goto next;
		/* Found one! */
		current_vector = vector;
		current_offset = offset;
		if (old_vector) {
			cfg->move_in_progress = 1;
			cfg->old_domain = cfg->domain;
		}
		for_each_cpu_mask_nr(new_cpu, new_mask)
			per_cpu(vector_irq, new_cpu)[vector] = irq;
		cfg->vector = vector;
		cfg->domain = domain;
		return 0;
	}
	return -ENOSPC;
}
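
/*
 * The search above steps the candidate vector by 8 so that consecutive
 * allocations land in different interrupt priority levels (level ==
 * vector >> 4). Illustrative sequence, assuming FIRST_DEVICE_VECTOR is
 * 0x41 (treat the exact numbers as illustrative):
 *
 *	0x49, 0x51, 0x59, ... up to first_system_vector,
 *
 * then wrap to offset 1 (0x42) and continue; IA32_SYSCALL_VECTOR (0x80)
 * is skipped explicitly so that int 0x80 keeps working.
 */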

static int assign_irq_vector(int irq, cpumask_t mask)
{
	int err;
	unsigned long flags;

	spin_lock_irqsave(&vector_lock, flags);
	err = __assign_irq_vector(irq, mask);
	spin_unlock_irqrestore(&vector_lock, flags);
	return err;
}

static void __clear_irq_vector(int irq)
{
	struct irq_cfg *cfg;
	cpumask_t mask;
	int cpu, vector;

	cfg = irq_cfg(irq);
	BUG_ON(!cfg->vector);

	vector = cfg->vector;
	cpus_and(mask, cfg->domain, cpu_online_map);
	for_each_cpu_mask_nr(cpu, mask)
		per_cpu(vector_irq, cpu)[vector] = -1;

	cfg->vector = 0;
	cpus_clear(cfg->domain);
}

void __setup_vector_irq(int cpu)
{
	/* Initialize vector_irq on a new cpu */
	/* This function must be called with vector_lock held */
	int irq, vector;
	struct irq_cfg *cfg;

	/* Mark the inuse vectors */
	for_each_irq_cfg(cfg) {
		if (!cpu_isset(cpu, cfg->domain))
			continue;
		vector = cfg->vector;
		irq = cfg->irq;
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
	/* Mark the free vectors */
	for (vector = 0; vector < NR_VECTORS; ++vector) {
		irq = per_cpu(vector_irq, cpu)[vector];
		if (irq < 0)
			continue;

		cfg = irq_cfg(irq);
		if (!cpu_isset(cpu, cfg->domain))
			per_cpu(vector_irq, cpu)[vector] = -1;
	}
}

static struct irq_chip ioapic_chip;
#ifdef CONFIG_INTR_REMAP
static struct irq_chip ir_ioapic_chip;
#endif

static void ioapic_register_intr(int irq, unsigned long trigger)
{
	struct irq_desc *desc;

	/* first time this irq_desc is used */
	if (irq < 16)
		desc = irq_to_desc(irq);
	else
		desc = irq_to_desc_alloc(irq);

	if (trigger)
		desc->status |= IRQ_LEVEL;
	else
		desc->status &= ~IRQ_LEVEL;

#ifdef CONFIG_INTR_REMAP
	if (irq_remapped(irq)) {
		desc->status |= IRQ_MOVE_PCNTXT;
		if (trigger)
			set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
						      handle_fasteoi_irq,
						      "fasteoi");
		else
			set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
						      handle_edge_irq, "edge");
		return;
	}
#endif
	if (trigger)
		set_irq_chip_and_handler_name(irq, &ioapic_chip,
					      handle_fasteoi_irq,
					      "fasteoi");
	else
		set_irq_chip_and_handler_name(irq, &ioapic_chip,
					      handle_edge_irq, "edge");
}

static int setup_ioapic_entry(int apic, int irq,
			      struct IO_APIC_route_entry *entry,
			      unsigned int destination, int trigger,
			      int polarity, int vector)
{
	/*
	 * add it to the IO-APIC irq-routing table:
	 */
	memset(entry, 0, sizeof(*entry));

#ifdef CONFIG_INTR_REMAP
	if (intr_remapping_enabled) {
		struct intel_iommu *iommu = map_ioapic_to_ir(apic);
		struct irte irte;
		struct IR_IO_APIC_route_entry *ir_entry =
			(struct IR_IO_APIC_route_entry *) entry;
		int index;

		if (!iommu)
			panic("No mapping iommu for ioapic %d\n", apic);

		index = alloc_irte(iommu, irq, 1);
		if (index < 0)
			panic("Failed to allocate IRTE for ioapic %d\n", apic);

		memset(&irte, 0, sizeof(irte));

		irte.present = 1;
		irte.dst_mode = INT_DEST_MODE;
		irte.trigger_mode = trigger;
		irte.dlvry_mode = INT_DELIVERY_MODE;
		irte.vector = vector;
		irte.dest_id = IRTE_DEST(destination);

		modify_irte(irq, &irte);

		ir_entry->index2 = (index >> 15) & 0x1;
		ir_entry->zero = 0;
		ir_entry->format = 1;
		ir_entry->index = (index & 0x7fff);
	} else
#endif
	{
		entry->delivery_mode = INT_DELIVERY_MODE;
		entry->dest_mode = INT_DEST_MODE;
		entry->dest = destination;
	}

	entry->mask = 0;	/* enable IRQ */
	entry->trigger = trigger;
	entry->polarity = polarity;
	entry->vector = vector;

	/* Mask level triggered irqs.
	 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
	 */
	if (trigger)
		entry->mask = 1;
	return 0;
}

static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq,
			      int trigger, int polarity)
{
	struct irq_cfg *cfg;
	struct IO_APIC_route_entry entry;
	cpumask_t mask;

	if (!IO_APIC_IRQ(irq))
		return;

	cfg = irq_cfg(irq);

	mask = TARGET_CPUS;
	if (assign_irq_vector(irq, mask))
		return;

	cpus_and(mask, cfg->domain, mask);

	apic_printk(APIC_VERBOSE, KERN_DEBUG
		    "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
		    "IRQ %d Mode:%i Active:%i)\n",
		    apic, mp_ioapics[apic].mp_apicid, pin, cfg->vector,
		    irq, trigger, polarity);


	if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry,
			       cpu_mask_to_apicid(mask), trigger, polarity,
			       cfg->vector)) {
		printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
		       mp_ioapics[apic].mp_apicid, pin);
		__clear_irq_vector(irq);
		return;
	}

	ioapic_register_intr(irq, trigger);
	if (irq < 16)
		disable_8259A_irq(irq);

	ioapic_write_entry(apic, pin, entry);
}

static void __init setup_IO_APIC_irqs(void)
{
	int apic, pin, idx, irq, first_notcon = 1;

	apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");

	for (apic = 0; apic < nr_ioapics; apic++) {
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {

			idx = find_irq_entry(apic, pin, mp_INT);
			if (idx == -1) {
				if (first_notcon) {
					apic_printk(APIC_VERBOSE, KERN_DEBUG " IO-APIC (apicid-pin) %d-%d", mp_ioapics[apic].mp_apicid, pin);
					first_notcon = 0;
				} else
					apic_printk(APIC_VERBOSE, ", %d-%d", mp_ioapics[apic].mp_apicid, pin);
				continue;
			}
			if (!first_notcon) {
				apic_printk(APIC_VERBOSE, " not connected.\n");
				first_notcon = 1;
			}

			irq = pin_2_irq(idx, apic, pin);
			add_pin_to_irq(irq, apic, pin);

			setup_IO_APIC_irq(apic, pin, irq,
					  irq_trigger(idx), irq_polarity(idx));
		}
	}

	if (!first_notcon)
		apic_printk(APIC_VERBOSE, " not connected.\n");
}

/*
 * Set up the timer pin, possibly with the 8259A-master behind.
 */
static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
					int vector)
{
	struct IO_APIC_route_entry entry;

	if (intr_remapping_enabled)
		return;

	memset(&entry, 0, sizeof(entry));

	/*
	 * We use logical delivery to get the timer IRQ
	 * to the first CPU.
	 */
	entry.dest_mode = INT_DEST_MODE;
	entry.mask = 1;			/* mask IRQ now */
	entry.dest = cpu_mask_to_apicid(TARGET_CPUS);
	entry.delivery_mode = INT_DELIVERY_MODE;
	entry.polarity = 0;
	entry.trigger = 0;
	entry.vector = vector;

	/*
	 * The timer IRQ doesn't have to know that behind the
	 * scenes we may have an 8259A master in AEOI mode ...
	 */
	set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");

	/*
	 * Add it to the IO-APIC irq-routing table:
	 */
	ioapic_write_entry(apic, pin, entry);
}


__apicdebuginit(void) print_IO_APIC(void)
{
	int apic, i;
	union IO_APIC_reg_00 reg_00;
	union IO_APIC_reg_01 reg_01;
	union IO_APIC_reg_02 reg_02;
	unsigned long flags;
	struct irq_cfg *cfg;

	if (apic_verbosity == APIC_QUIET)
		return;

	printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
	for (i = 0; i < nr_ioapics; i++)
		printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
		       mp_ioapics[i].mp_apicid, nr_ioapic_registers[i]);

	/*
	 * We are a bit conservative about what we expect. We have to
	 * know about every hardware change ASAP.
	 */
	printk(KERN_INFO "testing the IO APIC.......................\n");

	for (apic = 0; apic < nr_ioapics; apic++) {

		spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(apic, 0);
		reg_01.raw = io_apic_read(apic, 1);
		if (reg_01.bits.version >= 0x10)
			reg_02.raw = io_apic_read(apic, 2);
		spin_unlock_irqrestore(&ioapic_lock, flags);

		printk("\n");
		printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mp_apicid);
		printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
		printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
		printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
		printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS);

		printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
		printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);

		printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
		printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);

		if (reg_01.bits.version >= 0x10) {
			printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
			printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
		}

		printk(KERN_DEBUG ".... IRQ redirection table:\n");

		printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
		       " Stat Dmod Deli Vect: \n");

		for (i = 0; i <= reg_01.bits.entries; i++) {
			struct IO_APIC_route_entry entry;

			entry = ioapic_read_entry(apic, i);

			printk(KERN_DEBUG " %02x %03X ",
			       i,
			       entry.dest
			);

			printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
			       entry.mask,
			       entry.trigger,
			       entry.irr,
			       entry.polarity,
			       entry.delivery_status,
			       entry.dest_mode,
			       entry.delivery_mode,
			       entry.vector
			);
		}
	}
	printk(KERN_DEBUG "IRQ to pin mappings:\n");
	for_each_irq_cfg(cfg) {
		struct irq_pin_list *entry = cfg->irq_2_pin;
		if (!entry)
			continue;
		printk(KERN_DEBUG "IRQ%d ", cfg->irq);
		for (;;) {
			printk("-> %d:%d", entry->apic, entry->pin);
			if (!entry->next)
				break;
			entry = entry->next;
		}
		printk("\n");
	}

	printk(KERN_INFO ".................................... done.\n");

	return;
}

__apicdebuginit(void) print_APIC_bitfield(int base)
{
	unsigned int v;
	int i, j;

	if (apic_verbosity == APIC_QUIET)
		return;

	printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
	for (i = 0; i < 8; i++) {
		v = apic_read(base + i*0x10);
		for (j = 0; j < 32; j++) {
			if (v & (1<<j))
				printk("1");
			else
				printk("0");
		}
		printk("\n");
	}
}

__apicdebuginit(void) print_local_APIC(void *dummy)
{
	unsigned int v, ver, maxlvt;
	unsigned long icr;

	if (apic_verbosity == APIC_QUIET)
		return;

	printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
	       smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	printk(KERN_INFO "... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	v = apic_read(APIC_ARBPRI);
	printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
	       v & APIC_ARBPRI_MASK);
	v = apic_read(APIC_PROCPRI);
	printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);

	v = apic_read(APIC_EOI);
	printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
	v = apic_read(APIC_RRR);
	printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
	v = apic_read(APIC_LDR);
	printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
	v = apic_read(APIC_DFR);
	printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
	v = apic_read(APIC_SPIV);
	printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);

	printk(KERN_DEBUG "... APIC ISR field:\n");
	print_APIC_bitfield(APIC_ISR);
	printk(KERN_DEBUG "... APIC TMR field:\n");
	print_APIC_bitfield(APIC_TMR);
	printk(KERN_DEBUG "... APIC IRR field:\n");
	print_APIC_bitfield(APIC_IRR);

	v = apic_read(APIC_ESR);
	printk(KERN_DEBUG "... APIC ESR: %08x\n", v);

	icr = apic_icr_read();
	printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
	printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {	/* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {	/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
	printk("\n");
}

__apicdebuginit(void) print_all_local_APICs(void)
{
	on_each_cpu(print_local_APIC, NULL, 1);
}

__apicdebuginit(void) print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (apic_verbosity == APIC_QUIET)
		return;

	printk(KERN_DEBUG "\nprinting PIC contents\n");

	spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	printk(KERN_DEBUG "... PIC IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	printk(KERN_DEBUG "... PIC IRR: %04x\n", v);

	outb(0x0b, 0xa0);
	outb(0x0b, 0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	outb(0x0a, 0xa0);
	outb(0x0a, 0x20);

	spin_unlock_irqrestore(&i8259A_lock, flags);

	printk(KERN_DEBUG "... PIC ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
}

__apicdebuginit(int) print_all_ICs(void)
{
	print_PIC();
	print_all_local_APICs();
	print_IO_APIC();

	return 0;
}

fs_initcall(print_all_ICs);


void __init enable_IO_APIC(void)
{
	union IO_APIC_reg_01 reg_01;
	int i8259_apic, i8259_pin;
	int apic;
	unsigned long flags;

	/*
	 * The number of IO-APIC IRQ registers (== #pins):
	 */
	for (apic = 0; apic < nr_ioapics; apic++) {
		spin_lock_irqsave(&ioapic_lock, flags);
		reg_01.raw = io_apic_read(apic, 1);
		spin_unlock_irqrestore(&ioapic_lock, flags);
		nr_ioapic_registers[apic] = reg_01.bits.entries+1;
	}
	for (apic = 0; apic < nr_ioapics; apic++) {
		int pin;
		/* See if any of the pins is in ExtINT mode */
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
			struct IO_APIC_route_entry entry;
			entry = ioapic_read_entry(apic, pin);

			/* If the interrupt line is enabled and in ExtInt mode
			 * I have found the pin where the i8259 is connected.
			 */
			if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
				ioapic_i8259.apic = apic;
				ioapic_i8259.pin = pin;
				goto found_i8259;
			}
		}
	}
found_i8259:
	/* See if the MP table has reported the ExtINT */
	i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
	i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
	/* Trust the MP table if nothing is set up in the hardware */
	if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
		printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
		ioapic_i8259.pin = i8259_pin;
		ioapic_i8259.apic = i8259_apic;
	}
	/* Complain if the MP table and the hardware disagree */
	if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
	    (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
	{
		printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
	}

	/*
	 * Do not trust the IO-APIC being empty at bootup
	 */
	clear_IO_APIC();
}

/*
 * Not an __init, needed by the reboot code
 */
void disable_IO_APIC(void)
{
	/*
	 * Clear the IO-APIC before rebooting:
	 */
	clear_IO_APIC();

	/*
	 * If the i8259 is routed through an IOAPIC
	 * Put that IOAPIC in virtual wire mode
	 * so legacy interrupts can be delivered.
	 */
	if (ioapic_i8259.pin != -1) {
		struct IO_APIC_route_entry entry;

		memset(&entry, 0, sizeof(entry));
		entry.mask = 0;			/* Enabled */
		entry.trigger = 0;		/* Edge */
		entry.irr = 0;
		entry.polarity = 0;		/* High */
		entry.delivery_status = 0;
		entry.dest_mode = 0;		/* Physical */
		entry.delivery_mode = dest_ExtINT;	/* ExtInt */
		entry.vector = 0;
		entry.dest = read_apic_id();

		/*
		 * Add it to the IO-APIC irq-routing table:
		 */
		ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
	}

	disconnect_bsp_APIC(ioapic_i8259.pin != -1);
}

/*
 * There is a nasty bug in some older SMP boards, their mptable lies
 * about the timer IRQ. We do the following to work around the situation:
 *
 *	- the timer IRQ defaults to the IO-APIC IRQ
 *	- if this function detects that timer IRQs are defunct, then we fall
 *	  back to ISA timer IRQs
 */
static int __init timer_irq_works(void)
{
	unsigned long t1 = jiffies;
	unsigned long flags;

	local_save_flags(flags);
	local_irq_enable();
	/* Let ten ticks pass... */
	mdelay((10 * 1000) / HZ);
	local_irq_restore(flags);

	/*
	 * Expect a few ticks at least, to be sure some possible
	 * glue logic does not lock up after one or two first
	 * ticks in a non-ExtINT mode. Also the local APIC
	 * might have cached one ExtINT interrupt. Finally, at
	 * least one tick may be lost due to delays.
	 */

	/* jiffies wrap? */
	if (time_after(jiffies, t1 + 4))
		return 1;
	return 0;
}
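
/*
 * Timing sanity check above: mdelay((10 * 1000) / HZ) busy-waits for the
 * wall-clock equivalent of 10 ticks (e.g. 40ms at HZ=250, 10ms at HZ=1000),
 * and we then require more than 4 observed ticks, which tolerates a cached
 * ExtINT, glue-logic hiccups and a lost tick or two.
 */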

/*
 * In the SMP+IOAPIC case it might happen that there is an unspecified
 * number of pending IRQ events unhandled. These cases are very rare,
 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
 * better to do it this way as thus we do not have to be aware of
 * 'pending' interrupts in the IRQ path, except at this point.
 */
/*
 * Edge triggered needs to resend any interrupt
 * that was delayed but this is now handled in the device
 * independent code.
 */

/*
 * Starting up an edge-triggered IO-APIC interrupt is
 * nasty - we need to make sure that we get the edge.
 * If it is already asserted for some reason, we need
 * return 1 to indicate that it was pending.
 *
 * This is not complete - we should be able to fake
 * an edge even if it isn't on the 8259A...
 */

static unsigned int startup_ioapic_irq(unsigned int irq)
{
	int was_pending = 0;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	if (irq < 16) {
		disable_8259A_irq(irq);
		if (i8259A_irq_pending(irq))
			was_pending = 1;
	}
	__unmask_IO_APIC_irq(irq);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return was_pending;
}

static int ioapic_retrigger_irq(unsigned int irq)
{
	struct irq_cfg *cfg = irq_cfg(irq);
	unsigned long flags;

	spin_lock_irqsave(&vector_lock, flags);
	send_IPI_mask(cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector);
	spin_unlock_irqrestore(&vector_lock, flags);

	return 1;
}

/*
 * Level and edge triggered IO-APIC interrupts need different handling,
 * so we use two separate IRQ descriptors. Edge triggered IRQs can be
 * handled with the level-triggered descriptor, but that one has slightly
 * more overhead. Level-triggered interrupts cannot be handled with the
 * edge-triggered handler, without risking IRQ storms and other ugly
 * races.
 */

#ifdef CONFIG_SMP

#ifdef CONFIG_INTR_REMAP
static void ir_irq_migration(struct work_struct *work);

static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration);

/*
 * Migrate the IO-APIC irq in the presence of intr-remapping.
 *
 * For edge triggered, irq migration is a simple atomic update (of vector
 * and cpu destination) of the IRTE and a flush of the hardware cache.
 *
 * For level triggered, we need to modify the io-apic RTE as well with the
 * updated vector information, along with modifying the IRTE with vector
 * and destination. So irq migration for level triggered is a little bit
 * more complex compared to edge triggered migration. But the good news
 * is, we use the same algorithm for level triggered migration as we have
 * today, the only difference being that we now initiate the irq migration
 * from process context instead of interrupt context.
 *
 * In the future, when we do a directed EOI (combined with cpu EOI broadcast
 * suppression) to the IO-APIC, level triggered irq migration will also be
 * as simple as edge triggered migration and we can do the irq migration
 * with a simple atomic update to the IO-APIC RTE.
 */
static void migrate_ioapic_irq(int irq, cpumask_t mask)
{
	struct irq_cfg *cfg;
	struct irq_desc *desc;
	cpumask_t tmp, cleanup_mask;
	struct irte irte;
	int modify_ioapic_rte;
	unsigned int dest;
	unsigned long flags;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	if (get_irte(irq, &irte))
		return;

	if (assign_irq_vector(irq, mask))
		return;

	cfg = irq_cfg(irq);
	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	desc = irq_to_desc(irq);
	modify_ioapic_rte = desc->status & IRQ_LEVEL;
	if (modify_ioapic_rte) {
		spin_lock_irqsave(&ioapic_lock, flags);
		__target_IO_APIC_irq(irq, dest, cfg->vector);
		spin_unlock_irqrestore(&ioapic_lock, flags);
	}

	irte.vector = cfg->vector;
	irte.dest_id = IRTE_DEST(dest);

	/*
	 * Modify the IRTE and flush the interrupt entry cache.
	 */
	modify_irte(irq, &irte);

	if (cfg->move_in_progress) {
		cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
		cfg->move_cleanup_count = cpus_weight(cleanup_mask);
		send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
		cfg->move_in_progress = 0;
	}

	desc->affinity = mask;
}

static int migrate_irq_remapped_level(int irq)
{
	int ret = -1;
	struct irq_desc *desc = irq_to_desc(irq);

	mask_IO_APIC_irq(irq);

	if (io_apic_level_ack_pending(irq)) {
		/*
		 * Interrupt in progress. Migrating the irq now will change
		 * the vector information in the IO-APIC RTE and that will
		 * confuse the EOI broadcast performed by the cpu.
		 * So, delay the irq migration to the next instance.
		 */
		schedule_delayed_work(&ir_migration_work, 1);
		goto unmask;
	}

	/* everything is clear, we have right of way */
	migrate_ioapic_irq(irq, desc->pending_mask);

	ret = 0;
	desc->status &= ~IRQ_MOVE_PENDING;
	cpus_clear(desc->pending_mask);

unmask:
	unmask_IO_APIC_irq(irq);
	return ret;
}

static void ir_irq_migration(struct work_struct *work)
{
	unsigned int irq;
	struct irq_desc *desc;

	for_each_irq_desc(irq, desc) {
		if (desc->status & IRQ_MOVE_PENDING) {
			unsigned long flags;

			spin_lock_irqsave(&desc->lock, flags);
			if (!desc->chip->set_affinity ||
			    !(desc->status & IRQ_MOVE_PENDING)) {
				desc->status &= ~IRQ_MOVE_PENDING;
				spin_unlock_irqrestore(&desc->lock, flags);
				continue;
			}

			desc->chip->set_affinity(irq, desc->pending_mask);
			spin_unlock_irqrestore(&desc->lock, flags);
		}
	}
}

/*
 * Migrates the IRQ destination in the process context.
 */
static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc->status & IRQ_LEVEL) {
		desc->status |= IRQ_MOVE_PENDING;
		desc->pending_mask = mask;
		migrate_irq_remapped_level(irq);
		return;
	}

	migrate_ioapic_irq(irq, mask);
}
#endif

asmlinkage void smp_irq_move_cleanup_interrupt(void)
{
	unsigned vector, me;
	ack_APIC_irq();
	exit_idle();
	irq_enter();

	me = smp_processor_id();
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		unsigned int irq;
		struct irq_desc *desc;
		struct irq_cfg *cfg;
		irq = __get_cpu_var(vector_irq)[vector];

		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		cfg = irq_cfg(irq);
		spin_lock(&desc->lock);
		if (!cfg->move_cleanup_count)
			goto unlock;

		if ((vector == cfg->vector) && cpu_isset(me, cfg->domain))
			goto unlock;

		__get_cpu_var(vector_irq)[vector] = -1;
		cfg->move_cleanup_count--;
unlock:
		spin_unlock(&desc->lock);
	}

	irq_exit();
}

static void irq_complete_move(unsigned int irq)
{
	struct irq_cfg *cfg = irq_cfg(irq);
	unsigned vector, me;

	if (likely(!cfg->move_in_progress))
		return;

	vector = ~get_irq_regs()->orig_ax;
	me = smp_processor_id();
	if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) {
		cpumask_t cleanup_mask;

		cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
		cfg->move_cleanup_count = cpus_weight(cleanup_mask);
		send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
		cfg->move_in_progress = 0;
	}
}
#else
static inline void irq_complete_move(unsigned int irq) {}
#endif
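
/*
 * Summary of the vector migration protocol, as implemented above:
 *
 *	1. __assign_irq_vector() picks a new vector/domain, saves the old
 *	   domain and sets move_in_progress.
 *	2. The first interrupt that arrives on the *new* vector on a cpu in
 *	   the new domain runs irq_complete_move(), which sends
 *	   IRQ_MOVE_CLEANUP_VECTOR to every online cpu of the old domain.
 *	3. smp_irq_move_cleanup_interrupt() on each such cpu clears its
 *	   stale vector_irq slot and decrements move_cleanup_count.
 *
 * Until step 3 finishes, __assign_irq_vector() refuses further moves of
 * the same irq (-EBUSY), so the old vector cannot be reused prematurely.
 */
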
1996 #ifdef CONFIG_INTR_REMAP
1997 static void ack_x2apic_level(unsigned int irq)
1998 {
1999 ack_x2APIC_irq();
2000 }
2001
2002 static void ack_x2apic_edge(unsigned int irq)
2003 {
2004 ack_x2APIC_irq();
2005 }
2006 #endif
2007
2008 static void ack_apic_edge(unsigned int irq)
2009 {
2010 irq_complete_move(irq);
2011 move_native_irq(irq);
2012 ack_APIC_irq();
2013 }
2014
2015 static void ack_apic_level(unsigned int irq)
2016 {
2017 int do_unmask_irq = 0;
2018
2019 irq_complete_move(irq);
2020 #ifdef CONFIG_GENERIC_PENDING_IRQ
2021 /* If we are moving the irq we need to mask it */
2022 if (unlikely(irq_to_desc(irq)->status & IRQ_MOVE_PENDING)) {
2023 do_unmask_irq = 1;
2024 mask_IO_APIC_irq(irq);
2025 }
2026 #endif
2027
2028 /*
2029 * We must acknowledge the irq before we move it or the acknowledge will
2030 * not propagate properly.
2031 */
2032 ack_APIC_irq();
2033
2034 /* Now we can move and renable the irq */
2035 if (unlikely(do_unmask_irq)) {
2036 /* Only migrate the irq if the ack has been received.
2037 *
2038 * On rare occasions the broadcast level triggered ack gets
2039 * delayed going to ioapics, and if we reprogram the
2040 * vector while Remote IRR is still set the irq will never
2041 * fire again.
2042 *
2043 * To prevent this scenario we read the Remote IRR bit
2044 * of the ioapic. This has two effects.
2045 * - On any sane system the read of the ioapic will
2046 * flush writes (and acks) going to the ioapic from
2047 * this cpu.
2048 * - We get to see if the ACK has actually been delivered.
2049 *
2050 * Based on failed experiments of reprogramming the
2051 * ioapic entry from outside of irq context starting
2052 * with masking the ioapic entry and then polling until
2053 * Remote IRR was clear before reprogramming the
2054 * ioapic I don't trust the Remote IRR bit to be
2055 * completey accurate.
2056 *
2057 * However there appears to be no other way to plug
2058 * this race, so if the Remote IRR bit is not
2059 * accurate and is causing problems then it is a hardware bug
2060 * and you can go talk to the chipset vendor about it.
2061 */
2062 if (!io_apic_level_ack_pending(irq))
2063 move_masked_irq(irq);
2064 unmask_IO_APIC_irq(irq);
2065 }
2066 }
2067
2068 static struct irq_chip ioapic_chip __read_mostly = {
2069 .name = "IO-APIC",
2070 .startup = startup_ioapic_irq,
2071 .mask = mask_IO_APIC_irq,
2072 .unmask = unmask_IO_APIC_irq,
2073 .ack = ack_apic_edge,
2074 .eoi = ack_apic_level,
2075 #ifdef CONFIG_SMP
2076 .set_affinity = set_ioapic_affinity_irq,
2077 #endif
2078 .retrigger = ioapic_retrigger_irq,
2079 };
2080
2081 #ifdef CONFIG_INTR_REMAP
2082 static struct irq_chip ir_ioapic_chip __read_mostly = {
2083 .name = "IR-IO-APIC",
2084 .startup = startup_ioapic_irq,
2085 .mask = mask_IO_APIC_irq,
2086 .unmask = unmask_IO_APIC_irq,
2087 .ack = ack_x2apic_edge,
2088 .eoi = ack_x2apic_level,
2089 #ifdef CONFIG_SMP
2090 .set_affinity = set_ir_ioapic_affinity_irq,
2091 #endif
2092 .retrigger = ioapic_retrigger_irq,
2093 };
2094 #endif
2095
2096 static inline void init_IO_APIC_traps(void)
2097 {
2098 int irq;
2099 struct irq_desc *desc;
2100 struct irq_cfg *cfg;
2101
2102 /*
2103 * NOTE! The local APIC isn't very good at handling
2104 * multiple interrupts at the same interrupt level.
2105 * As the interrupt level is determined by taking the
2106 * vector number and shifting that right by 4, we
2107 * want to spread these out a bit so that they don't
2108 * all fall in the same interrupt level.
2109 *
2110 * Also, we've got to be careful not to trash gate
2111 * 0x80, because int 0x80 is hm, kind of importantish. ;)
2112 */
2113 for_each_irq_cfg(cfg) {
2114 irq = cfg->irq;
2115 if (IO_APIC_IRQ(irq) && !cfg->vector) {
2116 /*
2117 * Hmm.. We don't have an entry for this,
2118 * so default to an old-fashioned 8259
2119 * interrupt if we can..
2120 */
2121 if (irq < 16)
2122 make_8259A_irq(irq);
2123 else {
2124 desc = irq_to_desc(irq);
2125 /* Strange. Oh, well.. */
2126 desc->chip = &no_irq_chip;
2127 }
2128 }
2129 }
2130 }
2131
2132 static void unmask_lapic_irq(unsigned int irq)
2133 {
2134 unsigned long v;
2135
2136 v = apic_read(APIC_LVT0);
2137 apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
2138 }
2139
2140 static void mask_lapic_irq(unsigned int irq)
2141 {
2142 unsigned long v;
2143
2144 v = apic_read(APIC_LVT0);
2145 apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
2146 }
2147
2148 static void ack_lapic_irq(unsigned int irq)
2149 {
2150 ack_APIC_irq();
2151 }
2152
2153 static struct irq_chip lapic_chip __read_mostly = {
2154 .name = "local-APIC",
2155 .mask = mask_lapic_irq,
2156 .unmask = unmask_lapic_irq,
2157 .ack = ack_lapic_irq,
2158 };
2159
2160 static void lapic_register_intr(int irq)
2161 {
2162 struct irq_desc *desc;
2163
2164 desc = irq_to_desc(irq);
2165 desc->status &= ~IRQ_LEVEL;
2166 set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
2167 "edge");
2168 }
2169
2170 static void __init setup_nmi(void)
2171 {
2172 /*
2173 * Dirty trick to enable the NMI watchdog ...
2174 * We put the 8259A master into AEOI mode and
2175 * unmask on all local APICs LVT0 as NMI.
2176 *
2177 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
2178 * is from Maciej W. Rozycki - so we do not have to EOI from
2179 * the NMI handler or the timer interrupt.
2180 */
2181 printk(KERN_INFO "activating NMI Watchdog ...");
2182
2183 enable_NMI_through_LVT0();
2184
2185 printk(" done.\n");
2186 }
2187
2188 /*
2189  * This looks a bit hackish but it's about the only way of sending
2190 * a few INTA cycles to 8259As and any associated glue logic. ICR does
2191 * not support the ExtINT mode, unfortunately. We need to send these
2192 * cycles as some i82489DX-based boards have glue logic that keeps the
2193 * 8259A interrupt line asserted until INTA. --macro
2194 */
2195 static inline void __init unlock_ExtINT_logic(void)
2196 {
2197 int apic, pin, i;
2198 struct IO_APIC_route_entry entry0, entry1;
2199 unsigned char save_control, save_freq_select;
2200
2201 pin = find_isa_irq_pin(8, mp_INT);
2202 apic = find_isa_irq_apic(8, mp_INT);
2203 if (pin == -1)
2204 return;
2205
2206 entry0 = ioapic_read_entry(apic, pin);
2207
2208 clear_IO_APIC_pin(apic, pin);
2209
2210 memset(&entry1, 0, sizeof(entry1));
2211
2212 entry1.dest_mode = 0; /* physical delivery */
2213 entry1.mask = 0; /* unmask IRQ now */
2214 entry1.dest = hard_smp_processor_id();
2215 entry1.delivery_mode = dest_ExtINT;
2216 entry1.polarity = entry0.polarity;
2217 entry1.trigger = 0;
2218 entry1.vector = 0;
2219
2220 ioapic_write_entry(apic, pin, entry1);
2221
2222 save_control = CMOS_READ(RTC_CONTROL);
2223 save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
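	/*
	 * Rate select 0x6 programs the RTC for 1024 Hz periodic
	 * interrupts; setting RTC_PIE below lets them fire as IRQ 8
	 * through the ExtINT entry we just installed.
	 */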
2224 CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
2225 RTC_FREQ_SELECT);
2226 CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
2227
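	/*
	 * Poll for up to a second (100 x 10 ms); each periodic-interrupt
	 * flag we observe knocks a further 10 off the counter, so a
	 * responsive RTC ends the INTA cycles early.
	 */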
2228 i = 100;
2229 while (i-- > 0) {
2230 mdelay(10);
2231 if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
2232 i -= 10;
2233 }
2234
2235 CMOS_WRITE(save_control, RTC_CONTROL);
2236 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
2237 clear_IO_APIC_pin(apic, pin);
2238
2239 ioapic_write_entry(apic, pin, entry0);
2240 }
2241
2242 /*
2243 * This code may look a bit paranoid, but it's supposed to cooperate with
2244 * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
2245 * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
2246 * fanatically on his truly buggy board.
2247 *
2248 * FIXME: really need to revamp this for modern platforms only.
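 *
 * Probe order below: IRQ0 through its I/O APIC pin, then routed via
 * the 8259A (ExtINT cascade pin), then as a local APIC virtual wire
 * in fixed mode, and finally as a raw ExtINT; the first variant whose
 * timer interrupt actually ticks wins.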
2249 */
2250 static inline void __init check_timer(void)
2251 {
2252 struct irq_cfg *cfg = irq_cfg(0);
2253 int apic1, pin1, apic2, pin2;
2254 unsigned long flags;
2255 int no_pin1 = 0;
2256
2257 local_irq_save(flags);
2258
2259 /*
2260 * get/set the timer IRQ vector:
2261 */
2262 disable_8259A_irq(0);
2263 assign_irq_vector(0, TARGET_CPUS);
2264
2265 /*
2266 * As IRQ0 is to be enabled in the 8259A, the virtual
2267 * wire has to be disabled in the local APIC.
2268 */
2269 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
2270 init_8259A(1);
2271
2272 pin1 = find_isa_irq_pin(0, mp_INT);
2273 apic1 = find_isa_irq_apic(0, mp_INT);
2274 pin2 = ioapic_i8259.pin;
2275 apic2 = ioapic_i8259.apic;
2276
2277 apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
2278 "apic1=%d pin1=%d apic2=%d pin2=%d\n",
2279 cfg->vector, apic1, pin1, apic2, pin2);
2280
2281 /*
2282 * Some BIOS writers are clueless and report the ExtINTA
2283 * I/O APIC input from the cascaded 8259A as the timer
2284 * interrupt input. So just in case, if only one pin
2285 * was found above, try it both directly and through the
2286 * 8259A.
2287 */
2288 if (pin1 == -1) {
2289 if (intr_remapping_enabled)
2290 panic("BIOS bug: timer not connected to IO-APIC");
2291 pin1 = pin2;
2292 apic1 = apic2;
2293 no_pin1 = 1;
2294 } else if (pin2 == -1) {
2295 pin2 = pin1;
2296 apic2 = apic1;
2297 }
2298
2299 if (pin1 != -1) {
2300 /*
2301 * Ok, does IRQ0 through the IOAPIC work?
2302 */
2303 if (no_pin1) {
2304 add_pin_to_irq(0, apic1, pin1);
2305 setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
2306 }
2307 unmask_IO_APIC_irq(0);
2308 if (!no_timer_check && timer_irq_works()) {
2309 if (nmi_watchdog == NMI_IO_APIC) {
2310 setup_nmi();
2311 enable_8259A_irq(0);
2312 }
2313 if (disable_timer_pin_1 > 0)
2314 clear_IO_APIC_pin(0, pin1);
2315 goto out;
2316 }
2317 if (intr_remapping_enabled)
2318 panic("timer doesn't work through Interrupt-remapped IO-APIC");
2319 clear_IO_APIC_pin(apic1, pin1);
2320 if (!no_pin1)
2321 apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
2322 "8254 timer not connected to IO-APIC\n");
2323
2324 apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
2325 "(IRQ0) through the 8259A ...\n");
2326 apic_printk(APIC_QUIET, KERN_INFO
2327 "..... (found apic %d pin %d) ...\n", apic2, pin2);
2328 /*
2329 * legacy devices should be connected to IO APIC #0
2330 */
2331 replace_pin_at_irq(0, apic1, pin1, apic2, pin2);
2332 setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
2333 unmask_IO_APIC_irq(0);
2334 enable_8259A_irq(0);
2335 if (timer_irq_works()) {
2336 apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
2337 timer_through_8259 = 1;
2338 if (nmi_watchdog == NMI_IO_APIC) {
2339 disable_8259A_irq(0);
2340 setup_nmi();
2341 enable_8259A_irq(0);
2342 }
2343 goto out;
2344 }
2345 /*
2346 * Cleanup, just in case ...
2347 */
2348 disable_8259A_irq(0);
2349 clear_IO_APIC_pin(apic2, pin2);
2350 apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
2351 }
2352
2353 if (nmi_watchdog == NMI_IO_APIC) {
2354 apic_printk(APIC_QUIET, KERN_WARNING "timer doesn't work "
2355 "through the IO-APIC - disabling NMI Watchdog!\n");
2356 nmi_watchdog = NMI_NONE;
2357 }
2358
2359 apic_printk(APIC_QUIET, KERN_INFO
2360 "...trying to set up timer as Virtual Wire IRQ...\n");
2361
2362 lapic_register_intr(0);
2363 apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */
2364 enable_8259A_irq(0);
2365
2366 if (timer_irq_works()) {
2367 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
2368 goto out;
2369 }
2370 disable_8259A_irq(0);
2371 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
2372 apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");
2373
2374 apic_printk(APIC_QUIET, KERN_INFO
2375 "...trying to set up timer as ExtINT IRQ...\n");
2376
2377 init_8259A(0);
2378 make_8259A_irq(0);
2379 apic_write(APIC_LVT0, APIC_DM_EXTINT);
2380
2381 unlock_ExtINT_logic();
2382
2383 if (timer_irq_works()) {
2384 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
2385 goto out;
2386 }
2387 apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
2388 panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
2389 "report. Then try booting with the 'noapic' option.\n");
2390 out:
2391 local_irq_restore(flags);
2392 }
2393
2394 static int __init notimercheck(char *s)
2395 {
2396 no_timer_check = 1;
2397 return 1;
2398 }
2399 __setup("no_timer_check", notimercheck);
2400
2401 /*
2402 * Traditionally ISA IRQ2 is the cascade IRQ, and is not available
2403 * to devices. However there may be an I/O APIC pin available for
2404 * this interrupt regardless. The pin may be left unconnected, but
2405 * typically it will be reused as an ExtINT cascade interrupt for
2406 * the master 8259A. In the MPS case such a pin will normally be
2407 * reported as an ExtINT interrupt in the MP table. With ACPI
2408 * there is no provision for ExtINT interrupts, and in the absence
2409 * of an override it would be treated as an ordinary ISA I/O APIC
2410 * interrupt, that is edge-triggered and unmasked by default. We
2411 * used to do this, but it caused problems on some systems because
2412 * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using
2413 * the same ExtINT cascade interrupt to drive the local APIC of the
2414 * bootstrap processor. Therefore we refrain from routing IRQ2 to
2415 * the I/O APIC in all cases now. No actual device should request
2416 * it anyway. --macro
2417 */
2418 #define PIC_IRQS (1<<2)
2419
2420 void __init setup_IO_APIC(void)
2421 {
2422
2423 /*
2424 	 * the call to enable_IO_APIC() has been moved to setup_local_APIC() for the BP
2425 */
2426
2427 io_apic_irqs = ~PIC_IRQS;
2428
2429 apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
2430
2431 sync_Arb_IDs();
2432 setup_IO_APIC_irqs();
2433 init_IO_APIC_traps();
2434 check_timer();
2435 }
2436
2437 struct sysfs_ioapic_data {
2438 struct sys_device dev;
2439 struct IO_APIC_route_entry entry[0];
2440 };
2441 static struct sysfs_ioapic_data *mp_ioapic_data[MAX_IO_APICS];
2442
2443 static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
2444 {
2445 struct IO_APIC_route_entry *entry;
2446 struct sysfs_ioapic_data *data;
2447 int i;
2448
2449 data = container_of(dev, struct sysfs_ioapic_data, dev);
2450 entry = data->entry;
2451 	for (i = 0; i < nr_ioapic_registers[dev->id]; i++, entry++)
2452 *entry = ioapic_read_entry(dev->id, i);
2453
2454 return 0;
2455 }
2456
2457 static int ioapic_resume(struct sys_device *dev)
2458 {
2459 struct IO_APIC_route_entry *entry;
2460 struct sysfs_ioapic_data *data;
2461 unsigned long flags;
2462 union IO_APIC_reg_00 reg_00;
2463 int i;
2464
2465 data = container_of(dev, struct sysfs_ioapic_data, dev);
2466 entry = data->entry;
2467
2468 spin_lock_irqsave(&ioapic_lock, flags);
2469 reg_00.raw = io_apic_read(dev->id, 0);
2470 if (reg_00.bits.ID != mp_ioapics[dev->id].mp_apicid) {
2471 reg_00.bits.ID = mp_ioapics[dev->id].mp_apicid;
2472 io_apic_write(dev->id, 0, reg_00.raw);
2473 }
2474 spin_unlock_irqrestore(&ioapic_lock, flags);
2475 for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
2476 ioapic_write_entry(dev->id, i, entry[i]);
2477
2478 return 0;
2479 }
2480
2481 static struct sysdev_class ioapic_sysdev_class = {
2482 .name = "ioapic",
2483 .suspend = ioapic_suspend,
2484 .resume = ioapic_resume,
2485 };
2486
2487 static int __init ioapic_init_sysfs(void)
2488 {
2489 	struct sys_device *dev;
2490 int i, size, error;
2491
2492 error = sysdev_class_register(&ioapic_sysdev_class);
2493 if (error)
2494 return error;
2495
2496 	for (i = 0; i < nr_ioapics; i++) {
2497 size = sizeof(struct sys_device) + nr_ioapic_registers[i]
2498 * sizeof(struct IO_APIC_route_entry);
2499 mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL);
2500 if (!mp_ioapic_data[i]) {
2501 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
2502 continue;
2503 }
2504 dev = &mp_ioapic_data[i]->dev;
2505 dev->id = i;
2506 dev->cls = &ioapic_sysdev_class;
2507 error = sysdev_register(dev);
2508 if (error) {
2509 kfree(mp_ioapic_data[i]);
2510 mp_ioapic_data[i] = NULL;
2511 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
2512 continue;
2513 }
2514 }
2515
2516 return 0;
2517 }
2518
2519 device_initcall(ioapic_init_sysfs);
2520
2521 /*
2522  * Dynamic irq allocation and deallocation
2523 */
2524 unsigned int create_irq_nr(unsigned int irq_want)
2525 {
2526 /* Allocate an unused irq */
2527 unsigned int irq;
2528 unsigned int new;
2529 unsigned long flags;
2530 struct irq_cfg *cfg_new;
2531
2532 #ifndef CONFIG_HAVE_SPARSE_IRQ
2533 irq_want = nr_irqs - 1;
2534 #endif
2535
2536 irq = 0;
2537 spin_lock_irqsave(&vector_lock, flags);
2538 for (new = irq_want; new > 0; new--) {
2539 if (platform_legacy_irq(new))
2540 continue;
2541 cfg_new = irq_cfg(new);
2542 if (cfg_new && cfg_new->vector != 0)
2543 continue;
2544 		/* check if we need to create one */
2545 if (!cfg_new)
2546 cfg_new = irq_cfg_alloc(new);
2547 if (__assign_irq_vector(new, TARGET_CPUS) == 0)
2548 irq = new;
2549 break;
2550 }
2551 spin_unlock_irqrestore(&vector_lock, flags);
2552
2553 if (irq > 0) {
2554 dynamic_irq_init(irq);
2555 }
2556 return irq;
2557 }
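
/*
 * create_irq_nr() scans downward from irq_want for a free, non-legacy
 * irq and returns 0 on failure; create_irq() below asks for the
 * highest possible irq and maps that 0 onto -1 for its callers.
 */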
2558
2559 int create_irq(void)
2560 {
2561 int irq;
2562
2563 irq = create_irq_nr(nr_irqs - 1);
2564
2565 if (irq == 0)
2566 irq = -1;
2567
2568 return irq;
2569 }
2570
2571 void destroy_irq(unsigned int irq)
2572 {
2573 unsigned long flags;
2574
2575 dynamic_irq_cleanup(irq);
2576
2577 #ifdef CONFIG_INTR_REMAP
2578 free_irte(irq);
2579 #endif
2580 spin_lock_irqsave(&vector_lock, flags);
2581 __clear_irq_vector(irq);
2582 spin_unlock_irqrestore(&vector_lock, flags);
2583 }
2584
2585 /*
2586 * MSI message composition
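 *
 * An MSI is a DMA write into the local APIC's address window at
 * 0xFEE00000: the address bits select the destination, while the data
 * bits carry the vector and delivery mode, mirroring the fields of an
 * I/O APIC redirection entry.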
2587 */
2588 #ifdef CONFIG_PCI_MSI
2589 static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
2590 {
2591 struct irq_cfg *cfg;
2592 int err;
2593 unsigned dest;
2594 cpumask_t tmp;
2595
2596 tmp = TARGET_CPUS;
2597 err = assign_irq_vector(irq, tmp);
2598 if (err)
2599 return err;
2600
2601 cfg = irq_cfg(irq);
2602 cpus_and(tmp, cfg->domain, tmp);
2603 dest = cpu_mask_to_apicid(tmp);
2604
2605 #ifdef CONFIG_INTR_REMAP
2606 if (irq_remapped(irq)) {
2607 struct irte irte;
2608 int ir_index;
2609 u16 sub_handle;
2610
2611 ir_index = map_irq_to_irte_handle(irq, &sub_handle);
2612 BUG_ON(ir_index == -1);
2613
2614 		memset(&irte, 0, sizeof(irte));
2615
2616 irte.present = 1;
2617 irte.dst_mode = INT_DEST_MODE;
2618 irte.trigger_mode = 0; /* edge */
2619 irte.dlvry_mode = INT_DELIVERY_MODE;
2620 irte.vector = cfg->vector;
2621 irte.dest_id = IRTE_DEST(dest);
2622
2623 modify_irte(irq, &irte);
2624
2625 msg->address_hi = MSI_ADDR_BASE_HI;
2626 msg->data = sub_handle;
2627 msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
2628 MSI_ADDR_IR_SHV |
2629 MSI_ADDR_IR_INDEX1(ir_index) |
2630 MSI_ADDR_IR_INDEX2(ir_index);
2631 } else
2632 #endif
2633 {
2634 msg->address_hi = MSI_ADDR_BASE_HI;
2635 msg->address_lo =
2636 MSI_ADDR_BASE_LO |
2637 ((INT_DEST_MODE == 0) ?
2638 MSI_ADDR_DEST_MODE_PHYSICAL:
2639 MSI_ADDR_DEST_MODE_LOGICAL) |
2640 ((INT_DELIVERY_MODE != dest_LowestPrio) ?
2641 MSI_ADDR_REDIRECTION_CPU:
2642 MSI_ADDR_REDIRECTION_LOWPRI) |
2643 MSI_ADDR_DEST_ID(dest);
2644
2645 msg->data =
2646 MSI_DATA_TRIGGER_EDGE |
2647 MSI_DATA_LEVEL_ASSERT |
2648 ((INT_DELIVERY_MODE != dest_LowestPrio) ?
2649 MSI_DATA_DELIVERY_FIXED:
2650 MSI_DATA_DELIVERY_LOWPRI) |
2651 MSI_DATA_VECTOR(cfg->vector);
2652 }
2653 return err;
2654 }
2655
2656 #ifdef CONFIG_SMP
2657 static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
2658 {
2659 struct irq_cfg *cfg;
2660 struct msi_msg msg;
2661 unsigned int dest;
2662 cpumask_t tmp;
2663 struct irq_desc *desc;
2664
2665 cpus_and(tmp, mask, cpu_online_map);
2666 if (cpus_empty(tmp))
2667 return;
2668
2669 if (assign_irq_vector(irq, mask))
2670 return;
2671
2672 cfg = irq_cfg(irq);
2673 cpus_and(tmp, cfg->domain, mask);
2674 dest = cpu_mask_to_apicid(tmp);
2675
2676 read_msi_msg(irq, &msg);
2677
2678 msg.data &= ~MSI_DATA_VECTOR_MASK;
2679 msg.data |= MSI_DATA_VECTOR(cfg->vector);
2680 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
2681 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
2682
2683 write_msi_msg(irq, &msg);
2684 desc = irq_to_desc(irq);
2685 desc->affinity = mask;
2686 }
2687
2688 #ifdef CONFIG_INTR_REMAP
2689 /*
2690 * Migrate the MSI irq to another cpumask. This migration is
2691 * done in the process context using interrupt-remapping hardware.
2692 */
2693 static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
2694 {
2695 struct irq_cfg *cfg;
2696 unsigned int dest;
2697 cpumask_t tmp, cleanup_mask;
2698 struct irte irte;
2699 struct irq_desc *desc;
2700
2701 cpus_and(tmp, mask, cpu_online_map);
2702 if (cpus_empty(tmp))
2703 return;
2704
2705 if (get_irte(irq, &irte))
2706 return;
2707
2708 if (assign_irq_vector(irq, mask))
2709 return;
2710
2711 cfg = irq_cfg(irq);
2712 cpus_and(tmp, cfg->domain, mask);
2713 dest = cpu_mask_to_apicid(tmp);
2714
2715 irte.vector = cfg->vector;
2716 irte.dest_id = IRTE_DEST(dest);
2717
2718 /*
2719 * atomically update the IRTE with the new destination and vector.
2720 */
2721 modify_irte(irq, &irte);
2722
2723 /*
2724 * After this point, all the interrupts will start arriving
2725 	 * at the new destination. So, time to clean up the previous
2726 * vector allocation.
2727 */
2728 if (cfg->move_in_progress) {
2729 cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
2730 cfg->move_cleanup_count = cpus_weight(cleanup_mask);
2731 send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
2732 cfg->move_in_progress = 0;
2733 }
2734
2735 desc = irq_to_desc(irq);
2736 desc->affinity = mask;
2737 }
2738 #endif
2739 #endif /* CONFIG_SMP */
2740
2741 /*
2742 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
2743 * which implement the MSI or MSI-X Capability Structure.
2744 */
2745 static struct irq_chip msi_chip = {
2746 .name = "PCI-MSI",
2747 .unmask = unmask_msi_irq,
2748 .mask = mask_msi_irq,
2749 .ack = ack_apic_edge,
2750 #ifdef CONFIG_SMP
2751 .set_affinity = set_msi_irq_affinity,
2752 #endif
2753 .retrigger = ioapic_retrigger_irq,
2754 };
2755
2756 #ifdef CONFIG_INTR_REMAP
2757 static struct irq_chip msi_ir_chip = {
2758 .name = "IR-PCI-MSI",
2759 .unmask = unmask_msi_irq,
2760 .mask = mask_msi_irq,
2761 .ack = ack_x2apic_edge,
2762 #ifdef CONFIG_SMP
2763 .set_affinity = ir_set_msi_irq_affinity,
2764 #endif
2765 .retrigger = ioapic_retrigger_irq,
2766 };
2767
2768 /*
2769 * Map the PCI dev to the corresponding remapping hardware unit
2770 * and allocate 'nvec' consecutive interrupt-remapping table entries
2771 * in it.
2772 */
2773 static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
2774 {
2775 struct intel_iommu *iommu;
2776 int index;
2777
2778 iommu = map_dev_to_ir(dev);
2779 if (!iommu) {
2780 printk(KERN_ERR
2781 "Unable to map PCI %s to iommu\n", pci_name(dev));
2782 return -ENOENT;
2783 }
2784
2785 index = alloc_irte(iommu, irq, nvec);
2786 if (index < 0) {
2787 printk(KERN_ERR
2788 "Unable to allocate %d IRTE for PCI %s\n", nvec,
2789 pci_name(dev));
2790 return -ENOSPC;
2791 }
2792 return index;
2793 }
2794 #endif
2795
2796 static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc, int irq)
2797 {
2798 int ret;
2799 struct msi_msg msg;
2800
2801 ret = msi_compose_msg(dev, irq, &msg);
2802 if (ret < 0)
2803 return ret;
2804
2805 set_irq_msi(irq, desc);
2806 write_msi_msg(irq, &msg);
2807
2808 #ifdef CONFIG_INTR_REMAP
2809 if (irq_remapped(irq)) {
2810 struct irq_desc *desc = irq_to_desc(irq);
2811 /*
2812 * irq migration in process context
2813 */
2814 desc->status |= IRQ_MOVE_PCNTXT;
2815 set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge");
2816 } else
2817 #endif
2818 set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");
2819
2820 return 0;
2821 }
2822
2823 static unsigned int build_irq_for_pci_dev(struct pci_dev *dev)
2824 {
2825 unsigned int irq;
2826
2827 irq = dev->bus->number;
2828 irq <<= 8;
2829 irq |= dev->devfn;
2830 irq <<= 12;
2831
2832 return irq;
2833 }
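
/*
 * The bus/devfn pair sits in bits 12 and up, so each PCI function gets
 * its own 4096-wide block of candidate irq numbers. For example, bus
 * 0x02 devfn 0x08 gives ((0x02 << 8 | 0x08) << 12) = 0x208000; callers
 * add 0x100 and create_irq_nr() searches downward from there.
 */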
2834
2835 int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
2836 {
2837 unsigned int irq;
2838 int ret;
2839 unsigned int irq_want;
2840
2841 irq_want = build_irq_for_pci_dev(dev) + 0x100;
2842
2843 irq = create_irq_nr(irq_want);
2844 if (irq == 0)
2845 return -1;
2846
2847 #ifdef CONFIG_INTR_REMAP
2848 if (!intr_remapping_enabled)
2849 goto no_ir;
2850
2851 ret = msi_alloc_irte(dev, irq, 1);
2852 if (ret < 0)
2853 goto error;
2854 no_ir:
2855 #endif
2856 ret = setup_msi_irq(dev, desc, irq);
2857 if (ret < 0) {
2858 destroy_irq(irq);
2859 return ret;
2860 }
2861 return 0;
2862
2863 #ifdef CONFIG_INTR_REMAP
2864 error:
2865 destroy_irq(irq);
2866 return ret;
2867 #endif
2868 }
2869
2870 int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
2871 {
2872 unsigned int irq;
2873 int ret, sub_handle;
2874 struct msi_desc *desc;
2875 unsigned int irq_want;
2876
2877 #ifdef CONFIG_INTR_REMAP
2878 	struct intel_iommu *iommu = NULL;
2879 int index = 0;
2880 #endif
2881
2882 irq_want = build_irq_for_pci_dev(dev) + 0x100;
2883 sub_handle = 0;
2884 list_for_each_entry(desc, &dev->msi_list, list) {
2885 irq = create_irq_nr(irq_want--);
2886 if (irq == 0)
2887 return -1;
2888 #ifdef CONFIG_INTR_REMAP
2889 if (!intr_remapping_enabled)
2890 goto no_ir;
2891
2892 if (!sub_handle) {
2893 /*
2894 			 * allocate the consecutive block of IRTEs
2895 * for 'nvec'
2896 */
2897 index = msi_alloc_irte(dev, irq, nvec);
2898 if (index < 0) {
2899 ret = index;
2900 goto error;
2901 }
2902 } else {
2903 iommu = map_dev_to_ir(dev);
2904 if (!iommu) {
2905 ret = -ENOENT;
2906 goto error;
2907 }
2908 /*
2909 * setup the mapping between the irq and the IRTE
2910 * base index, the sub_handle pointing to the
2911 * appropriate interrupt remap table entry.
2912 */
2913 set_irte_irq(irq, iommu, index, sub_handle);
2914 }
2915 no_ir:
2916 #endif
2917 ret = setup_msi_irq(dev, desc, irq);
2918 if (ret < 0)
2919 goto error;
2920 sub_handle++;
2921 }
2922 return 0;
2923
2924 error:
2925 destroy_irq(irq);
2926 return ret;
2927 }
2928
2929 void arch_teardown_msi_irq(unsigned int irq)
2930 {
2931 destroy_irq(irq);
2932 }
2933
2934 #ifdef CONFIG_DMAR
2935 #ifdef CONFIG_SMP
2936 static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
2937 {
2938 struct irq_cfg *cfg;
2939 struct msi_msg msg;
2940 unsigned int dest;
2941 cpumask_t tmp;
2942 struct irq_desc *desc;
2943
2944 cpus_and(tmp, mask, cpu_online_map);
2945 if (cpus_empty(tmp))
2946 return;
2947
2948 if (assign_irq_vector(irq, mask))
2949 return;
2950
2951 cfg = irq_cfg(irq);
2952 cpus_and(tmp, cfg->domain, mask);
2953 dest = cpu_mask_to_apicid(tmp);
2954
2955 dmar_msi_read(irq, &msg);
2956
2957 msg.data &= ~MSI_DATA_VECTOR_MASK;
2958 msg.data |= MSI_DATA_VECTOR(cfg->vector);
2959 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
2960 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
2961
2962 dmar_msi_write(irq, &msg);
2963 desc = irq_to_desc(irq);
2964 desc->affinity = mask;
2965 }
2966 #endif /* CONFIG_SMP */
2967
2968 struct irq_chip dmar_msi_type = {
2969 .name = "DMAR_MSI",
2970 .unmask = dmar_msi_unmask,
2971 .mask = dmar_msi_mask,
2972 .ack = ack_apic_edge,
2973 #ifdef CONFIG_SMP
2974 .set_affinity = dmar_msi_set_affinity,
2975 #endif
2976 .retrigger = ioapic_retrigger_irq,
2977 };
2978
2979 int arch_setup_dmar_msi(unsigned int irq)
2980 {
2981 int ret;
2982 struct msi_msg msg;
2983
2984 ret = msi_compose_msg(NULL, irq, &msg);
2985 if (ret < 0)
2986 return ret;
2987 dmar_msi_write(irq, &msg);
2988 set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
2989 "edge");
2990 return 0;
2991 }
2992 #endif
2993
2994 #endif /* CONFIG_PCI_MSI */
2995 /*
2996 * Hypertransport interrupt support
2997 */
2998 #ifdef CONFIG_HT_IRQ
2999
3000 #ifdef CONFIG_SMP
3001
3002 static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
3003 {
3004 struct ht_irq_msg msg;
3005 fetch_ht_irq_msg(irq, &msg);
3006
3007 msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
3008 msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
3009
3010 msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
3011 msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
3012
3013 write_ht_irq_msg(irq, &msg);
3014 }
3015
3016 static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
3017 {
3018 struct irq_cfg *cfg;
3019 unsigned int dest;
3020 cpumask_t tmp;
3021 struct irq_desc *desc;
3022
3023 cpus_and(tmp, mask, cpu_online_map);
3024 if (cpus_empty(tmp))
3025 return;
3026
3027 if (assign_irq_vector(irq, mask))
3028 return;
3029
3030 cfg = irq_cfg(irq);
3031 cpus_and(tmp, cfg->domain, mask);
3032 dest = cpu_mask_to_apicid(tmp);
3033
3034 target_ht_irq(irq, dest, cfg->vector);
3035 desc = irq_to_desc(irq);
3036 desc->affinity = mask;
3037 }
3038 #endif
3039
3040 static struct irq_chip ht_irq_chip = {
3041 .name = "PCI-HT",
3042 .mask = mask_ht_irq,
3043 .unmask = unmask_ht_irq,
3044 .ack = ack_apic_edge,
3045 #ifdef CONFIG_SMP
3046 .set_affinity = set_ht_irq_affinity,
3047 #endif
3048 .retrigger = ioapic_retrigger_irq,
3049 };
3050
3051 int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
3052 {
3053 struct irq_cfg *cfg;
3054 int err;
3055 cpumask_t tmp;
3056
3057 tmp = TARGET_CPUS;
3058 err = assign_irq_vector(irq, tmp);
3059 if (!err) {
3060 struct ht_irq_msg msg;
3061 unsigned dest;
3062
3063 cfg = irq_cfg(irq);
3064 cpus_and(tmp, cfg->domain, tmp);
3065 dest = cpu_mask_to_apicid(tmp);
3066
3067 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
3068
3069 msg.address_lo =
3070 HT_IRQ_LOW_BASE |
3071 HT_IRQ_LOW_DEST_ID(dest) |
3072 HT_IRQ_LOW_VECTOR(cfg->vector) |
3073 ((INT_DEST_MODE == 0) ?
3074 HT_IRQ_LOW_DM_PHYSICAL :
3075 HT_IRQ_LOW_DM_LOGICAL) |
3076 HT_IRQ_LOW_RQEOI_EDGE |
3077 ((INT_DELIVERY_MODE != dest_LowestPrio) ?
3078 HT_IRQ_LOW_MT_FIXED :
3079 HT_IRQ_LOW_MT_ARBITRATED) |
3080 HT_IRQ_LOW_IRQ_MASKED;
3081
3082 write_ht_irq_msg(irq, &msg);
3083
3084 set_irq_chip_and_handler_name(irq, &ht_irq_chip,
3085 handle_edge_irq, "edge");
3086 }
3087 return err;
3088 }
3089 #endif /* CONFIG_HT_IRQ */
3090
3091 /* --------------------------------------------------------------------------
3092 ACPI-based IOAPIC Configuration
3093 -------------------------------------------------------------------------- */
3094
3095 #ifdef CONFIG_ACPI
3096
3097 #define IO_APIC_MAX_ID 0xFE
3098
3099 int __init io_apic_get_redir_entries(int ioapic)
3100 {
3101 union IO_APIC_reg_01 reg_01;
3102 unsigned long flags;
3103
3104 spin_lock_irqsave(&ioapic_lock, flags);
3105 reg_01.raw = io_apic_read(ioapic, 1);
3106 spin_unlock_irqrestore(&ioapic_lock, flags);
3107
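	/*
	 * Note: this is the highest redirection-entry index the chip
	 * reports, i.e. one less than the number of pins.
	 */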
3108 return reg_01.bits.entries;
3109 }
3110
3111
3112 int io_apic_set_pci_routing(int ioapic, int pin, int irq, int triggering, int polarity)
3113 {
3114 if (!IO_APIC_IRQ(irq)) {
3115 		apic_printk(APIC_QUIET, KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ %d\n",
3116 			ioapic, irq);
3117 return -EINVAL;
3118 }
3119
3120 /*
3121 * IRQs < 16 are already in the irq_2_pin[] map
3122 */
3123 if (irq >= 16)
3124 add_pin_to_irq(irq, ioapic, pin);
3125
3126 setup_IO_APIC_irq(ioapic, pin, irq, triggering, polarity);
3127
3128 return 0;
3129 }
3130
3131
3132 int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
3133 {
3134 int i;
3135
3136 if (skip_ioapic_setup)
3137 return -1;
3138
3139 for (i = 0; i < mp_irq_entries; i++)
3140 if (mp_irqs[i].mp_irqtype == mp_INT &&
3141 mp_irqs[i].mp_srcbusirq == bus_irq)
3142 break;
3143 if (i >= mp_irq_entries)
3144 return -1;
3145
3146 *trigger = irq_trigger(i);
3147 *polarity = irq_polarity(i);
3148 return 0;
3149 }
3150
3151 #endif /* CONFIG_ACPI */
3152
3153 /*
3154  * This function is currently only a helper for the i386 SMP boot process, where
3155  * we need to reprogram the ioredtbls to cater for the CPUs which have come online,
3156  * so the mask should in all cases simply be TARGET_CPUS.
3157 */
3158 #ifdef CONFIG_SMP
3159 void __init setup_ioapic_dest(void)
3160 {
3161 int pin, ioapic, irq, irq_entry;
3162 struct irq_cfg *cfg;
3163
3164 if (skip_ioapic_setup == 1)
3165 return;
3166
3167 for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
3168 for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
3169 irq_entry = find_irq_entry(ioapic, pin, mp_INT);
3170 if (irq_entry == -1)
3171 continue;
3172 irq = pin_2_irq(irq_entry, ioapic, pin);
3173
3174 /* setup_IO_APIC_irqs could fail to get vector for some device
3175 * when you have too many devices, because at that time only boot
3176 * cpu is online.
3177 */
3178 cfg = irq_cfg(irq);
3179 if (!cfg->vector)
3180 setup_IO_APIC_irq(ioapic, pin, irq,
3181 irq_trigger(irq_entry),
3182 irq_polarity(irq_entry));
3183 #ifdef CONFIG_INTR_REMAP
3184 else if (intr_remapping_enabled)
3185 set_ir_ioapic_affinity_irq(irq, TARGET_CPUS);
3186 #endif
3187 else
3188 set_ioapic_affinity_irq(irq, TARGET_CPUS);
3189 }
3190
3191 }
3192 }
3193 #endif
3194
3195 #define IOAPIC_RESOURCE_NAME_SIZE 11
3196
3197 static struct resource *ioapic_resources;
3198
3199 static struct resource * __init ioapic_setup_resources(void)
3200 {
3201 unsigned long n;
3202 struct resource *res;
3203 char *mem;
3204 int i;
3205
3206 if (nr_ioapics <= 0)
3207 return NULL;
3208
3209 n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
3210 n *= nr_ioapics;
3211
3212 mem = alloc_bootmem(n);
3213 res = (void *)mem;
3214
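	/*
	 * Layout of the bootmem block: nr_ioapics struct resources first,
	 * then their IOAPIC_RESOURCE_NAME_SIZE-byte name strings.
	 */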
3215 if (mem != NULL) {
3216 mem += sizeof(struct resource) * nr_ioapics;
3217
3218 for (i = 0; i < nr_ioapics; i++) {
3219 res[i].name = mem;
3220 res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
3221 sprintf(mem, "IOAPIC %u", i);
3222 mem += IOAPIC_RESOURCE_NAME_SIZE;
3223 }
3224 }
3225
3226 ioapic_resources = res;
3227
3228 return res;
3229 }
3230
3231 void __init ioapic_init_mappings(void)
3232 {
3233 unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
3234 struct resource *ioapic_res;
3235 int i;
3236
3237 ioapic_res = ioapic_setup_resources();
3238 for (i = 0; i < nr_ioapics; i++) {
3239 if (smp_found_config) {
3240 ioapic_phys = mp_ioapics[i].mp_apicaddr;
3241 } else {
3242 ioapic_phys = (unsigned long)
3243 alloc_bootmem_pages(PAGE_SIZE);
3244 ioapic_phys = __pa(ioapic_phys);
3245 }
3246 set_fixmap_nocache(idx, ioapic_phys);
3247 apic_printk(APIC_VERBOSE,
3248 "mapped IOAPIC to %016lx (%016lx)\n",
3249 __fix_to_virt(idx), ioapic_phys);
3250 idx++;
3251
3252 if (ioapic_res != NULL) {
3253 ioapic_res->start = ioapic_phys;
3254 ioapic_res->end = ioapic_phys + (4 * 1024) - 1;
3255 ioapic_res++;
3256 }
3257 }
3258 }
3259
3260 static int __init ioapic_insert_resources(void)
3261 {
3262 int i;
3263 struct resource *r = ioapic_resources;
3264
3265 if (!r) {
3266 printk(KERN_ERR
3267 "IO APIC resources could be not be allocated.\n");
3268 return -1;
3269 }
3270
3271 for (i = 0; i < nr_ioapics; i++) {
3272 insert_resource(&iomem_resource, r);
3273 r++;
3274 }
3275
3276 return 0;
3277 }
3278
3279 /* Insert the IO APIC resources after PCI initialization has occurred to handle
3280 * IO APICS that are mapped in on a BAR in PCI space. */
3281 late_initcall(ioapic_insert_resources);
3282