arch/x86/kernel/io_apic_64.c
1 /*
2 * Intel IO-APIC support for multi-Pentium hosts.
3 *
4 * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
5 *
6 * Many thanks to Stig Venaas for trying out countless experimental
7 * patches and reporting/debugging problems patiently!
8 *
9 * (c) 1999, Multiple IO-APIC support, developed by
10 * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
11 * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
12 * further tested and cleaned up by Zach Brown <zab@redhat.com>
13 * and Ingo Molnar <mingo@redhat.com>
14 *
15 * Fixes
16 * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
17 * thanks to Eric Gilmore
18 * and Rolf G. Tews
19 * for testing these extensively
20 * Paul Diefenbaugh : Added full ACPI support
21 */
22
23 #include <linux/mm.h>
24 #include <linux/interrupt.h>
25 #include <linux/init.h>
26 #include <linux/delay.h>
27 #include <linux/sched.h>
28 #include <linux/pci.h>
29 #include <linux/mc146818rtc.h>
30 #include <linux/acpi.h>
31 #include <linux/sysdev.h>
32 #include <linux/msi.h>
33 #include <linux/htirq.h>
34 #include <linux/dmar.h>
35 #include <linux/jiffies.h>
36 #ifdef CONFIG_ACPI
37 #include <acpi/acpi_bus.h>
38 #endif
39 #include <linux/bootmem.h>
41
42 #include <asm/idle.h>
43 #include <asm/io.h>
44 #include <asm/smp.h>
45 #include <asm/desc.h>
46 #include <asm/proto.h>
47 #include <asm/acpi.h>
48 #include <asm/dma.h>
49 #include <asm/i8259.h>
50 #include <asm/nmi.h>
51 #include <asm/msidef.h>
52 #include <asm/hypertransport.h>
53 #include <asm/irq_remapping.h>
54
55 #include <mach_ipi.h>
56 #include <mach_apic.h>
57
58 #define __apicdebuginit(type) static type __init
59
60 struct irq_cfg;
61
62 struct irq_cfg {
63 unsigned int irq; /* irq number; -1U marks an unused entry */
64 struct irq_cfg *next; /* next entry in the allocation chain */
65 cpumask_t domain; /* cpus the vector is currently allocated on */
66 cpumask_t old_domain; /* previous domain while a move is in progress */
67 unsigned move_cleanup_count;
68 u8 vector; /* assigned interrupt vector */
69 u8 move_in_progress : 1;
70 };
71
72 /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
73 static struct irq_cfg irq_cfg_legacy[] __initdata = {
74 [0] = { .irq = 0, .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR, },
75 [1] = { .irq = 1, .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR, },
76 [2] = { .irq = 2, .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR, },
77 [3] = { .irq = 3, .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR, },
78 [4] = { .irq = 4, .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR, },
79 [5] = { .irq = 5, .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR, },
80 [6] = { .irq = 6, .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR, },
81 [7] = { .irq = 7, .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR, },
82 [8] = { .irq = 8, .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR, },
83 [9] = { .irq = 9, .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR, },
84 [10] = { .irq = 10, .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, },
85 [11] = { .irq = 11, .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, },
86 [12] = { .irq = 12, .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, },
87 [13] = { .irq = 13, .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, },
88 [14] = { .irq = 14, .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, },
89 [15] = { .irq = 15, .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
90 };
91
92 static struct irq_cfg irq_cfg_init = { .irq = -1U, };
93 /* needs to be bigger than the size of irq_cfg_legacy */
94 static int nr_irq_cfg = 32;
95
96 static int __init parse_nr_irq_cfg(char *arg)
97 {
98 if (arg) {
99 nr_irq_cfg = simple_strtoul(arg, NULL, 0);
100 if (nr_irq_cfg < 32)
101 nr_irq_cfg = 32;
102 }
103 return 0;
104 }
105
106 early_param("nr_irq_cfg", parse_nr_irq_cfg);
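/*
 * Usage note (illustrative, not in the original source): booting with
 * e.g. "nr_irq_cfg=64" pre-allocates 64 irq_cfg entries up front;
 * values below 32 are clamped to 32 by parse_nr_irq_cfg() above.
 */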
107
108 static void init_one_irq_cfg(struct irq_cfg *cfg)
109 {
110 memcpy(cfg, &irq_cfg_init, sizeof(struct irq_cfg));
111 }
112
113 static void __init init_work(void *data)
114 {
115 struct dyn_array *da = data;
116 struct irq_cfg *cfg;
117 int i;
118
119 cfg = *da->name;
120
121 memcpy(cfg, irq_cfg_legacy, sizeof(irq_cfg_legacy));
122
123 i = sizeof(irq_cfg_legacy)/sizeof(irq_cfg_legacy[0]);
124 for (; i < *da->nr; i++)
125 init_one_irq_cfg(&cfg[i]);
126
127 for (i = 1; i < *da->nr; i++)
128 cfg[i-1].next = &cfg[i];
129 }
130
131 static struct irq_cfg *irq_cfgx;
132 DEFINE_DYN_ARRAY(irq_cfgx, sizeof(struct irq_cfg), nr_irq_cfg, PAGE_SIZE, init_work);
133
134 static struct irq_cfg *irq_cfg(unsigned int irq)
135 {
136 struct irq_cfg *cfg;
137
138 BUG_ON(irq == -1U);
139
140 cfg = &irq_cfgx[0];
141 while (cfg) {
142 if (cfg->irq == irq)
143 return cfg;
144
145 if (cfg->irq == -1U)
146 return NULL;
147
148 cfg = cfg->next;
149 }
150
151 return NULL;
152 }
153
154 static struct irq_cfg *irq_cfg_alloc(unsigned int irq)
155 {
156 struct irq_cfg *cfg, *cfg_pri;
157 int i;
158 int count = 0;
159
160 BUG_ON(irq == -1U);
161
162 cfg_pri = cfg = &irq_cfgx[0];
163 while (cfg) {
164 if (cfg->irq == irq)
165 return cfg;
166
167 if (cfg->irq == -1U) {
168 cfg->irq = irq;
169 return cfg;
170 }
171 cfg_pri = cfg;
172 cfg = cfg->next;
173 count++;
174 }
175
176 /*
177 * we ran out of the pre-allocated ones; allocate more
178 */
179 printk(KERN_DEBUG "trying to allocate %d more irq_cfg entries\n", nr_irq_cfg);
180
181 if (after_bootmem)
182 cfg = kzalloc(sizeof(struct irq_cfg)*nr_irq_cfg, GFP_ATOMIC);
183 else
184 cfg = __alloc_bootmem_nopanic(sizeof(struct irq_cfg)*nr_irq_cfg, PAGE_SIZE, 0);
185
186 if (!cfg)
187 panic("please boot with nr_irq_cfg=%d\n", count * 2);
188
189 for (i = 0; i < nr_irq_cfg; i++)
190 init_one_irq_cfg(&cfg[i]);
191
192 for (i = 1; i < nr_irq_cfg; i++)
193 cfg[i-1].next = &cfg[i];
194
195 cfg->irq = irq;
196 cfg_pri->next = cfg;
197
198 return cfg;
199 }
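/*
 * Usage note (descriptive comment, not in the original): add_pin_to_irq()
 * below calls irq_cfg_alloc() before wiring up an ioapic pin, so any irq
 * that reaches setup_IO_APIC_irq() already has an irq_cfg entry to hold
 * its vector and domain.
 */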
200
201 static int assign_irq_vector(int irq, cpumask_t mask);
202
203 int first_system_vector = 0xfe;
204
205 char system_vectors[NR_VECTORS] = { [0 ... NR_VECTORS-1] = SYS_VECTOR_FREE};
206
207 int sis_apic_bug; /* not actually supported, dummy for compile */
208
209 static int no_timer_check;
210
211 static int disable_timer_pin_1 __initdata;
212
213 int timer_through_8259 __initdata;
214
215 /* Where, if anywhere, the i8259 is connected in ExtINT mode */
216 static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
217
218 static DEFINE_SPINLOCK(ioapic_lock);
219 static DEFINE_SPINLOCK(vector_lock);
220
221 /*
222 * # of IRQ routing registers
223 */
224 int nr_ioapic_registers[MAX_IO_APICS];
225
226 /* I/O APIC RTE contents at OS boot time */
227 struct IO_APIC_route_entry *early_ioapic_entries[MAX_IO_APICS];
228
229 /* I/O APIC entries */
230 struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];
231 int nr_ioapics;
232
233 /* MP IRQ source entries */
234 struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
235
236 /* # of MP IRQ source entries */
237 int mp_irq_entries;
238
239 DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
240
241 /*
242 * A rough estimate of how many shared IRQs there are; it can
243 * be changed at any time.
244 */
245
246 int pin_map_size;
247
248 /*
249 * This is performance-critical; we want to do it in O(1).
250 *
251 * The indexing order of this array favors 1:1 mappings
252 * between pins and IRQs.
253 */
254
255 static struct irq_pin_list {
256 short apic, pin;
257 int next;
258 } *irq_2_pin;
259
260 DEFINE_DYN_ARRAY(irq_2_pin, sizeof(struct irq_pin_list), pin_map_size, sizeof(struct irq_pin_list), NULL);
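/*
 * Layout sketch (descriptive comment, not in the original): the first
 * slots of irq_2_pin[] are indexed directly by irq number; when an irq
 * is shared by more than one pin, add_pin_to_irq() below chains extra
 * entries through ->next using slots handed out from first_free_entry.
 * A pin value of -1 marks an unused slot or the end of a chain.
 */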
261
262
263 struct io_apic {
264 unsigned int index;
265 unsigned int unused[3];
266 unsigned int data;
267 };
268
269 static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
270 {
271 return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
272 + (mp_ioapics[idx].mp_apicaddr & ~PAGE_MASK);
273 }
274
275 static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
276 {
277 struct io_apic __iomem *io_apic = io_apic_base(apic);
278 writel(reg, &io_apic->index);
279 return readl(&io_apic->data);
280 }
281
282 static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
283 {
284 struct io_apic __iomem *io_apic = io_apic_base(apic);
285 writel(reg, &io_apic->index);
286 writel(value, &io_apic->data);
287 }
288
289 /*
290 * Re-write a value: to be used for read-modify-write
291 * cycles where the read already set up the index register.
292 */
293 static inline void io_apic_modify(unsigned int apic, unsigned int value)
294 {
295 struct io_apic __iomem *io_apic = io_apic_base(apic);
296 writel(value, &io_apic->data);
297 }
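/*
 * Read-modify-write sketch (illustrative): the preceding io_apic_read()
 * latches the register index, so io_apic_modify() can re-write just the
 * data window, e.g.:
 *
 *	reg = io_apic_read(apic, 0x10 + pin * 2);
 *	reg |= IO_APIC_REDIR_MASKED;
 *	io_apic_modify(apic, reg);
 *
 * This is exactly the pattern the __DO_ACTION() macro below relies on.
 */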
298
299 static bool io_apic_level_ack_pending(unsigned int irq)
300 {
301 struct irq_pin_list *entry;
302 unsigned long flags;
303
304 spin_lock_irqsave(&ioapic_lock, flags);
305 entry = irq_2_pin + irq;
306 for (;;) {
307 unsigned int reg;
308 int pin;
309
310 pin = entry->pin;
311 if (pin == -1)
312 break;
313 reg = io_apic_read(entry->apic, 0x10 + pin*2);
314 /* Is the remote IRR bit set? */
315 if (reg & IO_APIC_REDIR_REMOTE_IRR) {
316 spin_unlock_irqrestore(&ioapic_lock, flags);
317 return true;
318 }
319 if (!entry->next)
320 break;
321 entry = irq_2_pin + entry->next;
322 }
323 spin_unlock_irqrestore(&ioapic_lock, flags);
324
325 return false;
326 }
327
328 /*
329 * Synchronize the IO-APIC and the CPU by doing
330 * a dummy read from the IO-APIC
331 */
332 static inline void io_apic_sync(unsigned int apic)
333 {
334 struct io_apic __iomem *io_apic = io_apic_base(apic);
335 readl(&io_apic->data);
336 }
337
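/*
 * Descriptive note (not in the original): __DO_ACTION walks the
 * irq_2_pin chain for 'irq' and applies ACTION (a read-modify-write
 * expression) to the low word of each routing entry, running FINAL
 * (e.g. io_apic_sync()) after each write. See the DO_ACTION() wrappers
 * further down for the concrete mask/unmask users.
 */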
338 #define __DO_ACTION(R, ACTION, FINAL) \
339 \
340 { \
341 int pin; \
342 struct irq_pin_list *entry = irq_2_pin + irq; \
343 \
344 BUG_ON(irq >= nr_irqs); \
345 for (;;) { \
346 unsigned int reg; \
347 pin = entry->pin; \
348 if (pin == -1) \
349 break; \
350 reg = io_apic_read(entry->apic, 0x10 + R + pin*2); \
351 reg ACTION; \
352 io_apic_modify(entry->apic, reg); \
353 FINAL; \
354 if (!entry->next) \
355 break; \
356 entry = irq_2_pin + entry->next; \
357 } \
358 }
359
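/*
 * Descriptive note (not in the original): an IO-APIC routing entry is
 * 64 bits wide but the register window is 32 bits, so this union lets
 * one entry be accessed as two words (w1 = low dword at 0x10 + 2*pin,
 * w2 = high dword at 0x11 + 2*pin).
 */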
360 union entry_union {
361 struct { u32 w1, w2; };
362 struct IO_APIC_route_entry entry;
363 };
364
365 static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
366 {
367 union entry_union eu;
368 unsigned long flags;
369 spin_lock_irqsave(&ioapic_lock, flags);
370 eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
371 eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
372 spin_unlock_irqrestore(&ioapic_lock, flags);
373 return eu.entry;
374 }
375
376 /*
377 * When we write a new IO APIC routing entry, we need to write the high
378 * word first! If the mask bit in the low word is clear, we will enable
379 * the interrupt, and we need to make sure the entry is fully populated
380 * before that happens.
381 */
382 static void
383 __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
384 {
385 union entry_union eu;
386 eu.entry = e;
387 io_apic_write(apic, 0x11 + 2*pin, eu.w2);
388 io_apic_write(apic, 0x10 + 2*pin, eu.w1);
389 }
390
391 static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
392 {
393 unsigned long flags;
394 spin_lock_irqsave(&ioapic_lock, flags);
395 __ioapic_write_entry(apic, pin, e);
396 spin_unlock_irqrestore(&ioapic_lock, flags);
397 }
398
399 /*
400 * When we mask an IO APIC routing entry, we need to write the low
401 * word first, in order to set the mask bit before we change the
402 * high bits!
403 */
404 static void ioapic_mask_entry(int apic, int pin)
405 {
406 unsigned long flags;
407 union entry_union eu = { .entry.mask = 1 };
408
409 spin_lock_irqsave(&ioapic_lock, flags);
410 io_apic_write(apic, 0x10 + 2*pin, eu.w1);
411 io_apic_write(apic, 0x11 + 2*pin, eu.w2);
412 spin_unlock_irqrestore(&ioapic_lock, flags);
413 }
414
415 #ifdef CONFIG_SMP
416 static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector)
417 {
418 int apic, pin;
419 struct irq_pin_list *entry = irq_2_pin + irq;
420
421 BUG_ON(irq >= nr_irqs);
422 for (;;) {
423 unsigned int reg;
424 apic = entry->apic;
425 pin = entry->pin;
426 if (pin == -1)
427 break;
428 /*
429 * With interrupt-remapping, destination information comes
430 * from interrupt-remapping table entry.
431 */
432 if (!irq_remapped(irq))
433 io_apic_write(apic, 0x11 + pin*2, dest);
434 reg = io_apic_read(apic, 0x10 + pin*2);
435 reg &= ~IO_APIC_REDIR_VECTOR_MASK;
436 reg |= vector;
437 io_apic_modify(apic, reg);
438 if (!entry->next)
439 break;
440 entry = irq_2_pin + entry->next;
441 }
442 }
443
444 static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
445 {
446 struct irq_cfg *cfg = irq_cfg(irq);
447 unsigned long flags;
448 unsigned int dest;
449 cpumask_t tmp;
450 struct irq_desc *desc;
451
452 cpus_and(tmp, mask, cpu_online_map);
453 if (cpus_empty(tmp))
454 return;
455
456 if (assign_irq_vector(irq, mask))
457 return;
458
459 cpus_and(tmp, cfg->domain, mask);
460 dest = cpu_mask_to_apicid(tmp);
461
462 /*
463 * Only the high 8 bits are valid.
464 */
465 dest = SET_APIC_LOGICAL_ID(dest);
466
467 desc = irq_to_desc(irq);
468 spin_lock_irqsave(&ioapic_lock, flags);
469 __target_IO_APIC_irq(irq, dest, cfg->vector);
470 desc->affinity = mask;
471 spin_unlock_irqrestore(&ioapic_lock, flags);
472 }
473 #endif
474
475 /*
476 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
477 * shared ISA-space IRQs, so we have to support them. We are super
478 * fast in the common case, and fast for shared ISA-space IRQs.
479 */
480 int first_free_entry;
481 static void add_pin_to_irq(unsigned int irq, int apic, int pin)
482 {
483 struct irq_pin_list *entry = irq_2_pin + irq;
484
485 BUG_ON(irq >= nr_irqs);
486 irq_cfg_alloc(irq);
487
488 while (entry->next)
489 entry = irq_2_pin + entry->next;
490
491 if (entry->pin != -1) {
492 entry->next = first_free_entry;
493 entry = irq_2_pin + entry->next;
494 if (++first_free_entry >= pin_map_size)
495 panic("io_apic.c: ran out of irq_2_pin entries!");
496 }
497 entry->apic = apic;
498 entry->pin = pin;
499 }
500
501 /*
502 * Reroute an IRQ to a different pin.
503 */
504 static void __init replace_pin_at_irq(unsigned int irq,
505 int oldapic, int oldpin,
506 int newapic, int newpin)
507 {
508 struct irq_pin_list *entry = irq_2_pin + irq;
509
510 while (1) {
511 if (entry->apic == oldapic && entry->pin == oldpin) {
512 entry->apic = newapic;
513 entry->pin = newpin;
514 }
515 if (!entry->next)
516 break;
517 entry = irq_2_pin + entry->next;
518 }
519 }
520
521
522 #define DO_ACTION(name,R,ACTION, FINAL) \
523 \
524 static void name##_IO_APIC_irq (unsigned int irq) \
525 __DO_ACTION(R, ACTION, FINAL)
526
527 /* mask = 1 */
528 DO_ACTION(__mask, 0, |= IO_APIC_REDIR_MASKED, io_apic_sync(entry->apic))
529
530 /* mask = 0 */
531 DO_ACTION(__unmask, 0, &= ~IO_APIC_REDIR_MASKED, )
532
533 static void mask_IO_APIC_irq (unsigned int irq)
534 {
535 unsigned long flags;
536
537 spin_lock_irqsave(&ioapic_lock, flags);
538 __mask_IO_APIC_irq(irq);
539 spin_unlock_irqrestore(&ioapic_lock, flags);
540 }
541
542 static void unmask_IO_APIC_irq (unsigned int irq)
543 {
544 unsigned long flags;
545
546 spin_lock_irqsave(&ioapic_lock, flags);
547 __unmask_IO_APIC_irq(irq);
548 spin_unlock_irqrestore(&ioapic_lock, flags);
549 }
550
551 static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
552 {
553 struct IO_APIC_route_entry entry;
554
555 /* Check delivery_mode to be sure we're not clearing an SMI pin */
556 entry = ioapic_read_entry(apic, pin);
557 if (entry.delivery_mode == dest_SMI)
558 return;
559 /*
560 * Disable it in the IO-APIC irq-routing table:
561 */
562 ioapic_mask_entry(apic, pin);
563 }
564
565 static void clear_IO_APIC (void)
566 {
567 int apic, pin;
568
569 for (apic = 0; apic < nr_ioapics; apic++)
570 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
571 clear_IO_APIC_pin(apic, pin);
572 }
573
574 /*
575 * Save and mask all the unmasked IO-APIC RTEs
576 */
577 int save_mask_IO_APIC_setup(void)
578 {
579 union IO_APIC_reg_01 reg_01;
580 unsigned long flags;
581 int apic, pin;
582
583 /*
584 * The number of IO-APIC IRQ registers (== #pins):
585 */
586 for (apic = 0; apic < nr_ioapics; apic++) {
587 spin_lock_irqsave(&ioapic_lock, flags);
588 reg_01.raw = io_apic_read(apic, 1);
589 spin_unlock_irqrestore(&ioapic_lock, flags);
590 nr_ioapic_registers[apic] = reg_01.bits.entries+1;
591 }
592
593 for (apic = 0; apic < nr_ioapics; apic++) {
594 early_ioapic_entries[apic] =
595 kzalloc(sizeof(struct IO_APIC_route_entry) *
596 nr_ioapic_registers[apic], GFP_KERNEL);
597 if (!early_ioapic_entries[apic])
598 return -ENOMEM;
599 }
600
601 for (apic = 0; apic < nr_ioapics; apic++)
602 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
603 struct IO_APIC_route_entry entry;
604
605 entry = early_ioapic_entries[apic][pin] =
606 ioapic_read_entry(apic, pin);
607 if (!entry.mask) {
608 entry.mask = 1;
609 ioapic_write_entry(apic, pin, entry);
610 }
611 }
612 return 0;
613 }
614
615 void restore_IO_APIC_setup(void)
616 {
617 int apic, pin;
618
619 for (apic = 0; apic < nr_ioapics; apic++)
620 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
621 ioapic_write_entry(apic, pin,
622 early_ioapic_entries[apic][pin]);
623 }
624
625 void reinit_intr_remapped_IO_APIC(int intr_remapping)
626 {
627 /*
628 * For now, a plain restore of the previous settings.
629 * TBD: once the OS enables interrupt-remapping, the IO-APIC
630 * RTEs need to be set up to point to interrupt-remapping table
631 * entries. Until then, do a plain restore and wait for
632 * setup_IO_APIC_irqs() to do the proper initialization.
633 */
634 restore_IO_APIC_setup();
635 }
636
637 int skip_ioapic_setup;
638 int ioapic_force;
639
640 static int __init parse_noapic(char *str)
641 {
642 disable_ioapic_setup();
643 return 0;
644 }
645 early_param("noapic", parse_noapic);
646
647 /* Actually the next is obsolete, but keep it for paranoid reasons -AK */
648 static int __init disable_timer_pin_setup(char *arg)
649 {
650 disable_timer_pin_1 = 1;
651 return 1;
652 }
653 __setup("disable_timer_pin_1", disable_timer_pin_setup);
654
655
656 /*
657 * Find the IRQ entry number of a certain pin.
658 */
659 static int find_irq_entry(int apic, int pin, int type)
660 {
661 int i;
662
663 for (i = 0; i < mp_irq_entries; i++)
664 if (mp_irqs[i].mp_irqtype == type &&
665 (mp_irqs[i].mp_dstapic == mp_ioapics[apic].mp_apicid ||
666 mp_irqs[i].mp_dstapic == MP_APIC_ALL) &&
667 mp_irqs[i].mp_dstirq == pin)
668 return i;
669
670 return -1;
671 }
672
673 /*
674 * Find the pin to which IRQ[irq] (ISA) is connected
675 */
676 static int __init find_isa_irq_pin(int irq, int type)
677 {
678 int i;
679
680 for (i = 0; i < mp_irq_entries; i++) {
681 int lbus = mp_irqs[i].mp_srcbus;
682
683 if (test_bit(lbus, mp_bus_not_pci) &&
684 (mp_irqs[i].mp_irqtype == type) &&
685 (mp_irqs[i].mp_srcbusirq == irq))
686
687 return mp_irqs[i].mp_dstirq;
688 }
689 return -1;
690 }
691
692 static int __init find_isa_irq_apic(int irq, int type)
693 {
694 int i;
695
696 for (i = 0; i < mp_irq_entries; i++) {
697 int lbus = mp_irqs[i].mp_srcbus;
698
699 if (test_bit(lbus, mp_bus_not_pci) &&
700 (mp_irqs[i].mp_irqtype == type) &&
701 (mp_irqs[i].mp_srcbusirq == irq))
702 break;
703 }
704 if (i < mp_irq_entries) {
705 int apic;
706 for(apic = 0; apic < nr_ioapics; apic++) {
707 if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic)
708 return apic;
709 }
710 }
711
712 return -1;
713 }
714
715 /*
716 * Find a specific PCI IRQ entry.
717 * Not an __init, possibly needed by modules
718 */
719 static int pin_2_irq(int idx, int apic, int pin);
720
721 int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
722 {
723 int apic, i, best_guess = -1;
724
725 apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
726 bus, slot, pin);
727 if (test_bit(bus, mp_bus_not_pci)) {
728 apic_printk(APIC_VERBOSE, "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
729 return -1;
730 }
731 for (i = 0; i < mp_irq_entries; i++) {
732 int lbus = mp_irqs[i].mp_srcbus;
733
734 for (apic = 0; apic < nr_ioapics; apic++)
735 if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic ||
736 mp_irqs[i].mp_dstapic == MP_APIC_ALL)
737 break;
738
739 if (!test_bit(lbus, mp_bus_not_pci) &&
740 !mp_irqs[i].mp_irqtype &&
741 (bus == lbus) &&
742 (slot == ((mp_irqs[i].mp_srcbusirq >> 2) & 0x1f))) {
743 int irq = pin_2_irq(i,apic,mp_irqs[i].mp_dstirq);
744
745 if (!(apic || IO_APIC_IRQ(irq)))
746 continue;
747
748 if (pin == (mp_irqs[i].mp_srcbusirq & 3))
749 return irq;
750 /*
751 * Use the first all-but-pin matching entry as a
752 * best-guess fuzzy result for broken mptables.
753 */
754 if (best_guess < 0)
755 best_guess = irq;
756 }
757 }
758 BUG_ON(best_guess >= nr_irqs);
759 return best_guess;
760 }
761
762 /* ISA interrupts are always polarity zero edge triggered,
763 * when listed as conforming in the MP table. */
764
765 #define default_ISA_trigger(idx) (0)
766 #define default_ISA_polarity(idx) (0)
767
768 /* PCI interrupts are always polarity one level triggered,
769 * when listed as conforming in the MP table. */
770
771 #define default_PCI_trigger(idx) (1)
772 #define default_PCI_polarity(idx) (1)
773
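/*
 * mp_irqflag layout, as decoded by MPBIOS_polarity()/MPBIOS_trigger()
 * below: bits 0-1 give the polarity (0 = conforms to bus, 1 = active
 * high, 3 = active low) and bits 2-3 give the trigger mode (0 =
 * conforms, 1 = edge, 3 = level); value 2 is reserved in both fields.
 * Worked example (illustrative): mp_irqflag == 0x0d (1101b) decodes to
 * level triggered, active high.
 */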
774 static int MPBIOS_polarity(int idx)
775 {
776 int bus = mp_irqs[idx].mp_srcbus;
777 int polarity;
778
779 /*
780 * Determine IRQ line polarity (high active or low active):
781 */
782 switch (mp_irqs[idx].mp_irqflag & 3)
783 {
784 case 0: /* conforms, ie. bus-type dependent polarity */
785 if (test_bit(bus, mp_bus_not_pci))
786 polarity = default_ISA_polarity(idx);
787 else
788 polarity = default_PCI_polarity(idx);
789 break;
790 case 1: /* high active */
791 {
792 polarity = 0;
793 break;
794 }
795 case 2: /* reserved */
796 {
797 printk(KERN_WARNING "broken BIOS!!\n");
798 polarity = 1;
799 break;
800 }
801 case 3: /* low active */
802 {
803 polarity = 1;
804 break;
805 }
806 default: /* invalid */
807 {
808 printk(KERN_WARNING "broken BIOS!!\n");
809 polarity = 1;
810 break;
811 }
812 }
813 return polarity;
814 }
815
816 static int MPBIOS_trigger(int idx)
817 {
818 int bus = mp_irqs[idx].mp_srcbus;
819 int trigger;
820
821 /*
822 * Determine IRQ trigger mode (edge or level sensitive):
823 */
824 switch ((mp_irqs[idx].mp_irqflag>>2) & 3)
825 {
826 case 0: /* conforms, ie. bus-type dependent */
827 if (test_bit(bus, mp_bus_not_pci))
828 trigger = default_ISA_trigger(idx);
829 else
830 trigger = default_PCI_trigger(idx);
831 break;
832 case 1: /* edge */
833 {
834 trigger = 0;
835 break;
836 }
837 case 2: /* reserved */
838 {
839 printk(KERN_WARNING "broken BIOS!!\n");
840 trigger = 1;
841 break;
842 }
843 case 3: /* level */
844 {
845 trigger = 1;
846 break;
847 }
848 default: /* invalid */
849 {
850 printk(KERN_WARNING "broken BIOS!!\n");
851 trigger = 0;
852 break;
853 }
854 }
855 return trigger;
856 }
857
858 static inline int irq_polarity(int idx)
859 {
860 return MPBIOS_polarity(idx);
861 }
862
863 static inline int irq_trigger(int idx)
864 {
865 return MPBIOS_trigger(idx);
866 }
867
868 static int pin_2_irq(int idx, int apic, int pin)
869 {
870 int irq, i;
871 int bus = mp_irqs[idx].mp_srcbus;
872
873 /*
874 * Debugging check, we are in big trouble if this message pops up!
875 */
876 if (mp_irqs[idx].mp_dstirq != pin)
877 printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
878
879 if (test_bit(bus, mp_bus_not_pci)) {
880 irq = mp_irqs[idx].mp_srcbusirq;
881 } else {
882 /*
883 * PCI IRQs are mapped in order
884 */
885 i = irq = 0;
886 while (i < apic)
887 irq += nr_ioapic_registers[i++];
888 irq += pin;
889 }
890 BUG_ON(irq >= nr_irqs);
891 return irq;
892 }
893
894 void lock_vector_lock(void)
895 {
896 /* Used so that the set of online cpus does not change
897 * during assign_irq_vector.
898 */
899 spin_lock(&vector_lock);
900 }
901
902 void unlock_vector_lock(void)
903 {
904 spin_unlock(&vector_lock);
905 }
906
907 static int __assign_irq_vector(int irq, cpumask_t mask)
908 {
909 /*
910 * NOTE! The local APIC isn't very good at handling
911 * multiple interrupts at the same interrupt level.
912 * As the interrupt level is determined by taking the
913 * vector number and shifting that right by 4, we
914 * want to spread these out a bit so that they don't
915 * all fall in the same interrupt level.
916 *
917 * Also, we've got to be careful not to trash gate
918 * 0x80, because int 0x80 is hm, kind of importantish. ;)
919 */
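	/*
	 * Worked example (illustrative, with a hypothetical
	 * FIRST_DEVICE_VECTOR of 0x41): successive allocations try
	 * 0x49, 0x51, 0x59, ... - stepping by 8 spreads allocations
	 * across priority levels (the level is vector >> 4) rather
	 * than filling one level's 16 vectors before moving on. Only
	 * when the range up to first_system_vector is exhausted does
	 * 'offset' advance and the scan restart at
	 * FIRST_DEVICE_VECTOR + offset.
	 */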
920 static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
921 unsigned int old_vector;
922 int cpu;
923 struct irq_cfg *cfg;
924
925 BUG_ON((unsigned)irq >= nr_irqs);
926 cfg = irq_cfg(irq);
927
928 /* Only try to allocate irqs on cpus that are present */
929 cpus_and(mask, mask, cpu_online_map);
930
931 if ((cfg->move_in_progress) || cfg->move_cleanup_count)
932 return -EBUSY;
933
934 old_vector = cfg->vector;
935 if (old_vector) {
936 cpumask_t tmp;
937 cpus_and(tmp, cfg->domain, mask);
938 if (!cpus_empty(tmp))
939 return 0;
940 }
941
942 for_each_cpu_mask_nr(cpu, mask) {
943 cpumask_t domain, new_mask;
944 int new_cpu;
945 int vector, offset;
946
947 domain = vector_allocation_domain(cpu);
948 cpus_and(new_mask, domain, cpu_online_map);
949
950 vector = current_vector;
951 offset = current_offset;
952 next:
953 vector += 8;
954 if (vector >= first_system_vector) {
955 /* If we run out of vectors on large boxen, must share them. */
956 offset = (offset + 1) % 8;
957 vector = FIRST_DEVICE_VECTOR + offset;
958 }
959 if (unlikely(current_vector == vector))
960 continue;
961 if (vector == IA32_SYSCALL_VECTOR)
962 goto next;
963 for_each_cpu_mask_nr(new_cpu, new_mask)
964 if (per_cpu(vector_irq, new_cpu)[vector] != -1)
965 goto next;
966 /* Found one! */
967 current_vector = vector;
968 current_offset = offset;
969 if (old_vector) {
970 cfg->move_in_progress = 1;
971 cfg->old_domain = cfg->domain;
972 }
973 for_each_cpu_mask_nr(new_cpu, new_mask)
974 per_cpu(vector_irq, new_cpu)[vector] = irq;
975 cfg->vector = vector;
976 cfg->domain = domain;
977 return 0;
978 }
979 return -ENOSPC;
980 }
981
982 static int assign_irq_vector(int irq, cpumask_t mask)
983 {
984 int err;
985 unsigned long flags;
986
987 spin_lock_irqsave(&vector_lock, flags);
988 err = __assign_irq_vector(irq, mask);
989 spin_unlock_irqrestore(&vector_lock, flags);
990 return err;
991 }
992
993 static void __clear_irq_vector(int irq)
994 {
995 struct irq_cfg *cfg;
996 cpumask_t mask;
997 int cpu, vector;
998
999 BUG_ON((unsigned)irq >= nr_irqs);
1000 cfg = irq_cfg(irq);
1001 BUG_ON(!cfg->vector);
1002
1003 vector = cfg->vector;
1004 cpus_and(mask, cfg->domain, cpu_online_map);
1005 for_each_cpu_mask_nr(cpu, mask)
1006 per_cpu(vector_irq, cpu)[vector] = -1;
1007
1008 cfg->vector = 0;
1009 cpus_clear(cfg->domain);
1010 }
1011
1012 void __setup_vector_irq(int cpu)
1013 {
1014 /* Initialize vector_irq on a new cpu */
1015 /* This function must be called with vector_lock held */
1016 int irq, vector;
1017
1018 /* Mark the inuse vectors */
1019 for (irq = 0; irq < nr_irqs; ++irq) {
1020 struct irq_cfg *cfg = irq_cfg(irq);
1021
1022 if (!cpu_isset(cpu, cfg->domain))
1023 continue;
1024 vector = cfg->vector;
1025 per_cpu(vector_irq, cpu)[vector] = irq;
1026 }
1027 /* Mark the free vectors */
1028 for (vector = 0; vector < NR_VECTORS; ++vector) {
1029 struct irq_cfg *cfg;
1030
1031 irq = per_cpu(vector_irq, cpu)[vector];
1032 if (irq < 0)
1033 continue;
1034
1035 cfg = irq_cfg(irq);
1036 if (!cpu_isset(cpu, cfg->domain))
1037 per_cpu(vector_irq, cpu)[vector] = -1;
1038 }
1039 }
1040
1041 static struct irq_chip ioapic_chip;
1042 #ifdef CONFIG_INTR_REMAP
1043 static struct irq_chip ir_ioapic_chip;
1044 #endif
1045
1046 static void ioapic_register_intr(int irq, unsigned long trigger)
1047 {
1048 struct irq_desc *desc;
1049
1050 desc = irq_to_desc(irq);
1051 if (trigger)
1052 desc->status |= IRQ_LEVEL;
1053 else
1054 desc->status &= ~IRQ_LEVEL;
1055
1056 #ifdef CONFIG_INTR_REMAP
1057 if (irq_remapped(irq)) {
1058 desc->status |= IRQ_MOVE_PCNTXT;
1059 if (trigger)
1060 set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
1061 handle_fasteoi_irq,
1062 "fasteoi");
1063 else
1064 set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
1065 handle_edge_irq, "edge");
1066 return;
1067 }
1068 #endif
1069 if (trigger)
1070 set_irq_chip_and_handler_name(irq, &ioapic_chip,
1071 handle_fasteoi_irq,
1072 "fasteoi");
1073 else
1074 set_irq_chip_and_handler_name(irq, &ioapic_chip,
1075 handle_edge_irq, "edge");
1076 }
1077
1078 static int setup_ioapic_entry(int apic, int irq,
1079 struct IO_APIC_route_entry *entry,
1080 unsigned int destination, int trigger,
1081 int polarity, int vector)
1082 {
1083 /*
1084 * add it to the IO-APIC irq-routing table:
1085 */
1086 memset(entry,0,sizeof(*entry));
1087
1088 #ifdef CONFIG_INTR_REMAP
1089 if (intr_remapping_enabled) {
1090 struct intel_iommu *iommu = map_ioapic_to_ir(apic);
1091 struct irte irte;
1092 struct IR_IO_APIC_route_entry *ir_entry =
1093 (struct IR_IO_APIC_route_entry *) entry;
1094 int index;
1095
1096 if (!iommu)
1097 panic("No mapping iommu for ioapic %d\n", apic);
1098
1099 index = alloc_irte(iommu, irq, 1);
1100 if (index < 0)
1101 panic("Failed to allocate IRTE for ioapic %d\n", apic);
1102
1103 memset(&irte, 0, sizeof(irte));
1104
1105 irte.present = 1;
1106 irte.dst_mode = INT_DEST_MODE;
1107 irte.trigger_mode = trigger;
1108 irte.dlvry_mode = INT_DELIVERY_MODE;
1109 irte.vector = vector;
1110 irte.dest_id = IRTE_DEST(destination);
1111
1112 modify_irte(irq, &irte);
1113
1114 ir_entry->index2 = (index >> 15) & 0x1;
1115 ir_entry->zero = 0;
1116 ir_entry->format = 1;
1117 ir_entry->index = (index & 0x7fff);
1118 } else
1119 #endif
1120 {
1121 entry->delivery_mode = INT_DELIVERY_MODE;
1122 entry->dest_mode = INT_DEST_MODE;
1123 entry->dest = destination;
1124 }
1125
1126 entry->mask = 0; /* enable IRQ */
1127 entry->trigger = trigger;
1128 entry->polarity = polarity;
1129 entry->vector = vector;
1130
1131 /* Mask level triggered irqs.
1132 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
1133 */
1134 if (trigger)
1135 entry->mask = 1;
1136 return 0;
1137 }
1138
1139 static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq,
1140 int trigger, int polarity)
1141 {
1142 struct irq_cfg *cfg;
1143 struct IO_APIC_route_entry entry;
1144 cpumask_t mask;
1145
1146 if (!IO_APIC_IRQ(irq))
1147 return;
1148
1149 cfg = irq_cfg(irq);
1150
1151 mask = TARGET_CPUS;
1152 if (assign_irq_vector(irq, mask))
1153 return;
1154
1155 cpus_and(mask, cfg->domain, mask);
1156
1157 apic_printk(APIC_VERBOSE,KERN_DEBUG
1158 "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
1159 "IRQ %d Mode:%i Active:%i)\n",
1160 apic, mp_ioapics[apic].mp_apicid, pin, cfg->vector,
1161 irq, trigger, polarity);
1162
1163
1164 if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry,
1165 cpu_mask_to_apicid(mask), trigger, polarity,
1166 cfg->vector)) {
1167 printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
1168 mp_ioapics[apic].mp_apicid, pin);
1169 __clear_irq_vector(irq);
1170 return;
1171 }
1172
1173 ioapic_register_intr(irq, trigger);
1174 if (irq < 16)
1175 disable_8259A_irq(irq);
1176
1177 ioapic_write_entry(apic, pin, entry);
1178 }
1179
1180 static void __init setup_IO_APIC_irqs(void)
1181 {
1182 int apic, pin, idx, irq, first_notcon = 1;
1183
1184 apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
1185
1186 for (apic = 0; apic < nr_ioapics; apic++) {
1187 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
1188
1189 idx = find_irq_entry(apic,pin,mp_INT);
1190 if (idx == -1) {
1191 if (first_notcon) {
1192 apic_printk(APIC_VERBOSE, KERN_DEBUG " IO-APIC (apicid-pin) %d-%d", mp_ioapics[apic].mp_apicid, pin);
1193 first_notcon = 0;
1194 } else
1195 apic_printk(APIC_VERBOSE, ", %d-%d", mp_ioapics[apic].mp_apicid, pin);
1196 continue;
1197 }
1198 if (!first_notcon) {
1199 apic_printk(APIC_VERBOSE, " not connected.\n");
1200 first_notcon = 1;
1201 }
1202
1203 irq = pin_2_irq(idx, apic, pin);
1204 add_pin_to_irq(irq, apic, pin);
1205
1206 setup_IO_APIC_irq(apic, pin, irq,
1207 irq_trigger(idx), irq_polarity(idx));
1208 }
1209 }
1210
1211 if (!first_notcon)
1212 apic_printk(APIC_VERBOSE, " not connected.\n");
1213 }
1214
1215 /*
1216 * Set up the timer pin, possibly with the 8259A master behind it.
1217 */
1218 static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
1219 int vector)
1220 {
1221 struct IO_APIC_route_entry entry;
1222
1223 if (intr_remapping_enabled)
1224 return;
1225
1226 memset(&entry, 0, sizeof(entry));
1227
1228 /*
1229 * We use logical delivery to get the timer IRQ
1230 * to the first CPU.
1231 */
1232 entry.dest_mode = INT_DEST_MODE;
1233 entry.mask = 1; /* mask IRQ now */
1234 entry.dest = cpu_mask_to_apicid(TARGET_CPUS);
1235 entry.delivery_mode = INT_DELIVERY_MODE;
1236 entry.polarity = 0;
1237 entry.trigger = 0;
1238 entry.vector = vector;
1239
1240 /*
1241 * The timer IRQ doesn't have to know that behind the
1242 * scenes we may have an 8259A master in AEOI mode ...
1243 */
1244 set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");
1245
1246 /*
1247 * Add it to the IO-APIC irq-routing table:
1248 */
1249 ioapic_write_entry(apic, pin, entry);
1250 }
1251
1252
1253 __apicdebuginit(void) print_IO_APIC(void)
1254 {
1255 int apic, i;
1256 union IO_APIC_reg_00 reg_00;
1257 union IO_APIC_reg_01 reg_01;
1258 union IO_APIC_reg_02 reg_02;
1259 unsigned long flags;
1260
1261 if (apic_verbosity == APIC_QUIET)
1262 return;
1263
1264 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
1265 for (i = 0; i < nr_ioapics; i++)
1266 printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
1267 mp_ioapics[i].mp_apicid, nr_ioapic_registers[i]);
1268
1269 /*
1270 * We are a bit conservative about what we expect. We have to
1271 * know about every hardware change ASAP.
1272 */
1273 printk(KERN_INFO "testing the IO APIC.......................\n");
1274
1275 for (apic = 0; apic < nr_ioapics; apic++) {
1276
1277 spin_lock_irqsave(&ioapic_lock, flags);
1278 reg_00.raw = io_apic_read(apic, 0);
1279 reg_01.raw = io_apic_read(apic, 1);
1280 if (reg_01.bits.version >= 0x10)
1281 reg_02.raw = io_apic_read(apic, 2);
1282 spin_unlock_irqrestore(&ioapic_lock, flags);
1283
1284 printk("\n");
1285 printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mp_apicid);
1286 printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
1287 printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
1288
1289 printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
1290 printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
1291
1292 printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
1293 printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);
1294
1295 if (reg_01.bits.version >= 0x10) {
1296 printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
1297 printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
1298 }
1299
1300 printk(KERN_DEBUG ".... IRQ redirection table:\n");
1301
1302 printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
1303 " Stat Dmod Deli Vect: \n");
1304
1305 for (i = 0; i <= reg_01.bits.entries; i++) {
1306 struct IO_APIC_route_entry entry;
1307
1308 entry = ioapic_read_entry(apic, i);
1309
1310 printk(KERN_DEBUG " %02x %03X ",
1311 i,
1312 entry.dest
1313 );
1314
1315 printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
1316 entry.mask,
1317 entry.trigger,
1318 entry.irr,
1319 entry.polarity,
1320 entry.delivery_status,
1321 entry.dest_mode,
1322 entry.delivery_mode,
1323 entry.vector
1324 );
1325 }
1326 }
1327 printk(KERN_DEBUG "IRQ to pin mappings:\n");
1328 for (i = 0; i < nr_irqs; i++) {
1329 struct irq_pin_list *entry = irq_2_pin + i;
1330 if (entry->pin < 0)
1331 continue;
1332 printk(KERN_DEBUG "IRQ%d ", i);
1333 for (;;) {
1334 printk("-> %d:%d", entry->apic, entry->pin);
1335 if (!entry->next)
1336 break;
1337 entry = irq_2_pin + entry->next;
1338 }
1339 printk("\n");
1340 }
1341
1342 printk(KERN_INFO ".................................... done.\n");
1343
1344 return;
1345 }
1346
1347 __apicdebuginit(void) print_APIC_bitfield(int base)
1348 {
1349 unsigned int v;
1350 int i, j;
1351
1352 if (apic_verbosity == APIC_QUIET)
1353 return;
1354
1355 printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
1356 for (i = 0; i < 8; i++) {
1357 v = apic_read(base + i*0x10);
1358 for (j = 0; j < 32; j++) {
1359 if (v & (1<<j))
1360 printk("1");
1361 else
1362 printk("0");
1363 }
1364 printk("\n");
1365 }
1366 }
1367
1368 __apicdebuginit(void) print_local_APIC(void *dummy)
1369 {
1370 unsigned int v, ver, maxlvt;
1371 unsigned long icr;
1372
1373 if (apic_verbosity == APIC_QUIET)
1374 return;
1375
1376 printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
1377 smp_processor_id(), hard_smp_processor_id());
1378 v = apic_read(APIC_ID);
1379 printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, read_apic_id());
1380 v = apic_read(APIC_LVR);
1381 printk(KERN_INFO "... APIC VERSION: %08x\n", v);
1382 ver = GET_APIC_VERSION(v);
1383 maxlvt = lapic_get_maxlvt();
1384
1385 v = apic_read(APIC_TASKPRI);
1386 printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
1387
1388 v = apic_read(APIC_ARBPRI);
1389 printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
1390 v & APIC_ARBPRI_MASK);
1391 v = apic_read(APIC_PROCPRI);
1392 printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
1393
1394 v = apic_read(APIC_EOI);
1395 printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
1396 v = apic_read(APIC_RRR);
1397 printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
1398 v = apic_read(APIC_LDR);
1399 printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
1400 v = apic_read(APIC_DFR);
1401 printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
1402 v = apic_read(APIC_SPIV);
1403 printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
1404
1405 printk(KERN_DEBUG "... APIC ISR field:\n");
1406 print_APIC_bitfield(APIC_ISR);
1407 printk(KERN_DEBUG "... APIC TMR field:\n");
1408 print_APIC_bitfield(APIC_TMR);
1409 printk(KERN_DEBUG "... APIC IRR field:\n");
1410 print_APIC_bitfield(APIC_IRR);
1411
1412 v = apic_read(APIC_ESR);
1413 printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
1414
1415 icr = apic_icr_read();
1416 printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
1417 printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));
1418
1419 v = apic_read(APIC_LVTT);
1420 printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
1421
1422 if (maxlvt > 3) { /* PC is LVT#4. */
1423 v = apic_read(APIC_LVTPC);
1424 printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
1425 }
1426 v = apic_read(APIC_LVT0);
1427 printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
1428 v = apic_read(APIC_LVT1);
1429 printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
1430
1431 if (maxlvt > 2) { /* ERR is LVT#3. */
1432 v = apic_read(APIC_LVTERR);
1433 printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
1434 }
1435
1436 v = apic_read(APIC_TMICT);
1437 printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
1438 v = apic_read(APIC_TMCCT);
1439 printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
1440 v = apic_read(APIC_TDCR);
1441 printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
1442 printk("\n");
1443 }
1444
1445 __apicdebuginit(void) print_all_local_APICs(void)
1446 {
1447 on_each_cpu(print_local_APIC, NULL, 1);
1448 }
1449
1450 __apicdebuginit(void) print_PIC(void)
1451 {
1452 unsigned int v;
1453 unsigned long flags;
1454
1455 if (apic_verbosity == APIC_QUIET)
1456 return;
1457
1458 printk(KERN_DEBUG "\nprinting PIC contents\n");
1459
1460 spin_lock_irqsave(&i8259A_lock, flags);
1461
1462 v = inb(0xa1) << 8 | inb(0x21);
1463 printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
1464
1465 v = inb(0xa0) << 8 | inb(0x20);
1466 printk(KERN_DEBUG "... PIC IRR: %04x\n", v);
1467
1468 outb(0x0b,0xa0);
1469 outb(0x0b,0x20);
1470 v = inb(0xa0) << 8 | inb(0x20);
1471 outb(0x0a,0xa0);
1472 outb(0x0a,0x20);
1473
1474 spin_unlock_irqrestore(&i8259A_lock, flags);
1475
1476 printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
1477
1478 v = inb(0x4d1) << 8 | inb(0x4d0);
1479 printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
1480 }
1481
1482 __apicdebuginit(int) print_all_ICs(void)
1483 {
1484 print_PIC();
1485 print_all_local_APICs();
1486 print_IO_APIC();
1487
1488 return 0;
1489 }
1490
1491 fs_initcall(print_all_ICs);
1492
1493
1494 void __init enable_IO_APIC(void)
1495 {
1496 union IO_APIC_reg_01 reg_01;
1497 int i8259_apic, i8259_pin;
1498 int i, apic;
1499 unsigned long flags;
1500
1501 for (i = 0; i < pin_map_size; i++) {
1502 irq_2_pin[i].pin = -1;
1503 irq_2_pin[i].next = 0;
1504 }
1505
1506 /*
1507 * The number of IO-APIC IRQ registers (== #pins):
1508 */
1509 for (apic = 0; apic < nr_ioapics; apic++) {
1510 spin_lock_irqsave(&ioapic_lock, flags);
1511 reg_01.raw = io_apic_read(apic, 1);
1512 spin_unlock_irqrestore(&ioapic_lock, flags);
1513 nr_ioapic_registers[apic] = reg_01.bits.entries+1;
1514 }
1515 for(apic = 0; apic < nr_ioapics; apic++) {
1516 int pin;
1517 /* See if any of the pins is in ExtINT mode */
1518 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
1519 struct IO_APIC_route_entry entry;
1520 entry = ioapic_read_entry(apic, pin);
1521
1522 /* If the interrupt line is enabled and in ExtINT mode,
1523 * we have found the pin where the i8259 is connected.
1524 */
1525 if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
1526 ioapic_i8259.apic = apic;
1527 ioapic_i8259.pin = pin;
1528 goto found_i8259;
1529 }
1530 }
1531 }
1532 found_i8259:
1533 /* Look to see if the MP table has reported the ExtINT */
1534 i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
1535 i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
1536 /* Trust the MP table if nothing is set up in the hardware */
1537 if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
1538 printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
1539 ioapic_i8259.pin = i8259_pin;
1540 ioapic_i8259.apic = i8259_apic;
1541 }
1542 /* Complain if the MP table and the hardware disagree */
1543 if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
1544 (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
1545 {
1546 printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
1547 }
1548
1549 /*
1550 * Do not trust the IO-APIC to be empty at bootup
1551 */
1552 clear_IO_APIC();
1553 }
1554
1555 /*
1556 * Not an __init, needed by the reboot code
1557 */
1558 void disable_IO_APIC(void)
1559 {
1560 /*
1561 * Clear the IO-APIC before rebooting:
1562 */
1563 clear_IO_APIC();
1564
1565 /*
1566 * If the i8259 is routed through an IOAPIC,
1567 * put that IOAPIC in virtual wire mode
1568 * so legacy interrupts can be delivered.
1569 */
1570 if (ioapic_i8259.pin != -1) {
1571 struct IO_APIC_route_entry entry;
1572
1573 memset(&entry, 0, sizeof(entry));
1574 entry.mask = 0; /* Enabled */
1575 entry.trigger = 0; /* Edge */
1576 entry.irr = 0;
1577 entry.polarity = 0; /* High */
1578 entry.delivery_status = 0;
1579 entry.dest_mode = 0; /* Physical */
1580 entry.delivery_mode = dest_ExtINT; /* ExtInt */
1581 entry.vector = 0;
1582 entry.dest = read_apic_id();
1583
1584 /*
1585 * Add it to the IO-APIC irq-routing table:
1586 */
1587 ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
1588 }
1589
1590 disconnect_bsp_APIC(ioapic_i8259.pin != -1);
1591 }
1592
1593 /*
1594 * There is a nasty bug in some older SMP boards: their mptable lies
1595 * about the timer IRQ. We do the following to work around the situation:
1596 *
1597 * - timer IRQ defaults to IO-APIC IRQ
1598 * - if this function detects that timer IRQs are defunct, then we fall
1599 * back to ISA timer IRQs
1600 */
1601 static int __init timer_irq_works(void)
1602 {
1603 unsigned long t1 = jiffies;
1604 unsigned long flags;
1605
1606 local_save_flags(flags);
1607 local_irq_enable();
1608 /* Let ten ticks pass... */
1609 mdelay((10 * 1000) / HZ);
1610 local_irq_restore(flags);
1611
1612 /*
1613 * Expect a few ticks at least, to be sure some possible
1614 * glue logic does not lock up after the first one or
1615 * two ticks in a non-ExtINT mode. Also the local APIC
1616 * might have cached one ExtINT interrupt. Finally, at
1617 * least one tick may be lost due to delays.
1618 */
1619
1620 /* jiffies wrap? */
1621 if (time_after(jiffies, t1 + 4))
1622 return 1;
1623 return 0;
1624 }
1625
1626 /*
1627 * In the SMP+IOAPIC case it might happen that there are an unspecified
1628 * number of pending IRQ events unhandled. These cases are very rare,
1629 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
1630 * better to do it this way, as then we do not have to be aware of
1631 * 'pending' interrupts in the IRQ path, except at this point.
1632 */
1633 /*
1634 * Edge triggered IRQs need to resend any interrupt
1635 * that was delayed, but this is now handled in the
1636 * device-independent code.
1637 */
1638
1639 /*
1640 * Starting up an edge-triggered IO-APIC interrupt is
1641 * nasty - we need to make sure that we get the edge.
1642 * If it is already asserted for some reason, we need
1643 * to return 1 to indicate that it was pending.
1644 *
1645 * This is not complete - we should be able to fake
1646 * an edge even if it isn't on the 8259A...
1647 */
1648
1649 static unsigned int startup_ioapic_irq(unsigned int irq)
1650 {
1651 int was_pending = 0;
1652 unsigned long flags;
1653
1654 spin_lock_irqsave(&ioapic_lock, flags);
1655 if (irq < 16) {
1656 disable_8259A_irq(irq);
1657 if (i8259A_irq_pending(irq))
1658 was_pending = 1;
1659 }
1660 __unmask_IO_APIC_irq(irq);
1661 spin_unlock_irqrestore(&ioapic_lock, flags);
1662
1663 return was_pending;
1664 }
1665
1666 static int ioapic_retrigger_irq(unsigned int irq)
1667 {
1668 struct irq_cfg *cfg = irq_cfg(irq);
1669 unsigned long flags;
1670
1671 spin_lock_irqsave(&vector_lock, flags);
1672 send_IPI_mask(cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector);
1673 spin_unlock_irqrestore(&vector_lock, flags);
1674
1675 return 1;
1676 }
1677
1678 /*
1679 * Level and edge triggered IO-APIC interrupts need different handling,
1680 * so we use two separate IRQ descriptors. Edge triggered IRQs can be
1681 * handled with the level-triggered descriptor, but that one has slightly
1682 * more overhead. Level-triggered interrupts cannot be handled with the
1683 * edge-triggered handler, without risking IRQ storms and other ugly
1684 * races.
1685 */
1686
1687 #ifdef CONFIG_SMP
1688
1689 #ifdef CONFIG_INTR_REMAP
1690 static void ir_irq_migration(struct work_struct *work);
1691
1692 static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration);
1693
1694 /*
1695 * Migrate the IO-APIC irq in the presence of intr-remapping.
1696 *
1697 * For edge triggered, irq migration is a simple atomic update (of vector
1698 * and cpu destination) of the IRTE and a flush of the hardware cache.
1699 *
1700 * For level triggered, we need to modify the io-apic RTE as well with the
1701 * updated vector information, along with modifying the IRTE with vector and
1702 * destination. So irq migration for level triggered is a little more complex
1703 * than edge triggered migration. The good news is that we use the same
1704 * algorithm for level triggered migration as we have today; the only
1705 * difference is that we now initiate the irq migration from process context
1706 * instead of interrupt context.
1707 *
1708 * In future, when we do a directed EOI (combined with cpu EOI broadcast
1709 * suppression) to the IO-APIC, level triggered irq migration will also be
1710 * as simple as edge triggered migration and we can do the irq migration
1711 * with a simple atomic update to IO-APIC RTE.
1712 */
1713 static void migrate_ioapic_irq(int irq, cpumask_t mask)
1714 {
1715 struct irq_cfg *cfg;
1716 struct irq_desc *desc;
1717 cpumask_t tmp, cleanup_mask;
1718 struct irte irte;
1719 int modify_ioapic_rte;
1720 unsigned int dest;
1721 unsigned long flags;
1722
1723 cpus_and(tmp, mask, cpu_online_map);
1724 if (cpus_empty(tmp))
1725 return;
1726
1727 if (get_irte(irq, &irte))
1728 return;
1729
1730 if (assign_irq_vector(irq, mask))
1731 return;
1732
1733 cfg = irq_cfg(irq);
1734 cpus_and(tmp, cfg->domain, mask);
1735 dest = cpu_mask_to_apicid(tmp);
1736
1737 desc = irq_to_desc(irq);
1738 modify_ioapic_rte = desc->status & IRQ_LEVEL;
1739 if (modify_ioapic_rte) {
1740 spin_lock_irqsave(&ioapic_lock, flags);
1741 __target_IO_APIC_irq(irq, dest, cfg->vector);
1742 spin_unlock_irqrestore(&ioapic_lock, flags);
1743 }
1744
1745 irte.vector = cfg->vector;
1746 irte.dest_id = IRTE_DEST(dest);
1747
1748 /*
1749 * Modify the IRTE and flush the interrupt entry cache.
1750 */
1751 modify_irte(irq, &irte);
1752
1753 if (cfg->move_in_progress) {
1754 cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
1755 cfg->move_cleanup_count = cpus_weight(cleanup_mask);
1756 send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
1757 cfg->move_in_progress = 0;
1758 }
1759
1760 desc->affinity = mask;
1761 }
1762
1763 static int migrate_irq_remapped_level(int irq)
1764 {
1765 int ret = -1;
1766 struct irq_desc *desc = irq_to_desc(irq);
1767
1768 mask_IO_APIC_irq(irq);
1769
1770 if (io_apic_level_ack_pending(irq)) {
1771 /*
1772 * Interrupt in progress. Migrating irq now will change the
1773 * vector information in the IO-APIC RTE and that will confuse
1774 * the EOI broadcast performed by the cpu.
1775 * So, delay the irq migration to the next instance.
1776 */
1777 schedule_delayed_work(&ir_migration_work, 1);
1778 goto unmask;
1779 }
1780
1781 /* everything is clear. we have the right of way */
1782 migrate_ioapic_irq(irq, desc->pending_mask);
1783
1784 ret = 0;
1785 desc->status &= ~IRQ_MOVE_PENDING;
1786 cpus_clear(desc->pending_mask);
1787
1788 unmask:
1789 unmask_IO_APIC_irq(irq);
1790 return ret;
1791 }
1792
1793 static void ir_irq_migration(struct work_struct *work)
1794 {
1795 int irq;
1796
1797 for (irq = 0; irq < nr_irqs; irq++) {
1798 struct irq_desc *desc = irq_to_desc(irq);
1799 if (desc->status & IRQ_MOVE_PENDING) {
1800 unsigned long flags;
1801
1802 spin_lock_irqsave(&desc->lock, flags);
1803 if (!desc->chip->set_affinity ||
1804 !(desc->status & IRQ_MOVE_PENDING)) {
1805 desc->status &= ~IRQ_MOVE_PENDING;
1806 spin_unlock_irqrestore(&desc->lock, flags);
1807 continue;
1808 }
1809
1810 desc->chip->set_affinity(irq, desc->pending_mask);
1811 spin_unlock_irqrestore(&desc->lock, flags);
1812 }
1813 }
1814 }
1815
1816 /*
1817 * Migrates the IRQ destination in the process context.
1818 */
1819 static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
1820 {
1821 struct irq_desc *desc = irq_to_desc(irq);
1822
1823 if (desc->status & IRQ_LEVEL) {
1824 desc->status |= IRQ_MOVE_PENDING;
1825 desc->pending_mask = mask;
1826 migrate_irq_remapped_level(irq);
1827 return;
1828 }
1829
1830 migrate_ioapic_irq(irq, mask);
1831 }
1832 #endif
1833
1834 asmlinkage void smp_irq_move_cleanup_interrupt(void)
1835 {
1836 unsigned vector, me;
1837 ack_APIC_irq();
1838 exit_idle();
1839 irq_enter();
1840
1841 me = smp_processor_id();
1842 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
1843 unsigned int irq;
1844 struct irq_desc *desc;
1845 struct irq_cfg *cfg;
1846 irq = __get_cpu_var(vector_irq)[vector];
1847 if (irq >= nr_irqs)
1848 continue;
1849
1850 desc = irq_to_desc(irq);
1851 cfg = irq_cfg(irq);
1852 spin_lock(&desc->lock);
1853 if (!cfg->move_cleanup_count)
1854 goto unlock;
1855
1856 if ((vector == cfg->vector) && cpu_isset(me, cfg->domain))
1857 goto unlock;
1858
1859 __get_cpu_var(vector_irq)[vector] = -1;
1860 cfg->move_cleanup_count--;
1861 unlock:
1862 spin_unlock(&desc->lock);
1863 }
1864
1865 irq_exit();
1866 }
1867
1868 static void irq_complete_move(unsigned int irq)
1869 {
1870 struct irq_cfg *cfg = irq_cfg(irq);
1871 unsigned vector, me;
1872
1873 if (likely(!cfg->move_in_progress))
1874 return;
1875
1876 vector = ~get_irq_regs()->orig_ax;
1877 me = smp_processor_id();
1878 if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) {
1879 cpumask_t cleanup_mask;
1880
1881 cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
1882 cfg->move_cleanup_count = cpus_weight(cleanup_mask);
1883 send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
1884 cfg->move_in_progress = 0;
1885 }
1886 }
1887 #else
1888 static inline void irq_complete_move(unsigned int irq) {}
1889 #endif
1890 #ifdef CONFIG_INTR_REMAP
1891 static void ack_x2apic_level(unsigned int irq)
1892 {
1893 ack_x2APIC_irq();
1894 }
1895
1896 static void ack_x2apic_edge(unsigned int irq)
1897 {
1898 ack_x2APIC_irq();
1899 }
1900 #endif
1901
1902 static void ack_apic_edge(unsigned int irq)
1903 {
1904 irq_complete_move(irq);
1905 move_native_irq(irq);
1906 ack_APIC_irq();
1907 }
1908
1909 static void ack_apic_level(unsigned int irq)
1910 {
1911 int do_unmask_irq = 0;
1912
1913 irq_complete_move(irq);
1914 #ifdef CONFIG_GENERIC_PENDING_IRQ
1915 /* If we are moving the irq we need to mask it */
1916 if (unlikely(irq_to_desc(irq)->status & IRQ_MOVE_PENDING)) {
1917 do_unmask_irq = 1;
1918 mask_IO_APIC_irq(irq);
1919 }
1920 #endif
1921
1922 /*
1923 * We must acknowledge the irq before we move it or the acknowledge will
1924 * not propagate properly.
1925 */
1926 ack_APIC_irq();
1927
1928 /* Now we can move and re-enable the irq */
1929 if (unlikely(do_unmask_irq)) {
1930 /* Only migrate the irq if the ack has been received.
1931 *
1932 * On rare occasions the broadcast level triggered ack gets
1933 * delayed going to ioapics, and if we reprogram the
1934 * vector while Remote IRR is still set the irq will never
1935 * fire again.
1936 *
1937 * To prevent this scenario we read the Remote IRR bit
1938 * of the ioapic. This has two effects.
1939 * - On any sane system the read of the ioapic will
1940 * flush writes (and acks) going to the ioapic from
1941 * this cpu.
1942 * - We get to see if the ACK has actually been delivered.
1943 *
1944 * Based on failed experiments of reprogramming the
1945 * ioapic entry from outside of irq context (starting
1946 * with masking the ioapic entry and then polling until
1947 * Remote IRR was clear before reprogramming the
1948 * ioapic), I don't trust the Remote IRR bit to be
1949 * completely accurate.
1950 *
1951 * However there appears to be no other way to plug
1952 * this race, so if the Remote IRR bit is not
1953 * accurate and is causing problems then it is a hardware bug
1954 * and you can go talk to the chipset vendor about it.
1955 */
1956 if (!io_apic_level_ack_pending(irq))
1957 move_masked_irq(irq);
1958 unmask_IO_APIC_irq(irq);
1959 }
1960 }
1961
1962 static struct irq_chip ioapic_chip __read_mostly = {
1963 .name = "IO-APIC",
1964 .startup = startup_ioapic_irq,
1965 .mask = mask_IO_APIC_irq,
1966 .unmask = unmask_IO_APIC_irq,
1967 .ack = ack_apic_edge,
1968 .eoi = ack_apic_level,
1969 #ifdef CONFIG_SMP
1970 .set_affinity = set_ioapic_affinity_irq,
1971 #endif
1972 .retrigger = ioapic_retrigger_irq,
1973 };
1974
1975 #ifdef CONFIG_INTR_REMAP
1976 static struct irq_chip ir_ioapic_chip __read_mostly = {
1977 .name = "IR-IO-APIC",
1978 .startup = startup_ioapic_irq,
1979 .mask = mask_IO_APIC_irq,
1980 .unmask = unmask_IO_APIC_irq,
1981 .ack = ack_x2apic_edge,
1982 .eoi = ack_x2apic_level,
1983 #ifdef CONFIG_SMP
1984 .set_affinity = set_ir_ioapic_affinity_irq,
1985 #endif
1986 .retrigger = ioapic_retrigger_irq,
1987 };
1988 #endif
1989
1990 static inline void init_IO_APIC_traps(void)
1991 {
1992 int irq;
1993 struct irq_desc *desc;
1994
1995 /*
1996 * NOTE! The local APIC isn't very good at handling
1997 * multiple interrupts at the same interrupt level.
1998 * As the interrupt level is determined by taking the
1999 * vector number and shifting that right by 4, we
2000 * want to spread these out a bit so that they don't
2001 * all fall in the same interrupt level.
2002 *
2003 * Also, we've got to be careful not to trash gate
2004 * 0x80, because int 0x80 is hm, kind of importantish. ;)
2005 */
2006 for (irq = 0; irq < nr_irqs ; irq++) {
2007 struct irq_cfg *cfg;
2008
2009 cfg = irq_cfg(irq);
2010 if (IO_APIC_IRQ(irq) && !cfg->vector) {
2011 /*
2012 * Hmm.. We don't have an entry for this,
2013 * so default to an old-fashioned 8259
2014 * interrupt if we can..
2015 */
2016 if (irq < 16)
2017 make_8259A_irq(irq);
2018 else {
2019 desc = irq_to_desc(irq);
2020 /* Strange. Oh, well.. */
2021 desc->chip = &no_irq_chip;
2022 }
2023 }
2024 }
2025 }
2026
2027 static void unmask_lapic_irq(unsigned int irq)
2028 {
2029 unsigned long v;
2030
2031 v = apic_read(APIC_LVT0);
2032 apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
2033 }
2034
2035 static void mask_lapic_irq(unsigned int irq)
2036 {
2037 unsigned long v;
2038
2039 v = apic_read(APIC_LVT0);
2040 apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
2041 }
2042
2043 static void ack_lapic_irq (unsigned int irq)
2044 {
2045 ack_APIC_irq();
2046 }
2047
2048 static struct irq_chip lapic_chip __read_mostly = {
2049 .name = "local-APIC",
2050 .mask = mask_lapic_irq,
2051 .unmask = unmask_lapic_irq,
2052 .ack = ack_lapic_irq,
2053 };
2054
2055 static void lapic_register_intr(int irq)
2056 {
2057 struct irq_desc *desc;
2058
2059 desc = irq_to_desc(irq);
2060 desc->status &= ~IRQ_LEVEL;
2061 set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
2062 "edge");
2063 }
2064
2065 static void __init setup_nmi(void)
2066 {
2067 /*
2068 * Dirty trick to enable the NMI watchdog ...
2069 * We put the 8259A master into AEOI mode and
2070 * program LVT0 on all local APICs to deliver NMI.
2071 *
2072 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
2073 * is from Maciej W. Rozycki - so we do not have to EOI from
2074 * the NMI handler or the timer interrupt.
2075 */
2076 printk(KERN_INFO "activating NMI Watchdog ...");
2077
2078 enable_NMI_through_LVT0();
2079
2080 printk(" done.\n");
2081 }
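/*
 * For reference, enable_NMI_through_LVT0() boils down to programming
 * the calling CPU's LVT0 entry for NMI delivery; roughly (a sketch
 * that ignores the 82489DX level-trigger special case):
 *
 *	apic_write(APIC_LVT0, APIC_DM_NMI);
 */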
2082
2083 /*
2084 * This looks a bit hackish but it's about the only way of sending
2085 * a few INTA cycles to 8259As and any associated glue logic. ICR does
2086 * not support the ExtINT mode, unfortunately. We need to send these
2087 * cycles as some i82489DX-based boards have glue logic that keeps the
2088 * 8259A interrupt line asserted until INTA. --macro
2089 */
2090 static inline void __init unlock_ExtINT_logic(void)
2091 {
2092 int apic, pin, i;
2093 struct IO_APIC_route_entry entry0, entry1;
2094 unsigned char save_control, save_freq_select;
2095
2096 pin = find_isa_irq_pin(8, mp_INT);
2097 apic = find_isa_irq_apic(8, mp_INT);
2098 if (pin == -1)
2099 return;
2100
2101 entry0 = ioapic_read_entry(apic, pin);
2102
2103 clear_IO_APIC_pin(apic, pin);
2104
2105 memset(&entry1, 0, sizeof(entry1));
2106
2107 entry1.dest_mode = 0; /* physical delivery */
2108 entry1.mask = 0; /* unmask IRQ now */
2109 entry1.dest = hard_smp_processor_id();
2110 entry1.delivery_mode = dest_ExtINT;
2111 entry1.polarity = entry0.polarity;
2112 entry1.trigger = 0;
2113 entry1.vector = 0;
2114
2115 ioapic_write_entry(apic, pin, entry1);
2116
2117 save_control = CMOS_READ(RTC_CONTROL);
2118 save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
2119 CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
2120 RTC_FREQ_SELECT);
2121 CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
2122
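/*
 * Rate select 6 yields a 32768 >> (6 - 1) = 1024 Hz periodic
 * interrupt, so RTC_PF below should come around roughly every
 * millisecond while we poll.
 */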
2123 i = 100;
2124 while (i-- > 0) {
2125 mdelay(10);
2126 if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
2127 i -= 10;
2128 }
2129
2130 CMOS_WRITE(save_control, RTC_CONTROL);
2131 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
2132 clear_IO_APIC_pin(apic, pin);
2133
2134 ioapic_write_entry(apic, pin, entry0);
2135 }
2136
2137 /*
2138 * This code may look a bit paranoid, but it's supposed to cooperate with
2139 * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
2140 * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
2141 * fanatically on his truly buggy board.
2142 *
2143 * FIXME: really need to revamp this for modern platforms only.
2144 */
2145 static inline void __init check_timer(void)
2146 {
2147 struct irq_cfg *cfg = irq_cfg(0);
2148 int apic1, pin1, apic2, pin2;
2149 unsigned long flags;
2150 int no_pin1 = 0;
2151
2152 local_irq_save(flags);
2153
2154 /*
2155 * get/set the timer IRQ vector:
2156 */
2157 disable_8259A_irq(0);
2158 assign_irq_vector(0, TARGET_CPUS);
2159
2160 /*
2161 * As IRQ0 is to be enabled in the 8259A, the virtual
2162 * wire has to be disabled in the local APIC.
2163 */
2164 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
2165 init_8259A(1);
2166
2167 pin1 = find_isa_irq_pin(0, mp_INT);
2168 apic1 = find_isa_irq_apic(0, mp_INT);
2169 pin2 = ioapic_i8259.pin;
2170 apic2 = ioapic_i8259.apic;
2171
2172 apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
2173 "apic1=%d pin1=%d apic2=%d pin2=%d\n",
2174 cfg->vector, apic1, pin1, apic2, pin2);
2175
2176 /*
2177 * Some BIOS writers are clueless and report the ExtINTA
2178 * I/O APIC input from the cascaded 8259A as the timer
2179 * interrupt input. So just in case, if only one pin
2180 * was found above, try it both directly and through the
2181 * 8259A.
2182 */
2183 if (pin1 == -1) {
2184 if (intr_remapping_enabled)
2185 panic("BIOS bug: timer not connected to IO-APIC");
2186 pin1 = pin2;
2187 apic1 = apic2;
2188 no_pin1 = 1;
2189 } else if (pin2 == -1) {
2190 pin2 = pin1;
2191 apic2 = apic1;
2192 }
2193
2194 if (pin1 != -1) {
2195 /*
2196 * Ok, does IRQ0 through the IOAPIC work?
2197 */
2198 if (no_pin1) {
2199 add_pin_to_irq(0, apic1, pin1);
2200 setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
2201 }
2202 unmask_IO_APIC_irq(0);
2203 if (!no_timer_check && timer_irq_works()) {
2204 if (nmi_watchdog == NMI_IO_APIC) {
2205 setup_nmi();
2206 enable_8259A_irq(0);
2207 }
2208 if (disable_timer_pin_1 > 0)
2209 clear_IO_APIC_pin(0, pin1);
2210 goto out;
2211 }
2212 if (intr_remapping_enabled)
2213 panic("timer doesn't work through Interrupt-remapped IO-APIC");
2214 clear_IO_APIC_pin(apic1, pin1);
2215 if (!no_pin1)
2216 apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
2217 "8254 timer not connected to IO-APIC\n");
2218
2219 apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
2220 "(IRQ0) through the 8259A ...\n");
2221 apic_printk(APIC_QUIET, KERN_INFO
2222 "..... (found apic %d pin %d) ...\n", apic2, pin2);
2223 /*
2224 * legacy devices should be connected to IO APIC #0
2225 */
2226 replace_pin_at_irq(0, apic1, pin1, apic2, pin2);
2227 setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
2228 unmask_IO_APIC_irq(0);
2229 enable_8259A_irq(0);
2230 if (timer_irq_works()) {
2231 apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
2232 timer_through_8259 = 1;
2233 if (nmi_watchdog == NMI_IO_APIC) {
2234 disable_8259A_irq(0);
2235 setup_nmi();
2236 enable_8259A_irq(0);
2237 }
2238 goto out;
2239 }
2240 /*
2241 * Cleanup, just in case ...
2242 */
2243 disable_8259A_irq(0);
2244 clear_IO_APIC_pin(apic2, pin2);
2245 apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
2246 }
2247
2248 if (nmi_watchdog == NMI_IO_APIC) {
2249 apic_printk(APIC_QUIET, KERN_WARNING "timer doesn't work "
2250 "through the IO-APIC - disabling NMI Watchdog!\n");
2251 nmi_watchdog = NMI_NONE;
2252 }
2253
2254 apic_printk(APIC_QUIET, KERN_INFO
2255 "...trying to set up timer as Virtual Wire IRQ...\n");
2256
2257 lapic_register_intr(0);
2258 apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */
2259 enable_8259A_irq(0);
2260
2261 if (timer_irq_works()) {
2262 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
2263 goto out;
2264 }
2265 disable_8259A_irq(0);
2266 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
2267 apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");
2268
2269 apic_printk(APIC_QUIET, KERN_INFO
2270 "...trying to set up timer as ExtINT IRQ...\n");
2271
2272 init_8259A(0);
2273 make_8259A_irq(0);
2274 apic_write(APIC_LVT0, APIC_DM_EXTINT);
2275
2276 unlock_ExtINT_logic();
2277
2278 if (timer_irq_works()) {
2279 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
2280 goto out;
2281 }
2282 apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
2283 panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
2284 "report. Then try booting with the 'noapic' option.\n");
2285 out:
2286 local_irq_restore(flags);
2287 }
2288
2289 static int __init notimercheck(char *s)
2290 {
2291 no_timer_check = 1;
2292 return 1;
2293 }
2294 __setup("no_timer_check", notimercheck);
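/*
 * Usage sketch: booting with "no_timer_check" on the kernel command
 * line skips the timer_irq_works() verification in check_timer();
 * this is typically wanted on virtualized platforms whose emulated
 * timer is too jittery for the check to pass reliably.
 */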
2295
2296 /*
2297 * Traditionally ISA IRQ2 is the cascade IRQ, and is not available
2298 * to devices. However there may be an I/O APIC pin available for
2299 * this interrupt regardless. The pin may be left unconnected, but
2300 * typically it will be reused as an ExtINT cascade interrupt for
2301 * the master 8259A. In the MPS case such a pin will normally be
2302 * reported as an ExtINT interrupt in the MP table. With ACPI
2303 * there is no provision for ExtINT interrupts, and in the absence
2304 * of an override it would be treated as an ordinary ISA I/O APIC
2305 * interrupt, that is edge-triggered and unmasked by default. We
2306 * used to do this, but it caused problems on some systems because
2307 * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using
2308 * the same ExtINT cascade interrupt to drive the local APIC of the
2309 * bootstrap processor. Therefore we refrain from routing IRQ2 to
2310 * the I/O APIC in all cases now. No actual device should request
2311 * it anyway. --macro
2312 */
2313 #define PIC_IRQS (1<<2)
2314
2315 void __init setup_IO_APIC(void)
2316 {
2317
2318 /*
2319 * calling enable_IO_APIC() is moved to setup_local_APIC() for the BP
2320 */
2321
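/* every IRQ bit set except bit 2: IRQ2 stays with the 8259A cascade */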
2322 io_apic_irqs = ~PIC_IRQS;
2323
2324 apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
2325
2326 sync_Arb_IDs();
2327 setup_IO_APIC_irqs();
2328 init_IO_APIC_traps();
2329 check_timer();
2330 }
2331
2332 struct sysfs_ioapic_data {
2333 struct sys_device dev;
2334 struct IO_APIC_route_entry entry[0];
2335 };
2336 static struct sysfs_ioapic_data *mp_ioapic_data[MAX_IO_APICS];
2337
2338 static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
2339 {
2340 struct IO_APIC_route_entry *entry;
2341 struct sysfs_ioapic_data *data;
2342 int i;
2343
2344 data = container_of(dev, struct sysfs_ioapic_data, dev);
2345 entry = data->entry;
2346 for (i = 0; i < nr_ioapic_registers[dev->id]; i++, entry++)
2347 *entry = ioapic_read_entry(dev->id, i);
2348
2349 return 0;
2350 }
2351
2352 static int ioapic_resume(struct sys_device *dev)
2353 {
2354 struct IO_APIC_route_entry *entry;
2355 struct sysfs_ioapic_data *data;
2356 unsigned long flags;
2357 union IO_APIC_reg_00 reg_00;
2358 int i;
2359
2360 data = container_of(dev, struct sysfs_ioapic_data, dev);
2361 entry = data->entry;
2362
2363 spin_lock_irqsave(&ioapic_lock, flags);
2364 reg_00.raw = io_apic_read(dev->id, 0);
2365 if (reg_00.bits.ID != mp_ioapics[dev->id].mp_apicid) {
2366 reg_00.bits.ID = mp_ioapics[dev->id].mp_apicid;
2367 io_apic_write(dev->id, 0, reg_00.raw);
2368 }
2369 spin_unlock_irqrestore(&ioapic_lock, flags);
2370 for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
2371 ioapic_write_entry(dev->id, i, entry[i]);
2372
2373 return 0;
2374 }
2375
2376 static struct sysdev_class ioapic_sysdev_class = {
2377 .name = "ioapic",
2378 .suspend = ioapic_suspend,
2379 .resume = ioapic_resume,
2380 };
2381
2382 static int __init ioapic_init_sysfs(void)
2383 {
2384 struct sys_device *dev;
2385 int i, size, error;
2386
2387 error = sysdev_class_register(&ioapic_sysdev_class);
2388 if (error)
2389 return error;
2390
2391 for (i = 0; i < nr_ioapics; i++) {
2392 size = sizeof(struct sys_device) + nr_ioapic_registers[i]
2393 * sizeof(struct IO_APIC_route_entry);
2394 mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL);
2395 if (!mp_ioapic_data[i]) {
2396 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
2397 continue;
2398 }
2399 dev = &mp_ioapic_data[i]->dev;
2400 dev->id = i;
2401 dev->cls = &ioapic_sysdev_class;
2402 error = sysdev_register(dev);
2403 if (error) {
2404 kfree(mp_ioapic_data[i]);
2405 mp_ioapic_data[i] = NULL;
2406 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
2407 continue;
2408 }
2409 }
2410
2411 return 0;
2412 }
2413
2414 device_initcall(ioapic_init_sysfs);
2415
2416 /*
2417 * Dynamic irq allocate and deallocation
2418 */
2419 int create_irq(void)
2420 {
2421 /* Allocate an unused irq */
2422 int irq;
2423 int new;
2424 unsigned long flags;
2425 struct irq_cfg *cfg_new;
2426
2427 irq = -ENOSPC;
2428 spin_lock_irqsave(&vector_lock, flags);
2429 for (new = (nr_irqs - 1); new >= 0; new--) {
2430 if (platform_legacy_irq(new))
2431 continue;
2432 cfg_new = irq_cfg(new);
2433 if (cfg_new && cfg_new->vector != 0)
2434 continue;
2435 /* check if need to create one */
2436 if (!cfg_new)
2437 cfg_new = irq_cfg_alloc(new);
2438 if (__assign_irq_vector(new, TARGET_CPUS) == 0)
2439 irq = new;
2440 break;
2441 }
2442 spin_unlock_irqrestore(&vector_lock, flags);
2443
2444 if (irq >= 0)
2445 dynamic_irq_init(irq);
2446 
2447 return irq;
2448 }
2449
2450 void destroy_irq(unsigned int irq)
2451 {
2452 unsigned long flags;
2453
2454 dynamic_irq_cleanup(irq);
2455
2456 #ifdef CONFIG_INTR_REMAP
2457 free_irte(irq);
2458 #endif
2459 spin_lock_irqsave(&vector_lock, flags);
2460 __clear_irq_vector(irq);
2461 spin_unlock_irqrestore(&vector_lock, flags);
2462 }
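/*
 * Typical usage of the pair above (a sketch; this is the pattern the
 * MSI code below follows, with some_setup() standing in for whatever
 * per-irq initialization the caller needs):
 *
 *	int irq = create_irq();
 *	if (irq < 0)
 *		return irq;		no free vector
 *	if (some_setup(irq) < 0) {
 *		destroy_irq(irq);	give the vector back
 *		return -EIO;
 *	}
 */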
2463
2464 /*
2465 * MSI message composition
2466 */
2467 #ifdef CONFIG_PCI_MSI
2468 static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
2469 {
2470 struct irq_cfg *cfg;
2471 int err;
2472 unsigned dest;
2473 cpumask_t tmp;
2474
2475 tmp = TARGET_CPUS;
2476 err = assign_irq_vector(irq, tmp);
2477 if (err)
2478 return err;
2479
2480 cfg = irq_cfg(irq);
2481 cpus_and(tmp, cfg->domain, tmp);
2482 dest = cpu_mask_to_apicid(tmp);
2483
2484 #ifdef CONFIG_INTR_REMAP
2485 if (irq_remapped(irq)) {
2486 struct irte irte;
2487 int ir_index;
2488 u16 sub_handle;
2489
2490 ir_index = map_irq_to_irte_handle(irq, &sub_handle);
2491 BUG_ON(ir_index == -1);
2492
2493 memset(&irte, 0, sizeof(irte));
2494
2495 irte.present = 1;
2496 irte.dst_mode = INT_DEST_MODE;
2497 irte.trigger_mode = 0; /* edge */
2498 irte.dlvry_mode = INT_DELIVERY_MODE;
2499 irte.vector = cfg->vector;
2500 irte.dest_id = IRTE_DEST(dest);
2501
2502 modify_irte(irq, &irte);
2503
2504 msg->address_hi = MSI_ADDR_BASE_HI;
2505 msg->data = sub_handle;
2506 msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
2507 MSI_ADDR_IR_SHV |
2508 MSI_ADDR_IR_INDEX1(ir_index) |
2509 MSI_ADDR_IR_INDEX2(ir_index);
2510 } else
2511 #endif
2512 {
2513 msg->address_hi = MSI_ADDR_BASE_HI;
2514 msg->address_lo =
2515 MSI_ADDR_BASE_LO |
2516 ((INT_DEST_MODE == 0) ?
2517 MSI_ADDR_DEST_MODE_PHYSICAL:
2518 MSI_ADDR_DEST_MODE_LOGICAL) |
2519 ((INT_DELIVERY_MODE != dest_LowestPrio) ?
2520 MSI_ADDR_REDIRECTION_CPU:
2521 MSI_ADDR_REDIRECTION_LOWPRI) |
2522 MSI_ADDR_DEST_ID(dest);
2523
2524 msg->data =
2525 MSI_DATA_TRIGGER_EDGE |
2526 MSI_DATA_LEVEL_ASSERT |
2527 ((INT_DELIVERY_MODE != dest_LowestPrio) ?
2528 MSI_DATA_DELIVERY_FIXED:
2529 MSI_DATA_DELIVERY_LOWPRI) |
2530 MSI_DATA_VECTOR(cfg->vector);
2531 }
2532 return err;
2533 }
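/*
 * For a feel of the message composed above: with physical fixed
 * delivery, vector 0x42 aimed at APIC ID 1 works out to roughly
 * (sketch, non-remapped case):
 *
 *	msg.address_hi = 0x00000000;
 *	msg.address_lo = 0xfee01000;	(0xfee00000 | dest 1 << 12)
 *	msg.data       = 0x00004042;	(assert | fixed | vector 0x42)
 */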
2534
2535 #ifdef CONFIG_SMP
2536 static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
2537 {
2538 struct irq_cfg *cfg;
2539 struct msi_msg msg;
2540 unsigned int dest;
2541 cpumask_t tmp;
2542 struct irq_desc *desc;
2543
2544 cpus_and(tmp, mask, cpu_online_map);
2545 if (cpus_empty(tmp))
2546 return;
2547
2548 if (assign_irq_vector(irq, mask))
2549 return;
2550
2551 cfg = irq_cfg(irq);
2552 cpus_and(tmp, cfg->domain, mask);
2553 dest = cpu_mask_to_apicid(tmp);
2554
2555 read_msi_msg(irq, &msg);
2556
2557 msg.data &= ~MSI_DATA_VECTOR_MASK;
2558 msg.data |= MSI_DATA_VECTOR(cfg->vector);
2559 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
2560 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
2561
2562 write_msi_msg(irq, &msg);
2563 desc = irq_to_desc(irq);
2564 desc->affinity = mask;
2565 }
2566
2567 #ifdef CONFIG_INTR_REMAP
2568 /*
2569 * Migrate the MSI irq to another cpumask. This migration is
2570 * done in the process context using interrupt-remapping hardware.
2571 */
2572 static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
2573 {
2574 struct irq_cfg *cfg;
2575 unsigned int dest;
2576 cpumask_t tmp, cleanup_mask;
2577 struct irte irte;
2578 struct irq_desc *desc;
2579
2580 cpus_and(tmp, mask, cpu_online_map);
2581 if (cpus_empty(tmp))
2582 return;
2583
2584 if (get_irte(irq, &irte))
2585 return;
2586
2587 if (assign_irq_vector(irq, mask))
2588 return;
2589
2590 cfg = irq_cfg(irq);
2591 cpus_and(tmp, cfg->domain, mask);
2592 dest = cpu_mask_to_apicid(tmp);
2593
2594 irte.vector = cfg->vector;
2595 irte.dest_id = IRTE_DEST(dest);
2596
2597 /*
2598 * atomically update the IRTE with the new destination and vector.
2599 */
2600 modify_irte(irq, &irte);
2601
2602 /*
2603 * After this point, all the interrupts will start arriving
2604 * at the new destination. So, time to cleanup the previous
2605 * vector allocation.
2606 */
2607 if (cfg->move_in_progress) {
2608 cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
2609 cfg->move_cleanup_count = cpus_weight(cleanup_mask);
2610 send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
2611 cfg->move_in_progress = 0;
2612 }
2613
2614 desc = irq_to_desc(irq);
2615 desc->affinity = mask;
2616 }
2617 #endif
2618 #endif /* CONFIG_SMP */
2619
2620 /*
2621 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
2622 * which implement the MSI or MSI-X Capability Structure.
2623 */
2624 static struct irq_chip msi_chip = {
2625 .name = "PCI-MSI",
2626 .unmask = unmask_msi_irq,
2627 .mask = mask_msi_irq,
2628 .ack = ack_apic_edge,
2629 #ifdef CONFIG_SMP
2630 .set_affinity = set_msi_irq_affinity,
2631 #endif
2632 .retrigger = ioapic_retrigger_irq,
2633 };
2634
2635 #ifdef CONFIG_INTR_REMAP
2636 static struct irq_chip msi_ir_chip = {
2637 .name = "IR-PCI-MSI",
2638 .unmask = unmask_msi_irq,
2639 .mask = mask_msi_irq,
2640 .ack = ack_x2apic_edge,
2641 #ifdef CONFIG_SMP
2642 .set_affinity = ir_set_msi_irq_affinity,
2643 #endif
2644 .retrigger = ioapic_retrigger_irq,
2645 };
2646
2647 /*
2648 * Map the PCI dev to the corresponding remapping hardware unit
2649 * and allocate 'nvec' consecutive interrupt-remapping table entries
2650 * in it.
2651 */
2652 static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
2653 {
2654 struct intel_iommu *iommu;
2655 int index;
2656
2657 iommu = map_dev_to_ir(dev);
2658 if (!iommu) {
2659 printk(KERN_ERR
2660 "Unable to map PCI %s to iommu\n", pci_name(dev));
2661 return -ENOENT;
2662 }
2663
2664 index = alloc_irte(iommu, irq, nvec);
2665 if (index < 0) {
2666 printk(KERN_ERR
2667 "Unable to allocate %d IRTE for PCI %s\n", nvec,
2668 pci_name(dev));
2669 return -ENOSPC;
2670 }
2671 return index;
2672 }
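/*
 * Usage sketch for the helper above: for an MSI block of four
 * vectors (irq0 being the first allocated irq),
 *
 *	index = msi_alloc_irte(dev, irq0, 4);
 *
 * reserves IRTE slots index..index+3; the remaining irqs are then
 * tied to the same index with sub_handles 1..3 via set_irte_irq(),
 * which is exactly what arch_setup_msi_irqs() below does.
 */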
2673 #endif
2674
2675 static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc, int irq)
2676 {
2677 int ret;
2678 struct msi_msg msg;
2679
2680 ret = msi_compose_msg(dev, irq, &msg);
2681 if (ret < 0)
2682 return ret;
2683
2684 set_irq_msi(irq, desc);
2685 write_msi_msg(irq, &msg);
2686
2687 #ifdef CONFIG_INTR_REMAP
2688 if (irq_remapped(irq)) {
2689 struct irq_desc *desc = irq_to_desc(irq);
2690 /*
2691 * irq migration in process context
2692 */
2693 desc->status |= IRQ_MOVE_PCNTXT;
2694 set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge");
2695 } else
2696 #endif
2697 set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");
2698
2699 return 0;
2700 }
2701
2702 int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
2703 {
2704 int irq, ret;
2705
2706 irq = create_irq();
2707 if (irq < 0)
2708 return irq;
2709
2710 #ifdef CONFIG_INTR_REMAP
2711 if (!intr_remapping_enabled)
2712 goto no_ir;
2713
2714 ret = msi_alloc_irte(dev, irq, 1);
2715 if (ret < 0)
2716 goto error;
2717 no_ir:
2718 #endif
2719 ret = setup_msi_irq(dev, desc, irq);
2720 if (ret < 0) {
2721 destroy_irq(irq);
2722 return ret;
2723 }
2724 return 0;
2725
2726 #ifdef CONFIG_INTR_REMAP
2727 error:
2728 destroy_irq(irq);
2729 return ret;
2730 #endif
2731 }
2732
2733 int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
2734 {
2735 int irq, ret, sub_handle;
2736 struct msi_desc *desc;
2737 #ifdef CONFIG_INTR_REMAP
2738 struct intel_iommu *iommu = NULL;
2739 int index = 0;
2740 #endif
2741
2742 sub_handle = 0;
2743 list_for_each_entry(desc, &dev->msi_list, list) {
2744 irq = create_irq();
2745 if (irq < 0)
2746 return irq;
2747 #ifdef CONFIG_INTR_REMAP
2748 if (!intr_remapping_enabled)
2749 goto no_ir;
2750
2751 if (!sub_handle) {
2752 /*
2753 * allocate the consecutive block of IRTE's
2754 * for 'nvec'
2755 */
2756 index = msi_alloc_irte(dev, irq, nvec);
2757 if (index < 0) {
2758 ret = index;
2759 goto error;
2760 }
2761 } else {
2762 iommu = map_dev_to_ir(dev);
2763 if (!iommu) {
2764 ret = -ENOENT;
2765 goto error;
2766 }
2767 /*
2768 * setup the mapping between the irq and the IRTE
2769 * base index, the sub_handle pointing to the
2770 * appropriate interrupt remap table entry.
2771 */
2772 set_irte_irq(irq, iommu, index, sub_handle);
2773 }
2774 no_ir:
2775 #endif
2776 ret = setup_msi_irq(dev, desc, irq);
2777 if (ret < 0)
2778 goto error;
2779 sub_handle++;
2780 }
2781 return 0;
2782
2783 error:
2784 destroy_irq(irq);
2785 return ret;
2786 }
2787
2788 void arch_teardown_msi_irq(unsigned int irq)
2789 {
2790 destroy_irq(irq);
2791 }
2792
2793 #ifdef CONFIG_DMAR
2794 #ifdef CONFIG_SMP
2795 static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
2796 {
2797 struct irq_cfg *cfg;
2798 struct msi_msg msg;
2799 unsigned int dest;
2800 cpumask_t tmp;
2801 struct irq_desc *desc;
2802
2803 cpus_and(tmp, mask, cpu_online_map);
2804 if (cpus_empty(tmp))
2805 return;
2806
2807 if (assign_irq_vector(irq, mask))
2808 return;
2809
2810 cfg = irq_cfg(irq);
2811 cpus_and(tmp, cfg->domain, mask);
2812 dest = cpu_mask_to_apicid(tmp);
2813
2814 dmar_msi_read(irq, &msg);
2815
2816 msg.data &= ~MSI_DATA_VECTOR_MASK;
2817 msg.data |= MSI_DATA_VECTOR(cfg->vector);
2818 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
2819 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
2820
2821 dmar_msi_write(irq, &msg);
2822 desc = irq_to_desc(irq);
2823 desc->affinity = mask;
2824 }
2825 #endif /* CONFIG_SMP */
2826
2827 struct irq_chip dmar_msi_type = {
2828 .name = "DMAR_MSI",
2829 .unmask = dmar_msi_unmask,
2830 .mask = dmar_msi_mask,
2831 .ack = ack_apic_edge,
2832 #ifdef CONFIG_SMP
2833 .set_affinity = dmar_msi_set_affinity,
2834 #endif
2835 .retrigger = ioapic_retrigger_irq,
2836 };
2837
2838 int arch_setup_dmar_msi(unsigned int irq)
2839 {
2840 int ret;
2841 struct msi_msg msg;
2842
2843 ret = msi_compose_msg(NULL, irq, &msg);
2844 if (ret < 0)
2845 return ret;
2846 dmar_msi_write(irq, &msg);
2847 set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
2848 "edge");
2849 return 0;
2850 }
2851 #endif
2852
2853 #endif /* CONFIG_PCI_MSI */
2854 /*
2855 * Hypertransport interrupt support
2856 */
2857 #ifdef CONFIG_HT_IRQ
2858
2859 #ifdef CONFIG_SMP
2860
2861 static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
2862 {
2863 struct ht_irq_msg msg;
2864 fetch_ht_irq_msg(irq, &msg);
2865
2866 msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
2867 msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
2868
2869 msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
2870 msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
2871
2872 write_ht_irq_msg(irq, &msg);
2873 }
2874
2875 static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
2876 {
2877 struct irq_cfg *cfg;
2878 unsigned int dest;
2879 cpumask_t tmp;
2880 struct irq_desc *desc;
2881
2882 cpus_and(tmp, mask, cpu_online_map);
2883 if (cpus_empty(tmp))
2884 return;
2885
2886 if (assign_irq_vector(irq, mask))
2887 return;
2888
2889 cfg = irq_cfg(irq);
2890 cpus_and(tmp, cfg->domain, mask);
2891 dest = cpu_mask_to_apicid(tmp);
2892
2893 target_ht_irq(irq, dest, cfg->vector);
2894 desc = irq_to_desc(irq);
2895 desc->affinity = mask;
2896 }
2897 #endif
2898
2899 static struct irq_chip ht_irq_chip = {
2900 .name = "PCI-HT",
2901 .mask = mask_ht_irq,
2902 .unmask = unmask_ht_irq,
2903 .ack = ack_apic_edge,
2904 #ifdef CONFIG_SMP
2905 .set_affinity = set_ht_irq_affinity,
2906 #endif
2907 .retrigger = ioapic_retrigger_irq,
2908 };
2909
2910 int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
2911 {
2912 struct irq_cfg *cfg;
2913 int err;
2914 cpumask_t tmp;
2915
2916 tmp = TARGET_CPUS;
2917 err = assign_irq_vector(irq, tmp);
2918 if (!err) {
2919 struct ht_irq_msg msg;
2920 unsigned dest;
2921
2922 cfg = irq_cfg(irq);
2923 cpus_and(tmp, cfg->domain, tmp);
2924 dest = cpu_mask_to_apicid(tmp);
2925
2926 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
2927
2928 msg.address_lo =
2929 HT_IRQ_LOW_BASE |
2930 HT_IRQ_LOW_DEST_ID(dest) |
2931 HT_IRQ_LOW_VECTOR(cfg->vector) |
2932 ((INT_DEST_MODE == 0) ?
2933 HT_IRQ_LOW_DM_PHYSICAL :
2934 HT_IRQ_LOW_DM_LOGICAL) |
2935 HT_IRQ_LOW_RQEOI_EDGE |
2936 ((INT_DELIVERY_MODE != dest_LowestPrio) ?
2937 HT_IRQ_LOW_MT_FIXED :
2938 HT_IRQ_LOW_MT_ARBITRATED) |
2939 HT_IRQ_LOW_IRQ_MASKED;
2940
2941 write_ht_irq_msg(irq, &msg);
2942
2943 set_irq_chip_and_handler_name(irq, &ht_irq_chip,
2944 handle_edge_irq, "edge");
2945 }
2946 return err;
2947 }
2948 #endif /* CONFIG_HT_IRQ */
2949
2950 /* --------------------------------------------------------------------------
2951 ACPI-based IOAPIC Configuration
2952 -------------------------------------------------------------------------- */
2953
2954 #ifdef CONFIG_ACPI
2955
2956 #define IO_APIC_MAX_ID 0xFE
2957
2958 int __init io_apic_get_redir_entries(int ioapic)
2959 {
2960 union IO_APIC_reg_01 reg_01;
2961 unsigned long flags;
2962
2963 spin_lock_irqsave(&ioapic_lock, flags);
2964 reg_01.raw = io_apic_read(ioapic, 1);
2965 spin_unlock_irqrestore(&ioapic_lock, flags);
2966
2967 return reg_01.bits.entries;
2968 }
2969
2970
2971 int io_apic_set_pci_routing(int ioapic, int pin, int irq, int triggering, int polarity)
2972 {
2973 if (!IO_APIC_IRQ(irq)) {
2974 apic_printk(APIC_QUIET, KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
2975 ioapic);
2976 return -EINVAL;
2977 }
2978
2979 /*
2980 * IRQs < 16 are already in the irq_2_pin[] map
2981 */
2982 if (irq >= 16)
2983 add_pin_to_irq(irq, ioapic, pin);
2984
2985 setup_IO_APIC_irq(ioapic, pin, irq, triggering, polarity);
2986
2987 return 0;
2988 }
2989
2990
2991 int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
2992 {
2993 int i;
2994
2995 if (skip_ioapic_setup)
2996 return -1;
2997
2998 for (i = 0; i < mp_irq_entries; i++)
2999 if (mp_irqs[i].mp_irqtype == mp_INT &&
3000 mp_irqs[i].mp_srcbusirq == bus_irq)
3001 break;
3002 if (i >= mp_irq_entries)
3003 return -1;
3004
3005 *trigger = irq_trigger(i);
3006 *polarity = irq_polarity(i);
3007 return 0;
3008 }
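/*
 * Usage sketch: ACPI code can ask whether the MP tables override the
 * wiring of a legacy IRQ, e.g. for the RTC on bus IRQ 8:
 *
 *	int trigger, polarity;
 *	if (!acpi_get_override_irq(8, &trigger, &polarity))
 *		... trigger and polarity now describe the pin ...
 */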
3009
3010 #endif /* CONFIG_ACPI */
3011
3012 /*
3013 * This function is currently only a helper for the i386 SMP boot process,
3014 * where we need to reprogram the ioredtbls to cater for the CPUs which have
3015 * come online, so the mask in all cases should simply be TARGET_CPUS.
3016 */
3017 #ifdef CONFIG_SMP
3018 void __init setup_ioapic_dest(void)
3019 {
3020 int pin, ioapic, irq, irq_entry;
3021 struct irq_cfg *cfg;
3022
3023 if (skip_ioapic_setup == 1)
3024 return;
3025
3026 for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
3027 for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
3028 irq_entry = find_irq_entry(ioapic, pin, mp_INT);
3029 if (irq_entry == -1)
3030 continue;
3031 irq = pin_2_irq(irq_entry, ioapic, pin);
3032
3033 /* setup_IO_APIC_irqs() could fail to get a vector for some
3034 * devices when there are too many of them, because at that
3035 * time only the boot CPU is online.
3036 */
3037 cfg = irq_cfg(irq);
3038 if (!cfg->vector)
3039 setup_IO_APIC_irq(ioapic, pin, irq,
3040 irq_trigger(irq_entry),
3041 irq_polarity(irq_entry));
3042 #ifdef CONFIG_INTR_REMAP
3043 else if (intr_remapping_enabled)
3044 set_ir_ioapic_affinity_irq(irq, TARGET_CPUS);
3045 #endif
3046 else
3047 set_ioapic_affinity_irq(irq, TARGET_CPUS);
3048 }
3049
3050 }
3051 }
3052 #endif
3053
3054 #define IOAPIC_RESOURCE_NAME_SIZE 11
3055
3056 static struct resource *ioapic_resources;
3057
3058 static struct resource * __init ioapic_setup_resources(void)
3059 {
3060 unsigned long n;
3061 struct resource *res;
3062 char *mem;
3063 int i;
3064
3065 if (nr_ioapics <= 0)
3066 return NULL;
3067
3068 n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
3069 n *= nr_ioapics;
3070
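/*
 * A single bootmem block holds the resource array followed by the
 * name strings; for nr_ioapics == 2 the layout is roughly:
 *
 *	[res 0][res 1]["IOAPIC 0\0"...]["IOAPIC 1\0"...]
 *
 * with mem advanced past the resource array below before the names
 * are filled in.
 */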
3071 mem = alloc_bootmem(n);
3072 res = (void *)mem;
3073
3074 if (mem != NULL) {
3075 mem += sizeof(struct resource) * nr_ioapics;
3076
3077 for (i = 0; i < nr_ioapics; i++) {
3078 res[i].name = mem;
3079 res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
3080 sprintf(mem, "IOAPIC %u", i);
3081 mem += IOAPIC_RESOURCE_NAME_SIZE;
3082 }
3083 }
3084
3085 ioapic_resources = res;
3086
3087 return res;
3088 }
3089
3090 void __init ioapic_init_mappings(void)
3091 {
3092 unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
3093 struct resource *ioapic_res;
3094 int i;
3095
3096 ioapic_res = ioapic_setup_resources();
3097 for (i = 0; i < nr_ioapics; i++) {
3098 if (smp_found_config) {
3099 ioapic_phys = mp_ioapics[i].mp_apicaddr;
3100 } else {
3101 ioapic_phys = (unsigned long)
3102 alloc_bootmem_pages(PAGE_SIZE);
3103 ioapic_phys = __pa(ioapic_phys);
3104 }
3105 set_fixmap_nocache(idx, ioapic_phys);
3106 apic_printk(APIC_VERBOSE,
3107 "mapped IOAPIC to %016lx (%016lx)\n",
3108 __fix_to_virt(idx), ioapic_phys);
3109 idx++;
3110
3111 if (ioapic_res != NULL) {
3112 ioapic_res->start = ioapic_phys;
3113 ioapic_res->end = ioapic_phys + (4 * 1024) - 1;
3114 ioapic_res++;
3115 }
3116 }
3117 }
3118
3119 static int __init ioapic_insert_resources(void)
3120 {
3121 int i;
3122 struct resource *r = ioapic_resources;
3123
3124 if (!r) {
3125 printk(KERN_ERR
3126 "IO APIC resources could be not be allocated.\n");
3127 return -1;
3128 }
3129
3130 for (i = 0; i < nr_ioapics; i++) {
3131 insert_resource(&iomem_resource, r);
3132 r++;
3133 }
3134
3135 return 0;
3136 }
3137
3138 /* Insert the IO APIC resources after PCI initialization has occurred to handle
3139 * IO APICs that are mapped in on a BAR in PCI space. */
3140 late_initcall(ioapic_insert_resources);
3141