x86: io-apic - get rid of __DO_ACTION macro
arch/x86/kernel/io_apic.c
1 /*
2 * Intel IO-APIC support for multi-Pentium hosts.
3 *
4 * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
5 *
6 * Many thanks to Stig Venaas for trying out countless experimental
7 * patches and reporting/debugging problems patiently!
8 *
9 * (c) 1999, Multiple IO-APIC support, developed by
10 * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
11 * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
12 * further tested and cleaned up by Zach Brown <zab@redhat.com>
13 * and Ingo Molnar <mingo@redhat.com>
14 *
15 * Fixes
16 * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
17 * thanks to Eric Gilmore
18 * and Rolf G. Tews
19 * for testing these extensively
20 * Paul Diefenbaugh : Added full ACPI support
21 */
22
23 #include <linux/mm.h>
24 #include <linux/interrupt.h>
25 #include <linux/init.h>
26 #include <linux/delay.h>
27 #include <linux/sched.h>
28 #include <linux/pci.h>
29 #include <linux/mc146818rtc.h>
30 #include <linux/compiler.h>
31 #include <linux/acpi.h>
32 #include <linux/module.h>
33 #include <linux/sysdev.h>
34 #include <linux/msi.h>
35 #include <linux/htirq.h>
36 #include <linux/freezer.h>
37 #include <linux/kthread.h>
38 #include <linux/jiffies.h> /* time_after() */
39 #ifdef CONFIG_ACPI
40 #include <acpi/acpi_bus.h>
41 #endif
42 #include <linux/bootmem.h>
43 #include <linux/dmar.h>
44 #include <linux/hpet.h>
45
46 #include <asm/idle.h>
47 #include <asm/io.h>
48 #include <asm/smp.h>
49 #include <asm/desc.h>
50 #include <asm/proto.h>
51 #include <asm/acpi.h>
52 #include <asm/dma.h>
53 #include <asm/timer.h>
54 #include <asm/i8259.h>
55 #include <asm/nmi.h>
56 #include <asm/msidef.h>
57 #include <asm/hypertransport.h>
58 #include <asm/setup.h>
59 #include <asm/irq_remapping.h>
60 #include <asm/hpet.h>
61
62 #include <mach_ipi.h>
63 #include <mach_apic.h>
64 #include <mach_apicdef.h>
65
66 #define __apicdebuginit(type) static type __init
67
68 /*
69 * Is the SiS APIC rmw bug present?
70 * -1 = don't know, 0 = no, 1 = yes
71 */
72 int sis_apic_bug = -1;
73
74 static DEFINE_SPINLOCK(ioapic_lock);
75 static DEFINE_SPINLOCK(vector_lock);
76
77 /*
78 * # of IRQ routing registers
79 */
80 int nr_ioapic_registers[MAX_IO_APICS];
81
82 /* I/O APIC entries */
83 struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];
84 int nr_ioapics;
85
86 /* MP IRQ source entries */
87 struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
88
89 /* # of MP IRQ source entries */
90 int mp_irq_entries;
91
92 #if defined (CONFIG_MCA) || defined (CONFIG_EISA)
93 int mp_bus_id_to_type[MAX_MP_BUSSES];
94 #endif
95
96 DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
97
98 int skip_ioapic_setup;
99
100 static int __init parse_noapic(char *str)
101 {
102 /* disable IO-APIC */
103 disable_ioapic_setup();
104 return 0;
105 }
106 early_param("noapic", parse_noapic);
107
108 struct irq_cfg;
109 struct irq_pin_list;
110 struct irq_cfg {
111 unsigned int irq;
112 #ifdef CONFIG_HAVE_SPARSE_IRQ
113 struct irq_cfg *next;
114 #endif
115 struct irq_pin_list *irq_2_pin;
116 cpumask_t domain;
117 cpumask_t old_domain;
118 unsigned move_cleanup_count;
119 u8 vector;
120 u8 move_in_progress : 1;
121 };
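/*
 * A note on the fields above (derived from how the code below uses
 * them): 'vector' is the CPU interrupt vector currently assigned,
 * 'domain' is the set of cpus on which that vector is valid, and
 * old_domain/move_in_progress/move_cleanup_count track an in-flight
 * migration of the irq to a new vector/cpu set.  'irq_2_pin' lists
 * every IO-APIC (apic, pin) that raises this irq.
 */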
122
123 /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
124 static struct irq_cfg irq_cfg_legacy[] __initdata = {
125 [0] = { .irq = 0, .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR, },
126 [1] = { .irq = 1, .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR, },
127 [2] = { .irq = 2, .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR, },
128 [3] = { .irq = 3, .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR, },
129 [4] = { .irq = 4, .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR, },
130 [5] = { .irq = 5, .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR, },
131 [6] = { .irq = 6, .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR, },
132 [7] = { .irq = 7, .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR, },
133 [8] = { .irq = 8, .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR, },
134 [9] = { .irq = 9, .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR, },
135 [10] = { .irq = 10, .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, },
136 [11] = { .irq = 11, .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, },
137 [12] = { .irq = 12, .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, },
138 [13] = { .irq = 13, .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, },
139 [14] = { .irq = 14, .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, },
140 [15] = { .irq = 15, .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
141 };
142
143 static struct irq_cfg irq_cfg_init = { .irq = -1U, };
144
145 static void init_one_irq_cfg(struct irq_cfg *cfg)
146 {
147 memcpy(cfg, &irq_cfg_init, sizeof(struct irq_cfg));
148 }
149
150 static struct irq_cfg *irq_cfgx;
151
152 #ifdef CONFIG_HAVE_SPARSE_IRQ
153 /*
154 * Protect the irq_cfgx_free freelist:
155 */
156 static DEFINE_SPINLOCK(irq_cfg_lock);
157
158 static struct irq_cfg *irq_cfgx_free;
159 #endif
160
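/*
 * init_work() runs once the dyn_array backing store for irq_cfgx has
 * been allocated: it seeds the first entries from irq_cfg_legacy,
 * marks the remainder unused, and (with sparse irqs enabled) threads
 * the unused entries into the irq_cfgx_free list.
 */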
161 static void __init init_work(void *data)
162 {
163 struct dyn_array *da = data;
164 struct irq_cfg *cfg;
165 int legacy_count;
166 int i;
167
168 cfg = *da->name;
169
170 memcpy(cfg, irq_cfg_legacy, sizeof(irq_cfg_legacy));
171
172 legacy_count = ARRAY_SIZE(irq_cfg_legacy);
173 for (i = legacy_count; i < *da->nr; i++)
174 init_one_irq_cfg(&cfg[i]);
175
176 #ifdef CONFIG_HAVE_SPARSE_IRQ
177 for (i = 1; i < *da->nr; i++)
178 cfg[i-1].next = &cfg[i];
179
180 irq_cfgx_free = &irq_cfgx[legacy_count];
181 irq_cfgx[legacy_count - 1].next = NULL;
182 #endif
183 }
184
185 #ifdef CONFIG_HAVE_SPARSE_IRQ
186 /* needs to be bigger than the size of irq_cfg_legacy */
187 static int nr_irq_cfg = 32;
188
189 static int __init parse_nr_irq_cfg(char *arg)
190 {
191 if (arg) {
192 nr_irq_cfg = simple_strtoul(arg, NULL, 0);
193 if (nr_irq_cfg < 32)
194 nr_irq_cfg = 32;
195 }
196 return 0;
197 }
198
199 early_param("nr_irq_cfg", parse_nr_irq_cfg);
200
201 #define for_each_irq_cfg(irqX, cfg) \
202 for (cfg = irq_cfgx, irqX = cfg->irq; cfg; cfg = cfg->next, irqX = cfg ? cfg->irq : -1U)
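/*
 * Usage sketch (illustrative only, not part of the original file):
 *
 *	unsigned int irq;
 *	struct irq_cfg *cfg;
 *
 *	for_each_irq_cfg(irq, cfg)
 *		printk(KERN_DEBUG "irq %u -> vector 0x%x\n",
 *			irq, cfg->vector);
 *
 * The second comma expression re-tests cfg before dereferencing it,
 * so the walk terminates cleanly at the end of the list.
 */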
203
204
205 DEFINE_DYN_ARRAY(irq_cfgx, sizeof(struct irq_cfg), nr_irq_cfg, PAGE_SIZE, init_work);
206
207 static struct irq_cfg *irq_cfg(unsigned int irq)
208 {
209 struct irq_cfg *cfg;
210
211 cfg = irq_cfgx;
212 while (cfg) {
213 if (cfg->irq == irq)
214 return cfg;
215
216 cfg = cfg->next;
217 }
218
219 return NULL;
220 }
221
222 static struct irq_cfg *irq_cfg_alloc(unsigned int irq)
223 {
224 struct irq_cfg *cfg, *cfg_pri;
225 unsigned long flags;
226 int count = 0;
227 int i;
228
229 cfg_pri = cfg = irq_cfgx;
230 while (cfg) {
231 if (cfg->irq == irq)
232 return cfg;
233
234 cfg_pri = cfg;
235 cfg = cfg->next;
236 count++;
237 }
238
239 spin_lock_irqsave(&irq_cfg_lock, flags);
240 if (!irq_cfgx_free) {
241 unsigned long phys;
242 unsigned long total_bytes;
243 /*
244 * we ran out of the pre-allocated ones; allocate more
245 */
246 printk(KERN_DEBUG "try to get more irq_cfg %d\n", nr_irq_cfg);
247
248 total_bytes = sizeof(struct irq_cfg) * nr_irq_cfg;
249 if (after_bootmem)
250 cfg = kzalloc(total_bytes, GFP_ATOMIC);
251 else
252 cfg = __alloc_bootmem_nopanic(total_bytes, PAGE_SIZE, 0);
253
254 if (!cfg)
255 panic("please boot with nr_irq_cfg=%d\n", count * 2);
256
257 phys = __pa(cfg);
258 printk(KERN_DEBUG "irq_cfg ==> [%#lx - %#lx]\n", phys, phys + total_bytes);
259
260 for (i = 0; i < nr_irq_cfg; i++)
261 init_one_irq_cfg(&cfg[i]);
262
263 for (i = 1; i < nr_irq_cfg; i++)
264 cfg[i-1].next = &cfg[i];
265
266 irq_cfgx_free = cfg;
267 }
268
269 cfg = irq_cfgx_free;
270 irq_cfgx_free = irq_cfgx_free->next;
271 cfg->next = NULL;
272 if (cfg_pri)
273 cfg_pri->next = cfg;
274 else
275 irq_cfgx = cfg;
276 cfg->irq = irq;
277
278 spin_unlock_irqrestore(&irq_cfg_lock, flags);
279
280 printk(KERN_DEBUG "found new irq_cfg for irq %d\n", cfg->irq);
281 #ifdef CONFIG_HAVE_SPARSE_IRQ_DEBUG
282 {
283 /* dump the results */
284 struct irq_cfg *cfg;
285 unsigned long phys;
286 unsigned long bytes = sizeof(struct irq_cfg);
287
288 printk(KERN_DEBUG "=========================== %d\n", irq);
289 printk(KERN_DEBUG "irq_cfg dump after get that for %d\n", irq);
290 for (cfg = irq_cfgx; cfg; cfg = cfg->next) {
291 phys = __pa(cfg);
292 printk(KERN_DEBUG "irq_cfg %d ==> [%#lx - %#lx]\n", cfg->irq, phys, phys + bytes);
293 }
294 printk(KERN_DEBUG "===========================\n");
295 }
296 #endif
297 return cfg;
298 }
299 #else
300
301 #define for_each_irq_cfg(irq, cfg) \
302 for (irq = 0, cfg = &irq_cfgx[irq]; irq < nr_irqs; irq++, cfg = &irq_cfgx[irq])
303
304 DEFINE_DYN_ARRAY(irq_cfgx, sizeof(struct irq_cfg), nr_irqs, PAGE_SIZE, init_work);
305
306 struct irq_cfg *irq_cfg(unsigned int irq)
307 {
308 if (irq < nr_irqs)
309 return &irq_cfgx[irq];
310
311 return NULL;
312 }
313 struct irq_cfg *irq_cfg_alloc(unsigned int irq)
314 {
315 return irq_cfg(irq);
316 }
317
318 #endif
319 /*
320 * This is performance-critical, we want to do it O(1)
321 *
322 * the indexing order of this array favors 1:1 mappings
323 * between pins and IRQs.
324 */
325
326 struct irq_pin_list {
327 int apic, pin;
328 struct irq_pin_list *next;
329 };
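/*
 * Each irq maps to a singly-linked list of (apic, pin) pairs, since a
 * single irq may be raised by more than one IO-APIC pin (e.g. shared
 * ISA-space irqs); see add_pin_to_irq() below.
 */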
330
331 static struct irq_pin_list *irq_2_pin_head;
332 /* fill one page? */
333 static int nr_irq_2_pin = 0x100;
334 static struct irq_pin_list *irq_2_pin_ptr;
335 static void __init irq_2_pin_init_work(void *data)
336 {
337 struct dyn_array *da = data;
338 struct irq_pin_list *pin;
339 int i;
340
341 pin = *da->name;
342
343 for (i = 1; i < *da->nr; i++)
344 pin[i-1].next = &pin[i];
345
346 irq_2_pin_ptr = &pin[0];
347 }
348 DEFINE_DYN_ARRAY(irq_2_pin_head, sizeof(struct irq_pin_list), nr_irq_2_pin, PAGE_SIZE, irq_2_pin_init_work);
349
350 static struct irq_pin_list *get_one_free_irq_2_pin(void)
351 {
352 struct irq_pin_list *pin;
353 int i;
354
355 pin = irq_2_pin_ptr;
356
357 if (pin) {
358 irq_2_pin_ptr = pin->next;
359 pin->next = NULL;
360 return pin;
361 }
362
363 /*
364 * we ran out of the pre-allocated ones; allocate more
365 */
366 printk(KERN_DEBUG "try to get more irq_2_pin %d\n", nr_irq_2_pin);
367
368 if (after_bootmem)
369 pin = kzalloc(sizeof(struct irq_pin_list)*nr_irq_2_pin,
370 GFP_ATOMIC);
371 else
372 pin = __alloc_bootmem_nopanic(sizeof(struct irq_pin_list) *
373 nr_irq_2_pin, PAGE_SIZE, 0);
374
375 if (!pin)
376 panic("cannot get more irq_2_pin\n");
377
378 for (i = 1; i < nr_irq_2_pin; i++)
379 pin[i-1].next = &pin[i];
380
381 irq_2_pin_ptr = pin->next;
382 pin->next = NULL;
383
384 return pin;
385 }
386
387 struct io_apic {
388 unsigned int index;
389 unsigned int unused[3];
390 unsigned int data;
391 };
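/*
 * The IO-APIC is programmed through an indirect register window: the
 * register number is written to 'index' (offset 0x00) and the 32-bit
 * value is then read or written through 'data' (offset 0x10); the
 * 'unused' words pad the structure to that layout.
 */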
392
393 static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
394 {
395 return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
396 + (mp_ioapics[idx].mp_apicaddr & ~PAGE_MASK);
397 }
398
399 static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
400 {
401 struct io_apic __iomem *io_apic = io_apic_base(apic);
402 writel(reg, &io_apic->index);
403 return readl(&io_apic->data);
404 }
405
406 static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
407 {
408 struct io_apic __iomem *io_apic = io_apic_base(apic);
409 writel(reg, &io_apic->index);
410 writel(value, &io_apic->data);
411 }
412
413 /*
414 * Re-write a value: to be used for read-modify-write
415 * cycles where the read already set up the index register.
416 *
417 * Older SiS APICs require us to rewrite the index register.
418 */
419 static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
420 {
421 struct io_apic __iomem *io_apic = io_apic_base(apic);
422 if (sis_apic_bug)
423 writel(reg, &io_apic->index);
424 writel(value, &io_apic->data);
425 }
426
427 static bool io_apic_level_ack_pending(unsigned int irq)
428 {
429 struct irq_pin_list *entry;
430 unsigned long flags;
431 struct irq_cfg *cfg = irq_cfg(irq);
432
433 spin_lock_irqsave(&ioapic_lock, flags);
434 entry = cfg->irq_2_pin;
435 for (;;) {
436 unsigned int reg;
437 int pin;
438
439 if (!entry)
440 break;
441 pin = entry->pin;
442 reg = io_apic_read(entry->apic, 0x10 + pin*2);
443 /* Is the remote IRR bit set? */
444 if (reg & IO_APIC_REDIR_REMOTE_IRR) {
445 spin_unlock_irqrestore(&ioapic_lock, flags);
446 return true;
447 }
448 if (!entry->next)
449 break;
450 entry = entry->next;
451 }
452 spin_unlock_irqrestore(&ioapic_lock, flags);
453
454 return false;
455 }
456
457 union entry_union {
458 struct { u32 w1, w2; };
459 struct IO_APIC_route_entry entry;
460 };
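/*
 * A redirection table entry is 64 bits wide but the register window
 * is only 32 bits, so this union overlays one IO_APIC_route_entry on
 * the two words (w1 = bits 0-31, w2 = bits 32-63) that are accessed
 * as registers 0x10 + 2*pin and 0x11 + 2*pin.
 */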
461
462 static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
463 {
464 union entry_union eu;
465 unsigned long flags;
466 spin_lock_irqsave(&ioapic_lock, flags);
467 eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
468 eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
469 spin_unlock_irqrestore(&ioapic_lock, flags);
470 return eu.entry;
471 }
472
473 /*
474 * When we write a new IO APIC routing entry, we need to write the high
475 * word first! If the mask bit in the low word is clear, we will enable
476 * the interrupt, and we need to make sure the entry is fully populated
477 * before that happens.
478 */
479 static void
480 __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
481 {
482 union entry_union eu;
483 eu.entry = e;
484 io_apic_write(apic, 0x11 + 2*pin, eu.w2);
485 io_apic_write(apic, 0x10 + 2*pin, eu.w1);
486 }
487
488 static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
489 {
490 unsigned long flags;
491 spin_lock_irqsave(&ioapic_lock, flags);
492 __ioapic_write_entry(apic, pin, e);
493 spin_unlock_irqrestore(&ioapic_lock, flags);
494 }
495
496 /*
497 * When we mask an IO APIC routing entry, we need to write the low
498 * word first, in order to set the mask bit before we change the
499 * high bits!
500 */
501 static void ioapic_mask_entry(int apic, int pin)
502 {
503 unsigned long flags;
504 union entry_union eu = { .entry.mask = 1 };
505
506 spin_lock_irqsave(&ioapic_lock, flags);
507 io_apic_write(apic, 0x10 + 2*pin, eu.w1);
508 io_apic_write(apic, 0x11 + 2*pin, eu.w2);
509 spin_unlock_irqrestore(&ioapic_lock, flags);
510 }
511
512 #ifdef CONFIG_SMP
513 static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector)
514 {
515 int apic, pin;
516 struct irq_cfg *cfg;
517 struct irq_pin_list *entry;
518
519 cfg = irq_cfg(irq);
520 entry = cfg->irq_2_pin;
521 for (;;) {
522 unsigned int reg;
523
524 if (!entry)
525 break;
526
527 apic = entry->apic;
528 pin = entry->pin;
529 #ifdef CONFIG_INTR_REMAP
530 /*
531 * With interrupt-remapping, destination information comes
532 * from interrupt-remapping table entry.
533 */
534 if (!irq_remapped(irq))
535 io_apic_write(apic, 0x11 + pin*2, dest);
536 #else
537 io_apic_write(apic, 0x11 + pin*2, dest);
538 #endif
539 reg = io_apic_read(apic, 0x10 + pin*2);
540 reg &= ~IO_APIC_REDIR_VECTOR_MASK;
541 reg |= vector;
542 io_apic_modify(apic, 0x10 + pin*2, reg);
543 if (!entry->next)
544 break;
545 entry = entry->next;
546 }
547 }
548
549 static int assign_irq_vector(int irq, cpumask_t mask);
550
551 static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
552 {
553 struct irq_cfg *cfg;
554 unsigned long flags;
555 unsigned int dest;
556 cpumask_t tmp;
557 struct irq_desc *desc;
558
559 cpus_and(tmp, mask, cpu_online_map);
560 if (cpus_empty(tmp))
561 return;
562
563 cfg = irq_cfg(irq);
564 if (assign_irq_vector(irq, mask))
565 return;
566
567 cpus_and(tmp, cfg->domain, mask);
568 dest = cpu_mask_to_apicid(tmp);
569 /*
570 * Only the high 8 bits are valid.
571 */
572 dest = SET_APIC_LOGICAL_ID(dest);
573
574 desc = irq_to_desc(irq);
575 spin_lock_irqsave(&ioapic_lock, flags);
576 __target_IO_APIC_irq(irq, dest, cfg->vector);
577 desc->affinity = mask;
578 spin_unlock_irqrestore(&ioapic_lock, flags);
579 }
580 #endif /* CONFIG_SMP */
581
582 /*
583 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
584 * shared ISA-space IRQs, so we have to support them. We are super
585 * fast in the common case, and fast for shared ISA-space IRQs.
586 */
587 static void add_pin_to_irq(unsigned int irq, int apic, int pin)
588 {
589 struct irq_cfg *cfg;
590 struct irq_pin_list *entry;
591
592 /* first reference to this irq, so allocate a new irq_cfg */
593 cfg = irq_cfg_alloc(irq);
594 entry = cfg->irq_2_pin;
595 if (!entry) {
596 entry = get_one_free_irq_2_pin();
597 cfg->irq_2_pin = entry;
598 entry->apic = apic;
599 entry->pin = pin;
600 printk(KERN_DEBUG " 0 add_pin_to_irq: irq %d --> apic %d pin %d\n", irq, apic, pin);
601 return;
602 }
603
604 while (entry->next) {
605 /* don't add the same (apic, pin) pair twice */
606 if (entry->apic == apic && entry->pin == pin)
607 return;
608
609 entry = entry->next;
610 }
611
612 entry->next = get_one_free_irq_2_pin();
613 entry = entry->next;
614 entry->apic = apic;
615 entry->pin = pin;
616 printk(KERN_DEBUG " x add_pin_to_irq: irq %d --> apic %d pin %d\n", irq, apic, pin);
617 }
618
619 /*
620 * Reroute an IRQ to a different pin.
621 */
622 static void __init replace_pin_at_irq(unsigned int irq,
623 int oldapic, int oldpin,
624 int newapic, int newpin)
625 {
626 struct irq_cfg *cfg = irq_cfg(irq);
627 struct irq_pin_list *entry = cfg->irq_2_pin;
628 int replaced = 0;
629
630 while (entry) {
631 if (entry->apic == oldapic && entry->pin == oldpin) {
632 entry->apic = newapic;
633 entry->pin = newpin;
634 replaced = 1;
635 /* each (apic, pin) pair occurs at most once, so stop here */
636 break;
637 }
638 entry = entry->next;
639 }
640
641 /* old pin not found; fall back to adding the new one */
642 if (!replaced)
643 add_pin_to_irq(irq, newapic, newpin);
644 }
645
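/*
 * io_apic_modify_irq() is the read-modify-write helper that replaces
 * the old __DO_ACTION() macro: for every pin on the irq's list it
 * reads the low RTE word, keeps the bits in mask_and, sets the bits
 * in mask_or, writes the result back, and then runs the optional
 * 'final' hook (used on 64-bit to flush the write with a dummy read;
 * see io_apic_sync() below).
 */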
646 static inline void io_apic_modify_irq(unsigned int irq,
647 int mask_and, int mask_or,
648 void (*final)(struct irq_pin_list *entry))
649 {
650 int pin;
651 struct irq_cfg *cfg;
652 struct irq_pin_list *entry;
653
654 cfg = irq_cfg(irq);
655 for (entry = cfg->irq_2_pin; entry != NULL; entry = entry->next) {
656 unsigned int reg;
657 pin = entry->pin;
658 reg = io_apic_read(entry->apic, 0x10 + pin * 2);
659 reg &= mask_and;
660 reg |= mask_or;
661 io_apic_modify(entry->apic, 0x10 + pin * 2, reg);
662 if (final)
663 final(entry);
664 }
665 }
666
667 static void __unmask_IO_APIC_irq(unsigned int irq)
668 {
669 io_apic_modify_irq(irq, ~IO_APIC_REDIR_MASKED, 0, NULL);
670 }
671
672 #ifdef CONFIG_X86_64
673 void io_apic_sync(struct irq_pin_list *entry)
674 {
675 /*
676 * Synchronize the IO-APIC and the CPU by doing
677 * a dummy read from the IO-APIC
678 */
679 struct io_apic __iomem *io_apic;
680 io_apic = io_apic_base(entry->apic);
681 readl(&io_apic->data);
682 }
683
684 static void __mask_IO_APIC_irq(unsigned int irq)
685 {
686 io_apic_modify_irq(irq, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
687 }
688 #else /* CONFIG_X86_32 */
689 static void __mask_IO_APIC_irq(unsigned int irq)
690 {
691 io_apic_modify_irq(irq, ~0, IO_APIC_REDIR_MASKED, NULL);
692 }
693
694 static void __mask_and_edge_IO_APIC_irq(unsigned int irq)
695 {
696 io_apic_modify_irq(irq, ~IO_APIC_REDIR_LEVEL_TRIGGER,
697 IO_APIC_REDIR_MASKED, NULL);
698 }
699
700 static void __unmask_and_level_IO_APIC_irq(unsigned int irq)
701 {
702 io_apic_modify_irq(irq, ~IO_APIC_REDIR_MASKED,
703 IO_APIC_REDIR_LEVEL_TRIGGER, NULL);
704 }
705 #endif /* CONFIG_X86_32 */
706
707 static void mask_IO_APIC_irq (unsigned int irq)
708 {
709 unsigned long flags;
710
711 spin_lock_irqsave(&ioapic_lock, flags);
712 __mask_IO_APIC_irq(irq);
713 spin_unlock_irqrestore(&ioapic_lock, flags);
714 }
715
716 static void unmask_IO_APIC_irq (unsigned int irq)
717 {
718 unsigned long flags;
719
720 spin_lock_irqsave(&ioapic_lock, flags);
721 __unmask_IO_APIC_irq(irq);
722 spin_unlock_irqrestore(&ioapic_lock, flags);
723 }
724
725 static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
726 {
727 struct IO_APIC_route_entry entry;
728
729 /* Check delivery_mode to be sure we're not clearing an SMI pin */
730 entry = ioapic_read_entry(apic, pin);
731 if (entry.delivery_mode == dest_SMI)
732 return;
733 /*
734 * Disable it in the IO-APIC irq-routing table:
735 */
736 ioapic_mask_entry(apic, pin);
737 }
738
739 static void clear_IO_APIC (void)
740 {
741 int apic, pin;
742
743 for (apic = 0; apic < nr_ioapics; apic++)
744 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
745 clear_IO_APIC_pin(apic, pin);
746 }
747
748 #if !defined(CONFIG_SMP) && defined(CONFIG_X86_32)
749 void send_IPI_self(int vector)
750 {
751 unsigned int cfg;
752
753 /*
754 * Wait for idle.
755 */
756 apic_wait_icr_idle();
757 cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | APIC_DEST_LOGICAL;
758 /*
759 * Send the IPI. The write to APIC_ICR fires this off.
760 */
761 apic_write(APIC_ICR, cfg);
762 }
763 #endif /* !CONFIG_SMP && CONFIG_X86_32*/
764
765 #ifdef CONFIG_X86_32
766 /*
767 * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
768 * specific CPU-side IRQs.
769 */
770
771 #define MAX_PIRQS 8
772 static int pirq_entries [MAX_PIRQS];
773 static int pirqs_enabled;
774
775 static int __init ioapic_pirq_setup(char *str)
776 {
777 int i, max;
778 int ints[MAX_PIRQS+1];
779
780 get_options(str, ARRAY_SIZE(ints), ints);
781
782 for (i = 0; i < MAX_PIRQS; i++)
783 pirq_entries[i] = -1;
784
785 pirqs_enabled = 1;
786 apic_printk(APIC_VERBOSE, KERN_INFO
787 "PIRQ redirection, working around broken MP-BIOS.\n");
788 max = MAX_PIRQS;
789 if (ints[0] < MAX_PIRQS)
790 max = ints[0];
791
792 for (i = 0; i < max; i++) {
793 apic_printk(APIC_VERBOSE, KERN_DEBUG
794 "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
795 /*
796 * PIRQs are mapped upside down, usually.
797 */
798 pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
799 }
800 return 1;
801 }
802
803 __setup("pirq=", ioapic_pirq_setup);
804 #endif /* CONFIG_X86_32 */
805
806 #ifdef CONFIG_INTR_REMAP
807 /* I/O APIC RTE contents at the OS boot up */
808 static struct IO_APIC_route_entry *early_ioapic_entries[MAX_IO_APICS];
809
810 /*
811 * Saves and masks all the unmasked IO-APIC RTEs
812 */
813 int save_mask_IO_APIC_setup(void)
814 {
815 union IO_APIC_reg_01 reg_01;
816 unsigned long flags;
817 int apic, pin;
818
819 /*
820 * The number of IO-APIC IRQ registers (== #pins):
821 */
822 for (apic = 0; apic < nr_ioapics; apic++) {
823 spin_lock_irqsave(&ioapic_lock, flags);
824 reg_01.raw = io_apic_read(apic, 1);
825 spin_unlock_irqrestore(&ioapic_lock, flags);
826 nr_ioapic_registers[apic] = reg_01.bits.entries+1;
827 }
828
829 for (apic = 0; apic < nr_ioapics; apic++) {
830 early_ioapic_entries[apic] =
831 kzalloc(sizeof(struct IO_APIC_route_entry) *
832 nr_ioapic_registers[apic], GFP_KERNEL);
833 if (!early_ioapic_entries[apic])
834 return -ENOMEM;
835 }
836
837 for (apic = 0; apic < nr_ioapics; apic++)
838 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
839 struct IO_APIC_route_entry entry;
840
841 entry = early_ioapic_entries[apic][pin] =
842 ioapic_read_entry(apic, pin);
843 if (!entry.mask) {
844 entry.mask = 1;
845 ioapic_write_entry(apic, pin, entry);
846 }
847 }
848 return 0;
849 }
850
851 void restore_IO_APIC_setup(void)
852 {
853 int apic, pin;
854
855 for (apic = 0; apic < nr_ioapics; apic++)
856 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
857 ioapic_write_entry(apic, pin,
858 early_ioapic_entries[apic][pin]);
859 }
860
861 void reinit_intr_remapped_IO_APIC(int intr_remapping)
862 {
863 /*
864 * For now, plainly restore the previous settings.
865 * TBD: when the OS enables interrupt-remapping, the IO-APIC
866 * RTEs need to be set up to point at interrupt-remapping table
867 * entries; until then, do a plain restore and wait for
868 * setup_IO_APIC_irqs() to do the proper initialization.
869 */
870 restore_IO_APIC_setup();
871 }
872 #endif
873
874 /*
875 * Find the IRQ entry number of a certain pin.
876 */
877 static int find_irq_entry(int apic, int pin, int type)
878 {
879 int i;
880
881 for (i = 0; i < mp_irq_entries; i++)
882 if (mp_irqs[i].mp_irqtype == type &&
883 (mp_irqs[i].mp_dstapic == mp_ioapics[apic].mp_apicid ||
884 mp_irqs[i].mp_dstapic == MP_APIC_ALL) &&
885 mp_irqs[i].mp_dstirq == pin)
886 return i;
887
888 return -1;
889 }
890
891 /*
892 * Find the pin to which IRQ[irq] (ISA) is connected
893 */
894 static int __init find_isa_irq_pin(int irq, int type)
895 {
896 int i;
897
898 for (i = 0; i < mp_irq_entries; i++) {
899 int lbus = mp_irqs[i].mp_srcbus;
900
901 if (test_bit(lbus, mp_bus_not_pci) &&
902 (mp_irqs[i].mp_irqtype == type) &&
903 (mp_irqs[i].mp_srcbusirq == irq))
904
905 return mp_irqs[i].mp_dstirq;
906 }
907 return -1;
908 }
909
910 static int __init find_isa_irq_apic(int irq, int type)
911 {
912 int i;
913
914 for (i = 0; i < mp_irq_entries; i++) {
915 int lbus = mp_irqs[i].mp_srcbus;
916
917 if (test_bit(lbus, mp_bus_not_pci) &&
918 (mp_irqs[i].mp_irqtype == type) &&
919 (mp_irqs[i].mp_srcbusirq == irq))
920 break;
921 }
922 if (i < mp_irq_entries) {
923 int apic;
924 for (apic = 0; apic < nr_ioapics; apic++) {
925 if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic)
926 return apic;
927 }
928 }
929
930 return -1;
931 }
932
933 /*
934 * Find a specific PCI IRQ entry.
935 * Not an __init, possibly needed by modules
936 */
937 static int pin_2_irq(int idx, int apic, int pin);
938
939 int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
940 {
941 int apic, i, best_guess = -1;
942
943 apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
944 bus, slot, pin);
945 if (test_bit(bus, mp_bus_not_pci)) {
946 apic_printk(APIC_VERBOSE, "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
947 return -1;
948 }
949 for (i = 0; i < mp_irq_entries; i++) {
950 int lbus = mp_irqs[i].mp_srcbus;
951
952 for (apic = 0; apic < nr_ioapics; apic++)
953 if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic ||
954 mp_irqs[i].mp_dstapic == MP_APIC_ALL)
955 break;
956
957 if (!test_bit(lbus, mp_bus_not_pci) &&
958 !mp_irqs[i].mp_irqtype &&
959 (bus == lbus) &&
960 (slot == ((mp_irqs[i].mp_srcbusirq >> 2) & 0x1f))) {
961 int irq = pin_2_irq(i,apic,mp_irqs[i].mp_dstirq);
962
963 if (!(apic || IO_APIC_IRQ(irq)))
964 continue;
965
966 if (pin == (mp_irqs[i].mp_srcbusirq & 3))
967 return irq;
968 /*
969 * Use the first all-but-pin matching entry as a
970 * best-guess fuzzy result for broken mptables.
971 */
972 if (best_guess < 0)
973 best_guess = irq;
974 }
975 }
976 return best_guess;
977 }
978
979 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
980
981 #if defined(CONFIG_EISA) || defined(CONFIG_MCA)
982 /*
983 * EISA Edge/Level control register, ELCR
984 */
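/*
 * The ELCR occupies I/O ports 0x4d0 (irqs 0-7) and 0x4d1 (irqs 8-15);
 * bit n set means the corresponding ISA irq is level triggered, clear
 * means edge triggered.
 */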
985 static int EISA_ELCR(unsigned int irq)
986 {
987 if (irq < 16) {
988 unsigned int port = 0x4d0 + (irq >> 3);
989 return (inb(port) >> (irq & 7)) & 1;
990 }
991 apic_printk(APIC_VERBOSE, KERN_INFO
992 "Broken MPtable reports ISA irq %d\n", irq);
993 return 0;
994 }
995
996 #endif
997
998 /* ISA interrupts are always polarity zero edge triggered,
999 * when listed as conforming in the MP table. */
1000
1001 #define default_ISA_trigger(idx) (0)
1002 #define default_ISA_polarity(idx) (0)
1003
1004 /* EISA interrupts are always polarity zero and can be edge or level
1005 * trigger depending on the ELCR value. If an interrupt is listed as
1006 * EISA conforming in the MP table, that means its trigger type must
1007 * be read in from the ELCR */
1008
1009 #define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mp_srcbusirq))
1010 #define default_EISA_polarity(idx) default_ISA_polarity(idx)
1011
1012 /* PCI interrupts are always polarity one level triggered,
1013 * when listed as conforming in the MP table. */
1014
1015 #define default_PCI_trigger(idx) (1)
1016 #define default_PCI_polarity(idx) (1)
1017
1018 /* MCA interrupts are always polarity zero level triggered,
1019 * when listed as conforming in the MP table. */
1020
1021 #define default_MCA_trigger(idx) (1)
1022 #define default_MCA_polarity(idx) default_ISA_polarity(idx)
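/*
 * Summary of the "conforming" defaults above (polarity 0 = active
 * high, 1 = active low; trigger 0 = edge, 1 = level):
 *
 *	bus	polarity	trigger
 *	ISA	0		0
 *	EISA	0		from ELCR
 *	PCI	1		1
 *	MCA	0		1
 */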
1023
1024 static int MPBIOS_polarity(int idx)
1025 {
1026 int bus = mp_irqs[idx].mp_srcbus;
1027 int polarity;
1028
1029 /*
1030 * Determine IRQ line polarity (high active or low active):
1031 */
1032 switch (mp_irqs[idx].mp_irqflag & 3)
1033 {
1034 case 0: /* conforms, i.e. bus-type dependent polarity */
1035 if (test_bit(bus, mp_bus_not_pci))
1036 polarity = default_ISA_polarity(idx);
1037 else
1038 polarity = default_PCI_polarity(idx);
1039 break;
1040 case 1: /* high active */
1041 {
1042 polarity = 0;
1043 break;
1044 }
1045 case 2: /* reserved */
1046 {
1047 printk(KERN_WARNING "broken BIOS!!\n");
1048 polarity = 1;
1049 break;
1050 }
1051 case 3: /* low active */
1052 {
1053 polarity = 1;
1054 break;
1055 }
1056 default: /* invalid */
1057 {
1058 printk(KERN_WARNING "broken BIOS!!\n");
1059 polarity = 1;
1060 break;
1061 }
1062 }
1063 return polarity;
1064 }
1065
1066 static int MPBIOS_trigger(int idx)
1067 {
1068 int bus = mp_irqs[idx].mp_srcbus;
1069 int trigger;
1070
1071 /*
1072 * Determine IRQ trigger mode (edge or level sensitive):
1073 */
1074 switch ((mp_irqs[idx].mp_irqflag>>2) & 3)
1075 {
1076 case 0: /* conforms, i.e. bus-type dependent */
1077 if (test_bit(bus, mp_bus_not_pci))
1078 trigger = default_ISA_trigger(idx);
1079 else
1080 trigger = default_PCI_trigger(idx);
1081 #if defined(CONFIG_EISA) || defined(CONFIG_MCA)
1082 switch (mp_bus_id_to_type[bus]) {
1083 case MP_BUS_ISA: /* ISA pin */
1084 {
1085 /* set before the switch */
1086 break;
1087 }
1088 case MP_BUS_EISA: /* EISA pin */
1089 {
1090 trigger = default_EISA_trigger(idx);
1091 break;
1092 }
1093 case MP_BUS_PCI: /* PCI pin */
1094 {
1095 /* set before the switch */
1096 break;
1097 }
1098 case MP_BUS_MCA: /* MCA pin */
1099 {
1100 trigger = default_MCA_trigger(idx);
1101 break;
1102 }
1103 default:
1104 {
1105 printk(KERN_WARNING "broken BIOS!!\n");
1106 trigger = 1;
1107 break;
1108 }
1109 }
1110 #endif
1111 break;
1112 case 1: /* edge */
1113 {
1114 trigger = 0;
1115 break;
1116 }
1117 case 2: /* reserved */
1118 {
1119 printk(KERN_WARNING "broken BIOS!!\n");
1120 trigger = 1;
1121 break;
1122 }
1123 case 3: /* level */
1124 {
1125 trigger = 1;
1126 break;
1127 }
1128 default: /* invalid */
1129 {
1130 printk(KERN_WARNING "broken BIOS!!\n");
1131 trigger = 0;
1132 break;
1133 }
1134 }
1135 return trigger;
1136 }
1137
1138 static inline int irq_polarity(int idx)
1139 {
1140 return MPBIOS_polarity(idx);
1141 }
1142
1143 static inline int irq_trigger(int idx)
1144 {
1145 return MPBIOS_trigger(idx);
1146 }
1147
1148 int (*ioapic_renumber_irq)(int ioapic, int irq);
1149 static int pin_2_irq(int idx, int apic, int pin)
1150 {
1151 int irq, i;
1152 int bus = mp_irqs[idx].mp_srcbus;
1153
1154 /*
1155 * Debugging check, we are in big trouble if this message pops up!
1156 */
1157 if (mp_irqs[idx].mp_dstirq != pin)
1158 printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
1159
1160 if (test_bit(bus, mp_bus_not_pci)) {
1161 irq = mp_irqs[idx].mp_srcbusirq;
1162 } else {
1163 /*
1164 * PCI IRQs are mapped in order
1165 */
1166 i = irq = 0;
1167 while (i < apic)
1168 irq += nr_ioapic_registers[i++];
1169 irq += pin;
1170 /*
1171 * For MPS mode, so far only needed by ES7000 platform
1172 */
1173 if (ioapic_renumber_irq)
1174 irq = ioapic_renumber_irq(apic, irq);
1175 }
1176
1177 #ifdef CONFIG_X86_32
1178 /*
1179 * PCI IRQ command line redirection. Yes, limits are hardcoded.
1180 */
1181 if ((pin >= 16) && (pin <= 23)) {
1182 if (pirq_entries[pin-16] != -1) {
1183 if (!pirq_entries[pin-16]) {
1184 apic_printk(APIC_VERBOSE, KERN_DEBUG
1185 "disabling PIRQ%d\n", pin-16);
1186 } else {
1187 irq = pirq_entries[pin-16];
1188 apic_printk(APIC_VERBOSE, KERN_DEBUG
1189 "using PIRQ%d -> IRQ %d\n",
1190 pin-16, irq);
1191 }
1192 }
1193 }
1194 #endif
1195
1196 return irq;
1197 }
1198
1199 void lock_vector_lock(void)
1200 {
1201 /* Used to ensure that the online set of cpus does not change
1202 * during assign_irq_vector.
1203 */
1204 spin_lock(&vector_lock);
1205 }
1206
1207 void unlock_vector_lock(void)
1208 {
1209 spin_unlock(&vector_lock);
1210 }
1211
1212 static int __assign_irq_vector(int irq, cpumask_t mask)
1213 {
1214 /*
1215 * NOTE! The local APIC isn't very good at handling
1216 * multiple interrupts at the same interrupt level.
1217 * As the interrupt level is determined by taking the
1218 * vector number and shifting that right by 4, we
1219 * want to spread these out a bit so that they don't
1220 * all fall in the same interrupt level.
1221 *
1222 * Also, we've got to be careful not to trash gate
1223 * 0x80, because int 0x80 is hm, kind of importantish. ;)
1224 */
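/*
 * Illustrative walk of the search below, assuming for the sake of
 * example that FIRST_DEVICE_VECTOR is 0x31: candidates go 0x39,
 * 0x41, 0x49, ... in steps of 8 so that successive allocations land
 * in different priority levels (level = vector >> 4); once
 * first_system_vector is reached, 'offset' is bumped and the scan
 * wraps back to FIRST_DEVICE_VECTOR + offset.
 */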
1225 static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
1226 unsigned int old_vector;
1227 int cpu;
1228 struct irq_cfg *cfg;
1229
1230 cfg = irq_cfg(irq);
1231
1232 /* Only try and allocate irqs on cpus that are present */
1233 cpus_and(mask, mask, cpu_online_map);
1234
1235 if ((cfg->move_in_progress) || cfg->move_cleanup_count)
1236 return -EBUSY;
1237
1238 old_vector = cfg->vector;
1239 if (old_vector) {
1240 cpumask_t tmp;
1241 cpus_and(tmp, cfg->domain, mask);
1242 if (!cpus_empty(tmp))
1243 return 0;
1244 }
1245
1246 for_each_cpu_mask_nr(cpu, mask) {
1247 cpumask_t domain, new_mask;
1248 int new_cpu;
1249 int vector, offset;
1250
1251 domain = vector_allocation_domain(cpu);
1252 cpus_and(new_mask, domain, cpu_online_map);
1253
1254 vector = current_vector;
1255 offset = current_offset;
1256 next:
1257 vector += 8;
1258 if (vector >= first_system_vector) {
1259 /* If we run out of vectors on large boxen, must share them. */
1260 offset = (offset + 1) % 8;
1261 vector = FIRST_DEVICE_VECTOR + offset;
1262 }
1263 if (unlikely(current_vector == vector))
1264 continue;
1265 #ifdef CONFIG_X86_64
1266 if (vector == IA32_SYSCALL_VECTOR)
1267 goto next;
1268 #else
1269 if (vector == SYSCALL_VECTOR)
1270 goto next;
1271 #endif
1272 for_each_cpu_mask_nr(new_cpu, new_mask)
1273 if (per_cpu(vector_irq, new_cpu)[vector] != -1)
1274 goto next;
1275 /* Found one! */
1276 current_vector = vector;
1277 current_offset = offset;
1278 if (old_vector) {
1279 cfg->move_in_progress = 1;
1280 cfg->old_domain = cfg->domain;
1281 }
1282 for_each_cpu_mask_nr(new_cpu, new_mask)
1283 per_cpu(vector_irq, new_cpu)[vector] = irq;
1284 cfg->vector = vector;
1285 cfg->domain = domain;
1286 return 0;
1287 }
1288 return -ENOSPC;
1289 }
1290
1291 static int assign_irq_vector(int irq, cpumask_t mask)
1292 {
1293 int err;
1294 unsigned long flags;
1295
1296 spin_lock_irqsave(&vector_lock, flags);
1297 err = __assign_irq_vector(irq, mask);
1298 spin_unlock_irqrestore(&vector_lock, flags);
1299 return err;
1300 }
1301
1302 static void __clear_irq_vector(int irq)
1303 {
1304 struct irq_cfg *cfg;
1305 cpumask_t mask;
1306 int cpu, vector;
1307
1308 cfg = irq_cfg(irq);
1309 BUG_ON(!cfg->vector);
1310
1311 vector = cfg->vector;
1312 cpus_and(mask, cfg->domain, cpu_online_map);
1313 for_each_cpu_mask_nr(cpu, mask)
1314 per_cpu(vector_irq, cpu)[vector] = -1;
1315
1316 cfg->vector = 0;
1317 cpus_clear(cfg->domain);
1318 }
1319
1320 void __setup_vector_irq(int cpu)
1321 {
1322 /* Initialize vector_irq on a new cpu */
1323 /* This function must be called with vector_lock held */
1324 int irq, vector;
1325 struct irq_cfg *cfg;
1326
1327 /* Mark the inuse vectors */
1328 for_each_irq_cfg(irq, cfg) {
1329 if (!cpu_isset(cpu, cfg->domain))
1330 continue;
1331 vector = cfg->vector;
1332 per_cpu(vector_irq, cpu)[vector] = irq;
1333 }
1334 /* Mark the free vectors */
1335 for (vector = 0; vector < NR_VECTORS; ++vector) {
1336 irq = per_cpu(vector_irq, cpu)[vector];
1337 if (irq < 0)
1338 continue;
1339
1340 cfg = irq_cfg(irq);
1341 if (!cpu_isset(cpu, cfg->domain))
1342 per_cpu(vector_irq, cpu)[vector] = -1;
1343 }
1344 }
1345
1346 static struct irq_chip ioapic_chip;
1347 #ifdef CONFIG_INTR_REMAP
1348 static struct irq_chip ir_ioapic_chip;
1349 #endif
1350
1351 #define IOAPIC_AUTO -1
1352 #define IOAPIC_EDGE 0
1353 #define IOAPIC_LEVEL 1
1354
1355 #ifdef CONFIG_X86_32
1356 static inline int IO_APIC_irq_trigger(int irq)
1357 {
1358 int apic, idx, pin;
1359
1360 for (apic = 0; apic < nr_ioapics; apic++) {
1361 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
1362 idx = find_irq_entry(apic, pin, mp_INT);
1363 if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
1364 return irq_trigger(idx);
1365 }
1366 }
1367 /*
1368 * nonexistent IRQs are edge default
1369 */
1370 return 0;
1371 }
1372 #else
1373 static inline int IO_APIC_irq_trigger(int irq)
1374 {
1375 return 1;
1376 }
1377 #endif
1378
1379 static void ioapic_register_intr(int irq, unsigned long trigger)
1380 {
1381 struct irq_desc *desc;
1382
1383 /* allocate the irq_desc on first use (irqs < 16 are preallocated) */
1384 if (irq < 16)
1385 desc = irq_to_desc(irq);
1386 else
1387 desc = irq_to_desc_alloc(irq);
1388
1389 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
1390 trigger == IOAPIC_LEVEL)
1391 desc->status |= IRQ_LEVEL;
1392 else
1393 desc->status &= ~IRQ_LEVEL;
1394
1395 #ifdef CONFIG_INTR_REMAP
1396 if (irq_remapped(irq)) {
1397 desc->status |= IRQ_MOVE_PCNTXT;
1398 if (trigger)
1399 set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
1400 handle_fasteoi_irq,
1401 "fasteoi");
1402 else
1403 set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
1404 handle_edge_irq, "edge");
1405 return;
1406 }
1407 #endif
1408 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
1409 trigger == IOAPIC_LEVEL)
1410 set_irq_chip_and_handler_name(irq, &ioapic_chip,
1411 handle_fasteoi_irq,
1412 "fasteoi");
1413 else
1414 set_irq_chip_and_handler_name(irq, &ioapic_chip,
1415 handle_edge_irq, "edge");
1416 }
1417
1418 static int setup_ioapic_entry(int apic, int irq,
1419 struct IO_APIC_route_entry *entry,
1420 unsigned int destination, int trigger,
1421 int polarity, int vector)
1422 {
1423 /*
1424 * add it to the IO-APIC irq-routing table:
1425 */
1426 memset(entry, 0, sizeof(*entry));
1427
1428 #ifdef CONFIG_INTR_REMAP
1429 if (intr_remapping_enabled) {
1430 struct intel_iommu *iommu = map_ioapic_to_ir(apic);
1431 struct irte irte;
1432 struct IR_IO_APIC_route_entry *ir_entry =
1433 (struct IR_IO_APIC_route_entry *) entry;
1434 int index;
1435
1436 if (!iommu)
1437 panic("No mapping iommu for ioapic %d\n", apic);
1438
1439 index = alloc_irte(iommu, irq, 1);
1440 if (index < 0)
1441 panic("Failed to allocate IRTE for ioapic %d\n", apic);
1442
1443 memset(&irte, 0, sizeof(irte));
1444
1445 irte.present = 1;
1446 irte.dst_mode = INT_DEST_MODE;
1447 irte.trigger_mode = trigger;
1448 irte.dlvry_mode = INT_DELIVERY_MODE;
1449 irte.vector = vector;
1450 irte.dest_id = IRTE_DEST(destination);
1451
1452 modify_irte(irq, &irte);
1453
1454 ir_entry->index2 = (index >> 15) & 0x1;
1455 ir_entry->zero = 0;
1456 ir_entry->format = 1;
1457 ir_entry->index = (index & 0x7fff);
1458 } else
1459 #endif
1460 {
1461 entry->delivery_mode = INT_DELIVERY_MODE;
1462 entry->dest_mode = INT_DEST_MODE;
1463 entry->dest = destination;
1464 }
1465
1466 entry->mask = 0; /* enable IRQ */
1467 entry->trigger = trigger;
1468 entry->polarity = polarity;
1469 entry->vector = vector;
1470
1471 /* Mask level triggered irqs.
1472 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
1473 */
1474 if (trigger)
1475 entry->mask = 1;
1476 return 0;
1477 }
1478
1479 static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq,
1480 int trigger, int polarity)
1481 {
1482 struct irq_cfg *cfg;
1483 struct IO_APIC_route_entry entry;
1484 cpumask_t mask;
1485
1486 if (!IO_APIC_IRQ(irq))
1487 return;
1488
1489 cfg = irq_cfg(irq);
1490
1491 mask = TARGET_CPUS;
1492 if (assign_irq_vector(irq, mask))
1493 return;
1494
1495 cpus_and(mask, cfg->domain, mask);
1496
1497 apic_printk(APIC_VERBOSE, KERN_DEBUG
1498 "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
1499 "IRQ %d Mode:%i Active:%i)\n",
1500 apic, mp_ioapics[apic].mp_apicid, pin, cfg->vector,
1501 irq, trigger, polarity);
1502
1503
1504 if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry,
1505 cpu_mask_to_apicid(mask), trigger, polarity,
1506 cfg->vector)) {
1507 printk(KERN_WARNING "Failed to set up ioapic entry for ioapic %d, pin %d\n",
1508 mp_ioapics[apic].mp_apicid, pin);
1509 __clear_irq_vector(irq);
1510 return;
1511 }
1512
1513 ioapic_register_intr(irq, trigger);
1514 if (irq < 16)
1515 disable_8259A_irq(irq);
1516
1517 ioapic_write_entry(apic, pin, entry);
1518 }
1519
1520 static void __init setup_IO_APIC_irqs(void)
1521 {
1522 int apic, pin, idx, irq;
1523 int notcon = 0;
1524
1525 apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
1526
1527 for (apic = 0; apic < nr_ioapics; apic++) {
1528 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
1529
1530 idx = find_irq_entry(apic, pin, mp_INT);
1531 if (idx == -1) {
1532 if (!notcon) {
1533 notcon = 1;
1534 apic_printk(APIC_VERBOSE,
1535 KERN_DEBUG " %d-%d",
1536 mp_ioapics[apic].mp_apicid,
1537 pin);
1538 } else
1539 apic_printk(APIC_VERBOSE, " %d-%d",
1540 mp_ioapics[apic].mp_apicid,
1541 pin);
1542 continue;
1543 }
1544
1545 irq = pin_2_irq(idx, apic, pin);
1546 #ifdef CONFIG_X86_32
1547 if (multi_timer_check(apic, irq))
1548 continue;
1549 #endif
1550 add_pin_to_irq(irq, apic, pin);
1551
1552 setup_IO_APIC_irq(apic, pin, irq,
1553 irq_trigger(idx), irq_polarity(idx));
1554 }
1555 if (notcon) {
1556 apic_printk(APIC_VERBOSE,
1557 " (apicid-pin) not connected\n");
1558 notcon = 0;
1559 }
1560 }
1561
1562 if (notcon)
1563 apic_printk(APIC_VERBOSE,
1564 " (apicid-pin) not connected\n");
1565 }
1566
1567 /*
1568 * Set up the timer pin, possibly with the 8259A-master behind.
1569 */
1570 static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
1571 int vector)
1572 {
1573 struct IO_APIC_route_entry entry;
1574
1575 #ifdef CONFIG_INTR_REMAP
1576 if (intr_remapping_enabled)
1577 return;
1578 #endif
1579
1580 memset(&entry, 0, sizeof(entry));
1581
1582 /*
1583 * We use logical delivery to get the timer IRQ
1584 * to the first CPU.
1585 */
1586 entry.dest_mode = INT_DEST_MODE;
1587 entry.mask = 1; /* mask IRQ now */
1588 entry.dest = cpu_mask_to_apicid(TARGET_CPUS);
1589 entry.delivery_mode = INT_DELIVERY_MODE;
1590 entry.polarity = 0;
1591 entry.trigger = 0;
1592 entry.vector = vector;
1593
1594 /*
1595 * The timer IRQ doesn't have to know that behind the
1596 * scene we may have a 8259A-master in AEOI mode ...
1597 */
1598 set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");
1599
1600 /*
1601 * Add it to the IO-APIC irq-routing table:
1602 */
1603 ioapic_write_entry(apic, pin, entry);
1604 }
1605
1606
1607 __apicdebuginit(void) print_IO_APIC(void)
1608 {
1609 int apic, i;
1610 union IO_APIC_reg_00 reg_00;
1611 union IO_APIC_reg_01 reg_01;
1612 union IO_APIC_reg_02 reg_02;
1613 union IO_APIC_reg_03 reg_03;
1614 unsigned long flags;
1615 struct irq_cfg *cfg;
1616 unsigned int irq;
1617
1618 if (apic_verbosity == APIC_QUIET)
1619 return;
1620
1621 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
1622 for (i = 0; i < nr_ioapics; i++)
1623 printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
1624 mp_ioapics[i].mp_apicid, nr_ioapic_registers[i]);
1625
1626 /*
1627 * We are a bit conservative about what we expect. We have to
1628 * know about every hardware change ASAP.
1629 */
1630 printk(KERN_INFO "testing the IO APIC.......................\n");
1631
1632 for (apic = 0; apic < nr_ioapics; apic++) {
1633
1634 spin_lock_irqsave(&ioapic_lock, flags);
1635 reg_00.raw = io_apic_read(apic, 0);
1636 reg_01.raw = io_apic_read(apic, 1);
1637 if (reg_01.bits.version >= 0x10)
1638 reg_02.raw = io_apic_read(apic, 2);
1639 if (reg_01.bits.version >= 0x20)
1640 reg_03.raw = io_apic_read(apic, 3);
1641 spin_unlock_irqrestore(&ioapic_lock, flags);
1642
1643 printk("\n");
1644 printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mp_apicid);
1645 printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
1646 printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
1647 printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
1648 printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS);
1649
1650 printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
1651 printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
1652
1653 printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
1654 printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);
1655
1656 /*
1657 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
1658 * but the value of reg_02 is read as the previous read register
1659 * value, so ignore it if reg_02 == reg_01.
1660 */
1661 if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
1662 printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
1663 printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
1664 }
1665
1666 /*
1667 * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
1668 * or reg_03, but the value of reg_0[23] is read as the previous read
1669 * register value, so ignore it if reg_03 == reg_0[12].
1670 */
1671 if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
1672 reg_03.raw != reg_01.raw) {
1673 printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
1674 printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT);
1675 }
1676
1677 printk(KERN_DEBUG ".... IRQ redirection table:\n");
1678
1679 printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
1680 " Stat Dmod Deli Vect: \n");
1681
1682 for (i = 0; i <= reg_01.bits.entries; i++) {
1683 struct IO_APIC_route_entry entry;
1684
1685 entry = ioapic_read_entry(apic, i);
1686
1687 printk(KERN_DEBUG " %02x %03X ",
1688 i,
1689 entry.dest
1690 );
1691
1692 printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
1693 entry.mask,
1694 entry.trigger,
1695 entry.irr,
1696 entry.polarity,
1697 entry.delivery_status,
1698 entry.dest_mode,
1699 entry.delivery_mode,
1700 entry.vector
1701 );
1702 }
1703 }
1704 printk(KERN_DEBUG "IRQ to pin mappings:\n");
1705 for_each_irq_cfg(irq, cfg) {
1706 struct irq_pin_list *entry = cfg->irq_2_pin;
1707 if (!entry)
1708 continue;
1709 printk(KERN_DEBUG "IRQ%d ", irq);
1710 for (;;) {
1711 printk("-> %d:%d", entry->apic, entry->pin);
1712 if (!entry->next)
1713 break;
1714 entry = entry->next;
1715 }
1716 printk("\n");
1717 }
1718
1719 printk(KERN_INFO ".................................... done.\n");
1720
1721 return;
1722 }
1723
1724 __apicdebuginit(void) print_APIC_bitfield(int base)
1725 {
1726 unsigned int v;
1727 int i, j;
1728
1729 if (apic_verbosity == APIC_QUIET)
1730 return;
1731
1732 printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
1733 for (i = 0; i < 8; i++) {
1734 v = apic_read(base + i*0x10);
1735 for (j = 0; j < 32; j++) {
1736 if (v & (1<<j))
1737 printk("1");
1738 else
1739 printk("0");
1740 }
1741 printk("\n");
1742 }
1743 }
1744
1745 __apicdebuginit(void) print_local_APIC(void *dummy)
1746 {
1747 unsigned int v, ver, maxlvt;
1748 u64 icr;
1749
1750 if (apic_verbosity == APIC_QUIET)
1751 return;
1752
1753 printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
1754 smp_processor_id(), hard_smp_processor_id());
1755 v = apic_read(APIC_ID);
1756 printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, read_apic_id());
1757 v = apic_read(APIC_LVR);
1758 printk(KERN_INFO "... APIC VERSION: %08x\n", v);
1759 ver = GET_APIC_VERSION(v);
1760 maxlvt = lapic_get_maxlvt();
1761
1762 v = apic_read(APIC_TASKPRI);
1763 printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
1764
1765 if (APIC_INTEGRATED(ver)) { /* !82489DX */
1766 if (!APIC_XAPIC(ver)) {
1767 v = apic_read(APIC_ARBPRI);
1768 printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
1769 v & APIC_ARBPRI_MASK);
1770 }
1771 v = apic_read(APIC_PROCPRI);
1772 printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
1773 }
1774
1775 /*
1776 * Remote read supported only in the 82489DX and local APIC for
1777 * Pentium processors.
1778 */
1779 if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
1780 v = apic_read(APIC_RRR);
1781 printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
1782 }
1783
1784 v = apic_read(APIC_LDR);
1785 printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
1786 if (!x2apic_enabled()) {
1787 v = apic_read(APIC_DFR);
1788 printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
1789 }
1790 v = apic_read(APIC_SPIV);
1791 printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
1792
1793 printk(KERN_DEBUG "... APIC ISR field:\n");
1794 print_APIC_bitfield(APIC_ISR);
1795 printk(KERN_DEBUG "... APIC TMR field:\n");
1796 print_APIC_bitfield(APIC_TMR);
1797 printk(KERN_DEBUG "... APIC IRR field:\n");
1798 print_APIC_bitfield(APIC_IRR);
1799
1800 if (APIC_INTEGRATED(ver)) { /* !82489DX */
1801 if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
1802 apic_write(APIC_ESR, 0);
1803
1804 v = apic_read(APIC_ESR);
1805 printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
1806 }
1807
1808 icr = apic_icr_read();
1809 printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
1810 printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));
1811
1812 v = apic_read(APIC_LVTT);
1813 printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
1814
1815 if (maxlvt > 3) { /* PC is LVT#4. */
1816 v = apic_read(APIC_LVTPC);
1817 printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
1818 }
1819 v = apic_read(APIC_LVT0);
1820 printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
1821 v = apic_read(APIC_LVT1);
1822 printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
1823
1824 if (maxlvt > 2) { /* ERR is LVT#3. */
1825 v = apic_read(APIC_LVTERR);
1826 printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
1827 }
1828
1829 v = apic_read(APIC_TMICT);
1830 printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
1831 v = apic_read(APIC_TMCCT);
1832 printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
1833 v = apic_read(APIC_TDCR);
1834 printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
1835 printk("\n");
1836 }
1837
1838 __apicdebuginit(void) print_all_local_APICs(void)
1839 {
1840 int cpu;
1841
1842 preempt_disable();
1843 for_each_online_cpu(cpu)
1844 smp_call_function_single(cpu, print_local_APIC, NULL, 1);
1845 preempt_enable();
1846 }
1847
1848 __apicdebuginit(void) print_PIC(void)
1849 {
1850 unsigned int v;
1851 unsigned long flags;
1852
1853 if (apic_verbosity == APIC_QUIET)
1854 return;
1855
1856 printk(KERN_DEBUG "\nprinting PIC contents\n");
1857
1858 spin_lock_irqsave(&i8259A_lock, flags);
1859
1860 v = inb(0xa1) << 8 | inb(0x21);
1861 printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
1862
1863 v = inb(0xa0) << 8 | inb(0x20);
1864 printk(KERN_DEBUG "... PIC IRR: %04x\n", v);
1865
1866 outb(0x0b,0xa0);
1867 outb(0x0b,0x20);
1868 v = inb(0xa0) << 8 | inb(0x20);
1869 outb(0x0a,0xa0);
1870 outb(0x0a,0x20);
1871
1872 spin_unlock_irqrestore(&i8259A_lock, flags);
1873
1874 printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
1875
1876 v = inb(0x4d1) << 8 | inb(0x4d0);
1877 printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
1878 }
1879
1880 __apicdebuginit(int) print_all_ICs(void)
1881 {
1882 print_PIC();
1883 print_all_local_APICs();
1884 print_IO_APIC();
1885
1886 return 0;
1887 }
1888
1889 fs_initcall(print_all_ICs);
1890
1891
1892 /* Where, if anywhere, the i8259 is connected in ExtINT mode */
1893 static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
1894
1895 void __init enable_IO_APIC(void)
1896 {
1897 union IO_APIC_reg_01 reg_01;
1898 int i8259_apic, i8259_pin;
1899 int apic;
1900 unsigned long flags;
1901
1902 #ifdef CONFIG_X86_32
1903 int i;
1904 if (!pirqs_enabled)
1905 for (i = 0; i < MAX_PIRQS; i++)
1906 pirq_entries[i] = -1;
1907 #endif
1908
1909 /*
1910 * The number of IO-APIC IRQ registers (== #pins):
1911 */
1912 for (apic = 0; apic < nr_ioapics; apic++) {
1913 spin_lock_irqsave(&ioapic_lock, flags);
1914 reg_01.raw = io_apic_read(apic, 1);
1915 spin_unlock_irqrestore(&ioapic_lock, flags);
1916 nr_ioapic_registers[apic] = reg_01.bits.entries+1;
1917 }
1918 for (apic = 0; apic < nr_ioapics; apic++) {
1919 int pin;
1920 /* See if any of the pins is in ExtINT mode */
1921 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
1922 struct IO_APIC_route_entry entry;
1923 entry = ioapic_read_entry(apic, pin);
1924
1925 /* If the interrupt line is enabled and in ExtInt mode
1926 * I have found the pin where the i8259 is connected.
1927 */
1928 if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
1929 ioapic_i8259.apic = apic;
1930 ioapic_i8259.pin = pin;
1931 goto found_i8259;
1932 }
1933 }
1934 }
1935 found_i8259:
1936 /* Look to see if the MP table has reported the ExtINT */
1937 /* If we could not find the appropriate pin by looking at the ioapic,
1938 * the i8259 is probably not connected to the ioapic, but give the
1939 * mptable a chance anyway.
1940 */
1941 i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
1942 i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
1943 /* Trust the MP table if nothing is set up in the hardware */
1944 if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
1945 printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
1946 ioapic_i8259.pin = i8259_pin;
1947 ioapic_i8259.apic = i8259_apic;
1948 }
1949 /* Complain if the MP table and the hardware disagree */
1950 if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
1951 (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
1952 {
1953 printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
1954 }
1955
1956 /*
1957 * Do not trust the IO-APIC being empty at bootup
1958 */
1959 clear_IO_APIC();
1960 }
1961
1962 /*
1963 * Not an __init, needed by the reboot code
1964 */
1965 void disable_IO_APIC(void)
1966 {
1967 /*
1968 * Clear the IO-APIC before rebooting:
1969 */
1970 clear_IO_APIC();
1971
1972 /*
1973 * If the i8259 is routed through an IOAPIC
1974 * Put that IOAPIC in virtual wire mode
1975 * so legacy interrupts can be delivered.
1976 */
1977 if (ioapic_i8259.pin != -1) {
1978 struct IO_APIC_route_entry entry;
1979
1980 memset(&entry, 0, sizeof(entry));
1981 entry.mask = 0; /* Enabled */
1982 entry.trigger = 0; /* Edge */
1983 entry.irr = 0;
1984 entry.polarity = 0; /* High */
1985 entry.delivery_status = 0;
1986 entry.dest_mode = 0; /* Physical */
1987 entry.delivery_mode = dest_ExtINT; /* ExtInt */
1988 entry.vector = 0;
1989 entry.dest = read_apic_id();
1990
1991 /*
1992 * Add it to the IO-APIC irq-routing table:
1993 */
1994 ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
1995 }
1996
1997 disconnect_bsp_APIC(ioapic_i8259.pin != -1);
1998 }
1999
2000 #ifdef CONFIG_X86_32
2001 /*
2002 * function to set the IO-APIC physical IDs based on the
2003 * values stored in the MPC table.
2004 *
2005 * by Matt Domsch <Matt_Domsch@dell.com> Tue Dec 21 12:25:05 CST 1999
2006 */
2007
2008 static void __init setup_ioapic_ids_from_mpc(void)
2009 {
2010 union IO_APIC_reg_00 reg_00;
2011 physid_mask_t phys_id_present_map;
2012 int apic;
2013 int i;
2014 unsigned char old_id;
2015 unsigned long flags;
2016
2017 if (x86_quirks->setup_ioapic_ids && x86_quirks->setup_ioapic_ids())
2018 return;
2019
2020 /*
2021 * Don't check I/O APIC IDs for xAPIC systems. They have
2022 * no meaning without the serial APIC bus.
2023 */
2024 if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
2025 || APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
2026 return;
2027 /*
2028 * This is broken; anything with a real cpu count has to
2029 * circumvent this idiocy regardless.
2030 */
2031 phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map);
2032
2033 /*
2034 * Set the IOAPIC ID to the value stored in the MPC table.
2035 */
2036 for (apic = 0; apic < nr_ioapics; apic++) {
2037
2038 /* Read the register 0 value */
2039 spin_lock_irqsave(&ioapic_lock, flags);
2040 reg_00.raw = io_apic_read(apic, 0);
2041 spin_unlock_irqrestore(&ioapic_lock, flags);
2042
2043 old_id = mp_ioapics[apic].mp_apicid;
2044
2045 if (mp_ioapics[apic].mp_apicid >= get_physical_broadcast()) {
2046 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
2047 apic, mp_ioapics[apic].mp_apicid);
2048 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
2049 reg_00.bits.ID);
2050 mp_ioapics[apic].mp_apicid = reg_00.bits.ID;
2051 }
2052
2053 /*
2054 * Sanity check, is the ID really free? Every APIC in a
2055 * system must have a unique ID or we get lots of nice
2056 * 'stuck on smp_invalidate_needed IPI wait' messages.
2057 */
2058 if (check_apicid_used(phys_id_present_map,
2059 mp_ioapics[apic].mp_apicid)) {
2060 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
2061 apic, mp_ioapics[apic].mp_apicid);
2062 for (i = 0; i < get_physical_broadcast(); i++)
2063 if (!physid_isset(i, phys_id_present_map))
2064 break;
2065 if (i >= get_physical_broadcast())
2066 panic("Max APIC ID exceeded!\n");
2067 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
2068 i);
2069 physid_set(i, phys_id_present_map);
2070 mp_ioapics[apic].mp_apicid = i;
2071 } else {
2072 physid_mask_t tmp;
2073 tmp = apicid_to_cpu_present(mp_ioapics[apic].mp_apicid);
2074 apic_printk(APIC_VERBOSE, "Setting %d in the "
2075 "phys_id_present_map\n",
2076 mp_ioapics[apic].mp_apicid);
2077 physids_or(phys_id_present_map, phys_id_present_map, tmp);
2078 }
2079
2080
2081 /*
2082 * We need to adjust the IRQ routing table
2083 * if the ID changed.
2084 */
2085 if (old_id != mp_ioapics[apic].mp_apicid)
2086 for (i = 0; i < mp_irq_entries; i++)
2087 if (mp_irqs[i].mp_dstapic == old_id)
2088 mp_irqs[i].mp_dstapic
2089 = mp_ioapics[apic].mp_apicid;
2090
2091 /*
2092 * Read the right value from the MPC table and
2093 * write it into the ID register.
2094 */
2095 apic_printk(APIC_VERBOSE, KERN_INFO
2096 "...changing IO-APIC physical APIC ID to %d ...",
2097 mp_ioapics[apic].mp_apicid);
2098
2099 reg_00.bits.ID = mp_ioapics[apic].mp_apicid;
2100 spin_lock_irqsave(&ioapic_lock, flags);
2101 io_apic_write(apic, 0, reg_00.raw);
2102 spin_unlock_irqrestore(&ioapic_lock, flags);
2103
2104 /*
2105 * Sanity check
2106 */
2107 spin_lock_irqsave(&ioapic_lock, flags);
2108 reg_00.raw = io_apic_read(apic, 0);
2109 spin_unlock_irqrestore(&ioapic_lock, flags);
2110 if (reg_00.bits.ID != mp_ioapics[apic].mp_apicid)
2111 printk("could not set ID!\n");
2112 else
2113 apic_printk(APIC_VERBOSE, " ok.\n");
2114 }
2115 }
2116 #endif
2117
2118 int no_timer_check __initdata;
2119
2120 static int __init notimercheck(char *s)
2121 {
2122 no_timer_check = 1;
2123 return 1;
2124 }
2125 __setup("no_timer_check", notimercheck);
2126
2127 /*
2128 * There is a nasty bug in some older SMP boards, their mptable lies
2129 * about the timer IRQ. We do the following to work around the situation:
2130 *
2131 * - timer IRQ defaults to IO-APIC IRQ
2132 * - if this function detects that timer IRQs are defunct, then we fall
2133 * back to ISA timer IRQs
2134 */
2135 static int __init timer_irq_works(void)
2136 {
2137 unsigned long t1 = jiffies;
2138 unsigned long flags;
2139
2140 if (no_timer_check)
2141 return 1;
2142
2143 local_save_flags(flags);
2144 local_irq_enable();
2145 /* Let ten ticks pass... */
2146 mdelay((10 * 1000) / HZ);
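/*
 * A quick worked example of the arithmetic above: (10 * 1000) / HZ
 * is just "ten timer ticks, expressed in milliseconds" - e.g.
 * HZ=1000 gives mdelay(10), HZ=250 gives mdelay(40), HZ=100 gives
 * mdelay(100).
 */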
2147 local_irq_restore(flags);
2148
2149 /*
2150 * Expect a few ticks at least, to be sure some possible
2151 * glue logic does not lock up after the first one or
2152 * two ticks in a non-ExtINT mode. Also the local APIC
2153 * might have cached one ExtINT interrupt. Finally, at
2154 * least one tick may be lost due to delays.
2155 */
2156
2157 /* jiffies wrap? */
2158 if (time_after(jiffies, t1 + 4))
2159 return 1;
2160 return 0;
2161 }
2162
2163 /*
2164 * In the SMP+IOAPIC case it might happen that there are an unspecified
2165 * number of pending IRQ events unhandled. These cases are very rare,
2166 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
2167 * better to do it this way, as we then do not have to be aware of
2168 * 'pending' interrupts in the IRQ path, except at this point.
2169 */
2170 /*
2171 * Edge-triggered IRQs need to resend any interrupt
2172 * that was delayed, but this is now handled in the
2173 * device-independent code.
2174 */
2175
2176 /*
2177 * Starting up an edge-triggered IO-APIC interrupt is
2178 * nasty - we need to make sure that we get the edge.
2179 * If it is already asserted for some reason, we need
2180 * to return 1 to indicate that it was pending.
2181 *
2182 * This is not complete - we should be able to fake
2183 * an edge even if it isn't on the 8259A...
2184 */
2185
2186 static unsigned int startup_ioapic_irq(unsigned int irq)
2187 {
2188 int was_pending = 0;
2189 unsigned long flags;
2190
2191 spin_lock_irqsave(&ioapic_lock, flags);
2192 if (irq < 16) {
2193 disable_8259A_irq(irq);
2194 if (i8259A_irq_pending(irq))
2195 was_pending = 1;
2196 }
2197 __unmask_IO_APIC_irq(irq);
2198 spin_unlock_irqrestore(&ioapic_lock, flags);
2199
2200 return was_pending;
2201 }
2202
2203 #ifdef CONFIG_X86_64
2204 static int ioapic_retrigger_irq(unsigned int irq)
2205 {
2206
2207 struct irq_cfg *cfg = irq_cfg(irq);
2208 unsigned long flags;
2209
2210 spin_lock_irqsave(&vector_lock, flags);
2211 send_IPI_mask(cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector);
2212 spin_unlock_irqrestore(&vector_lock, flags);
2213
2214 return 1;
2215 }
2216 #else
2217 static int ioapic_retrigger_irq(unsigned int irq)
2218 {
2219 send_IPI_self(irq_cfg(irq)->vector);
2220
2221 return 1;
2222 }
2223 #endif
2224
2225 /*
2226 * Level and edge triggered IO-APIC interrupts need different handling,
2227 * so we use two separate IRQ descriptors. Edge triggered IRQs can be
2228 * handled with the level-triggered descriptor, but that one has slightly
2229 * more overhead. Level-triggered interrupts cannot be handled with the
2230 * edge-triggered handler, without risking IRQ storms and other ugly
2231 * races.
2232 */
2233
2234 #ifdef CONFIG_SMP
2235
2236 #ifdef CONFIG_INTR_REMAP
2237 static void ir_irq_migration(struct work_struct *work);
2238
2239 static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration);
2240
2241 /*
2242 * Migrate the IO-APIC irq in the presence of intr-remapping.
2243 *
2244 * For edge triggered, irq migration is a simple atomic update (of vector
2245 * and cpu destination) of the IRTE, followed by a flush of the hardware cache.
2246 *
2247 * For level triggered, we also need to modify the io-apic RTE with the updated
2248 * vector information, along with modifying the IRTE with vector and destination.
2249 * So irq migration for level triggered is a little more complex compared to
2250 * edge triggered migration. But the good news is that we use the same algorithm
2251 * for level triggered migration as we have today, the only difference being
2252 * that we now initiate the irq migration from process context instead of
2253 * interrupt context.
2254 *
2255 * In the future, when we do a directed EOI (combined with cpu EOI broadcast
2256 * suppression) to the IO-APIC, level triggered irq migration will also be
2257 * as simple as edge triggered migration and we can do the irq migration
2258 * with a simple atomic update to IO-APIC RTE.
2259 */
2260 static void migrate_ioapic_irq(int irq, cpumask_t mask)
2261 {
2262 struct irq_cfg *cfg;
2263 struct irq_desc *desc;
2264 cpumask_t tmp, cleanup_mask;
2265 struct irte irte;
2266 int modify_ioapic_rte;
2267 unsigned int dest;
2268 unsigned long flags;
2269
2270 cpus_and(tmp, mask, cpu_online_map);
2271 if (cpus_empty(tmp))
2272 return;
2273
2274 if (get_irte(irq, &irte))
2275 return;
2276
2277 if (assign_irq_vector(irq, mask))
2278 return;
2279
2280 cfg = irq_cfg(irq);
2281 cpus_and(tmp, cfg->domain, mask);
2282 dest = cpu_mask_to_apicid(tmp);
2283
2284 desc = irq_to_desc(irq);
2285 modify_ioapic_rte = desc->status & IRQ_LEVEL;
2286 if (modify_ioapic_rte) {
2287 spin_lock_irqsave(&ioapic_lock, flags);
2288 __target_IO_APIC_irq(irq, dest, cfg->vector);
2289 spin_unlock_irqrestore(&ioapic_lock, flags);
2290 }
2291
2292 irte.vector = cfg->vector;
2293 irte.dest_id = IRTE_DEST(dest);
2294
2295 /*
2296 * Modify the IRTE and flush the interrupt entry cache.
2297 */
2298 modify_irte(irq, &irte);
2299
2300 if (cfg->move_in_progress) {
2301 cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
2302 cfg->move_cleanup_count = cpus_weight(cleanup_mask);
2303 send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
2304 cfg->move_in_progress = 0;
2305 }
2306
2307 desc->affinity = mask;
2308 }
2309
2310 static int migrate_irq_remapped_level(int irq)
2311 {
2312 int ret = -1;
2313 struct irq_desc *desc = irq_to_desc(irq);
2314
2315 mask_IO_APIC_irq(irq);
2316
2317 if (io_apic_level_ack_pending(irq)) {
2318 /*
2319 * Interrupt in progress. Migrating irq now will change the
2320 * vector information in the IO-APIC RTE and that will confuse
2321 * the EOI broadcast performed by the cpu.
2322 * So, delay the irq migration to the next instance.
2323 */
2324 schedule_delayed_work(&ir_migration_work, 1);
2325 goto unmask;
2326 }
2327
2328 /* everything is clear, we have right of way */
2329 migrate_ioapic_irq(irq, desc->pending_mask);
2330
2331 ret = 0;
2332 desc->status &= ~IRQ_MOVE_PENDING;
2333 cpus_clear(desc->pending_mask);
2334
2335 unmask:
2336 unmask_IO_APIC_irq(irq);
2337 return ret;
2338 }
2339
2340 static void ir_irq_migration(struct work_struct *work)
2341 {
2342 unsigned int irq;
2343 struct irq_desc *desc;
2344
2345 for_each_irq_desc(irq, desc) {
2346 if (desc->status & IRQ_MOVE_PENDING) {
2347 unsigned long flags;
2348
2349 spin_lock_irqsave(&desc->lock, flags);
2350 if (!desc->chip->set_affinity ||
2351 !(desc->status & IRQ_MOVE_PENDING)) {
2352 desc->status &= ~IRQ_MOVE_PENDING;
2353 spin_unlock_irqrestore(&desc->lock, flags);
2354 continue;
2355 }
2356
2357 desc->chip->set_affinity(irq, desc->pending_mask);
2358 spin_unlock_irqrestore(&desc->lock, flags);
2359 }
2360 }
2361 }
2362
2363 /*
2364 * Migrates the IRQ destination in the process context.
2365 */
2366 static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
2367 {
2368 struct irq_desc *desc = irq_to_desc(irq);
2369
2370 if (desc->status & IRQ_LEVEL) {
2371 desc->status |= IRQ_MOVE_PENDING;
2372 desc->pending_mask = mask;
2373 migrate_irq_remapped_level(irq);
2374 return;
2375 }
2376
2377 migrate_ioapic_irq(irq, mask);
2378 }
2379 #endif
2380
2381 asmlinkage void smp_irq_move_cleanup_interrupt(void)
2382 {
2383 unsigned vector, me;
2384 ack_APIC_irq();
2385 #ifdef CONFIG_X86_64
2386 exit_idle();
2387 #endif
2388 irq_enter();
2389
2390 me = smp_processor_id();
2391 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
2392 unsigned int irq;
2393 struct irq_desc *desc;
2394 struct irq_cfg *cfg;
2395 irq = __get_cpu_var(vector_irq)[vector];
2396
2397 desc = irq_to_desc(irq);
2398 if (!desc)
2399 continue;
2400
2401 cfg = irq_cfg(irq);
2402 spin_lock(&desc->lock);
2403 if (!cfg->move_cleanup_count)
2404 goto unlock;
2405
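/*
 * If this vector is still the irq's live vector on this cpu
 * (current domain and current vector), it is not a stale
 * leftover from the old domain and must not be reclaimed.
 */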
2406 if ((vector == cfg->vector) && cpu_isset(me, cfg->domain))
2407 goto unlock;
2408
2409 __get_cpu_var(vector_irq)[vector] = -1;
2410 cfg->move_cleanup_count--;
2411 unlock:
2412 spin_unlock(&desc->lock);
2413 }
2414
2415 irq_exit();
2416 }
2417
2418 static void irq_complete_move(unsigned int irq)
2419 {
2420 struct irq_cfg *cfg = irq_cfg(irq);
2421 unsigned vector, me;
2422
2423 if (likely(!cfg->move_in_progress))
2424 return;
2425
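/*
 * The irq entry stubs push ~vector into orig_ax (so it cannot be
 * confused with a syscall number); complementing it recovers the
 * vector this interrupt actually arrived on.
 */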
2426 vector = ~get_irq_regs()->orig_ax;
2427 me = smp_processor_id();
2428 if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) {
2429 cpumask_t cleanup_mask;
2430
2431 cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
2432 cfg->move_cleanup_count = cpus_weight(cleanup_mask);
2433 send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
2434 cfg->move_in_progress = 0;
2435 }
2436 }
2437 #else
2438 static inline void irq_complete_move(unsigned int irq) {}
2439 #endif
2440 #ifdef CONFIG_INTR_REMAP
2441 static void ack_x2apic_level(unsigned int irq)
2442 {
2443 ack_x2APIC_irq();
2444 }
2445
2446 static void ack_x2apic_edge(unsigned int irq)
2447 {
2448 ack_x2APIC_irq();
2449 }
2450 #endif
2451
2452 static void ack_apic_edge(unsigned int irq)
2453 {
2454 irq_complete_move(irq);
2455 move_native_irq(irq);
2456 ack_APIC_irq();
2457 }
2458
2459 #ifdef CONFIG_X86_32
2460 atomic_t irq_mis_count;
2461 #endif
2462
2463 static void ack_apic_level(unsigned int irq)
2464 {
2465 #ifdef CONFIG_X86_32
2466 unsigned long v;
2467 int i;
2468 #endif
2469 int do_unmask_irq = 0;
2470
2471 irq_complete_move(irq);
2472 #ifdef CONFIG_GENERIC_PENDING_IRQ
2473 /* If we are moving the irq we need to mask it */
2474 if (unlikely(irq_to_desc(irq)->status & IRQ_MOVE_PENDING)) {
2475 do_unmask_irq = 1;
2476 mask_IO_APIC_irq(irq);
2477 }
2478 #endif
2479
2480 #ifdef CONFIG_X86_32
2481 /*
2482 * It appears there is an erratum which affects at least version 0x11
2483 * of I/O APIC (that's the 82093AA and cores integrated into various
2484 * chipsets). Under certain conditions a level-triggered interrupt is
2485 * erroneously delivered as edge-triggered one but the respective IRR
2486 * bit gets set nevertheless. As a result the I/O unit expects an EOI
2487 * message but it will never arrive and further interrupts are blocked
2488 * from the source. The exact reason is so far unknown, but the
2489 * phenomenon was observed when two consecutive interrupt requests
2490 * from a given source get delivered to the same CPU and the source is
2491 * temporarily disabled in between.
2492 *
2493 * A workaround is to simulate an EOI message manually. We achieve it
2494 * by setting the trigger mode to edge and then to level when the edge
2495 * trigger mode gets detected in the TMR of a local APIC for a
2496 * level-triggered interrupt. We mask the source for the time of the
2497 * operation to prevent an edge-triggered interrupt escaping meanwhile.
2498 * The idea is from Manfred Spraul. --macro
2499 */
2500 i = irq_cfg(irq)->vector;
2501
2502 v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
2503 #endif
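/*
 * A note on the TMR indexing above: the TMR is a 256-bit bitmap
 * (one bit per vector) exposed as eight 32-bit registers on
 * 16-byte strides, so vector i lives in the register at
 * APIC_TMR + (i / 32) * 0x10 - which is what ((i & ~0x1f) >> 1)
 * computes - at bit position (i & 0x1f). E.g. vector 0x61 maps
 * to register APIC_TMR + 0x30, bit 1.
 */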
2504
2505 /*
2506 * We must acknowledge the irq before we move it or the acknowledge will
2507 * not propagate properly.
2508 */
2509 ack_APIC_irq();
2510
2511 /* Now we can move and re-enable the irq */
2512 if (unlikely(do_unmask_irq)) {
2513 /* Only migrate the irq if the ack has been received.
2514 *
2515 * On rare occasions the broadcast level triggered ack gets
2516 * delayed going to ioapics, and if we reprogram the
2517 * vector while Remote IRR is still set the irq will never
2518 * fire again.
2519 *
2520 * To prevent this scenario we read the Remote IRR bit
2521 * of the ioapic. This has two effects.
2522 * - On any sane system the read of the ioapic will
2523 * flush writes (and acks) going to the ioapic from
2524 * this cpu.
2525 * - We get to see if the ACK has actually been delivered.
2526 *
2527 * Based on failed experiments of reprogramming the
2528 * ioapic entry from outside of irq context starting
2529 * with masking the ioapic entry and then polling until
2530 * Remote IRR was clear before reprogramming the
2531 * ioapic, I don't trust the Remote IRR bit to be
2532 * completely accurate.
2533 *
2534 * However there appears to be no other way to plug
2535 * this race, so if the Remote IRR bit is not
2536 * accurate and is causing problems then it is a hardware bug
2537 * and you can go talk to the chipset vendor about it.
2538 */
2539 if (!io_apic_level_ack_pending(irq))
2540 move_masked_irq(irq);
2541 unmask_IO_APIC_irq(irq);
2542 }
2543
2544 #ifdef CONFIG_X86_32
2545 if (!(v & (1 << (i & 0x1f)))) {
2546 atomic_inc(&irq_mis_count);
2547 spin_lock(&ioapic_lock);
2548 __mask_and_edge_IO_APIC_irq(irq);
2549 __unmask_and_level_IO_APIC_irq(irq);
2550 spin_unlock(&ioapic_lock);
2551 }
2552 #endif
2553 }
2554
2555 static struct irq_chip ioapic_chip __read_mostly = {
2556 .name = "IO-APIC",
2557 .startup = startup_ioapic_irq,
2558 .mask = mask_IO_APIC_irq,
2559 .unmask = unmask_IO_APIC_irq,
2560 .ack = ack_apic_edge,
2561 .eoi = ack_apic_level,
2562 #ifdef CONFIG_SMP
2563 .set_affinity = set_ioapic_affinity_irq,
2564 #endif
2565 .retrigger = ioapic_retrigger_irq,
2566 };
2567
2568 #ifdef CONFIG_INTR_REMAP
2569 static struct irq_chip ir_ioapic_chip __read_mostly = {
2570 .name = "IR-IO-APIC",
2571 .startup = startup_ioapic_irq,
2572 .mask = mask_IO_APIC_irq,
2573 .unmask = unmask_IO_APIC_irq,
2574 .ack = ack_x2apic_edge,
2575 .eoi = ack_x2apic_level,
2576 #ifdef CONFIG_SMP
2577 .set_affinity = set_ir_ioapic_affinity_irq,
2578 #endif
2579 .retrigger = ioapic_retrigger_irq,
2580 };
2581 #endif
2582
2583 static inline void init_IO_APIC_traps(void)
2584 {
2585 int irq;
2586 struct irq_desc *desc;
2587 struct irq_cfg *cfg;
2588
2589 /*
2590 * NOTE! The local APIC isn't very good at handling
2591 * multiple interrupts at the same interrupt level.
2592 * As the interrupt level is determined by taking the
2593 * vector number and shifting that right by 4, we
2594 * want to spread these out a bit so that they don't
2595 * all fall in the same interrupt level.
2596 *
2597 * Also, we've got to be careful not to trash gate
2598 * 0x80, because int 0x80 is hm, kind of importantish. ;)
2599 */
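/*
 * For example, with the level being (vector >> 4), vectors
 * 0x30-0x3f all share level 3; allocations therefore want to be
 * spread across levels rather than packed into one of them.
 */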
2600 for_each_irq_cfg(irq, cfg) {
2601 if (IO_APIC_IRQ(irq) && !cfg->vector) {
2602 /*
2603 * Hmm.. We don't have an entry for this,
2604 * so default to an old-fashioned 8259
2605 * interrupt if we can..
2606 */
2607 if (irq < 16)
2608 make_8259A_irq(irq);
2609 else {
2610 desc = irq_to_desc(irq);
2611 /* Strange. Oh, well.. */
2612 desc->chip = &no_irq_chip;
2613 }
2614 }
2615 }
2616 }
2617
2618 /*
2619 * The local APIC irq-chip implementation:
2620 */
2621
2622 static void mask_lapic_irq(unsigned int irq)
2623 {
2624 unsigned long v;
2625
2626 v = apic_read(APIC_LVT0);
2627 apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
2628 }
2629
2630 static void unmask_lapic_irq(unsigned int irq)
2631 {
2632 unsigned long v;
2633
2634 v = apic_read(APIC_LVT0);
2635 apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
2636 }
2637
2638 static void ack_lapic_irq(unsigned int irq)
2639 {
2640 ack_APIC_irq();
2641 }
2642
2643 static struct irq_chip lapic_chip __read_mostly = {
2644 .name = "local-APIC",
2645 .mask = mask_lapic_irq,
2646 .unmask = unmask_lapic_irq,
2647 .ack = ack_lapic_irq,
2648 };
2649
2650 static void lapic_register_intr(int irq)
2651 {
2652 struct irq_desc *desc;
2653
2654 desc = irq_to_desc(irq);
2655 desc->status &= ~IRQ_LEVEL;
2656 set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
2657 "edge");
2658 }
2659
2660 static void __init setup_nmi(void)
2661 {
2662 /*
2663 * Dirty trick to enable the NMI watchdog ...
2664 * We put the 8259A master into AEOI mode and
2665 * unmask LVT0 as NMI on all local APICs.
2666 *
2667 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
2668 * is from Maciej W. Rozycki - so we do not have to EOI from
2669 * the NMI handler or the timer interrupt.
2670 */
2671 apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");
2672
2673 enable_NMI_through_LVT0();
2674
2675 apic_printk(APIC_VERBOSE, " done.\n");
2676 }
2677
2678 /*
2679 * This looks a bit hackish but it's about the only way of sending
2680 * a few INTA cycles to 8259As and any associated glue logic. ICR does
2681 * not support the ExtINT mode, unfortunately. We need to send these
2682 * cycles as some i82489DX-based boards have glue logic that keeps the
2683 * 8259A interrupt line asserted until INTA. --macro
2684 */
2685 static inline void __init unlock_ExtINT_logic(void)
2686 {
2687 int apic, pin, i;
2688 struct IO_APIC_route_entry entry0, entry1;
2689 unsigned char save_control, save_freq_select;
2690
2691 pin = find_isa_irq_pin(8, mp_INT);
2692 if (pin == -1) {
2693 WARN_ON_ONCE(1);
2694 return;
2695 }
2696 apic = find_isa_irq_apic(8, mp_INT);
2697 if (apic == -1) {
2698 WARN_ON_ONCE(1);
2699 return;
2700 }
2701
2702 entry0 = ioapic_read_entry(apic, pin);
2703 clear_IO_APIC_pin(apic, pin);
2704
2705 memset(&entry1, 0, sizeof(entry1));
2706
2707 entry1.dest_mode = 0; /* physical delivery */
2708 entry1.mask = 0; /* unmask IRQ now */
2709 entry1.dest = hard_smp_processor_id();
2710 entry1.delivery_mode = dest_ExtINT;
2711 entry1.polarity = entry0.polarity;
2712 entry1.trigger = 0;
2713 entry1.vector = 0;
2714
2715 ioapic_write_entry(apic, pin, entry1);
2716
2717 save_control = CMOS_READ(RTC_CONTROL);
2718 save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
2719 CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
2720 RTC_FREQ_SELECT);
2721 CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
2722
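/*
 * Rate select 0x6 programs the RTC for 1024 Hz periodic
 * interrupts. The loop below then polls for up to ~1s, and each
 * observed RTC_PF knocks an extra 10 off the counter, so roughly
 * ten periodic interrupts (a burst of INTA cycles) end the wait
 * early.
 */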
2723 i = 100;
2724 while (i-- > 0) {
2725 mdelay(10);
2726 if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
2727 i -= 10;
2728 }
2729
2730 CMOS_WRITE(save_control, RTC_CONTROL);
2731 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
2732 clear_IO_APIC_pin(apic, pin);
2733
2734 ioapic_write_entry(apic, pin, entry0);
2735 }
2736
2737 static int disable_timer_pin_1 __initdata;
2738 /* Actually the next is obsolete, but keep it for paranoid reasons -AK */
2739 static int __init disable_timer_pin_setup(char *arg)
2740 {
2741 disable_timer_pin_1 = 1;
2742 return 0;
2743 }
2744 early_param("disable_timer_pin_1", disable_timer_pin_setup);
2745
2746 int timer_through_8259 __initdata;
2747
2748 /*
2749 * This code may look a bit paranoid, but it's supposed to cooperate with
2750 * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
2751 * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
2752 * fanatically on his truly buggy board.
2753 *
2754 * FIXME: really need to revamp this for all platforms.
2755 */
2756 static inline void __init check_timer(void)
2757 {
2758 struct irq_cfg *cfg = irq_cfg(0);
2759 int apic1, pin1, apic2, pin2;
2760 unsigned long flags;
2761 unsigned int ver;
2762 int no_pin1 = 0;
2763
2764 local_irq_save(flags);
2765
2766 ver = apic_read(APIC_LVR);
2767 ver = GET_APIC_VERSION(ver);
2768
2769 /*
2770 * get/set the timer IRQ vector:
2771 */
2772 disable_8259A_irq(0);
2773 assign_irq_vector(0, TARGET_CPUS);
2774
2775 /*
2776 * As IRQ0 is to be enabled in the 8259A, the virtual
2777 * wire has to be disabled in the local APIC. Also
2778 * timer interrupts need to be acknowledged manually in
2779 * the 8259A for the i82489DX when using the NMI
2780 * watchdog as that APIC treats NMIs as level-triggered.
2781 * The AEOI mode will finish them in the 8259A
2782 * automatically.
2783 */
2784 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
2785 init_8259A(1);
2786 #ifdef CONFIG_X86_32
2787 timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver));
2788 #endif
2789
2790 pin1 = find_isa_irq_pin(0, mp_INT);
2791 apic1 = find_isa_irq_apic(0, mp_INT);
2792 pin2 = ioapic_i8259.pin;
2793 apic2 = ioapic_i8259.apic;
2794
2795 apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
2796 "apic1=%d pin1=%d apic2=%d pin2=%d\n",
2797 cfg->vector, apic1, pin1, apic2, pin2);
2798
2799 /*
2800 * Some BIOS writers are clueless and report the ExtINTA
2801 * I/O APIC input from the cascaded 8259A as the timer
2802 * interrupt input. So just in case, if only one pin
2803 * was found above, try it both directly and through the
2804 * 8259A.
2805 */
2806 if (pin1 == -1) {
2807 #ifdef CONFIG_INTR_REMAP
2808 if (intr_remapping_enabled)
2809 panic("BIOS bug: timer not connected to IO-APIC");
2810 #endif
2811 pin1 = pin2;
2812 apic1 = apic2;
2813 no_pin1 = 1;
2814 } else if (pin2 == -1) {
2815 pin2 = pin1;
2816 apic2 = apic1;
2817 }
2818
2819 if (pin1 != -1) {
2820 /*
2821 * Ok, does IRQ0 through the IOAPIC work?
2822 */
2823 if (no_pin1) {
2824 add_pin_to_irq(0, apic1, pin1);
2825 setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
2826 }
2827 unmask_IO_APIC_irq(0);
2828 if (timer_irq_works()) {
2829 if (nmi_watchdog == NMI_IO_APIC) {
2830 setup_nmi();
2831 enable_8259A_irq(0);
2832 }
2833 if (disable_timer_pin_1 > 0)
2834 clear_IO_APIC_pin(0, pin1);
2835 goto out;
2836 }
2837 #ifdef CONFIG_INTR_REMAP
2838 if (intr_remapping_enabled)
2839 panic("timer doesn't work through Interrupt-remapped IO-APIC");
2840 #endif
2841 clear_IO_APIC_pin(apic1, pin1);
2842 if (!no_pin1)
2843 apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
2844 "8254 timer not connected to IO-APIC\n");
2845
2846 apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
2847 "(IRQ0) through the 8259A ...\n");
2848 apic_printk(APIC_QUIET, KERN_INFO
2849 "..... (found apic %d pin %d) ...\n", apic2, pin2);
2850 /*
2851 * legacy devices should be connected to IO APIC #0
2852 */
2853 replace_pin_at_irq(0, apic1, pin1, apic2, pin2);
2854 setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
2855 unmask_IO_APIC_irq(0);
2856 enable_8259A_irq(0);
2857 if (timer_irq_works()) {
2858 apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
2859 timer_through_8259 = 1;
2860 if (nmi_watchdog == NMI_IO_APIC) {
2861 disable_8259A_irq(0);
2862 setup_nmi();
2863 enable_8259A_irq(0);
2864 }
2865 goto out;
2866 }
2867 /*
2868 * Cleanup, just in case ...
2869 */
2870 disable_8259A_irq(0);
2871 clear_IO_APIC_pin(apic2, pin2);
2872 apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
2873 }
2874
2875 if (nmi_watchdog == NMI_IO_APIC) {
2876 apic_printk(APIC_QUIET, KERN_WARNING "timer doesn't work "
2877 "through the IO-APIC - disabling NMI Watchdog!\n");
2878 nmi_watchdog = NMI_NONE;
2879 }
2880 #ifdef CONFIG_X86_32
2881 timer_ack = 0;
2882 #endif
2883
2884 apic_printk(APIC_QUIET, KERN_INFO
2885 "...trying to set up timer as Virtual Wire IRQ...\n");
2886
2887 lapic_register_intr(0);
2888 apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */
2889 enable_8259A_irq(0);
2890
2891 if (timer_irq_works()) {
2892 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
2893 goto out;
2894 }
2895 disable_8259A_irq(0);
2896 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
2897 apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");
2898
2899 apic_printk(APIC_QUIET, KERN_INFO
2900 "...trying to set up timer as ExtINT IRQ...\n");
2901
2902 init_8259A(0);
2903 make_8259A_irq(0);
2904 apic_write(APIC_LVT0, APIC_DM_EXTINT);
2905
2906 unlock_ExtINT_logic();
2907
2908 if (timer_irq_works()) {
2909 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
2910 goto out;
2911 }
2912 apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
2913 panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
2914 "report. Then try booting with the 'noapic' option.\n");
2915 out:
2916 local_irq_restore(flags);
2917 }
2918
2919 /*
2920 * Traditionally ISA IRQ2 is the cascade IRQ, and is not available
2921 * to devices. However there may be an I/O APIC pin available for
2922 * this interrupt regardless. The pin may be left unconnected, but
2923 * typically it will be reused as an ExtINT cascade interrupt for
2924 * the master 8259A. In the MPS case such a pin will normally be
2925 * reported as an ExtINT interrupt in the MP table. With ACPI
2926 * there is no provision for ExtINT interrupts, and in the absence
2927 * of an override it would be treated as an ordinary ISA I/O APIC
2928 * interrupt, that is edge-triggered and unmasked by default. We
2929 * used to do this, but it caused problems on some systems because
2930 * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using
2931 * the same ExtINT cascade interrupt to drive the local APIC of the
2932 * bootstrap processor. Therefore we refrain from routing IRQ2 to
2933 * the I/O APIC in all cases now. No actual device should request
2934 * it anyway. --macro
2935 */
2936 #define PIC_IRQS (1 << PIC_CASCADE_IR)
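/*
 * PIC_CASCADE_IR is 2 (the slave 8259A cascades into pin 2 of the
 * master), so PIC_IRQS is 0x0004 and io_apic_irqs = ~PIC_IRQS below
 * marks every IRQ except IRQ2 as IO-APIC driven.
 */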
2937
2938 void __init setup_IO_APIC(void)
2939 {
2940
2941 #ifdef CONFIG_X86_32
2942 enable_IO_APIC();
2943 #else
2944 /*
2945 * calling enable_IO_APIC() is moved to setup_local_APIC for BP
2946 */
2947 #endif
2948
2949 io_apic_irqs = ~PIC_IRQS;
2950
2951 apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
2952 /*
2953 * Set up IO-APIC IRQ routing.
2954 */
2955 #ifdef CONFIG_X86_32
2956 if (!acpi_ioapic)
2957 setup_ioapic_ids_from_mpc();
2958 #endif
2959 sync_Arb_IDs();
2960 setup_IO_APIC_irqs();
2961 init_IO_APIC_traps();
2962 check_timer();
2963 }
2964
2965 /*
2966 * Called after all the initialization is done. If we didn't find any
2967 * APIC bugs then we can allow the modify fast path.
2968 */
2969
2970 static int __init io_apic_bug_finalize(void)
2971 {
2972 if (sis_apic_bug == -1)
2973 sis_apic_bug = 0;
2974 return 0;
2975 }
2976
2977 late_initcall(io_apic_bug_finalize);
2978
2979 struct sysfs_ioapic_data {
2980 struct sys_device dev;
2981 struct IO_APIC_route_entry entry[0];
2982 };
2983 static struct sysfs_ioapic_data *mp_ioapic_data[MAX_IO_APICS];
2984
2985 static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
2986 {
2987 struct IO_APIC_route_entry *entry;
2988 struct sysfs_ioapic_data *data;
2989 int i;
2990
2991 data = container_of(dev, struct sysfs_ioapic_data, dev);
2992 entry = data->entry;
2993 for (i = 0; i < nr_ioapic_registers[dev->id]; i++, entry++)
2994 *entry = ioapic_read_entry(dev->id, i);
2995
2996 return 0;
2997 }
2998
2999 static int ioapic_resume(struct sys_device *dev)
3000 {
3001 struct IO_APIC_route_entry *entry;
3002 struct sysfs_ioapic_data *data;
3003 unsigned long flags;
3004 union IO_APIC_reg_00 reg_00;
3005 int i;
3006
3007 data = container_of(dev, struct sysfs_ioapic_data, dev);
3008 entry = data->entry;
3009
3010 spin_lock_irqsave(&ioapic_lock, flags);
3011 reg_00.raw = io_apic_read(dev->id, 0);
3012 if (reg_00.bits.ID != mp_ioapics[dev->id].mp_apicid) {
3013 reg_00.bits.ID = mp_ioapics[dev->id].mp_apicid;
3014 io_apic_write(dev->id, 0, reg_00.raw);
3015 }
3016 spin_unlock_irqrestore(&ioapic_lock, flags);
3017 for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
3018 ioapic_write_entry(dev->id, i, entry[i]);
3019
3020 return 0;
3021 }
3022
3023 static struct sysdev_class ioapic_sysdev_class = {
3024 .name = "ioapic",
3025 .suspend = ioapic_suspend,
3026 .resume = ioapic_resume,
3027 };
3028
3029 static int __init ioapic_init_sysfs(void)
3030 {
3031 struct sys_device *dev;
3032 int i, size, error;
3033
3034 error = sysdev_class_register(&ioapic_sysdev_class);
3035 if (error)
3036 return error;
3037
3038 for (i = 0; i < nr_ioapics; i++) {
3039 size = sizeof(struct sys_device) + nr_ioapic_registers[i]
3040 * sizeof(struct IO_APIC_route_entry);
3041 mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL);
3042 if (!mp_ioapic_data[i]) {
3043 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
3044 continue;
3045 }
3046 dev = &mp_ioapic_data[i]->dev;
3047 dev->id = i;
3048 dev->cls = &ioapic_sysdev_class;
3049 error = sysdev_register(dev);
3050 if (error) {
3051 kfree(mp_ioapic_data[i]);
3052 mp_ioapic_data[i] = NULL;
3053 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
3054 continue;
3055 }
3056 }
3057
3058 return 0;
3059 }
3060
3061 device_initcall(ioapic_init_sysfs);
3062
3063 /*
3064 * Dynamic irq allocate and deallocation
3065 */
3066 unsigned int create_irq_nr(unsigned int irq_want)
3067 {
3068 /* Allocate an unused irq */
3069 unsigned int irq;
3070 unsigned int new;
3071 unsigned long flags;
3072 struct irq_cfg *cfg_new;
3073
3074 #ifndef CONFIG_HAVE_SPARSE_IRQ
3075 irq_want = nr_irqs - 1;
3076 #endif
3077
3078 irq = 0;
3079 spin_lock_irqsave(&vector_lock, flags);
3080 for (new = irq_want; new > 0; new--) {
3081 if (platform_legacy_irq(new))
3082 continue;
3083 cfg_new = irq_cfg(new);
3084 if (cfg_new && cfg_new->vector != 0)
3085 continue;
3086 /* check if need to create one */
3087 if (!cfg_new)
3088 cfg_new = irq_cfg_alloc(new);
3089 if (__assign_irq_vector(new, TARGET_CPUS) == 0)
3090 irq = new;
3091 break;
3092 }
3093 spin_unlock_irqrestore(&vector_lock, flags);
3094
3095 if (irq > 0) {
3096 dynamic_irq_init(irq);
3097 }
3098 return irq;
3099 }
3100
3101 int create_irq(void)
3102 {
3103 int irq;
3104
3105 irq = create_irq_nr(nr_irqs - 1);
3106
3107 if (irq == 0)
3108 irq = -1;
3109
3110 return irq;
3111 }
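/*
 * Illustrative (hypothetical) caller pairing - real users such as the
 * MSI and HT code below supply their own irq_chip:
 *
 *	int irq = create_irq();
 *	if (irq < 0)
 *		return -ENOSPC;
 *	set_irq_chip_and_handler_name(irq, &my_chip, handle_edge_irq, "edge");
 *	...
 *	destroy_irq(irq);
 *
 * 'my_chip' is a placeholder, not a chip defined in this file.
 */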
3112
3113 void destroy_irq(unsigned int irq)
3114 {
3115 unsigned long flags;
3116
3117 dynamic_irq_cleanup(irq);
3118
3119 #ifdef CONFIG_INTR_REMAP
3120 free_irte(irq);
3121 #endif
3122 spin_lock_irqsave(&vector_lock, flags);
3123 __clear_irq_vector(irq);
3124 spin_unlock_irqrestore(&vector_lock, flags);
3125 }
3126
3127 /*
3128 * MSI message composition
3129 */
3130 #ifdef CONFIG_PCI_MSI
3131 static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
3132 {
3133 struct irq_cfg *cfg;
3134 int err;
3135 unsigned dest;
3136 cpumask_t tmp;
3137
3138 tmp = TARGET_CPUS;
3139 err = assign_irq_vector(irq, tmp);
3140 if (err)
3141 return err;
3142
3143 cfg = irq_cfg(irq);
3144 cpus_and(tmp, cfg->domain, tmp);
3145 dest = cpu_mask_to_apicid(tmp);
3146
3147 #ifdef CONFIG_INTR_REMAP
3148 if (irq_remapped(irq)) {
3149 struct irte irte;
3150 int ir_index;
3151 u16 sub_handle;
3152
3153 ir_index = map_irq_to_irte_handle(irq, &sub_handle);
3154 BUG_ON(ir_index == -1);
3155
3156 memset(&irte, 0, sizeof(irte));
3157
3158 irte.present = 1;
3159 irte.dst_mode = INT_DEST_MODE;
3160 irte.trigger_mode = 0; /* edge */
3161 irte.dlvry_mode = INT_DELIVERY_MODE;
3162 irte.vector = cfg->vector;
3163 irte.dest_id = IRTE_DEST(dest);
3164
3165 modify_irte(irq, &irte);
3166
3167 msg->address_hi = MSI_ADDR_BASE_HI;
3168 msg->data = sub_handle;
3169 msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
3170 MSI_ADDR_IR_SHV |
3171 MSI_ADDR_IR_INDEX1(ir_index) |
3172 MSI_ADDR_IR_INDEX2(ir_index);
3173 } else
3174 #endif
3175 {
3176 msg->address_hi = MSI_ADDR_BASE_HI;
3177 msg->address_lo =
3178 MSI_ADDR_BASE_LO |
3179 ((INT_DEST_MODE == 0) ?
3180 MSI_ADDR_DEST_MODE_PHYSICAL:
3181 MSI_ADDR_DEST_MODE_LOGICAL) |
3182 ((INT_DELIVERY_MODE != dest_LowestPrio) ?
3183 MSI_ADDR_REDIRECTION_CPU:
3184 MSI_ADDR_REDIRECTION_LOWPRI) |
3185 MSI_ADDR_DEST_ID(dest);
3186
3187 msg->data =
3188 MSI_DATA_TRIGGER_EDGE |
3189 MSI_DATA_LEVEL_ASSERT |
3190 ((INT_DELIVERY_MODE != dest_LowestPrio) ?
3191 MSI_DATA_DELIVERY_FIXED:
3192 MSI_DATA_DELIVERY_LOWPRI) |
3193 MSI_DATA_VECTOR(cfg->vector);
3194 }
3195 return err;
3196 }
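/*
 * Rough shape of the message composed above in the non-remapped,
 * physical destination case (illustrative, not a normative layout):
 * address_lo is 0xfee00000 | (dest << 12) plus the mode/redirection
 * bits, and data carries the delivery and trigger bits with the
 * vector in the low byte.
 */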
3197
3198 #ifdef CONFIG_SMP
3199 static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
3200 {
3201 struct irq_cfg *cfg;
3202 struct msi_msg msg;
3203 unsigned int dest;
3204 cpumask_t tmp;
3205 struct irq_desc *desc;
3206
3207 cpus_and(tmp, mask, cpu_online_map);
3208 if (cpus_empty(tmp))
3209 return;
3210
3211 if (assign_irq_vector(irq, mask))
3212 return;
3213
3214 cfg = irq_cfg(irq);
3215 cpus_and(tmp, cfg->domain, mask);
3216 dest = cpu_mask_to_apicid(tmp);
3217
3218 read_msi_msg(irq, &msg);
3219
3220 msg.data &= ~MSI_DATA_VECTOR_MASK;
3221 msg.data |= MSI_DATA_VECTOR(cfg->vector);
3222 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
3223 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3224
3225 write_msi_msg(irq, &msg);
3226 desc = irq_to_desc(irq);
3227 desc->affinity = mask;
3228 }
3229
3230 #ifdef CONFIG_INTR_REMAP
3231 /*
3232 * Migrate the MSI irq to another cpumask. This migration is
3233 * done in the process context using interrupt-remapping hardware.
3234 */
3235 static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
3236 {
3237 struct irq_cfg *cfg;
3238 unsigned int dest;
3239 cpumask_t tmp, cleanup_mask;
3240 struct irte irte;
3241 struct irq_desc *desc;
3242
3243 cpus_and(tmp, mask, cpu_online_map);
3244 if (cpus_empty(tmp))
3245 return;
3246
3247 if (get_irte(irq, &irte))
3248 return;
3249
3250 if (assign_irq_vector(irq, mask))
3251 return;
3252
3253 cfg = irq_cfg(irq);
3254 cpus_and(tmp, cfg->domain, mask);
3255 dest = cpu_mask_to_apicid(tmp);
3256
3257 irte.vector = cfg->vector;
3258 irte.dest_id = IRTE_DEST(dest);
3259
3260 /*
3261 * atomically update the IRTE with the new destination and vector.
3262 */
3263 modify_irte(irq, &irte);
3264
3265 /*
3266 * After this point, all the interrupts will start arriving
3267 * at the new destination. So, time to clean up the previous
3268 * vector allocation.
3269 */
3270 if (cfg->move_in_progress) {
3271 cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
3272 cfg->move_cleanup_count = cpus_weight(cleanup_mask);
3273 send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
3274 cfg->move_in_progress = 0;
3275 }
3276
3277 desc = irq_to_desc(irq);
3278 desc->affinity = mask;
3279 }
3280 #endif
3281 #endif /* CONFIG_SMP */
3282
3283 /*
3284 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
3285 * which implement the MSI or MSI-X Capability Structure.
3286 */
3287 static struct irq_chip msi_chip = {
3288 .name = "PCI-MSI",
3289 .unmask = unmask_msi_irq,
3290 .mask = mask_msi_irq,
3291 .ack = ack_apic_edge,
3292 #ifdef CONFIG_SMP
3293 .set_affinity = set_msi_irq_affinity,
3294 #endif
3295 .retrigger = ioapic_retrigger_irq,
3296 };
3297
3298 #ifdef CONFIG_INTR_REMAP
3299 static struct irq_chip msi_ir_chip = {
3300 .name = "IR-PCI-MSI",
3301 .unmask = unmask_msi_irq,
3302 .mask = mask_msi_irq,
3303 .ack = ack_x2apic_edge,
3304 #ifdef CONFIG_SMP
3305 .set_affinity = ir_set_msi_irq_affinity,
3306 #endif
3307 .retrigger = ioapic_retrigger_irq,
3308 };
3309
3310 /*
3311 * Map the PCI dev to the corresponding remapping hardware unit
3312 * and allocate 'nvec' consecutive interrupt-remapping table entries
3313 * in it.
3314 */
3315 static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
3316 {
3317 struct intel_iommu *iommu;
3318 int index;
3319
3320 iommu = map_dev_to_ir(dev);
3321 if (!iommu) {
3322 printk(KERN_ERR
3323 "Unable to map PCI %s to iommu\n", pci_name(dev));
3324 return -ENOENT;
3325 }
3326
3327 index = alloc_irte(iommu, irq, nvec);
3328 if (index < 0) {
3329 printk(KERN_ERR
3330 "Unable to allocate %d IRTE for PCI %s\n", nvec,
3331 pci_name(dev));
3332 return -ENOSPC;
3333 }
3334 return index;
3335 }
3336 #endif
3337
3338 static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc, int irq)
3339 {
3340 int ret;
3341 struct msi_msg msg;
3342
3343 ret = msi_compose_msg(dev, irq, &msg);
3344 if (ret < 0)
3345 return ret;
3346
3347 set_irq_msi(irq, desc);
3348 write_msi_msg(irq, &msg);
3349
3350 #ifdef CONFIG_INTR_REMAP
3351 if (irq_remapped(irq)) {
3352 struct irq_desc *desc = irq_to_desc(irq);
3353 /*
3354 * irq migration in process context
3355 */
3356 desc->status |= IRQ_MOVE_PCNTXT;
3357 set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge");
3358 } else
3359 #endif
3360 set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");
3361
3362 return 0;
3363 }
3364
3365 static unsigned int build_irq_for_pci_dev(struct pci_dev *dev)
3366 {
3367 unsigned int irq;
3368
3369 irq = dev->bus->number;
3370 irq <<= 8;
3371 irq |= dev->devfn;
3372 irq <<= 12;
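/*
 * E.g. bus 0x02, devfn 0x08: ((0x02 << 8) | 0x08) << 12 == 0x208000.
 * The callers below add 0x100 to this value to form the irq_want
 * hint passed to create_irq_nr().
 */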
3373
3374 return irq;
3375 }
3376
3377 int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
3378 {
3379 unsigned int irq;
3380 int ret;
3381 unsigned int irq_want;
3382
3383 irq_want = build_irq_for_pci_dev(dev) + 0x100;
3384
3385 irq = create_irq_nr(irq_want);
3386 if (irq == 0)
3387 return -1;
3388
3389 #ifdef CONFIG_INTR_REMAP
3390 if (!intr_remapping_enabled)
3391 goto no_ir;
3392
3393 ret = msi_alloc_irte(dev, irq, 1);
3394 if (ret < 0)
3395 goto error;
3396 no_ir:
3397 #endif
3398 ret = setup_msi_irq(dev, desc, irq);
3399 if (ret < 0) {
3400 destroy_irq(irq);
3401 return ret;
3402 }
3403 return 0;
3404
3405 #ifdef CONFIG_INTR_REMAP
3406 error:
3407 destroy_irq(irq);
3408 return ret;
3409 #endif
3410 }
3411
3412 int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
3413 {
3414 unsigned int irq;
3415 int ret, sub_handle;
3416 struct msi_desc *desc;
3417 unsigned int irq_want;
3418
3419 #ifdef CONFIG_INTR_REMAP
3420 struct intel_iommu *iommu = NULL;
3421 int index = 0;
3422 #endif
3423
3424 irq_want = build_irq_for_pci_dev(dev) + 0x100;
3425 sub_handle = 0;
3426 list_for_each_entry(desc, &dev->msi_list, list) {
3427 irq = create_irq_nr(irq_want--);
3428 if (irq == 0)
3429 return -1;
3430 #ifdef CONFIG_INTR_REMAP
3431 if (!intr_remapping_enabled)
3432 goto no_ir;
3433
3434 if (!sub_handle) {
3435 /*
3436 * allocate the consecutive block of IRTEs
3437 * for 'nvec'
3438 */
3439 index = msi_alloc_irte(dev, irq, nvec);
3440 if (index < 0) {
3441 ret = index;
3442 goto error;
3443 }
3444 } else {
3445 iommu = map_dev_to_ir(dev);
3446 if (!iommu) {
3447 ret = -ENOENT;
3448 goto error;
3449 }
3450 /*
3451 * set up the mapping between the irq and the IRTE
3452 * base index, with the sub_handle pointing to the
3453 * appropriate interrupt remap table entry.
3454 */
3455 set_irte_irq(irq, iommu, index, sub_handle);
3456 }
3457 no_ir:
3458 #endif
3459 ret = setup_msi_irq(dev, desc, irq);
3460 if (ret < 0)
3461 goto error;
3462 sub_handle++;
3463 }
3464 return 0;
3465
3466 error:
3467 destroy_irq(irq);
3468 return ret;
3469 }
3470
3471 void arch_teardown_msi_irq(unsigned int irq)
3472 {
3473 destroy_irq(irq);
3474 }
3475
3476 #ifdef CONFIG_DMAR
3477 #ifdef CONFIG_SMP
3478 static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
3479 {
3480 struct irq_cfg *cfg;
3481 struct msi_msg msg;
3482 unsigned int dest;
3483 cpumask_t tmp;
3484 struct irq_desc *desc;
3485
3486 cpus_and(tmp, mask, cpu_online_map);
3487 if (cpus_empty(tmp))
3488 return;
3489
3490 if (assign_irq_vector(irq, mask))
3491 return;
3492
3493 cfg = irq_cfg(irq);
3494 cpus_and(tmp, cfg->domain, mask);
3495 dest = cpu_mask_to_apicid(tmp);
3496
3497 dmar_msi_read(irq, &msg);
3498
3499 msg.data &= ~MSI_DATA_VECTOR_MASK;
3500 msg.data |= MSI_DATA_VECTOR(cfg->vector);
3501 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
3502 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3503
3504 dmar_msi_write(irq, &msg);
3505 desc = irq_to_desc(irq);
3506 desc->affinity = mask;
3507 }
3508 #endif /* CONFIG_SMP */
3509
3510 struct irq_chip dmar_msi_type = {
3511 .name = "DMAR_MSI",
3512 .unmask = dmar_msi_unmask,
3513 .mask = dmar_msi_mask,
3514 .ack = ack_apic_edge,
3515 #ifdef CONFIG_SMP
3516 .set_affinity = dmar_msi_set_affinity,
3517 #endif
3518 .retrigger = ioapic_retrigger_irq,
3519 };
3520
3521 int arch_setup_dmar_msi(unsigned int irq)
3522 {
3523 int ret;
3524 struct msi_msg msg;
3525
3526 ret = msi_compose_msg(NULL, irq, &msg);
3527 if (ret < 0)
3528 return ret;
3529 dmar_msi_write(irq, &msg);
3530 set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
3531 "edge");
3532 return 0;
3533 }
3534 #endif
3535
3536 #ifdef CONFIG_HPET_TIMER
3537
3538 #ifdef CONFIG_SMP
3539 static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
3540 {
3541 struct irq_cfg *cfg;
3542 struct irq_desc *desc;
3543 struct msi_msg msg;
3544 unsigned int dest;
3545 cpumask_t tmp;
3546
3547 cpus_and(tmp, mask, cpu_online_map);
3548 if (cpus_empty(tmp))
3549 return;
3550
3551 if (assign_irq_vector(irq, mask))
3552 return;
3553
3554 cfg = irq_cfg(irq);
3555 cpus_and(tmp, cfg->domain, mask);
3556 dest = cpu_mask_to_apicid(tmp);
3557
3558 hpet_msi_read(irq, &msg);
3559
3560 msg.data &= ~MSI_DATA_VECTOR_MASK;
3561 msg.data |= MSI_DATA_VECTOR(cfg->vector);
3562 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
3563 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3564
3565 hpet_msi_write(irq, &msg);
3566 desc = irq_to_desc(irq);
3567 desc->affinity = mask;
3568 }
3569 #endif /* CONFIG_SMP */
3570
3571 struct irq_chip hpet_msi_type = {
3572 .name = "HPET_MSI",
3573 .unmask = hpet_msi_unmask,
3574 .mask = hpet_msi_mask,
3575 .ack = ack_apic_edge,
3576 #ifdef CONFIG_SMP
3577 .set_affinity = hpet_msi_set_affinity,
3578 #endif
3579 .retrigger = ioapic_retrigger_irq,
3580 };
3581
3582 int arch_setup_hpet_msi(unsigned int irq)
3583 {
3584 int ret;
3585 struct msi_msg msg;
3586
3587 ret = msi_compose_msg(NULL, irq, &msg);
3588 if (ret < 0)
3589 return ret;
3590
3591 hpet_msi_write(irq, &msg);
3592 set_irq_chip_and_handler_name(irq, &hpet_msi_type, handle_edge_irq,
3593 "edge");
3594 return 0;
3595 }
3596 #endif
3597
3598 #endif /* CONFIG_PCI_MSI */
3599 /*
3600 * Hypertransport interrupt support
3601 */
3602 #ifdef CONFIG_HT_IRQ
3603
3604 #ifdef CONFIG_SMP
3605
3606 static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
3607 {
3608 struct ht_irq_msg msg;
3609 fetch_ht_irq_msg(irq, &msg);
3610
3611 msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
3612 msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
3613
3614 msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
3615 msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
3616
3617 write_ht_irq_msg(irq, &msg);
3618 }
3619
3620 static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
3621 {
3622 struct irq_cfg *cfg;
3623 unsigned int dest;
3624 cpumask_t tmp;
3625 struct irq_desc *desc;
3626
3627 cpus_and(tmp, mask, cpu_online_map);
3628 if (cpus_empty(tmp))
3629 return;
3630
3631 if (assign_irq_vector(irq, mask))
3632 return;
3633
3634 cfg = irq_cfg(irq);
3635 cpus_and(tmp, cfg->domain, mask);
3636 dest = cpu_mask_to_apicid(tmp);
3637
3638 target_ht_irq(irq, dest, cfg->vector);
3639 desc = irq_to_desc(irq);
3640 desc->affinity = mask;
3641 }
3642 #endif
3643
3644 static struct irq_chip ht_irq_chip = {
3645 .name = "PCI-HT",
3646 .mask = mask_ht_irq,
3647 .unmask = unmask_ht_irq,
3648 .ack = ack_apic_edge,
3649 #ifdef CONFIG_SMP
3650 .set_affinity = set_ht_irq_affinity,
3651 #endif
3652 .retrigger = ioapic_retrigger_irq,
3653 };
3654
3655 int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
3656 {
3657 struct irq_cfg *cfg;
3658 int err;
3659 cpumask_t tmp;
3660
3661 tmp = TARGET_CPUS;
3662 err = assign_irq_vector(irq, tmp);
3663 if (!err) {
3664 struct ht_irq_msg msg;
3665 unsigned dest;
3666
3667 cfg = irq_cfg(irq);
3668 cpus_and(tmp, cfg->domain, tmp);
3669 dest = cpu_mask_to_apicid(tmp);
3670
3671 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
3672
3673 msg.address_lo =
3674 HT_IRQ_LOW_BASE |
3675 HT_IRQ_LOW_DEST_ID(dest) |
3676 HT_IRQ_LOW_VECTOR(cfg->vector) |
3677 ((INT_DEST_MODE == 0) ?
3678 HT_IRQ_LOW_DM_PHYSICAL :
3679 HT_IRQ_LOW_DM_LOGICAL) |
3680 HT_IRQ_LOW_RQEOI_EDGE |
3681 ((INT_DELIVERY_MODE != dest_LowestPrio) ?
3682 HT_IRQ_LOW_MT_FIXED :
3683 HT_IRQ_LOW_MT_ARBITRATED) |
3684 HT_IRQ_LOW_IRQ_MASKED;
3685
3686 write_ht_irq_msg(irq, &msg);
3687
3688 set_irq_chip_and_handler_name(irq, &ht_irq_chip,
3689 handle_edge_irq, "edge");
3690 }
3691 return err;
3692 }
3693 #endif /* CONFIG_HT_IRQ */
3694
3695 int __init io_apic_get_redir_entries(int ioapic)
3696 {
3697 union IO_APIC_reg_01 reg_01;
3698 unsigned long flags;
3699
3700 spin_lock_irqsave(&ioapic_lock, flags);
3701 reg_01.raw = io_apic_read(ioapic, 1);
3702 spin_unlock_irqrestore(&ioapic_lock, flags);
3703
3704 return reg_01.bits.entries;
3705 }
3706
3707 int __init probe_nr_irqs(void)
3708 {
3709 int idx;
3710 int nr = 0;
3711 #ifndef CONFIG_XEN
3712 int nr_min = 32;
3713 #else
3714 int nr_min = NR_IRQS;
3715 #endif
3716
3717 for (idx = 0; idx < nr_ioapics; idx++)
3718 nr += io_apic_get_redir_entries(idx) + 1;
3719
3720 /* double it for hotplug and msi and nmi */
3721 nr <<= 1;
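/*
 * E.g. two IO-APICs with 24 redirection entries each: nr = 48,
 * doubled to 96 here, which already clears the nr_min floor below.
 */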
3722
3723 /* something wrong ? */
3724 if (nr < nr_min)
3725 nr = nr_min;
3726
3727 return nr;
3728 }
3729
3730 /* --------------------------------------------------------------------------
3731 ACPI-based IOAPIC Configuration
3732 -------------------------------------------------------------------------- */
3733
3734 #ifdef CONFIG_ACPI
3735
3736 #ifdef CONFIG_X86_32
3737 int __init io_apic_get_unique_id(int ioapic, int apic_id)
3738 {
3739 union IO_APIC_reg_00 reg_00;
3740 static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
3741 physid_mask_t tmp;
3742 unsigned long flags;
3743 int i = 0;
3744
3745 /*
3746 * The P4 platform supports up to 256 APIC IDs on two separate APIC
3747 * buses (one for LAPICs, one for IOAPICs), where predecessors only
3748 * supported up to 16 on one shared APIC bus.
3749 *
3750 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
3751 * advantage of new APIC bus architecture.
3752 */
3753
3754 if (physids_empty(apic_id_map))
3755 apic_id_map = ioapic_phys_id_map(phys_cpu_present_map);
3756
3757 spin_lock_irqsave(&ioapic_lock, flags);
3758 reg_00.raw = io_apic_read(ioapic, 0);
3759 spin_unlock_irqrestore(&ioapic_lock, flags);
3760
3761 if (apic_id >= get_physical_broadcast()) {
3762 printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
3763 "%d\n", ioapic, apic_id, reg_00.bits.ID);
3764 apic_id = reg_00.bits.ID;
3765 }
3766
3767 /*
3768 * Every APIC in a system must have a unique ID or we get lots of nice
3769 * 'stuck on smp_invalidate_needed IPI wait' messages.
3770 */
3771 if (check_apicid_used(apic_id_map, apic_id)) {
3772
3773 for (i = 0; i < get_physical_broadcast(); i++) {
3774 if (!check_apicid_used(apic_id_map, i))
3775 break;
3776 }
3777
3778 if (i == get_physical_broadcast())
3779 panic("Max apic_id exceeded!\n");
3780
3781 printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
3782 "trying %d\n", ioapic, apic_id, i);
3783
3784 apic_id = i;
3785 }
3786
3787 tmp = apicid_to_cpu_present(apic_id);
3788 physids_or(apic_id_map, apic_id_map, tmp);
3789
3790 if (reg_00.bits.ID != apic_id) {
3791 reg_00.bits.ID = apic_id;
3792
3793 spin_lock_irqsave(&ioapic_lock, flags);
3794 io_apic_write(ioapic, 0, reg_00.raw);
3795 reg_00.raw = io_apic_read(ioapic, 0);
3796 spin_unlock_irqrestore(&ioapic_lock, flags);
3797
3798 /* Sanity check */
3799 if (reg_00.bits.ID != apic_id) {
3800 printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
3801 return -1;
3802 }
3803 }
3804
3805 apic_printk(APIC_VERBOSE, KERN_INFO
3806 "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);
3807
3808 return apic_id;
3809 }
3810
3811 int __init io_apic_get_version(int ioapic)
3812 {
3813 union IO_APIC_reg_01 reg_01;
3814 unsigned long flags;
3815
3816 spin_lock_irqsave(&ioapic_lock, flags);
3817 reg_01.raw = io_apic_read(ioapic, 1);
3818 spin_unlock_irqrestore(&ioapic_lock, flags);
3819
3820 return reg_01.bits.version;
3821 }
3822 #endif
3823
3824 int io_apic_set_pci_routing(int ioapic, int pin, int irq, int triggering, int polarity)
3825 {
3826 if (!IO_APIC_IRQ(irq)) {
3827 apic_printk(APIC_QUIET, KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
3828 ioapic);
3829 return -EINVAL;
3830 }
3831
3832 /*
3833 * IRQs < 16 are already in the irq_2_pin[] map
3834 */
3835 if (irq >= 16)
3836 add_pin_to_irq(irq, ioapic, pin);
3837
3838 setup_IO_APIC_irq(ioapic, pin, irq, triggering, polarity);
3839
3840 return 0;
3841 }
3842
3843
3844 int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
3845 {
3846 int i;
3847
3848 if (skip_ioapic_setup)
3849 return -1;
3850
3851 for (i = 0; i < mp_irq_entries; i++)
3852 if (mp_irqs[i].mp_irqtype == mp_INT &&
3853 mp_irqs[i].mp_srcbusirq == bus_irq)
3854 break;
3855 if (i >= mp_irq_entries)
3856 return -1;
3857
3858 *trigger = irq_trigger(i);
3859 *polarity = irq_polarity(i);
3860 return 0;
3861 }
3862
3863 #endif /* CONFIG_ACPI */
3864
3865 /*
3866 * This function currently is only a helper for the i386 smp boot process,
3867 * where we need to reprogram the ioredtbls to cater for the cpus which have
3868 * come online, so the mask in all cases should simply be TARGET_CPUS
3869 */
3870 #ifdef CONFIG_SMP
3871 void __init setup_ioapic_dest(void)
3872 {
3873 int pin, ioapic, irq, irq_entry;
3874 struct irq_cfg *cfg;
3875
3876 if (skip_ioapic_setup == 1)
3877 return;
3878
3879 for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
3880 for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
3881 irq_entry = find_irq_entry(ioapic, pin, mp_INT);
3882 if (irq_entry == -1)
3883 continue;
3884 irq = pin_2_irq(irq_entry, ioapic, pin);
3885
3886 /* setup_IO_APIC_irqs could fail to get a vector for some device
3887 * when you have too many devices, because at that time only the
3888 * boot cpu is online.
3889 */
3890 cfg = irq_cfg(irq);
3891 if (!cfg->vector)
3892 setup_IO_APIC_irq(ioapic, pin, irq,
3893 irq_trigger(irq_entry),
3894 irq_polarity(irq_entry));
3895 #ifdef CONFIG_INTR_REMAP
3896 else if (intr_remapping_enabled)
3897 set_ir_ioapic_affinity_irq(irq, TARGET_CPUS);
3898 #endif
3899 else
3900 set_ioapic_affinity_irq(irq, TARGET_CPUS);
3901 }
3902
3903 }
3904 }
3905 #endif
3906
3907 #define IOAPIC_RESOURCE_NAME_SIZE 11
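/* 11 bytes: "IOAPIC " (7) + up to three ID digits + terminating NUL */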
3908
3909 static struct resource *ioapic_resources;
3910
3911 static struct resource * __init ioapic_setup_resources(void)
3912 {
3913 unsigned long n;
3914 struct resource *res;
3915 char *mem;
3916 int i;
3917
3918 if (nr_ioapics <= 0)
3919 return NULL;
3920
3921 n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
3922 n *= nr_ioapics;
3923
3924 mem = alloc_bootmem(n);
3925 res = (void *)mem;
3926
3927 if (mem != NULL) {
3928 mem += sizeof(struct resource) * nr_ioapics;
3929
3930 for (i = 0; i < nr_ioapics; i++) {
3931 res[i].name = mem;
3932 res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
3933 sprintf(mem, "IOAPIC %u", i);
3934 mem += IOAPIC_RESOURCE_NAME_SIZE;
3935 }
3936 }
3937
3938 ioapic_resources = res;
3939
3940 return res;
3941 }
3942
3943 void __init ioapic_init_mappings(void)
3944 {
3945 unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
3946 int i;
3947 struct resource *ioapic_res;
3948
3949 ioapic_res = ioapic_setup_resources();
3950 for (i = 0; i < nr_ioapics; i++) {
3951 if (smp_found_config) {
3952 ioapic_phys = mp_ioapics[i].mp_apicaddr;
3953 #ifdef CONFIG_X86_32
3954 if (!ioapic_phys) {
3955 printk(KERN_ERR
3956 "WARNING: bogus zero IO-APIC "
3957 "address found in MPTABLE, "
3958 "disabling IO/APIC support!\n");
3959 smp_found_config = 0;
3960 skip_ioapic_setup = 1;
3961 goto fake_ioapic_page;
3962 }
3963 #endif
3964 } else {
3965 #ifdef CONFIG_X86_32
3966 fake_ioapic_page:
3967 #endif
3968 ioapic_phys = (unsigned long)
3969 alloc_bootmem_pages(PAGE_SIZE);
3970 ioapic_phys = __pa(ioapic_phys);
3971 }
3972 set_fixmap_nocache(idx, ioapic_phys);
3973 apic_printk(APIC_VERBOSE,
3974 "mapped IOAPIC to %08lx (%08lx)\n",
3975 __fix_to_virt(idx), ioapic_phys);
3976 idx++;
3977
3978 if (ioapic_res != NULL) {
3979 ioapic_res->start = ioapic_phys;
3980 ioapic_res->end = ioapic_phys + (4 * 1024) - 1;
3981 ioapic_res++;
3982 }
3983 }
3984 }
3985
3986 static int __init ioapic_insert_resources(void)
3987 {
3988 int i;
3989 struct resource *r = ioapic_resources;
3990
3991 if (!r) {
3992 printk(KERN_ERR
3993 "IO APIC resources could be not be allocated.\n");
3994 return -1;
3995 }
3996
3997 for (i = 0; i < nr_ioapics; i++) {
3998 insert_resource(&iomem_resource, r);
3999 r++;
4000 }
4001
4002 return 0;
4003 }
4004
4005 /* Insert the IO APIC resources after PCI initialization has occurred to handle
4006 * IO APICs that are mapped in on a BAR in PCI space. */
4007 late_initcall(ioapic_insert_resources);