1 /*
2 * Intel IO-APIC support for multi-Pentium hosts.
3 *
4 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
5 *
6 * Many thanks to Stig Venaas for trying out countless experimental
7 * patches and reporting/debugging problems patiently!
8 *
9 * (c) 1999, Multiple IO-APIC support, developed by
10 * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
11 * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
12 * further tested and cleaned up by Zach Brown <zab@redhat.com>
13 * and Ingo Molnar <mingo@redhat.com>
14 *
15 * Fixes
16 * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
17 * thanks to Eric Gilmore
18 * and Rolf G. Tews
19 * for testing these extensively
20 * Paul Diefenbaugh : Added full ACPI support
21 */
22
23 #include <linux/mm.h>
24 #include <linux/interrupt.h>
25 #include <linux/init.h>
26 #include <linux/delay.h>
27 #include <linux/sched.h>
28 #include <linux/pci.h>
29 #include <linux/mc146818rtc.h>
30 #include <linux/compiler.h>
31 #include <linux/acpi.h>
32 #include <linux/module.h>
33 #include <linux/sysdev.h>
34 #include <linux/msi.h>
35 #include <linux/htirq.h>
36 #include <linux/freezer.h>
37 #include <linux/kthread.h>
38 #include <linux/jiffies.h> /* time_after() */
39 #ifdef CONFIG_ACPI
40 #include <acpi/acpi_bus.h>
41 #endif
42 #include <linux/bootmem.h>
43 #include <linux/dmar.h>
44 #include <linux/hpet.h>
45
46 #include <asm/idle.h>
47 #include <asm/io.h>
48 #include <asm/smp.h>
49 #include <asm/cpu.h>
50 #include <asm/desc.h>
51 #include <asm/proto.h>
52 #include <asm/acpi.h>
53 #include <asm/dma.h>
54 #include <asm/timer.h>
55 #include <asm/i8259.h>
56 #include <asm/nmi.h>
57 #include <asm/msidef.h>
58 #include <asm/hypertransport.h>
59 #include <asm/setup.h>
60 #include <asm/irq_remapping.h>
61 #include <asm/hpet.h>
62 #include <asm/hw_irq.h>
63
64 #include <asm/apic.h>
65
66 #define __apicdebuginit(type) static type __init
67 #define for_each_irq_pin(entry, head) \
68 for (entry = head; entry; entry = entry->next)
69
70 /*
71 * Is the SiS APIC rmw bug present ?
72 * -1 = don't know, 0 = no, 1 = yes
73 */
74 int sis_apic_bug = -1;
75
76 static DEFINE_SPINLOCK(ioapic_lock);
77 static DEFINE_SPINLOCK(vector_lock);
78
79 /*
80 * # of IRQ routing registers
81 */
82 int nr_ioapic_registers[MAX_IO_APICS];
83
84 /* I/O APIC entries */
85 struct mpc_ioapic mp_ioapics[MAX_IO_APICS];
86 int nr_ioapics;
87
88 /* IO APIC gsi routing info */
89 struct mp_ioapic_gsi mp_gsi_routing[MAX_IO_APICS];
90
91 /* MP IRQ source entries */
92 struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];
93
94 /* # of MP IRQ source entries */
95 int mp_irq_entries;
96
97 /* Number of legacy interrupts */
98 static int nr_legacy_irqs __read_mostly = NR_IRQS_LEGACY;
99 /* GSI interrupts */
100 static int nr_irqs_gsi = NR_IRQS_LEGACY;
101
102 #if defined (CONFIG_MCA) || defined (CONFIG_EISA)
103 int mp_bus_id_to_type[MAX_MP_BUSSES];
104 #endif
105
106 DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
107
108 int skip_ioapic_setup;
109
110 void arch_disable_smp_support(void)
111 {
112 #ifdef CONFIG_PCI
113 noioapicquirk = 1;
114 noioapicreroute = -1;
115 #endif
116 skip_ioapic_setup = 1;
117 }
118
119 static int __init parse_noapic(char *str)
120 {
121 /* disable IO-APIC */
122 arch_disable_smp_support();
123 return 0;
124 }
125 early_param("noapic", parse_noapic);
126
127 struct irq_pin_list {
128 int apic, pin;
129 struct irq_pin_list *next;
130 };
131
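/*
 * Allocate one irq_pin_list node on the given NUMA node; may be called
 * with interrupts off, hence GFP_ATOMIC. Returns NULL on failure.
 */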
132 static struct irq_pin_list *get_one_free_irq_2_pin(int node)
133 {
134 struct irq_pin_list *pin;
135
136 pin = kzalloc_node(sizeof(*pin), GFP_ATOMIC, node);
137
138 return pin;
139 }
140
141 /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
142 #ifdef CONFIG_SPARSE_IRQ
143 static struct irq_cfg irq_cfgx[] = {
144 #else
145 static struct irq_cfg irq_cfgx[NR_IRQS] = {
146 #endif
147 [0] = { .vector = IRQ0_VECTOR, },
148 [1] = { .vector = IRQ1_VECTOR, },
149 [2] = { .vector = IRQ2_VECTOR, },
150 [3] = { .vector = IRQ3_VECTOR, },
151 [4] = { .vector = IRQ4_VECTOR, },
152 [5] = { .vector = IRQ5_VECTOR, },
153 [6] = { .vector = IRQ6_VECTOR, },
154 [7] = { .vector = IRQ7_VECTOR, },
155 [8] = { .vector = IRQ8_VECTOR, },
156 [9] = { .vector = IRQ9_VECTOR, },
157 [10] = { .vector = IRQ10_VECTOR, },
158 [11] = { .vector = IRQ11_VECTOR, },
159 [12] = { .vector = IRQ12_VECTOR, },
160 [13] = { .vector = IRQ13_VECTOR, },
161 [14] = { .vector = IRQ14_VECTOR, },
162 [15] = { .vector = IRQ15_VECTOR, },
163 };
164
165 void __init io_apic_disable_legacy(void)
166 {
167 nr_legacy_irqs = 0;
168 nr_irqs_gsi = 0;
169 }
170
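/*
 * Early init: attach the statically allocated irq_cfg entries to their
 * irq_desc's and pre-populate the domain masks for the legacy IRQs.
 */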
171 int __init arch_early_irq_init(void)
172 {
173 struct irq_cfg *cfg;
174 struct irq_desc *desc;
175 int count;
176 int node;
177 int i;
178
179 cfg = irq_cfgx;
180 count = ARRAY_SIZE(irq_cfgx);
181 	node = cpu_to_node(boot_cpu_id);
182
183 for (i = 0; i < count; i++) {
184 desc = irq_to_desc(i);
185 desc->chip_data = &cfg[i];
186 zalloc_cpumask_var_node(&cfg[i].domain, GFP_NOWAIT, node);
187 zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_NOWAIT, node);
188 if (i < nr_legacy_irqs)
189 cpumask_setall(cfg[i].domain);
190 }
191
192 return 0;
193 }
194
195 #ifdef CONFIG_SPARSE_IRQ
196 struct irq_cfg *irq_cfg(unsigned int irq)
197 {
198 struct irq_cfg *cfg = NULL;
199 struct irq_desc *desc;
200
201 desc = irq_to_desc(irq);
202 if (desc)
203 cfg = desc->chip_data;
204
205 return cfg;
206 }
207
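/*
 * Allocate an irq_cfg together with its domain/old_domain cpumasks on
 * the given node; on any allocation failure everything is freed and
 * NULL is returned.
 */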
208 static struct irq_cfg *get_one_free_irq_cfg(int node)
209 {
210 struct irq_cfg *cfg;
211
212 cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
213 if (cfg) {
214 if (!zalloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) {
215 kfree(cfg);
216 cfg = NULL;
217 } else if (!zalloc_cpumask_var_node(&cfg->old_domain,
218 GFP_ATOMIC, node)) {
219 free_cpumask_var(cfg->domain);
220 kfree(cfg);
221 cfg = NULL;
222 }
223 }
224
225 return cfg;
226 }
227
228 int arch_init_chip_data(struct irq_desc *desc, int node)
229 {
230 struct irq_cfg *cfg;
231
232 cfg = desc->chip_data;
233 if (!cfg) {
234 desc->chip_data = get_one_free_irq_cfg(node);
235 if (!desc->chip_data) {
236 printk(KERN_ERR "can not alloc irq_cfg\n");
237 BUG_ON(1);
238 }
239 }
240
241 return 0;
242 }
243
244 /* for move_irq_desc */
245 static void
246 init_copy_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg, int node)
247 {
248 struct irq_pin_list *old_entry, *head, *tail, *entry;
249
250 cfg->irq_2_pin = NULL;
251 old_entry = old_cfg->irq_2_pin;
252 if (!old_entry)
253 return;
254
255 entry = get_one_free_irq_2_pin(node);
256 if (!entry)
257 return;
258
259 entry->apic = old_entry->apic;
260 entry->pin = old_entry->pin;
261 head = entry;
262 tail = entry;
263 old_entry = old_entry->next;
264 while (old_entry) {
265 entry = get_one_free_irq_2_pin(node);
266 if (!entry) {
267 entry = head;
268 while (entry) {
269 head = entry->next;
270 kfree(entry);
271 entry = head;
272 }
273 /* still use the old one */
274 return;
275 }
276 entry->apic = old_entry->apic;
277 entry->pin = old_entry->pin;
278 tail->next = entry;
279 tail = entry;
280 old_entry = old_entry->next;
281 }
282
283 tail->next = NULL;
284 cfg->irq_2_pin = head;
285 }
286
287 static void free_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg)
288 {
289 struct irq_pin_list *entry, *next;
290
291 if (old_cfg->irq_2_pin == cfg->irq_2_pin)
292 return;
293
294 entry = old_cfg->irq_2_pin;
295
296 while (entry) {
297 next = entry->next;
298 kfree(entry);
299 entry = next;
300 }
301 old_cfg->irq_2_pin = NULL;
302 }
303
304 void arch_init_copy_chip_data(struct irq_desc *old_desc,
305 struct irq_desc *desc, int node)
306 {
307 struct irq_cfg *cfg;
308 struct irq_cfg *old_cfg;
309
310 cfg = get_one_free_irq_cfg(node);
311
312 if (!cfg)
313 return;
314
315 desc->chip_data = cfg;
316
317 old_cfg = old_desc->chip_data;
318
319 memcpy(cfg, old_cfg, sizeof(struct irq_cfg));
320
321 init_copy_irq_2_pin(old_cfg, cfg, node);
322 }
323
324 static void free_irq_cfg(struct irq_cfg *old_cfg)
325 {
326 kfree(old_cfg);
327 }
328
329 void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc)
330 {
331 struct irq_cfg *old_cfg, *cfg;
332
333 old_cfg = old_desc->chip_data;
334 cfg = desc->chip_data;
335
336 if (old_cfg == cfg)
337 return;
338
339 if (old_cfg) {
340 free_irq_2_pin(old_cfg, cfg);
341 free_irq_cfg(old_cfg);
342 old_desc->chip_data = NULL;
343 }
344 }
345 /* end for move_irq_desc */
346
347 #else
348 struct irq_cfg *irq_cfg(unsigned int irq)
349 {
350 return irq < nr_irqs ? irq_cfgx + irq : NULL;
351 }
352
353 #endif
354
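/*
 * Memory-mapped layout of the indirect I/O APIC register window:
 * write the register number to 'index', then read/write 'data';
 * 'eoi' is the directed-EOI register found on newer I/O APICs.
 */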
355 struct io_apic {
356 unsigned int index;
357 unsigned int unused[3];
358 unsigned int data;
359 unsigned int unused2[11];
360 unsigned int eoi;
361 };
362
363 static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
364 {
365 return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
366 + (mp_ioapics[idx].apicaddr & ~PAGE_MASK);
367 }
368
369 static inline void io_apic_eoi(unsigned int apic, unsigned int vector)
370 {
371 struct io_apic __iomem *io_apic = io_apic_base(apic);
372 writel(vector, &io_apic->eoi);
373 }
374
375 static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
376 {
377 struct io_apic __iomem *io_apic = io_apic_base(apic);
378 writel(reg, &io_apic->index);
379 return readl(&io_apic->data);
380 }
381
382 static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
383 {
384 struct io_apic __iomem *io_apic = io_apic_base(apic);
385 writel(reg, &io_apic->index);
386 writel(value, &io_apic->data);
387 }
388
389 /*
390 * Re-write a value: to be used for read-modify-write
391 * cycles where the read already set up the index register.
392 *
393 * Older SiS APIC requires we rewrite the index register
394 */
395 static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
396 {
397 struct io_apic __iomem *io_apic = io_apic_base(apic);
398
399 if (sis_apic_bug)
400 writel(reg, &io_apic->index);
401 writel(value, &io_apic->data);
402 }
403
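/*
 * Check whether any pin routed to this irq still has its Remote IRR bit
 * set, i.e. a level-triggered interrupt that has not been acked yet.
 */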
404 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
405 {
406 struct irq_pin_list *entry;
407 unsigned long flags;
408
409 spin_lock_irqsave(&ioapic_lock, flags);
410 for_each_irq_pin(entry, cfg->irq_2_pin) {
411 unsigned int reg;
412 int pin;
413
414 pin = entry->pin;
415 reg = io_apic_read(entry->apic, 0x10 + pin*2);
416 /* Is the remote IRR bit set? */
417 if (reg & IO_APIC_REDIR_REMOTE_IRR) {
418 spin_unlock_irqrestore(&ioapic_lock, flags);
419 return true;
420 }
421 }
422 spin_unlock_irqrestore(&ioapic_lock, flags);
423
424 return false;
425 }
426
427 union entry_union {
428 struct { u32 w1, w2; };
429 struct IO_APIC_route_entry entry;
430 };
431
432 static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
433 {
434 union entry_union eu;
435 unsigned long flags;
436 spin_lock_irqsave(&ioapic_lock, flags);
437 eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
438 eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
439 spin_unlock_irqrestore(&ioapic_lock, flags);
440 return eu.entry;
441 }
442
443 /*
444 * When we write a new IO APIC routing entry, we need to write the high
445 * word first! If the mask bit in the low word is clear, we will enable
446 * the interrupt, and we need to make sure the entry is fully populated
447 * before that happens.
448 */
449 static void
450 __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
451 {
452 union entry_union eu = {{0, 0}};
453
454 eu.entry = e;
455 io_apic_write(apic, 0x11 + 2*pin, eu.w2);
456 io_apic_write(apic, 0x10 + 2*pin, eu.w1);
457 }
458
459 void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
460 {
461 unsigned long flags;
462 spin_lock_irqsave(&ioapic_lock, flags);
463 __ioapic_write_entry(apic, pin, e);
464 spin_unlock_irqrestore(&ioapic_lock, flags);
465 }
466
467 /*
468 * When we mask an IO APIC routing entry, we need to write the low
469 * word first, in order to set the mask bit before we change the
470 * high bits!
471 */
472 static void ioapic_mask_entry(int apic, int pin)
473 {
474 unsigned long flags;
475 union entry_union eu = { .entry.mask = 1 };
476
477 spin_lock_irqsave(&ioapic_lock, flags);
478 io_apic_write(apic, 0x10 + 2*pin, eu.w1);
479 io_apic_write(apic, 0x11 + 2*pin, eu.w2);
480 spin_unlock_irqrestore(&ioapic_lock, flags);
481 }
482
483 /*
484 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
485 * shared ISA-space IRQs, so we have to support them. We are super
486 * fast in the common case, and fast for shared ISA-space IRQs.
487 */
488 static int
489 add_pin_to_irq_node_nopanic(struct irq_cfg *cfg, int node, int apic, int pin)
490 {
491 struct irq_pin_list **last, *entry;
492
493 /* don't allow duplicates */
494 last = &cfg->irq_2_pin;
495 for_each_irq_pin(entry, cfg->irq_2_pin) {
496 if (entry->apic == apic && entry->pin == pin)
497 return 0;
498 last = &entry->next;
499 }
500
501 entry = get_one_free_irq_2_pin(node);
502 if (!entry) {
503 printk(KERN_ERR "can not alloc irq_pin_list (%d,%d,%d)\n",
504 node, apic, pin);
505 return -ENOMEM;
506 }
507 entry->apic = apic;
508 entry->pin = pin;
509
510 *last = entry;
511 return 0;
512 }
513
514 static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
515 {
516 if (add_pin_to_irq_node_nopanic(cfg, node, apic, pin))
517 panic("IO-APIC: failed to add irq-pin. Can not proceed\n");
518 }
519
520 /*
521 * Reroute an IRQ to a different pin.
522 */
523 static void __init replace_pin_at_irq_node(struct irq_cfg *cfg, int node,
524 int oldapic, int oldpin,
525 int newapic, int newpin)
526 {
527 struct irq_pin_list *entry;
528
529 for_each_irq_pin(entry, cfg->irq_2_pin) {
530 if (entry->apic == oldapic && entry->pin == oldpin) {
531 entry->apic = newapic;
532 entry->pin = newpin;
533 /* every one is different, right? */
534 return;
535 }
536 }
537
538 /* old apic/pin didn't exist, so just add new ones */
539 add_pin_to_irq_node(cfg, node, newapic, newpin);
540 }
541
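/*
 * Read-modify-write helper for a single redirection entry's low word:
 * apply the and/or masks and optionally run a 'final' callback (e.g. a
 * dummy read to sync the I/O APIC).
 */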
542 static void __io_apic_modify_irq(struct irq_pin_list *entry,
543 int mask_and, int mask_or,
544 void (*final)(struct irq_pin_list *entry))
545 {
546 unsigned int reg, pin;
547
548 pin = entry->pin;
549 reg = io_apic_read(entry->apic, 0x10 + pin * 2);
550 reg &= mask_and;
551 reg |= mask_or;
552 io_apic_modify(entry->apic, 0x10 + pin * 2, reg);
553 if (final)
554 final(entry);
555 }
556
557 static void io_apic_modify_irq(struct irq_cfg *cfg,
558 int mask_and, int mask_or,
559 void (*final)(struct irq_pin_list *entry))
560 {
561 struct irq_pin_list *entry;
562
563 for_each_irq_pin(entry, cfg->irq_2_pin)
564 __io_apic_modify_irq(entry, mask_and, mask_or, final);
565 }
566
567 static void __mask_and_edge_IO_APIC_irq(struct irq_pin_list *entry)
568 {
569 __io_apic_modify_irq(entry, ~IO_APIC_REDIR_LEVEL_TRIGGER,
570 IO_APIC_REDIR_MASKED, NULL);
571 }
572
573 static void __unmask_and_level_IO_APIC_irq(struct irq_pin_list *entry)
574 {
575 __io_apic_modify_irq(entry, ~IO_APIC_REDIR_MASKED,
576 IO_APIC_REDIR_LEVEL_TRIGGER, NULL);
577 }
578
579 static void __unmask_IO_APIC_irq(struct irq_cfg *cfg)
580 {
581 io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL);
582 }
583
584 static void io_apic_sync(struct irq_pin_list *entry)
585 {
586 /*
587 * Synchronize the IO-APIC and the CPU by doing
588 * a dummy read from the IO-APIC
589 */
590 struct io_apic __iomem *io_apic;
591 io_apic = io_apic_base(entry->apic);
592 readl(&io_apic->data);
593 }
594
595 static void __mask_IO_APIC_irq(struct irq_cfg *cfg)
596 {
597 io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
598 }
599
600 static void mask_IO_APIC_irq_desc(struct irq_desc *desc)
601 {
602 struct irq_cfg *cfg = desc->chip_data;
603 unsigned long flags;
604
605 BUG_ON(!cfg);
606
607 spin_lock_irqsave(&ioapic_lock, flags);
608 __mask_IO_APIC_irq(cfg);
609 spin_unlock_irqrestore(&ioapic_lock, flags);
610 }
611
612 static void unmask_IO_APIC_irq_desc(struct irq_desc *desc)
613 {
614 struct irq_cfg *cfg = desc->chip_data;
615 unsigned long flags;
616
617 spin_lock_irqsave(&ioapic_lock, flags);
618 __unmask_IO_APIC_irq(cfg);
619 spin_unlock_irqrestore(&ioapic_lock, flags);
620 }
621
622 static void mask_IO_APIC_irq(unsigned int irq)
623 {
624 struct irq_desc *desc = irq_to_desc(irq);
625
626 mask_IO_APIC_irq_desc(desc);
627 }
628 static void unmask_IO_APIC_irq(unsigned int irq)
629 {
630 struct irq_desc *desc = irq_to_desc(irq);
631
632 unmask_IO_APIC_irq_desc(desc);
633 }
634
635 static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
636 {
637 struct IO_APIC_route_entry entry;
638
639 /* Check delivery_mode to be sure we're not clearing an SMI pin */
640 entry = ioapic_read_entry(apic, pin);
641 if (entry.delivery_mode == dest_SMI)
642 return;
643 /*
644 * Disable it in the IO-APIC irq-routing table:
645 */
646 ioapic_mask_entry(apic, pin);
647 }
648
649 static void clear_IO_APIC(void)
650 {
651 int apic, pin;
652
653 for (apic = 0; apic < nr_ioapics; apic++)
654 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
655 clear_IO_APIC_pin(apic, pin);
656 }
657
658 #ifdef CONFIG_X86_32
659 /*
660 * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
661 * specific CPU-side IRQs.
662 */
663
664 #define MAX_PIRQS 8
665 static int pirq_entries[MAX_PIRQS] = {
666 [0 ... MAX_PIRQS - 1] = -1
667 };
668
669 static int __init ioapic_pirq_setup(char *str)
670 {
671 int i, max;
672 int ints[MAX_PIRQS+1];
673
674 get_options(str, ARRAY_SIZE(ints), ints);
675
676 apic_printk(APIC_VERBOSE, KERN_INFO
677 "PIRQ redirection, working around broken MP-BIOS.\n");
678 max = MAX_PIRQS;
679 if (ints[0] < MAX_PIRQS)
680 max = ints[0];
681
682 for (i = 0; i < max; i++) {
683 apic_printk(APIC_VERBOSE, KERN_DEBUG
684 "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
685 /*
686 * PIRQs are mapped upside down, usually.
687 */
688 pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
689 }
690 return 1;
691 }
692
693 __setup("pirq=", ioapic_pirq_setup);
694 #endif /* CONFIG_X86_32 */
695
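/*
 * Allocate one saved-routing-entry array per I/O APIC, sized to its pin
 * count, for save_IO_APIC_setup()/restore_IO_APIC_setup(). Returns NULL
 * if any allocation fails.
 */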
696 struct IO_APIC_route_entry **alloc_ioapic_entries(void)
697 {
698 int apic;
699 struct IO_APIC_route_entry **ioapic_entries;
700
701 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
702 GFP_ATOMIC);
703 if (!ioapic_entries)
704 		return NULL;
705
706 for (apic = 0; apic < nr_ioapics; apic++) {
707 ioapic_entries[apic] =
708 kzalloc(sizeof(struct IO_APIC_route_entry) *
709 nr_ioapic_registers[apic], GFP_ATOMIC);
710 if (!ioapic_entries[apic])
711 goto nomem;
712 }
713
714 return ioapic_entries;
715
716 nomem:
717 while (--apic >= 0)
718 kfree(ioapic_entries[apic]);
719 kfree(ioapic_entries);
720
721 	return NULL;
722 }
723
724 /*
725 * Saves all the IO-APIC RTE's
726 */
727 int save_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries)
728 {
729 int apic, pin;
730
731 if (!ioapic_entries)
732 return -ENOMEM;
733
734 for (apic = 0; apic < nr_ioapics; apic++) {
735 if (!ioapic_entries[apic])
736 return -ENOMEM;
737
738 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
739 ioapic_entries[apic][pin] =
740 ioapic_read_entry(apic, pin);
741 }
742
743 return 0;
744 }
745
746 /*
747 * Mask all IO APIC entries.
748 */
749 void mask_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries)
750 {
751 int apic, pin;
752
753 if (!ioapic_entries)
754 return;
755
756 for (apic = 0; apic < nr_ioapics; apic++) {
757 if (!ioapic_entries[apic])
758 break;
759
760 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
761 struct IO_APIC_route_entry entry;
762
763 entry = ioapic_entries[apic][pin];
764 if (!entry.mask) {
765 entry.mask = 1;
766 ioapic_write_entry(apic, pin, entry);
767 }
768 }
769 }
770 }
771
772 /*
773 * Restore IO APIC entries which was saved in ioapic_entries.
774 */
775 int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries)
776 {
777 int apic, pin;
778
779 if (!ioapic_entries)
780 return -ENOMEM;
781
782 for (apic = 0; apic < nr_ioapics; apic++) {
783 if (!ioapic_entries[apic])
784 return -ENOMEM;
785
786 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
787 ioapic_write_entry(apic, pin,
788 ioapic_entries[apic][pin]);
789 }
790 return 0;
791 }
792
793 void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries)
794 {
795 int apic;
796
797 for (apic = 0; apic < nr_ioapics; apic++)
798 kfree(ioapic_entries[apic]);
799
800 kfree(ioapic_entries);
801 }
802
803 /*
804 * Find the IRQ entry number of a certain pin.
805 */
806 static int find_irq_entry(int apic, int pin, int type)
807 {
808 int i;
809
810 for (i = 0; i < mp_irq_entries; i++)
811 if (mp_irqs[i].irqtype == type &&
812 (mp_irqs[i].dstapic == mp_ioapics[apic].apicid ||
813 mp_irqs[i].dstapic == MP_APIC_ALL) &&
814 mp_irqs[i].dstirq == pin)
815 return i;
816
817 return -1;
818 }
819
820 /*
821 * Find the pin to which IRQ[irq] (ISA) is connected
822 */
823 static int __init find_isa_irq_pin(int irq, int type)
824 {
825 int i;
826
827 for (i = 0; i < mp_irq_entries; i++) {
828 int lbus = mp_irqs[i].srcbus;
829
830 if (test_bit(lbus, mp_bus_not_pci) &&
831 (mp_irqs[i].irqtype == type) &&
832 (mp_irqs[i].srcbusirq == irq))
833
834 return mp_irqs[i].dstirq;
835 }
836 return -1;
837 }
838
839 static int __init find_isa_irq_apic(int irq, int type)
840 {
841 int i;
842
843 for (i = 0; i < mp_irq_entries; i++) {
844 int lbus = mp_irqs[i].srcbus;
845
846 if (test_bit(lbus, mp_bus_not_pci) &&
847 (mp_irqs[i].irqtype == type) &&
848 (mp_irqs[i].srcbusirq == irq))
849 break;
850 }
851 if (i < mp_irq_entries) {
852 int apic;
853 		for (apic = 0; apic < nr_ioapics; apic++) {
854 if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic)
855 return apic;
856 }
857 }
858
859 return -1;
860 }
861
862 #if defined(CONFIG_EISA) || defined(CONFIG_MCA)
863 /*
864 * EISA Edge/Level control register, ELCR
865 */
866 static int EISA_ELCR(unsigned int irq)
867 {
868 if (irq < nr_legacy_irqs) {
869 unsigned int port = 0x4d0 + (irq >> 3);
870 return (inb(port) >> (irq & 7)) & 1;
871 }
872 apic_printk(APIC_VERBOSE, KERN_INFO
873 "Broken MPtable reports ISA irq %d\n", irq);
874 return 0;
875 }
876
877 #endif
878
879 /* ISA interrupts are always polarity zero edge triggered,
880 * when listed as conforming in the MP table. */
881
882 #define default_ISA_trigger(idx) (0)
883 #define default_ISA_polarity(idx) (0)
884
885 /* EISA interrupts are always polarity zero and can be edge or level
886 * trigger depending on the ELCR value. If an interrupt is listed as
887 * EISA conforming in the MP table, that means its trigger type must
888 * be read in from the ELCR */
889
890 #define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].srcbusirq))
891 #define default_EISA_polarity(idx) default_ISA_polarity(idx)
892
893 /* PCI interrupts are always polarity one level triggered,
894 * when listed as conforming in the MP table. */
895
896 #define default_PCI_trigger(idx) (1)
897 #define default_PCI_polarity(idx) (1)
898
899 /* MCA interrupts are always polarity zero level triggered,
900 * when listed as conforming in the MP table. */
901
902 #define default_MCA_trigger(idx) (1)
903 #define default_MCA_polarity(idx) default_ISA_polarity(idx)
904
905 static int MPBIOS_polarity(int idx)
906 {
907 int bus = mp_irqs[idx].srcbus;
908 int polarity;
909
910 /*
911 * Determine IRQ line polarity (high active or low active):
912 */
913 switch (mp_irqs[idx].irqflag & 3)
914 {
915 case 0: /* conforms, ie. bus-type dependent polarity */
916 if (test_bit(bus, mp_bus_not_pci))
917 polarity = default_ISA_polarity(idx);
918 else
919 polarity = default_PCI_polarity(idx);
920 break;
921 case 1: /* high active */
922 {
923 polarity = 0;
924 break;
925 }
926 case 2: /* reserved */
927 {
928 printk(KERN_WARNING "broken BIOS!!\n");
929 polarity = 1;
930 break;
931 }
932 case 3: /* low active */
933 {
934 polarity = 1;
935 break;
936 }
937 default: /* invalid */
938 {
939 printk(KERN_WARNING "broken BIOS!!\n");
940 polarity = 1;
941 break;
942 }
943 }
944 return polarity;
945 }
946
947 static int MPBIOS_trigger(int idx)
948 {
949 int bus = mp_irqs[idx].srcbus;
950 int trigger;
951
952 /*
953 * Determine IRQ trigger mode (edge or level sensitive):
954 */
955 	switch ((mp_irqs[idx].irqflag >> 2) & 3)
956 {
957 case 0: /* conforms, ie. bus-type dependent */
958 if (test_bit(bus, mp_bus_not_pci))
959 trigger = default_ISA_trigger(idx);
960 else
961 trigger = default_PCI_trigger(idx);
962 #if defined(CONFIG_EISA) || defined(CONFIG_MCA)
963 switch (mp_bus_id_to_type[bus]) {
964 case MP_BUS_ISA: /* ISA pin */
965 {
966 /* set before the switch */
967 break;
968 }
969 case MP_BUS_EISA: /* EISA pin */
970 {
971 trigger = default_EISA_trigger(idx);
972 break;
973 }
974 case MP_BUS_PCI: /* PCI pin */
975 {
976 /* set before the switch */
977 break;
978 }
979 case MP_BUS_MCA: /* MCA pin */
980 {
981 trigger = default_MCA_trigger(idx);
982 break;
983 }
984 default:
985 {
986 printk(KERN_WARNING "broken BIOS!!\n");
987 trigger = 1;
988 break;
989 }
990 }
991 #endif
992 break;
993 case 1: /* edge */
994 {
995 trigger = 0;
996 break;
997 }
998 case 2: /* reserved */
999 {
1000 printk(KERN_WARNING "broken BIOS!!\n");
1001 trigger = 1;
1002 break;
1003 }
1004 case 3: /* level */
1005 {
1006 trigger = 1;
1007 break;
1008 }
1009 default: /* invalid */
1010 {
1011 printk(KERN_WARNING "broken BIOS!!\n");
1012 trigger = 0;
1013 break;
1014 }
1015 }
1016 return trigger;
1017 }
1018
1019 static inline int irq_polarity(int idx)
1020 {
1021 return MPBIOS_polarity(idx);
1022 }
1023
1024 static inline int irq_trigger(int idx)
1025 {
1026 return MPBIOS_trigger(idx);
1027 }
1028
1029 int (*ioapic_renumber_irq)(int ioapic, int irq);
1030 static int pin_2_irq(int idx, int apic, int pin)
1031 {
1032 int irq, i;
1033 int bus = mp_irqs[idx].srcbus;
1034
1035 /*
1036 * Debugging check, we are in big trouble if this message pops up!
1037 */
1038 if (mp_irqs[idx].dstirq != pin)
1039 printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
1040
1041 if (test_bit(bus, mp_bus_not_pci)) {
1042 irq = mp_irqs[idx].srcbusirq;
1043 } else {
1044 /*
1045 * PCI IRQs are mapped in order
1046 */
1047 i = irq = 0;
1048 while (i < apic)
1049 irq += nr_ioapic_registers[i++];
1050 irq += pin;
1051 /*
1052 * For MPS mode, so far only needed by ES7000 platform
1053 */
1054 if (ioapic_renumber_irq)
1055 irq = ioapic_renumber_irq(apic, irq);
1056 }
1057
1058 #ifdef CONFIG_X86_32
1059 /*
1060 * PCI IRQ command line redirection. Yes, limits are hardcoded.
1061 */
1062 if ((pin >= 16) && (pin <= 23)) {
1063 if (pirq_entries[pin-16] != -1) {
1064 if (!pirq_entries[pin-16]) {
1065 apic_printk(APIC_VERBOSE, KERN_DEBUG
1066 "disabling PIRQ%d\n", pin-16);
1067 } else {
1068 irq = pirq_entries[pin-16];
1069 apic_printk(APIC_VERBOSE, KERN_DEBUG
1070 "using PIRQ%d -> IRQ %d\n",
1071 pin-16, irq);
1072 }
1073 }
1074 }
1075 #endif
1076
1077 return irq;
1078 }
1079
1080 /*
1081 * Find a specific PCI IRQ entry.
1082 * Not an __init, possibly needed by modules
1083 */
1084 int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
1085 struct io_apic_irq_attr *irq_attr)
1086 {
1087 int apic, i, best_guess = -1;
1088
1089 apic_printk(APIC_DEBUG,
1090 "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
1091 bus, slot, pin);
1092 if (test_bit(bus, mp_bus_not_pci)) {
1093 apic_printk(APIC_VERBOSE,
1094 "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
1095 return -1;
1096 }
1097 for (i = 0; i < mp_irq_entries; i++) {
1098 int lbus = mp_irqs[i].srcbus;
1099
1100 for (apic = 0; apic < nr_ioapics; apic++)
1101 if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic ||
1102 mp_irqs[i].dstapic == MP_APIC_ALL)
1103 break;
1104
1105 if (!test_bit(lbus, mp_bus_not_pci) &&
1106 !mp_irqs[i].irqtype &&
1107 (bus == lbus) &&
1108 (slot == ((mp_irqs[i].srcbusirq >> 2) & 0x1f))) {
1109 int irq = pin_2_irq(i, apic, mp_irqs[i].dstirq);
1110
1111 if (!(apic || IO_APIC_IRQ(irq)))
1112 continue;
1113
1114 if (pin == (mp_irqs[i].srcbusirq & 3)) {
1115 set_io_apic_irq_attr(irq_attr, apic,
1116 mp_irqs[i].dstirq,
1117 irq_trigger(i),
1118 irq_polarity(i));
1119 return irq;
1120 }
1121 /*
1122 * Use the first all-but-pin matching entry as a
1123 * best-guess fuzzy result for broken mptables.
1124 */
1125 if (best_guess < 0) {
1126 set_io_apic_irq_attr(irq_attr, apic,
1127 mp_irqs[i].dstirq,
1128 irq_trigger(i),
1129 irq_polarity(i));
1130 best_guess = irq;
1131 }
1132 }
1133 }
1134 return best_guess;
1135 }
1136 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
1137
1138 void lock_vector_lock(void)
1139 {
1140 	/* Used to ensure the online set of cpus does not change
1141 	 * during assign_irq_vector.
1142 	 */
1143 spin_lock(&vector_lock);
1144 }
1145
1146 void unlock_vector_lock(void)
1147 {
1148 spin_unlock(&vector_lock);
1149 }
1150
1151 static int
1152 __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
1153 {
1154 /*
1155 * NOTE! The local APIC isn't very good at handling
1156 * multiple interrupts at the same interrupt level.
1157 * As the interrupt level is determined by taking the
1158 * vector number and shifting that right by 4, we
1159 * want to spread these out a bit so that they don't
1160 * all fall in the same interrupt level.
1161 *
1162 * Also, we've got to be careful not to trash gate
1163 * 0x80, because int 0x80 is hm, kind of importantish. ;)
1164 */
1165 static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
1166 unsigned int old_vector;
1167 int cpu, err;
1168 cpumask_var_t tmp_mask;
1169
1170 if (cfg->move_in_progress)
1171 return -EBUSY;
1172
1173 if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
1174 return -ENOMEM;
1175
1176 old_vector = cfg->vector;
1177 if (old_vector) {
1178 cpumask_and(tmp_mask, mask, cpu_online_mask);
1179 cpumask_and(tmp_mask, cfg->domain, tmp_mask);
1180 if (!cpumask_empty(tmp_mask)) {
1181 free_cpumask_var(tmp_mask);
1182 return 0;
1183 }
1184 }
1185
1186 /* Only try and allocate irqs on cpus that are present */
1187 err = -ENOSPC;
1188 for_each_cpu_and(cpu, mask, cpu_online_mask) {
1189 int new_cpu;
1190 int vector, offset;
1191
1192 apic->vector_allocation_domain(cpu, tmp_mask);
1193
1194 vector = current_vector;
1195 offset = current_offset;
1196 next:
1197 vector += 8;
1198 if (vector >= first_system_vector) {
1199 /* If out of vectors on large boxen, must share them. */
1200 offset = (offset + 1) % 8;
1201 vector = FIRST_DEVICE_VECTOR + offset;
1202 }
1203 if (unlikely(current_vector == vector))
1204 continue;
1205
1206 if (test_bit(vector, used_vectors))
1207 goto next;
1208
1209 for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
1210 if (per_cpu(vector_irq, new_cpu)[vector] != -1)
1211 goto next;
1212 /* Found one! */
1213 current_vector = vector;
1214 current_offset = offset;
1215 if (old_vector) {
1216 cfg->move_in_progress = 1;
1217 cpumask_copy(cfg->old_domain, cfg->domain);
1218 }
1219 for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
1220 per_cpu(vector_irq, new_cpu)[vector] = irq;
1221 cfg->vector = vector;
1222 cpumask_copy(cfg->domain, tmp_mask);
1223 err = 0;
1224 break;
1225 }
1226 free_cpumask_var(tmp_mask);
1227 return err;
1228 }
1229
1230 int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
1231 {
1232 int err;
1233 unsigned long flags;
1234
1235 spin_lock_irqsave(&vector_lock, flags);
1236 err = __assign_irq_vector(irq, cfg, mask);
1237 spin_unlock_irqrestore(&vector_lock, flags);
1238 return err;
1239 }
1240
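/*
 * Release the vector assigned to this irq: clear the per-cpu vector_irq
 * slots in the current domain and, if a migration was in progress, in
 * the old domain as well.
 */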
1241 static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
1242 {
1243 int cpu, vector;
1244
1245 BUG_ON(!cfg->vector);
1246
1247 vector = cfg->vector;
1248 for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
1249 per_cpu(vector_irq, cpu)[vector] = -1;
1250
1251 cfg->vector = 0;
1252 cpumask_clear(cfg->domain);
1253
1254 if (likely(!cfg->move_in_progress))
1255 return;
1256 for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
1257 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
1258 vector++) {
1259 if (per_cpu(vector_irq, cpu)[vector] != irq)
1260 continue;
1261 per_cpu(vector_irq, cpu)[vector] = -1;
1262 break;
1263 }
1264 }
1265 cfg->move_in_progress = 0;
1266 }
1267
1268 void __setup_vector_irq(int cpu)
1269 {
1270 /* Initialize vector_irq on a new cpu */
1271 /* This function must be called with vector_lock held */
1272 int irq, vector;
1273 struct irq_cfg *cfg;
1274 struct irq_desc *desc;
1275
1276 /* Mark the inuse vectors */
1277 for_each_irq_desc(irq, desc) {
1278 cfg = desc->chip_data;
1279 if (!cpumask_test_cpu(cpu, cfg->domain))
1280 continue;
1281 vector = cfg->vector;
1282 per_cpu(vector_irq, cpu)[vector] = irq;
1283 }
1284 /* Mark the free vectors */
1285 for (vector = 0; vector < NR_VECTORS; ++vector) {
1286 irq = per_cpu(vector_irq, cpu)[vector];
1287 if (irq < 0)
1288 continue;
1289
1290 cfg = irq_cfg(irq);
1291 if (!cpumask_test_cpu(cpu, cfg->domain))
1292 per_cpu(vector_irq, cpu)[vector] = -1;
1293 }
1294 }
1295
1296 static struct irq_chip ioapic_chip;
1297 static struct irq_chip ir_ioapic_chip;
1298
1299 #define IOAPIC_AUTO -1
1300 #define IOAPIC_EDGE 0
1301 #define IOAPIC_LEVEL 1
1302
1303 #ifdef CONFIG_X86_32
1304 static inline int IO_APIC_irq_trigger(int irq)
1305 {
1306 int apic, idx, pin;
1307
1308 for (apic = 0; apic < nr_ioapics; apic++) {
1309 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
1310 idx = find_irq_entry(apic, pin, mp_INT);
1311 if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
1312 return irq_trigger(idx);
1313 }
1314 }
1315 /*
1316 * nonexistent IRQs are edge default
1317 */
1318 return 0;
1319 }
1320 #else
1321 static inline int IO_APIC_irq_trigger(int irq)
1322 {
1323 return 1;
1324 }
1325 #endif
1326
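/*
 * Pick the flow handler (fasteoi for level, edge otherwise) and the
 * irq_chip (remapped or plain I/O APIC) for this irq, and mark the
 * descriptor's IRQ_LEVEL status accordingly.
 */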
1327 static void ioapic_register_intr(int irq, struct irq_desc *desc, unsigned long trigger)
1328 {
1329
1330 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
1331 trigger == IOAPIC_LEVEL)
1332 desc->status |= IRQ_LEVEL;
1333 else
1334 desc->status &= ~IRQ_LEVEL;
1335
1336 if (irq_remapped(irq)) {
1337 desc->status |= IRQ_MOVE_PCNTXT;
1338 if (trigger)
1339 set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
1340 handle_fasteoi_irq,
1341 "fasteoi");
1342 else
1343 set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
1344 handle_edge_irq, "edge");
1345 return;
1346 }
1347
1348 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
1349 trigger == IOAPIC_LEVEL)
1350 set_irq_chip_and_handler_name(irq, &ioapic_chip,
1351 handle_fasteoi_irq,
1352 "fasteoi");
1353 else
1354 set_irq_chip_and_handler_name(irq, &ioapic_chip,
1355 handle_edge_irq, "edge");
1356 }
1357
1358 int setup_ioapic_entry(int apic_id, int irq,
1359 struct IO_APIC_route_entry *entry,
1360 unsigned int destination, int trigger,
1361 int polarity, int vector, int pin)
1362 {
1363 /*
1364 * add it to the IO-APIC irq-routing table:
1365 */
1366 	memset(entry, 0, sizeof(*entry));
1367
1368 if (intr_remapping_enabled) {
1369 struct intel_iommu *iommu = map_ioapic_to_ir(apic_id);
1370 struct irte irte;
1371 struct IR_IO_APIC_route_entry *ir_entry =
1372 (struct IR_IO_APIC_route_entry *) entry;
1373 int index;
1374
1375 if (!iommu)
1376 panic("No mapping iommu for ioapic %d\n", apic_id);
1377
1378 index = alloc_irte(iommu, irq, 1);
1379 if (index < 0)
1380 panic("Failed to allocate IRTE for ioapic %d\n", apic_id);
1381
1382 memset(&irte, 0, sizeof(irte));
1383
1384 irte.present = 1;
1385 irte.dst_mode = apic->irq_dest_mode;
1386 /*
1387 * Trigger mode in the IRTE will always be edge, and the
1388 * actual level or edge trigger will be setup in the IO-APIC
1389 * RTE. This will help simplify level triggered irq migration.
1390 		 * For more details, see the comments above explaining IO-APIC
1391 * irq migration in the presence of interrupt-remapping.
1392 */
1393 irte.trigger_mode = 0;
1394 irte.dlvry_mode = apic->irq_delivery_mode;
1395 irte.vector = vector;
1396 irte.dest_id = IRTE_DEST(destination);
1397
1398 /* Set source-id of interrupt request */
1399 set_ioapic_sid(&irte, apic_id);
1400
1401 modify_irte(irq, &irte);
1402
1403 ir_entry->index2 = (index >> 15) & 0x1;
1404 ir_entry->zero = 0;
1405 ir_entry->format = 1;
1406 ir_entry->index = (index & 0x7fff);
1407 /*
1408 * IO-APIC RTE will be configured with virtual vector.
1409 * irq handler will do the explicit EOI to the io-apic.
1410 */
1411 ir_entry->vector = pin;
1412 } else {
1413 entry->delivery_mode = apic->irq_delivery_mode;
1414 entry->dest_mode = apic->irq_dest_mode;
1415 entry->dest = destination;
1416 entry->vector = vector;
1417 }
1418
1419 entry->mask = 0; /* enable IRQ */
1420 entry->trigger = trigger;
1421 entry->polarity = polarity;
1422
1423 /* Mask level triggered irqs.
1424 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
1425 */
1426 if (trigger)
1427 entry->mask = 1;
1428 return 0;
1429 }
1430
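/*
 * Program one I/O APIC pin: assign a vector, build the routing entry,
 * register the handler and finally write the entry to the I/O APIC.
 */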
1431 static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq_desc *desc,
1432 int trigger, int polarity)
1433 {
1434 struct irq_cfg *cfg;
1435 struct IO_APIC_route_entry entry;
1436 unsigned int dest;
1437
1438 if (!IO_APIC_IRQ(irq))
1439 return;
1440
1441 cfg = desc->chip_data;
1442
1443 if (assign_irq_vector(irq, cfg, apic->target_cpus()))
1444 return;
1445
1446 dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
1447
1448 	apic_printk(APIC_VERBOSE, KERN_DEBUG
1449 "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
1450 "IRQ %d Mode:%i Active:%i)\n",
1451 apic_id, mp_ioapics[apic_id].apicid, pin, cfg->vector,
1452 irq, trigger, polarity);
1453
1454
1455 if (setup_ioapic_entry(mp_ioapics[apic_id].apicid, irq, &entry,
1456 dest, trigger, polarity, cfg->vector, pin)) {
1457 printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
1458 mp_ioapics[apic_id].apicid, pin);
1459 __clear_irq_vector(irq, cfg);
1460 return;
1461 }
1462
1463 ioapic_register_intr(irq, desc, trigger);
1464 if (irq < nr_legacy_irqs)
1465 disable_8259A_irq(irq);
1466
1467 ioapic_write_entry(apic_id, pin, entry);
1468 }
1469
1470 static struct {
1471 DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1);
1472 } mp_ioapic_routing[MAX_IO_APICS];
1473
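/*
 * Walk all pins of the (first) I/O APIC, look up their MP-table
 * interrupt entries and set up a routing entry for every connected pin.
 */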
1474 static void __init setup_IO_APIC_irqs(void)
1475 {
1476 int apic_id = 0, pin, idx, irq;
1477 int notcon = 0;
1478 struct irq_desc *desc;
1479 struct irq_cfg *cfg;
1480 int node = cpu_to_node(boot_cpu_id);
1481
1482 apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
1483
1484 #ifdef CONFIG_ACPI
1485 if (!acpi_disabled && acpi_ioapic) {
1486 apic_id = mp_find_ioapic(0);
1487 if (apic_id < 0)
1488 apic_id = 0;
1489 }
1490 #endif
1491
1492 for (pin = 0; pin < nr_ioapic_registers[apic_id]; pin++) {
1493 idx = find_irq_entry(apic_id, pin, mp_INT);
1494 if (idx == -1) {
1495 if (!notcon) {
1496 notcon = 1;
1497 apic_printk(APIC_VERBOSE,
1498 KERN_DEBUG " %d-%d",
1499 mp_ioapics[apic_id].apicid, pin);
1500 } else
1501 apic_printk(APIC_VERBOSE, " %d-%d",
1502 mp_ioapics[apic_id].apicid, pin);
1503 continue;
1504 }
1505 if (notcon) {
1506 apic_printk(APIC_VERBOSE,
1507 " (apicid-pin) not connected\n");
1508 notcon = 0;
1509 }
1510
1511 irq = pin_2_irq(idx, apic_id, pin);
1512
1513 /*
1514 * Skip the timer IRQ if there's a quirk handler
1515 * installed and if it returns 1:
1516 */
1517 if (apic->multi_timer_check &&
1518 apic->multi_timer_check(apic_id, irq))
1519 continue;
1520
1521 desc = irq_to_desc_alloc_node(irq, node);
1522 if (!desc) {
1523 printk(KERN_INFO "can not get irq_desc for %d\n", irq);
1524 continue;
1525 }
1526 cfg = desc->chip_data;
1527 add_pin_to_irq_node(cfg, node, apic_id, pin);
1528 		/*
1529 		 * don't mark it in pin_programmed, so that ACPI can still
1530 		 * set it correctly later when irq < 16
1531 		 */
1532 setup_IO_APIC_irq(apic_id, pin, irq, desc,
1533 irq_trigger(idx), irq_polarity(idx));
1534 }
1535
1536 if (notcon)
1537 apic_printk(APIC_VERBOSE,
1538 " (apicid-pin) not connected\n");
1539 }
1540
1541 /*
1542 * Set up the timer pin, possibly with the 8259A-master behind.
1543 */
1544 static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin,
1545 int vector)
1546 {
1547 struct IO_APIC_route_entry entry;
1548
1549 if (intr_remapping_enabled)
1550 return;
1551
1552 memset(&entry, 0, sizeof(entry));
1553
1554 /*
1555 * We use logical delivery to get the timer IRQ
1556 * to the first CPU.
1557 */
1558 entry.dest_mode = apic->irq_dest_mode;
1559 entry.mask = 0; /* don't mask IRQ for edge */
1560 entry.dest = apic->cpu_mask_to_apicid(apic->target_cpus());
1561 entry.delivery_mode = apic->irq_delivery_mode;
1562 entry.polarity = 0;
1563 entry.trigger = 0;
1564 entry.vector = vector;
1565
1566 /*
1567 * The timer IRQ doesn't have to know that behind the
1568 * scene we may have a 8259A-master in AEOI mode ...
1569 */
1570 set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");
1571
1572 /*
1573 * Add it to the IO-APIC irq-routing table:
1574 */
1575 ioapic_write_entry(apic_id, pin, entry);
1576 }
1577
1578
1579 __apicdebuginit(void) print_IO_APIC(void)
1580 {
1581 int apic, i;
1582 union IO_APIC_reg_00 reg_00;
1583 union IO_APIC_reg_01 reg_01;
1584 union IO_APIC_reg_02 reg_02;
1585 union IO_APIC_reg_03 reg_03;
1586 unsigned long flags;
1587 struct irq_cfg *cfg;
1588 struct irq_desc *desc;
1589 unsigned int irq;
1590
1591 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
1592 for (i = 0; i < nr_ioapics; i++)
1593 printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
1594 mp_ioapics[i].apicid, nr_ioapic_registers[i]);
1595
1596 /*
1597 * We are a bit conservative about what we expect. We have to
1598 * know about every hardware change ASAP.
1599 */
1600 printk(KERN_INFO "testing the IO APIC.......................\n");
1601
1602 for (apic = 0; apic < nr_ioapics; apic++) {
1603
1604 spin_lock_irqsave(&ioapic_lock, flags);
1605 reg_00.raw = io_apic_read(apic, 0);
1606 reg_01.raw = io_apic_read(apic, 1);
1607 if (reg_01.bits.version >= 0x10)
1608 reg_02.raw = io_apic_read(apic, 2);
1609 if (reg_01.bits.version >= 0x20)
1610 reg_03.raw = io_apic_read(apic, 3);
1611 spin_unlock_irqrestore(&ioapic_lock, flags);
1612
1613 printk("\n");
1614 printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].apicid);
1615 printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
1616 printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
1617 printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
1618 printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS);
1619
1620 printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
1621 printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
1622
1623 printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
1624 printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);
1625
1626 /*
1627 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
1628 * but the value of reg_02 is read as the previous read register
1629 * value, so ignore it if reg_02 == reg_01.
1630 */
1631 if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
1632 printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
1633 printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
1634 }
1635
1636 /*
1637 * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
1638 * or reg_03, but the value of reg_0[23] is read as the previous read
1639 * register value, so ignore it if reg_03 == reg_0[12].
1640 */
1641 if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
1642 reg_03.raw != reg_01.raw) {
1643 printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
1644 printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT);
1645 }
1646
1647 printk(KERN_DEBUG ".... IRQ redirection table:\n");
1648
1649 printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
1650 " Stat Dmod Deli Vect: \n");
1651
1652 for (i = 0; i <= reg_01.bits.entries; i++) {
1653 struct IO_APIC_route_entry entry;
1654
1655 entry = ioapic_read_entry(apic, i);
1656
1657 printk(KERN_DEBUG " %02x %03X ",
1658 i,
1659 entry.dest
1660 );
1661
1662 printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
1663 entry.mask,
1664 entry.trigger,
1665 entry.irr,
1666 entry.polarity,
1667 entry.delivery_status,
1668 entry.dest_mode,
1669 entry.delivery_mode,
1670 entry.vector
1671 );
1672 }
1673 }
1674 printk(KERN_DEBUG "IRQ to pin mappings:\n");
1675 for_each_irq_desc(irq, desc) {
1676 struct irq_pin_list *entry;
1677
1678 cfg = desc->chip_data;
1679 entry = cfg->irq_2_pin;
1680 if (!entry)
1681 continue;
1682 printk(KERN_DEBUG "IRQ%d ", irq);
1683 for_each_irq_pin(entry, cfg->irq_2_pin)
1684 printk("-> %d:%d", entry->apic, entry->pin);
1685 printk("\n");
1686 }
1687
1688 printk(KERN_INFO ".................................... done.\n");
1689
1690 return;
1691 }
1692
1693 __apicdebuginit(void) print_APIC_field(int base)
1694 {
1695 int i;
1696
1697 printk(KERN_DEBUG);
1698
1699 for (i = 0; i < 8; i++)
1700 printk(KERN_CONT "%08x", apic_read(base + i*0x10));
1701
1702 printk(KERN_CONT "\n");
1703 }
1704
1705 __apicdebuginit(void) print_local_APIC(void *dummy)
1706 {
1707 unsigned int i, v, ver, maxlvt;
1708 u64 icr;
1709
1710 printk(KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
1711 smp_processor_id(), hard_smp_processor_id());
1712 v = apic_read(APIC_ID);
1713 printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, read_apic_id());
1714 v = apic_read(APIC_LVR);
1715 printk(KERN_INFO "... APIC VERSION: %08x\n", v);
1716 ver = GET_APIC_VERSION(v);
1717 maxlvt = lapic_get_maxlvt();
1718
1719 v = apic_read(APIC_TASKPRI);
1720 printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
1721
1722 if (APIC_INTEGRATED(ver)) { /* !82489DX */
1723 if (!APIC_XAPIC(ver)) {
1724 v = apic_read(APIC_ARBPRI);
1725 printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
1726 v & APIC_ARBPRI_MASK);
1727 }
1728 v = apic_read(APIC_PROCPRI);
1729 printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
1730 }
1731
1732 /*
1733 * Remote read supported only in the 82489DX and local APIC for
1734 * Pentium processors.
1735 */
1736 if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
1737 v = apic_read(APIC_RRR);
1738 printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
1739 }
1740
1741 v = apic_read(APIC_LDR);
1742 printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
1743 if (!x2apic_enabled()) {
1744 v = apic_read(APIC_DFR);
1745 printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
1746 }
1747 v = apic_read(APIC_SPIV);
1748 printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
1749
1750 printk(KERN_DEBUG "... APIC ISR field:\n");
1751 print_APIC_field(APIC_ISR);
1752 printk(KERN_DEBUG "... APIC TMR field:\n");
1753 print_APIC_field(APIC_TMR);
1754 printk(KERN_DEBUG "... APIC IRR field:\n");
1755 print_APIC_field(APIC_IRR);
1756
1757 if (APIC_INTEGRATED(ver)) { /* !82489DX */
1758 if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
1759 apic_write(APIC_ESR, 0);
1760
1761 v = apic_read(APIC_ESR);
1762 printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
1763 }
1764
1765 icr = apic_icr_read();
1766 printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
1767 printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));
1768
1769 v = apic_read(APIC_LVTT);
1770 printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
1771
1772 if (maxlvt > 3) { /* PC is LVT#4. */
1773 v = apic_read(APIC_LVTPC);
1774 printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
1775 }
1776 v = apic_read(APIC_LVT0);
1777 printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
1778 v = apic_read(APIC_LVT1);
1779 printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
1780
1781 if (maxlvt > 2) { /* ERR is LVT#3. */
1782 v = apic_read(APIC_LVTERR);
1783 printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
1784 }
1785
1786 v = apic_read(APIC_TMICT);
1787 printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
1788 v = apic_read(APIC_TMCCT);
1789 printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
1790 v = apic_read(APIC_TDCR);
1791 printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
1792
1793 if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
1794 v = apic_read(APIC_EFEAT);
1795 maxlvt = (v >> 16) & 0xff;
1796 printk(KERN_DEBUG "... APIC EFEAT: %08x\n", v);
1797 v = apic_read(APIC_ECTRL);
1798 printk(KERN_DEBUG "... APIC ECTRL: %08x\n", v);
1799 for (i = 0; i < maxlvt; i++) {
1800 v = apic_read(APIC_EILVTn(i));
1801 printk(KERN_DEBUG "... APIC EILVT%d: %08x\n", i, v);
1802 }
1803 }
1804 printk("\n");
1805 }
1806
1807 __apicdebuginit(void) print_local_APICs(int maxcpu)
1808 {
1809 int cpu;
1810
1811 if (!maxcpu)
1812 return;
1813
1814 preempt_disable();
1815 for_each_online_cpu(cpu) {
1816 if (cpu >= maxcpu)
1817 break;
1818 smp_call_function_single(cpu, print_local_APIC, NULL, 1);
1819 }
1820 preempt_enable();
1821 }
1822
1823 __apicdebuginit(void) print_PIC(void)
1824 {
1825 unsigned int v;
1826 unsigned long flags;
1827
1828 if (!nr_legacy_irqs)
1829 return;
1830
1831 printk(KERN_DEBUG "\nprinting PIC contents\n");
1832
1833 spin_lock_irqsave(&i8259A_lock, flags);
1834
1835 v = inb(0xa1) << 8 | inb(0x21);
1836 printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
1837
1838 v = inb(0xa0) << 8 | inb(0x20);
1839 printk(KERN_DEBUG "... PIC IRR: %04x\n", v);
1840
1841 	outb(0x0b, 0xa0);
1842 	outb(0x0b, 0x20);
1843 	v = inb(0xa0) << 8 | inb(0x20);
1844 	outb(0x0a, 0xa0);
1845 	outb(0x0a, 0x20);
1846
1847 spin_unlock_irqrestore(&i8259A_lock, flags);
1848
1849 printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
1850
1851 v = inb(0x4d1) << 8 | inb(0x4d0);
1852 printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
1853 }
1854
1855 static int __initdata show_lapic = 1;
1856 static __init int setup_show_lapic(char *arg)
1857 {
1858 int num = -1;
1859
1860 if (strcmp(arg, "all") == 0) {
1861 show_lapic = CONFIG_NR_CPUS;
1862 } else {
1863 get_option(&arg, &num);
1864 if (num >= 0)
1865 show_lapic = num;
1866 }
1867
1868 return 1;
1869 }
1870 __setup("show_lapic=", setup_show_lapic);
1871
1872 __apicdebuginit(int) print_ICs(void)
1873 {
1874 if (apic_verbosity == APIC_QUIET)
1875 return 0;
1876
1877 print_PIC();
1878
1879 /* don't print out if apic is not there */
1880 if (!cpu_has_apic && !apic_from_smp_config())
1881 return 0;
1882
1883 print_local_APICs(show_lapic);
1884 print_IO_APIC();
1885
1886 return 0;
1887 }
1888
1889 fs_initcall(print_ICs);
1890
1891
1892 /* Where if anywhere is the i8259 connect in external int mode */
1893 static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
1894
1895 void __init enable_IO_APIC(void)
1896 {
1897 union IO_APIC_reg_01 reg_01;
1898 int i8259_apic, i8259_pin;
1899 int apic;
1900 unsigned long flags;
1901
1902 /*
1903 * The number of IO-APIC IRQ registers (== #pins):
1904 */
1905 for (apic = 0; apic < nr_ioapics; apic++) {
1906 spin_lock_irqsave(&ioapic_lock, flags);
1907 reg_01.raw = io_apic_read(apic, 1);
1908 spin_unlock_irqrestore(&ioapic_lock, flags);
1909 		nr_ioapic_registers[apic] = reg_01.bits.entries + 1;
1910 }
1911
1912 if (!nr_legacy_irqs)
1913 return;
1914
1915 	for (apic = 0; apic < nr_ioapics; apic++) {
1916 int pin;
1917 /* See if any of the pins is in ExtINT mode */
1918 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
1919 struct IO_APIC_route_entry entry;
1920 entry = ioapic_read_entry(apic, pin);
1921
1922 /* If the interrupt line is enabled and in ExtInt mode
1923 * I have found the pin where the i8259 is connected.
1924 */
1925 if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
1926 ioapic_i8259.apic = apic;
1927 ioapic_i8259.pin = pin;
1928 goto found_i8259;
1929 }
1930 }
1931 }
1932 found_i8259:
1933 	/* Look to see if the MP table has reported the ExtINT pin */
1934 	/* If we could not find the appropriate pin by looking at the ioapic,
1935 	 * the i8259 is probably not connected to the ioapic, but give the
1936 	 * mptable a chance anyway.
1937 	 */
1938 i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
1939 i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
1940 /* Trust the MP table if nothing is setup in the hardware */
1941 if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
1942 printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
1943 ioapic_i8259.pin = i8259_pin;
1944 ioapic_i8259.apic = i8259_apic;
1945 }
1946 /* Complain if the MP table and the hardware disagree */
1947 if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
1948 (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
1949 {
1950 printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
1951 }
1952
1953 /*
1954 * Do not trust the IO-APIC being empty at bootup
1955 */
1956 clear_IO_APIC();
1957 }
1958
1959 /*
1960 * Not an __init, needed by the reboot code
1961 */
1962 void disable_IO_APIC(void)
1963 {
1964 /*
1965 * Clear the IO-APIC before rebooting:
1966 */
1967 clear_IO_APIC();
1968
1969 if (!nr_legacy_irqs)
1970 return;
1971
1972 /*
1973 * If the i8259 is routed through an IOAPIC
1974 * Put that IOAPIC in virtual wire mode
1975 * so legacy interrupts can be delivered.
1976 *
1977 	 * With interrupt-remapping, for now we will use virtual wire A mode,
1978 	 * as virtual wire B is a little more complex (it needs both the
1979 	 * IOAPIC RTE as well as the interrupt-remapping table entry configured).
1980 * As this gets called during crash dump, keep this simple for now.
1981 */
1982 if (ioapic_i8259.pin != -1 && !intr_remapping_enabled) {
1983 struct IO_APIC_route_entry entry;
1984
1985 memset(&entry, 0, sizeof(entry));
1986 entry.mask = 0; /* Enabled */
1987 entry.trigger = 0; /* Edge */
1988 entry.irr = 0;
1989 entry.polarity = 0; /* High */
1990 entry.delivery_status = 0;
1991 entry.dest_mode = 0; /* Physical */
1992 entry.delivery_mode = dest_ExtINT; /* ExtInt */
1993 entry.vector = 0;
1994 entry.dest = read_apic_id();
1995
1996 /*
1997 * Add it to the IO-APIC irq-routing table:
1998 */
1999 ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
2000 }
2001
2002 /*
2003 * Use virtual wire A mode when interrupt remapping is enabled.
2004 */
2005 if (cpu_has_apic || apic_from_smp_config())
2006 disconnect_bsp_APIC(!intr_remapping_enabled &&
2007 ioapic_i8259.pin != -1);
2008 }
2009
2010 #ifdef CONFIG_X86_32
2011 /*
2012 * function to set the IO-APIC physical IDs based on the
2013 * values stored in the MPC table.
2014 *
2015 * by Matt Domsch <Matt_Domsch@dell.com> Tue Dec 21 12:25:05 CST 1999
2016 */
2017
2018 void __init setup_ioapic_ids_from_mpc(void)
2019 {
2020 union IO_APIC_reg_00 reg_00;
2021 physid_mask_t phys_id_present_map;
2022 int apic_id;
2023 int i;
2024 unsigned char old_id;
2025 unsigned long flags;
2026
2027 if (acpi_ioapic)
2028 return;
2029 /*
2030 * Don't check I/O APIC IDs for xAPIC systems. They have
2031 * no meaning without the serial APIC bus.
2032 */
2033 if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
2034 || APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
2035 return;
2036 /*
2037 * This is broken; anything with a real cpu count has to
2038 * circumvent this idiocy regardless.
2039 */
2040 apic->ioapic_phys_id_map(&phys_cpu_present_map, &phys_id_present_map);
2041
2042 /*
2043 * Set the IOAPIC ID to the value stored in the MPC table.
2044 */
2045 for (apic_id = 0; apic_id < nr_ioapics; apic_id++) {
2046
2047 /* Read the register 0 value */
2048 spin_lock_irqsave(&ioapic_lock, flags);
2049 reg_00.raw = io_apic_read(apic_id, 0);
2050 spin_unlock_irqrestore(&ioapic_lock, flags);
2051
2052 old_id = mp_ioapics[apic_id].apicid;
2053
2054 if (mp_ioapics[apic_id].apicid >= get_physical_broadcast()) {
2055 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
2056 apic_id, mp_ioapics[apic_id].apicid);
2057 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
2058 reg_00.bits.ID);
2059 mp_ioapics[apic_id].apicid = reg_00.bits.ID;
2060 }
2061
2062 /*
2063 * Sanity check, is the ID really free? Every APIC in a
2064 * system must have a unique ID or we get lots of nice
2065 * 'stuck on smp_invalidate_needed IPI wait' messages.
2066 */
2067 if (apic->check_apicid_used(&phys_id_present_map,
2068 mp_ioapics[apic_id].apicid)) {
2069 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
2070 apic_id, mp_ioapics[apic_id].apicid);
2071 for (i = 0; i < get_physical_broadcast(); i++)
2072 if (!physid_isset(i, phys_id_present_map))
2073 break;
2074 if (i >= get_physical_broadcast())
2075 panic("Max APIC ID exceeded!\n");
2076 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
2077 i);
2078 physid_set(i, phys_id_present_map);
2079 mp_ioapics[apic_id].apicid = i;
2080 } else {
2081 physid_mask_t tmp;
2082 apic->apicid_to_cpu_present(mp_ioapics[apic_id].apicid, &tmp);
2083 apic_printk(APIC_VERBOSE, "Setting %d in the "
2084 "phys_id_present_map\n",
2085 mp_ioapics[apic_id].apicid);
2086 physids_or(phys_id_present_map, phys_id_present_map, tmp);
2087 }
2088
2089
2090 /*
2091 * We need to adjust the IRQ routing table
2092 * if the ID changed.
2093 */
2094 if (old_id != mp_ioapics[apic_id].apicid)
2095 for (i = 0; i < mp_irq_entries; i++)
2096 if (mp_irqs[i].dstapic == old_id)
2097 mp_irqs[i].dstapic
2098 = mp_ioapics[apic_id].apicid;
2099
2100 /*
2101 * Read the right value from the MPC table and
2102 * write it into the ID register.
2103 */
2104 apic_printk(APIC_VERBOSE, KERN_INFO
2105 "...changing IO-APIC physical APIC ID to %d ...",
2106 mp_ioapics[apic_id].apicid);
2107
2108 reg_00.bits.ID = mp_ioapics[apic_id].apicid;
2109 spin_lock_irqsave(&ioapic_lock, flags);
2110 io_apic_write(apic_id, 0, reg_00.raw);
2111 spin_unlock_irqrestore(&ioapic_lock, flags);
2112
2113 /*
2114 * Sanity check
2115 */
2116 spin_lock_irqsave(&ioapic_lock, flags);
2117 reg_00.raw = io_apic_read(apic_id, 0);
2118 spin_unlock_irqrestore(&ioapic_lock, flags);
2119 if (reg_00.bits.ID != mp_ioapics[apic_id].apicid)
2120 printk(KERN_ERR "could not set ID!\n");
2121 else
2122 apic_printk(APIC_VERBOSE, " ok.\n");
2123 }
2124 }
2125 #endif
2126
2127 int no_timer_check __initdata;
2128
2129 static int __init notimercheck(char *s)
2130 {
2131 no_timer_check = 1;
2132 return 1;
2133 }
2134 __setup("no_timer_check", notimercheck);
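/*
 * Usage note (illustrative): booting with the "no_timer_check" kernel
 * parameter makes timer_irq_works() below report success without
 * probing, which can help on e.g. virtualized platforms where the
 * tick-counting check is unreliable.
 */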
2135
2136 /*
2137 * There is a nasty bug in some older SMP boards: their mptable lies
2138 * about the timer IRQ. We do the following to work around the situation:
2139 *
2140 * - timer IRQ defaults to IO-APIC IRQ
2141 * - if this function detects that timer IRQs are defunct, then we fall
2142 * back to ISA timer IRQs
2143 */
2144 static int __init timer_irq_works(void)
2145 {
2146 unsigned long t1 = jiffies;
2147 unsigned long flags;
2148
2149 if (no_timer_check)
2150 return 1;
2151
2152 local_save_flags(flags);
2153 local_irq_enable();
2154 /* Let ten ticks pass... */
2155 mdelay((10 * 1000) / HZ);
2156 local_irq_restore(flags);
2157
2158 /*
2159 * Expect a few ticks at least, to be sure some possible
2160 * glue logic does not lock up after the first one or
2161 * two ticks in a non-ExtINT mode. Also the local APIC
2162 * might have cached one ExtINT interrupt. Finally, at
2163 * least one tick may be lost due to delays.
2164 */
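/*
 * Illustrative numbers: with HZ == 1000 the mdelay() above spins for
 * 10ms, i.e. 10 timer ticks, and we then accept the timer only if
 * more than 4 ticks were actually accounted, leaving slack for the
 * caching/lost-tick effects described above.
 */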
2165
2166 /* jiffies wrap? */
2167 if (time_after(jiffies, t1 + 4))
2168 return 1;
2169 return 0;
2170 }
2171
2172 /*
2173 * In the SMP+IOAPIC case it might happen that an unspecified number
2174 * of pending IRQ events are left unhandled. These cases are very rare,
2175 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
2176 * better to do it this way, as we then do not have to be aware of
2177 * 'pending' interrupts in the IRQ path, except at this point.
2178 */
2179 /*
2180 * Edge triggered IRQs need to resend any interrupt
2181 * that was delayed, but this is now handled in the
2182 * device-independent code.
2183 */
2184
2185 /*
2186 * Starting up an edge-triggered IO-APIC interrupt is
2187 * nasty - we need to make sure that we get the edge.
2188 * If it is already asserted for some reason, we need to
2189 * return 1 to indicate that it was pending.
2190 *
2191 * This is not complete - we should be able to fake
2192 * an edge even if it isn't on the 8259A...
2193 */
2194
2195 static unsigned int startup_ioapic_irq(unsigned int irq)
2196 {
2197 int was_pending = 0;
2198 unsigned long flags;
2199 struct irq_cfg *cfg;
2200
2201 spin_lock_irqsave(&ioapic_lock, flags);
2202 if (irq < nr_legacy_irqs) {
2203 disable_8259A_irq(irq);
2204 if (i8259A_irq_pending(irq))
2205 was_pending = 1;
2206 }
2207 cfg = irq_cfg(irq);
2208 __unmask_IO_APIC_irq(cfg);
2209 spin_unlock_irqrestore(&ioapic_lock, flags);
2210
2211 return was_pending;
2212 }
2213
2214 static int ioapic_retrigger_irq(unsigned int irq)
2215 {
2216
2217 struct irq_cfg *cfg = irq_cfg(irq);
2218 unsigned long flags;
2219
2220 spin_lock_irqsave(&vector_lock, flags);
2221 apic->send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector);
2222 spin_unlock_irqrestore(&vector_lock, flags);
2223
2224 return 1;
2225 }
2226
2227 /*
2228 * Level and edge triggered IO-APIC interrupts need different handling,
2229 * so we use two separate IRQ descriptors. Edge triggered IRQs can be
2230 * handled with the level-triggered descriptor, but that one has slightly
2231 * more overhead. Level-triggered interrupts cannot be handled with the
2232 * edge-triggered handler, without risking IRQ storms and other ugly
2233 * races.
2234 */
2235
2236 #ifdef CONFIG_SMP
2237 void send_cleanup_vector(struct irq_cfg *cfg)
2238 {
2239 cpumask_var_t cleanup_mask;
2240
2241 if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
2242 unsigned int i;
2243 for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
2244 apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
2245 } else {
2246 cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
2247 apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
2248 free_cpumask_var(cleanup_mask);
2249 }
2250 cfg->move_in_progress = 0;
2251 }
2252
2253 static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
2254 {
2255 int apic, pin;
2256 struct irq_pin_list *entry;
2257 u8 vector = cfg->vector;
2258
2259 for_each_irq_pin(entry, cfg->irq_2_pin) {
2260 unsigned int reg;
2261
2262 apic = entry->apic;
2263 pin = entry->pin;
2264 /*
2265 * With interrupt-remapping, destination information comes
2266 * from interrupt-remapping table entry.
2267 */
2268 if (!irq_remapped(irq))
2269 io_apic_write(apic, 0x11 + pin*2, dest);
2270 reg = io_apic_read(apic, 0x10 + pin*2);
2271 reg &= ~IO_APIC_REDIR_VECTOR_MASK;
2272 reg |= vector;
2273 io_apic_modify(apic, 0x10 + pin*2, reg);
2274 }
2275 }
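/*
 * Layout note (illustrative): each redirection table entry occupies two
 * 32-bit IO-APIC registers, which is why the loop above touches
 * 0x10 + pin*2 (low dword: vector etc.) and 0x11 + pin*2 (high dword:
 * destination); e.g. pin 4 uses registers 0x18 and 0x19.
 */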
2276
2277 /*
2278 * Either sets desc->affinity to a valid value, and returns
2279 * ->cpu_mask_to_apicid of that, or returns BAD_APICID and
2280 * leaves desc->affinity untouched.
2281 */
2282 unsigned int
2283 set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
2284 {
2285 struct irq_cfg *cfg;
2286 unsigned int irq;
2287
2288 if (!cpumask_intersects(mask, cpu_online_mask))
2289 return BAD_APICID;
2290
2291 irq = desc->irq;
2292 cfg = desc->chip_data;
2293 if (assign_irq_vector(irq, cfg, mask))
2294 return BAD_APICID;
2295
2296 cpumask_copy(desc->affinity, mask);
2297
2298 return apic->cpu_mask_to_apicid_and(desc->affinity, cfg->domain);
2299 }
2300
2301 static int
2302 set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
2303 {
2304 struct irq_cfg *cfg;
2305 unsigned long flags;
2306 unsigned int dest;
2307 unsigned int irq;
2308 int ret = -1;
2309
2310 irq = desc->irq;
2311 cfg = desc->chip_data;
2312
2313 spin_lock_irqsave(&ioapic_lock, flags);
2314 dest = set_desc_affinity(desc, mask);
2315 if (dest != BAD_APICID) {
2316 /* Only the high 8 bits are valid. */
2317 dest = SET_APIC_LOGICAL_ID(dest);
2318 __target_IO_APIC_irq(irq, dest, cfg);
2319 ret = 0;
2320 }
2321 spin_unlock_irqrestore(&ioapic_lock, flags);
2322
2323 return ret;
2324 }
2325
2326 static int
2327 set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask)
2328 {
2329 struct irq_desc *desc;
2330
2331 desc = irq_to_desc(irq);
2332
2333 return set_ioapic_affinity_irq_desc(desc, mask);
2334 }
2335
2336 #ifdef CONFIG_INTR_REMAP
2337
2338 /*
2339 * Migrate the IO-APIC irq in the presence of intr-remapping.
2340 *
2341 * For both level and edge triggered, irq migration is a simple atomic
2342 * update (of vector and cpu destination) of the IRTE, followed by a
2343 * flush of the hardware cache.
2344 *
2345 * For level triggered, we eliminate the io-apic RTE modification (with the
2346 * updated vector information) by using a virtual vector (the io-apic pin
2347 * number); the real vector used to interrupt the cpu comes from the interrupt-remapping table entry.
2348 */
2349 static int
2350 migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
2351 {
2352 struct irq_cfg *cfg;
2353 struct irte irte;
2354 unsigned int dest;
2355 unsigned int irq;
2356 int ret = -1;
2357
2358 if (!cpumask_intersects(mask, cpu_online_mask))
2359 return ret;
2360
2361 irq = desc->irq;
2362 if (get_irte(irq, &irte))
2363 return ret;
2364
2365 cfg = desc->chip_data;
2366 if (assign_irq_vector(irq, cfg, mask))
2367 return ret;
2368
2369 dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask);
2370
2371 irte.vector = cfg->vector;
2372 irte.dest_id = IRTE_DEST(dest);
2373
2374 /*
2375 * Modify the IRTE and flush the interrupt entry cache.
2376 */
2377 modify_irte(irq, &irte);
2378
2379 if (cfg->move_in_progress)
2380 send_cleanup_vector(cfg);
2381
2382 cpumask_copy(desc->affinity, mask);
2383
2384 return 0;
2385 }
2386
2387 /*
2388 * Migrates the IRQ destination in the process context.
2389 */
2390 static int set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc,
2391 const struct cpumask *mask)
2392 {
2393 return migrate_ioapic_irq_desc(desc, mask);
2394 }
2395 static int set_ir_ioapic_affinity_irq(unsigned int irq,
2396 const struct cpumask *mask)
2397 {
2398 struct irq_desc *desc = irq_to_desc(irq);
2399
2400 return set_ir_ioapic_affinity_irq_desc(desc, mask);
2401 }
2402 #else
2403 static inline int set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc,
2404 const struct cpumask *mask)
2405 {
2406 return 0;
2407 }
2408 #endif
2409
2410 asmlinkage void smp_irq_move_cleanup_interrupt(void)
2411 {
2412 unsigned vector, me;
2413
2414 ack_APIC_irq();
2415 exit_idle();
2416 irq_enter();
2417
2418 me = smp_processor_id();
2419 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
2420 unsigned int irq;
2421 unsigned int irr;
2422 struct irq_desc *desc;
2423 struct irq_cfg *cfg;
2424 irq = __get_cpu_var(vector_irq)[vector];
2425
2426 if (irq == -1)
2427 continue;
2428
2429 desc = irq_to_desc(irq);
2430 if (!desc)
2431 continue;
2432
2433 cfg = irq_cfg(irq);
2434 raw_spin_lock(&desc->lock);
2435
2436 if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
2437 goto unlock;
2438
2439 irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
2440 /*
2441 * Check if the vector that needs to be cleaned up is
2442 * registered in the cpu's IRR. If so, then this is not
2443 * the best time to clean it up. Let's clean it up in the
2444 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
2445 * to ourselves.
2446 */
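/*
 * Worked example (illustrative): for vector 0x61 the apic_read()
 * above fetches APIC_IRR + 0x30 (0x61 / 32 == 3 registers in, each
 * 0x10 apart) and the test below checks bit 0x61 % 32 == 1.
 */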
2447 if (irr & (1 << (vector % 32))) {
2448 apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
2449 goto unlock;
2450 }
2451 __get_cpu_var(vector_irq)[vector] = -1;
2452 unlock:
2453 raw_spin_unlock(&desc->lock);
2454 }
2455
2456 irq_exit();
2457 }
2458
2459 static void __irq_complete_move(struct irq_desc **descp, unsigned vector)
2460 {
2461 struct irq_desc *desc = *descp;
2462 struct irq_cfg *cfg = desc->chip_data;
2463 unsigned me;
2464
2465 if (likely(!cfg->move_in_progress))
2466 return;
2467
2468 me = smp_processor_id();
2469
2470 if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
2471 send_cleanup_vector(cfg);
2472 }
2473
2474 static void irq_complete_move(struct irq_desc **descp)
2475 {
2476 __irq_complete_move(descp, ~get_irq_regs()->orig_ax);
2477 }
2478
2479 void irq_force_complete_move(int irq)
2480 {
2481 struct irq_desc *desc = irq_to_desc(irq);
2482 struct irq_cfg *cfg = desc->chip_data;
2483
2484 __irq_complete_move(&desc, cfg->vector);
2485 }
2486 #else
2487 static inline void irq_complete_move(struct irq_desc **descp) {}
2488 #endif
2489
2490 static void ack_apic_edge(unsigned int irq)
2491 {
2492 struct irq_desc *desc = irq_to_desc(irq);
2493
2494 irq_complete_move(&desc);
2495 move_native_irq(irq);
2496 ack_APIC_irq();
2497 }
2498
2499 atomic_t irq_mis_count;
2500
2501 /*
2502 * IO-APIC versions below 0x20 don't support the EOI register.
2503 * For the record, here is the information about various versions:
2504 * 0Xh 82489DX
2505 * 1Xh I/OAPIC or I/O(x)APIC which are not PCI 2.2 Compliant
2506 * 2Xh I/O(x)APIC which is PCI 2.2 Compliant
2507 * 30h-FFh Reserved
2508 *
2509 * Some of the Intel ICH specs (ICH2 to ICH5) document the io-apic
2510 * version as 0x2. This is an error in the documentation; these ICH
2511 * chips use io-apics of version 0x20.
2512 *
2513 * For IO-APICs with an EOI register, we use that to do an explicit EOI.
2514 * Otherwise, we simulate the EOI message manually by changing the trigger
2515 * mode to edge and then back to level, with the RTE masked during this.
2516 */
2517 static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
2518 {
2519 struct irq_pin_list *entry;
2520
2521 for_each_irq_pin(entry, cfg->irq_2_pin) {
2522 if (mp_ioapics[entry->apic].apicver >= 0x20) {
2523 /*
2524 * Intr-remapping uses pin number as the virtual vector
2525 * in the RTE. Actual vector is programmed in
2526 * intr-remapping table entry. Hence for the io-apic
2527 * EOI we use the pin number.
2528 */
2529 if (irq_remapped(irq))
2530 io_apic_eoi(entry->apic, entry->pin);
2531 else
2532 io_apic_eoi(entry->apic, cfg->vector);
2533 } else {
2534 __mask_and_edge_IO_APIC_irq(entry);
2535 __unmask_and_level_IO_APIC_irq(entry);
2536 }
2537 }
2538 }
2539
2540 static void eoi_ioapic_irq(struct irq_desc *desc)
2541 {
2542 struct irq_cfg *cfg;
2543 unsigned long flags;
2544 unsigned int irq;
2545
2546 irq = desc->irq;
2547 cfg = desc->chip_data;
2548
2549 spin_lock_irqsave(&ioapic_lock, flags);
2550 __eoi_ioapic_irq(irq, cfg);
2551 spin_unlock_irqrestore(&ioapic_lock, flags);
2552 }
2553
2554 static void ack_apic_level(unsigned int irq)
2555 {
2556 struct irq_desc *desc = irq_to_desc(irq);
2557 unsigned long v;
2558 int i;
2559 struct irq_cfg *cfg;
2560 int do_unmask_irq = 0;
2561
2562 irq_complete_move(&desc);
2563 #ifdef CONFIG_GENERIC_PENDING_IRQ
2564 /* If we are moving the irq we need to mask it */
2565 if (unlikely(desc->status & IRQ_MOVE_PENDING)) {
2566 do_unmask_irq = 1;
2567 mask_IO_APIC_irq_desc(desc);
2568 }
2569 #endif
2570
2571 /*
2572 * It appears there is an erratum which affects at least version 0x11
2573 * of I/O APIC (that's the 82093AA and cores integrated into various
2574 * chipsets). Under certain conditions a level-triggered interrupt is
2575 * erroneously delivered as edge-triggered one but the respective IRR
2576 * bit gets set nevertheless. As a result the I/O unit expects an EOI
2577 * message but it will never arrive and further interrupts are blocked
2578 * from the source. The exact reason is so far unknown, but the
2579 * phenomenon was observed when two consecutive interrupt requests
2580 * from a given source get delivered to the same CPU and the source is
2581 * temporarily disabled in between.
2582 *
2583 * A workaround is to simulate an EOI message manually. We achieve it
2584 * by setting the trigger mode to edge and then to level when the edge
2585 * trigger mode gets detected in the TMR of a local APIC for a
2586 * level-triggered interrupt. We mask the source for the time of the
2587 * operation to prevent an edge-triggered interrupt escaping meanwhile.
2588 * The idea is from Manfred Spraul. --macro
2589 *
2590 * Also, when a cpu goes offline, fixup_irqs() will forward any
2591 * unhandled interrupt on the offlined cpu to the new cpu
2592 * destination that is handling the corresponding interrupt. This
2593 * interrupt forwarding is done via IPIs. Hence, in this case a
2594 * level-triggered io-apic interrupt will also be seen as an edge
2595 * interrupt in the IRR. And we can't rely on the cpu's EOI
2596 * being broadcast to the IO-APICs to clear the remote IRR
2597 * corresponding to the level-triggered interrupt. Hence on IO-APICs
2598 * supporting the EOI register, we do an explicit EOI to clear the
2599 * remote IRR, and on IO-APICs which don't have an EOI register,
2600 * we use the above logic (mask+edge followed by unmask+level) from
2601 * Manfred Spraul to clear the remote IRR.
2602 */
2603 cfg = desc->chip_data;
2604 i = cfg->vector;
2605 v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
2606
2607 /*
2608 * We must acknowledge the irq before we move it or the acknowledge will
2609 * not propagate properly.
2610 */
2611 ack_APIC_irq();
2612
2613 /*
2614 * Tail end of clearing the remote IRR bit (either by delivering the
2615 * EOI message via an io-apic EOI register write or by simulating it
2616 * using the mask+edge followed by unmask+level logic) manually, when
2617 * the level-triggered interrupt is seen as an edge-triggered interrupt
2618 * at the cpu.
2619 */
2620 if (!(v & (1 << (i & 0x1f)))) {
2621 atomic_inc(&irq_mis_count);
2622
2623 eoi_ioapic_irq(desc);
2624 }
2625
2626 /* Now we can move and re-enable the irq */
2627 if (unlikely(do_unmask_irq)) {
2628 /* Only migrate the irq if the ack has been received.
2629 *
2630 * On rare occasions the broadcast level triggered ack gets
2631 * delayed going to ioapics, and if we reprogram the
2632 * vector while Remote IRR is still set the irq will never
2633 * fire again.
2634 *
2635 * To prevent this scenario we read the Remote IRR bit
2636 * of the ioapic. This has two effects.
2637 * - On any sane system the read of the ioapic will
2638 * flush writes (and acks) going to the ioapic from
2639 * this cpu.
2640 * - We get to see if the ACK has actually been delivered.
2641 *
2642 * Based on failed experiments of reprogramming the
2643 * ioapic entry from outside of irq context (starting
2644 * with masking the ioapic entry and then polling until
2645 * Remote IRR was clear before reprogramming the
2646 * ioapic), I don't trust the Remote IRR bit to be
2647 * completely accurate.
2648 *
2649 * However there appears to be no other way to plug
2650 * this race, so if the Remote IRR bit is not
2651 * accurate and is causing problems then it is a hardware bug
2652 * and you can go talk to the chipset vendor about it.
2653 */
2654 cfg = desc->chip_data;
2655 if (!io_apic_level_ack_pending(cfg))
2656 move_masked_irq(irq);
2657 unmask_IO_APIC_irq_desc(desc);
2658 }
2659 }
2660
2661 #ifdef CONFIG_INTR_REMAP
2662 static void ir_ack_apic_edge(unsigned int irq)
2663 {
2664 ack_APIC_irq();
2665 }
2666
2667 static void ir_ack_apic_level(unsigned int irq)
2668 {
2669 struct irq_desc *desc = irq_to_desc(irq);
2670
2671 ack_APIC_irq();
2672 eoi_ioapic_irq(desc);
2673 }
2674 #endif /* CONFIG_INTR_REMAP */
2675
2676 static struct irq_chip ioapic_chip __read_mostly = {
2677 .name = "IO-APIC",
2678 .startup = startup_ioapic_irq,
2679 .mask = mask_IO_APIC_irq,
2680 .unmask = unmask_IO_APIC_irq,
2681 .ack = ack_apic_edge,
2682 .eoi = ack_apic_level,
2683 #ifdef CONFIG_SMP
2684 .set_affinity = set_ioapic_affinity_irq,
2685 #endif
2686 .retrigger = ioapic_retrigger_irq,
2687 };
2688
2689 static struct irq_chip ir_ioapic_chip __read_mostly = {
2690 .name = "IR-IO-APIC",
2691 .startup = startup_ioapic_irq,
2692 .mask = mask_IO_APIC_irq,
2693 .unmask = unmask_IO_APIC_irq,
2694 #ifdef CONFIG_INTR_REMAP
2695 .ack = ir_ack_apic_edge,
2696 .eoi = ir_ack_apic_level,
2697 #ifdef CONFIG_SMP
2698 .set_affinity = set_ir_ioapic_affinity_irq,
2699 #endif
2700 #endif
2701 .retrigger = ioapic_retrigger_irq,
2702 };
2703
2704 static inline void init_IO_APIC_traps(void)
2705 {
2706 int irq;
2707 struct irq_desc *desc;
2708 struct irq_cfg *cfg;
2709
2710 /*
2711 * NOTE! The local APIC isn't very good at handling
2712 * multiple interrupts at the same interrupt level.
2713 * As the interrupt level is determined by taking the
2714 * vector number and shifting that right by 4, we
2715 * want to spread these out a bit so that they don't
2716 * all fall in the same interrupt level.
2717 *
2718 * Also, we've got to be careful not to trash gate
2719 * 0x80, because int 0x80 is hm, kind of importantish. ;)
2720 */
2721 for_each_irq_desc(irq, desc) {
2722 cfg = desc->chip_data;
2723 if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) {
2724 /*
2725 * Hmm.. We don't have an entry for this,
2726 * so default to an old-fashioned 8259
2727 * interrupt if we can..
2728 */
2729 if (irq < nr_legacy_irqs)
2730 make_8259A_irq(irq);
2731 else
2732 /* Strange. Oh, well.. */
2733 desc->chip = &no_irq_chip;
2734 }
2735 }
2736 }
2737
2738 /*
2739 * The local APIC irq-chip implementation:
2740 */
2741
2742 static void mask_lapic_irq(unsigned int irq)
2743 {
2744 unsigned long v;
2745
2746 v = apic_read(APIC_LVT0);
2747 apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
2748 }
2749
2750 static void unmask_lapic_irq(unsigned int irq)
2751 {
2752 unsigned long v;
2753
2754 v = apic_read(APIC_LVT0);
2755 apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
2756 }
2757
2758 static void ack_lapic_irq(unsigned int irq)
2759 {
2760 ack_APIC_irq();
2761 }
2762
2763 static struct irq_chip lapic_chip __read_mostly = {
2764 .name = "local-APIC",
2765 .mask = mask_lapic_irq,
2766 .unmask = unmask_lapic_irq,
2767 .ack = ack_lapic_irq,
2768 };
2769
2770 static void lapic_register_intr(int irq, struct irq_desc *desc)
2771 {
2772 desc->status &= ~IRQ_LEVEL;
2773 set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
2774 "edge");
2775 }
2776
2777 static void __init setup_nmi(void)
2778 {
2779 /*
2780 * Dirty trick to enable the NMI watchdog ...
2781 * We put the 8259A master into AEOI mode and
2782 * unmask on all local APICs LVT0 as NMI.
2783 *
2784 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
2785 * is from Maciej W. Rozycki - so we do not have to EOI from
2786 * the NMI handler or the timer interrupt.
2787 */
2788 apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");
2789
2790 enable_NMI_through_LVT0();
2791
2792 apic_printk(APIC_VERBOSE, " done.\n");
2793 }
2794
2795 /*
2796 * This looks a bit hackish but it's about the only way of sending
2797 * a few INTA cycles to 8259As and any associated glue logic. ICR does
2798 * not support the ExtINT mode, unfortunately. We need to send these
2799 * cycles as some i82489DX-based boards have glue logic that keeps the
2800 * 8259A interrupt line asserted until INTA. --macro
2801 */
2802 static inline void __init unlock_ExtINT_logic(void)
2803 {
2804 int apic, pin, i;
2805 struct IO_APIC_route_entry entry0, entry1;
2806 unsigned char save_control, save_freq_select;
2807
2808 pin = find_isa_irq_pin(8, mp_INT);
2809 if (pin == -1) {
2810 WARN_ON_ONCE(1);
2811 return;
2812 }
2813 apic = find_isa_irq_apic(8, mp_INT);
2814 if (apic == -1) {
2815 WARN_ON_ONCE(1);
2816 return;
2817 }
2818
2819 entry0 = ioapic_read_entry(apic, pin);
2820 clear_IO_APIC_pin(apic, pin);
2821
2822 memset(&entry1, 0, sizeof(entry1));
2823
2824 entry1.dest_mode = 0; /* physical delivery */
2825 entry1.mask = 0; /* unmask IRQ now */
2826 entry1.dest = hard_smp_processor_id();
2827 entry1.delivery_mode = dest_ExtINT;
2828 entry1.polarity = entry0.polarity;
2829 entry1.trigger = 0;
2830 entry1.vector = 0;
2831
2832 ioapic_write_entry(apic, pin, entry1);
2833
2834 save_control = CMOS_READ(RTC_CONTROL);
2835 save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
2836 CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
2837 RTC_FREQ_SELECT);
2838 CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
2839
2840 i = 100;
2841 while (i-- > 0) {
2842 mdelay(10);
2843 if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
2844 i -= 10;
2845 }
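/*
 * Timing note (illustrative): the 0x6 written to RTC_FREQ_SELECT above
 * selects 1024Hz periodic interrupts; the loop then polls for up to
 * ~1s (100 x 10ms), each observed RTC_PF shortening the wait by a
 * further 100ms.
 */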
2846
2847 CMOS_WRITE(save_control, RTC_CONTROL);
2848 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
2849 clear_IO_APIC_pin(apic, pin);
2850
2851 ioapic_write_entry(apic, pin, entry0);
2852 }
2853
2854 static int disable_timer_pin_1 __initdata;
2855 /* Actually the next is obsolete, but keep it for paranoid reasons -AK */
2856 static int __init disable_timer_pin_setup(char *arg)
2857 {
2858 disable_timer_pin_1 = 1;
2859 return 0;
2860 }
2861 early_param("disable_timer_pin_1", disable_timer_pin_setup);
2862
2863 int timer_through_8259 __initdata;
2864
2865 /*
2866 * This code may look a bit paranoid, but it's supposed to cooperate with
2867 * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
2868 * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
2869 * fanatically on his truly buggy board.
2870 *
2871 * FIXME: really need to revamp this for all platforms.
2872 */
2873 static inline void __init check_timer(void)
2874 {
2875 struct irq_desc *desc = irq_to_desc(0);
2876 struct irq_cfg *cfg = desc->chip_data;
2877 int node = cpu_to_node(boot_cpu_id);
2878 int apic1, pin1, apic2, pin2;
2879 unsigned long flags;
2880 int no_pin1 = 0;
2881
2882 local_irq_save(flags);
2883
2884 /*
2885 * get/set the timer IRQ vector:
2886 */
2887 disable_8259A_irq(0);
2888 assign_irq_vector(0, cfg, apic->target_cpus());
2889
2890 /*
2891 * As IRQ0 is to be enabled in the 8259A, the virtual
2892 * wire has to be disabled in the local APIC. Also
2893 * timer interrupts need to be acknowledged manually in
2894 * the 8259A for the i82489DX when using the NMI
2895 * watchdog as that APIC treats NMIs as level-triggered.
2896 * The AEOI mode will finish them in the 8259A
2897 * automatically.
2898 */
2899 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
2900 init_8259A(1);
2901 #ifdef CONFIG_X86_32
2902 {
2903 unsigned int ver;
2904
2905 ver = apic_read(APIC_LVR);
2906 ver = GET_APIC_VERSION(ver);
2907 timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver));
2908 }
2909 #endif
2910
2911 pin1 = find_isa_irq_pin(0, mp_INT);
2912 apic1 = find_isa_irq_apic(0, mp_INT);
2913 pin2 = ioapic_i8259.pin;
2914 apic2 = ioapic_i8259.apic;
2915
2916 apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
2917 "apic1=%d pin1=%d apic2=%d pin2=%d\n",
2918 cfg->vector, apic1, pin1, apic2, pin2);
2919
2920 /*
2921 * Some BIOS writers are clueless and report the ExtINTA
2922 * I/O APIC input from the cascaded 8259A as the timer
2923 * interrupt input. So just in case, if only one pin
2924 * was found above, try it both directly and through the
2925 * 8259A.
2926 */
2927 if (pin1 == -1) {
2928 if (intr_remapping_enabled)
2929 panic("BIOS bug: timer not connected to IO-APIC");
2930 pin1 = pin2;
2931 apic1 = apic2;
2932 no_pin1 = 1;
2933 } else if (pin2 == -1) {
2934 pin2 = pin1;
2935 apic2 = apic1;
2936 }
2937
2938 if (pin1 != -1) {
2939 /*
2940 * Ok, does IRQ0 through the IOAPIC work?
2941 */
2942 if (no_pin1) {
2943 add_pin_to_irq_node(cfg, node, apic1, pin1);
2944 setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
2945 } else {
2946 /* For edge trigger, setup_IO_APIC_irq already
2947 * leaves it unmasked, so we only need to unmask
2948 * if it is level-triggered.
2949 * Do we really have a level-triggered timer?
2950 */
2951 int idx;
2952 idx = find_irq_entry(apic1, pin1, mp_INT);
2953 if (idx != -1 && irq_trigger(idx))
2954 unmask_IO_APIC_irq_desc(desc);
2955 }
2956 if (timer_irq_works()) {
2957 if (nmi_watchdog == NMI_IO_APIC) {
2958 setup_nmi();
2959 enable_8259A_irq(0);
2960 }
2961 if (disable_timer_pin_1 > 0)
2962 clear_IO_APIC_pin(0, pin1);
2963 goto out;
2964 }
2965 if (intr_remapping_enabled)
2966 panic("timer doesn't work through Interrupt-remapped IO-APIC");
2967 local_irq_disable();
2968 clear_IO_APIC_pin(apic1, pin1);
2969 if (!no_pin1)
2970 apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
2971 "8254 timer not connected to IO-APIC\n");
2972
2973 apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
2974 "(IRQ0) through the 8259A ...\n");
2975 apic_printk(APIC_QUIET, KERN_INFO
2976 "..... (found apic %d pin %d) ...\n", apic2, pin2);
2977 /*
2978 * legacy devices should be connected to IO APIC #0
2979 */
2980 replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2);
2981 setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
2982 enable_8259A_irq(0);
2983 if (timer_irq_works()) {
2984 apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
2985 timer_through_8259 = 1;
2986 if (nmi_watchdog == NMI_IO_APIC) {
2987 disable_8259A_irq(0);
2988 setup_nmi();
2989 enable_8259A_irq(0);
2990 }
2991 goto out;
2992 }
2993 /*
2994 * Cleanup, just in case ...
2995 */
2996 local_irq_disable();
2997 disable_8259A_irq(0);
2998 clear_IO_APIC_pin(apic2, pin2);
2999 apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
3000 }
3001
3002 if (nmi_watchdog == NMI_IO_APIC) {
3003 apic_printk(APIC_QUIET, KERN_WARNING "timer doesn't work "
3004 "through the IO-APIC - disabling NMI Watchdog!\n");
3005 nmi_watchdog = NMI_NONE;
3006 }
3007 #ifdef CONFIG_X86_32
3008 timer_ack = 0;
3009 #endif
3010
3011 apic_printk(APIC_QUIET, KERN_INFO
3012 "...trying to set up timer as Virtual Wire IRQ...\n");
3013
3014 lapic_register_intr(0, desc);
3015 apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */
3016 enable_8259A_irq(0);
3017
3018 if (timer_irq_works()) {
3019 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
3020 goto out;
3021 }
3022 local_irq_disable();
3023 disable_8259A_irq(0);
3024 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
3025 apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");
3026
3027 apic_printk(APIC_QUIET, KERN_INFO
3028 "...trying to set up timer as ExtINT IRQ...\n");
3029
3030 init_8259A(0);
3031 make_8259A_irq(0);
3032 apic_write(APIC_LVT0, APIC_DM_EXTINT);
3033
3034 unlock_ExtINT_logic();
3035
3036 if (timer_irq_works()) {
3037 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
3038 goto out;
3039 }
3040 local_irq_disable();
3041 apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
3042 panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
3043 "report. Then try booting with the 'noapic' option.\n");
3044 out:
3045 local_irq_restore(flags);
3046 }
3047
3048 /*
3049 * Traditionally ISA IRQ2 is the cascade IRQ, and is not available
3050 * to devices. However there may be an I/O APIC pin available for
3051 * this interrupt regardless. The pin may be left unconnected, but
3052 * typically it will be reused as an ExtINT cascade interrupt for
3053 * the master 8259A. In the MPS case such a pin will normally be
3054 * reported as an ExtINT interrupt in the MP table. With ACPI
3055 * there is no provision for ExtINT interrupts, and in the absence
3056 * of an override it would be treated as an ordinary ISA I/O APIC
3057 * interrupt, that is edge-triggered and unmasked by default. We
3058 * used to do this, but it caused problems on some systems because
3059 * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using
3060 * the same ExtINT cascade interrupt to drive the local APIC of the
3061 * bootstrap processor. Therefore we refrain from routing IRQ2 to
3062 * the I/O APIC in all cases now. No actual device should request
3063 * it anyway. --macro
3064 */
3065 #define PIC_IRQS (1UL << PIC_CASCADE_IR)
3066
3067 void __init setup_IO_APIC(void)
3068 {
3069
3070 /*
3071 * calling enable_IO_APIC() is moved to setup_local_APIC for BP
3072 */
3073 io_apic_irqs = nr_legacy_irqs ? ~PIC_IRQS : ~0UL;
3074
3075 apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
3076 /*
3077 * Set up IO-APIC IRQ routing.
3078 */
3079 x86_init.mpparse.setup_ioapic_ids();
3080
3081 sync_Arb_IDs();
3082 setup_IO_APIC_irqs();
3083 init_IO_APIC_traps();
3084 if (nr_legacy_irqs)
3085 check_timer();
3086 }
3087
3088 /*
3089 * Called after all the initialization is done. If we didn't find any
3090 * APIC bugs then we can allow the modify fast path.
3091 */
3092
3093 static int __init io_apic_bug_finalize(void)
3094 {
3095 if (sis_apic_bug == -1)
3096 sis_apic_bug = 0;
3097 return 0;
3098 }
3099
3100 late_initcall(io_apic_bug_finalize);
3101
3102 struct sysfs_ioapic_data {
3103 struct sys_device dev;
3104 struct IO_APIC_route_entry entry[0];
3105 };
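/*
 * entry[0] is a zero-length (flexible) array: ioapic_init_sysfs()
 * below allocates sizeof(struct sys_device) plus one
 * IO_APIC_route_entry per redirection register, so entry[] may be
 * indexed up to nr_ioapic_registers[id] - 1.
 */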
3106 static struct sysfs_ioapic_data *mp_ioapic_data[MAX_IO_APICS];
3107
3108 static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
3109 {
3110 struct IO_APIC_route_entry *entry;
3111 struct sysfs_ioapic_data *data;
3112 int i;
3113
3114 data = container_of(dev, struct sysfs_ioapic_data, dev);
3115 entry = data->entry;
3116 for (i = 0; i < nr_ioapic_registers[dev->id]; i++, entry++)
3117 *entry = ioapic_read_entry(dev->id, i);
3118
3119 return 0;
3120 }
3121
3122 static int ioapic_resume(struct sys_device *dev)
3123 {
3124 struct IO_APIC_route_entry *entry;
3125 struct sysfs_ioapic_data *data;
3126 unsigned long flags;
3127 union IO_APIC_reg_00 reg_00;
3128 int i;
3129
3130 data = container_of(dev, struct sysfs_ioapic_data, dev);
3131 entry = data->entry;
3132
3133 spin_lock_irqsave(&ioapic_lock, flags);
3134 reg_00.raw = io_apic_read(dev->id, 0);
3135 if (reg_00.bits.ID != mp_ioapics[dev->id].apicid) {
3136 reg_00.bits.ID = mp_ioapics[dev->id].apicid;
3137 io_apic_write(dev->id, 0, reg_00.raw);
3138 }
3139 spin_unlock_irqrestore(&ioapic_lock, flags);
3140 for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
3141 ioapic_write_entry(dev->id, i, entry[i]);
3142
3143 return 0;
3144 }
3145
3146 static struct sysdev_class ioapic_sysdev_class = {
3147 .name = "ioapic",
3148 .suspend = ioapic_suspend,
3149 .resume = ioapic_resume,
3150 };
3151
3152 static int __init ioapic_init_sysfs(void)
3153 {
3154 struct sys_device *dev;
3155 int i, size, error;
3156
3157 error = sysdev_class_register(&ioapic_sysdev_class);
3158 if (error)
3159 return error;
3160
3161 for (i = 0; i < nr_ioapics; i++) {
3162 size = sizeof(struct sys_device) + nr_ioapic_registers[i]
3163 * sizeof(struct IO_APIC_route_entry);
3164 mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL);
3165 if (!mp_ioapic_data[i]) {
3166 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
3167 continue;
3168 }
3169 dev = &mp_ioapic_data[i]->dev;
3170 dev->id = i;
3171 dev->cls = &ioapic_sysdev_class;
3172 error = sysdev_register(dev);
3173 if (error) {
3174 kfree(mp_ioapic_data[i]);
3175 mp_ioapic_data[i] = NULL;
3176 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
3177 continue;
3178 }
3179 }
3180
3181 return 0;
3182 }
3183
3184 device_initcall(ioapic_init_sysfs);
3185
3186 /*
3187 * Dynamic irq allocate and deallocation
3188 */
3189 unsigned int create_irq_nr(unsigned int irq_want, int node)
3190 {
3191 /* Allocate an unused irq */
3192 unsigned int irq;
3193 unsigned int new;
3194 unsigned long flags;
3195 struct irq_cfg *cfg_new = NULL;
3196 struct irq_desc *desc_new = NULL;
3197
3198 irq = 0;
3199 if (irq_want < nr_irqs_gsi)
3200 irq_want = nr_irqs_gsi;
3201
3202 spin_lock_irqsave(&vector_lock, flags);
3203 for (new = irq_want; new < nr_irqs; new++) {
3204 desc_new = irq_to_desc_alloc_node(new, node);
3205 if (!desc_new) {
3206 printk(KERN_INFO "cannot get irq_desc for %d\n", new);
3207 continue;
3208 }
3209 cfg_new = desc_new->chip_data;
3210
3211 if (cfg_new->vector != 0)
3212 continue;
3213
3214 desc_new = move_irq_desc(desc_new, node);
3215 cfg_new = desc_new->chip_data;
3216
3217 if (__assign_irq_vector(new, cfg_new, apic->target_cpus()) == 0)
3218 irq = new;
3219 break;
3220 }
3221 spin_unlock_irqrestore(&vector_lock, flags);
3222
3223 if (irq > 0) {
3224 dynamic_irq_init(irq);
3225 /* restore it, in case dynamic_irq_init cleared it */
3226 if (desc_new)
3227 desc_new->chip_data = cfg_new;
3228 }
3229 return irq;
3230 }
3231
3232 int create_irq(void)
3233 {
3234 int node = cpu_to_node(boot_cpu_id);
3235 unsigned int irq_want;
3236 int irq;
3237
3238 irq_want = nr_irqs_gsi;
3239 irq = create_irq_nr(irq_want, node);
3240
3241 if (irq == 0)
3242 irq = -1;
3243
3244 return irq;
3245 }
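/*
 * Illustrative usage sketch for the create_irq()/destroy_irq() pair;
 * my_handler and my_dev are hypothetical:
 *
 *	int irq = create_irq();
 *	if (irq < 0)
 *		return -ENOSPC;
 *	if (request_irq(irq, my_handler, 0, "my-dev", my_dev)) {
 *		destroy_irq(irq);
 *		return -EBUSY;
 *	}
 *	...
 *	free_irq(irq, my_dev);
 *	destroy_irq(irq);
 */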
3246
3247 void destroy_irq(unsigned int irq)
3248 {
3249 unsigned long flags;
3250 struct irq_cfg *cfg;
3251 struct irq_desc *desc;
3252
3253 /* store it, in case dynamic_irq_cleanup clears it */
3254 desc = irq_to_desc(irq);
3255 cfg = desc->chip_data;
3256 dynamic_irq_cleanup(irq);
3257 /* reconnect the irq_cfg */
3258 desc->chip_data = cfg;
3259
3260 free_irte(irq);
3261 spin_lock_irqsave(&vector_lock, flags);
3262 __clear_irq_vector(irq, cfg);
3263 spin_unlock_irqrestore(&vector_lock, flags);
3264 }
3265
3266 /*
3267 * MSI message composition
3268 */
3269 #ifdef CONFIG_PCI_MSI
3270 static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
3271 struct msi_msg *msg, u8 hpet_id)
3272 {
3273 struct irq_cfg *cfg;
3274 int err;
3275 unsigned dest;
3276
3277 if (disable_apic)
3278 return -ENXIO;
3279
3280 cfg = irq_cfg(irq);
3281 err = assign_irq_vector(irq, cfg, apic->target_cpus());
3282 if (err)
3283 return err;
3284
3285 dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
3286
3287 if (irq_remapped(irq)) {
3288 struct irte irte;
3289 int ir_index;
3290 u16 sub_handle;
3291
3292 ir_index = map_irq_to_irte_handle(irq, &sub_handle);
3293 BUG_ON(ir_index == -1);
3294
3295 memset(&irte, 0, sizeof(irte));
3296
3297 irte.present = 1;
3298 irte.dst_mode = apic->irq_dest_mode;
3299 irte.trigger_mode = 0; /* edge */
3300 irte.dlvry_mode = apic->irq_delivery_mode;
3301 irte.vector = cfg->vector;
3302 irte.dest_id = IRTE_DEST(dest);
3303
3304 /* Set source-id of interrupt request */
3305 if (pdev)
3306 set_msi_sid(&irte, pdev);
3307 else
3308 set_hpet_sid(&irte, hpet_id);
3309
3310 modify_irte(irq, &irte);
3311
3312 msg->address_hi = MSI_ADDR_BASE_HI;
3313 msg->data = sub_handle;
3314 msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
3315 MSI_ADDR_IR_SHV |
3316 MSI_ADDR_IR_INDEX1(ir_index) |
3317 MSI_ADDR_IR_INDEX2(ir_index);
3318 } else {
3319 if (x2apic_enabled())
3320 msg->address_hi = MSI_ADDR_BASE_HI |
3321 MSI_ADDR_EXT_DEST_ID(dest);
3322 else
3323 msg->address_hi = MSI_ADDR_BASE_HI;
3324
3325 msg->address_lo =
3326 MSI_ADDR_BASE_LO |
3327 ((apic->irq_dest_mode == 0) ?
3328 MSI_ADDR_DEST_MODE_PHYSICAL:
3329 MSI_ADDR_DEST_MODE_LOGICAL) |
3330 ((apic->irq_delivery_mode != dest_LowestPrio) ?
3331 MSI_ADDR_REDIRECTION_CPU:
3332 MSI_ADDR_REDIRECTION_LOWPRI) |
3333 MSI_ADDR_DEST_ID(dest);
3334
3335 msg->data =
3336 MSI_DATA_TRIGGER_EDGE |
3337 MSI_DATA_LEVEL_ASSERT |
3338 ((apic->irq_delivery_mode != dest_LowestPrio) ?
3339 MSI_DATA_DELIVERY_FIXED:
3340 MSI_DATA_DELIVERY_LOWPRI) |
3341 MSI_DATA_VECTOR(cfg->vector);
3342 }
3343 return err;
3344 }
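/*
 * Worked example (illustrative, non-remapped case): with physical
 * destination mode and fixed delivery to APIC ID 1 with vector 0x31,
 * the above composes address_lo == 0xfee01000
 * (MSI_ADDR_BASE_LO | MSI_ADDR_DEST_ID(1)) and data == 0x4031
 * (MSI_DATA_LEVEL_ASSERT | MSI_DATA_VECTOR(0x31)).
 */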
3345
3346 #ifdef CONFIG_SMP
3347 static int set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
3348 {
3349 struct irq_desc *desc = irq_to_desc(irq);
3350 struct irq_cfg *cfg;
3351 struct msi_msg msg;
3352 unsigned int dest;
3353
3354 dest = set_desc_affinity(desc, mask);
3355 if (dest == BAD_APICID)
3356 return -1;
3357
3358 cfg = desc->chip_data;
3359
3360 read_msi_msg_desc(desc, &msg);
3361
3362 msg.data &= ~MSI_DATA_VECTOR_MASK;
3363 msg.data |= MSI_DATA_VECTOR(cfg->vector);
3364 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
3365 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3366
3367 write_msi_msg_desc(desc, &msg);
3368
3369 return 0;
3370 }
3371 #ifdef CONFIG_INTR_REMAP
3372 /*
3373 * Migrate the MSI irq to another cpumask. This migration is
3374 * done in the process context using interrupt-remapping hardware.
3375 */
3376 static int
3377 ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
3378 {
3379 struct irq_desc *desc = irq_to_desc(irq);
3380 struct irq_cfg *cfg = desc->chip_data;
3381 unsigned int dest;
3382 struct irte irte;
3383
3384 if (get_irte(irq, &irte))
3385 return -1;
3386
3387 dest = set_desc_affinity(desc, mask);
3388 if (dest == BAD_APICID)
3389 return -1;
3390
3391 irte.vector = cfg->vector;
3392 irte.dest_id = IRTE_DEST(dest);
3393
3394 /*
3395 * atomically update the IRTE with the new destination and vector.
3396 */
3397 modify_irte(irq, &irte);
3398
3399 /*
3400 * After this point, all the interrupts will start arriving
3401 * at the new destination. So, time to cleanup the previous
3402 * vector allocation.
3403 */
3404 if (cfg->move_in_progress)
3405 send_cleanup_vector(cfg);
3406
3407 return 0;
3408 }
3409
3410 #endif
3411 #endif /* CONFIG_SMP */
3412
3413 /*
3414 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
3415 * which implement the MSI or MSI-X Capability Structure.
3416 */
3417 static struct irq_chip msi_chip = {
3418 .name = "PCI-MSI",
3419 .unmask = unmask_msi_irq,
3420 .mask = mask_msi_irq,
3421 .ack = ack_apic_edge,
3422 #ifdef CONFIG_SMP
3423 .set_affinity = set_msi_irq_affinity,
3424 #endif
3425 .retrigger = ioapic_retrigger_irq,
3426 };
3427
3428 static struct irq_chip msi_ir_chip = {
3429 .name = "IR-PCI-MSI",
3430 .unmask = unmask_msi_irq,
3431 .mask = mask_msi_irq,
3432 #ifdef CONFIG_INTR_REMAP
3433 .ack = ir_ack_apic_edge,
3434 #ifdef CONFIG_SMP
3435 .set_affinity = ir_set_msi_irq_affinity,
3436 #endif
3437 #endif
3438 .retrigger = ioapic_retrigger_irq,
3439 };
3440
3441 /*
3442 * Map the PCI dev to the corresponding remapping hardware unit
3443 * and allocate 'nvec' consecutive interrupt-remapping table entries
3444 * in it.
3445 */
3446 static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
3447 {
3448 struct intel_iommu *iommu;
3449 int index;
3450
3451 iommu = map_dev_to_ir(dev);
3452 if (!iommu) {
3453 printk(KERN_ERR
3454 "Unable to map PCI %s to iommu\n", pci_name(dev));
3455 return -ENOENT;
3456 }
3457
3458 index = alloc_irte(iommu, irq, nvec);
3459 if (index < 0) {
3460 printk(KERN_ERR
3461 "Unable to allocate %d IRTE for PCI %s\n", nvec,
3462 pci_name(dev));
3463 return -ENOSPC;
3464 }
3465 return index;
3466 }
3467
3468 static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
3469 {
3470 int ret;
3471 struct msi_msg msg;
3472
3473 ret = msi_compose_msg(dev, irq, &msg, -1);
3474 if (ret < 0)
3475 return ret;
3476
3477 set_irq_msi(irq, msidesc);
3478 write_msi_msg(irq, &msg);
3479
3480 if (irq_remapped(irq)) {
3481 struct irq_desc *desc = irq_to_desc(irq);
3482 /*
3483 * irq migration in process context
3484 */
3485 desc->status |= IRQ_MOVE_PCNTXT;
3486 set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge");
3487 } else
3488 set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");
3489
3490 dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq);
3491
3492 return 0;
3493 }
3494
3495 int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
3496 {
3497 unsigned int irq;
3498 int ret, sub_handle;
3499 struct msi_desc *msidesc;
3500 unsigned int irq_want;
3501 struct intel_iommu *iommu = NULL;
3502 int index = 0;
3503 int node;
3504
3505 /* x86 doesn't support multiple MSI yet */
3506 if (type == PCI_CAP_ID_MSI && nvec > 1)
3507 return 1;
3508
3509 node = dev_to_node(&dev->dev);
3510 irq_want = nr_irqs_gsi;
3511 sub_handle = 0;
3512 list_for_each_entry(msidesc, &dev->msi_list, list) {
3513 irq = create_irq_nr(irq_want, node);
3514 if (irq == 0)
3515 return -1;
3516 irq_want = irq + 1;
3517 if (!intr_remapping_enabled)
3518 goto no_ir;
3519
3520 if (!sub_handle) {
3521 /*
3522 * allocate the consecutive block of IRTEs
3523 * for 'nvec'
3524 */
3525 index = msi_alloc_irte(dev, irq, nvec);
3526 if (index < 0) {
3527 ret = index;
3528 goto error;
3529 }
3530 } else {
3531 iommu = map_dev_to_ir(dev);
3532 if (!iommu) {
3533 ret = -ENOENT;
3534 goto error;
3535 }
3536 /*
3537 * set up the mapping between the irq and the IRTE
3538 * base index, with the sub_handle pointing to the
3539 * appropriate interrupt-remapping table entry.
3540 */
3541 set_irte_irq(irq, iommu, index, sub_handle);
3542 }
3543 no_ir:
3544 ret = setup_msi_irq(dev, msidesc, irq);
3545 if (ret < 0)
3546 goto error;
3547 sub_handle++;
3548 }
3549 return 0;
3550
3551 error:
3552 destroy_irq(irq);
3553 return ret;
3554 }
3555
3556 void arch_teardown_msi_irq(unsigned int irq)
3557 {
3558 destroy_irq(irq);
3559 }
3560
3561 #if defined (CONFIG_DMAR) || defined (CONFIG_INTR_REMAP)
3562 #ifdef CONFIG_SMP
3563 static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
3564 {
3565 struct irq_desc *desc = irq_to_desc(irq);
3566 struct irq_cfg *cfg;
3567 struct msi_msg msg;
3568 unsigned int dest;
3569
3570 dest = set_desc_affinity(desc, mask);
3571 if (dest == BAD_APICID)
3572 return -1;
3573
3574 cfg = desc->chip_data;
3575
3576 dmar_msi_read(irq, &msg);
3577
3578 msg.data &= ~MSI_DATA_VECTOR_MASK;
3579 msg.data |= MSI_DATA_VECTOR(cfg->vector);
3580 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
3581 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3582
3583 dmar_msi_write(irq, &msg);
3584
3585 return 0;
3586 }
3587
3588 #endif /* CONFIG_SMP */
3589
3590 static struct irq_chip dmar_msi_type = {
3591 .name = "DMAR_MSI",
3592 .unmask = dmar_msi_unmask,
3593 .mask = dmar_msi_mask,
3594 .ack = ack_apic_edge,
3595 #ifdef CONFIG_SMP
3596 .set_affinity = dmar_msi_set_affinity,
3597 #endif
3598 .retrigger = ioapic_retrigger_irq,
3599 };
3600
3601 int arch_setup_dmar_msi(unsigned int irq)
3602 {
3603 int ret;
3604 struct msi_msg msg;
3605
3606 ret = msi_compose_msg(NULL, irq, &msg, -1);
3607 if (ret < 0)
3608 return ret;
3609 dmar_msi_write(irq, &msg);
3610 set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
3611 "edge");
3612 return 0;
3613 }
3614 #endif
3615
3616 #ifdef CONFIG_HPET_TIMER
3617
3618 #ifdef CONFIG_SMP
3619 static int hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
3620 {
3621 struct irq_desc *desc = irq_to_desc(irq);
3622 struct irq_cfg *cfg;
3623 struct msi_msg msg;
3624 unsigned int dest;
3625
3626 dest = set_desc_affinity(desc, mask);
3627 if (dest == BAD_APICID)
3628 return -1;
3629
3630 cfg = desc->chip_data;
3631
3632 hpet_msi_read(irq, &msg);
3633
3634 msg.data &= ~MSI_DATA_VECTOR_MASK;
3635 msg.data |= MSI_DATA_VECTOR(cfg->vector);
3636 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
3637 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3638
3639 hpet_msi_write(irq, &msg);
3640
3641 return 0;
3642 }
3643
3644 #endif /* CONFIG_SMP */
3645
3646 static struct irq_chip ir_hpet_msi_type = {
3647 .name = "IR-HPET_MSI",
3648 .unmask = hpet_msi_unmask,
3649 .mask = hpet_msi_mask,
3650 #ifdef CONFIG_INTR_REMAP
3651 .ack = ir_ack_apic_edge,
3652 #ifdef CONFIG_SMP
3653 .set_affinity = ir_set_msi_irq_affinity,
3654 #endif
3655 #endif
3656 .retrigger = ioapic_retrigger_irq,
3657 };
3658
3659 static struct irq_chip hpet_msi_type = {
3660 .name = "HPET_MSI",
3661 .unmask = hpet_msi_unmask,
3662 .mask = hpet_msi_mask,
3663 .ack = ack_apic_edge,
3664 #ifdef CONFIG_SMP
3665 .set_affinity = hpet_msi_set_affinity,
3666 #endif
3667 .retrigger = ioapic_retrigger_irq,
3668 };
3669
3670 int arch_setup_hpet_msi(unsigned int irq, unsigned int id)
3671 {
3672 int ret;
3673 struct msi_msg msg;
3674 struct irq_desc *desc = irq_to_desc(irq);
3675
3676 if (intr_remapping_enabled) {
3677 struct intel_iommu *iommu = map_hpet_to_ir(id);
3678 int index;
3679
3680 if (!iommu)
3681 return -1;
3682
3683 index = alloc_irte(iommu, irq, 1);
3684 if (index < 0)
3685 return -1;
3686 }
3687
3688 ret = msi_compose_msg(NULL, irq, &msg, id);
3689 if (ret < 0)
3690 return ret;
3691
3692 hpet_msi_write(irq, &msg);
3693 desc->status |= IRQ_MOVE_PCNTXT;
3694 if (irq_remapped(irq))
3695 set_irq_chip_and_handler_name(irq, &ir_hpet_msi_type,
3696 handle_edge_irq, "edge");
3697 else
3698 set_irq_chip_and_handler_name(irq, &hpet_msi_type,
3699 handle_edge_irq, "edge");
3700
3701 return 0;
3702 }
3703 #endif
3704
3705 #endif /* CONFIG_PCI_MSI */
3706 /*
3707 * Hypertransport interrupt support
3708 */
3709 #ifdef CONFIG_HT_IRQ
3710
3711 #ifdef CONFIG_SMP
3712
3713 static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
3714 {
3715 struct ht_irq_msg msg;
3716 fetch_ht_irq_msg(irq, &msg);
3717
3718 msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
3719 msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
3720
3721 msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
3722 msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
3723
3724 write_ht_irq_msg(irq, &msg);
3725 }
3726
3727 static int set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask)
3728 {
3729 struct irq_desc *desc = irq_to_desc(irq);
3730 struct irq_cfg *cfg;
3731 unsigned int dest;
3732
3733 dest = set_desc_affinity(desc, mask);
3734 if (dest == BAD_APICID)
3735 return -1;
3736
3737 cfg = desc->chip_data;
3738
3739 target_ht_irq(irq, dest, cfg->vector);
3740
3741 return 0;
3742 }
3743
3744 #endif
3745
3746 static struct irq_chip ht_irq_chip = {
3747 .name = "PCI-HT",
3748 .mask = mask_ht_irq,
3749 .unmask = unmask_ht_irq,
3750 .ack = ack_apic_edge,
3751 #ifdef CONFIG_SMP
3752 .set_affinity = set_ht_irq_affinity,
3753 #endif
3754 .retrigger = ioapic_retrigger_irq,
3755 };
3756
3757 int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
3758 {
3759 struct irq_cfg *cfg;
3760 int err;
3761
3762 if (disable_apic)
3763 return -ENXIO;
3764
3765 cfg = irq_cfg(irq);
3766 err = assign_irq_vector(irq, cfg, apic->target_cpus());
3767 if (!err) {
3768 struct ht_irq_msg msg;
3769 unsigned dest;
3770
3771 dest = apic->cpu_mask_to_apicid_and(cfg->domain,
3772 apic->target_cpus());
3773
3774 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
3775
3776 msg.address_lo =
3777 HT_IRQ_LOW_BASE |
3778 HT_IRQ_LOW_DEST_ID(dest) |
3779 HT_IRQ_LOW_VECTOR(cfg->vector) |
3780 ((apic->irq_dest_mode == 0) ?
3781 HT_IRQ_LOW_DM_PHYSICAL :
3782 HT_IRQ_LOW_DM_LOGICAL) |
3783 HT_IRQ_LOW_RQEOI_EDGE |
3784 ((apic->irq_delivery_mode != dest_LowestPrio) ?
3785 HT_IRQ_LOW_MT_FIXED :
3786 HT_IRQ_LOW_MT_ARBITRATED) |
3787 HT_IRQ_LOW_IRQ_MASKED;
3788
3789 write_ht_irq_msg(irq, &msg);
3790
3791 set_irq_chip_and_handler_name(irq, &ht_irq_chip,
3792 handle_edge_irq, "edge");
3793
3794 dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq);
3795 }
3796 return err;
3797 }
3798 #endif /* CONFIG_HT_IRQ */
3799
3800 int __init io_apic_get_redir_entries (int ioapic)
3801 {
3802 union IO_APIC_reg_01 reg_01;
3803 unsigned long flags;
3804
3805 spin_lock_irqsave(&ioapic_lock, flags);
3806 reg_01.raw = io_apic_read(ioapic, 1);
3807 spin_unlock_irqrestore(&ioapic_lock, flags);
3808
3809 return reg_01.bits.entries;
3810 }
3811
3812 void __init probe_nr_irqs_gsi(void)
3813 {
3814 int nr = 0;
3815
3816 nr = acpi_probe_gsi();
3817 if (nr > nr_irqs_gsi) {
3818 nr_irqs_gsi = nr;
3819 } else {
3820 /* for acpi=off or acpi is not compiled in */
3821 int idx;
3822
3823 nr = 0;
3824 for (idx = 0; idx < nr_ioapics; idx++)
3825 nr += io_apic_get_redir_entries(idx) + 1;
3826
3827 if (nr > nr_irqs_gsi)
3828 nr_irqs_gsi = nr;
3829 }
3830
3831 printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi);
3832 }
3833
3834 #ifdef CONFIG_SPARSE_IRQ
3835 int __init arch_probe_nr_irqs(void)
3836 {
3837 int nr;
3838
3839 if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
3840 nr_irqs = NR_VECTORS * nr_cpu_ids;
3841
3842 nr = nr_irqs_gsi + 8 * nr_cpu_ids;
3843 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
3844 /*
3845 * for MSI and HT dyn irq
3846 */
3847 nr += nr_irqs_gsi * 16;
3848 #endif
3849 if (nr < nr_irqs)
3850 nr_irqs = nr;
3851
3852 return 0;
3853 }
3854 #endif
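/*
 * Illustrative sizing: with three 24-pin IO-APICs (nr_irqs_gsi == 72)
 * and 8 possible cpus, the cap above works out to
 * 72 + 8 * 8 + 72 * 16 == 1288 irq descriptors.
 */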
3855
3856 static int __io_apic_set_pci_routing(struct device *dev, int irq,
3857 struct io_apic_irq_attr *irq_attr)
3858 {
3859 struct irq_desc *desc;
3860 struct irq_cfg *cfg;
3861 int node;
3862 int ioapic, pin;
3863 int trigger, polarity;
3864
3865 ioapic = irq_attr->ioapic;
3866 if (!IO_APIC_IRQ(irq)) {
3867 apic_printk(APIC_QUIET, KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
3868 ioapic);
3869 return -EINVAL;
3870 }
3871
3872 if (dev)
3873 node = dev_to_node(dev);
3874 else
3875 node = cpu_to_node(boot_cpu_id);
3876
3877 desc = irq_to_desc_alloc_node(irq, node);
3878 if (!desc) {
3879 printk(KERN_INFO "cannot get irq_desc %d\n", irq);
3880 return 0;
3881 }
3882
3883 pin = irq_attr->ioapic_pin;
3884 trigger = irq_attr->trigger;
3885 polarity = irq_attr->polarity;
3886
3887 /*
3888 * IRQs < 16 are already in the irq_2_pin[] map
3889 */
3890 if (irq >= nr_legacy_irqs) {
3891 cfg = desc->chip_data;
3892 if (add_pin_to_irq_node_nopanic(cfg, node, ioapic, pin)) {
3893 printk(KERN_INFO "cannot add pin %d for irq %d\n",
3894 pin, irq);
3895 return 0;
3896 }
3897 }
3898
3899 setup_IO_APIC_irq(ioapic, pin, irq, desc, trigger, polarity);
3900
3901 return 0;
3902 }
3903
3904 int io_apic_set_pci_routing(struct device *dev, int irq,
3905 struct io_apic_irq_attr *irq_attr)
3906 {
3907 int ioapic, pin;
3908 /*
3909 * Avoid pin reprogramming. PRTs typically include entries
3910 * with redundant pin->gsi mappings (but unique PCI devices);
3911 * we only program the IOAPIC on the first.
3912 */
3913 ioapic = irq_attr->ioapic;
3914 pin = irq_attr->ioapic_pin;
3915 if (test_bit(pin, mp_ioapic_routing[ioapic].pin_programmed)) {
3916 pr_debug("Pin %d-%d already programmed\n",
3917 mp_ioapics[ioapic].apicid, pin);
3918 return 0;
3919 }
3920 set_bit(pin, mp_ioapic_routing[ioapic].pin_programmed);
3921
3922 return __io_apic_set_pci_routing(dev, irq, irq_attr);
3923 }
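/*
 * Example: two PCI devices whose _PRT entries both route to GSI 18
 * share one IO-APIC pin; the second io_apic_set_pci_routing() call
 * sees pin_programmed already set and returns without reprogramming
 * the RTE.
 */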
3924
3925 u8 __init io_apic_unique_id(u8 id)
3926 {
3927 #ifdef CONFIG_X86_32
3928 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
3929 !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
3930 return io_apic_get_unique_id(nr_ioapics, id);
3931 else
3932 return id;
3933 #else
3934 int i;
3935 DECLARE_BITMAP(used, 256);
3936
3937 bitmap_zero(used, 256);
3938 for (i = 0; i < nr_ioapics; i++) {
3939 struct mpc_ioapic *ia = &mp_ioapics[i];
3940 __set_bit(ia->apicid, used);
3941 }
3942 if (!test_bit(id, used))
3943 return id;
3944 return find_first_zero_bit(used, 256);
3945 #endif
3946 }
3947
3948 #ifdef CONFIG_X86_32
3949 int __init io_apic_get_unique_id(int ioapic, int apic_id)
3950 {
3951 union IO_APIC_reg_00 reg_00;
3952 static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
3953 physid_mask_t tmp;
3954 unsigned long flags;
3955 int i = 0;
3956
3957 /*
3958 * The P4 platform supports up to 256 APIC IDs on two separate APIC
3959 * buses (one for LAPICs, one for IOAPICs), where predecessors only
3960 * support up to 16 on one shared APIC bus.
3961 *
3962 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
3963 * advantage of new APIC bus architecture.
3964 */
3965
3966 if (physids_empty(apic_id_map))
3967 apic->ioapic_phys_id_map(&phys_cpu_present_map, &apic_id_map);
3968
3969 spin_lock_irqsave(&ioapic_lock, flags);
3970 reg_00.raw = io_apic_read(ioapic, 0);
3971 spin_unlock_irqrestore(&ioapic_lock, flags);
3972
3973 if (apic_id >= get_physical_broadcast()) {
3974 printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
3975 "%d\n", ioapic, apic_id, reg_00.bits.ID);
3976 apic_id = reg_00.bits.ID;
3977 }
3978
3979 /*
3980 * Every APIC in a system must have a unique ID or we get lots of nice
3981 * 'stuck on smp_invalidate_needed IPI wait' messages.
3982 */
3983 if (apic->check_apicid_used(&apic_id_map, apic_id)) {
3984
3985 for (i = 0; i < get_physical_broadcast(); i++) {
3986 if (!apic->check_apicid_used(&apic_id_map, i))
3987 break;
3988 }
3989
3990 if (i == get_physical_broadcast())
3991 panic("Max apic_id exceeded!\n");
3992
3993 printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
3994 "trying %d\n", ioapic, apic_id, i);
3995
3996 apic_id = i;
3997 }
3998
3999 apic->apicid_to_cpu_present(apic_id, &tmp);
4000 physids_or(apic_id_map, apic_id_map, tmp);
4001
4002 if (reg_00.bits.ID != apic_id) {
4003 reg_00.bits.ID = apic_id;
4004
4005 spin_lock_irqsave(&ioapic_lock, flags);
4006 io_apic_write(ioapic, 0, reg_00.raw);
4007 reg_00.raw = io_apic_read(ioapic, 0);
4008 spin_unlock_irqrestore(&ioapic_lock, flags);
4009
4010 /* Sanity check */
4011 if (reg_00.bits.ID != apic_id) {
4012 printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
4013 return -1;
4014 }
4015 }
4016
4017 apic_printk(APIC_VERBOSE, KERN_INFO
4018 "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);
4019
4020 return apic_id;
4021 }
4022 #endif
4023
4024 int __init io_apic_get_version(int ioapic)
4025 {
4026 union IO_APIC_reg_01 reg_01;
4027 unsigned long flags;
4028
4029 spin_lock_irqsave(&ioapic_lock, flags);
4030 reg_01.raw = io_apic_read(ioapic, 1);
4031 spin_unlock_irqrestore(&ioapic_lock, flags);
4032
4033 return reg_01.bits.version;
4034 }
4035
4036 int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
4037 {
4038 int i;
4039
4040 if (skip_ioapic_setup)
4041 return -1;
4042
4043 for (i = 0; i < mp_irq_entries; i++)
4044 if (mp_irqs[i].irqtype == mp_INT &&
4045 mp_irqs[i].srcbusirq == bus_irq)
4046 break;
4047 if (i >= mp_irq_entries)
4048 return -1;
4049
4050 *trigger = irq_trigger(i);
4051 *polarity = irq_polarity(i);
4052 return 0;
4053 }
4054
4055 /*
4056 * This function currently is only a helper for the i386 smp boot process,
4057 * where we need to reprogram the ioredtbls to cater for the cpus which have
4058 * come online, so the mask in all cases should simply be apic->target_cpus().
4059 */
4060 #ifdef CONFIG_SMP
4061 void __init setup_ioapic_dest(void)
4062 {
4063 int pin, ioapic = 0, irq, irq_entry;
4064 struct irq_desc *desc;
4065 const struct cpumask *mask;
4066
4067 if (skip_ioapic_setup == 1)
4068 return;
4069
4070 #ifdef CONFIG_ACPI
4071 if (!acpi_disabled && acpi_ioapic) {
4072 ioapic = mp_find_ioapic(0);
4073 if (ioapic < 0)
4074 ioapic = 0;
4075 }
4076 #endif
4077
4078 for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
4079 irq_entry = find_irq_entry(ioapic, pin, mp_INT);
4080 if (irq_entry == -1)
4081 continue;
4082 irq = pin_2_irq(irq_entry, ioapic, pin);
4083
4084 desc = irq_to_desc(irq);
4085
4086 /*
4087 * Honour affinities which have been set in early boot
4088 */
4089 if (desc->status &
4090 (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
4091 mask = desc->affinity;
4092 else
4093 mask = apic->target_cpus();
4094
4095 if (intr_remapping_enabled)
4096 set_ir_ioapic_affinity_irq_desc(desc, mask);
4097 else
4098 set_ioapic_affinity_irq_desc(desc, mask);
4099 }
4100
4101 }
4102 #endif
4103
4104 #define IOAPIC_RESOURCE_NAME_SIZE 11
4105
4106 static struct resource *ioapic_resources;
4107
4108 static struct resource * __init ioapic_setup_resources(int nr_ioapics)
4109 {
4110 unsigned long n;
4111 struct resource *res;
4112 char *mem;
4113 int i;
4114
4115 if (nr_ioapics <= 0)
4116 return NULL;
4117
4118 n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
4119 n *= nr_ioapics;
4120
4121 mem = alloc_bootmem(n);
4122 res = (void *)mem;
4123
4124 mem += sizeof(struct resource) * nr_ioapics;
4125
4126 for (i = 0; i < nr_ioapics; i++) {
4127 res[i].name = mem;
4128 res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
4129 snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i);
4130 mem += IOAPIC_RESOURCE_NAME_SIZE;
4131 }
4132
4133 ioapic_resources = res;
4134
4135 return res;
4136 }
4137
4138 void __init ioapic_init_mappings(void)
4139 {
4140 unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
4141 struct resource *ioapic_res;
4142 int i;
4143
4144 ioapic_res = ioapic_setup_resources(nr_ioapics);
4145 for (i = 0; i < nr_ioapics; i++) {
4146 if (smp_found_config) {
4147 ioapic_phys = mp_ioapics[i].apicaddr;
4148 #ifdef CONFIG_X86_32
4149 if (!ioapic_phys) {
4150 printk(KERN_ERR
4151 "WARNING: bogus zero IO-APIC "
4152 "address found in MPTABLE, "
4153 "disabling IO/APIC support!\n");
4154 smp_found_config = 0;
4155 skip_ioapic_setup = 1;
4156 goto fake_ioapic_page;
4157 }
4158 #endif
4159 } else {
4160 #ifdef CONFIG_X86_32
4161 fake_ioapic_page:
4162 #endif
4163 ioapic_phys = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
4164 ioapic_phys = __pa(ioapic_phys);
4165 }
4166 set_fixmap_nocache(idx, ioapic_phys);
4167 apic_printk(APIC_VERBOSE, "mapped IOAPIC to %08lx (%08lx)\n",
4168 __fix_to_virt(idx) + (ioapic_phys & ~PAGE_MASK),
4169 ioapic_phys);
4170 idx++;
4171
4172 ioapic_res->start = ioapic_phys;
4173 ioapic_res->end = ioapic_phys + IO_APIC_SLOT_SIZE - 1;
4174 ioapic_res++;
4175 }
4176 }
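
/*
 * Illustrative sketch, not part of the original file: once the fixmap slots
 * have been populated above, register accessors reach IO-APIC 'idx' through
 * the fixed virtual mapping plus the sub-page offset of its physical base,
 * mirroring the io_apic_base() helper defined earlier in this file.  The
 * name below is hypothetical.
 */
static inline void __iomem *example_ioapic_reg_base(int idx)
{
	return (void __iomem *)__fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
		+ (mp_ioapics[idx].apicaddr & ~PAGE_MASK);
}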
4177
4178 void __init ioapic_insert_resources(void)
4179 {
4180 int i;
4181 struct resource *r = ioapic_resources;
4182
4183 if (!r) {
4184 if (nr_ioapics > 0)
4185 printk(KERN_ERR
4186 "IO APIC resources couldn't be allocated.\n");
4187 return;
4188 }
4189
4190 for (i = 0; i < nr_ioapics; i++) {
4191 insert_resource(&iomem_resource, r);
4192 r++;
4193 }
4194 }
4195
4196 int mp_find_ioapic(int gsi)
4197 {
4198 int i = 0;
4199
4200 /* Find the IOAPIC that manages this GSI. */
4201 for (i = 0; i < nr_ioapics; i++) {
4202 if ((gsi >= mp_gsi_routing[i].gsi_base)
4203 && (gsi <= mp_gsi_routing[i].gsi_end))
4204 return i;
4205 }
4206
4207 printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
4208 return -1;
4209 }
4210
4211 int mp_find_ioapic_pin(int ioapic, int gsi)
4212 {
4213 if (WARN_ON(ioapic == -1))
4214 return -1;
4215 if (WARN_ON(gsi > mp_gsi_routing[ioapic].gsi_end))
4216 return -1;
4217
4218 return gsi - mp_gsi_routing[ioapic].gsi_base;
4219 }
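
/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * the two lookups above are typically combined to translate a GSI into an
 * (ioapic, pin) pair before a routing entry is programmed.
 */
static int __init __maybe_unused gsi_to_ioapic_and_pin(int gsi, int *pin)
{
	int ioapic = mp_find_ioapic(gsi);

	if (ioapic < 0)
		return -1;

	*pin = mp_find_ioapic_pin(ioapic, gsi);
	if (*pin < 0)
		return -1;

	return ioapic;
}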
4220
4221 static int bad_ioapic(unsigned long address)
4222 {
4223 if (nr_ioapics >= MAX_IO_APICS) {
4224 printk(KERN_WARNING "WARNING: Max # of I/O APICs (%d) exceeded "
4225 "(found %d), skipping\n", MAX_IO_APICS, nr_ioapics);
4226 return 1;
4227 }
4228 if (!address) {
4229 printk(KERN_WARNING "WARNING: Bogus (zero) I/O APIC address"
4230 " found in table, skipping!\n");
4231 return 1;
4232 }
4233 return 0;
4234 }
4235
4236 void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
4237 {
4238 int idx = 0;
4239
4240 if (bad_ioapic(address))
4241 return;
4242
4243 idx = nr_ioapics;
4244
4245 mp_ioapics[idx].type = MP_IOAPIC;
4246 mp_ioapics[idx].flags = MPC_APIC_USABLE;
4247 mp_ioapics[idx].apicaddr = address;
4248
4249 set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
4250 mp_ioapics[idx].apicid = io_apic_unique_id(id);
4251 mp_ioapics[idx].apicver = io_apic_get_version(idx);
4252
4253 /*
4254 * Build basic GSI lookup table to facilitate gsi->io_apic lookups
4255 * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
4256 */
4257 mp_gsi_routing[idx].gsi_base = gsi_base;
4258 mp_gsi_routing[idx].gsi_end = gsi_base +
4259 io_apic_get_redir_entries(idx);
4260
4261 printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
4262 "GSI %d-%d\n", idx, mp_ioapics[idx].apicid,
4263 mp_ioapics[idx].apicver, mp_ioapics[idx].apicaddr,
4264 mp_gsi_routing[idx].gsi_base, mp_gsi_routing[idx].gsi_end);
4265
4266 nr_ioapics++;
4267 }
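
#ifdef CONFIG_ACPI
/*
 * Illustrative sketch, not part of the original file: on ACPI systems
 * mp_register_ioapic() is driven by the MADT I/O APIC entries (see
 * acpi_parse_ioapic() in arch/x86/kernel/acpi/boot.c), roughly like the
 * hypothetical wrapper below.
 */
static void __init __maybe_unused register_madt_ioapic(struct acpi_madt_io_apic *ioapic)
{
	mp_register_ioapic(ioapic->id, ioapic->address,
			   ioapic->global_irq_base);
}
#endif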