/* arch/x86/kernel/io_apic_64.c */
/*
 *	Intel IO-APIC support for multi-Pentium hosts.
 *
 *	Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
 *
 *	Many thanks to Stig Venaas for trying out countless experimental
 *	patches and reporting/debugging problems patiently!
 *
 *	(c) 1999, Multiple IO-APIC support, developed by
 *	Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
 *	Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
 *	further tested and cleaned up by Zach Brown <zab@redhat.com>
 *	and Ingo Molnar <mingo@redhat.com>
 *
 *	Fixes
 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
 *					thanks to Eric Gilmore
 *					and Rolf G. Tews
 *					for testing these extensively
 *	Paul Diefenbaugh	:	Added full ACPI support
 */

#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/mc146818rtc.h>
#include <linux/acpi.h>
#include <linux/sysdev.h>
#include <linux/msi.h>
#include <linux/htirq.h>
#include <linux/dmar.h>
#include <linux/jiffies.h>
#ifdef CONFIG_ACPI
#include <acpi/acpi_bus.h>
#endif
#include <linux/bootmem.h>

#include <asm/idle.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/acpi.h>
#include <asm/dma.h>
#include <asm/i8259.h>
#include <asm/nmi.h>
#include <asm/msidef.h>
#include <asm/hypertransport.h>

#include <mach_ipi.h>
#include <mach_apic.h>

struct irq_cfg {
	cpumask_t domain;
	cpumask_t old_domain;
	unsigned move_cleanup_count;
	u8 vector;
	u8 move_in_progress : 1;
};

/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
static struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = {
	[0]  = { .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR,  },
	[1]  = { .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR,  },
	[2]  = { .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR,  },
	[3]  = { .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR,  },
	[4]  = { .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR,  },
	[5]  = { .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR,  },
	[6]  = { .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR,  },
	[7]  = { .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR,  },
	[8]  = { .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR,  },
	[9]  = { .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR,  },
	[10] = { .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, },
	[11] = { .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, },
	[12] = { .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, },
	[13] = { .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, },
	[14] = { .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, },
	[15] = { .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
};
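
/*
 * Note on the static initializer above: the 16 legacy ISA IRQs come
 * pre-assigned, with compile-time vectors (IRQ0_VECTOR..IRQ15_VECTOR)
 * and a domain of all CPUs, so they are usable before
 * assign_irq_vector() has ever run and stay in step with the i8259
 * routing.
 */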

static int assign_irq_vector(int irq, cpumask_t mask);

int first_system_vector = 0xfe;

char system_vectors[NR_VECTORS] = { [0 ... NR_VECTORS-1] = SYS_VECTOR_FREE};

#define __apicdebuginit __init

int sis_apic_bug; /* not actually supported, dummy for compile */

static int no_timer_check;

static int disable_timer_pin_1 __initdata;

int timer_through_8259 __initdata;

/* Where, if anywhere, the i8259 is connected in external int mode */
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };

static DEFINE_SPINLOCK(ioapic_lock);
static DEFINE_SPINLOCK(vector_lock);

/*
 * # of IRQ routing registers
 */
int nr_ioapic_registers[MAX_IO_APICS];

/* I/O APIC entries */
struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];
int nr_ioapics;

/* MP IRQ source entries */
struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];

/* # of MP IRQ source entries */
int mp_irq_entries;

DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);

/*
 * Rough estimation of how many shared IRQs there are, can
 * be changed anytime.
 */
#define MAX_PLUS_SHARED_IRQS NR_IRQS
#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)

/*
 * This is performance-critical, we want to do it O(1)
 *
 * the indexing order of this array favors 1:1 mappings
 * between pins and IRQs.
 */

static struct irq_pin_list {
	short apic, pin, next;
} irq_2_pin[PIN_MAP_SIZE];
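
/*
 * Layout note: the first NR_IRQS entries act as per-IRQ list heads; the
 * remaining NR_IRQS slots form an overflow pool that add_pin_to_irq()
 * hands out (via its static first_free_entry, which starts at NR_IRQS)
 * when one IRQ is wired to more than one pin. 'next' is an index into
 * this same array, with 0 terminating a chain.
 */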

struct io_apic {
	unsigned int index;
	unsigned int unused[3];
	unsigned int data;
};
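
/*
 * The I/O APIC exposes an indirect register window: software writes a
 * register number into 'index' and then reads or writes that register's
 * contents through 'data', exactly as io_apic_read()/io_apic_write()
 * below do.
 */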

static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
{
	return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
		+ (mp_ioapics[idx].mp_apicaddr & ~PAGE_MASK);
}

static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	return readl(&io_apic->data);
}

static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}

/*
 * Re-write a value: to be used for read-modify-write
 * cycles where the read already set up the index register.
 */
static inline void io_apic_modify(unsigned int apic, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(value, &io_apic->data);
}

static bool io_apic_level_ack_pending(unsigned int irq)
{
	struct irq_pin_list *entry;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	entry = irq_2_pin + irq;
	for (;;) {
		unsigned int reg;
		int pin;

		pin = entry->pin;
		if (pin == -1)
			break;
		reg = io_apic_read(entry->apic, 0x10 + pin*2);
		/* Is the remote IRR bit set? */
		if (reg & IO_APIC_REDIR_REMOTE_IRR) {
			spin_unlock_irqrestore(&ioapic_lock, flags);
			return true;
		}
		if (!entry->next)
			break;
		entry = irq_2_pin + entry->next;
	}
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return false;
}

/*
 * Synchronize the IO-APIC and the CPU by doing
 * a dummy read from the IO-APIC
 */
static inline void io_apic_sync(unsigned int apic)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	readl(&io_apic->data);
}

#define __DO_ACTION(R, ACTION, FINAL)					\
									\
{									\
	int pin;							\
	struct irq_pin_list *entry = irq_2_pin + irq;			\
									\
	BUG_ON(irq >= NR_IRQS);						\
	for (;;) {							\
		unsigned int reg;					\
		pin = entry->pin;					\
		if (pin == -1)						\
			break;						\
		reg = io_apic_read(entry->apic, 0x10 + R + pin*2);	\
		reg ACTION;						\
		io_apic_modify(entry->apic, reg);			\
		FINAL;							\
		if (!entry->next)					\
			break;						\
		entry = irq_2_pin + entry->next;			\
	}								\
}
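
/*
 * For illustration, DO_ACTION(__mask, 0, |= IO_APIC_REDIR_MASKED,
 * io_apic_sync(entry->apic)) further below expands (roughly) to:
 *
 *	static void __mask_IO_APIC_irq(unsigned int irq)
 *	{
 *		... for each pin on the irq_2_pin chain of 'irq':
 *		reg = io_apic_read(entry->apic, 0x10 + pin*2);
 *		reg |= IO_APIC_REDIR_MASKED;
 *		io_apic_modify(entry->apic, reg);
 *		io_apic_sync(entry->apic);
 *	}
 */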

union entry_union {
	struct { u32 w1, w2; };
	struct IO_APIC_route_entry entry;
};

static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
{
	union entry_union eu;
	unsigned long flags;
	spin_lock_irqsave(&ioapic_lock, flags);
	eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
	eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
	spin_unlock_irqrestore(&ioapic_lock, flags);
	return eu.entry;
}

/*
 * When we write a new IO APIC routing entry, we need to write the high
 * word first! If the mask bit in the low word is clear, we will enable
 * the interrupt, and we need to make sure the entry is fully populated
 * before that happens.
 */
static void
__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	union entry_union eu;
	eu.entry = e;
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
}

static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	unsigned long flags;
	spin_lock_irqsave(&ioapic_lock, flags);
	__ioapic_write_entry(apic, pin, e);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}

/*
 * When we mask an IO APIC routing entry, we need to write the low
 * word first, in order to set the mask bit before we change the
 * high bits!
 */
static void ioapic_mask_entry(int apic, int pin)
{
	unsigned long flags;
	union entry_union eu = { .entry.mask = 1 };

	spin_lock_irqsave(&ioapic_lock, flags);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}

#ifdef CONFIG_SMP
static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector)
{
	int apic, pin;
	struct irq_pin_list *entry = irq_2_pin + irq;

	BUG_ON(irq >= NR_IRQS);
	for (;;) {
		unsigned int reg;
		apic = entry->apic;
		pin = entry->pin;
		if (pin == -1)
			break;
		io_apic_write(apic, 0x11 + pin*2, dest);
		reg = io_apic_read(apic, 0x10 + pin*2);
		reg &= ~IO_APIC_REDIR_VECTOR_MASK;
		reg |= vector;
		io_apic_modify(apic, reg);
		if (!entry->next)
			break;
		entry = irq_2_pin + entry->next;
	}
}
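
/*
 * Note the order above: for each pin the destination (high word,
 * register 0x11 + 2*pin) is rewritten first, then the vector is spliced
 * into the low word with a read-modify-write, where io_apic_modify()
 * reuses the index register already set up by the preceding read.
 */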

static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
{
	struct irq_cfg *cfg = irq_cfg + irq;
	unsigned long flags;
	unsigned int dest;
	cpumask_t tmp;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	if (assign_irq_vector(irq, mask))
		return;

	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	/*
	 * Only the high 8 bits are valid.
	 */
	dest = SET_APIC_LOGICAL_ID(dest);

	spin_lock_irqsave(&ioapic_lock, flags);
	__target_IO_APIC_irq(irq, dest, cfg->vector);
	irq_desc[irq].affinity = mask;
	spin_unlock_irqrestore(&ioapic_lock, flags);
}
#endif

/*
 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
 * shared ISA-space IRQs, so we have to support them. We are super
 * fast in the common case, and fast for shared ISA-space IRQs.
 */
static void add_pin_to_irq(unsigned int irq, int apic, int pin)
{
	static int first_free_entry = NR_IRQS;
	struct irq_pin_list *entry = irq_2_pin + irq;

	BUG_ON(irq >= NR_IRQS);
	while (entry->next)
		entry = irq_2_pin + entry->next;

	if (entry->pin != -1) {
		entry->next = first_free_entry;
		entry = irq_2_pin + entry->next;
		if (++first_free_entry >= PIN_MAP_SIZE)
			panic("io_apic.c: ran out of irq_2_pin entries!");
	}
	entry->apic = apic;
	entry->pin = pin;
}

/*
 * Reroute an IRQ to a different pin.
 */
static void __init replace_pin_at_irq(unsigned int irq,
				      int oldapic, int oldpin,
				      int newapic, int newpin)
{
	struct irq_pin_list *entry = irq_2_pin + irq;

	while (1) {
		if (entry->apic == oldapic && entry->pin == oldpin) {
			entry->apic = newapic;
			entry->pin = newpin;
		}
		if (!entry->next)
			break;
		entry = irq_2_pin + entry->next;
	}
}


#define DO_ACTION(name, R, ACTION, FINAL)			\
								\
static void name##_IO_APIC_irq(unsigned int irq)		\
__DO_ACTION(R, ACTION, FINAL)

/* mask = 1 */
DO_ACTION(__mask, 0, |= IO_APIC_REDIR_MASKED, io_apic_sync(entry->apic))

/* mask = 0 */
DO_ACTION(__unmask, 0, &= ~IO_APIC_REDIR_MASKED, )

static void mask_IO_APIC_irq(unsigned int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	__mask_IO_APIC_irq(irq);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void unmask_IO_APIC_irq(unsigned int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	__unmask_IO_APIC_irq(irq);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
{
	struct IO_APIC_route_entry entry;

	/* Check delivery_mode to be sure we're not clearing an SMI pin */
	entry = ioapic_read_entry(apic, pin);
	if (entry.delivery_mode == dest_SMI)
		return;
	/*
	 * Disable it in the IO-APIC irq-routing table:
	 */
	ioapic_mask_entry(apic, pin);
}

static void clear_IO_APIC(void)
{
	int apic, pin;

	for (apic = 0; apic < nr_ioapics; apic++)
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
			clear_IO_APIC_pin(apic, pin);
}

int skip_ioapic_setup;
int ioapic_force;

static int __init parse_noapic(char *str)
{
	disable_ioapic_setup();
	return 0;
}
early_param("noapic", parse_noapic);

/* Actually the next is obsolete, but keep it for paranoid reasons -AK */
static int __init disable_timer_pin_setup(char *arg)
{
	disable_timer_pin_1 = 1;
	return 1;
}
__setup("disable_timer_pin_1", disable_timer_pin_setup);


/*
 * Find the IRQ entry number of a certain pin.
 */
static int find_irq_entry(int apic, int pin, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++)
		if (mp_irqs[i].mp_irqtype == type &&
		    (mp_irqs[i].mp_dstapic == mp_ioapics[apic].mp_apicid ||
		     mp_irqs[i].mp_dstapic == MP_APIC_ALL) &&
		    mp_irqs[i].mp_dstirq == pin)
			return i;

	return -1;
}

/*
 * Find the pin to which IRQ[irq] (ISA) is connected
 */
static int __init find_isa_irq_pin(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].mp_srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].mp_irqtype == type) &&
		    (mp_irqs[i].mp_srcbusirq == irq))

			return mp_irqs[i].mp_dstirq;
	}
	return -1;
}

static int __init find_isa_irq_apic(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].mp_srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].mp_irqtype == type) &&
		    (mp_irqs[i].mp_srcbusirq == irq))
			break;
	}
	if (i < mp_irq_entries) {
		int apic;
		for (apic = 0; apic < nr_ioapics; apic++) {
			if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic)
				return apic;
		}
	}

	return -1;
}

/*
 * Find a specific PCI IRQ entry.
 * Not an __init, possibly needed by modules
 */
static int pin_2_irq(int idx, int apic, int pin);

int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
{
	int apic, i, best_guess = -1;

	apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
		bus, slot, pin);
	if (test_bit(bus, mp_bus_not_pci)) {
		apic_printk(APIC_VERBOSE, "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
		return -1;
	}
	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].mp_srcbus;

		for (apic = 0; apic < nr_ioapics; apic++)
			if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic ||
			    mp_irqs[i].mp_dstapic == MP_APIC_ALL)
				break;

		if (!test_bit(lbus, mp_bus_not_pci) &&
		    !mp_irqs[i].mp_irqtype &&
		    (bus == lbus) &&
		    (slot == ((mp_irqs[i].mp_srcbusirq >> 2) & 0x1f))) {
			int irq = pin_2_irq(i, apic, mp_irqs[i].mp_dstirq);

			if (!(apic || IO_APIC_IRQ(irq)))
				continue;

			if (pin == (mp_irqs[i].mp_srcbusirq & 3))
				return irq;
			/*
			 * Use the first all-but-pin matching entry as a
			 * best-guess fuzzy result for broken mptables.
			 */
			if (best_guess < 0)
				best_guess = irq;
		}
	}
	BUG_ON(best_guess >= NR_IRQS);
	return best_guess;
}

/* ISA interrupts are always polarity zero edge triggered,
 * when listed as conforming in the MP table. */

#define default_ISA_trigger(idx)	(0)
#define default_ISA_polarity(idx)	(0)

/* PCI interrupts are always polarity one level triggered,
 * when listed as conforming in the MP table. */

#define default_PCI_trigger(idx)	(1)
#define default_PCI_polarity(idx)	(1)

static int MPBIOS_polarity(int idx)
{
	int bus = mp_irqs[idx].mp_srcbus;
	int polarity;

	/*
	 * Determine IRQ line polarity (high active or low active):
	 */
	switch (mp_irqs[idx].mp_irqflag & 3)
	{
		case 0: /* conforms, i.e. bus-type dependent polarity */
			if (test_bit(bus, mp_bus_not_pci))
				polarity = default_ISA_polarity(idx);
			else
				polarity = default_PCI_polarity(idx);
			break;
		case 1: /* high active */
		{
			polarity = 0;
			break;
		}
		case 2: /* reserved */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			polarity = 1;
			break;
		}
		case 3: /* low active */
		{
			polarity = 1;
			break;
		}
		default: /* invalid */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			polarity = 1;
			break;
		}
	}
	return polarity;
}

static int MPBIOS_trigger(int idx)
{
	int bus = mp_irqs[idx].mp_srcbus;
	int trigger;

	/*
	 * Determine IRQ trigger mode (edge or level sensitive):
	 */
	switch ((mp_irqs[idx].mp_irqflag>>2) & 3)
	{
		case 0: /* conforms, i.e. bus-type dependent */
			if (test_bit(bus, mp_bus_not_pci))
				trigger = default_ISA_trigger(idx);
			else
				trigger = default_PCI_trigger(idx);
			break;
		case 1: /* edge */
		{
			trigger = 0;
			break;
		}
		case 2: /* reserved */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			trigger = 1;
			break;
		}
		case 3: /* level */
		{
			trigger = 1;
			break;
		}
		default: /* invalid */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			trigger = 0;
			break;
		}
	}
	return trigger;
}

static inline int irq_polarity(int idx)
{
	return MPBIOS_polarity(idx);
}

static inline int irq_trigger(int idx)
{
	return MPBIOS_trigger(idx);
}

static int pin_2_irq(int idx, int apic, int pin)
{
	int irq, i;
	int bus = mp_irqs[idx].mp_srcbus;

	/*
	 * Debugging check, we are in big trouble if this message pops up!
	 */
	if (mp_irqs[idx].mp_dstirq != pin)
		printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");

	if (test_bit(bus, mp_bus_not_pci)) {
		irq = mp_irqs[idx].mp_srcbusirq;
	} else {
		/*
		 * PCI IRQs are mapped in order
		 */
		i = irq = 0;
		while (i < apic)
			irq += nr_ioapic_registers[i++];
		irq += pin;
	}
	BUG_ON(irq >= NR_IRQS);
	return irq;
}
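
/*
 * Worked example (hypothetical board): with two I/O APICs of 24 pins
 * each, a PCI interrupt on pin 3 of the second APIC yields
 * irq = 24 + 3 = 27, because PCI pins are numbered sequentially across
 * all I/O APICs, while ISA IRQs keep their bus-local number.
 */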

void lock_vector_lock(void)
{
	/* Used so that the online set of cpus does not change
	 * during assign_irq_vector.
	 */
	spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
	spin_unlock(&vector_lock);
}

static int __assign_irq_vector(int irq, cpumask_t mask)
{
	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
	unsigned int old_vector;
	int cpu;
	struct irq_cfg *cfg;

	BUG_ON((unsigned)irq >= NR_IRQS);
	cfg = &irq_cfg[irq];

	/* Only try and allocate irqs on cpus that are present */
	cpus_and(mask, mask, cpu_online_map);

	if ((cfg->move_in_progress) || cfg->move_cleanup_count)
		return -EBUSY;

	old_vector = cfg->vector;
	if (old_vector) {
		cpumask_t tmp;
		cpus_and(tmp, cfg->domain, mask);
		if (!cpus_empty(tmp))
			return 0;
	}

	for_each_cpu_mask_nr(cpu, mask) {
		cpumask_t domain, new_mask;
		int new_cpu;
		int vector, offset;

		domain = vector_allocation_domain(cpu);
		cpus_and(new_mask, domain, cpu_online_map);

		vector = current_vector;
		offset = current_offset;
next:
		vector += 8;
		if (vector >= first_system_vector) {
			/* If we run out of vectors on large boxen, we must share them. */
			offset = (offset + 1) % 8;
			vector = FIRST_DEVICE_VECTOR + offset;
		}
		if (unlikely(current_vector == vector))
			continue;
		if (vector == IA32_SYSCALL_VECTOR)
			goto next;
		for_each_cpu_mask_nr(new_cpu, new_mask)
			if (per_cpu(vector_irq, new_cpu)[vector] != -1)
				goto next;
		/* Found one! */
		current_vector = vector;
		current_offset = offset;
		if (old_vector) {
			cfg->move_in_progress = 1;
			cfg->old_domain = cfg->domain;
		}
		for_each_cpu_mask_nr(new_cpu, new_mask)
			per_cpu(vector_irq, new_cpu)[vector] = irq;
		cfg->vector = vector;
		cfg->domain = domain;
		return 0;
	}
	return -ENOSPC;
}
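
/*
 * Allocation pattern, for illustration: vectors are handed out in steps
 * of 8 from FIRST_DEVICE_VECTOR, so at most two consecutive allocations
 * land in the same priority class (class = vector >> 4, 16 vectors
 * wide) before the walk moves on. Once the device range is exhausted,
 * 'offset' is bumped and the scan restarts at FIRST_DEVICE_VECTOR +
 * offset, sharing priority levels on very large systems.
 */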

static int assign_irq_vector(int irq, cpumask_t mask)
{
	int err;
	unsigned long flags;

	spin_lock_irqsave(&vector_lock, flags);
	err = __assign_irq_vector(irq, mask);
	spin_unlock_irqrestore(&vector_lock, flags);
	return err;
}

static void __clear_irq_vector(int irq)
{
	struct irq_cfg *cfg;
	cpumask_t mask;
	int cpu, vector;

	BUG_ON((unsigned)irq >= NR_IRQS);
	cfg = &irq_cfg[irq];
	BUG_ON(!cfg->vector);

	vector = cfg->vector;
	cpus_and(mask, cfg->domain, cpu_online_map);
	for_each_cpu_mask_nr(cpu, mask)
		per_cpu(vector_irq, cpu)[vector] = -1;

	cfg->vector = 0;
	cpus_clear(cfg->domain);
}

void __setup_vector_irq(int cpu)
{
	/* Initialize vector_irq on a new cpu */
	/* This function must be called with vector_lock held */
	int irq, vector;

	/* Mark the inuse vectors */
	for (irq = 0; irq < NR_IRQS; ++irq) {
		if (!cpu_isset(cpu, irq_cfg[irq].domain))
			continue;
		vector = irq_cfg[irq].vector;
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
	/* Mark the free vectors */
	for (vector = 0; vector < NR_VECTORS; ++vector) {
		irq = per_cpu(vector_irq, cpu)[vector];
		if (irq < 0)
			continue;
		if (!cpu_isset(cpu, irq_cfg[irq].domain))
			per_cpu(vector_irq, cpu)[vector] = -1;
	}
}

static struct irq_chip ioapic_chip;

static void ioapic_register_intr(int irq, unsigned long trigger)
{
	if (trigger) {
		irq_desc[irq].status |= IRQ_LEVEL;
		set_irq_chip_and_handler_name(irq, &ioapic_chip,
					      handle_fasteoi_irq, "fasteoi");
	} else {
		irq_desc[irq].status &= ~IRQ_LEVEL;
		set_irq_chip_and_handler_name(irq, &ioapic_chip,
					      handle_edge_irq, "edge");
	}
}

static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq,
			      int trigger, int polarity)
{
	struct irq_cfg *cfg = irq_cfg + irq;
	struct IO_APIC_route_entry entry;
	cpumask_t mask;

	if (!IO_APIC_IRQ(irq))
		return;

	mask = TARGET_CPUS;
	if (assign_irq_vector(irq, mask))
		return;

	cpus_and(mask, cfg->domain, mask);

	apic_printk(APIC_VERBOSE, KERN_DEBUG
		    "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
		    "IRQ %d Mode:%i Active:%i)\n",
		    apic, mp_ioapics[apic].mp_apicid, pin, cfg->vector,
		    irq, trigger, polarity);

	/*
	 * add it to the IO-APIC irq-routing table:
	 */
	memset(&entry, 0, sizeof(entry));

	entry.delivery_mode = INT_DELIVERY_MODE;
	entry.dest_mode = INT_DEST_MODE;
	entry.dest = cpu_mask_to_apicid(mask);
	entry.mask = 0;				/* enable IRQ */
	entry.trigger = trigger;
	entry.polarity = polarity;
	entry.vector = cfg->vector;

	/* Mask level triggered irqs.
	 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
	 */
	if (trigger)
		entry.mask = 1;

	ioapic_register_intr(irq, trigger);
	if (irq < 16)
		disable_8259A_irq(irq);

	ioapic_write_entry(apic, pin, entry);
}

static void __init setup_IO_APIC_irqs(void)
{
	int apic, pin, idx, irq, first_notcon = 1;

	apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");

	for (apic = 0; apic < nr_ioapics; apic++) {
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {

			idx = find_irq_entry(apic, pin, mp_INT);
			if (idx == -1) {
				if (first_notcon) {
					apic_printk(APIC_VERBOSE, KERN_DEBUG " IO-APIC (apicid-pin) %d-%d", mp_ioapics[apic].mp_apicid, pin);
					first_notcon = 0;
				} else
					apic_printk(APIC_VERBOSE, ", %d-%d", mp_ioapics[apic].mp_apicid, pin);
				continue;
			}
			if (!first_notcon) {
				apic_printk(APIC_VERBOSE, " not connected.\n");
				first_notcon = 1;
			}

			irq = pin_2_irq(idx, apic, pin);
			add_pin_to_irq(irq, apic, pin);

			setup_IO_APIC_irq(apic, pin, irq,
					  irq_trigger(idx), irq_polarity(idx));
		}
	}

	if (!first_notcon)
		apic_printk(APIC_VERBOSE, " not connected.\n");
}

/*
 * Set up the timer pin, possibly with the 8259A-master behind.
 */
static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
					int vector)
{
	struct IO_APIC_route_entry entry;

	memset(&entry, 0, sizeof(entry));

	/*
	 * We use logical delivery to get the timer IRQ
	 * to the first CPU.
	 */
	entry.dest_mode = INT_DEST_MODE;
	entry.mask = 1;				/* mask IRQ now */
	entry.dest = cpu_mask_to_apicid(TARGET_CPUS);
	entry.delivery_mode = INT_DELIVERY_MODE;
	entry.polarity = 0;
	entry.trigger = 0;
	entry.vector = vector;

	/*
	 * The timer IRQ doesn't have to know that behind the
	 * scenes we may have an 8259A-master in AEOI mode ...
	 */
	set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");

	/*
	 * Add it to the IO-APIC irq-routing table:
	 */
	ioapic_write_entry(apic, pin, entry);
}

void __apicdebuginit print_IO_APIC(void)
{
	int apic, i;
	union IO_APIC_reg_00 reg_00;
	union IO_APIC_reg_01 reg_01;
	union IO_APIC_reg_02 reg_02;
	unsigned long flags;

	if (apic_verbosity == APIC_QUIET)
		return;

	printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
	for (i = 0; i < nr_ioapics; i++)
		printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
		       mp_ioapics[i].mp_apicid, nr_ioapic_registers[i]);

	/*
	 * We are a bit conservative about what we expect. We have to
	 * know about every hardware change ASAP.
	 */
	printk(KERN_INFO "testing the IO APIC.......................\n");

	for (apic = 0; apic < nr_ioapics; apic++) {

		spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(apic, 0);
		reg_01.raw = io_apic_read(apic, 1);
		if (reg_01.bits.version >= 0x10)
			reg_02.raw = io_apic_read(apic, 2);
		spin_unlock_irqrestore(&ioapic_lock, flags);

		printk("\n");
		printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mp_apicid);
		printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
		printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);

		printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
		printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);

		printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
		printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);

		if (reg_01.bits.version >= 0x10) {
			printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
			printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
		}

		printk(KERN_DEBUG ".... IRQ redirection table:\n");

		printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
			" Stat Dmod Deli Vect:   \n");

		for (i = 0; i <= reg_01.bits.entries; i++) {
			struct IO_APIC_route_entry entry;

			entry = ioapic_read_entry(apic, i);

			printk(KERN_DEBUG " %02x %03X ",
				i,
				entry.dest
			);

			printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
				entry.mask,
				entry.trigger,
				entry.irr,
				entry.polarity,
				entry.delivery_status,
				entry.dest_mode,
				entry.delivery_mode,
				entry.vector
			);
		}
	}
	printk(KERN_DEBUG "IRQ to pin mappings:\n");
	for (i = 0; i < NR_IRQS; i++) {
		struct irq_pin_list *entry = irq_2_pin + i;
		if (entry->pin < 0)
			continue;
		printk(KERN_DEBUG "IRQ%d ", i);
		for (;;) {
			printk("-> %d:%d", entry->apic, entry->pin);
			if (!entry->next)
				break;
			entry = irq_2_pin + entry->next;
		}
		printk("\n");
	}

	printk(KERN_INFO ".................................... done.\n");

	return;
}

#if 0

static __apicdebuginit void print_APIC_bitfield(int base)
{
	unsigned int v;
	int i, j;

	if (apic_verbosity == APIC_QUIET)
		return;

	printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
	for (i = 0; i < 8; i++) {
		v = apic_read(base + i*0x10);
		for (j = 0; j < 32; j++) {
			if (v & (1<<j))
				printk("1");
			else
				printk("0");
		}
		printk("\n");
	}
}

void __apicdebuginit print_local_APIC(void *dummy)
{
	unsigned int v, ver, maxlvt;

	if (apic_verbosity == APIC_QUIET)
		return;

	printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
		smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, GET_APIC_ID(read_apic_id()));
	v = apic_read(APIC_LVR);
	printk(KERN_INFO "... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	v = apic_read(APIC_ARBPRI);
	printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
		v & APIC_ARBPRI_MASK);
	v = apic_read(APIC_PROCPRI);
	printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);

	v = apic_read(APIC_EOI);
	printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
	v = apic_read(APIC_RRR);
	printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
	v = apic_read(APIC_LDR);
	printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
	v = apic_read(APIC_DFR);
	printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
	v = apic_read(APIC_SPIV);
	printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);

	printk(KERN_DEBUG "... APIC ISR field:\n");
	print_APIC_bitfield(APIC_ISR);
	printk(KERN_DEBUG "... APIC TMR field:\n");
	print_APIC_bitfield(APIC_TMR);
	printk(KERN_DEBUG "... APIC IRR field:\n");
	print_APIC_bitfield(APIC_IRR);

	v = apic_read(APIC_ESR);
	printk(KERN_DEBUG "... APIC ESR: %08x\n", v);

	v = apic_read(APIC_ICR);
	printk(KERN_DEBUG "... APIC ICR: %08x\n", v);
	v = apic_read(APIC_ICR2);
	printk(KERN_DEBUG "... APIC ICR2: %08x\n", v);

	v = apic_read(APIC_LVTT);
	printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {			/* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {			/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
	printk("\n");
}

void print_all_local_APICs(void)
{
	on_each_cpu(print_local_APIC, NULL, 1);
}

void __apicdebuginit print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (apic_verbosity == APIC_QUIET)
		return;

	printk(KERN_DEBUG "\nprinting PIC contents\n");

	spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	printk(KERN_DEBUG "... PIC IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	printk(KERN_DEBUG "... PIC IRR: %04x\n", v);

	outb(0x0b, 0xa0);
	outb(0x0b, 0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	outb(0x0a, 0xa0);
	outb(0x0a, 0x20);

	spin_unlock_irqrestore(&i8259A_lock, flags);

	printk(KERN_DEBUG "... PIC ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
}

#endif /* 0 */

void __init enable_IO_APIC(void)
{
	union IO_APIC_reg_01 reg_01;
	int i8259_apic, i8259_pin;
	int i, apic;
	unsigned long flags;

	for (i = 0; i < PIN_MAP_SIZE; i++) {
		irq_2_pin[i].pin = -1;
		irq_2_pin[i].next = 0;
	}

	/*
	 * The number of IO-APIC IRQ registers (== #pins):
	 */
	for (apic = 0; apic < nr_ioapics; apic++) {
		spin_lock_irqsave(&ioapic_lock, flags);
		reg_01.raw = io_apic_read(apic, 1);
		spin_unlock_irqrestore(&ioapic_lock, flags);
		nr_ioapic_registers[apic] = reg_01.bits.entries+1;
	}
	for (apic = 0; apic < nr_ioapics; apic++) {
		int pin;
		/* See if any of the pins is in ExtINT mode */
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
			struct IO_APIC_route_entry entry;
			entry = ioapic_read_entry(apic, pin);

			/* If the interrupt line is enabled and in ExtInt mode
			 * I have found the pin where the i8259 is connected.
			 */
			if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
				ioapic_i8259.apic = apic;
				ioapic_i8259.pin = pin;
				goto found_i8259;
			}
		}
	}
found_i8259:
	/* Look to see if the MP table has reported the ExtINT */
	i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
	i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
	/* Trust the MP table if nothing is setup in the hardware */
	if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
		printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
		ioapic_i8259.pin = i8259_pin;
		ioapic_i8259.apic = i8259_apic;
	}
	/* Complain if the MP table and the hardware disagree */
	if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
	    (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
	{
		printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
	}

	/*
	 * Do not trust the IO-APIC being empty at bootup
	 */
	clear_IO_APIC();
}

/*
 * Not an __init, needed by the reboot code
 */
void disable_IO_APIC(void)
{
	/*
	 * Clear the IO-APIC before rebooting:
	 */
	clear_IO_APIC();

	/*
	 * If the i8259 is routed through an IOAPIC
	 * Put that IOAPIC in virtual wire mode
	 * so legacy interrupts can be delivered.
	 */
	if (ioapic_i8259.pin != -1) {
		struct IO_APIC_route_entry entry;

		memset(&entry, 0, sizeof(entry));
		entry.mask            = 0; /* Enabled */
		entry.trigger         = 0; /* Edge */
		entry.irr             = 0;
		entry.polarity        = 0; /* High */
		entry.delivery_status = 0;
		entry.dest_mode       = 0; /* Physical */
		entry.delivery_mode   = dest_ExtINT; /* ExtInt */
		entry.vector          = 0;
		entry.dest            = GET_APIC_ID(read_apic_id());

		/*
		 * Add it to the IO-APIC irq-routing table:
		 */
		ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
	}

	disconnect_bsp_APIC(ioapic_i8259.pin != -1);
}

/*
 * There is a nasty bug in some older SMP boards, their mptable lies
 * about the timer IRQ. We do the following to work around the situation:
 *
 *	- timer IRQ defaults to IO-APIC IRQ
 *	- if this function detects that timer IRQs are defunct, then we fall
 *	  back to ISA timer IRQs
 */
static int __init timer_irq_works(void)
{
	unsigned long t1 = jiffies;
	unsigned long flags;

	local_save_flags(flags);
	local_irq_enable();
	/* Let ten ticks pass... */
	mdelay((10 * 1000) / HZ);
	local_irq_restore(flags);

	/*
	 * Expect a few ticks at least, to be sure some possible
	 * glue logic does not lock up after one or two first
	 * ticks in a non-ExtINT mode. Also the local APIC
	 * might have cached one ExtINT interrupt. Finally, at
	 * least one tick may be lost due to delays.
	 */

	/* jiffies wrap? */
	if (time_after(jiffies, t1 + 4))
		return 1;
	return 0;
}
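
/*
 * The arithmetic above: mdelay((10 * 1000) / HZ) busy-waits for ten
 * tick periods (10ms with HZ=1000, 100ms with HZ=100); we then demand
 * that strictly more than 4 jiffies elapsed, tolerating a few lost or
 * cached ticks.
 */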

/*
 * In the SMP+IOAPIC case it might happen that there are an unspecified
 * number of pending IRQ events unhandled. These cases are very rare,
 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
 * better to do it this way as thus we do not have to be aware of
 * 'pending' interrupts in the IRQ path, except at this point.
 */
/*
 * Edge triggered needs to resend any interrupt
 * that was delayed but this is now handled in the device
 * independent code.
 */

/*
 * Starting up an edge-triggered IO-APIC interrupt is
 * nasty - we need to make sure that we get the edge.
 * If it is already asserted for some reason, we need
 * to return 1 to indicate that it was pending.
 *
 * This is not complete - we should be able to fake
 * an edge even if it isn't on the 8259A...
 */

static unsigned int startup_ioapic_irq(unsigned int irq)
{
	int was_pending = 0;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	if (irq < 16) {
		disable_8259A_irq(irq);
		if (i8259A_irq_pending(irq))
			was_pending = 1;
	}
	__unmask_IO_APIC_irq(irq);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return was_pending;
}

static int ioapic_retrigger_irq(unsigned int irq)
{
	struct irq_cfg *cfg = &irq_cfg[irq];
	unsigned long flags;

	spin_lock_irqsave(&vector_lock, flags);
	send_IPI_mask(cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector);
	spin_unlock_irqrestore(&vector_lock, flags);

	return 1;
}

/*
 * Level and edge triggered IO-APIC interrupts need different handling,
 * so we use two separate IRQ descriptors. Edge triggered IRQs can be
 * handled with the level-triggered descriptor, but that one has slightly
 * more overhead. Level-triggered interrupts cannot be handled with the
 * edge-triggered handler, without risking IRQ storms and other ugly
 * races.
 */

#ifdef CONFIG_SMP
asmlinkage void smp_irq_move_cleanup_interrupt(void)
{
	unsigned vector, me;
	ack_APIC_irq();
	exit_idle();
	irq_enter();

	me = smp_processor_id();
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		unsigned int irq;
		struct irq_desc *desc;
		struct irq_cfg *cfg;
		irq = __get_cpu_var(vector_irq)[vector];
		if (irq >= NR_IRQS)
			continue;

		desc = irq_desc + irq;
		cfg = irq_cfg + irq;
		spin_lock(&desc->lock);
		if (!cfg->move_cleanup_count)
			goto unlock;

		if ((vector == cfg->vector) && cpu_isset(me, cfg->domain))
			goto unlock;

		__get_cpu_var(vector_irq)[vector] = -1;
		cfg->move_cleanup_count--;
unlock:
		spin_unlock(&desc->lock);
	}

	irq_exit();
}

static void irq_complete_move(unsigned int irq)
{
	struct irq_cfg *cfg = irq_cfg + irq;
	unsigned vector, me;

	if (likely(!cfg->move_in_progress))
		return;

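	/*
	 * The low-level IRQ entry code stores the 1's complement of the
	 * vector number in orig_ax, so inverting it here recovers the
	 * vector that actually fired on this CPU.
	 */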
	vector = ~get_irq_regs()->orig_ax;
	me = smp_processor_id();
	if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) {
		cpumask_t cleanup_mask;

		cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
		cfg->move_cleanup_count = cpus_weight(cleanup_mask);
		send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
		cfg->move_in_progress = 0;
	}
}
#else
static inline void irq_complete_move(unsigned int irq) {}
#endif

static void ack_apic_edge(unsigned int irq)
{
	irq_complete_move(irq);
	move_native_irq(irq);
	ack_APIC_irq();
}

static void ack_apic_level(unsigned int irq)
{
	int do_unmask_irq = 0;

	irq_complete_move(irq);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	/* If we are moving the irq we need to mask it */
	if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
		do_unmask_irq = 1;
		mask_IO_APIC_irq(irq);
	}
#endif

	/*
	 * We must acknowledge the irq before we move it or the acknowledge will
	 * not propagate properly.
	 */
	ack_APIC_irq();

	/* Now we can move and re-enable the irq */
	if (unlikely(do_unmask_irq)) {
		/* Only migrate the irq if the ack has been received.
		 *
		 * On rare occasions the broadcast level triggered ack gets
		 * delayed going to ioapics, and if we reprogram the
		 * vector while Remote IRR is still set the irq will never
		 * fire again.
		 *
		 * To prevent this scenario we read the Remote IRR bit
		 * of the ioapic. This has two effects.
		 * - On any sane system the read of the ioapic will
		 *   flush writes (and acks) going to the ioapic from
		 *   this cpu.
		 * - We get to see if the ACK has actually been delivered.
		 *
		 * Based on failed experiments of reprogramming the
		 * ioapic entry from outside of irq context starting
		 * with masking the ioapic entry and then polling until
		 * Remote IRR was clear before reprogramming the
		 * ioapic I don't trust the Remote IRR bit to be
		 * completely accurate.
		 *
		 * However there appears to be no other way to plug
		 * this race, so if the Remote IRR bit is not
		 * accurate and is causing problems then it is a hardware bug
		 * and you can go talk to the chipset vendor about it.
		 */
		if (!io_apic_level_ack_pending(irq))
			move_masked_irq(irq);
		unmask_IO_APIC_irq(irq);
	}
}

static struct irq_chip ioapic_chip __read_mostly = {
	.name		= "IO-APIC",
	.startup	= startup_ioapic_irq,
	.mask		= mask_IO_APIC_irq,
	.unmask		= unmask_IO_APIC_irq,
	.ack		= ack_apic_edge,
	.eoi		= ack_apic_level,
#ifdef CONFIG_SMP
	.set_affinity	= set_ioapic_affinity_irq,
#endif
	.retrigger	= ioapic_retrigger_irq,
};

static inline void init_IO_APIC_traps(void)
{
	int irq;

	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	for (irq = 0; irq < NR_IRQS ; irq++) {
		if (IO_APIC_IRQ(irq) && !irq_cfg[irq].vector) {
			/*
			 * Hmm.. We don't have an entry for this,
			 * so default to an old-fashioned 8259
			 * interrupt if we can..
			 */
			if (irq < 16)
				make_8259A_irq(irq);
			else
				/* Strange. Oh, well.. */
				irq_desc[irq].chip = &no_irq_chip;
		}
	}
}

static void unmask_lapic_irq(unsigned int irq)
{
	unsigned long v;

	v = apic_read(APIC_LVT0);
	apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
}

static void mask_lapic_irq(unsigned int irq)
{
	unsigned long v;

	v = apic_read(APIC_LVT0);
	apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
}

static void ack_lapic_irq(unsigned int irq)
{
	ack_APIC_irq();
}

static struct irq_chip lapic_chip __read_mostly = {
	.name		= "local-APIC",
	.mask		= mask_lapic_irq,
	.unmask		= unmask_lapic_irq,
	.ack		= ack_lapic_irq,
};

static void lapic_register_intr(int irq)
{
	irq_desc[irq].status &= ~IRQ_LEVEL;
	set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
				      "edge");
}

static void __init setup_nmi(void)
{
	/*
	 * Dirty trick to enable the NMI watchdog ...
	 * We put the 8259A master into AEOI mode and
	 * unmask on all local APICs LVT0 as NMI.
	 *
	 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
	 * is from Maciej W. Rozycki - so we do not have to EOI from
	 * the NMI handler or the timer interrupt.
	 */
	printk(KERN_INFO "activating NMI Watchdog ...");

	enable_NMI_through_LVT0();

	printk(" done.\n");
}

/*
 * This looks a bit hackish but it's about the only one way of sending
 * a few INTA cycles to 8259As and any associated glue logic. ICR does
 * not support the ExtINT mode, unfortunately. We need to send these
 * cycles as some i82489DX-based boards have glue logic that keeps the
 * 8259A interrupt line asserted until INTA. --macro
 */
static inline void __init unlock_ExtINT_logic(void)
{
	int apic, pin, i;
	struct IO_APIC_route_entry entry0, entry1;
	unsigned char save_control, save_freq_select;

	pin = find_isa_irq_pin(8, mp_INT);
	apic = find_isa_irq_apic(8, mp_INT);
	if (pin == -1)
		return;

	entry0 = ioapic_read_entry(apic, pin);

	clear_IO_APIC_pin(apic, pin);

	memset(&entry1, 0, sizeof(entry1));

	entry1.dest_mode = 0;			/* physical delivery */
	entry1.mask = 0;			/* unmask IRQ now */
	entry1.dest = hard_smp_processor_id();
	entry1.delivery_mode = dest_ExtINT;
	entry1.polarity = entry0.polarity;
	entry1.trigger = 0;
	entry1.vector = 0;

	ioapic_write_entry(apic, pin, entry1);

	save_control = CMOS_READ(RTC_CONTROL);
	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
	CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
		   RTC_FREQ_SELECT);
	CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);

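	/*
	 * Rate select 0x6 programs a 1.024 kHz periodic rate; with PIE
	 * set the RTC now raises IRQ 8 repeatedly, producing the INTA
	 * cycles that unwedge the 8259A glue logic. The loop below lets
	 * roughly a second of these fire (bailing out faster once the
	 * periodic flag is observed) before the RTC state is restored.
	 */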
	i = 100;
	while (i-- > 0) {
		mdelay(10);
		if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
			i -= 10;
	}

	CMOS_WRITE(save_control, RTC_CONTROL);
	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
	clear_IO_APIC_pin(apic, pin);

	ioapic_write_entry(apic, pin, entry0);
}

/*
 * This code may look a bit paranoid, but it's supposed to cooperate with
 * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
 * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
 * fanatically on his truly buggy board.
 *
 * FIXME: really need to revamp this for modern platforms only.
 */
static inline void __init check_timer(void)
{
	struct irq_cfg *cfg = irq_cfg + 0;
	int apic1, pin1, apic2, pin2;
	unsigned long flags;
	int no_pin1 = 0;

	local_irq_save(flags);

	/*
	 * get/set the timer IRQ vector:
	 */
	disable_8259A_irq(0);
	assign_irq_vector(0, TARGET_CPUS);

	/*
	 * As IRQ0 is to be enabled in the 8259A, the virtual
	 * wire has to be disabled in the local APIC.
	 */
	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
	init_8259A(1);

	pin1 = find_isa_irq_pin(0, mp_INT);
	apic1 = find_isa_irq_apic(0, mp_INT);
	pin2 = ioapic_i8259.pin;
	apic2 = ioapic_i8259.apic;

	apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
		    "apic1=%d pin1=%d apic2=%d pin2=%d\n",
		    cfg->vector, apic1, pin1, apic2, pin2);

	/*
	 * Some BIOS writers are clueless and report the ExtINTA
	 * I/O APIC input from the cascaded 8259A as the timer
	 * interrupt input. So just in case, if only one pin
	 * was found above, try it both directly and through the
	 * 8259A.
	 */
	if (pin1 == -1) {
		pin1 = pin2;
		apic1 = apic2;
		no_pin1 = 1;
	} else if (pin2 == -1) {
		pin2 = pin1;
		apic2 = apic1;
	}

	if (pin1 != -1) {
		/*
		 * Ok, does IRQ0 through the IOAPIC work?
		 */
		if (no_pin1) {
			add_pin_to_irq(0, apic1, pin1);
			setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
		}
		unmask_IO_APIC_irq(0);
		if (!no_timer_check && timer_irq_works()) {
			if (nmi_watchdog == NMI_IO_APIC) {
				setup_nmi();
				enable_8259A_irq(0);
			}
			if (disable_timer_pin_1 > 0)
				clear_IO_APIC_pin(0, pin1);
			goto out;
		}
		clear_IO_APIC_pin(apic1, pin1);
		if (!no_pin1)
			apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
				    "8254 timer not connected to IO-APIC\n");

		apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
			    "(IRQ0) through the 8259A ...\n");
		apic_printk(APIC_QUIET, KERN_INFO
			    "..... (found apic %d pin %d) ...\n", apic2, pin2);
		/*
		 * legacy devices should be connected to IO APIC #0
		 */
		replace_pin_at_irq(0, apic1, pin1, apic2, pin2);
		setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
		unmask_IO_APIC_irq(0);
		enable_8259A_irq(0);
		if (timer_irq_works()) {
			apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
			timer_through_8259 = 1;
			if (nmi_watchdog == NMI_IO_APIC) {
				disable_8259A_irq(0);
				setup_nmi();
				enable_8259A_irq(0);
			}
			goto out;
		}
		/*
		 * Cleanup, just in case ...
		 */
		disable_8259A_irq(0);
		clear_IO_APIC_pin(apic2, pin2);
		apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
	}

	if (nmi_watchdog == NMI_IO_APIC) {
		apic_printk(APIC_QUIET, KERN_WARNING "timer doesn't work "
			    "through the IO-APIC - disabling NMI Watchdog!\n");
		nmi_watchdog = NMI_NONE;
	}

	apic_printk(APIC_QUIET, KERN_INFO
		    "...trying to set up timer as Virtual Wire IRQ...\n");

	lapic_register_intr(0);
	apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector);	/* Fixed mode */
	enable_8259A_irq(0);

	if (timer_irq_works()) {
		apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
		goto out;
	}
	disable_8259A_irq(0);
	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
	apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");

	apic_printk(APIC_QUIET, KERN_INFO
		    "...trying to set up timer as ExtINT IRQ...\n");

	init_8259A(0);
	make_8259A_irq(0);
	apic_write(APIC_LVT0, APIC_DM_EXTINT);

	unlock_ExtINT_logic();

	if (timer_irq_works()) {
		apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
		goto out;
	}
	apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
	panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
	      "report. Then try booting with the 'noapic' option.\n");
out:
	local_irq_restore(flags);
}

static int __init notimercheck(char *s)
{
	no_timer_check = 1;
	return 1;
}
__setup("no_timer_check", notimercheck);

/*
 * Traditionally ISA IRQ2 is the cascade IRQ, and is not available
 * to devices. However there may be an I/O APIC pin available for
 * this interrupt regardless. The pin may be left unconnected, but
 * typically it will be reused as an ExtINT cascade interrupt for
 * the master 8259A. In the MPS case such a pin will normally be
 * reported as an ExtINT interrupt in the MP table. With ACPI
 * there is no provision for ExtINT interrupts, and in the absence
 * of an override it would be treated as an ordinary ISA I/O APIC
 * interrupt, that is edge-triggered and unmasked by default. We
 * used to do this, but it caused problems on some systems because
 * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using
 * the same ExtINT cascade interrupt to drive the local APIC of the
 * bootstrap processor. Therefore we refrain from routing IRQ2 to
 * the I/O APIC in all cases now. No actual device should request
 * it anyway. --macro
 */
#define PIC_IRQS	(1<<2)

void __init setup_IO_APIC(void)
{

	/*
	 * calling enable_IO_APIC() is moved to setup_local_APIC for BP
	 */

	io_apic_irqs = ~PIC_IRQS;

	apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");

	sync_Arb_IDs();
	setup_IO_APIC_irqs();
	init_IO_APIC_traps();
	check_timer();
	if (!acpi_ioapic)
		print_IO_APIC();
}

struct sysfs_ioapic_data {
	struct sys_device dev;
	struct IO_APIC_route_entry entry[0];
};
static struct sysfs_ioapic_data *mp_ioapic_data[MAX_IO_APICS];

static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
{
	struct IO_APIC_route_entry *entry;
	struct sysfs_ioapic_data *data;
	int i;

	data = container_of(dev, struct sysfs_ioapic_data, dev);
	entry = data->entry;
	for (i = 0; i < nr_ioapic_registers[dev->id]; i++, entry++)
		*entry = ioapic_read_entry(dev->id, i);

	return 0;
}

static int ioapic_resume(struct sys_device *dev)
{
	struct IO_APIC_route_entry *entry;
	struct sysfs_ioapic_data *data;
	unsigned long flags;
	union IO_APIC_reg_00 reg_00;
	int i;

	data = container_of(dev, struct sysfs_ioapic_data, dev);
	entry = data->entry;

	spin_lock_irqsave(&ioapic_lock, flags);
	reg_00.raw = io_apic_read(dev->id, 0);
	if (reg_00.bits.ID != mp_ioapics[dev->id].mp_apicid) {
		reg_00.bits.ID = mp_ioapics[dev->id].mp_apicid;
		io_apic_write(dev->id, 0, reg_00.raw);
	}
	spin_unlock_irqrestore(&ioapic_lock, flags);
	for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
		ioapic_write_entry(dev->id, i, entry[i]);

	return 0;
}

static struct sysdev_class ioapic_sysdev_class = {
	.name = "ioapic",
	.suspend = ioapic_suspend,
	.resume = ioapic_resume,
};

static int __init ioapic_init_sysfs(void)
{
	struct sys_device *dev;
	int i, size, error;

	error = sysdev_class_register(&ioapic_sysdev_class);
	if (error)
		return error;

	for (i = 0; i < nr_ioapics; i++) {
		size = sizeof(struct sys_device) + nr_ioapic_registers[i]
			* sizeof(struct IO_APIC_route_entry);
		mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL);
		if (!mp_ioapic_data[i]) {
			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
			continue;
		}
		dev = &mp_ioapic_data[i]->dev;
		dev->id = i;
		dev->cls = &ioapic_sysdev_class;
		error = sysdev_register(dev);
		if (error) {
			kfree(mp_ioapic_data[i]);
			mp_ioapic_data[i] = NULL;
			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
			continue;
		}
	}

	return 0;
}

device_initcall(ioapic_init_sysfs);

/*
 * Dynamic irq allocate and deallocation
 */
int create_irq(void)
{
	/* Allocate an unused irq */
	int irq;
	int new;
	unsigned long flags;

	irq = -ENOSPC;
	spin_lock_irqsave(&vector_lock, flags);
	for (new = (NR_IRQS - 1); new >= 0; new--) {
		if (platform_legacy_irq(new))
			continue;
		if (irq_cfg[new].vector != 0)
			continue;
		if (__assign_irq_vector(new, TARGET_CPUS) == 0)
			irq = new;
		break;
	}
	spin_unlock_irqrestore(&vector_lock, flags);

	if (irq >= 0) {
		dynamic_irq_init(irq);
	}
	return irq;
}

void destroy_irq(unsigned int irq)
{
	unsigned long flags;

	dynamic_irq_cleanup(irq);

	spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq);
	spin_unlock_irqrestore(&vector_lock, flags);
}

/*
 * MSI message composition
 */
#ifdef CONFIG_PCI_MSI
static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
{
	struct irq_cfg *cfg = irq_cfg + irq;
	int err;
	unsigned dest;
	cpumask_t tmp;

	tmp = TARGET_CPUS;
	err = assign_irq_vector(irq, tmp);
	if (!err) {
		cpus_and(tmp, cfg->domain, tmp);
		dest = cpu_mask_to_apicid(tmp);

		msg->address_hi = MSI_ADDR_BASE_HI;
		msg->address_lo =
			MSI_ADDR_BASE_LO |
			((INT_DEST_MODE == 0) ?
				MSI_ADDR_DEST_MODE_PHYSICAL:
				MSI_ADDR_DEST_MODE_LOGICAL) |
			((INT_DELIVERY_MODE != dest_LowestPrio) ?
				MSI_ADDR_REDIRECTION_CPU:
				MSI_ADDR_REDIRECTION_LOWPRI) |
			MSI_ADDR_DEST_ID(dest);

		msg->data =
			MSI_DATA_TRIGGER_EDGE |
			MSI_DATA_LEVEL_ASSERT |
			((INT_DELIVERY_MODE != dest_LowestPrio) ?
				MSI_DATA_DELIVERY_FIXED:
				MSI_DATA_DELIVERY_LOWPRI) |
			MSI_DATA_VECTOR(cfg->vector);
	}
	return err;
}
2023
2024 #ifdef CONFIG_SMP
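/*
 * Retarget a live MSI interrupt: pick a vector valid for the new mask,
 * then patch the vector and destination ID fields of the message the
 * device is programmed with.
 */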
2025 static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
2026 {
2027 struct irq_cfg *cfg = irq_cfg + irq;
2028 struct msi_msg msg;
2029 unsigned int dest;
2030 cpumask_t tmp;
2031
2032 cpus_and(tmp, mask, cpu_online_map);
2033 if (cpus_empty(tmp))
2034 return;
2035
2036 if (assign_irq_vector(irq, mask))
2037 return;
2038
2039 cpus_and(tmp, cfg->domain, mask);
2040 dest = cpu_mask_to_apicid(tmp);
2041
2042 read_msi_msg(irq, &msg);
2043
2044 msg.data &= ~MSI_DATA_VECTOR_MASK;
2045 msg.data |= MSI_DATA_VECTOR(cfg->vector);
2046 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
2047 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
2048
2049 write_msi_msg(irq, &msg);
2050 irq_desc[irq].affinity = mask;
2051 }
2052 #endif /* CONFIG_SMP */
2053
2054 /*
2055 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
2056 * which implement the MSI or MSI-X Capability Structure.
2057 */
2058 static struct irq_chip msi_chip = {
2059 .name = "PCI-MSI",
2060 .unmask = unmask_msi_irq,
2061 .mask = mask_msi_irq,
2062 .ack = ack_apic_edge,
2063 #ifdef CONFIG_SMP
2064 .set_affinity = set_msi_irq_affinity,
2065 #endif
2066 .retrigger = ioapic_retrigger_irq,
2067 };
2068
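/*
 * Entry point used by the generic PCI MSI code: allocate an IRQ,
 * compose its message, bind the MSI irq_chip and program the device.
 */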
2069 int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
2070 {
2071 struct msi_msg msg;
2072 int irq, ret;
2073 irq = create_irq();
2074 if (irq < 0)
2075 return irq;
2076
2077 ret = msi_compose_msg(dev, irq, &msg);
2078 if (ret < 0) {
2079 destroy_irq(irq);
2080 return ret;
2081 }
2082
2083 set_irq_msi(irq, desc);
2084 write_msi_msg(irq, &msg);
2085
2086 set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");
2087
2088 return 0;
2089 }
2090
2091 void arch_teardown_msi_irq(unsigned int irq)
2092 {
2093 destroy_irq(irq);
2094 }
2095
2096 #ifdef CONFIG_DMAR
2097 #ifdef CONFIG_SMP
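/*
 * Same retargeting sequence as set_msi_irq_affinity(), except that the
 * message is read from and written back to the DMAR unit instead of a
 * PCI device.
 */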
2098 static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
2099 {
2100 struct irq_cfg *cfg = irq_cfg + irq;
2101 struct msi_msg msg;
2102 unsigned int dest;
2103 cpumask_t tmp;
2104
2105 cpus_and(tmp, mask, cpu_online_map);
2106 if (cpus_empty(tmp))
2107 return;
2108
2109 if (assign_irq_vector(irq, mask))
2110 return;
2111
2112 cpus_and(tmp, cfg->domain, mask);
2113 dest = cpu_mask_to_apicid(tmp);
2114
2115 dmar_msi_read(irq, &msg);
2116
2117 msg.data &= ~MSI_DATA_VECTOR_MASK;
2118 msg.data |= MSI_DATA_VECTOR(cfg->vector);
2119 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
2120 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
2121
2122 dmar_msi_write(irq, &msg);
2123 irq_desc[irq].affinity = mask;
2124 }
2125 #endif /* CONFIG_SMP */
2126
2127 struct irq_chip dmar_msi_type = {
2128 .name = "DMAR_MSI",
2129 .unmask = dmar_msi_unmask,
2130 .mask = dmar_msi_mask,
2131 .ack = ack_apic_edge,
2132 #ifdef CONFIG_SMP
2133 .set_affinity = dmar_msi_set_affinity,
2134 #endif
2135 .retrigger = ioapic_retrigger_irq,
2136 };
2137
2138 int arch_setup_dmar_msi(unsigned int irq)
2139 {
2140 int ret;
2141 struct msi_msg msg;
2142
2143 ret = msi_compose_msg(NULL, irq, &msg);
2144 if (ret < 0)
2145 return ret;
2146 dmar_msi_write(irq, &msg);
2147 set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
2148 "edge");
2149 return 0;
2150 }
2151 #endif
2152
2153 #endif /* CONFIG_PCI_MSI */
2154 /*
2155 * HyperTransport interrupt support
2156 */
2157 #ifdef CONFIG_HT_IRQ
2158
2159 #ifdef CONFIG_SMP
2160
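/*
 * Patch the vector and destination ID fields of an already-programmed
 * HyperTransport interrupt message.
 */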
2161 static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
2162 {
2163 struct ht_irq_msg msg;
2164 fetch_ht_irq_msg(irq, &msg);
2165
2166 msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
2167 msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
2168
2169 msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
2170 msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
2171
2172 write_ht_irq_msg(irq, &msg);
2173 }
2174
2175 static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
2176 {
2177 struct irq_cfg *cfg = irq_cfg + irq;
2178 unsigned int dest;
2179 cpumask_t tmp;
2180
2181 cpus_and(tmp, mask, cpu_online_map);
2182 if (cpus_empty(tmp))
2183 return;
2184
2185 if (assign_irq_vector(irq, mask))
2186 return;
2187
2188 cpus_and(tmp, cfg->domain, mask);
2189 dest = cpu_mask_to_apicid(tmp);
2190
2191 target_ht_irq(irq, dest, cfg->vector);
2192 irq_desc[irq].affinity = mask;
2193 }
2194 #endif
2195
2196 static struct irq_chip ht_irq_chip = {
2197 .name = "PCI-HT",
2198 .mask = mask_ht_irq,
2199 .unmask = unmask_ht_irq,
2200 .ack = ack_apic_edge,
2201 #ifdef CONFIG_SMP
2202 .set_affinity = set_ht_irq_affinity,
2203 #endif
2204 .retrigger = ioapic_retrigger_irq,
2205 };
2206
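/*
 * Compose and program a HyperTransport interrupt message, mirroring
 * the MSI setup path; the message is written with the mask bit set.
 */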
2207 int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
2208 {
2209 struct irq_cfg *cfg = irq_cfg + irq;
2210 int err;
2211 cpumask_t tmp;
2212
2213 tmp = TARGET_CPUS;
2214 err = assign_irq_vector(irq, tmp);
2215 if (!err) {
2216 struct ht_irq_msg msg;
2217 unsigned dest;
2218
2219 cpus_and(tmp, cfg->domain, tmp);
2220 dest = cpu_mask_to_apicid(tmp);
2221
2222 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
2223
2224 msg.address_lo =
2225 HT_IRQ_LOW_BASE |
2226 HT_IRQ_LOW_DEST_ID(dest) |
2227 HT_IRQ_LOW_VECTOR(cfg->vector) |
2228 ((INT_DEST_MODE == 0) ?
2229 HT_IRQ_LOW_DM_PHYSICAL :
2230 HT_IRQ_LOW_DM_LOGICAL) |
2231 HT_IRQ_LOW_RQEOI_EDGE |
2232 ((INT_DELIVERY_MODE != dest_LowestPrio) ?
2233 HT_IRQ_LOW_MT_FIXED :
2234 HT_IRQ_LOW_MT_ARBITRATED) |
2235 HT_IRQ_LOW_IRQ_MASKED;
2236
2237 write_ht_irq_msg(irq, &msg);
2238
2239 set_irq_chip_and_handler_name(irq, &ht_irq_chip,
2240 handle_edge_irq, "edge");
2241 }
2242 return err;
2243 }
2244 #endif /* CONFIG_HT_IRQ */
2245
2246 /* --------------------------------------------------------------------------
2247 ACPI-based IOAPIC Configuration
2248 -------------------------------------------------------------------------- */
2249
2250 #ifdef CONFIG_ACPI
2251
2252 #define IO_APIC_MAX_ID 0xFE
2253
2254 int __init io_apic_get_redir_entries (int ioapic)
2255 {
2256 union IO_APIC_reg_01 reg_01;
2257 unsigned long flags;
2258
2259 spin_lock_irqsave(&ioapic_lock, flags);
2260 reg_01.raw = io_apic_read(ioapic, 1);
2261 spin_unlock_irqrestore(&ioapic_lock, flags);
2262
2263 return reg_01.bits.entries;
2264 }
2265
2266
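/*
 * Program an I/O APIC pin for a PCI interrupt as described by ACPI:
 * record the pin mapping for non-legacy IRQs and set up the
 * redirection entry with the given trigger mode and polarity.
 */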
2267 int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity)
2268 {
2269 if (!IO_APIC_IRQ(irq)) {
2270 apic_printk(APIC_QUIET, KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
2271 ioapic);
2272 return -EINVAL;
2273 }
2274
2275 /*
2276 * IRQs < 16 are already in the irq_2_pin[] map
2277 */
2278 if (irq >= 16)
2279 add_pin_to_irq(irq, ioapic, pin);
2280
2281 setup_IO_APIC_irq(ioapic, pin, irq, triggering, polarity);
2282
2283 return 0;
2284 }
2285
2286
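/*
 * Look up the MP table interrupt entry matching @bus_irq and report
 * its trigger mode and polarity; returns -1 when no entry exists or
 * I/O APIC setup has been disabled.
 */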
2287 int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
2288 {
2289 int i;
2290
2291 if (skip_ioapic_setup)
2292 return -1;
2293
2294 for (i = 0; i < mp_irq_entries; i++)
2295 if (mp_irqs[i].mp_irqtype == mp_INT &&
2296 mp_irqs[i].mp_srcbusirq == bus_irq)
2297 break;
2298 if (i >= mp_irq_entries)
2299 return -1;
2300
2301 *trigger = irq_trigger(i);
2302 *polarity = irq_polarity(i);
2303 return 0;
2304 }
2305
2306 #endif /* CONFIG_ACPI */
2307
2308 /*
2309 * This function is currently only a helper for the SMP boot process: we
2310 * need to reprogram the I/O redirection tables to cover the CPUs that
2311 * have come online, so the mask should in all cases simply be TARGET_CPUS.
2312 */
2313 #ifdef CONFIG_SMP
2314 void __init setup_ioapic_dest(void)
2315 {
2316 int pin, ioapic, irq, irq_entry;
2317
2318 if (skip_ioapic_setup == 1)
2319 return;
2320
2321 for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
2322 for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
2323 irq_entry = find_irq_entry(ioapic, pin, mp_INT);
2324 if (irq_entry == -1)
2325 continue;
2326 irq = pin_2_irq(irq_entry, ioapic, pin);
2327
2328 /* setup_IO_APIC_irqs() can fail to get a vector for some devices
2329 * when there are very many of them, because at that point only the
2330 * boot CPU is online.
2331 */
2332 if (!irq_cfg[irq].vector)
2333 setup_IO_APIC_irq(ioapic, pin, irq,
2334 irq_trigger(irq_entry),
2335 irq_polarity(irq_entry));
2336 else
2337 set_ioapic_affinity_irq(irq, TARGET_CPUS);
2338 }
2339
2340 }
2341 }
2342 #endif
2343
2344 #define IOAPIC_RESOURCE_NAME_SIZE 11
2345
2346 static struct resource *ioapic_resources;
2347
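/*
 * A single bootmem allocation holds the resource array followed by
 * the "IOAPIC <n>" name strings that its entries point into.
 */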
2348 static struct resource * __init ioapic_setup_resources(void)
2349 {
2350 unsigned long n;
2351 struct resource *res;
2352 char *mem;
2353 int i;
2354
2355 if (nr_ioapics <= 0)
2356 return NULL;
2357
2358 n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
2359 n *= nr_ioapics;
2360
2361 mem = alloc_bootmem(n);
2362 res = (void *)mem;
2363
2364 if (mem != NULL) {
2365 mem += sizeof(struct resource) * nr_ioapics;
2366
2367 for (i = 0; i < nr_ioapics; i++) {
2368 res[i].name = mem;
2369 res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
2370 sprintf(mem, "IOAPIC %u", i);
2371 mem += IOAPIC_RESOURCE_NAME_SIZE;
2372 }
2373 }
2374
2375 ioapic_resources = res;
2376
2377 return res;
2378 }
2379
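/*
 * Map each I/O APIC's register window through a fixmap slot.  Without
 * an MP configuration the physical address is unknown, so a freshly
 * allocated page is mapped instead to keep the fixmap valid.
 */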
2380 void __init ioapic_init_mappings(void)
2381 {
2382 unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
2383 struct resource *ioapic_res;
2384 int i;
2385
2386 ioapic_res = ioapic_setup_resources();
2387 for (i = 0; i < nr_ioapics; i++) {
2388 if (smp_found_config) {
2389 ioapic_phys = mp_ioapics[i].mp_apicaddr;
2390 } else {
2391 ioapic_phys = (unsigned long)
2392 alloc_bootmem_pages(PAGE_SIZE);
2393 ioapic_phys = __pa(ioapic_phys);
2394 }
2395 set_fixmap_nocache(idx, ioapic_phys);
2396 apic_printk(APIC_VERBOSE,
2397 "mapped IOAPIC to %016lx (%016lx)\n",
2398 __fix_to_virt(idx), ioapic_phys);
2399 idx++;
2400
2401 if (ioapic_res != NULL) {
2402 ioapic_res->start = ioapic_phys;
2403 ioapic_res->end = ioapic_phys + (4 * 1024) - 1;
2404 ioapic_res++;
2405 }
2406 }
2407 }
2408
2409 static int __init ioapic_insert_resources(void)
2410 {
2411 int i;
2412 struct resource *r = ioapic_resources;
2413
2414 if (!r) {
2415 printk(KERN_ERR
2416 "IO APIC resources could be not be allocated.\n");
2417 return -1;
2418 }
2419
2420 for (i = 0; i < nr_ioapics; i++) {
2421 insert_resource(&iomem_resource, r);
2422 r++;
2423 }
2424
2425 return 0;
2426 }
2427
2428 /* Insert the IO APIC resources after PCI initialization has occurred to handle
2429 * IO APICs that are mapped in on a BAR in PCI space. */
2430 late_initcall(ioapic_insert_resources);
2431