[PATCH] x86_64: serialize assign_irq_vector() use of static variables
[deliverable/linux.git] / arch/x86_64/kernel/io_apic.c
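The change here serializes assign_irq_vector(): the function keeps its allocation cursor in function-local statics (current_vector and offset) and also updates the shared vector_irq[] and irq_vector[] tables, so it now takes the vector_lock spinlock (declared near the top of the file) around those accesses. See the function body, and the illustrative note that follows it, further down the listing.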
1 /*
2 * Intel IO-APIC support for multi-Pentium hosts.
3 *
4 * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
5 *
6 * Many thanks to Stig Venaas for trying out countless experimental
7 * patches and reporting/debugging problems patiently!
8 *
9 * (c) 1999, Multiple IO-APIC support, developed by
10 * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
11 * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
12 * further tested and cleaned up by Zach Brown <zab@redhat.com>
13 * and Ingo Molnar <mingo@redhat.com>
14 *
15 * Fixes
16 * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
17 * thanks to Eric Gilmore
18 * and Rolf G. Tews
19 * for testing these extensively
20 * Paul Diefenbaugh : Added full ACPI support
21 */
22
23 #include <linux/mm.h>
24 #include <linux/interrupt.h>
25 #include <linux/init.h>
26 #include <linux/delay.h>
27 #include <linux/sched.h>
28 #include <linux/config.h>
29 #include <linux/smp_lock.h>
30 #include <linux/mc146818rtc.h>
31 #include <linux/acpi.h>
32 #include <linux/sysdev.h>
33 #ifdef CONFIG_ACPI
34 #include <acpi/acpi_bus.h>
35 #endif
36
37 #include <asm/io.h>
38 #include <asm/smp.h>
39 #include <asm/desc.h>
40 #include <asm/proto.h>
41 #include <asm/mach_apic.h>
42 #include <asm/acpi.h>
43 #include <asm/dma.h>
44
45 #define __apicdebuginit __init
46
47 int sis_apic_bug; /* not actually supported, dummy for compile */
48
49 static int no_timer_check;
50
51 int disable_timer_pin_1 __initdata;
52
53 int timer_over_8254 __initdata = 0;
54
55 /* Where, if anywhere, the i8259 is connected in external int mode */
56 static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
57
58 static DEFINE_SPINLOCK(ioapic_lock);
59 static DEFINE_SPINLOCK(vector_lock);
60
61 /*
62 * # of IRQ routing registers
63 */
64 int nr_ioapic_registers[MAX_IO_APICS];
65
66 /*
67 * Rough estimation of how many shared IRQs there are, can
68 * be changed anytime.
69 */
70 #define MAX_PLUS_SHARED_IRQS NR_IRQ_VECTORS
71 #define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
72
73 /*
74 * This is performance-critical, we want to do it O(1)
75 *
76 * the indexing order of this array favors 1:1 mappings
77 * between pins and IRQs.
78 */
79
80 static struct irq_pin_list {
81 short apic, pin, next;
82 } irq_2_pin[PIN_MAP_SIZE];
83
84 int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1};
85 #ifdef CONFIG_PCI_MSI
86 #define vector_to_irq(vector) \
87 (platform_legacy_irq(vector) ? vector : vector_irq[vector])
88 #else
89 #define vector_to_irq(vector) (vector)
90 #endif
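/*
 * Illustrative note (not in the original source): with CONFIG_PCI_MSI the
 * table above is consulted, so e.g. vector_to_irq(0x41) yields whatever IRQ
 * assign_irq_vector() recorded in vector_irq[0x41], or -1 if that vector is
 * unassigned; IRQs that platform_legacy_irq() identifies as legacy pass
 * through unchanged, and without CONFIG_PCI_MSI the macro is the identity.
 */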
91
92 #define __DO_ACTION(R, ACTION, FINAL) \
93 \
94 { \
95 int pin; \
96 struct irq_pin_list *entry = irq_2_pin + irq; \
97 \
98 BUG_ON(irq >= NR_IRQS); \
99 for (;;) { \
100 unsigned int reg; \
101 pin = entry->pin; \
102 if (pin == -1) \
103 break; \
104 reg = io_apic_read(entry->apic, 0x10 + R + pin*2); \
105 reg ACTION; \
106 io_apic_modify(entry->apic, reg); \
107 if (!entry->next) \
108 break; \
109 entry = irq_2_pin + entry->next; \
110 } \
111 FINAL; \
112 }
113
114 #ifdef CONFIG_SMP
115 static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
116 {
117 unsigned long flags;
118 unsigned int dest;
119 cpumask_t tmp;
120
121 cpus_and(tmp, mask, cpu_online_map);
122 if (cpus_empty(tmp))
123 tmp = TARGET_CPUS;
124
125 cpus_and(mask, tmp, CPU_MASK_ALL);
126
127 dest = cpu_mask_to_apicid(mask);
128
129 /*
130 * Only the high 8 bits are valid.
131 */
132 dest = SET_APIC_LOGICAL_ID(dest);
133
134 spin_lock_irqsave(&ioapic_lock, flags);
135 __DO_ACTION(1, = dest, )
136 set_irq_info(irq, mask);
137 spin_unlock_irqrestore(&ioapic_lock, flags);
138 }
139 #endif
140
141 static u8 gsi_2_irq[NR_IRQ_VECTORS] = { [0 ... NR_IRQ_VECTORS-1] = 0xFF };
142
143 /*
144 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
145 * shared ISA-space IRQs, so we have to support them. We are super
146 * fast in the common case, and fast for shared ISA-space IRQs.
147 */
148 static void add_pin_to_irq(unsigned int irq, int apic, int pin)
149 {
150 static int first_free_entry = NR_IRQS;
151 struct irq_pin_list *entry = irq_2_pin + irq;
152
153 BUG_ON(irq >= NR_IRQS);
154 while (entry->next)
155 entry = irq_2_pin + entry->next;
156
157 if (entry->pin != -1) {
158 entry->next = first_free_entry;
159 entry = irq_2_pin + entry->next;
160 if (++first_free_entry >= PIN_MAP_SIZE)
161 panic("io_apic.c: ran out of irq_2_pin entries!");
162 }
163 entry->apic = apic;
164 entry->pin = pin;
165 }
166
167
168 #define DO_ACTION(name,R,ACTION, FINAL) \
169 \
170 static void name##_IO_APIC_irq (unsigned int irq) \
171 __DO_ACTION(R, ACTION, FINAL)
172
173 DO_ACTION( __mask, 0, |= 0x00010000, io_apic_sync(entry->apic) )
174 /* mask = 1 */
175 DO_ACTION( __unmask, 0, &= 0xfffeffff, )
176 /* mask = 0 */
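/*
 * Illustrative expansion (not part of the original source): the first
 * DO_ACTION() above, DO_ACTION(__mask, 0, |= 0x00010000, io_apic_sync(entry->apic)),
 * generates roughly the following -- it walks every (apic, pin) chained to
 * the IRQ in irq_2_pin[] and sets bit 16 (the mask bit) in the low dword of
 * each redirection entry, then syncs the last IO-APIC touched:
 *
 *	static void __mask_IO_APIC_irq(unsigned int irq)
 *	{
 *		int pin;
 *		struct irq_pin_list *entry = irq_2_pin + irq;
 *
 *		BUG_ON(irq >= NR_IRQS);
 *		for (;;) {
 *			unsigned int reg;
 *			pin = entry->pin;
 *			if (pin == -1)
 *				break;
 *			reg = io_apic_read(entry->apic, 0x10 + 0 + pin*2);
 *			reg |= 0x00010000;
 *			io_apic_modify(entry->apic, reg);
 *			if (!entry->next)
 *				break;
 *			entry = irq_2_pin + entry->next;
 *		}
 *		io_apic_sync(entry->apic);
 *	}
 */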
177
178 static void mask_IO_APIC_irq (unsigned int irq)
179 {
180 unsigned long flags;
181
182 spin_lock_irqsave(&ioapic_lock, flags);
183 __mask_IO_APIC_irq(irq);
184 spin_unlock_irqrestore(&ioapic_lock, flags);
185 }
186
187 static void unmask_IO_APIC_irq (unsigned int irq)
188 {
189 unsigned long flags;
190
191 spin_lock_irqsave(&ioapic_lock, flags);
192 __unmask_IO_APIC_irq(irq);
193 spin_unlock_irqrestore(&ioapic_lock, flags);
194 }
195
196 static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
197 {
198 struct IO_APIC_route_entry entry;
199 unsigned long flags;
200
201 /* Check delivery_mode to be sure we're not clearing an SMI pin */
202 spin_lock_irqsave(&ioapic_lock, flags);
203 *(((int*)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
204 *(((int*)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
205 spin_unlock_irqrestore(&ioapic_lock, flags);
206 if (entry.delivery_mode == dest_SMI)
207 return;
208 /*
209 * Disable it in the IO-APIC irq-routing table:
210 */
211 memset(&entry, 0, sizeof(entry));
212 entry.mask = 1;
213 spin_lock_irqsave(&ioapic_lock, flags);
214 io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry) + 0));
215 io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry) + 1));
216 spin_unlock_irqrestore(&ioapic_lock, flags);
217 }
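/*
 * Hypothetical helpers (illustrative only, not part of this file or of the
 * kernel API of this era): the two-dword access pattern above -- low half of
 * a routing entry at register 0x10 + 2*pin, high half at 0x11 + 2*pin --
 * recurs throughout this file and could be wrapped as follows, with the
 * caller holding ioapic_lock:
 */
#if 0	/* sketch only */
static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
{
	struct IO_APIC_route_entry entry;

	*(((int *)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
	*(((int *)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
	return entry;
}

static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	/* high dword first, matching the ordering used elsewhere in this file */
	io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&e) + 1));
	io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&e) + 0));
}
#endif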
218
219 static void clear_IO_APIC (void)
220 {
221 int apic, pin;
222
223 for (apic = 0; apic < nr_ioapics; apic++)
224 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
225 clear_IO_APIC_pin(apic, pin);
226 }
227
228 /*
229 * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
230 * specific CPU-side IRQs.
231 */
232
233 #define MAX_PIRQS 8
234 static int pirq_entries [MAX_PIRQS];
235 static int pirqs_enabled;
236 int skip_ioapic_setup;
237 int ioapic_force;
238
239 /* dummy parsing: see setup.c */
240
241 static int __init disable_ioapic_setup(char *str)
242 {
243 skip_ioapic_setup = 1;
244 return 1;
245 }
246
247 static int __init enable_ioapic_setup(char *str)
248 {
249 ioapic_force = 1;
250 skip_ioapic_setup = 0;
251 return 1;
252 }
253
254 __setup("noapic", disable_ioapic_setup);
255 __setup("apic", enable_ioapic_setup);
256
257 static int __init setup_disable_8254_timer(char *s)
258 {
259 timer_over_8254 = -1;
260 return 1;
261 }
262 static int __init setup_enable_8254_timer(char *s)
263 {
264 timer_over_8254 = 2;
265 return 1;
266 }
267
268 __setup("disable_8254_timer", setup_disable_8254_timer);
269 __setup("enable_8254_timer", setup_enable_8254_timer);
270
271 #include <asm/pci-direct.h>
272 #include <linux/pci_ids.h>
273 #include <linux/pci.h>
274
275
276 #ifdef CONFIG_ACPI
277
278 static int nvidia_hpet_detected __initdata;
279
280 static int __init nvidia_hpet_check(unsigned long phys, unsigned long size)
281 {
282 nvidia_hpet_detected = 1;
283 return 0;
284 }
285 #endif
286
287 /* Temporary Hack. Nvidia and VIA boards currently only work with IO-APIC
288 off. Check for an Nvidia or VIA PCI bridge and turn it off.
289 Use pci direct infrastructure because this runs before the PCI subsystem.
290
291 Can be overridden with "apic"
292
293 And another hack to disable the IOMMU on VIA chipsets.
294
295 ... and others. Really should move this somewhere else.
296
297 Kludge-O-Rama. */
298 void __init check_ioapic(void)
299 {
300 int num,slot,func;
301 /* Poor man's PCI discovery */
302 for (num = 0; num < 32; num++) {
303 for (slot = 0; slot < 32; slot++) {
304 for (func = 0; func < 8; func++) {
305 u32 class;
306 u32 vendor;
307 u8 type;
308 class = read_pci_config(num,slot,func,
309 PCI_CLASS_REVISION);
310 if (class == 0xffffffff)
311 break;
312
313 if ((class >> 16) != PCI_CLASS_BRIDGE_PCI)
314 continue;
315
316 vendor = read_pci_config(num, slot, func,
317 PCI_VENDOR_ID);
318 vendor &= 0xffff;
319 switch (vendor) {
320 case PCI_VENDOR_ID_VIA:
321 #ifdef CONFIG_GART_IOMMU
322 if ((end_pfn > MAX_DMA32_PFN ||
323 force_iommu) &&
324 !iommu_aperture_allowed) {
325 printk(KERN_INFO
326 "Looks like a VIA chipset. Disabling IOMMU. Override with \"iommu=allowed\"\n");
327 iommu_aperture_disabled = 1;
328 }
329 #endif
330 return;
331 case PCI_VENDOR_ID_NVIDIA:
332 #ifdef CONFIG_ACPI
333 /*
334 * All timer overrides on Nvidia are
335 * wrong unless HPET is enabled.
336 */
337 nvidia_hpet_detected = 0;
338 acpi_table_parse(ACPI_HPET,
339 nvidia_hpet_check);
340 if (nvidia_hpet_detected == 0) {
341 acpi_skip_timer_override = 1;
342 printk(KERN_INFO "Nvidia board "
343 "detected. Ignoring ACPI "
344 "timer override.\n");
345 }
346 #endif
347 /* RED-PEN skip them on mptables too? */
348 return;
349
350 /* This should actually be the default, but
351 for 2.6.16 let's do it only for ATI, where
352 it's really needed. */
353 case PCI_VENDOR_ID_ATI:
354 if (timer_over_8254 == 1) {
355 timer_over_8254 = 0;
356 printk(KERN_INFO
357 "ATI board detected. Disabling timer routing over 8254.\n");
358 }
359 return;
360 }
361
362
363 /* No multi-function device? */
364 type = read_pci_config_byte(num,slot,func,
365 PCI_HEADER_TYPE);
366 if (!(type & 0x80))
367 break;
368 }
369 }
370 }
371 }
372
373 static int __init ioapic_pirq_setup(char *str)
374 {
375 int i, max;
376 int ints[MAX_PIRQS+1];
377
378 get_options(str, ARRAY_SIZE(ints), ints);
379
380 for (i = 0; i < MAX_PIRQS; i++)
381 pirq_entries[i] = -1;
382
383 pirqs_enabled = 1;
384 apic_printk(APIC_VERBOSE, "PIRQ redirection, working around broken MP-BIOS.\n");
385 max = MAX_PIRQS;
386 if (ints[0] < MAX_PIRQS)
387 max = ints[0];
388
389 for (i = 0; i < max; i++) {
390 apic_printk(APIC_VERBOSE, "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
391 /*
392 * PIRQs are mapped upside down, usually.
393 */
394 pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
395 }
396 return 1;
397 }
398
399 __setup("pirq=", ioapic_pirq_setup);
400
401 /*
402 * Find the IRQ entry number of a certain pin.
403 */
404 static int find_irq_entry(int apic, int pin, int type)
405 {
406 int i;
407
408 for (i = 0; i < mp_irq_entries; i++)
409 if (mp_irqs[i].mpc_irqtype == type &&
410 (mp_irqs[i].mpc_dstapic == mp_ioapics[apic].mpc_apicid ||
411 mp_irqs[i].mpc_dstapic == MP_APIC_ALL) &&
412 mp_irqs[i].mpc_dstirq == pin)
413 return i;
414
415 return -1;
416 }
417
418 /*
419 * Find the pin to which IRQ[irq] (ISA) is connected
420 */
421 static int __init find_isa_irq_pin(int irq, int type)
422 {
423 int i;
424
425 for (i = 0; i < mp_irq_entries; i++) {
426 int lbus = mp_irqs[i].mpc_srcbus;
427
428 if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
429 mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
430 mp_bus_id_to_type[lbus] == MP_BUS_MCA) &&
431 (mp_irqs[i].mpc_irqtype == type) &&
432 (mp_irqs[i].mpc_srcbusirq == irq))
433
434 return mp_irqs[i].mpc_dstirq;
435 }
436 return -1;
437 }
438
439 static int __init find_isa_irq_apic(int irq, int type)
440 {
441 int i;
442
443 for (i = 0; i < mp_irq_entries; i++) {
444 int lbus = mp_irqs[i].mpc_srcbus;
445
446 if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
447 mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
448 mp_bus_id_to_type[lbus] == MP_BUS_MCA) &&
449 (mp_irqs[i].mpc_irqtype == type) &&
450 (mp_irqs[i].mpc_srcbusirq == irq))
451 break;
452 }
453 if (i < mp_irq_entries) {
454 int apic;
455 for(apic = 0; apic < nr_ioapics; apic++) {
456 if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic)
457 return apic;
458 }
459 }
460
461 return -1;
462 }
463
464 /*
465 * Find a specific PCI IRQ entry.
466 * Not an __init, possibly needed by modules
467 */
468 static int pin_2_irq(int idx, int apic, int pin);
469
470 int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
471 {
472 int apic, i, best_guess = -1;
473
474 apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
475 bus, slot, pin);
476 if (mp_bus_id_to_pci_bus[bus] == -1) {
477 apic_printk(APIC_VERBOSE, "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
478 return -1;
479 }
480 for (i = 0; i < mp_irq_entries; i++) {
481 int lbus = mp_irqs[i].mpc_srcbus;
482
483 for (apic = 0; apic < nr_ioapics; apic++)
484 if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic ||
485 mp_irqs[i].mpc_dstapic == MP_APIC_ALL)
486 break;
487
488 if ((mp_bus_id_to_type[lbus] == MP_BUS_PCI) &&
489 !mp_irqs[i].mpc_irqtype &&
490 (bus == lbus) &&
491 (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) {
492 int irq = pin_2_irq(i,apic,mp_irqs[i].mpc_dstirq);
493
494 if (!(apic || IO_APIC_IRQ(irq)))
495 continue;
496
497 if (pin == (mp_irqs[i].mpc_srcbusirq & 3))
498 return irq;
499 /*
500 * Use the first all-but-pin matching entry as a
501 * best-guess fuzzy result for broken mptables.
502 */
503 if (best_guess < 0)
504 best_guess = irq;
505 }
506 }
507 BUG_ON(best_guess >= NR_IRQS);
508 return best_guess;
509 }
510
511 /*
512 * EISA Edge/Level control register, ELCR
513 */
514 static int EISA_ELCR(unsigned int irq)
515 {
516 if (irq < 16) {
517 unsigned int port = 0x4d0 + (irq >> 3);
518 return (inb(port) >> (irq & 7)) & 1;
519 }
520 apic_printk(APIC_VERBOSE, "Broken MPtable reports ISA irq %d\n", irq);
521 return 0;
522 }
523
524 /* EISA interrupts are always polarity zero and can be edge or level
525 * trigger depending on the ELCR value. If an interrupt is listed as
526 * EISA conforming in the MP table, that means its trigger type must
527 * be read in from the ELCR */
528
529 #define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mpc_srcbusirq))
530 #define default_EISA_polarity(idx) (0)
531
532 /* ISA interrupts are always polarity zero edge triggered,
533 * when listed as conforming in the MP table. */
534
535 #define default_ISA_trigger(idx) (0)
536 #define default_ISA_polarity(idx) (0)
537
538 /* PCI interrupts are always polarity one level triggered,
539 * when listed as conforming in the MP table. */
540
541 #define default_PCI_trigger(idx) (1)
542 #define default_PCI_polarity(idx) (1)
543
544 /* MCA interrupts are always polarity zero level triggered,
545 * when listed as conforming in the MP table. */
546
547 #define default_MCA_trigger(idx) (1)
548 #define default_MCA_polarity(idx) (0)
549
550 static int __init MPBIOS_polarity(int idx)
551 {
552 int bus = mp_irqs[idx].mpc_srcbus;
553 int polarity;
554
555 /*
556 * Determine IRQ line polarity (high active or low active):
557 */
558 switch (mp_irqs[idx].mpc_irqflag & 3)
559 {
560 case 0: /* conforms, ie. bus-type dependent polarity */
561 {
562 switch (mp_bus_id_to_type[bus])
563 {
564 case MP_BUS_ISA: /* ISA pin */
565 {
566 polarity = default_ISA_polarity(idx);
567 break;
568 }
569 case MP_BUS_EISA: /* EISA pin */
570 {
571 polarity = default_EISA_polarity(idx);
572 break;
573 }
574 case MP_BUS_PCI: /* PCI pin */
575 {
576 polarity = default_PCI_polarity(idx);
577 break;
578 }
579 case MP_BUS_MCA: /* MCA pin */
580 {
581 polarity = default_MCA_polarity(idx);
582 break;
583 }
584 default:
585 {
586 printk(KERN_WARNING "broken BIOS!!\n");
587 polarity = 1;
588 break;
589 }
590 }
591 break;
592 }
593 case 1: /* high active */
594 {
595 polarity = 0;
596 break;
597 }
598 case 2: /* reserved */
599 {
600 printk(KERN_WARNING "broken BIOS!!\n");
601 polarity = 1;
602 break;
603 }
604 case 3: /* low active */
605 {
606 polarity = 1;
607 break;
608 }
609 default: /* invalid */
610 {
611 printk(KERN_WARNING "broken BIOS!!\n");
612 polarity = 1;
613 break;
614 }
615 }
616 return polarity;
617 }
618
619 static int MPBIOS_trigger(int idx)
620 {
621 int bus = mp_irqs[idx].mpc_srcbus;
622 int trigger;
623
624 /*
625 * Determine IRQ trigger mode (edge or level sensitive):
626 */
627 switch ((mp_irqs[idx].mpc_irqflag>>2) & 3)
628 {
629 case 0: /* conforms, ie. bus-type dependent */
630 {
631 switch (mp_bus_id_to_type[bus])
632 {
633 case MP_BUS_ISA: /* ISA pin */
634 {
635 trigger = default_ISA_trigger(idx);
636 break;
637 }
638 case MP_BUS_EISA: /* EISA pin */
639 {
640 trigger = default_EISA_trigger(idx);
641 break;
642 }
643 case MP_BUS_PCI: /* PCI pin */
644 {
645 trigger = default_PCI_trigger(idx);
646 break;
647 }
648 case MP_BUS_MCA: /* MCA pin */
649 {
650 trigger = default_MCA_trigger(idx);
651 break;
652 }
653 default:
654 {
655 printk(KERN_WARNING "broken BIOS!!\n");
656 trigger = 1;
657 break;
658 }
659 }
660 break;
661 }
662 case 1: /* edge */
663 {
664 trigger = 0;
665 break;
666 }
667 case 2: /* reserved */
668 {
669 printk(KERN_WARNING "broken BIOS!!\n");
670 trigger = 1;
671 break;
672 }
673 case 3: /* level */
674 {
675 trigger = 1;
676 break;
677 }
678 default: /* invalid */
679 {
680 printk(KERN_WARNING "broken BIOS!!\n");
681 trigger = 0;
682 break;
683 }
684 }
685 return trigger;
686 }
687
688 static inline int irq_polarity(int idx)
689 {
690 return MPBIOS_polarity(idx);
691 }
692
693 static inline int irq_trigger(int idx)
694 {
695 return MPBIOS_trigger(idx);
696 }
697
698 static int next_irq = 16;
699
700 /*
701 * gsi_irq_sharing -- Name overload! "irq" can be either a legacy IRQ
702 * in the range 0-15, a linux IRQ in the range 0-223, or a GSI number
703 * from ACPI, which can reach 800 in large boxen.
704 *
705 * Compact the sparse GSI space into a sequential IRQ series and reuse
706 * vectors if possible.
707 */
708 int gsi_irq_sharing(int gsi)
709 {
710 int i, tries, vector;
711
712 BUG_ON(gsi >= NR_IRQ_VECTORS);
713
714 if (platform_legacy_irq(gsi))
715 return gsi;
716
717 if (gsi_2_irq[gsi] != 0xFF)
718 return (int)gsi_2_irq[gsi];
719
720 tries = NR_IRQS;
721 try_again:
722 vector = assign_irq_vector(gsi);
723
724 /*
725 * Sharing vectors means sharing IRQs, so scan irq_vectors for previous
726 * use of vector and if found, return that IRQ. However, we never want
727 * to share legacy IRQs, which usually have a different trigger mode
728 * than PCI.
729 */
730 for (i = 0; i < NR_IRQS; i++)
731 if (IO_APIC_VECTOR(i) == vector)
732 break;
733 if (platform_legacy_irq(i)) {
734 if (--tries >= 0) {
735 IO_APIC_VECTOR(i) = 0;
736 goto try_again;
737 }
738 panic("gsi_irq_sharing: didn't find an IRQ using vector 0x%02X for GSI %d", vector, gsi);
739 }
740 if (i < NR_IRQS) {
741 gsi_2_irq[gsi] = i;
742 printk(KERN_INFO "GSI %d sharing vector 0x%02X and IRQ %d\n",
743 gsi, vector, i);
744 return i;
745 }
746
747 i = next_irq++;
748 BUG_ON(i >= NR_IRQS);
749 gsi_2_irq[gsi] = i;
750 IO_APIC_VECTOR(i) = vector;
751 printk(KERN_INFO "GSI %d assigned vector 0x%02X and IRQ %d\n",
752 gsi, vector, i);
753 return i;
754 }
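/*
 * Worked example (illustrative): for a GSI of, say, 40 reported by ACPI,
 * pin_2_irq() below ends up calling gsi_irq_sharing(40); the vector comes
 * from assign_irq_vector(40), and the loop then checks whether some other
 * IRQ already owns that vector (which happens once the allocator has
 * wrapped on large boxes) -- if so, and it is not a legacy IRQ, GSI 40
 * shares that IRQ and gsi_2_irq[40] caches it; otherwise a fresh IRQ number
 * is taken from next_irq and bound to the vector.
 */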
755
756 static int pin_2_irq(int idx, int apic, int pin)
757 {
758 int irq, i;
759 int bus = mp_irqs[idx].mpc_srcbus;
760
761 /*
762 * Debugging check, we are in big trouble if this message pops up!
763 */
764 if (mp_irqs[idx].mpc_dstirq != pin)
765 printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
766
767 switch (mp_bus_id_to_type[bus])
768 {
769 case MP_BUS_ISA: /* ISA pin */
770 case MP_BUS_EISA:
771 case MP_BUS_MCA:
772 {
773 irq = mp_irqs[idx].mpc_srcbusirq;
774 break;
775 }
776 case MP_BUS_PCI: /* PCI pin */
777 {
778 /*
779 * PCI IRQs are mapped in order
780 */
781 i = irq = 0;
782 while (i < apic)
783 irq += nr_ioapic_registers[i++];
784 irq += pin;
785 irq = gsi_irq_sharing(irq);
786 break;
787 }
788 default:
789 {
790 printk(KERN_ERR "unknown bus type %d.\n",bus);
791 irq = 0;
792 break;
793 }
794 }
795 BUG_ON(irq >= NR_IRQS);
796
797 /*
798 * PCI IRQ command line redirection. Yes, limits are hardcoded.
799 */
800 if ((pin >= 16) && (pin <= 23)) {
801 if (pirq_entries[pin-16] != -1) {
802 if (!pirq_entries[pin-16]) {
803 apic_printk(APIC_VERBOSE, "disabling PIRQ%d\n", pin-16);
804 } else {
805 irq = pirq_entries[pin-16];
806 apic_printk(APIC_VERBOSE, "using PIRQ%d -> IRQ %d\n",
807 pin-16, irq);
808 }
809 }
810 }
811 BUG_ON(irq >= NR_IRQS);
812 return irq;
813 }
814
815 static inline int IO_APIC_irq_trigger(int irq)
816 {
817 int apic, idx, pin;
818
819 for (apic = 0; apic < nr_ioapics; apic++) {
820 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
821 idx = find_irq_entry(apic,pin,mp_INT);
822 if ((idx != -1) && (irq == pin_2_irq(idx,apic,pin)))
823 return irq_trigger(idx);
824 }
825 }
826 /*
827 * nonexistent IRQs are edge default
828 */
829 return 0;
830 }
831
832 /* irq_vector is indexed by the sum of all RTEs in all I/O APICs. */
833 u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 };
834
835 int assign_irq_vector(int irq)
836 {
837 static int current_vector = FIRST_DEVICE_VECTOR, offset = 0;
838 int vector;
839
840 BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS);
841
842 spin_lock(&vector_lock);
843
844 if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) {
845 spin_unlock(&vector_lock);
846 return IO_APIC_VECTOR(irq);
847 }
848 next:
849 current_vector += 8;
850 if (current_vector == IA32_SYSCALL_VECTOR)
851 goto next;
852
853 if (current_vector >= FIRST_SYSTEM_VECTOR) {
854 /* If we run out of vectors on large boxen, must share them. */
855 offset = (offset + 1) % 8;
856 current_vector = FIRST_DEVICE_VECTOR + offset;
857 }
858
859 vector = current_vector;
860 vector_irq[vector] = irq;
861 if (irq != AUTO_ASSIGN)
862 IO_APIC_VECTOR(irq) = vector;
863
864 spin_unlock(&vector_lock);
865
866 return vector;
867 }
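/*
 * Note on the locking above (illustrative, not in the original source):
 * without vector_lock two CPUs could both read the same current_vector,
 * both advance it by 8, and hand the same vector to two different IRQs
 * while vector_irq[] and irq_vector[] are only half updated.  The
 * serialization is internal to the function, so callers such as
 * setup_IO_APIC_irqs() or io_apic_set_pci_routing() simply do:
 *
 *	vector = assign_irq_vector(irq);
 *	entry.vector = vector;
 *
 * with no extra locking around the vector assignment itself.
 */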
868
869 extern void (*interrupt[NR_IRQS])(void);
870 static struct hw_interrupt_type ioapic_level_type;
871 static struct hw_interrupt_type ioapic_edge_type;
872
873 #define IOAPIC_AUTO -1
874 #define IOAPIC_EDGE 0
875 #define IOAPIC_LEVEL 1
876
877 static inline void ioapic_register_intr(int irq, int vector, unsigned long trigger)
878 {
879 if (use_pci_vector() && !platform_legacy_irq(irq)) {
880 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
881 trigger == IOAPIC_LEVEL)
882 irq_desc[vector].handler = &ioapic_level_type;
883 else
884 irq_desc[vector].handler = &ioapic_edge_type;
885 set_intr_gate(vector, interrupt[vector]);
886 } else {
887 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
888 trigger == IOAPIC_LEVEL)
889 irq_desc[irq].handler = &ioapic_level_type;
890 else
891 irq_desc[irq].handler = &ioapic_edge_type;
892 set_intr_gate(vector, interrupt[irq]);
893 }
894 }
895
896 static void __init setup_IO_APIC_irqs(void)
897 {
898 struct IO_APIC_route_entry entry;
899 int apic, pin, idx, irq, first_notcon = 1, vector;
900 unsigned long flags;
901
902 apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
903
904 for (apic = 0; apic < nr_ioapics; apic++) {
905 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
906
907 /*
908 * add it to the IO-APIC irq-routing table:
909 */
910 memset(&entry,0,sizeof(entry));
911
912 entry.delivery_mode = INT_DELIVERY_MODE;
913 entry.dest_mode = INT_DEST_MODE;
914 entry.mask = 0; /* enable IRQ */
915 entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
916
917 idx = find_irq_entry(apic,pin,mp_INT);
918 if (idx == -1) {
919 if (first_notcon) {
920 apic_printk(APIC_VERBOSE, KERN_DEBUG " IO-APIC (apicid-pin) %d-%d", mp_ioapics[apic].mpc_apicid, pin);
921 first_notcon = 0;
922 } else
923 apic_printk(APIC_VERBOSE, ", %d-%d", mp_ioapics[apic].mpc_apicid, pin);
924 continue;
925 }
926
927 entry.trigger = irq_trigger(idx);
928 entry.polarity = irq_polarity(idx);
929
930 if (irq_trigger(idx)) {
931 entry.trigger = 1;
932 entry.mask = 1;
933 entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
934 }
935
936 irq = pin_2_irq(idx, apic, pin);
937 add_pin_to_irq(irq, apic, pin);
938
939 if (!apic && !IO_APIC_IRQ(irq))
940 continue;
941
942 if (IO_APIC_IRQ(irq)) {
943 vector = assign_irq_vector(irq);
944 entry.vector = vector;
945
946 ioapic_register_intr(irq, vector, IOAPIC_AUTO);
947 if (!apic && (irq < 16))
948 disable_8259A_irq(irq);
949 }
950 spin_lock_irqsave(&ioapic_lock, flags);
951 io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
952 io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
953 set_native_irq_info(irq, TARGET_CPUS);
954 spin_unlock_irqrestore(&ioapic_lock, flags);
955 }
956 }
957
958 if (!first_notcon)
959 apic_printk(APIC_VERBOSE," not connected.\n");
960 }
961
962 /*
963 * Set up the 8259A-master output pin as broadcast to all
964 * CPUs.
965 */
966 static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector)
967 {
968 struct IO_APIC_route_entry entry;
969 unsigned long flags;
970
971 memset(&entry,0,sizeof(entry));
972
973 disable_8259A_irq(0);
974
975 /* mask LVT0 */
976 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
977
978 /*
979 * We use logical delivery to get the timer IRQ
980 * to the first CPU.
981 */
982 entry.dest_mode = INT_DEST_MODE;
983 entry.mask = 0; /* unmask IRQ now */
984 entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
985 entry.delivery_mode = INT_DELIVERY_MODE;
986 entry.polarity = 0;
987 entry.trigger = 0;
988 entry.vector = vector;
989
990 /*
991 * The timer IRQ doesn't have to know that behind the
992 * scenes we have an 8259A-master in AEOI mode ...
993 */
994 irq_desc[0].handler = &ioapic_edge_type;
995
996 /*
997 * Add it to the IO-APIC irq-routing table:
998 */
999 spin_lock_irqsave(&ioapic_lock, flags);
1000 io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
1001 io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
1002 spin_unlock_irqrestore(&ioapic_lock, flags);
1003
1004 enable_8259A_irq(0);
1005 }
1006
1007 void __init UNEXPECTED_IO_APIC(void)
1008 {
1009 }
1010
1011 void __apicdebuginit print_IO_APIC(void)
1012 {
1013 int apic, i;
1014 union IO_APIC_reg_00 reg_00;
1015 union IO_APIC_reg_01 reg_01;
1016 union IO_APIC_reg_02 reg_02;
1017 unsigned long flags;
1018
1019 if (apic_verbosity == APIC_QUIET)
1020 return;
1021
1022 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
1023 for (i = 0; i < nr_ioapics; i++)
1024 printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
1025 mp_ioapics[i].mpc_apicid, nr_ioapic_registers[i]);
1026
1027 /*
1028 * We are a bit conservative about what we expect. We have to
1029 * know about every hardware change ASAP.
1030 */
1031 printk(KERN_INFO "testing the IO APIC.......................\n");
1032
1033 for (apic = 0; apic < nr_ioapics; apic++) {
1034
1035 spin_lock_irqsave(&ioapic_lock, flags);
1036 reg_00.raw = io_apic_read(apic, 0);
1037 reg_01.raw = io_apic_read(apic, 1);
1038 if (reg_01.bits.version >= 0x10)
1039 reg_02.raw = io_apic_read(apic, 2);
1040 spin_unlock_irqrestore(&ioapic_lock, flags);
1041
1042 printk("\n");
1043 printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mpc_apicid);
1044 printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
1045 printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
1046 if (reg_00.bits.__reserved_1 || reg_00.bits.__reserved_2)
1047 UNEXPECTED_IO_APIC();
1048
1049 printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
1050 printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
1051 if ( (reg_01.bits.entries != 0x0f) && /* older (Neptune) boards */
1052 (reg_01.bits.entries != 0x17) && /* typical ISA+PCI boards */
1053 (reg_01.bits.entries != 0x1b) && /* Compaq Proliant boards */
1054 (reg_01.bits.entries != 0x1f) && /* dual Xeon boards */
1055 (reg_01.bits.entries != 0x22) && /* bigger Xeon boards */
1056 (reg_01.bits.entries != 0x2E) &&
1057 (reg_01.bits.entries != 0x3F) &&
1058 (reg_01.bits.entries != 0x03)
1059 )
1060 UNEXPECTED_IO_APIC();
1061
1062 printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
1063 printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);
1064 if ( (reg_01.bits.version != 0x01) && /* 82489DX IO-APICs */
1065 (reg_01.bits.version != 0x02) && /* 82801BA IO-APICs (ICH2) */
1066 (reg_01.bits.version != 0x10) && /* oldest IO-APICs */
1067 (reg_01.bits.version != 0x11) && /* Pentium/Pro IO-APICs */
1068 (reg_01.bits.version != 0x13) && /* Xeon IO-APICs */
1069 (reg_01.bits.version != 0x20) /* Intel P64H (82806 AA) */
1070 )
1071 UNEXPECTED_IO_APIC();
1072 if (reg_01.bits.__reserved_1 || reg_01.bits.__reserved_2)
1073 UNEXPECTED_IO_APIC();
1074
1075 if (reg_01.bits.version >= 0x10) {
1076 printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
1077 printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
1078 if (reg_02.bits.__reserved_1 || reg_02.bits.__reserved_2)
1079 UNEXPECTED_IO_APIC();
1080 }
1081
1082 printk(KERN_DEBUG ".... IRQ redirection table:\n");
1083
1084 printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol"
1085 " Stat Dest Deli Vect: \n");
1086
1087 for (i = 0; i <= reg_01.bits.entries; i++) {
1088 struct IO_APIC_route_entry entry;
1089
1090 spin_lock_irqsave(&ioapic_lock, flags);
1091 *(((int *)&entry)+0) = io_apic_read(apic, 0x10+i*2);
1092 *(((int *)&entry)+1) = io_apic_read(apic, 0x11+i*2);
1093 spin_unlock_irqrestore(&ioapic_lock, flags);
1094
1095 printk(KERN_DEBUG " %02x %03X %02X ",
1096 i,
1097 entry.dest.logical.logical_dest,
1098 entry.dest.physical.physical_dest
1099 );
1100
1101 printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
1102 entry.mask,
1103 entry.trigger,
1104 entry.irr,
1105 entry.polarity,
1106 entry.delivery_status,
1107 entry.dest_mode,
1108 entry.delivery_mode,
1109 entry.vector
1110 );
1111 }
1112 }
1113 if (use_pci_vector())
1114 printk(KERN_INFO "Using vector-based indexing\n");
1115 printk(KERN_DEBUG "IRQ to pin mappings:\n");
1116 for (i = 0; i < NR_IRQS; i++) {
1117 struct irq_pin_list *entry = irq_2_pin + i;
1118 if (entry->pin < 0)
1119 continue;
1120 if (use_pci_vector() && !platform_legacy_irq(i))
1121 printk(KERN_DEBUG "IRQ%d ", IO_APIC_VECTOR(i));
1122 else
1123 printk(KERN_DEBUG "IRQ%d ", i);
1124 for (;;) {
1125 printk("-> %d:%d", entry->apic, entry->pin);
1126 if (!entry->next)
1127 break;
1128 entry = irq_2_pin + entry->next;
1129 }
1130 printk("\n");
1131 }
1132
1133 printk(KERN_INFO ".................................... done.\n");
1134
1135 return;
1136 }
1137
1138 #if 0
1139
1140 static __apicdebuginit void print_APIC_bitfield (int base)
1141 {
1142 unsigned int v;
1143 int i, j;
1144
1145 if (apic_verbosity == APIC_QUIET)
1146 return;
1147
1148 printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
1149 for (i = 0; i < 8; i++) {
1150 v = apic_read(base + i*0x10);
1151 for (j = 0; j < 32; j++) {
1152 if (v & (1<<j))
1153 printk("1");
1154 else
1155 printk("0");
1156 }
1157 printk("\n");
1158 }
1159 }
1160
1161 void __apicdebuginit print_local_APIC(void * dummy)
1162 {
1163 unsigned int v, ver, maxlvt;
1164
1165 if (apic_verbosity == APIC_QUIET)
1166 return;
1167
1168 printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
1169 smp_processor_id(), hard_smp_processor_id());
1170 v = apic_read(APIC_ID);
1171 printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, GET_APIC_ID(v));
1172 v = apic_read(APIC_LVR);
1173 printk(KERN_INFO "... APIC VERSION: %08x\n", v);
1174 ver = GET_APIC_VERSION(v);
1175 maxlvt = get_maxlvt();
1176
1177 v = apic_read(APIC_TASKPRI);
1178 printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
1179
1180 v = apic_read(APIC_ARBPRI);
1181 printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
1182 v & APIC_ARBPRI_MASK);
1183 v = apic_read(APIC_PROCPRI);
1184 printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
1185
1186 v = apic_read(APIC_EOI);
1187 printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
1188 v = apic_read(APIC_RRR);
1189 printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
1190 v = apic_read(APIC_LDR);
1191 printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
1192 v = apic_read(APIC_DFR);
1193 printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
1194 v = apic_read(APIC_SPIV);
1195 printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
1196
1197 printk(KERN_DEBUG "... APIC ISR field:\n");
1198 print_APIC_bitfield(APIC_ISR);
1199 printk(KERN_DEBUG "... APIC TMR field:\n");
1200 print_APIC_bitfield(APIC_TMR);
1201 printk(KERN_DEBUG "... APIC IRR field:\n");
1202 print_APIC_bitfield(APIC_IRR);
1203
1204 v = apic_read(APIC_ESR);
1205 printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
1206
1207 v = apic_read(APIC_ICR);
1208 printk(KERN_DEBUG "... APIC ICR: %08x\n", v);
1209 v = apic_read(APIC_ICR2);
1210 printk(KERN_DEBUG "... APIC ICR2: %08x\n", v);
1211
1212 v = apic_read(APIC_LVTT);
1213 printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
1214
1215 if (maxlvt > 3) { /* PC is LVT#4. */
1216 v = apic_read(APIC_LVTPC);
1217 printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
1218 }
1219 v = apic_read(APIC_LVT0);
1220 printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
1221 v = apic_read(APIC_LVT1);
1222 printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
1223
1224 if (maxlvt > 2) { /* ERR is LVT#3. */
1225 v = apic_read(APIC_LVTERR);
1226 printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
1227 }
1228
1229 v = apic_read(APIC_TMICT);
1230 printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
1231 v = apic_read(APIC_TMCCT);
1232 printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
1233 v = apic_read(APIC_TDCR);
1234 printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
1235 printk("\n");
1236 }
1237
1238 void print_all_local_APICs (void)
1239 {
1240 on_each_cpu(print_local_APIC, NULL, 1, 1);
1241 }
1242
1243 void __apicdebuginit print_PIC(void)
1244 {
1245 unsigned int v;
1246 unsigned long flags;
1247
1248 if (apic_verbosity == APIC_QUIET)
1249 return;
1250
1251 printk(KERN_DEBUG "\nprinting PIC contents\n");
1252
1253 spin_lock_irqsave(&i8259A_lock, flags);
1254
1255 v = inb(0xa1) << 8 | inb(0x21);
1256 printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
1257
1258 v = inb(0xa0) << 8 | inb(0x20);
1259 printk(KERN_DEBUG "... PIC IRR: %04x\n", v);
1260
1261 outb(0x0b,0xa0);
1262 outb(0x0b,0x20);
1263 v = inb(0xa0) << 8 | inb(0x20);
1264 outb(0x0a,0xa0);
1265 outb(0x0a,0x20);
1266
1267 spin_unlock_irqrestore(&i8259A_lock, flags);
1268
1269 printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
1270
1271 v = inb(0x4d1) << 8 | inb(0x4d0);
1272 printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
1273 }
1274
1275 #endif /* 0 */
1276
1277 static void __init enable_IO_APIC(void)
1278 {
1279 union IO_APIC_reg_01 reg_01;
1280 int i8259_apic, i8259_pin;
1281 int i, apic;
1282 unsigned long flags;
1283
1284 for (i = 0; i < PIN_MAP_SIZE; i++) {
1285 irq_2_pin[i].pin = -1;
1286 irq_2_pin[i].next = 0;
1287 }
1288 if (!pirqs_enabled)
1289 for (i = 0; i < MAX_PIRQS; i++)
1290 pirq_entries[i] = -1;
1291
1292 /*
1293 * The number of IO-APIC IRQ registers (== #pins):
1294 */
1295 for (apic = 0; apic < nr_ioapics; apic++) {
1296 spin_lock_irqsave(&ioapic_lock, flags);
1297 reg_01.raw = io_apic_read(apic, 1);
1298 spin_unlock_irqrestore(&ioapic_lock, flags);
1299 nr_ioapic_registers[apic] = reg_01.bits.entries+1;
1300 }
1301 for(apic = 0; apic < nr_ioapics; apic++) {
1302 int pin;
1303 /* See if any of the pins is in ExtINT mode */
1304 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
1305 struct IO_APIC_route_entry entry;
1306 spin_lock_irqsave(&ioapic_lock, flags);
1307 *(((int *)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
1308 *(((int *)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
1309 spin_unlock_irqrestore(&ioapic_lock, flags);
1310
1311
1312 /* If the interrupt line is enabled and in ExtInt mode
1313 * I have found the pin where the i8259 is connected.
1314 */
1315 if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
1316 ioapic_i8259.apic = apic;
1317 ioapic_i8259.pin = pin;
1318 goto found_i8259;
1319 }
1320 }
1321 }
1322 found_i8259:
1323 /* Look to see if the MP table has reported the ExtINT */
1324 i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
1325 i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
1326 /* Trust the MP table if nothing is set up in the hardware */
1327 if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
1328 printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
1329 ioapic_i8259.pin = i8259_pin;
1330 ioapic_i8259.apic = i8259_apic;
1331 }
1332 /* Complain if the MP table and the hardware disagree */
1333 if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
1334 (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
1335 {
1336 printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
1337 }
1338
1339 /*
1340 * Do not trust the IO-APIC being empty at bootup
1341 */
1342 clear_IO_APIC();
1343 }
1344
1345 /*
1346 * Not an __init, needed by the reboot code
1347 */
1348 void disable_IO_APIC(void)
1349 {
1350 /*
1351 * Clear the IO-APIC before rebooting:
1352 */
1353 clear_IO_APIC();
1354
1355 /*
1356 * If the i8259 is routed through an IOAPIC
1357 * Put that IOAPIC in virtual wire mode
1358 * so legacy interrupts can be delivered.
1359 */
1360 if (ioapic_i8259.pin != -1) {
1361 struct IO_APIC_route_entry entry;
1362 unsigned long flags;
1363
1364 memset(&entry, 0, sizeof(entry));
1365 entry.mask = 0; /* Enabled */
1366 entry.trigger = 0; /* Edge */
1367 entry.irr = 0;
1368 entry.polarity = 0; /* High */
1369 entry.delivery_status = 0;
1370 entry.dest_mode = 0; /* Physical */
1371 entry.delivery_mode = dest_ExtINT; /* ExtInt */
1372 entry.vector = 0;
1373 entry.dest.physical.physical_dest =
1374 GET_APIC_ID(apic_read(APIC_ID));
1375
1376 /*
1377 * Add it to the IO-APIC irq-routing table:
1378 */
1379 spin_lock_irqsave(&ioapic_lock, flags);
1380 io_apic_write(ioapic_i8259.apic, 0x11+2*ioapic_i8259.pin,
1381 *(((int *)&entry)+1));
1382 io_apic_write(ioapic_i8259.apic, 0x10+2*ioapic_i8259.pin,
1383 *(((int *)&entry)+0));
1384 spin_unlock_irqrestore(&ioapic_lock, flags);
1385 }
1386
1387 disconnect_bsp_APIC(ioapic_i8259.pin != -1);
1388 }
1389
1390 /*
1391 * function to set the IO-APIC physical IDs based on the
1392 * values stored in the MPC table.
1393 *
1394 * by Matt Domsch <Matt_Domsch@dell.com> Tue Dec 21 12:25:05 CST 1999
1395 */
1396
1397 static void __init setup_ioapic_ids_from_mpc (void)
1398 {
1399 union IO_APIC_reg_00 reg_00;
1400 int apic;
1401 int i;
1402 unsigned char old_id;
1403 unsigned long flags;
1404
1405 /*
1406 * Set the IOAPIC ID to the value stored in the MPC table.
1407 */
1408 for (apic = 0; apic < nr_ioapics; apic++) {
1409
1410 /* Read the register 0 value */
1411 spin_lock_irqsave(&ioapic_lock, flags);
1412 reg_00.raw = io_apic_read(apic, 0);
1413 spin_unlock_irqrestore(&ioapic_lock, flags);
1414
1415 old_id = mp_ioapics[apic].mpc_apicid;
1416
1417
1418 printk(KERN_INFO "Using IO-APIC %d\n", mp_ioapics[apic].mpc_apicid);
1419
1420
1421 /*
1422 * We need to adjust the IRQ routing table
1423 * if the ID changed.
1424 */
1425 if (old_id != mp_ioapics[apic].mpc_apicid)
1426 for (i = 0; i < mp_irq_entries; i++)
1427 if (mp_irqs[i].mpc_dstapic == old_id)
1428 mp_irqs[i].mpc_dstapic
1429 = mp_ioapics[apic].mpc_apicid;
1430
1431 /*
1432 * Read the right value from the MPC table and
1433 * write it into the ID register.
1434 */
1435 apic_printk(APIC_VERBOSE,KERN_INFO "...changing IO-APIC physical APIC ID to %d ...",
1436 mp_ioapics[apic].mpc_apicid);
1437
1438 reg_00.bits.ID = mp_ioapics[apic].mpc_apicid;
1439 spin_lock_irqsave(&ioapic_lock, flags);
1440 io_apic_write(apic, 0, reg_00.raw);
1441 spin_unlock_irqrestore(&ioapic_lock, flags);
1442
1443 /*
1444 * Sanity check
1445 */
1446 spin_lock_irqsave(&ioapic_lock, flags);
1447 reg_00.raw = io_apic_read(apic, 0);
1448 spin_unlock_irqrestore(&ioapic_lock, flags);
1449 if (reg_00.bits.ID != mp_ioapics[apic].mpc_apicid)
1450 printk("could not set ID!\n");
1451 else
1452 apic_printk(APIC_VERBOSE," ok.\n");
1453 }
1454 }
1455
1456 /*
1457 * There is a nasty bug in some older SMP boards, their mptable lies
1458 * about the timer IRQ. We do the following to work around the situation:
1459 *
1460 * - timer IRQ defaults to IO-APIC IRQ
1461 * - if this function detects that timer IRQs are defunct, then we fall
1462 * back to ISA timer IRQs
1463 */
1464 static int __init timer_irq_works(void)
1465 {
1466 unsigned long t1 = jiffies;
1467
1468 local_irq_enable();
1469 /* Let ten ticks pass... */
1470 mdelay((10 * 1000) / HZ);
1471
1472 /*
1473 * Expect a few ticks at least, to be sure some possible
1474 * glue logic does not lock up after one or two first
1475 * ticks in a non-ExtINT mode. Also the local APIC
1476 * might have cached one ExtINT interrupt. Finally, at
1477 * least one tick may be lost due to delays.
1478 */
1479
1480 /* jiffies wrap? */
1481 if (jiffies - t1 > 4)
1482 return 1;
1483 return 0;
1484 }
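/*
 * Illustrative arithmetic (not in the original source): mdelay((10*1000)/HZ)
 * above waits for ten ticks' worth of wall time -- 40 ms with HZ=250, 10 ms
 * with HZ=1000 -- and the "> 4" jiffies check then tolerates a few ticks
 * lost to cached ExtINTs or glue-logic hiccups, as the comment explains.
 */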
1485
1486 /*
1487 * In the SMP+IOAPIC case it might happen that there is an unspecified
1488 * number of pending IRQ events left unhandled. These cases are very rare,
1489 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
1490 * better to do it this way as thus we do not have to be aware of
1491 * 'pending' interrupts in the IRQ path, except at this point.
1492 */
1493 /*
1494 * Edge triggered needs to resend any interrupt
1495 * that was delayed but this is now handled in the device
1496 * independent code.
1497 */
1498
1499 /*
1500 * Starting up an edge-triggered IO-APIC interrupt is
1501 * nasty - we need to make sure that we get the edge.
1502 * If it is already asserted for some reason, we need to
1503 * return 1 to indicate that it was pending.
1504 *
1505 * This is not complete - we should be able to fake
1506 * an edge even if it isn't on the 8259A...
1507 */
1508
1509 static unsigned int startup_edge_ioapic_irq(unsigned int irq)
1510 {
1511 int was_pending = 0;
1512 unsigned long flags;
1513
1514 spin_lock_irqsave(&ioapic_lock, flags);
1515 if (irq < 16) {
1516 disable_8259A_irq(irq);
1517 if (i8259A_irq_pending(irq))
1518 was_pending = 1;
1519 }
1520 __unmask_IO_APIC_irq(irq);
1521 spin_unlock_irqrestore(&ioapic_lock, flags);
1522
1523 return was_pending;
1524 }
1525
1526 /*
1527 * Once we have recorded IRQ_PENDING already, we can mask the
1528 * interrupt for real. This prevents IRQ storms from unhandled
1529 * devices.
1530 */
1531 static void ack_edge_ioapic_irq(unsigned int irq)
1532 {
1533 move_irq(irq);
1534 if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED))
1535 == (IRQ_PENDING | IRQ_DISABLED))
1536 mask_IO_APIC_irq(irq);
1537 ack_APIC_irq();
1538 }
1539
1540 /*
1541 * Level triggered interrupts can just be masked,
1542 * and shutting down and starting up the interrupt
1543 * is the same as enabling and disabling them -- except
1544 * with a startup need to return a "was pending" value.
1545 *
1546 * Level triggered interrupts are special because we
1547 * do not touch any IO-APIC register while handling
1548 * them. We ack the APIC in the end-IRQ handler, not
1549 * in the start-IRQ-handler. Protection against reentrance
1550 * from the same interrupt is still provided, both by the
1551 * generic IRQ layer and by the fact that an unacked local
1552 * APIC does not accept IRQs.
1553 */
1554 static unsigned int startup_level_ioapic_irq (unsigned int irq)
1555 {
1556 unmask_IO_APIC_irq(irq);
1557
1558 return 0; /* don't check for pending */
1559 }
1560
1561 static void end_level_ioapic_irq (unsigned int irq)
1562 {
1563 move_irq(irq);
1564 ack_APIC_irq();
1565 }
1566
1567 #ifdef CONFIG_PCI_MSI
1568 static unsigned int startup_edge_ioapic_vector(unsigned int vector)
1569 {
1570 int irq = vector_to_irq(vector);
1571
1572 return startup_edge_ioapic_irq(irq);
1573 }
1574
1575 static void ack_edge_ioapic_vector(unsigned int vector)
1576 {
1577 int irq = vector_to_irq(vector);
1578
1579 move_native_irq(vector);
1580 ack_edge_ioapic_irq(irq);
1581 }
1582
1583 static unsigned int startup_level_ioapic_vector (unsigned int vector)
1584 {
1585 int irq = vector_to_irq(vector);
1586
1587 return startup_level_ioapic_irq (irq);
1588 }
1589
1590 static void end_level_ioapic_vector (unsigned int vector)
1591 {
1592 int irq = vector_to_irq(vector);
1593
1594 move_native_irq(vector);
1595 end_level_ioapic_irq(irq);
1596 }
1597
1598 static void mask_IO_APIC_vector (unsigned int vector)
1599 {
1600 int irq = vector_to_irq(vector);
1601
1602 mask_IO_APIC_irq(irq);
1603 }
1604
1605 static void unmask_IO_APIC_vector (unsigned int vector)
1606 {
1607 int irq = vector_to_irq(vector);
1608
1609 unmask_IO_APIC_irq(irq);
1610 }
1611
1612 #ifdef CONFIG_SMP
1613 static void set_ioapic_affinity_vector (unsigned int vector,
1614 cpumask_t cpu_mask)
1615 {
1616 int irq = vector_to_irq(vector);
1617
1618 set_native_irq_info(vector, cpu_mask);
1619 set_ioapic_affinity_irq(irq, cpu_mask);
1620 }
1621 #endif // CONFIG_SMP
1622 #endif // CONFIG_PCI_MSI
1623
1624 /*
1625 * Level and edge triggered IO-APIC interrupts need different handling,
1626 * so we use two separate IRQ descriptors. Edge triggered IRQs can be
1627 * handled with the level-triggered descriptor, but that one has slightly
1628 * more overhead. Level-triggered interrupts cannot be handled with the
1629 * edge-triggered handler, without risking IRQ storms and other ugly
1630 * races.
1631 */
1632
1633 static struct hw_interrupt_type ioapic_edge_type __read_mostly = {
1634 .typename = "IO-APIC-edge",
1635 .startup = startup_edge_ioapic,
1636 .shutdown = shutdown_edge_ioapic,
1637 .enable = enable_edge_ioapic,
1638 .disable = disable_edge_ioapic,
1639 .ack = ack_edge_ioapic,
1640 .end = end_edge_ioapic,
1641 #ifdef CONFIG_SMP
1642 .set_affinity = set_ioapic_affinity,
1643 #endif
1644 };
1645
1646 static struct hw_interrupt_type ioapic_level_type __read_mostly = {
1647 .typename = "IO-APIC-level",
1648 .startup = startup_level_ioapic,
1649 .shutdown = shutdown_level_ioapic,
1650 .enable = enable_level_ioapic,
1651 .disable = disable_level_ioapic,
1652 .ack = mask_and_ack_level_ioapic,
1653 .end = end_level_ioapic,
1654 #ifdef CONFIG_SMP
1655 .set_affinity = set_ioapic_affinity,
1656 #endif
1657 };
1658
1659 static inline void init_IO_APIC_traps(void)
1660 {
1661 int irq;
1662
1663 /*
1664 * NOTE! The local APIC isn't very good at handling
1665 * multiple interrupts at the same interrupt level.
1666 * As the interrupt level is determined by taking the
1667 * vector number and shifting that right by 4, we
1668 * want to spread these out a bit so that they don't
1669 * all fall in the same interrupt level.
1670 *
1671 * Also, we've got to be careful not to trash gate
1672 * 0x80, because int 0x80 is hm, kind of importantish. ;)
1673 */
1674 for (irq = 0; irq < NR_IRQS ; irq++) {
1675 int tmp = irq;
1676 if (use_pci_vector()) {
1677 if (!platform_legacy_irq(tmp))
1678 if ((tmp = vector_to_irq(tmp)) == -1)
1679 continue;
1680 }
1681 if (IO_APIC_IRQ(tmp) && !IO_APIC_VECTOR(tmp)) {
1682 /*
1683 * Hmm.. We don't have an entry for this,
1684 * so default to an old-fashioned 8259
1685 * interrupt if we can..
1686 */
1687 if (irq < 16)
1688 make_8259A_irq(irq);
1689 else
1690 /* Strange. Oh, well.. */
1691 irq_desc[irq].handler = &no_irq_type;
1692 }
1693 }
1694 }
1695
1696 static void enable_lapic_irq (unsigned int irq)
1697 {
1698 unsigned long v;
1699
1700 v = apic_read(APIC_LVT0);
1701 apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
1702 }
1703
1704 static void disable_lapic_irq (unsigned int irq)
1705 {
1706 unsigned long v;
1707
1708 v = apic_read(APIC_LVT0);
1709 apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
1710 }
1711
1712 static void ack_lapic_irq (unsigned int irq)
1713 {
1714 ack_APIC_irq();
1715 }
1716
1717 static void end_lapic_irq (unsigned int i) { /* nothing */ }
1718
1719 static struct hw_interrupt_type lapic_irq_type __read_mostly = {
1720 .typename = "local-APIC-edge",
1721 .startup = NULL, /* startup_irq() not used for IRQ0 */
1722 .shutdown = NULL, /* shutdown_irq() not used for IRQ0 */
1723 .enable = enable_lapic_irq,
1724 .disable = disable_lapic_irq,
1725 .ack = ack_lapic_irq,
1726 .end = end_lapic_irq,
1727 };
1728
1729 static void setup_nmi (void)
1730 {
1731 /*
1732 * Dirty trick to enable the NMI watchdog ...
1733 * We put the 8259A master into AEOI mode and
1734 * unmask on all local APICs LVT0 as NMI.
1735 *
1736 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
1737 * is from Maciej W. Rozycki - so we do not have to EOI from
1738 * the NMI handler or the timer interrupt.
1739 */
1740 printk(KERN_INFO "activating NMI Watchdog ...");
1741
1742 enable_NMI_through_LVT0(NULL);
1743
1744 printk(" done.\n");
1745 }
1746
1747 /*
1748 * This looks a bit hackish but it's about the only way of sending
1749 * a few INTA cycles to 8259As and any associated glue logic. ICR does
1750 * not support the ExtINT mode, unfortunately. We need to send these
1751 * cycles as some i82489DX-based boards have glue logic that keeps the
1752 * 8259A interrupt line asserted until INTA. --macro
1753 */
1754 static inline void unlock_ExtINT_logic(void)
1755 {
1756 int apic, pin, i;
1757 struct IO_APIC_route_entry entry0, entry1;
1758 unsigned char save_control, save_freq_select;
1759 unsigned long flags;
1760
1761 pin = find_isa_irq_pin(8, mp_INT);
1762 apic = find_isa_irq_apic(8, mp_INT);
1763 if (pin == -1)
1764 return;
1765
1766 spin_lock_irqsave(&ioapic_lock, flags);
1767 *(((int *)&entry0) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
1768 *(((int *)&entry0) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
1769 spin_unlock_irqrestore(&ioapic_lock, flags);
1770 clear_IO_APIC_pin(apic, pin);
1771
1772 memset(&entry1, 0, sizeof(entry1));
1773
1774 entry1.dest_mode = 0; /* physical delivery */
1775 entry1.mask = 0; /* unmask IRQ now */
1776 entry1.dest.physical.physical_dest = hard_smp_processor_id();
1777 entry1.delivery_mode = dest_ExtINT;
1778 entry1.polarity = entry0.polarity;
1779 entry1.trigger = 0;
1780 entry1.vector = 0;
1781
1782 spin_lock_irqsave(&ioapic_lock, flags);
1783 io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry1) + 1));
1784 io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry1) + 0));
1785 spin_unlock_irqrestore(&ioapic_lock, flags);
1786
1787 save_control = CMOS_READ(RTC_CONTROL);
1788 save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
1789 CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
1790 RTC_FREQ_SELECT);
1791 CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
1792
1793 i = 100;
1794 while (i-- > 0) {
1795 mdelay(10);
1796 if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
1797 i -= 10;
1798 }
1799
1800 CMOS_WRITE(save_control, RTC_CONTROL);
1801 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
1802 clear_IO_APIC_pin(apic, pin);
1803
1804 spin_lock_irqsave(&ioapic_lock, flags);
1805 io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry0) + 1));
1806 io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry0) + 0));
1807 spin_unlock_irqrestore(&ioapic_lock, flags);
1808 }
1809
1810 int timer_uses_ioapic_pin_0;
1811
1812 /*
1813 * This code may look a bit paranoid, but it's supposed to cooperate with
1814 * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
1815 * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
1816 * fanatically on his truly buggy board.
1817 *
1818 * FIXME: really need to revamp this for modern platforms only.
1819 */
1820 static inline void check_timer(void)
1821 {
1822 int apic1, pin1, apic2, pin2;
1823 int vector;
1824
1825 /*
1826 * get/set the timer IRQ vector:
1827 */
1828 disable_8259A_irq(0);
1829 vector = assign_irq_vector(0);
1830 set_intr_gate(vector, interrupt[0]);
1831
1832 /*
1833 * Subtle, code in do_timer_interrupt() expects an AEOI
1834 * mode for the 8259A whenever interrupts are routed
1835 * through I/O APICs. Also IRQ0 has to be enabled in
1836 * the 8259A which implies the virtual wire has to be
1837 * disabled in the local APIC.
1838 */
1839 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
1840 init_8259A(1);
1841 if (timer_over_8254 > 0)
1842 enable_8259A_irq(0);
1843
1844 pin1 = find_isa_irq_pin(0, mp_INT);
1845 apic1 = find_isa_irq_apic(0, mp_INT);
1846 pin2 = ioapic_i8259.pin;
1847 apic2 = ioapic_i8259.apic;
1848
1849 if (pin1 == 0)
1850 timer_uses_ioapic_pin_0 = 1;
1851
1852 apic_printk(APIC_VERBOSE,KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
1853 vector, apic1, pin1, apic2, pin2);
1854
1855 if (pin1 != -1) {
1856 /*
1857 * Ok, does IRQ0 through the IOAPIC work?
1858 */
1859 unmask_IO_APIC_irq(0);
1860 if (!no_timer_check && timer_irq_works()) {
1861 nmi_watchdog_default();
1862 if (nmi_watchdog == NMI_IO_APIC) {
1863 disable_8259A_irq(0);
1864 setup_nmi();
1865 enable_8259A_irq(0);
1866 }
1867 if (disable_timer_pin_1 > 0)
1868 clear_IO_APIC_pin(0, pin1);
1869 return;
1870 }
1871 clear_IO_APIC_pin(apic1, pin1);
1872 apic_printk(APIC_QUIET,KERN_ERR "..MP-BIOS bug: 8254 timer not "
1873 "connected to IO-APIC\n");
1874 }
1875
1876 apic_printk(APIC_VERBOSE,KERN_INFO "...trying to set up timer (IRQ0) "
1877 "through the 8259A ... ");
1878 if (pin2 != -1) {
1879 apic_printk(APIC_VERBOSE,"\n..... (found apic %d pin %d) ...",
1880 apic2, pin2);
1881 /*
1882 * legacy devices should be connected to IO APIC #0
1883 */
1884 setup_ExtINT_IRQ0_pin(apic2, pin2, vector);
1885 if (timer_irq_works()) {
1886 apic_printk(APIC_VERBOSE," works.\n");
1887 nmi_watchdog_default();
1888 if (nmi_watchdog == NMI_IO_APIC) {
1889 setup_nmi();
1890 }
1891 return;
1892 }
1893 /*
1894 * Cleanup, just in case ...
1895 */
1896 clear_IO_APIC_pin(apic2, pin2);
1897 }
1898 apic_printk(APIC_VERBOSE," failed.\n");
1899
1900 if (nmi_watchdog == NMI_IO_APIC) {
1901 printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
1902 nmi_watchdog = 0;
1903 }
1904
1905 apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
1906
1907 disable_8259A_irq(0);
1908 irq_desc[0].handler = &lapic_irq_type;
1909 apic_write(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */
1910 enable_8259A_irq(0);
1911
1912 if (timer_irq_works()) {
1913 apic_printk(APIC_VERBOSE," works.\n");
1914 return;
1915 }
1916 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
1917 apic_printk(APIC_VERBOSE," failed.\n");
1918
1919 apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as ExtINT IRQ...");
1920
1921 init_8259A(0);
1922 make_8259A_irq(0);
1923 apic_write(APIC_LVT0, APIC_DM_EXTINT);
1924
1925 unlock_ExtINT_logic();
1926
1927 if (timer_irq_works()) {
1928 apic_printk(APIC_VERBOSE," works.\n");
1929 return;
1930 }
1931 apic_printk(APIC_VERBOSE," failed :(.\n");
1932 panic("IO-APIC + timer doesn't work! Try using the 'noapic' kernel parameter\n");
1933 }
1934
1935 static int __init notimercheck(char *s)
1936 {
1937 no_timer_check = 1;
1938 return 1;
1939 }
1940 __setup("no_timer_check", notimercheck);
1941
1942 /*
1943 *
1944 * IRQs that are handled by the PIC in the MPS IOAPIC case.
1945 * - IRQ2 is the cascade IRQ, and cannot be an io-apic IRQ.
1946 * Linux doesn't really care, as it's not actually used
1947 * for any interrupt handling anyway.
1948 */
1949 #define PIC_IRQS (1<<2)
1950
1951 void __init setup_IO_APIC(void)
1952 {
1953 enable_IO_APIC();
1954
1955 if (acpi_ioapic)
1956 io_apic_irqs = ~0; /* all IRQs go through IOAPIC */
1957 else
1958 io_apic_irqs = ~PIC_IRQS;
1959
1960 apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
1961
1962 /*
1963 * Set up the IO-APIC IRQ routing table.
1964 */
1965 if (!acpi_ioapic)
1966 setup_ioapic_ids_from_mpc();
1967 sync_Arb_IDs();
1968 setup_IO_APIC_irqs();
1969 init_IO_APIC_traps();
1970 check_timer();
1971 if (!acpi_ioapic)
1972 print_IO_APIC();
1973 }
1974
1975 struct sysfs_ioapic_data {
1976 struct sys_device dev;
1977 struct IO_APIC_route_entry entry[0];
1978 };
1979 static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS];
1980
1981 static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
1982 {
1983 struct IO_APIC_route_entry *entry;
1984 struct sysfs_ioapic_data *data;
1985 unsigned long flags;
1986 int i;
1987
1988 data = container_of(dev, struct sysfs_ioapic_data, dev);
1989 entry = data->entry;
1990 spin_lock_irqsave(&ioapic_lock, flags);
1991 for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
1992 *(((int *)entry) + 1) = io_apic_read(dev->id, 0x11 + 2 * i);
1993 *(((int *)entry) + 0) = io_apic_read(dev->id, 0x10 + 2 * i);
1994 }
1995 spin_unlock_irqrestore(&ioapic_lock, flags);
1996
1997 return 0;
1998 }
1999
2000 static int ioapic_resume(struct sys_device *dev)
2001 {
2002 struct IO_APIC_route_entry *entry;
2003 struct sysfs_ioapic_data *data;
2004 unsigned long flags;
2005 union IO_APIC_reg_00 reg_00;
2006 int i;
2007
2008 data = container_of(dev, struct sysfs_ioapic_data, dev);
2009 entry = data->entry;
2010
2011 spin_lock_irqsave(&ioapic_lock, flags);
2012 reg_00.raw = io_apic_read(dev->id, 0);
2013 if (reg_00.bits.ID != mp_ioapics[dev->id].mpc_apicid) {
2014 reg_00.bits.ID = mp_ioapics[dev->id].mpc_apicid;
2015 io_apic_write(dev->id, 0, reg_00.raw);
2016 }
2017 for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
2018 io_apic_write(dev->id, 0x11+2*i, *(((int *)entry)+1));
2019 io_apic_write(dev->id, 0x10+2*i, *(((int *)entry)+0));
2020 }
2021 spin_unlock_irqrestore(&ioapic_lock, flags);
2022
2023 return 0;
2024 }
2025
2026 static struct sysdev_class ioapic_sysdev_class = {
2027 set_kset_name("ioapic"),
2028 .suspend = ioapic_suspend,
2029 .resume = ioapic_resume,
2030 };
2031
2032 static int __init ioapic_init_sysfs(void)
2033 {
2034 struct sys_device * dev;
2035 int i, size, error = 0;
2036
2037 error = sysdev_class_register(&ioapic_sysdev_class);
2038 if (error)
2039 return error;
2040
2041 for (i = 0; i < nr_ioapics; i++ ) {
2042 size = sizeof(struct sys_device) + nr_ioapic_registers[i]
2043 * sizeof(struct IO_APIC_route_entry);
2044 mp_ioapic_data[i] = kmalloc(size, GFP_KERNEL);
2045 if (!mp_ioapic_data[i]) {
2046 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
2047 continue;
2048 }
2049 memset(mp_ioapic_data[i], 0, size);
2050 dev = &mp_ioapic_data[i]->dev;
2051 dev->id = i;
2052 dev->cls = &ioapic_sysdev_class;
2053 error = sysdev_register(dev);
2054 if (error) {
2055 kfree(mp_ioapic_data[i]);
2056 mp_ioapic_data[i] = NULL;
2057 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
2058 continue;
2059 }
2060 }
2061
2062 return 0;
2063 }
2064
2065 device_initcall(ioapic_init_sysfs);
2066
2067 /* --------------------------------------------------------------------------
2068 ACPI-based IOAPIC Configuration
2069 -------------------------------------------------------------------------- */
2070
2071 #ifdef CONFIG_ACPI
2072
2073 #define IO_APIC_MAX_ID 0xFE
2074
2075 int __init io_apic_get_version (int ioapic)
2076 {
2077 union IO_APIC_reg_01 reg_01;
2078 unsigned long flags;
2079
2080 spin_lock_irqsave(&ioapic_lock, flags);
2081 reg_01.raw = io_apic_read(ioapic, 1);
2082 spin_unlock_irqrestore(&ioapic_lock, flags);
2083
2084 return reg_01.bits.version;
2085 }
2086
2087
2088 int __init io_apic_get_redir_entries (int ioapic)
2089 {
2090 union IO_APIC_reg_01 reg_01;
2091 unsigned long flags;
2092
2093 spin_lock_irqsave(&ioapic_lock, flags);
2094 reg_01.raw = io_apic_read(ioapic, 1);
2095 spin_unlock_irqrestore(&ioapic_lock, flags);
2096
2097 return reg_01.bits.entries;
2098 }
2099
2100
2101 int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity)
2102 {
2103 struct IO_APIC_route_entry entry;
2104 unsigned long flags;
2105
2106 if (!IO_APIC_IRQ(irq)) {
2107 apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
2108 ioapic);
2109 return -EINVAL;
2110 }
2111
2112 /*
2113 * Generate a PCI IRQ routing entry and program the IOAPIC accordingly.
2114 * Note that we mask (disable) IRQs now -- these get enabled when the
2115 * corresponding device driver registers for this IRQ.
2116 */
2117
2118 memset(&entry,0,sizeof(entry));
2119
2120 entry.delivery_mode = INT_DELIVERY_MODE;
2121 entry.dest_mode = INT_DEST_MODE;
2122 entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
2123 entry.trigger = triggering;
2124 entry.polarity = polarity;
2125 entry.mask = 1; /* Disabled (masked) */
2126
2127 irq = gsi_irq_sharing(irq);
2128 /*
2129 * IRQs < 16 are already in the irq_2_pin[] map
2130 */
2131 if (irq >= 16)
2132 add_pin_to_irq(irq, ioapic, pin);
2133
2134 entry.vector = assign_irq_vector(irq);
2135
2136 apic_printk(APIC_VERBOSE,KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry (%d-%d -> 0x%x -> "
2137 "IRQ %d Mode:%i Active:%i)\n", ioapic,
2138 mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
2139 triggering, polarity);
2140
2141 ioapic_register_intr(irq, entry.vector, triggering);
2142
2143 if (!ioapic && (irq < 16))
2144 disable_8259A_irq(irq);
2145
2146 spin_lock_irqsave(&ioapic_lock, flags);
2147 io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1));
2148 io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0));
2149 set_native_irq_info(use_pci_vector() ? entry.vector : irq, TARGET_CPUS);
2150 spin_unlock_irqrestore(&ioapic_lock, flags);
2151
2152 return 0;
2153 }
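/*
 * Usage sketch (illustrative, call site not shown in this file): ACPI code
 * that has resolved a GSI to an (ioapic, pin) pair would typically program a
 * PCI-style interrupt with
 *
 *	io_apic_set_pci_routing(ioapic, pin, gsi, 1, 1);
 *
 * i.e. level triggered and active low, matching the PCI defaults defined
 * earlier in this file; the entry is written masked and gets unmasked when a
 * driver later requests the IRQ.
 */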
2154
2155 #endif /* CONFIG_ACPI */
2156
2157
2158 /*
2159 * This function is currently only a helper for the i386 smp boot process, where
2160 * we need to reprogram the ioredtbls to cater for the CPUs which have come online,
2161 * so the mask in all cases should simply be TARGET_CPUS.
2162 */
2163 #ifdef CONFIG_SMP
2164 void __init setup_ioapic_dest(void)
2165 {
2166 int pin, ioapic, irq, irq_entry;
2167
2168 if (skip_ioapic_setup == 1)
2169 return;
2170
2171 for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
2172 for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
2173 irq_entry = find_irq_entry(ioapic, pin, mp_INT);
2174 if (irq_entry == -1)
2175 continue;
2176 irq = pin_2_irq(irq_entry, ioapic, pin);
2177 set_ioapic_affinity_irq(irq, TARGET_CPUS);
2178 }
2179
2180 }
2181 }
2182 #endif