[PATCH] genirq: add ->retrigger() irq op to consolidate hw_irq_resend()
[deliverable/linux.git] / arch/x86_64/kernel/io_apic.c
1 /*
2 * Intel IO-APIC support for multi-Pentium hosts.
3 *
4 * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
5 *
6 * Many thanks to Stig Venaas for trying out countless experimental
7 * patches and reporting/debugging problems patiently!
8 *
9 * (c) 1999, Multiple IO-APIC support, developed by
10 * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
11 * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
12 * further tested and cleaned up by Zach Brown <zab@redhat.com>
13 * and Ingo Molnar <mingo@redhat.com>
14 *
15 * Fixes
16 * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
17 * thanks to Eric Gilmore
18 * and Rolf G. Tews
19 * for testing these extensively
20 * Paul Diefenbaugh : Added full ACPI support
21 */
22
23 #include <linux/mm.h>
24 #include <linux/interrupt.h>
25 #include <linux/init.h>
26 #include <linux/delay.h>
27 #include <linux/sched.h>
28 #include <linux/config.h>
29 #include <linux/smp_lock.h>
30 #include <linux/mc146818rtc.h>
31 #include <linux/acpi.h>
32 #include <linux/sysdev.h>
33 #ifdef CONFIG_ACPI
34 #include <acpi/acpi_bus.h>
35 #endif
36
37 #include <asm/io.h>
38 #include <asm/smp.h>
39 #include <asm/desc.h>
40 #include <asm/proto.h>
41 #include <asm/mach_apic.h>
42 #include <asm/acpi.h>
43 #include <asm/dma.h>
44 #include <asm/nmi.h>
45
46 #define __apicdebuginit __init
47
48 int sis_apic_bug; /* not actually supported, dummy for compile */
49
50 static int no_timer_check;
51
52 int disable_timer_pin_1 __initdata;
53
54 int timer_over_8254 __initdata = 0;
55
56 /* Where, if anywhere, the i8259 is connected in external int mode */
57 static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
58
59 static DEFINE_SPINLOCK(ioapic_lock);
60 static DEFINE_SPINLOCK(vector_lock);
61
62 /*
63 * # of IRQ routing registers
64 */
65 int nr_ioapic_registers[MAX_IO_APICS];
66
67 /*
68 * Rough estimation of how many shared IRQs there are, can
69 * be changed anytime.
70 */
71 #define MAX_PLUS_SHARED_IRQS NR_IRQ_VECTORS
72 #define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
73
74 /*
75 * This is performance-critical, we want to do it O(1)
76 *
77 * the indexing order of this array favors 1:1 mappings
78 * between pins and IRQs.
79 */
80
81 static struct irq_pin_list {
82 short apic, pin, next;
83 } irq_2_pin[PIN_MAP_SIZE];
84
85 int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1};
86 #ifdef CONFIG_PCI_MSI
87 #define vector_to_irq(vector) \
88 (platform_legacy_irq(vector) ? vector : vector_irq[vector])
89 #else
90 #define vector_to_irq(vector) (vector)
91 #endif
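/*
 * With CONFIG_PCI_MSI, non-legacy interrupts are indexed by vector and
 * vector_irq[] supplies the vector -> IRQ reverse mapping; without it,
 * vector_to_irq() is simply the identity.
 */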
92
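/*
 * __DO_ACTION walks the irq_2_pin chain for 'irq', applies ACTION to the
 * low word of each pin's redirection-table entry (register 0x10 + R + pin*2)
 * and then runs FINAL once the whole chain has been updated.
 */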
93 #define __DO_ACTION(R, ACTION, FINAL) \
94 \
95 { \
96 int pin; \
97 struct irq_pin_list *entry = irq_2_pin + irq; \
98 \
99 BUG_ON(irq >= NR_IRQS); \
100 for (;;) { \
101 unsigned int reg; \
102 pin = entry->pin; \
103 if (pin == -1) \
104 break; \
105 reg = io_apic_read(entry->apic, 0x10 + R + pin*2); \
106 reg ACTION; \
107 io_apic_modify(entry->apic, reg); \
108 if (!entry->next) \
109 break; \
110 entry = irq_2_pin + entry->next; \
111 } \
112 FINAL; \
113 }
114
115 #ifdef CONFIG_SMP
116 static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
117 {
118 unsigned long flags;
119 unsigned int dest;
120 cpumask_t tmp;
121
122 cpus_and(tmp, mask, cpu_online_map);
123 if (cpus_empty(tmp))
124 tmp = TARGET_CPUS;
125
126 cpus_and(mask, tmp, CPU_MASK_ALL);
127
128 dest = cpu_mask_to_apicid(mask);
129
130 /*
131 * Only the high 8 bits are valid.
132 */
133 dest = SET_APIC_LOGICAL_ID(dest);
134
135 spin_lock_irqsave(&ioapic_lock, flags);
136 __DO_ACTION(1, = dest, )
137 set_irq_info(irq, mask);
138 spin_unlock_irqrestore(&ioapic_lock, flags);
139 }
140 #endif
141
142 static u8 gsi_2_irq[NR_IRQ_VECTORS] = { [0 ... NR_IRQ_VECTORS-1] = 0xFF };
143
144 /*
145 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
146 * shared ISA-space IRQs, so we have to support them. We are super
147 * fast in the common case, and fast for shared ISA-space IRQs.
148 */
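/*
 * Additional pins for an IRQ are chained behind its fixed slot using
 * entries taken from the overflow pool that starts at index NR_IRQS;
 * exhausting the pool is fatal.
 */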
149 static void add_pin_to_irq(unsigned int irq, int apic, int pin)
150 {
151 static int first_free_entry = NR_IRQS;
152 struct irq_pin_list *entry = irq_2_pin + irq;
153
154 BUG_ON(irq >= NR_IRQS);
155 while (entry->next)
156 entry = irq_2_pin + entry->next;
157
158 if (entry->pin != -1) {
159 entry->next = first_free_entry;
160 entry = irq_2_pin + entry->next;
161 if (++first_free_entry >= PIN_MAP_SIZE)
162 panic("io_apic.c: ran out of irq_2_pin entries!");
163 }
164 entry->apic = apic;
165 entry->pin = pin;
166 }
167
168
169 #define DO_ACTION(name,R,ACTION, FINAL) \
170 \
171 static void name##_IO_APIC_irq (unsigned int irq) \
172 __DO_ACTION(R, ACTION, FINAL)
173
174 DO_ACTION( __mask, 0, |= 0x00010000, io_apic_sync(entry->apic) )
175 /* mask = 1 */
176 DO_ACTION( __unmask, 0, &= 0xfffeffff, )
177 /* mask = 0 */
178
179 static void mask_IO_APIC_irq (unsigned int irq)
180 {
181 unsigned long flags;
182
183 spin_lock_irqsave(&ioapic_lock, flags);
184 __mask_IO_APIC_irq(irq);
185 spin_unlock_irqrestore(&ioapic_lock, flags);
186 }
187
188 static void unmask_IO_APIC_irq (unsigned int irq)
189 {
190 unsigned long flags;
191
192 spin_lock_irqsave(&ioapic_lock, flags);
193 __unmask_IO_APIC_irq(irq);
194 spin_unlock_irqrestore(&ioapic_lock, flags);
195 }
196
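/*
 * Each 64-bit redirection-table entry is accessed as two 32-bit words:
 * register 0x10 + 2*pin holds the low half (vector, delivery mode, mask),
 * register 0x11 + 2*pin holds the high half (destination).
 */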
197 static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
198 {
199 struct IO_APIC_route_entry entry;
200 unsigned long flags;
201
202 /* Check delivery_mode to be sure we're not clearing an SMI pin */
203 spin_lock_irqsave(&ioapic_lock, flags);
204 *(((int*)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
205 *(((int*)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
206 spin_unlock_irqrestore(&ioapic_lock, flags);
207 if (entry.delivery_mode == dest_SMI)
208 return;
209 /*
210 * Disable it in the IO-APIC irq-routing table:
211 */
212 memset(&entry, 0, sizeof(entry));
213 entry.mask = 1;
214 spin_lock_irqsave(&ioapic_lock, flags);
215 io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry) + 0));
216 io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry) + 1));
217 spin_unlock_irqrestore(&ioapic_lock, flags);
218 }
219
220 static void clear_IO_APIC (void)
221 {
222 int apic, pin;
223
224 for (apic = 0; apic < nr_ioapics; apic++)
225 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
226 clear_IO_APIC_pin(apic, pin);
227 }
228
229 /*
230 * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
231 * specific CPU-side IRQs.
232 */
233
234 #define MAX_PIRQS 8
235 static int pirq_entries [MAX_PIRQS];
236 static int pirqs_enabled;
237 int skip_ioapic_setup;
238 int ioapic_force;
239
240 /* dummy parsing: see setup.c */
241
242 static int __init disable_ioapic_setup(char *str)
243 {
244 skip_ioapic_setup = 1;
245 return 1;
246 }
247
248 static int __init enable_ioapic_setup(char *str)
249 {
250 ioapic_force = 1;
251 skip_ioapic_setup = 0;
252 return 1;
253 }
254
255 __setup("noapic", disable_ioapic_setup);
256 __setup("apic", enable_ioapic_setup);
257
258 static int __init setup_disable_8254_timer(char *s)
259 {
260 timer_over_8254 = -1;
261 return 1;
262 }
263 static int __init setup_enable_8254_timer(char *s)
264 {
265 timer_over_8254 = 2;
266 return 1;
267 }
268
269 __setup("disable_8254_timer", setup_disable_8254_timer);
270 __setup("enable_8254_timer", setup_enable_8254_timer);
271
272 #include <asm/pci-direct.h>
273 #include <linux/pci_ids.h>
274 #include <linux/pci.h>
275
276
277 #ifdef CONFIG_ACPI
278
279 static int nvidia_hpet_detected __initdata;
280
281 static int __init nvidia_hpet_check(unsigned long phys, unsigned long size)
282 {
283 nvidia_hpet_detected = 1;
284 return 0;
285 }
286 #endif
287
288 /* Temporary Hack. Nvidia and VIA boards currently only work with IO-APIC
289 off. Check for an Nvidia or VIA PCI bridge and turn it off.
290 Use pci direct infrastructure because this runs before the PCI subsystem.
291
292 Can be overridden with "apic"
293
294 And another hack to disable the IOMMU on VIA chipsets.
295
296 ... and others. Really should move this somewhere else.
297
298 Kludge-O-Rama. */
299 void __init check_ioapic(void)
300 {
301 int num,slot,func;
302 /* Poor man's PCI discovery */
303 for (num = 0; num < 32; num++) {
304 for (slot = 0; slot < 32; slot++) {
305 for (func = 0; func < 8; func++) {
306 u32 class;
307 u32 vendor;
308 u8 type;
309 class = read_pci_config(num,slot,func,
310 PCI_CLASS_REVISION);
311 if (class == 0xffffffff)
312 break;
313
314 if ((class >> 16) != PCI_CLASS_BRIDGE_PCI)
315 continue;
316
317 vendor = read_pci_config(num, slot, func,
318 PCI_VENDOR_ID);
319 vendor &= 0xffff;
320 switch (vendor) {
321 case PCI_VENDOR_ID_VIA:
322 #ifdef CONFIG_IOMMU
323 if ((end_pfn > MAX_DMA32_PFN ||
324 force_iommu) &&
325 !iommu_aperture_allowed) {
326 printk(KERN_INFO
327 "Looks like a VIA chipset. Disabling IOMMU. Override with \"iommu=allowed\"\n");
328 iommu_aperture_disabled = 1;
329 }
330 #endif
331 return;
332 case PCI_VENDOR_ID_NVIDIA:
333 #ifdef CONFIG_ACPI
334 /*
335 * All timer overrides on Nvidia are
336 * wrong unless HPET is enabled.
337 */
338 nvidia_hpet_detected = 0;
339 acpi_table_parse(ACPI_HPET,
340 nvidia_hpet_check);
341 if (nvidia_hpet_detected == 0) {
342 acpi_skip_timer_override = 1;
343 printk(KERN_INFO "Nvidia board "
344 "detected. Ignoring ACPI "
345 "timer override.\n");
346 }
347 #endif
348 /* RED-PEN skip them on mptables too? */
349 return;
350
351 /* This should actually be the default, but
352 for 2.6.16 let's do it for ATI only where
353 it's really needed. */
354 case PCI_VENDOR_ID_ATI:
355 if (timer_over_8254 == 1) {
356 timer_over_8254 = 0;
357 printk(KERN_INFO
358 "ATI board detected. Disabling timer routing over 8254.\n");
359 }
360 return;
361 }
362
363
364 /* No multi-function device? */
365 type = read_pci_config_byte(num,slot,func,
366 PCI_HEADER_TYPE);
367 if (!(type & 0x80))
368 break;
369 }
370 }
371 }
372 }
373
374 static int __init ioapic_pirq_setup(char *str)
375 {
376 int i, max;
377 int ints[MAX_PIRQS+1];
378
379 get_options(str, ARRAY_SIZE(ints), ints);
380
381 for (i = 0; i < MAX_PIRQS; i++)
382 pirq_entries[i] = -1;
383
384 pirqs_enabled = 1;
385 apic_printk(APIC_VERBOSE, "PIRQ redirection, working around broken MP-BIOS.\n");
386 max = MAX_PIRQS;
387 if (ints[0] < MAX_PIRQS)
388 max = ints[0];
389
390 for (i = 0; i < max; i++) {
391 apic_printk(APIC_VERBOSE, "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
392 /*
393 * PIRQs are mapped upside down, usually.
394 */
395 pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
396 }
397 return 1;
398 }
399
400 __setup("pirq=", ioapic_pirq_setup);
401
402 /*
403 * Find the IRQ entry number of a certain pin.
404 */
405 static int find_irq_entry(int apic, int pin, int type)
406 {
407 int i;
408
409 for (i = 0; i < mp_irq_entries; i++)
410 if (mp_irqs[i].mpc_irqtype == type &&
411 (mp_irqs[i].mpc_dstapic == mp_ioapics[apic].mpc_apicid ||
412 mp_irqs[i].mpc_dstapic == MP_APIC_ALL) &&
413 mp_irqs[i].mpc_dstirq == pin)
414 return i;
415
416 return -1;
417 }
418
419 /*
420 * Find the pin to which IRQ[irq] (ISA) is connected
421 */
422 static int __init find_isa_irq_pin(int irq, int type)
423 {
424 int i;
425
426 for (i = 0; i < mp_irq_entries; i++) {
427 int lbus = mp_irqs[i].mpc_srcbus;
428
429 if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
430 mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
431 mp_bus_id_to_type[lbus] == MP_BUS_MCA) &&
432 (mp_irqs[i].mpc_irqtype == type) &&
433 (mp_irqs[i].mpc_srcbusirq == irq))
434
435 return mp_irqs[i].mpc_dstirq;
436 }
437 return -1;
438 }
439
440 static int __init find_isa_irq_apic(int irq, int type)
441 {
442 int i;
443
444 for (i = 0; i < mp_irq_entries; i++) {
445 int lbus = mp_irqs[i].mpc_srcbus;
446
447 if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
448 mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
449 mp_bus_id_to_type[lbus] == MP_BUS_MCA) &&
450 (mp_irqs[i].mpc_irqtype == type) &&
451 (mp_irqs[i].mpc_srcbusirq == irq))
452 break;
453 }
454 if (i < mp_irq_entries) {
455 int apic;
456 for(apic = 0; apic < nr_ioapics; apic++) {
457 if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic)
458 return apic;
459 }
460 }
461
462 return -1;
463 }
464
465 /*
466 * Find a specific PCI IRQ entry.
467 * Not an __init, possibly needed by modules
468 */
469 static int pin_2_irq(int idx, int apic, int pin);
470
471 int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
472 {
473 int apic, i, best_guess = -1;
474
475 apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
476 bus, slot, pin);
477 if (mp_bus_id_to_pci_bus[bus] == -1) {
478 apic_printk(APIC_VERBOSE, "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
479 return -1;
480 }
481 for (i = 0; i < mp_irq_entries; i++) {
482 int lbus = mp_irqs[i].mpc_srcbus;
483
484 for (apic = 0; apic < nr_ioapics; apic++)
485 if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic ||
486 mp_irqs[i].mpc_dstapic == MP_APIC_ALL)
487 break;
488
489 if ((mp_bus_id_to_type[lbus] == MP_BUS_PCI) &&
490 !mp_irqs[i].mpc_irqtype &&
491 (bus == lbus) &&
492 (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) {
493 int irq = pin_2_irq(i,apic,mp_irqs[i].mpc_dstirq);
494
495 if (!(apic || IO_APIC_IRQ(irq)))
496 continue;
497
498 if (pin == (mp_irqs[i].mpc_srcbusirq & 3))
499 return irq;
500 /*
501 * Use the first all-but-pin matching entry as a
502 * best-guess fuzzy result for broken mptables.
503 */
504 if (best_guess < 0)
505 best_guess = irq;
506 }
507 }
508 BUG_ON(best_guess >= NR_IRQS);
509 return best_guess;
510 }
511
512 /*
513 * EISA Edge/Level control register, ELCR
514 */
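/*
 * The ELCR is a 16-bit bitmap spread over I/O ports 0x4d0 (IRQs 0-7) and
 * 0x4d1 (IRQs 8-15); a set bit marks the IRQ as level triggered, a clear
 * bit as edge triggered.
 */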
515 static int EISA_ELCR(unsigned int irq)
516 {
517 if (irq < 16) {
518 unsigned int port = 0x4d0 + (irq >> 3);
519 return (inb(port) >> (irq & 7)) & 1;
520 }
521 apic_printk(APIC_VERBOSE, "Broken MPtable reports ISA irq %d\n", irq);
522 return 0;
523 }
524
525 /* EISA interrupts are always polarity zero and can be edge or level
526 * trigger depending on the ELCR value. If an interrupt is listed as
527 * EISA conforming in the MP table, that means its trigger type must
528 * be read in from the ELCR */
529
530 #define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mpc_srcbusirq))
531 #define default_EISA_polarity(idx) (0)
532
533 /* ISA interrupts are always polarity zero edge triggered,
534 * when listed as conforming in the MP table. */
535
536 #define default_ISA_trigger(idx) (0)
537 #define default_ISA_polarity(idx) (0)
538
539 /* PCI interrupts are always polarity one level triggered,
540 * when listed as conforming in the MP table. */
541
542 #define default_PCI_trigger(idx) (1)
543 #define default_PCI_polarity(idx) (1)
544
545 /* MCA interrupts are always polarity zero level triggered,
546 * when listed as conforming in the MP table. */
547
548 #define default_MCA_trigger(idx) (1)
549 #define default_MCA_polarity(idx) (0)
550
551 static int __init MPBIOS_polarity(int idx)
552 {
553 int bus = mp_irqs[idx].mpc_srcbus;
554 int polarity;
555
556 /*
557 * Determine IRQ line polarity (high active or low active):
558 */
559 switch (mp_irqs[idx].mpc_irqflag & 3)
560 {
561 case 0: /* conforms, ie. bus-type dependent polarity */
562 {
563 switch (mp_bus_id_to_type[bus])
564 {
565 case MP_BUS_ISA: /* ISA pin */
566 {
567 polarity = default_ISA_polarity(idx);
568 break;
569 }
570 case MP_BUS_EISA: /* EISA pin */
571 {
572 polarity = default_EISA_polarity(idx);
573 break;
574 }
575 case MP_BUS_PCI: /* PCI pin */
576 {
577 polarity = default_PCI_polarity(idx);
578 break;
579 }
580 case MP_BUS_MCA: /* MCA pin */
581 {
582 polarity = default_MCA_polarity(idx);
583 break;
584 }
585 default:
586 {
587 printk(KERN_WARNING "broken BIOS!!\n");
588 polarity = 1;
589 break;
590 }
591 }
592 break;
593 }
594 case 1: /* high active */
595 {
596 polarity = 0;
597 break;
598 }
599 case 2: /* reserved */
600 {
601 printk(KERN_WARNING "broken BIOS!!\n");
602 polarity = 1;
603 break;
604 }
605 case 3: /* low active */
606 {
607 polarity = 1;
608 break;
609 }
610 default: /* invalid */
611 {
612 printk(KERN_WARNING "broken BIOS!!\n");
613 polarity = 1;
614 break;
615 }
616 }
617 return polarity;
618 }
619
620 static int MPBIOS_trigger(int idx)
621 {
622 int bus = mp_irqs[idx].mpc_srcbus;
623 int trigger;
624
625 /*
626 * Determine IRQ trigger mode (edge or level sensitive):
627 */
628 switch ((mp_irqs[idx].mpc_irqflag>>2) & 3)
629 {
630 case 0: /* conforms, ie. bus-type dependent */
631 {
632 switch (mp_bus_id_to_type[bus])
633 {
634 case MP_BUS_ISA: /* ISA pin */
635 {
636 trigger = default_ISA_trigger(idx);
637 break;
638 }
639 case MP_BUS_EISA: /* EISA pin */
640 {
641 trigger = default_EISA_trigger(idx);
642 break;
643 }
644 case MP_BUS_PCI: /* PCI pin */
645 {
646 trigger = default_PCI_trigger(idx);
647 break;
648 }
649 case MP_BUS_MCA: /* MCA pin */
650 {
651 trigger = default_MCA_trigger(idx);
652 break;
653 }
654 default:
655 {
656 printk(KERN_WARNING "broken BIOS!!\n");
657 trigger = 1;
658 break;
659 }
660 }
661 break;
662 }
663 case 1: /* edge */
664 {
665 trigger = 0;
666 break;
667 }
668 case 2: /* reserved */
669 {
670 printk(KERN_WARNING "broken BIOS!!\n");
671 trigger = 1;
672 break;
673 }
674 case 3: /* level */
675 {
676 trigger = 1;
677 break;
678 }
679 default: /* invalid */
680 {
681 printk(KERN_WARNING "broken BIOS!!\n");
682 trigger = 0;
683 break;
684 }
685 }
686 return trigger;
687 }
688
689 static inline int irq_polarity(int idx)
690 {
691 return MPBIOS_polarity(idx);
692 }
693
694 static inline int irq_trigger(int idx)
695 {
696 return MPBIOS_trigger(idx);
697 }
698
699 static int next_irq = 16;
700
701 /*
702 * gsi_irq_sharing -- Name overload! "irq" can be either a legacy IRQ
703 * in the range 0-15, a linux IRQ in the range 0-223, or a GSI number
704 * from ACPI, which can reach 800 in large boxen.
705 *
706 * Compact the sparse GSI space into a sequential IRQ series and reuse
707 * vectors if possible.
708 */
709 int gsi_irq_sharing(int gsi)
710 {
711 int i, tries, vector;
712
713 BUG_ON(gsi >= NR_IRQ_VECTORS);
714
715 if (platform_legacy_irq(gsi))
716 return gsi;
717
718 if (gsi_2_irq[gsi] != 0xFF)
719 return (int)gsi_2_irq[gsi];
720
721 tries = NR_IRQS;
722 try_again:
723 vector = assign_irq_vector(gsi);
724
725 /*
726 * Sharing vectors means sharing IRQs, so scan irq_vectors for previous
727 * use of vector and if found, return that IRQ. However, we never want
728 * to share legacy IRQs, which usually have a different trigger mode
729 * than PCI.
730 */
731 for (i = 0; i < NR_IRQS; i++)
732 if (IO_APIC_VECTOR(i) == vector)
733 break;
734 if (platform_legacy_irq(i)) {
735 if (--tries >= 0) {
736 IO_APIC_VECTOR(i) = 0;
737 goto try_again;
738 }
739 panic("gsi_irq_sharing: didn't find an IRQ using vector 0x%02X for GSI %d", vector, gsi);
740 }
741 if (i < NR_IRQS) {
742 gsi_2_irq[gsi] = i;
743 printk(KERN_INFO "GSI %d sharing vector 0x%02X and IRQ %d\n",
744 gsi, vector, i);
745 return i;
746 }
747
748 i = next_irq++;
749 BUG_ON(i >= NR_IRQS);
750 gsi_2_irq[gsi] = i;
751 IO_APIC_VECTOR(i) = vector;
752 printk(KERN_INFO "GSI %d assigned vector 0x%02X and IRQ %d\n",
753 gsi, vector, i);
754 return i;
755 }
756
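/*
 * pin_2_irq(): ISA/EISA/MCA pins use the MP-table source bus IRQ directly;
 * PCI pins are numbered globally by summing the pin counts of all
 * lower-numbered IO-APICs and are then compacted into the Linux IRQ space
 * via gsi_irq_sharing().
 */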
757 static int pin_2_irq(int idx, int apic, int pin)
758 {
759 int irq, i;
760 int bus = mp_irqs[idx].mpc_srcbus;
761
762 /*
763 * Debugging check, we are in big trouble if this message pops up!
764 */
765 if (mp_irqs[idx].mpc_dstirq != pin)
766 printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
767
768 switch (mp_bus_id_to_type[bus])
769 {
770 case MP_BUS_ISA: /* ISA pin */
771 case MP_BUS_EISA:
772 case MP_BUS_MCA:
773 {
774 irq = mp_irqs[idx].mpc_srcbusirq;
775 break;
776 }
777 case MP_BUS_PCI: /* PCI pin */
778 {
779 /*
780 * PCI IRQs are mapped in order
781 */
782 i = irq = 0;
783 while (i < apic)
784 irq += nr_ioapic_registers[i++];
785 irq += pin;
786 irq = gsi_irq_sharing(irq);
787 break;
788 }
789 default:
790 {
791 printk(KERN_ERR "unknown bus type %d.\n",bus);
792 irq = 0;
793 break;
794 }
795 }
796 BUG_ON(irq >= NR_IRQS);
797
798 /*
799 * PCI IRQ command line redirection. Yes, limits are hardcoded.
800 */
801 if ((pin >= 16) && (pin <= 23)) {
802 if (pirq_entries[pin-16] != -1) {
803 if (!pirq_entries[pin-16]) {
804 apic_printk(APIC_VERBOSE, "disabling PIRQ%d\n", pin-16);
805 } else {
806 irq = pirq_entries[pin-16];
807 apic_printk(APIC_VERBOSE, "using PIRQ%d -> IRQ %d\n",
808 pin-16, irq);
809 }
810 }
811 }
812 BUG_ON(irq >= NR_IRQS);
813 return irq;
814 }
815
816 static inline int IO_APIC_irq_trigger(int irq)
817 {
818 int apic, idx, pin;
819
820 for (apic = 0; apic < nr_ioapics; apic++) {
821 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
822 idx = find_irq_entry(apic,pin,mp_INT);
823 if ((idx != -1) && (irq == pin_2_irq(idx,apic,pin)))
824 return irq_trigger(idx);
825 }
826 }
827 /*
828 * nonexistent IRQs are edge default
829 */
830 return 0;
831 }
832
833 /* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
834 u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 };
835
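/*
 * Vectors are handed out in steps of 8 so consecutive IRQs land in
 * different interrupt-priority levels (the level is vector >> 4).
 * IA32_SYSCALL_VECTOR is skipped, and once FIRST_SYSTEM_VECTOR is reached
 * allocation wraps around with a new offset, sharing vectors if needed.
 */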
836 int assign_irq_vector(int irq)
837 {
838 static int current_vector = FIRST_DEVICE_VECTOR, offset = 0;
839 unsigned long flags;
840 int vector;
841
842 BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS);
843
844 spin_lock_irqsave(&vector_lock, flags);
845
846 if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) {
847 spin_unlock_irqrestore(&vector_lock, flags);
848 return IO_APIC_VECTOR(irq);
849 }
850 next:
851 current_vector += 8;
852 if (current_vector == IA32_SYSCALL_VECTOR)
853 goto next;
854
855 if (current_vector >= FIRST_SYSTEM_VECTOR) {
856 /* If we run out of vectors on large boxen, must share them. */
857 offset = (offset + 1) % 8;
858 current_vector = FIRST_DEVICE_VECTOR + offset;
859 }
860
861 vector = current_vector;
862 vector_irq[vector] = irq;
863 if (irq != AUTO_ASSIGN)
864 IO_APIC_VECTOR(irq) = vector;
865
866 spin_unlock_irqrestore(&vector_lock, flags);
867
868 return vector;
869 }
870
871 extern void (*interrupt[NR_IRQS])(void);
872 static struct hw_interrupt_type ioapic_level_type;
873 static struct hw_interrupt_type ioapic_edge_type;
874
875 #define IOAPIC_AUTO -1
876 #define IOAPIC_EDGE 0
877 #define IOAPIC_LEVEL 1
878
879 static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
880 {
881 unsigned idx;
882
883 idx = use_pci_vector() && !platform_legacy_irq(irq) ? vector : irq;
884
885 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
886 trigger == IOAPIC_LEVEL)
887 irq_desc[idx].chip = &ioapic_level_type;
888 else
889 irq_desc[idx].chip = &ioapic_edge_type;
890 set_intr_gate(vector, interrupt[idx]);
891 }
892
893 static void __init setup_IO_APIC_irqs(void)
894 {
895 struct IO_APIC_route_entry entry;
896 int apic, pin, idx, irq, first_notcon = 1, vector;
897 unsigned long flags;
898
899 apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
900
901 for (apic = 0; apic < nr_ioapics; apic++) {
902 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
903
904 /*
905 * add it to the IO-APIC irq-routing table:
906 */
907 memset(&entry,0,sizeof(entry));
908
909 entry.delivery_mode = INT_DELIVERY_MODE;
910 entry.dest_mode = INT_DEST_MODE;
911 entry.mask = 0; /* enable IRQ */
912 entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
913
914 idx = find_irq_entry(apic,pin,mp_INT);
915 if (idx == -1) {
916 if (first_notcon) {
917 apic_printk(APIC_VERBOSE, KERN_DEBUG " IO-APIC (apicid-pin) %d-%d", mp_ioapics[apic].mpc_apicid, pin);
918 first_notcon = 0;
919 } else
920 apic_printk(APIC_VERBOSE, ", %d-%d", mp_ioapics[apic].mpc_apicid, pin);
921 continue;
922 }
923
924 entry.trigger = irq_trigger(idx);
925 entry.polarity = irq_polarity(idx);
926
927 if (irq_trigger(idx)) {
928 entry.trigger = 1;
929 entry.mask = 1;
930 entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
931 }
932
933 irq = pin_2_irq(idx, apic, pin);
934 add_pin_to_irq(irq, apic, pin);
935
936 if (!apic && !IO_APIC_IRQ(irq))
937 continue;
938
939 if (IO_APIC_IRQ(irq)) {
940 vector = assign_irq_vector(irq);
941 entry.vector = vector;
942
943 ioapic_register_intr(irq, vector, IOAPIC_AUTO);
944 if (!apic && (irq < 16))
945 disable_8259A_irq(irq);
946 }
947 spin_lock_irqsave(&ioapic_lock, flags);
948 io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
949 io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
950 set_native_irq_info(irq, TARGET_CPUS);
951 spin_unlock_irqrestore(&ioapic_lock, flags);
952 }
953 }
954
955 if (!first_notcon)
956 apic_printk(APIC_VERBOSE," not connected.\n");
957 }
958
959 /*
960 * Set up the 8259A-master output pin as broadcast to all
961 * CPUs.
962 */
963 static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector)
964 {
965 struct IO_APIC_route_entry entry;
966 unsigned long flags;
967
968 memset(&entry,0,sizeof(entry));
969
970 disable_8259A_irq(0);
971
972 /* mask LVT0 */
973 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
974
975 /*
976 * We use logical delivery to get the timer IRQ
977 * to the first CPU.
978 */
979 entry.dest_mode = INT_DEST_MODE;
980 entry.mask = 0; /* unmask IRQ now */
981 entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
982 entry.delivery_mode = INT_DELIVERY_MODE;
983 entry.polarity = 0;
984 entry.trigger = 0;
985 entry.vector = vector;
986
987 /*
988 * The timer IRQ doesn't have to know that behind the
989 * scenes we have an 8259A-master in AEOI mode ...
990 */
991 irq_desc[0].chip = &ioapic_edge_type;
992
993 /*
994 * Add it to the IO-APIC irq-routing table:
995 */
996 spin_lock_irqsave(&ioapic_lock, flags);
997 io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
998 io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
999 spin_unlock_irqrestore(&ioapic_lock, flags);
1000
1001 enable_8259A_irq(0);
1002 }
1003
1004 void __init UNEXPECTED_IO_APIC(void)
1005 {
1006 }
1007
1008 void __apicdebuginit print_IO_APIC(void)
1009 {
1010 int apic, i;
1011 union IO_APIC_reg_00 reg_00;
1012 union IO_APIC_reg_01 reg_01;
1013 union IO_APIC_reg_02 reg_02;
1014 unsigned long flags;
1015
1016 if (apic_verbosity == APIC_QUIET)
1017 return;
1018
1019 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
1020 for (i = 0; i < nr_ioapics; i++)
1021 printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
1022 mp_ioapics[i].mpc_apicid, nr_ioapic_registers[i]);
1023
1024 /*
1025 * We are a bit conservative about what we expect. We have to
1026 * know about every hardware change ASAP.
1027 */
1028 printk(KERN_INFO "testing the IO APIC.......................\n");
1029
1030 for (apic = 0; apic < nr_ioapics; apic++) {
1031
1032 spin_lock_irqsave(&ioapic_lock, flags);
1033 reg_00.raw = io_apic_read(apic, 0);
1034 reg_01.raw = io_apic_read(apic, 1);
1035 if (reg_01.bits.version >= 0x10)
1036 reg_02.raw = io_apic_read(apic, 2);
1037 spin_unlock_irqrestore(&ioapic_lock, flags);
1038
1039 printk("\n");
1040 printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mpc_apicid);
1041 printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
1042 printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
1043 if (reg_00.bits.__reserved_1 || reg_00.bits.__reserved_2)
1044 UNEXPECTED_IO_APIC();
1045
1046 printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
1047 printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
1048 if ( (reg_01.bits.entries != 0x0f) && /* older (Neptune) boards */
1049 (reg_01.bits.entries != 0x17) && /* typical ISA+PCI boards */
1050 (reg_01.bits.entries != 0x1b) && /* Compaq Proliant boards */
1051 (reg_01.bits.entries != 0x1f) && /* dual Xeon boards */
1052 (reg_01.bits.entries != 0x22) && /* bigger Xeon boards */
1053 (reg_01.bits.entries != 0x2E) &&
1054 (reg_01.bits.entries != 0x3F) &&
1055 (reg_01.bits.entries != 0x03)
1056 )
1057 UNEXPECTED_IO_APIC();
1058
1059 printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
1060 printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);
1061 if ( (reg_01.bits.version != 0x01) && /* 82489DX IO-APICs */
1062 (reg_01.bits.version != 0x02) && /* 82801BA IO-APICs (ICH2) */
1063 (reg_01.bits.version != 0x10) && /* oldest IO-APICs */
1064 (reg_01.bits.version != 0x11) && /* Pentium/Pro IO-APICs */
1065 (reg_01.bits.version != 0x13) && /* Xeon IO-APICs */
1066 (reg_01.bits.version != 0x20) /* Intel P64H (82806 AA) */
1067 )
1068 UNEXPECTED_IO_APIC();
1069 if (reg_01.bits.__reserved_1 || reg_01.bits.__reserved_2)
1070 UNEXPECTED_IO_APIC();
1071
1072 if (reg_01.bits.version >= 0x10) {
1073 printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
1074 printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
1075 if (reg_02.bits.__reserved_1 || reg_02.bits.__reserved_2)
1076 UNEXPECTED_IO_APIC();
1077 }
1078
1079 printk(KERN_DEBUG ".... IRQ redirection table:\n");
1080
1081 printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol"
1082 " Stat Dest Deli Vect: \n");
1083
1084 for (i = 0; i <= reg_01.bits.entries; i++) {
1085 struct IO_APIC_route_entry entry;
1086
1087 spin_lock_irqsave(&ioapic_lock, flags);
1088 *(((int *)&entry)+0) = io_apic_read(apic, 0x10+i*2);
1089 *(((int *)&entry)+1) = io_apic_read(apic, 0x11+i*2);
1090 spin_unlock_irqrestore(&ioapic_lock, flags);
1091
1092 printk(KERN_DEBUG " %02x %03X %02X ",
1093 i,
1094 entry.dest.logical.logical_dest,
1095 entry.dest.physical.physical_dest
1096 );
1097
1098 printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
1099 entry.mask,
1100 entry.trigger,
1101 entry.irr,
1102 entry.polarity,
1103 entry.delivery_status,
1104 entry.dest_mode,
1105 entry.delivery_mode,
1106 entry.vector
1107 );
1108 }
1109 }
1110 if (use_pci_vector())
1111 printk(KERN_INFO "Using vector-based indexing\n");
1112 printk(KERN_DEBUG "IRQ to pin mappings:\n");
1113 for (i = 0; i < NR_IRQS; i++) {
1114 struct irq_pin_list *entry = irq_2_pin + i;
1115 if (entry->pin < 0)
1116 continue;
1117 if (use_pci_vector() && !platform_legacy_irq(i))
1118 printk(KERN_DEBUG "IRQ%d ", IO_APIC_VECTOR(i));
1119 else
1120 printk(KERN_DEBUG "IRQ%d ", i);
1121 for (;;) {
1122 printk("-> %d:%d", entry->apic, entry->pin);
1123 if (!entry->next)
1124 break;
1125 entry = irq_2_pin + entry->next;
1126 }
1127 printk("\n");
1128 }
1129
1130 printk(KERN_INFO ".................................... done.\n");
1131
1132 return;
1133 }
1134
1135 #if 0
1136
1137 static __apicdebuginit void print_APIC_bitfield (int base)
1138 {
1139 unsigned int v;
1140 int i, j;
1141
1142 if (apic_verbosity == APIC_QUIET)
1143 return;
1144
1145 printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
1146 for (i = 0; i < 8; i++) {
1147 v = apic_read(base + i*0x10);
1148 for (j = 0; j < 32; j++) {
1149 if (v & (1<<j))
1150 printk("1");
1151 else
1152 printk("0");
1153 }
1154 printk("\n");
1155 }
1156 }
1157
1158 void __apicdebuginit print_local_APIC(void * dummy)
1159 {
1160 unsigned int v, ver, maxlvt;
1161
1162 if (apic_verbosity == APIC_QUIET)
1163 return;
1164
1165 printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
1166 smp_processor_id(), hard_smp_processor_id());
1167 v = apic_read(APIC_ID);
1168 printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, GET_APIC_ID(v));
1169 v = apic_read(APIC_LVR);
1170 printk(KERN_INFO "... APIC VERSION: %08x\n", v);
1171 ver = GET_APIC_VERSION(v);
1172 maxlvt = get_maxlvt();
1173
1174 v = apic_read(APIC_TASKPRI);
1175 printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
1176
1177 v = apic_read(APIC_ARBPRI);
1178 printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
1179 v & APIC_ARBPRI_MASK);
1180 v = apic_read(APIC_PROCPRI);
1181 printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
1182
1183 v = apic_read(APIC_EOI);
1184 printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
1185 v = apic_read(APIC_RRR);
1186 printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
1187 v = apic_read(APIC_LDR);
1188 printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
1189 v = apic_read(APIC_DFR);
1190 printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
1191 v = apic_read(APIC_SPIV);
1192 printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
1193
1194 printk(KERN_DEBUG "... APIC ISR field:\n");
1195 print_APIC_bitfield(APIC_ISR);
1196 printk(KERN_DEBUG "... APIC TMR field:\n");
1197 print_APIC_bitfield(APIC_TMR);
1198 printk(KERN_DEBUG "... APIC IRR field:\n");
1199 print_APIC_bitfield(APIC_IRR);
1200
1201 v = apic_read(APIC_ESR);
1202 printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
1203
1204 v = apic_read(APIC_ICR);
1205 printk(KERN_DEBUG "... APIC ICR: %08x\n", v);
1206 v = apic_read(APIC_ICR2);
1207 printk(KERN_DEBUG "... APIC ICR2: %08x\n", v);
1208
1209 v = apic_read(APIC_LVTT);
1210 printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
1211
1212 if (maxlvt > 3) { /* PC is LVT#4. */
1213 v = apic_read(APIC_LVTPC);
1214 printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
1215 }
1216 v = apic_read(APIC_LVT0);
1217 printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
1218 v = apic_read(APIC_LVT1);
1219 printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
1220
1221 if (maxlvt > 2) { /* ERR is LVT#3. */
1222 v = apic_read(APIC_LVTERR);
1223 printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
1224 }
1225
1226 v = apic_read(APIC_TMICT);
1227 printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
1228 v = apic_read(APIC_TMCCT);
1229 printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
1230 v = apic_read(APIC_TDCR);
1231 printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
1232 printk("\n");
1233 }
1234
1235 void print_all_local_APICs (void)
1236 {
1237 on_each_cpu(print_local_APIC, NULL, 1, 1);
1238 }
1239
1240 void __apicdebuginit print_PIC(void)
1241 {
1242 unsigned int v;
1243 unsigned long flags;
1244
1245 if (apic_verbosity == APIC_QUIET)
1246 return;
1247
1248 printk(KERN_DEBUG "\nprinting PIC contents\n");
1249
1250 spin_lock_irqsave(&i8259A_lock, flags);
1251
1252 v = inb(0xa1) << 8 | inb(0x21);
1253 printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
1254
1255 v = inb(0xa0) << 8 | inb(0x20);
1256 printk(KERN_DEBUG "... PIC IRR: %04x\n", v);
1257
1258 outb(0x0b,0xa0);
1259 outb(0x0b,0x20);
1260 v = inb(0xa0) << 8 | inb(0x20);
1261 outb(0x0a,0xa0);
1262 outb(0x0a,0x20);
1263
1264 spin_unlock_irqrestore(&i8259A_lock, flags);
1265
1266 printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
1267
1268 v = inb(0x4d1) << 8 | inb(0x4d0);
1269 printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
1270 }
1271
1272 #endif /* 0 */
1273
1274 static void __init enable_IO_APIC(void)
1275 {
1276 union IO_APIC_reg_01 reg_01;
1277 int i8259_apic, i8259_pin;
1278 int i, apic;
1279 unsigned long flags;
1280
1281 for (i = 0; i < PIN_MAP_SIZE; i++) {
1282 irq_2_pin[i].pin = -1;
1283 irq_2_pin[i].next = 0;
1284 }
1285 if (!pirqs_enabled)
1286 for (i = 0; i < MAX_PIRQS; i++)
1287 pirq_entries[i] = -1;
1288
1289 /*
1290 * The number of IO-APIC IRQ registers (== #pins):
1291 */
1292 for (apic = 0; apic < nr_ioapics; apic++) {
1293 spin_lock_irqsave(&ioapic_lock, flags);
1294 reg_01.raw = io_apic_read(apic, 1);
1295 spin_unlock_irqrestore(&ioapic_lock, flags);
1296 nr_ioapic_registers[apic] = reg_01.bits.entries+1;
1297 }
1298 for(apic = 0; apic < nr_ioapics; apic++) {
1299 int pin;
1300 /* See if any of the pins is in ExtINT mode */
1301 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
1302 struct IO_APIC_route_entry entry;
1303 spin_lock_irqsave(&ioapic_lock, flags);
1304 *(((int *)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
1305 *(((int *)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
1306 spin_unlock_irqrestore(&ioapic_lock, flags);
1307
1308
1309 /* If the interrupt line is enabled and in ExtInt mode
1310 * I have found the pin where the i8259 is connected.
1311 */
1312 if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
1313 ioapic_i8259.apic = apic;
1314 ioapic_i8259.pin = pin;
1315 goto found_i8259;
1316 }
1317 }
1318 }
1319 found_i8259:
1320 /* Look to see if the MP table has reported the ExtINT */
1321 i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
1322 i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
1323 /* Trust the MP table if nothing is setup in the hardware */
1324 if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
1325 printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
1326 ioapic_i8259.pin = i8259_pin;
1327 ioapic_i8259.apic = i8259_apic;
1328 }
1329 /* Complain if the MP table and the hardware disagree */
1330 if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
1331 (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
1332 {
1333 printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
1334 }
1335
1336 /*
1337 * Do not trust the IO-APIC being empty at bootup
1338 */
1339 clear_IO_APIC();
1340 }
1341
1342 /*
1343 * Not an __init, needed by the reboot code
1344 */
1345 void disable_IO_APIC(void)
1346 {
1347 /*
1348 * Clear the IO-APIC before rebooting:
1349 */
1350 clear_IO_APIC();
1351
1352 /*
1353 * If the i8259 is routed through an IOAPIC
1354 * Put that IOAPIC in virtual wire mode
1355 * so legacy interrupts can be delivered.
1356 */
1357 if (ioapic_i8259.pin != -1) {
1358 struct IO_APIC_route_entry entry;
1359 unsigned long flags;
1360
1361 memset(&entry, 0, sizeof(entry));
1362 entry.mask = 0; /* Enabled */
1363 entry.trigger = 0; /* Edge */
1364 entry.irr = 0;
1365 entry.polarity = 0; /* High */
1366 entry.delivery_status = 0;
1367 entry.dest_mode = 0; /* Physical */
1368 entry.delivery_mode = dest_ExtINT; /* ExtInt */
1369 entry.vector = 0;
1370 entry.dest.physical.physical_dest =
1371 GET_APIC_ID(apic_read(APIC_ID));
1372
1373 /*
1374 * Add it to the IO-APIC irq-routing table:
1375 */
1376 spin_lock_irqsave(&ioapic_lock, flags);
1377 io_apic_write(ioapic_i8259.apic, 0x11+2*ioapic_i8259.pin,
1378 *(((int *)&entry)+1));
1379 io_apic_write(ioapic_i8259.apic, 0x10+2*ioapic_i8259.pin,
1380 *(((int *)&entry)+0));
1381 spin_unlock_irqrestore(&ioapic_lock, flags);
1382 }
1383
1384 disconnect_bsp_APIC(ioapic_i8259.pin != -1);
1385 }
1386
1387 /*
1388 * function to set the IO-APIC physical IDs based on the
1389 * values stored in the MPC table.
1390 *
1391 * by Matt Domsch <Matt_Domsch@dell.com> Tue Dec 21 12:25:05 CST 1999
1392 */
1393
1394 static void __init setup_ioapic_ids_from_mpc (void)
1395 {
1396 union IO_APIC_reg_00 reg_00;
1397 int apic;
1398 int i;
1399 unsigned char old_id;
1400 unsigned long flags;
1401
1402 /*
1403 * Set the IOAPIC ID to the value stored in the MPC table.
1404 */
1405 for (apic = 0; apic < nr_ioapics; apic++) {
1406
1407 /* Read the register 0 value */
1408 spin_lock_irqsave(&ioapic_lock, flags);
1409 reg_00.raw = io_apic_read(apic, 0);
1410 spin_unlock_irqrestore(&ioapic_lock, flags);
1411
1412 old_id = mp_ioapics[apic].mpc_apicid;
1413
1414
1415 printk(KERN_INFO "Using IO-APIC %d\n", mp_ioapics[apic].mpc_apicid);
1416
1417
1418 /*
1419 * We need to adjust the IRQ routing table
1420 * if the ID changed.
1421 */
1422 if (old_id != mp_ioapics[apic].mpc_apicid)
1423 for (i = 0; i < mp_irq_entries; i++)
1424 if (mp_irqs[i].mpc_dstapic == old_id)
1425 mp_irqs[i].mpc_dstapic
1426 = mp_ioapics[apic].mpc_apicid;
1427
1428 /*
1429 * Read the right value from the MPC table and
1430 * write it into the ID register.
1431 */
1432 apic_printk(APIC_VERBOSE,KERN_INFO "...changing IO-APIC physical APIC ID to %d ...",
1433 mp_ioapics[apic].mpc_apicid);
1434
1435 reg_00.bits.ID = mp_ioapics[apic].mpc_apicid;
1436 spin_lock_irqsave(&ioapic_lock, flags);
1437 io_apic_write(apic, 0, reg_00.raw);
1438 spin_unlock_irqrestore(&ioapic_lock, flags);
1439
1440 /*
1441 * Sanity check
1442 */
1443 spin_lock_irqsave(&ioapic_lock, flags);
1444 reg_00.raw = io_apic_read(apic, 0);
1445 spin_unlock_irqrestore(&ioapic_lock, flags);
1446 if (reg_00.bits.ID != mp_ioapics[apic].mpc_apicid)
1447 printk("could not set ID!\n");
1448 else
1449 apic_printk(APIC_VERBOSE," ok.\n");
1450 }
1451 }
1452
1453 /*
1454 * There is a nasty bug in some older SMP boards, their mptable lies
1455 * about the timer IRQ. We do the following to work around the situation:
1456 *
1457 * - timer IRQ defaults to IO-APIC IRQ
1458 * - if this function detects that timer IRQs are defunct, then we fall
1459 * back to ISA timer IRQs
1460 */
1461 static int __init timer_irq_works(void)
1462 {
1463 unsigned long t1 = jiffies;
1464
1465 local_irq_enable();
1466 /* Let ten ticks pass... */
1467 mdelay((10 * 1000) / HZ);
1468
1469 /*
1470 * Expect a few ticks at least, to be sure some possible
1471 * glue logic does not lock up after one or two first
1472 * ticks in a non-ExtINT mode. Also the local APIC
1473 * might have cached one ExtINT interrupt. Finally, at
1474 * least one tick may be lost due to delays.
1475 */
1476
1477 /* jiffies wrap? */
1478 if (jiffies - t1 > 4)
1479 return 1;
1480 return 0;
1481 }
1482
1483 /*
1484 * In the SMP+IOAPIC case it might happen that there are an unspecified
1485 * number of pending IRQ events unhandled. These cases are very rare,
1486 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
1487 * better to do it this way as thus we do not have to be aware of
1488 * 'pending' interrupts in the IRQ path, except at this point.
1489 */
1490 /*
1491 * Edge triggered needs to resend any interrupt
1492 * that was delayed but this is now handled in the device
1493 * independent code.
1494 */
1495
1496 /*
1497 * Starting up an edge-triggered IO-APIC interrupt is
1498 * nasty - we need to make sure that we get the edge.
1499 * If it is already asserted for some reason, we need to
1500 * return 1 to indicate that it was pending.
1501 *
1502 * This is not complete - we should be able to fake
1503 * an edge even if it isn't on the 8259A...
1504 */
1505
1506 static unsigned int startup_edge_ioapic_irq(unsigned int irq)
1507 {
1508 int was_pending = 0;
1509 unsigned long flags;
1510
1511 spin_lock_irqsave(&ioapic_lock, flags);
1512 if (irq < 16) {
1513 disable_8259A_irq(irq);
1514 if (i8259A_irq_pending(irq))
1515 was_pending = 1;
1516 }
1517 __unmask_IO_APIC_irq(irq);
1518 spin_unlock_irqrestore(&ioapic_lock, flags);
1519
1520 return was_pending;
1521 }
1522
1523 /*
1524 * Once we have recorded IRQ_PENDING already, we can mask the
1525 * interrupt for real. This prevents IRQ storms from unhandled
1526 * devices.
1527 */
1528 static void ack_edge_ioapic_irq(unsigned int irq)
1529 {
1530 move_irq(irq);
1531 if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED))
1532 == (IRQ_PENDING | IRQ_DISABLED))
1533 mask_IO_APIC_irq(irq);
1534 ack_APIC_irq();
1535 }
1536
1537 /*
1538 * Level triggered interrupts can just be masked,
1539 * and shutting down and starting up the interrupt
1540 * is the same as enabling and disabling them -- except
1541 * with a startup need to return a "was pending" value.
1542 *
1543 * Level triggered interrupts are special because we
1544 * do not touch any IO-APIC register while handling
1545 * them. We ack the APIC in the end-IRQ handler, not
1546 * in the start-IRQ-handler. Protection against reentrance
1547 * from the same interrupt is still provided, both by the
1548 * generic IRQ layer and by the fact that an unacked local
1549 * APIC does not accept IRQs.
1550 */
1551 static unsigned int startup_level_ioapic_irq (unsigned int irq)
1552 {
1553 unmask_IO_APIC_irq(irq);
1554
1555 return 0; /* don't check for pending */
1556 }
1557
1558 static void end_level_ioapic_irq (unsigned int irq)
1559 {
1560 move_irq(irq);
1561 ack_APIC_irq();
1562 }
1563
1564 #ifdef CONFIG_PCI_MSI
1565 static unsigned int startup_edge_ioapic_vector(unsigned int vector)
1566 {
1567 int irq = vector_to_irq(vector);
1568
1569 return startup_edge_ioapic_irq(irq);
1570 }
1571
1572 static void ack_edge_ioapic_vector(unsigned int vector)
1573 {
1574 int irq = vector_to_irq(vector);
1575
1576 move_native_irq(vector);
1577 ack_edge_ioapic_irq(irq);
1578 }
1579
1580 static unsigned int startup_level_ioapic_vector (unsigned int vector)
1581 {
1582 int irq = vector_to_irq(vector);
1583
1584 return startup_level_ioapic_irq (irq);
1585 }
1586
1587 static void end_level_ioapic_vector (unsigned int vector)
1588 {
1589 int irq = vector_to_irq(vector);
1590
1591 move_native_irq(vector);
1592 end_level_ioapic_irq(irq);
1593 }
1594
1595 static void mask_IO_APIC_vector (unsigned int vector)
1596 {
1597 int irq = vector_to_irq(vector);
1598
1599 mask_IO_APIC_irq(irq);
1600 }
1601
1602 static void unmask_IO_APIC_vector (unsigned int vector)
1603 {
1604 int irq = vector_to_irq(vector);
1605
1606 unmask_IO_APIC_irq(irq);
1607 }
1608
1609 #ifdef CONFIG_SMP
1610 static void set_ioapic_affinity_vector (unsigned int vector,
1611 cpumask_t cpu_mask)
1612 {
1613 int irq = vector_to_irq(vector);
1614
1615 set_native_irq_info(vector, cpu_mask);
1616 set_ioapic_affinity_irq(irq, cpu_mask);
1617 }
1618 #endif // CONFIG_SMP
1619 #endif // CONFIG_PCI_MSI
1620
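/*
 * The ->retrigger() op backing the consolidated hw_irq_resend(): a lost
 * interrupt is replayed by sending the IRQ's vector to this CPU as a
 * self-IPI, without touching the IO-APIC itself.
 */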
1621 static int ioapic_retrigger(unsigned int irq)
1622 {
1623 send_IPI_self(IO_APIC_VECTOR(irq));
1624
1625 return 1;
1626 }
1627
1628 /*
1629 * Level and edge triggered IO-APIC interrupts need different handling,
1630 * so we use two separate IRQ descriptors. Edge triggered IRQs can be
1631 * handled with the level-triggered descriptor, but that one has slightly
1632 * more overhead. Level-triggered interrupts cannot be handled with the
1633 * edge-triggered handler, without risking IRQ storms and other ugly
1634 * races.
1635 */
1636
1637 static struct hw_interrupt_type ioapic_edge_type __read_mostly = {
1638 .typename = "IO-APIC-edge",
1639 .startup = startup_edge_ioapic,
1640 .shutdown = shutdown_edge_ioapic,
1641 .enable = enable_edge_ioapic,
1642 .disable = disable_edge_ioapic,
1643 .ack = ack_edge_ioapic,
1644 .end = end_edge_ioapic,
1645 #ifdef CONFIG_SMP
1646 .set_affinity = set_ioapic_affinity,
1647 #endif
1648 .retrigger = ioapic_retrigger,
1649 };
1650
1651 static struct hw_interrupt_type ioapic_level_type __read_mostly = {
1652 .typename = "IO-APIC-level",
1653 .startup = startup_level_ioapic,
1654 .shutdown = shutdown_level_ioapic,
1655 .enable = enable_level_ioapic,
1656 .disable = disable_level_ioapic,
1657 .ack = mask_and_ack_level_ioapic,
1658 .end = end_level_ioapic,
1659 #ifdef CONFIG_SMP
1660 .set_affinity = set_ioapic_affinity,
1661 #endif
1662 .retrigger = ioapic_retrigger,
1663 };
1664
1665 static inline void init_IO_APIC_traps(void)
1666 {
1667 int irq;
1668
1669 /*
1670 * NOTE! The local APIC isn't very good at handling
1671 * multiple interrupts at the same interrupt level.
1672 * As the interrupt level is determined by taking the
1673 * vector number and shifting that right by 4, we
1674 * want to spread these out a bit so that they don't
1675 * all fall in the same interrupt level.
1676 *
1677 * Also, we've got to be careful not to trash gate
1678 * 0x80, because int 0x80 is hm, kind of importantish. ;)
1679 */
1680 for (irq = 0; irq < NR_IRQS ; irq++) {
1681 int tmp = irq;
1682 if (use_pci_vector()) {
1683 if (!platform_legacy_irq(tmp))
1684 if ((tmp = vector_to_irq(tmp)) == -1)
1685 continue;
1686 }
1687 if (IO_APIC_IRQ(tmp) && !IO_APIC_VECTOR(tmp)) {
1688 /*
1689 * Hmm.. We don't have an entry for this,
1690 * so default to an old-fashioned 8259
1691 * interrupt if we can..
1692 */
1693 if (irq < 16)
1694 make_8259A_irq(irq);
1695 else
1696 /* Strange. Oh, well.. */
1697 irq_desc[irq].chip = &no_irq_type;
1698 }
1699 }
1700 }
1701
1702 static void enable_lapic_irq (unsigned int irq)
1703 {
1704 unsigned long v;
1705
1706 v = apic_read(APIC_LVT0);
1707 apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
1708 }
1709
1710 static void disable_lapic_irq (unsigned int irq)
1711 {
1712 unsigned long v;
1713
1714 v = apic_read(APIC_LVT0);
1715 apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
1716 }
1717
1718 static void ack_lapic_irq (unsigned int irq)
1719 {
1720 ack_APIC_irq();
1721 }
1722
1723 static void end_lapic_irq (unsigned int i) { /* nothing */ }
1724
1725 static struct hw_interrupt_type lapic_irq_type __read_mostly = {
1726 .typename = "local-APIC-edge",
1727 .startup = NULL, /* startup_irq() not used for IRQ0 */
1728 .shutdown = NULL, /* shutdown_irq() not used for IRQ0 */
1729 .enable = enable_lapic_irq,
1730 .disable = disable_lapic_irq,
1731 .ack = ack_lapic_irq,
1732 .end = end_lapic_irq,
1733 };
1734
1735 static void setup_nmi (void)
1736 {
1737 /*
1738 * Dirty trick to enable the NMI watchdog ...
1739 * We put the 8259A master into AEOI mode and
1740 * unmask on all local APICs LVT0 as NMI.
1741 *
1742 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
1743 * is from Maciej W. Rozycki - so we do not have to EOI from
1744 * the NMI handler or the timer interrupt.
1745 */
1746 printk(KERN_INFO "activating NMI Watchdog ...");
1747
1748 enable_NMI_through_LVT0(NULL);
1749
1750 printk(" done.\n");
1751 }
1752
1753 /*
1754 * This looks a bit hackish but it's about the only way of sending
1755 * a few INTA cycles to 8259As and any associated glue logic. ICR does
1756 * not support the ExtINT mode, unfortunately. We need to send these
1757 * cycles as some i82489DX-based boards have glue logic that keeps the
1758 * 8259A interrupt line asserted until INTA. --macro
1759 */
1760 static inline void unlock_ExtINT_logic(void)
1761 {
1762 int apic, pin, i;
1763 struct IO_APIC_route_entry entry0, entry1;
1764 unsigned char save_control, save_freq_select;
1765 unsigned long flags;
1766
1767 pin = find_isa_irq_pin(8, mp_INT);
1768 apic = find_isa_irq_apic(8, mp_INT);
1769 if (pin == -1)
1770 return;
1771
1772 spin_lock_irqsave(&ioapic_lock, flags);
1773 *(((int *)&entry0) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
1774 *(((int *)&entry0) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
1775 spin_unlock_irqrestore(&ioapic_lock, flags);
1776 clear_IO_APIC_pin(apic, pin);
1777
1778 memset(&entry1, 0, sizeof(entry1));
1779
1780 entry1.dest_mode = 0; /* physical delivery */
1781 entry1.mask = 0; /* unmask IRQ now */
1782 entry1.dest.physical.physical_dest = hard_smp_processor_id();
1783 entry1.delivery_mode = dest_ExtINT;
1784 entry1.polarity = entry0.polarity;
1785 entry1.trigger = 0;
1786 entry1.vector = 0;
1787
1788 spin_lock_irqsave(&ioapic_lock, flags);
1789 io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry1) + 1));
1790 io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry1) + 0));
1791 spin_unlock_irqrestore(&ioapic_lock, flags);
1792
1793 save_control = CMOS_READ(RTC_CONTROL);
1794 save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
1795 CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
1796 RTC_FREQ_SELECT);
1797 CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
1798
1799 i = 100;
1800 while (i-- > 0) {
1801 mdelay(10);
1802 if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
1803 i -= 10;
1804 }
1805
1806 CMOS_WRITE(save_control, RTC_CONTROL);
1807 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
1808 clear_IO_APIC_pin(apic, pin);
1809
1810 spin_lock_irqsave(&ioapic_lock, flags);
1811 io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry0) + 1));
1812 io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry0) + 0));
1813 spin_unlock_irqrestore(&ioapic_lock, flags);
1814 }
1815
1816 int timer_uses_ioapic_pin_0;
1817
1818 /*
1819 * This code may look a bit paranoid, but it's supposed to cooperate with
1820 * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
1821 * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
1822 * fanatically on his truly buggy board.
1823 *
1824 * FIXME: really need to revamp this for modern platforms only.
1825 */
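/*
 * Fallback order tried below: IRQ0 through the IO-APIC pin the MP table
 * reports, then as ExtINT through the pin the i8259 is wired to, then as
 * a local APIC "virtual wire" interrupt, and finally as a plain 8259
 * ExtINT.
 */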
1826 static inline void check_timer(void)
1827 {
1828 int apic1, pin1, apic2, pin2;
1829 int vector;
1830
1831 /*
1832 * get/set the timer IRQ vector:
1833 */
1834 disable_8259A_irq(0);
1835 vector = assign_irq_vector(0);
1836 set_intr_gate(vector, interrupt[0]);
1837
1838 /*
1839 * Subtle, code in do_timer_interrupt() expects an AEOI
1840 * mode for the 8259A whenever interrupts are routed
1841 * through I/O APICs. Also IRQ0 has to be enabled in
1842 * the 8259A which implies the virtual wire has to be
1843 * disabled in the local APIC.
1844 */
1845 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
1846 init_8259A(1);
1847 if (timer_over_8254 > 0)
1848 enable_8259A_irq(0);
1849
1850 pin1 = find_isa_irq_pin(0, mp_INT);
1851 apic1 = find_isa_irq_apic(0, mp_INT);
1852 pin2 = ioapic_i8259.pin;
1853 apic2 = ioapic_i8259.apic;
1854
1855 if (pin1 == 0)
1856 timer_uses_ioapic_pin_0 = 1;
1857
1858 apic_printk(APIC_VERBOSE,KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
1859 vector, apic1, pin1, apic2, pin2);
1860
1861 if (pin1 != -1) {
1862 /*
1863 * Ok, does IRQ0 through the IOAPIC work?
1864 */
1865 unmask_IO_APIC_irq(0);
1866 if (!no_timer_check && timer_irq_works()) {
1867 nmi_watchdog_default();
1868 if (nmi_watchdog == NMI_IO_APIC) {
1869 disable_8259A_irq(0);
1870 setup_nmi();
1871 enable_8259A_irq(0);
1872 }
1873 if (disable_timer_pin_1 > 0)
1874 clear_IO_APIC_pin(0, pin1);
1875 return;
1876 }
1877 clear_IO_APIC_pin(apic1, pin1);
1878 apic_printk(APIC_QUIET,KERN_ERR "..MP-BIOS bug: 8254 timer not "
1879 "connected to IO-APIC\n");
1880 }
1881
1882 apic_printk(APIC_VERBOSE,KERN_INFO "...trying to set up timer (IRQ0) "
1883 "through the 8259A ... ");
1884 if (pin2 != -1) {
1885 apic_printk(APIC_VERBOSE,"\n..... (found apic %d pin %d) ...",
1886 apic2, pin2);
1887 /*
1888 * legacy devices should be connected to IO APIC #0
1889 */
1890 setup_ExtINT_IRQ0_pin(apic2, pin2, vector);
1891 if (timer_irq_works()) {
1892 apic_printk(APIC_VERBOSE," works.\n");
1893 nmi_watchdog_default();
1894 if (nmi_watchdog == NMI_IO_APIC) {
1895 setup_nmi();
1896 }
1897 return;
1898 }
1899 /*
1900 * Cleanup, just in case ...
1901 */
1902 clear_IO_APIC_pin(apic2, pin2);
1903 }
1904 apic_printk(APIC_VERBOSE," failed.\n");
1905
1906 if (nmi_watchdog == NMI_IO_APIC) {
1907 printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
1908 nmi_watchdog = 0;
1909 }
1910
1911 apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
1912
1913 disable_8259A_irq(0);
1914 irq_desc[0].chip = &lapic_irq_type;
1915 apic_write(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */
1916 enable_8259A_irq(0);
1917
1918 if (timer_irq_works()) {
1919 apic_printk(APIC_VERBOSE," works.\n");
1920 return;
1921 }
1922 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
1923 apic_printk(APIC_VERBOSE," failed.\n");
1924
1925 apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as ExtINT IRQ...");
1926
1927 init_8259A(0);
1928 make_8259A_irq(0);
1929 apic_write(APIC_LVT0, APIC_DM_EXTINT);
1930
1931 unlock_ExtINT_logic();
1932
1933 if (timer_irq_works()) {
1934 apic_printk(APIC_VERBOSE," works.\n");
1935 return;
1936 }
1937 apic_printk(APIC_VERBOSE," failed :(.\n");
1938 panic("IO-APIC + timer doesn't work! Try using the 'noapic' kernel parameter\n");
1939 }
1940
1941 static int __init notimercheck(char *s)
1942 {
1943 no_timer_check = 1;
1944 return 1;
1945 }
1946 __setup("no_timer_check", notimercheck);
1947
1948 /*
1949 *
1950 * IRQs that are handled by the PIC in the MPS IOAPIC case.
1951 * - IRQ2 is the cascade IRQ, and cannot be an IO-APIC IRQ.
1952 * Linux doesn't really care, as it's not actually used
1953 * for any interrupt handling anyway.
1954 */
1955 #define PIC_IRQS (1<<2)
1956
1957 void __init setup_IO_APIC(void)
1958 {
1959 enable_IO_APIC();
1960
1961 if (acpi_ioapic)
1962 io_apic_irqs = ~0; /* all IRQs go through IOAPIC */
1963 else
1964 io_apic_irqs = ~PIC_IRQS;
1965
1966 apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
1967
1968 /*
1969 * Set up the IO-APIC IRQ routing table.
1970 */
1971 if (!acpi_ioapic)
1972 setup_ioapic_ids_from_mpc();
1973 sync_Arb_IDs();
1974 setup_IO_APIC_irqs();
1975 init_IO_APIC_traps();
1976 check_timer();
1977 if (!acpi_ioapic)
1978 print_IO_APIC();
1979 }
1980
1981 struct sysfs_ioapic_data {
1982 struct sys_device dev;
1983 struct IO_APIC_route_entry entry[0];
1984 };
1985 static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS];
1986
1987 static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
1988 {
1989 struct IO_APIC_route_entry *entry;
1990 struct sysfs_ioapic_data *data;
1991 unsigned long flags;
1992 int i;
1993
1994 data = container_of(dev, struct sysfs_ioapic_data, dev);
1995 entry = data->entry;
1996 spin_lock_irqsave(&ioapic_lock, flags);
1997 for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
1998 *(((int *)entry) + 1) = io_apic_read(dev->id, 0x11 + 2 * i);
1999 *(((int *)entry) + 0) = io_apic_read(dev->id, 0x10 + 2 * i);
2000 }
2001 spin_unlock_irqrestore(&ioapic_lock, flags);
2002
2003 return 0;
2004 }
2005
2006 static int ioapic_resume(struct sys_device *dev)
2007 {
2008 struct IO_APIC_route_entry *entry;
2009 struct sysfs_ioapic_data *data;
2010 unsigned long flags;
2011 union IO_APIC_reg_00 reg_00;
2012 int i;
2013
2014 data = container_of(dev, struct sysfs_ioapic_data, dev);
2015 entry = data->entry;
2016
2017 spin_lock_irqsave(&ioapic_lock, flags);
2018 reg_00.raw = io_apic_read(dev->id, 0);
2019 if (reg_00.bits.ID != mp_ioapics[dev->id].mpc_apicid) {
2020 reg_00.bits.ID = mp_ioapics[dev->id].mpc_apicid;
2021 io_apic_write(dev->id, 0, reg_00.raw);
2022 }
2023 for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
2024 io_apic_write(dev->id, 0x11+2*i, *(((int *)entry)+1));
2025 io_apic_write(dev->id, 0x10+2*i, *(((int *)entry)+0));
2026 }
2027 spin_unlock_irqrestore(&ioapic_lock, flags);
2028
2029 return 0;
2030 }
2031
2032 static struct sysdev_class ioapic_sysdev_class = {
2033 set_kset_name("ioapic"),
2034 .suspend = ioapic_suspend,
2035 .resume = ioapic_resume,
2036 };
2037
2038 static int __init ioapic_init_sysfs(void)
2039 {
2040 struct sys_device * dev;
2041 int i, size, error = 0;
2042
2043 error = sysdev_class_register(&ioapic_sysdev_class);
2044 if (error)
2045 return error;
2046
2047 for (i = 0; i < nr_ioapics; i++ ) {
2048 size = sizeof(struct sys_device) + nr_ioapic_registers[i]
2049 * sizeof(struct IO_APIC_route_entry);
2050 mp_ioapic_data[i] = kmalloc(size, GFP_KERNEL);
2051 if (!mp_ioapic_data[i]) {
2052 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
2053 continue;
2054 }
2055 memset(mp_ioapic_data[i], 0, size);
2056 dev = &mp_ioapic_data[i]->dev;
2057 dev->id = i;
2058 dev->cls = &ioapic_sysdev_class;
2059 error = sysdev_register(dev);
2060 if (error) {
2061 kfree(mp_ioapic_data[i]);
2062 mp_ioapic_data[i] = NULL;
2063 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
2064 continue;
2065 }
2066 }
2067
2068 return 0;
2069 }
2070
2071 device_initcall(ioapic_init_sysfs);
2072
2073 /* --------------------------------------------------------------------------
2074 ACPI-based IOAPIC Configuration
2075 -------------------------------------------------------------------------- */
2076
2077 #ifdef CONFIG_ACPI
2078
2079 #define IO_APIC_MAX_ID 0xFE
2080
2081 int __init io_apic_get_version (int ioapic)
2082 {
2083 union IO_APIC_reg_01 reg_01;
2084 unsigned long flags;
2085
2086 spin_lock_irqsave(&ioapic_lock, flags);
2087 reg_01.raw = io_apic_read(ioapic, 1);
2088 spin_unlock_irqrestore(&ioapic_lock, flags);
2089
2090 return reg_01.bits.version;
2091 }
2092
2093
2094 int __init io_apic_get_redir_entries (int ioapic)
2095 {
2096 union IO_APIC_reg_01 reg_01;
2097 unsigned long flags;
2098
2099 spin_lock_irqsave(&ioapic_lock, flags);
2100 reg_01.raw = io_apic_read(ioapic, 1);
2101 spin_unlock_irqrestore(&ioapic_lock, flags);
2102
2103 return reg_01.bits.entries;
2104 }
2105
2106
2107 int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity)
2108 {
2109 struct IO_APIC_route_entry entry;
2110 unsigned long flags;
2111
2112 if (!IO_APIC_IRQ(irq)) {
2113 apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
2114 ioapic);
2115 return -EINVAL;
2116 }
2117
2118 /*
2119 * Generate a PCI IRQ routing entry and program the IOAPIC accordingly.
2120 * Note that we mask (disable) IRQs now -- these get enabled when the
2121 * corresponding device driver registers for this IRQ.
2122 */
2123
2124 memset(&entry,0,sizeof(entry));
2125
2126 entry.delivery_mode = INT_DELIVERY_MODE;
2127 entry.dest_mode = INT_DEST_MODE;
2128 entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
2129 entry.trigger = triggering;
2130 entry.polarity = polarity;
2131 entry.mask = 1; /* Disabled (masked) */
2132
2133 irq = gsi_irq_sharing(irq);
2134 /*
2135 * IRQs < 16 are already in the irq_2_pin[] map
2136 */
2137 if (irq >= 16)
2138 add_pin_to_irq(irq, ioapic, pin);
2139
2140 entry.vector = assign_irq_vector(irq);
2141
2142 apic_printk(APIC_VERBOSE,KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry (%d-%d -> 0x%x -> "
2143 "IRQ %d Mode:%i Active:%i)\n", ioapic,
2144 mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
2145 triggering, polarity);
2146
2147 ioapic_register_intr(irq, entry.vector, triggering);
2148
2149 if (!ioapic && (irq < 16))
2150 disable_8259A_irq(irq);
2151
2152 spin_lock_irqsave(&ioapic_lock, flags);
2153 io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1));
2154 io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0));
2155 set_native_irq_info(use_pci_vector() ? entry.vector : irq, TARGET_CPUS);
2156 spin_unlock_irqrestore(&ioapic_lock, flags);
2157
2158 return 0;
2159 }
2160
2161 #endif /* CONFIG_ACPI */
2162
2163
2164 /*
2165 * This function is currently only a helper for the SMP boot process, where we need
2166 * to reprogram the ioredtbls to cater for the CPUs which have come online, so the
2167 * mask in all cases should simply be TARGET_CPUS.
2168 */
2169 #ifdef CONFIG_SMP
2170 void __init setup_ioapic_dest(void)
2171 {
2172 int pin, ioapic, irq, irq_entry;
2173
2174 if (skip_ioapic_setup == 1)
2175 return;
2176
2177 for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
2178 for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
2179 irq_entry = find_irq_entry(ioapic, pin, mp_INT);
2180 if (irq_entry == -1)
2181 continue;
2182 irq = pin_2_irq(irq_entry, ioapic, pin);
2183 set_ioapic_affinity_irq(irq, TARGET_CPUS);
2184 }
2185
2186 }
2187 }
2188 #endif