arch/x86_64/kernel/io_apic.c
1/*
2 * Intel IO-APIC support for multi-Pentium hosts.
3 *
4 * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
5 *
6 * Many thanks to Stig Venaas for trying out countless experimental
7 * patches and reporting/debugging problems patiently!
8 *
9 * (c) 1999, Multiple IO-APIC support, developed by
10 * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
11 * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
12 * further tested and cleaned up by Zach Brown <zab@redhat.com>
13 * and Ingo Molnar <mingo@redhat.com>
14 *
15 * Fixes
16 * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
17 * thanks to Eric Gilmore
18 * and Rolf G. Tews
19 * for testing these extensively
20 * Paul Diefenbaugh : Added full ACPI support
21 */
22
23#include <linux/mm.h>
24#include <linux/interrupt.h>
25#include <linux/init.h>
26#include <linux/delay.h>
27#include <linux/sched.h>
28#include <linux/config.h>
29#include <linux/smp_lock.h>
30#include <linux/mc146818rtc.h>
31#include <linux/acpi.h>
32#include <linux/sysdev.h>
33
34#include <asm/io.h>
35#include <asm/smp.h>
36#include <asm/desc.h>
37#include <asm/proto.h>
38#include <asm/mach_apic.h>
 39#include <asm/acpi.h>
40
41#define __apicdebuginit __init
42
43int sis_apic_bug; /* not actually supported, dummy for compile */
44
45static int no_timer_check;
46
47int disable_timer_pin_1 __initdata;
48
49static DEFINE_SPINLOCK(ioapic_lock);
50
51/*
52 * # of IRQ routing registers
53 */
54int nr_ioapic_registers[MAX_IO_APICS];
55
56/*
57 * Rough estimation of how many shared IRQs there are, can
58 * be changed anytime.
59 */
 60#define MAX_PLUS_SHARED_IRQS NR_IRQ_VECTORS
61#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
62
63/*
64 * This is performance-critical, we want to do it O(1)
65 *
66 * the indexing order of this array favors 1:1 mappings
67 * between pins and IRQs.
68 */
69
70static struct irq_pin_list {
71 short apic, pin, next;
72} irq_2_pin[PIN_MAP_SIZE];
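/*
 * irq_2_pin[] layout: entry 'irq' is the list head for that IRQ and
 * pin == -1 marks an unused head.  When an IRQ is fed by more than one
 * (apic, pin) pair, add_pin_to_irq() chains extra entries (allocated
 * from slot NR_IRQS upwards) through the 'next' index; next == 0
 * terminates the chain.
 */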
73
 74int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1};
75#ifdef CONFIG_PCI_MSI
76#define vector_to_irq(vector) \
77 (platform_legacy_irq(vector) ? vector : vector_irq[vector])
78#else
79#define vector_to_irq(vector) (vector)
80#endif
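/*
 * With CONFIG_PCI_MSI the irq_desc[] array is indexed by vector rather
 * than by IRQ number, so vector_to_irq() translates a vector back to
 * its IRQ via vector_irq[]; values that platform_legacy_irq() accepts
 * as legacy IRQs are returned unchanged.  Without MSI support the
 * mapping is the identity.
 */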
81
82#define __DO_ACTION(R, ACTION, FINAL) \
83 \
84{ \
85 int pin; \
86 struct irq_pin_list *entry = irq_2_pin + irq; \
87 \
 88	BUG_ON(irq >= NR_IRQS);					\
89 for (;;) { \
90 unsigned int reg; \
91 pin = entry->pin; \
92 if (pin == -1) \
93 break; \
94 reg = io_apic_read(entry->apic, 0x10 + R + pin*2); \
95 reg ACTION; \
96 io_apic_modify(entry->apic, reg); \
97 if (!entry->next) \
98 break; \
99 entry = irq_2_pin + entry->next; \
100 } \
101 FINAL; \
102}
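/*
 * __DO_ACTION walks the irq_2_pin chain for 'irq' and, for every
 * (apic, pin) feeding it, reads the 32-bit word at I/O APIC register
 * 0x10 + R + pin*2, applies ACTION to it and writes it back with
 * io_apic_modify(); FINAL runs once after the loop.  R = 0 selects the
 * low word of the redirection entry (mask bit etc.), R = 1 the high
 * word (destination), as used by set_ioapic_affinity_irq().
 */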
103
104#ifdef CONFIG_SMP
105static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
106{
107 unsigned long flags;
108 unsigned int dest;
109 cpumask_t tmp;
110
111 cpus_and(tmp, mask, cpu_online_map);
112 if (cpus_empty(tmp))
113 tmp = TARGET_CPUS;
114
115 cpus_and(mask, tmp, CPU_MASK_ALL);
116
117 dest = cpu_mask_to_apicid(mask);
118
119 /*
120 * Only the high 8 bits are valid.
121 */
122 dest = SET_APIC_LOGICAL_ID(dest);
123
124 spin_lock_irqsave(&ioapic_lock, flags);
125 __DO_ACTION(1, = dest, )
126 set_irq_info(irq, mask);
127 spin_unlock_irqrestore(&ioapic_lock, flags);
128}
129#endif
130
131static u8 gsi_2_irq[NR_IRQ_VECTORS] = { [0 ... NR_IRQ_VECTORS-1] = 0xFF };
132
133/*
134 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
135 * shared ISA-space IRQs, so we have to support them. We are super
136 * fast in the common case, and fast for shared ISA-space IRQs.
137 */
138static void add_pin_to_irq(unsigned int irq, int apic, int pin)
139{
140 static int first_free_entry = NR_IRQS;
141 struct irq_pin_list *entry = irq_2_pin + irq;
142
 143	BUG_ON(irq >= NR_IRQS);
144 while (entry->next)
145 entry = irq_2_pin + entry->next;
146
147 if (entry->pin != -1) {
148 entry->next = first_free_entry;
149 entry = irq_2_pin + entry->next;
150 if (++first_free_entry >= PIN_MAP_SIZE)
 151			panic("io_apic.c: ran out of irq_2_pin entries!");
152 }
153 entry->apic = apic;
154 entry->pin = pin;
155}
156
157
158#define DO_ACTION(name,R,ACTION, FINAL) \
159 \
160 static void name##_IO_APIC_irq (unsigned int irq) \
161 __DO_ACTION(R, ACTION, FINAL)
162
163DO_ACTION( __mask, 0, |= 0x00010000, io_apic_sync(entry->apic) )
164 /* mask = 1 */
165DO_ACTION( __unmask, 0, &= 0xfffeffff, )
166 /* mask = 0 */
167
168static void mask_IO_APIC_irq (unsigned int irq)
169{
170 unsigned long flags;
171
172 spin_lock_irqsave(&ioapic_lock, flags);
173 __mask_IO_APIC_irq(irq);
174 spin_unlock_irqrestore(&ioapic_lock, flags);
175}
176
177static void unmask_IO_APIC_irq (unsigned int irq)
178{
179 unsigned long flags;
180
181 spin_lock_irqsave(&ioapic_lock, flags);
182 __unmask_IO_APIC_irq(irq);
183 spin_unlock_irqrestore(&ioapic_lock, flags);
184}
185
186static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
187{
188 struct IO_APIC_route_entry entry;
189 unsigned long flags;
190
191 /* Check delivery_mode to be sure we're not clearing an SMI pin */
192 spin_lock_irqsave(&ioapic_lock, flags);
193 *(((int*)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
194 *(((int*)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
195 spin_unlock_irqrestore(&ioapic_lock, flags);
196 if (entry.delivery_mode == dest_SMI)
197 return;
198 /*
199 * Disable it in the IO-APIC irq-routing table:
200 */
201 memset(&entry, 0, sizeof(entry));
202 entry.mask = 1;
203 spin_lock_irqsave(&ioapic_lock, flags);
204 io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry) + 0));
205 io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry) + 1));
206 spin_unlock_irqrestore(&ioapic_lock, flags);
207}
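/*
 * Each I/O APIC pin has a 64-bit redirection entry, accessed through
 * two 32-bit indirect registers: 0x10 + 2*pin holds the low word
 * (vector, delivery mode, mask bit, ...), 0x11 + 2*pin the high word
 * (destination).  All read/modify/write sequences on these registers
 * in this file are done under ioapic_lock.
 */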
208
209static void clear_IO_APIC (void)
210{
211 int apic, pin;
212
213 for (apic = 0; apic < nr_ioapics; apic++)
214 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
215 clear_IO_APIC_pin(apic, pin);
216}
217
218/*
219 * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
220 * specific CPU-side IRQs.
221 */
222
223#define MAX_PIRQS 8
224static int pirq_entries [MAX_PIRQS];
225static int pirqs_enabled;
226int skip_ioapic_setup;
227int ioapic_force;
228
229/* dummy parsing: see setup.c */
230
231static int __init disable_ioapic_setup(char *str)
232{
233 skip_ioapic_setup = 1;
234 return 1;
235}
236
237static int __init enable_ioapic_setup(char *str)
238{
239 ioapic_force = 1;
240 skip_ioapic_setup = 0;
241 return 1;
242}
243
244__setup("noapic", disable_ioapic_setup);
245__setup("apic", enable_ioapic_setup);
246
247#include <asm/pci-direct.h>
248#include <linux/pci_ids.h>
249#include <linux/pci.h>
250
251/* Temporary Hack. Nvidia and VIA boards currently only work with IO-APIC
252 off. Check for an Nvidia or VIA PCI bridge and turn it off.
253 Use pci direct infrastructure because this runs before the PCI subsystem.
254
255 Can be overwritten with "apic"
256
257 And another hack to disable the IOMMU on VIA chipsets.
258
259 Kludge-O-Rama. */
260void __init check_ioapic(void)
261{
262 int num,slot,func;
263 if (ioapic_force)
264 return;
265
266 /* Poor man's PCI discovery */
267 for (num = 0; num < 32; num++) {
268 for (slot = 0; slot < 32; slot++) {
269 for (func = 0; func < 8; func++) {
270 u32 class;
271 u32 vendor;
272 u8 type;
273 class = read_pci_config(num,slot,func,
274 PCI_CLASS_REVISION);
275 if (class == 0xffffffff)
276 break;
277
278 if ((class >> 16) != PCI_CLASS_BRIDGE_PCI)
279 continue;
280
281 vendor = read_pci_config(num, slot, func,
282 PCI_VENDOR_ID);
283 vendor &= 0xffff;
284 switch (vendor) {
285 case PCI_VENDOR_ID_VIA:
286#ifdef CONFIG_GART_IOMMU
287 if ((end_pfn >= (0xffffffff>>PAGE_SHIFT) ||
288 force_iommu) &&
289 !iommu_aperture_allowed) {
290 printk(KERN_INFO
291 "Looks like a VIA chipset. Disabling IOMMU. Overwrite with \"iommu=allowed\"\n");
292 iommu_aperture_disabled = 1;
293 }
294#endif
295 return;
296 case PCI_VENDOR_ID_NVIDIA:
297#ifdef CONFIG_ACPI
298 /* All timer overrides on Nvidia
299 seem to be wrong. Skip them. */
300 acpi_skip_timer_override = 1;
301 printk(KERN_INFO
302 "Nvidia board detected. Ignoring ACPI timer override.\n");
303#endif
304 /* RED-PEN skip them on mptables too? */
305 return;
306 }
307
308 /* No multi-function device? */
309 type = read_pci_config_byte(num,slot,func,
310 PCI_HEADER_TYPE);
311 if (!(type & 0x80))
312 break;
313 }
314 }
315 }
316}
317
318static int __init ioapic_pirq_setup(char *str)
319{
320 int i, max;
321 int ints[MAX_PIRQS+1];
322
323 get_options(str, ARRAY_SIZE(ints), ints);
324
325 for (i = 0; i < MAX_PIRQS; i++)
326 pirq_entries[i] = -1;
327
328 pirqs_enabled = 1;
329 apic_printk(APIC_VERBOSE, "PIRQ redirection, working around broken MP-BIOS.\n");
330 max = MAX_PIRQS;
331 if (ints[0] < MAX_PIRQS)
332 max = ints[0];
333
334 for (i = 0; i < max; i++) {
335 apic_printk(APIC_VERBOSE, "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
336 /*
337 * PIRQs are mapped upside down, usually.
338 */
339 pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
340 }
341 return 1;
342}
343
344__setup("pirq=", ioapic_pirq_setup);
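/*
 * Example (values are arbitrary): "pirq=15,11,10" assigns the listed
 * IRQs to the highest-numbered PIRQ lines first (PIRQ7 <- 15,
 * PIRQ6 <- 11, PIRQ5 <- 10), matching the "upside down" mapping noted
 * above.  A value of 0 disables that PIRQ (see pin_2_irq()).
 */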
345
346/*
347 * Find the IRQ entry number of a certain pin.
348 */
349static int find_irq_entry(int apic, int pin, int type)
350{
351 int i;
352
353 for (i = 0; i < mp_irq_entries; i++)
354 if (mp_irqs[i].mpc_irqtype == type &&
355 (mp_irqs[i].mpc_dstapic == mp_ioapics[apic].mpc_apicid ||
356 mp_irqs[i].mpc_dstapic == MP_APIC_ALL) &&
357 mp_irqs[i].mpc_dstirq == pin)
358 return i;
359
360 return -1;
361}
362
363/*
364 * Find the pin to which IRQ[irq] (ISA) is connected
365 */
 366static int find_isa_irq_pin(int irq, int type)
367{
368 int i;
369
370 for (i = 0; i < mp_irq_entries; i++) {
371 int lbus = mp_irqs[i].mpc_srcbus;
372
373 if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
374 mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
375 mp_bus_id_to_type[lbus] == MP_BUS_MCA) &&
376 (mp_irqs[i].mpc_irqtype == type) &&
377 (mp_irqs[i].mpc_srcbusirq == irq))
378
379 return mp_irqs[i].mpc_dstirq;
380 }
381 return -1;
382}
383
384/*
385 * Find a specific PCI IRQ entry.
386 * Not an __init, possibly needed by modules
387 */
388static int pin_2_irq(int idx, int apic, int pin);
389
390int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
391{
392 int apic, i, best_guess = -1;
393
394 apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
395 bus, slot, pin);
396 if (mp_bus_id_to_pci_bus[bus] == -1) {
397 apic_printk(APIC_VERBOSE, "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
398 return -1;
399 }
400 for (i = 0; i < mp_irq_entries; i++) {
401 int lbus = mp_irqs[i].mpc_srcbus;
402
403 for (apic = 0; apic < nr_ioapics; apic++)
404 if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic ||
405 mp_irqs[i].mpc_dstapic == MP_APIC_ALL)
406 break;
407
408 if ((mp_bus_id_to_type[lbus] == MP_BUS_PCI) &&
409 !mp_irqs[i].mpc_irqtype &&
410 (bus == lbus) &&
411 (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) {
412 int irq = pin_2_irq(i,apic,mp_irqs[i].mpc_dstirq);
413
414 if (!(apic || IO_APIC_IRQ(irq)))
415 continue;
416
417 if (pin == (mp_irqs[i].mpc_srcbusirq & 3))
418 return irq;
419 /*
420 * Use the first all-but-pin matching entry as a
421 * best-guess fuzzy result for broken mptables.
422 */
423 if (best_guess < 0)
424 best_guess = irq;
425 }
426 }
 427	BUG_ON(best_guess >= NR_IRQS);
428 return best_guess;
429}
430
431/*
432 * EISA Edge/Level control register, ELCR
433 */
434static int EISA_ELCR(unsigned int irq)
435{
436 if (irq < 16) {
437 unsigned int port = 0x4d0 + (irq >> 3);
438 return (inb(port) >> (irq & 7)) & 1;
439 }
440 apic_printk(APIC_VERBOSE, "Broken MPtable reports ISA irq %d\n", irq);
441 return 0;
442}
443
444/* EISA interrupts are always polarity zero and can be edge or level
445 * trigger depending on the ELCR value. If an interrupt is listed as
446 * EISA conforming in the MP table, that means its trigger type must
447 * be read in from the ELCR */
448
449#define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mpc_srcbusirq))
450#define default_EISA_polarity(idx) (0)
451
452/* ISA interrupts are always polarity zero edge triggered,
453 * when listed as conforming in the MP table. */
454
455#define default_ISA_trigger(idx) (0)
456#define default_ISA_polarity(idx) (0)
457
458/* PCI interrupts are always polarity one level triggered,
459 * when listed as conforming in the MP table. */
460
461#define default_PCI_trigger(idx) (1)
462#define default_PCI_polarity(idx) (1)
463
464/* MCA interrupts are always polarity zero level triggered,
465 * when listed as conforming in the MP table. */
466
467#define default_MCA_trigger(idx) (1)
468#define default_MCA_polarity(idx) (0)
469
470static int __init MPBIOS_polarity(int idx)
471{
472 int bus = mp_irqs[idx].mpc_srcbus;
473 int polarity;
474
475 /*
476 * Determine IRQ line polarity (high active or low active):
477 */
478 switch (mp_irqs[idx].mpc_irqflag & 3)
479 {
480 case 0: /* conforms, ie. bus-type dependent polarity */
481 {
482 switch (mp_bus_id_to_type[bus])
483 {
484 case MP_BUS_ISA: /* ISA pin */
485 {
486 polarity = default_ISA_polarity(idx);
487 break;
488 }
489 case MP_BUS_EISA: /* EISA pin */
490 {
491 polarity = default_EISA_polarity(idx);
492 break;
493 }
494 case MP_BUS_PCI: /* PCI pin */
495 {
496 polarity = default_PCI_polarity(idx);
497 break;
498 }
499 case MP_BUS_MCA: /* MCA pin */
500 {
501 polarity = default_MCA_polarity(idx);
502 break;
503 }
504 default:
505 {
506 printk(KERN_WARNING "broken BIOS!!\n");
507 polarity = 1;
508 break;
509 }
510 }
511 break;
512 }
513 case 1: /* high active */
514 {
515 polarity = 0;
516 break;
517 }
518 case 2: /* reserved */
519 {
520 printk(KERN_WARNING "broken BIOS!!\n");
521 polarity = 1;
522 break;
523 }
524 case 3: /* low active */
525 {
526 polarity = 1;
527 break;
528 }
529 default: /* invalid */
530 {
531 printk(KERN_WARNING "broken BIOS!!\n");
532 polarity = 1;
533 break;
534 }
535 }
536 return polarity;
537}
538
539static int MPBIOS_trigger(int idx)
540{
541 int bus = mp_irqs[idx].mpc_srcbus;
542 int trigger;
543
544 /*
545 * Determine IRQ trigger mode (edge or level sensitive):
546 */
547 switch ((mp_irqs[idx].mpc_irqflag>>2) & 3)
548 {
549 case 0: /* conforms, ie. bus-type dependent */
550 {
551 switch (mp_bus_id_to_type[bus])
552 {
553 case MP_BUS_ISA: /* ISA pin */
554 {
555 trigger = default_ISA_trigger(idx);
556 break;
557 }
558 case MP_BUS_EISA: /* EISA pin */
559 {
560 trigger = default_EISA_trigger(idx);
561 break;
562 }
563 case MP_BUS_PCI: /* PCI pin */
564 {
565 trigger = default_PCI_trigger(idx);
566 break;
567 }
568 case MP_BUS_MCA: /* MCA pin */
569 {
570 trigger = default_MCA_trigger(idx);
571 break;
572 }
573 default:
574 {
575 printk(KERN_WARNING "broken BIOS!!\n");
576 trigger = 1;
577 break;
578 }
579 }
580 break;
581 }
582 case 1: /* edge */
583 {
584 trigger = 0;
585 break;
586 }
587 case 2: /* reserved */
588 {
589 printk(KERN_WARNING "broken BIOS!!\n");
590 trigger = 1;
591 break;
592 }
593 case 3: /* level */
594 {
595 trigger = 1;
596 break;
597 }
598 default: /* invalid */
599 {
600 printk(KERN_WARNING "broken BIOS!!\n");
601 trigger = 0;
602 break;
603 }
604 }
605 return trigger;
606}
607
608static inline int irq_polarity(int idx)
609{
610 return MPBIOS_polarity(idx);
611}
612
613static inline int irq_trigger(int idx)
614{
615 return MPBIOS_trigger(idx);
616}
617
618static int next_irq = 16;
619
620/*
621 * gsi_irq_sharing -- Name overload! "irq" can be either a legacy IRQ
622 * in the range 0-15, a linux IRQ in the range 0-223, or a GSI number
623 * from ACPI, which can reach 800 in large boxen.
624 *
625 * Compact the sparse GSI space into a sequential IRQ series and reuse
626 * vectors if possible.
627 */
628int gsi_irq_sharing(int gsi)
629{
630 int i, tries, vector;
631
632 BUG_ON(gsi >= NR_IRQ_VECTORS);
633
634 if (platform_legacy_irq(gsi))
635 return gsi;
636
637 if (gsi_2_irq[gsi] != 0xFF)
638 return (int)gsi_2_irq[gsi];
639
640 tries = NR_IRQS;
641 try_again:
642 vector = assign_irq_vector(gsi);
643
644 /*
645 * Sharing vectors means sharing IRQs, so scan irq_vectors for previous
646 * use of vector and if found, return that IRQ. However, we never want
647 * to share legacy IRQs, which usually have a different trigger mode
648 * than PCI.
649 */
650 for (i = 0; i < NR_IRQS; i++)
651 if (IO_APIC_VECTOR(i) == vector)
652 break;
653 if (platform_legacy_irq(i)) {
654 if (--tries >= 0) {
655 IO_APIC_VECTOR(i) = 0;
656 goto try_again;
657 }
658 panic("gsi_irq_sharing: didn't find an IRQ using vector 0x%02X for GSI %d", vector, gsi);
659 }
660 if (i < NR_IRQS) {
661 gsi_2_irq[gsi] = i;
662 printk(KERN_INFO "GSI %d sharing vector 0x%02X and IRQ %d\n",
663 gsi, vector, i);
664 return i;
665 }
666
667 i = next_irq++;
668 BUG_ON(i >= NR_IRQS);
669 gsi_2_irq[gsi] = i;
670 IO_APIC_VECTOR(i) = vector;
671 printk(KERN_INFO "GSI %d assigned vector 0x%02X and IRQ %d\n",
672 gsi, vector, i);
673 return i;
674}
675
676static int pin_2_irq(int idx, int apic, int pin)
677{
678 int irq, i;
679 int bus = mp_irqs[idx].mpc_srcbus;
680
681 /*
682 * Debugging check, we are in big trouble if this message pops up!
683 */
684 if (mp_irqs[idx].mpc_dstirq != pin)
685 printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
686
687 switch (mp_bus_id_to_type[bus])
688 {
689 case MP_BUS_ISA: /* ISA pin */
690 case MP_BUS_EISA:
691 case MP_BUS_MCA:
692 {
693 irq = mp_irqs[idx].mpc_srcbusirq;
694 break;
695 }
696 case MP_BUS_PCI: /* PCI pin */
697 {
698 /*
699 * PCI IRQs are mapped in order
700 */
701 i = irq = 0;
702 while (i < apic)
703 irq += nr_ioapic_registers[i++];
704 irq += pin;
 705			irq = gsi_irq_sharing(irq);
706 break;
707 }
708 default:
709 {
710 printk(KERN_ERR "unknown bus type %d.\n",bus);
711 irq = 0;
712 break;
713 }
714 }
 715	BUG_ON(irq >= NR_IRQS);
716
717 /*
718 * PCI IRQ command line redirection. Yes, limits are hardcoded.
719 */
720 if ((pin >= 16) && (pin <= 23)) {
721 if (pirq_entries[pin-16] != -1) {
722 if (!pirq_entries[pin-16]) {
723 apic_printk(APIC_VERBOSE, "disabling PIRQ%d\n", pin-16);
724 } else {
725 irq = pirq_entries[pin-16];
726 apic_printk(APIC_VERBOSE, "using PIRQ%d -> IRQ %d\n",
727 pin-16, irq);
728 }
729 }
730 }
 731	BUG_ON(irq >= NR_IRQS);
732 return irq;
733}
734
735static inline int IO_APIC_irq_trigger(int irq)
736{
737 int apic, idx, pin;
738
739 for (apic = 0; apic < nr_ioapics; apic++) {
740 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
741 idx = find_irq_entry(apic,pin,mp_INT);
742 if ((idx != -1) && (irq == pin_2_irq(idx,apic,pin)))
743 return irq_trigger(idx);
744 }
745 }
746 /*
747 * nonexistent IRQs are edge default
748 */
749 return 0;
750}
751
752/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
 753u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 };
754
755int assign_irq_vector(int irq)
756{
757 static int current_vector = FIRST_DEVICE_VECTOR, offset = 0;
758
759 BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS);
760 if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0)
761 return IO_APIC_VECTOR(irq);
762next:
763 current_vector += 8;
764 if (current_vector == IA32_SYSCALL_VECTOR)
765 goto next;
766
767 if (current_vector >= FIRST_SYSTEM_VECTOR) {
768 /* If we run out of vectors on large boxen, must share them. */
769 offset = (offset + 1) % 8;
770 current_vector = FIRST_DEVICE_VECTOR + offset;
771 }
772
773 vector_irq[current_vector] = irq;
774 if (irq != AUTO_ASSIGN)
775 IO_APIC_VECTOR(irq) = current_vector;
776
777 return current_vector;
778}
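/*
 * Vectors are handed out from FIRST_DEVICE_VECTOR upwards in steps of
 * 8, skipping the 0x80 syscall vector, so successive allocations are
 * spread across APIC priority levels (the level is vector >> 4) rather
 * than filling one level at a time.  Once FIRST_SYSTEM_VECTOR is
 * reached the offset is bumped and allocation wraps around, so vectors
 * start to be shared on boxes with very many interrupt sources (see
 * gsi_irq_sharing()).
 */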
779
780extern void (*interrupt[NR_IRQS])(void);
781static struct hw_interrupt_type ioapic_level_type;
782static struct hw_interrupt_type ioapic_edge_type;
783
784#define IOAPIC_AUTO -1
785#define IOAPIC_EDGE 0
786#define IOAPIC_LEVEL 1
787
788static inline void ioapic_register_intr(int irq, int vector, unsigned long trigger)
789{
790 if (use_pci_vector() && !platform_legacy_irq(irq)) {
791 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
792 trigger == IOAPIC_LEVEL)
793 irq_desc[vector].handler = &ioapic_level_type;
794 else
795 irq_desc[vector].handler = &ioapic_edge_type;
796 set_intr_gate(vector, interrupt[vector]);
797 } else {
798 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
799 trigger == IOAPIC_LEVEL)
800 irq_desc[irq].handler = &ioapic_level_type;
801 else
802 irq_desc[irq].handler = &ioapic_edge_type;
803 set_intr_gate(vector, interrupt[irq]);
804 }
805}
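/*
 * With vector-based indexing (PCI MSI) the handler is installed at
 * irq_desc[vector], otherwise at irq_desc[irq].  Level-triggered
 * sources get ioapic_level_type, edge-triggered ones ioapic_edge_type;
 * IOAPIC_AUTO looks the trigger mode up in the MP table via
 * IO_APIC_irq_trigger().
 */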
806
807static void __init setup_IO_APIC_irqs(void)
808{
809 struct IO_APIC_route_entry entry;
810 int apic, pin, idx, irq, first_notcon = 1, vector;
811 unsigned long flags;
812
813 apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
814
815 for (apic = 0; apic < nr_ioapics; apic++) {
816 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
817
818 /*
819 * add it to the IO-APIC irq-routing table:
820 */
821 memset(&entry,0,sizeof(entry));
822
823 entry.delivery_mode = INT_DELIVERY_MODE;
824 entry.dest_mode = INT_DEST_MODE;
825 entry.mask = 0; /* enable IRQ */
826 entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
827
828 idx = find_irq_entry(apic,pin,mp_INT);
829 if (idx == -1) {
830 if (first_notcon) {
831 apic_printk(APIC_VERBOSE, KERN_DEBUG " IO-APIC (apicid-pin) %d-%d", mp_ioapics[apic].mpc_apicid, pin);
832 first_notcon = 0;
833 } else
834 apic_printk(APIC_VERBOSE, ", %d-%d", mp_ioapics[apic].mpc_apicid, pin);
835 continue;
836 }
837
838 entry.trigger = irq_trigger(idx);
839 entry.polarity = irq_polarity(idx);
840
841 if (irq_trigger(idx)) {
842 entry.trigger = 1;
843 entry.mask = 1;
844 entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
845 }
846
847 irq = pin_2_irq(idx, apic, pin);
848 add_pin_to_irq(irq, apic, pin);
849
850 if (!apic && !IO_APIC_IRQ(irq))
851 continue;
852
853 if (IO_APIC_IRQ(irq)) {
854 vector = assign_irq_vector(irq);
855 entry.vector = vector;
856
857 ioapic_register_intr(irq, vector, IOAPIC_AUTO);
858 if (!apic && (irq < 16))
859 disable_8259A_irq(irq);
860 }
861 spin_lock_irqsave(&ioapic_lock, flags);
862 io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
863 io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
 864			set_native_irq_info(irq, TARGET_CPUS);
865 spin_unlock_irqrestore(&ioapic_lock, flags);
866 }
867 }
868
869 if (!first_notcon)
870 apic_printk(APIC_VERBOSE," not connected.\n");
871}
872
873/*
874 * Set up the 8259A-master output pin as broadcast to all
875 * CPUs.
876 */
877static void __init setup_ExtINT_IRQ0_pin(unsigned int pin, int vector)
878{
879 struct IO_APIC_route_entry entry;
880 unsigned long flags;
881
882 memset(&entry,0,sizeof(entry));
883
884 disable_8259A_irq(0);
885
886 /* mask LVT0 */
887 apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
888
889 /*
890 * We use logical delivery to get the timer IRQ
891 * to the first CPU.
892 */
893 entry.dest_mode = INT_DEST_MODE;
894 entry.mask = 0; /* unmask IRQ now */
895 entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
896 entry.delivery_mode = INT_DELIVERY_MODE;
897 entry.polarity = 0;
898 entry.trigger = 0;
899 entry.vector = vector;
900
901 /*
902 * The timer IRQ doesn't have to know that behind the
903 * scene we have a 8259A-master in AEOI mode ...
904 */
905 irq_desc[0].handler = &ioapic_edge_type;
906
907 /*
908 * Add it to the IO-APIC irq-routing table:
909 */
910 spin_lock_irqsave(&ioapic_lock, flags);
911 io_apic_write(0, 0x11+2*pin, *(((int *)&entry)+1));
912 io_apic_write(0, 0x10+2*pin, *(((int *)&entry)+0));
913 spin_unlock_irqrestore(&ioapic_lock, flags);
914
915 enable_8259A_irq(0);
916}
917
918void __init UNEXPECTED_IO_APIC(void)
919{
920}
921
922void __apicdebuginit print_IO_APIC(void)
923{
924 int apic, i;
925 union IO_APIC_reg_00 reg_00;
926 union IO_APIC_reg_01 reg_01;
927 union IO_APIC_reg_02 reg_02;
928 unsigned long flags;
929
930 if (apic_verbosity == APIC_QUIET)
931 return;
932
933 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
934 for (i = 0; i < nr_ioapics; i++)
935 printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
936 mp_ioapics[i].mpc_apicid, nr_ioapic_registers[i]);
937
938 /*
939 * We are a bit conservative about what we expect. We have to
940 * know about every hardware change ASAP.
941 */
942 printk(KERN_INFO "testing the IO APIC.......................\n");
943
944 for (apic = 0; apic < nr_ioapics; apic++) {
945
946 spin_lock_irqsave(&ioapic_lock, flags);
947 reg_00.raw = io_apic_read(apic, 0);
948 reg_01.raw = io_apic_read(apic, 1);
949 if (reg_01.bits.version >= 0x10)
950 reg_02.raw = io_apic_read(apic, 2);
951 spin_unlock_irqrestore(&ioapic_lock, flags);
952
953 printk("\n");
954 printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mpc_apicid);
955 printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
956 printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
957 if (reg_00.bits.__reserved_1 || reg_00.bits.__reserved_2)
958 UNEXPECTED_IO_APIC();
959
960 printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
961 printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
962 if ( (reg_01.bits.entries != 0x0f) && /* older (Neptune) boards */
963 (reg_01.bits.entries != 0x17) && /* typical ISA+PCI boards */
964 (reg_01.bits.entries != 0x1b) && /* Compaq Proliant boards */
965 (reg_01.bits.entries != 0x1f) && /* dual Xeon boards */
966 (reg_01.bits.entries != 0x22) && /* bigger Xeon boards */
967 (reg_01.bits.entries != 0x2E) &&
968 (reg_01.bits.entries != 0x3F) &&
969 (reg_01.bits.entries != 0x03)
970 )
971 UNEXPECTED_IO_APIC();
972
973 printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
974 printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);
975 if ( (reg_01.bits.version != 0x01) && /* 82489DX IO-APICs */
976 (reg_01.bits.version != 0x02) && /* 82801BA IO-APICs (ICH2) */
977 (reg_01.bits.version != 0x10) && /* oldest IO-APICs */
978 (reg_01.bits.version != 0x11) && /* Pentium/Pro IO-APICs */
979 (reg_01.bits.version != 0x13) && /* Xeon IO-APICs */
980 (reg_01.bits.version != 0x20) /* Intel P64H (82806 AA) */
981 )
982 UNEXPECTED_IO_APIC();
983 if (reg_01.bits.__reserved_1 || reg_01.bits.__reserved_2)
984 UNEXPECTED_IO_APIC();
985
986 if (reg_01.bits.version >= 0x10) {
987 printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
988 printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
989 if (reg_02.bits.__reserved_1 || reg_02.bits.__reserved_2)
990 UNEXPECTED_IO_APIC();
991 }
992
993 printk(KERN_DEBUG ".... IRQ redirection table:\n");
994
995 printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol"
996 " Stat Dest Deli Vect: \n");
997
998 for (i = 0; i <= reg_01.bits.entries; i++) {
999 struct IO_APIC_route_entry entry;
1000
1001 spin_lock_irqsave(&ioapic_lock, flags);
1002 *(((int *)&entry)+0) = io_apic_read(apic, 0x10+i*2);
1003 *(((int *)&entry)+1) = io_apic_read(apic, 0x11+i*2);
1004 spin_unlock_irqrestore(&ioapic_lock, flags);
1005
1006 printk(KERN_DEBUG " %02x %03X %02X ",
1007 i,
1008 entry.dest.logical.logical_dest,
1009 entry.dest.physical.physical_dest
1010 );
1011
1012 printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
1013 entry.mask,
1014 entry.trigger,
1015 entry.irr,
1016 entry.polarity,
1017 entry.delivery_status,
1018 entry.dest_mode,
1019 entry.delivery_mode,
1020 entry.vector
1021 );
1022 }
1023 }
1024 if (use_pci_vector())
1025 printk(KERN_INFO "Using vector-based indexing\n");
1026 printk(KERN_DEBUG "IRQ to pin mappings:\n");
1027 for (i = 0; i < NR_IRQS; i++) {
1028 struct irq_pin_list *entry = irq_2_pin + i;
1029 if (entry->pin < 0)
1030 continue;
1031 if (use_pci_vector() && !platform_legacy_irq(i))
1032 printk(KERN_DEBUG "IRQ%d ", IO_APIC_VECTOR(i));
1033 else
1034 printk(KERN_DEBUG "IRQ%d ", i);
1035 for (;;) {
1036 printk("-> %d:%d", entry->apic, entry->pin);
1037 if (!entry->next)
1038 break;
1039 entry = irq_2_pin + entry->next;
1040 }
1041 printk("\n");
1042 }
1043
1044 printk(KERN_INFO ".................................... done.\n");
1045
1046 return;
1047}
1048
1049#if 0
1050
1051static __apicdebuginit void print_APIC_bitfield (int base)
1052{
1053 unsigned int v;
1054 int i, j;
1055
1056 if (apic_verbosity == APIC_QUIET)
1057 return;
1058
1059 printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
1060 for (i = 0; i < 8; i++) {
1061 v = apic_read(base + i*0x10);
1062 for (j = 0; j < 32; j++) {
1063 if (v & (1<<j))
1064 printk("1");
1065 else
1066 printk("0");
1067 }
1068 printk("\n");
1069 }
1070}
1071
1072void __apicdebuginit print_local_APIC(void * dummy)
1073{
1074 unsigned int v, ver, maxlvt;
1075
1076 if (apic_verbosity == APIC_QUIET)
1077 return;
1078
1079 printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
1080 smp_processor_id(), hard_smp_processor_id());
1081 v = apic_read(APIC_ID);
1082 printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, GET_APIC_ID(v));
1083 v = apic_read(APIC_LVR);
1084 printk(KERN_INFO "... APIC VERSION: %08x\n", v);
1085 ver = GET_APIC_VERSION(v);
1086 maxlvt = get_maxlvt();
1087
1088 v = apic_read(APIC_TASKPRI);
1089 printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
1090
1091 v = apic_read(APIC_ARBPRI);
1092 printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
1093 v & APIC_ARBPRI_MASK);
1094 v = apic_read(APIC_PROCPRI);
1095 printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
1096
1097 v = apic_read(APIC_EOI);
1098 printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
1099 v = apic_read(APIC_RRR);
1100 printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
1101 v = apic_read(APIC_LDR);
1102 printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
1103 v = apic_read(APIC_DFR);
1104 printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
1105 v = apic_read(APIC_SPIV);
1106 printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
1107
1108 printk(KERN_DEBUG "... APIC ISR field:\n");
1109 print_APIC_bitfield(APIC_ISR);
1110 printk(KERN_DEBUG "... APIC TMR field:\n");
1111 print_APIC_bitfield(APIC_TMR);
1112 printk(KERN_DEBUG "... APIC IRR field:\n");
1113 print_APIC_bitfield(APIC_IRR);
1114
1115 v = apic_read(APIC_ESR);
1116 printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
1117
1118 v = apic_read(APIC_ICR);
1119 printk(KERN_DEBUG "... APIC ICR: %08x\n", v);
1120 v = apic_read(APIC_ICR2);
1121 printk(KERN_DEBUG "... APIC ICR2: %08x\n", v);
1122
1123 v = apic_read(APIC_LVTT);
1124 printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
1125
1126 if (maxlvt > 3) { /* PC is LVT#4. */
1127 v = apic_read(APIC_LVTPC);
1128 printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
1129 }
1130 v = apic_read(APIC_LVT0);
1131 printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
1132 v = apic_read(APIC_LVT1);
1133 printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
1134
1135 if (maxlvt > 2) { /* ERR is LVT#3. */
1136 v = apic_read(APIC_LVTERR);
1137 printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
1138 }
1139
1140 v = apic_read(APIC_TMICT);
1141 printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
1142 v = apic_read(APIC_TMCCT);
1143 printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
1144 v = apic_read(APIC_TDCR);
1145 printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
1146 printk("\n");
1147}
1148
1149void print_all_local_APICs (void)
1150{
1151 on_each_cpu(print_local_APIC, NULL, 1, 1);
1152}
1153
1154void __apicdebuginit print_PIC(void)
1155{
1156 unsigned int v;
1157 unsigned long flags;
1158
1159 if (apic_verbosity == APIC_QUIET)
1160 return;
1161
1162 printk(KERN_DEBUG "\nprinting PIC contents\n");
1163
1164 spin_lock_irqsave(&i8259A_lock, flags);
1165
1166 v = inb(0xa1) << 8 | inb(0x21);
1167 printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
1168
1169 v = inb(0xa0) << 8 | inb(0x20);
1170 printk(KERN_DEBUG "... PIC IRR: %04x\n", v);
1171
1172 outb(0x0b,0xa0);
1173 outb(0x0b,0x20);
1174 v = inb(0xa0) << 8 | inb(0x20);
1175 outb(0x0a,0xa0);
1176 outb(0x0a,0x20);
1177
1178 spin_unlock_irqrestore(&i8259A_lock, flags);
1179
1180 printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
1181
1182 v = inb(0x4d1) << 8 | inb(0x4d0);
1183 printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
1184}
1185
1186#endif /* 0 */
1187
1188static void __init enable_IO_APIC(void)
1189{
1190 union IO_APIC_reg_01 reg_01;
1191 int i;
1192 unsigned long flags;
1193
1194 for (i = 0; i < PIN_MAP_SIZE; i++) {
1195 irq_2_pin[i].pin = -1;
1196 irq_2_pin[i].next = 0;
1197 }
1198 if (!pirqs_enabled)
1199 for (i = 0; i < MAX_PIRQS; i++)
1200 pirq_entries[i] = -1;
1201
1202 /*
1203 * The number of IO-APIC IRQ registers (== #pins):
1204 */
1205 for (i = 0; i < nr_ioapics; i++) {
1206 spin_lock_irqsave(&ioapic_lock, flags);
1207 reg_01.raw = io_apic_read(i, 1);
1208 spin_unlock_irqrestore(&ioapic_lock, flags);
1209 nr_ioapic_registers[i] = reg_01.bits.entries+1;
1210 }
1211
1212 /*
1213 * Do not trust the IO-APIC being empty at bootup
1214 */
1215 clear_IO_APIC();
1216}
1217
1218/*
1219 * Not an __init, needed by the reboot code
1220 */
1221void disable_IO_APIC(void)
1222{
 1223	int pin;
1224 /*
1225 * Clear the IO-APIC before rebooting:
1226 */
1227 clear_IO_APIC();
1228
 1229	/*
 1230	 * If the i8259 is routed through an IOAPIC,
 1231	 * put that IOAPIC in virtual wire mode
 1232	 * so legacy interrupts can be delivered.
1233 */
1234 pin = find_isa_irq_pin(0, mp_ExtINT);
1235 if (pin != -1) {
1236 struct IO_APIC_route_entry entry;
1237 unsigned long flags;
1238
1239 memset(&entry, 0, sizeof(entry));
1240 entry.mask = 0; /* Enabled */
1241 entry.trigger = 0; /* Edge */
1242 entry.irr = 0;
1243 entry.polarity = 0; /* High */
1244 entry.delivery_status = 0;
1245 entry.dest_mode = 0; /* Physical */
1246 entry.delivery_mode = 7; /* ExtInt */
1247 entry.vector = 0;
1248 entry.dest.physical.physical_dest = 0;
1249
1250
1251 /*
1252 * Add it to the IO-APIC irq-routing table:
1253 */
1254 spin_lock_irqsave(&ioapic_lock, flags);
1255 io_apic_write(0, 0x11+2*pin, *(((int *)&entry)+1));
1256 io_apic_write(0, 0x10+2*pin, *(((int *)&entry)+0));
1257 spin_unlock_irqrestore(&ioapic_lock, flags);
1258 }
1259
1260 disconnect_bsp_APIC(pin != -1);
1261}
1262
1263/*
1264 * function to set the IO-APIC physical IDs based on the
1265 * values stored in the MPC table.
1266 *
1267 * by Matt Domsch <Matt_Domsch@dell.com> Tue Dec 21 12:25:05 CST 1999
1268 */
1269
1270static void __init setup_ioapic_ids_from_mpc (void)
1271{
1272 union IO_APIC_reg_00 reg_00;
1273 int apic;
1274 int i;
1275 unsigned char old_id;
1276 unsigned long flags;
1277
1278 /*
1279 * Set the IOAPIC ID to the value stored in the MPC table.
1280 */
1281 for (apic = 0; apic < nr_ioapics; apic++) {
1282
1283 /* Read the register 0 value */
1284 spin_lock_irqsave(&ioapic_lock, flags);
1285 reg_00.raw = io_apic_read(apic, 0);
1286 spin_unlock_irqrestore(&ioapic_lock, flags);
1287
1288 old_id = mp_ioapics[apic].mpc_apicid;
1289
1290
1291 printk(KERN_INFO "Using IO-APIC %d\n", mp_ioapics[apic].mpc_apicid);
1292
1293
1294 /*
1295 * We need to adjust the IRQ routing table
1296 * if the ID changed.
1297 */
1298 if (old_id != mp_ioapics[apic].mpc_apicid)
1299 for (i = 0; i < mp_irq_entries; i++)
1300 if (mp_irqs[i].mpc_dstapic == old_id)
1301 mp_irqs[i].mpc_dstapic
1302 = mp_ioapics[apic].mpc_apicid;
1303
1304 /*
1305 * Read the right value from the MPC table and
1306 * write it into the ID register.
1307 */
1308 apic_printk(APIC_VERBOSE,KERN_INFO "...changing IO-APIC physical APIC ID to %d ...",
1309 mp_ioapics[apic].mpc_apicid);
1310
1311 reg_00.bits.ID = mp_ioapics[apic].mpc_apicid;
1312 spin_lock_irqsave(&ioapic_lock, flags);
1313 io_apic_write(apic, 0, reg_00.raw);
1314 spin_unlock_irqrestore(&ioapic_lock, flags);
1315
1316 /*
1317 * Sanity check
1318 */
1319 spin_lock_irqsave(&ioapic_lock, flags);
1320 reg_00.raw = io_apic_read(apic, 0);
1321 spin_unlock_irqrestore(&ioapic_lock, flags);
1322 if (reg_00.bits.ID != mp_ioapics[apic].mpc_apicid)
1323 printk("could not set ID!\n");
1324 else
1325 apic_printk(APIC_VERBOSE," ok.\n");
1326 }
1327}
1328
1329/*
1330 * There is a nasty bug in some older SMP boards, their mptable lies
1331 * about the timer IRQ. We do the following to work around the situation:
1332 *
1333 * - timer IRQ defaults to IO-APIC IRQ
1334 * - if this function detects that timer IRQs are defunct, then we fall
1335 * back to ISA timer IRQs
1336 */
1337static int __init timer_irq_works(void)
1338{
1339 unsigned long t1 = jiffies;
1340
1341 local_irq_enable();
1342 /* Let ten ticks pass... */
1343 mdelay((10 * 1000) / HZ);
1344
1345 /*
1346 * Expect a few ticks at least, to be sure some possible
1347 * glue logic does not lock up after one or two first
1348 * ticks in a non-ExtINT mode. Also the local APIC
1349 * might have cached one ExtINT interrupt. Finally, at
1350 * least one tick may be lost due to delays.
1351 */
1352
1353 /* jiffies wrap? */
1354 if (jiffies - t1 > 4)
1355 return 1;
1356 return 0;
1357}
1358
1359/*
1360 * In the SMP+IOAPIC case it might happen that there are an unspecified
1361 * number of pending IRQ events unhandled. These cases are very rare,
1362 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
1363 * better to do it this way as thus we do not have to be aware of
1364 * 'pending' interrupts in the IRQ path, except at this point.
1365 */
1366/*
 1367 * Edge-triggered interrupts need a resend of any interrupt
 1368 * that was delayed, but this is now handled in the
 1369 * device-independent code.
1370 */
1371
1372/*
 1373 * Starting up an edge-triggered IO-APIC interrupt is
1374 * nasty - we need to make sure that we get the edge.
1375 * If it is already asserted for some reason, we need
 1376 * to return 1 to indicate that it was pending.
1377 *
1378 * This is not complete - we should be able to fake
1379 * an edge even if it isn't on the 8259A...
1380 */
1381
1382static unsigned int startup_edge_ioapic_irq(unsigned int irq)
1383{
1384 int was_pending = 0;
1385 unsigned long flags;
1386
1387 spin_lock_irqsave(&ioapic_lock, flags);
1388 if (irq < 16) {
1389 disable_8259A_irq(irq);
1390 if (i8259A_irq_pending(irq))
1391 was_pending = 1;
1392 }
1393 __unmask_IO_APIC_irq(irq);
1394 spin_unlock_irqrestore(&ioapic_lock, flags);
1395
1396 return was_pending;
1397}
1398
1399/*
1400 * Once we have recorded IRQ_PENDING already, we can mask the
1401 * interrupt for real. This prevents IRQ storms from unhandled
1402 * devices.
1403 */
1404static void ack_edge_ioapic_irq(unsigned int irq)
1405{
 1406	move_irq(irq);
1407 if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED))
1408 == (IRQ_PENDING | IRQ_DISABLED))
1409 mask_IO_APIC_irq(irq);
1410 ack_APIC_irq();
1411}
1412
1413/*
1414 * Level triggered interrupts can just be masked,
1415 * and shutting down and starting up the interrupt
1416 * is the same as enabling and disabling them -- except
1417 * with a startup need to return a "was pending" value.
1418 *
1419 * Level triggered interrupts are special because we
1420 * do not touch any IO-APIC register while handling
1421 * them. We ack the APIC in the end-IRQ handler, not
1422 * in the start-IRQ-handler. Protection against reentrance
1423 * from the same interrupt is still provided, both by the
1424 * generic IRQ layer and by the fact that an unacked local
1425 * APIC does not accept IRQs.
1426 */
1427static unsigned int startup_level_ioapic_irq (unsigned int irq)
1428{
1429 unmask_IO_APIC_irq(irq);
1430
1431 return 0; /* don't check for pending */
1432}
1433
1434static void end_level_ioapic_irq (unsigned int irq)
1435{
 1436	move_irq(irq);
1437 ack_APIC_irq();
1438}
1439
1440#ifdef CONFIG_PCI_MSI
1441static unsigned int startup_edge_ioapic_vector(unsigned int vector)
1442{
1443 int irq = vector_to_irq(vector);
1444
1445 return startup_edge_ioapic_irq(irq);
1446}
1447
1448static void ack_edge_ioapic_vector(unsigned int vector)
1449{
1450 int irq = vector_to_irq(vector);
1451
 1452	move_native_irq(vector);
1453 ack_edge_ioapic_irq(irq);
1454}
1455
1456static unsigned int startup_level_ioapic_vector (unsigned int vector)
1457{
1458 int irq = vector_to_irq(vector);
1459
1460 return startup_level_ioapic_irq (irq);
1461}
1462
1463static void end_level_ioapic_vector (unsigned int vector)
1464{
1465 int irq = vector_to_irq(vector);
1466
 1467	move_native_irq(vector);
1468 end_level_ioapic_irq(irq);
1469}
1470
1471static void mask_IO_APIC_vector (unsigned int vector)
1472{
1473 int irq = vector_to_irq(vector);
1474
1475 mask_IO_APIC_irq(irq);
1476}
1477
1478static void unmask_IO_APIC_vector (unsigned int vector)
1479{
1480 int irq = vector_to_irq(vector);
1481
1482 unmask_IO_APIC_irq(irq);
1483}
1484
 1485#ifdef CONFIG_SMP
1486static void set_ioapic_affinity_vector (unsigned int vector,
1487 cpumask_t cpu_mask)
1488{
1489 int irq = vector_to_irq(vector);
1490
 1491	set_native_irq_info(vector, cpu_mask);
1492 set_ioapic_affinity_irq(irq, cpu_mask);
1493}
1494#endif // CONFIG_SMP
1495#endif // CONFIG_PCI_MSI
1496
1497/*
1498 * Level and edge triggered IO-APIC interrupts need different handling,
1499 * so we use two separate IRQ descriptors. Edge triggered IRQs can be
1500 * handled with the level-triggered descriptor, but that one has slightly
1501 * more overhead. Level-triggered interrupts cannot be handled with the
1502 * edge-triggered handler, without risking IRQ storms and other ugly
1503 * races.
1504 */
1505
 1506static struct hw_interrupt_type ioapic_edge_type __read_mostly = {
1507 .typename = "IO-APIC-edge",
1508 .startup = startup_edge_ioapic,
1509 .shutdown = shutdown_edge_ioapic,
1510 .enable = enable_edge_ioapic,
1511 .disable = disable_edge_ioapic,
1512 .ack = ack_edge_ioapic,
1513 .end = end_edge_ioapic,
 1514#ifdef CONFIG_SMP
 1515	.set_affinity = set_ioapic_affinity,
 1516#endif
1517};
1518
 1519static struct hw_interrupt_type ioapic_level_type __read_mostly = {
1520 .typename = "IO-APIC-level",
1521 .startup = startup_level_ioapic,
1522 .shutdown = shutdown_level_ioapic,
1523 .enable = enable_level_ioapic,
1524 .disable = disable_level_ioapic,
1525 .ack = mask_and_ack_level_ioapic,
1526 .end = end_level_ioapic,
 1527#ifdef CONFIG_SMP
 1528	.set_affinity = set_ioapic_affinity,
 1529#endif
1530};
1531
1532static inline void init_IO_APIC_traps(void)
1533{
1534 int irq;
1535
1536 /*
1537 * NOTE! The local APIC isn't very good at handling
1538 * multiple interrupts at the same interrupt level.
1539 * As the interrupt level is determined by taking the
1540 * vector number and shifting that right by 4, we
1541 * want to spread these out a bit so that they don't
1542 * all fall in the same interrupt level.
1543 *
1544 * Also, we've got to be careful not to trash gate
1545 * 0x80, because int 0x80 is hm, kind of importantish. ;)
1546 */
1547 for (irq = 0; irq < NR_IRQS ; irq++) {
1548 int tmp = irq;
1549 if (use_pci_vector()) {
1550 if (!platform_legacy_irq(tmp))
1551 if ((tmp = vector_to_irq(tmp)) == -1)
1552 continue;
1553 }
1554 if (IO_APIC_IRQ(tmp) && !IO_APIC_VECTOR(tmp)) {
1555 /*
1556 * Hmm.. We don't have an entry for this,
1557 * so default to an old-fashioned 8259
1558 * interrupt if we can..
1559 */
1560 if (irq < 16)
1561 make_8259A_irq(irq);
1562 else
1563 /* Strange. Oh, well.. */
1564 irq_desc[irq].handler = &no_irq_type;
1565 }
1566 }
1567}
1568
1569static void enable_lapic_irq (unsigned int irq)
1570{
1571 unsigned long v;
1572
1573 v = apic_read(APIC_LVT0);
1574 apic_write_around(APIC_LVT0, v & ~APIC_LVT_MASKED);
1575}
1576
1577static void disable_lapic_irq (unsigned int irq)
1578{
1579 unsigned long v;
1580
1581 v = apic_read(APIC_LVT0);
1582 apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED);
1583}
1584
1585static void ack_lapic_irq (unsigned int irq)
1586{
1587 ack_APIC_irq();
1588}
1589
1590static void end_lapic_irq (unsigned int i) { /* nothing */ }
1591
 1592static struct hw_interrupt_type lapic_irq_type __read_mostly = {
1593 .typename = "local-APIC-edge",
1594 .startup = NULL, /* startup_irq() not used for IRQ0 */
1595 .shutdown = NULL, /* shutdown_irq() not used for IRQ0 */
1596 .enable = enable_lapic_irq,
1597 .disable = disable_lapic_irq,
1598 .ack = ack_lapic_irq,
1599 .end = end_lapic_irq,
1600};
1601
1602static void setup_nmi (void)
1603{
1604 /*
1605 * Dirty trick to enable the NMI watchdog ...
1606 * We put the 8259A master into AEOI mode and
1607 * unmask on all local APICs LVT0 as NMI.
1608 *
1609 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
1610 * is from Maciej W. Rozycki - so we do not have to EOI from
1611 * the NMI handler or the timer interrupt.
1612 */
1613 printk(KERN_INFO "activating NMI Watchdog ...");
1614
1615 enable_NMI_through_LVT0(NULL);
1616
1617 printk(" done.\n");
1618}
1619
1620/*
 1621 * This looks a bit hackish but it's about the only way of sending
1622 * a few INTA cycles to 8259As and any associated glue logic. ICR does
1623 * not support the ExtINT mode, unfortunately. We need to send these
1624 * cycles as some i82489DX-based boards have glue logic that keeps the
1625 * 8259A interrupt line asserted until INTA. --macro
1626 */
1627static inline void unlock_ExtINT_logic(void)
1628{
1629 int pin, i;
1630 struct IO_APIC_route_entry entry0, entry1;
1631 unsigned char save_control, save_freq_select;
1632 unsigned long flags;
1633
1634 pin = find_isa_irq_pin(8, mp_INT);
1635 if (pin == -1)
1636 return;
1637
1638 spin_lock_irqsave(&ioapic_lock, flags);
1639 *(((int *)&entry0) + 1) = io_apic_read(0, 0x11 + 2 * pin);
1640 *(((int *)&entry0) + 0) = io_apic_read(0, 0x10 + 2 * pin);
1641 spin_unlock_irqrestore(&ioapic_lock, flags);
1642 clear_IO_APIC_pin(0, pin);
1643
1644 memset(&entry1, 0, sizeof(entry1));
1645
1646 entry1.dest_mode = 0; /* physical delivery */
1647 entry1.mask = 0; /* unmask IRQ now */
1648 entry1.dest.physical.physical_dest = hard_smp_processor_id();
1649 entry1.delivery_mode = dest_ExtINT;
1650 entry1.polarity = entry0.polarity;
1651 entry1.trigger = 0;
1652 entry1.vector = 0;
1653
1654 spin_lock_irqsave(&ioapic_lock, flags);
1655 io_apic_write(0, 0x11 + 2 * pin, *(((int *)&entry1) + 1));
1656 io_apic_write(0, 0x10 + 2 * pin, *(((int *)&entry1) + 0));
1657 spin_unlock_irqrestore(&ioapic_lock, flags);
1658
1659 save_control = CMOS_READ(RTC_CONTROL);
1660 save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
1661 CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
1662 RTC_FREQ_SELECT);
1663 CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
1664
1665 i = 100;
1666 while (i-- > 0) {
1667 mdelay(10);
1668 if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
1669 i -= 10;
1670 }
1671
1672 CMOS_WRITE(save_control, RTC_CONTROL);
1673 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
1674 clear_IO_APIC_pin(0, pin);
1675
1676 spin_lock_irqsave(&ioapic_lock, flags);
1677 io_apic_write(0, 0x11 + 2 * pin, *(((int *)&entry0) + 1));
1678 io_apic_write(0, 0x10 + 2 * pin, *(((int *)&entry0) + 0));
1679 spin_unlock_irqrestore(&ioapic_lock, flags);
1680}
1681
1682/*
1683 * This code may look a bit paranoid, but it's supposed to cooperate with
1684 * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
1685 * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
1686 * fanatically on his truly buggy board.
1687 */
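/*
 * Rough fallback order below: try the timer through its I/O APIC pin
 * (pin1); failing that, route it as ExtINT through the 8259A into the
 * I/O APIC (pin2); failing that, deliver it through the local APIC
 * LVT0 ("virtual wire"); failing that, plain 8259A ExtINT; and if
 * nothing works, panic with a hint to boot with "noapic".
 */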
1688static inline void check_timer(void)
1689{
1690 int pin1, pin2;
1691 int vector;
1692
1693 /*
1694 * get/set the timer IRQ vector:
1695 */
1696 disable_8259A_irq(0);
1697 vector = assign_irq_vector(0);
1698 set_intr_gate(vector, interrupt[0]);
1699
1700 /*
1701 * Subtle, code in do_timer_interrupt() expects an AEOI
1702 * mode for the 8259A whenever interrupts are routed
1703 * through I/O APICs. Also IRQ0 has to be enabled in
1704 * the 8259A which implies the virtual wire has to be
1705 * disabled in the local APIC.
1706 */
1707 apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
1708 init_8259A(1);
1709 enable_8259A_irq(0);
1710
1711 pin1 = find_isa_irq_pin(0, mp_INT);
1712 pin2 = find_isa_irq_pin(0, mp_ExtINT);
1713
1714 apic_printk(APIC_VERBOSE,KERN_INFO "..TIMER: vector=0x%02X pin1=%d pin2=%d\n", vector, pin1, pin2);
1715
1716 if (pin1 != -1) {
1717 /*
1718 * Ok, does IRQ0 through the IOAPIC work?
1719 */
1720 unmask_IO_APIC_irq(0);
 1721		if (!no_timer_check && timer_irq_works()) {
1722 nmi_watchdog_default();
1723 if (nmi_watchdog == NMI_IO_APIC) {
1724 disable_8259A_irq(0);
1725 setup_nmi();
1726 enable_8259A_irq(0);
 1727			}
1728 if (disable_timer_pin_1 > 0)
1729 clear_IO_APIC_pin(0, pin1);
1730 return;
1731 }
1732 clear_IO_APIC_pin(0, pin1);
1733 apic_printk(APIC_QUIET,KERN_ERR "..MP-BIOS bug: 8254 timer not connected to IO-APIC\n");
1734 }
1735
1736 apic_printk(APIC_VERBOSE,KERN_INFO "...trying to set up timer (IRQ0) through the 8259A ... ");
1737 if (pin2 != -1) {
1738 apic_printk(APIC_VERBOSE,"\n..... (found pin %d) ...", pin2);
1739 /*
1740 * legacy devices should be connected to IO APIC #0
1741 */
1742 setup_ExtINT_IRQ0_pin(pin2, vector);
1743 if (timer_irq_works()) {
1744 printk("works.\n");
1745 nmi_watchdog_default();
1746 if (nmi_watchdog == NMI_IO_APIC) {
1747 setup_nmi();
1748 }
1749 return;
1750 }
1751 /*
1752 * Cleanup, just in case ...
1753 */
1754 clear_IO_APIC_pin(0, pin2);
1755 }
1756 printk(" failed.\n");
1757
1758 if (nmi_watchdog) {
1759 printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
1760 nmi_watchdog = 0;
1761 }
1762
1763 apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
1764
1765 disable_8259A_irq(0);
1766 irq_desc[0].handler = &lapic_irq_type;
1767 apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */
1768 enable_8259A_irq(0);
1769
1770 if (timer_irq_works()) {
1771 apic_printk(APIC_QUIET, " works.\n");
1772 return;
1773 }
1774 apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
1775 apic_printk(APIC_VERBOSE," failed.\n");
1776
1777 apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as ExtINT IRQ...");
1778
1779 init_8259A(0);
1780 make_8259A_irq(0);
1781 apic_write_around(APIC_LVT0, APIC_DM_EXTINT);
1782
1783 unlock_ExtINT_logic();
1784
1785 if (timer_irq_works()) {
1786 apic_printk(APIC_VERBOSE," works.\n");
1787 return;
1788 }
1789 apic_printk(APIC_VERBOSE," failed :(.\n");
1790 panic("IO-APIC + timer doesn't work! Try using the 'noapic' kernel parameter\n");
1791}
1792
1793static int __init notimercheck(char *s)
1794{
1795 no_timer_check = 1;
1796 return 1;
1797}
1798__setup("no_timer_check", notimercheck);
1799
1800/*
1801 *
1802 * IRQ's that are handled by the PIC in the MPS IOAPIC case.
1803 * - IRQ2 is the cascade IRQ, and cannot be a io-apic IRQ.
1804 * Linux doesn't really care, as it's not actually used
1805 * for any interrupt handling anyway.
1806 */
1807#define PIC_IRQS (1<<2)
1808
1809void __init setup_IO_APIC(void)
1810{
1811 enable_IO_APIC();
1812
1813 if (acpi_ioapic)
1814 io_apic_irqs = ~0; /* all IRQs go through IOAPIC */
1815 else
1816 io_apic_irqs = ~PIC_IRQS;
1817
1818 apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
1819
1820 /*
1821 * Set up the IO-APIC IRQ routing table.
1822 */
1823 if (!acpi_ioapic)
1824 setup_ioapic_ids_from_mpc();
1825 sync_Arb_IDs();
1826 setup_IO_APIC_irqs();
1827 init_IO_APIC_traps();
1828 check_timer();
1829 if (!acpi_ioapic)
1830 print_IO_APIC();
1831}
1832
1833struct sysfs_ioapic_data {
1834 struct sys_device dev;
1835 struct IO_APIC_route_entry entry[0];
1836};
1837static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS];
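/*
 * entry[] is a variable-length array: ioapic_init_sysfs() allocates
 * one IO_APIC_route_entry per redirection register so that
 * ioapic_suspend()/ioapic_resume() can save and restore the whole
 * routing table across a suspend cycle.
 */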
1838
 1839static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
1840{
1841 struct IO_APIC_route_entry *entry;
1842 struct sysfs_ioapic_data *data;
1843 unsigned long flags;
1844 int i;
1845
1846 data = container_of(dev, struct sysfs_ioapic_data, dev);
1847 entry = data->entry;
1848 spin_lock_irqsave(&ioapic_lock, flags);
1849 for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
1850 *(((int *)entry) + 1) = io_apic_read(dev->id, 0x11 + 2 * i);
1851 *(((int *)entry) + 0) = io_apic_read(dev->id, 0x10 + 2 * i);
1852 }
1853 spin_unlock_irqrestore(&ioapic_lock, flags);
1854
1855 return 0;
1856}
1857
1858static int ioapic_resume(struct sys_device *dev)
1859{
1860 struct IO_APIC_route_entry *entry;
1861 struct sysfs_ioapic_data *data;
1862 unsigned long flags;
1863 union IO_APIC_reg_00 reg_00;
1864 int i;
1865
1866 data = container_of(dev, struct sysfs_ioapic_data, dev);
1867 entry = data->entry;
1868
1869 spin_lock_irqsave(&ioapic_lock, flags);
1870 reg_00.raw = io_apic_read(dev->id, 0);
1871 if (reg_00.bits.ID != mp_ioapics[dev->id].mpc_apicid) {
1872 reg_00.bits.ID = mp_ioapics[dev->id].mpc_apicid;
1873 io_apic_write(dev->id, 0, reg_00.raw);
1874 }
1875 for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
1876 io_apic_write(dev->id, 0x11+2*i, *(((int *)entry)+1));
1877 io_apic_write(dev->id, 0x10+2*i, *(((int *)entry)+0));
1878 }
1879 spin_unlock_irqrestore(&ioapic_lock, flags);
1880
1881 return 0;
1882}
1883
1884static struct sysdev_class ioapic_sysdev_class = {
1885 set_kset_name("ioapic"),
1886 .suspend = ioapic_suspend,
1887 .resume = ioapic_resume,
1888};
1889
1890static int __init ioapic_init_sysfs(void)
1891{
1892 struct sys_device * dev;
1893 int i, size, error = 0;
1894
1895 error = sysdev_class_register(&ioapic_sysdev_class);
1896 if (error)
1897 return error;
1898
1899 for (i = 0; i < nr_ioapics; i++ ) {
1900 size = sizeof(struct sys_device) + nr_ioapic_registers[i]
1901 * sizeof(struct IO_APIC_route_entry);
1902 mp_ioapic_data[i] = kmalloc(size, GFP_KERNEL);
1903 if (!mp_ioapic_data[i]) {
1904 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
1905 continue;
1906 }
1907 memset(mp_ioapic_data[i], 0, size);
1908 dev = &mp_ioapic_data[i]->dev;
1909 dev->id = i;
1910 dev->cls = &ioapic_sysdev_class;
1911 error = sysdev_register(dev);
1912 if (error) {
1913 kfree(mp_ioapic_data[i]);
1914 mp_ioapic_data[i] = NULL;
1915 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
1916 continue;
1917 }
1918 }
1919
1920 return 0;
1921}
1922
1923device_initcall(ioapic_init_sysfs);
1924
1925/* --------------------------------------------------------------------------
1926 ACPI-based IOAPIC Configuration
1927 -------------------------------------------------------------------------- */
1928
 1929#ifdef CONFIG_ACPI
1930
1931#define IO_APIC_MAX_ID 0xFE
1932
1933int __init io_apic_get_version (int ioapic)
1934{
1935 union IO_APIC_reg_01 reg_01;
1936 unsigned long flags;
1937
1938 spin_lock_irqsave(&ioapic_lock, flags);
1939 reg_01.raw = io_apic_read(ioapic, 1);
1940 spin_unlock_irqrestore(&ioapic_lock, flags);
1941
1942 return reg_01.bits.version;
1943}
1944
1945
1946int __init io_apic_get_redir_entries (int ioapic)
1947{
1948 union IO_APIC_reg_01 reg_01;
1949 unsigned long flags;
1950
1951 spin_lock_irqsave(&ioapic_lock, flags);
1952 reg_01.raw = io_apic_read(ioapic, 1);
1953 spin_unlock_irqrestore(&ioapic_lock, flags);
1954
1955 return reg_01.bits.entries;
1956}
1957
1958
 1959int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity)
1960{
1961 struct IO_APIC_route_entry entry;
1962 unsigned long flags;
1963
1964 if (!IO_APIC_IRQ(irq)) {
1965 apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
1966 ioapic);
1967 return -EINVAL;
1968 }
1969
1970 /*
1971 * Generate a PCI IRQ routing entry and program the IOAPIC accordingly.
1972 * Note that we mask (disable) IRQs now -- these get enabled when the
1973 * corresponding device driver registers for this IRQ.
1974 */
1975
1976 memset(&entry,0,sizeof(entry));
1977
1978 entry.delivery_mode = INT_DELIVERY_MODE;
1979 entry.dest_mode = INT_DEST_MODE;
1980 entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
1981 entry.trigger = triggering;
1982 entry.polarity = polarity;
1983 entry.mask = 1; /* Disabled (masked) */
1984
 1985	irq = gsi_irq_sharing(irq);
1986 /*
1987 * IRQs < 16 are already in the irq_2_pin[] map
1988 */
1989 if (irq >= 16)
1990 add_pin_to_irq(irq, ioapic, pin);
1991
1992 entry.vector = assign_irq_vector(irq);
1993
1994 apic_printk(APIC_VERBOSE,KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry (%d-%d -> 0x%x -> "
1995 "IRQ %d Mode:%i Active:%i)\n", ioapic,
1996 mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
 1997		    triggering, polarity);
 1998
 1999	ioapic_register_intr(irq, entry.vector, triggering);
2000
2001 if (!ioapic && (irq < 16))
2002 disable_8259A_irq(irq);
2003
2004 spin_lock_irqsave(&ioapic_lock, flags);
2005 io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1));
2006 io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0));
 2007	set_native_irq_info(use_pci_vector() ? entry.vector : irq, TARGET_CPUS);
2008 spin_unlock_irqrestore(&ioapic_lock, flags);
2009
2010 return 0;
2011}
2012
 2013#endif /* CONFIG_ACPI */
2014
2015
2016/*
 2017 * This function is currently only a helper for the i386 SMP boot process, where
 2018 * we need to reprogram the ioredtbls to cater for the CPUs which have come online,
 2019 * so the mask in all cases should simply be TARGET_CPUS.
2020 */
 2021#ifdef CONFIG_SMP
2022void __init setup_ioapic_dest(void)
2023{
2024 int pin, ioapic, irq, irq_entry;
2025
2026 if (skip_ioapic_setup == 1)
2027 return;
2028
2029 for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
2030 for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
2031 irq_entry = find_irq_entry(ioapic, pin, mp_INT);
2032 if (irq_entry == -1)
2033 continue;
2034 irq = pin_2_irq(irq_entry, ioapic, pin);
2035 set_ioapic_affinity_irq(irq, TARGET_CPUS);
2036 }
2037
2038 }
2039}
 2040#endif