/*
 *	Intel IO-APIC support for multi-Pentium hosts.
 *
 *	Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
 *
 *	Many thanks to Stig Venaas for trying out countless experimental
 *	patches and reporting/debugging problems patiently!
 *
 *	(c) 1999, Multiple IO-APIC support, developed by
 *	Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
 *	Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
 *	further tested and cleaned up by Zach Brown <zab@redhat.com>
 *	and Ingo Molnar <mingo@redhat.com>
 *
 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
 *					thanks to Eric Gilmore
 *					for testing these extensively
 *	Paul Diefenbaugh	:	Added full ACPI support
 */
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/mc146818rtc.h>
#include <linux/compiler.h>
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/sysdev.h>
#include <linux/msi.h>
#include <linux/htirq.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/jiffies.h>	/* time_after() */
#include <acpi/acpi_bus.h>
#include <linux/bootmem.h>
#include <linux/dmar.h>

#include <asm/proto.h>
#include <asm/timer.h>
#include <asm/i8259.h>
#include <asm/msidef.h>
#include <asm/hypertransport.h>
#include <asm/setup.h>
#include <asm/irq_remapping.h>

#include <mach_apic.h>
#include <mach_apicdef.h>
#define __apicdebuginit(type)	static type __init

/*
 *      Is the SiS APIC rmw bug present ?
 *      -1 = don't know, 0 = no, 1 = yes
 */
int sis_apic_bug = -1;

static DEFINE_SPINLOCK(ioapic_lock);
static DEFINE_SPINLOCK(vector_lock);
/*
 * # of IRQ routing registers
 */
int nr_ioapic_registers[MAX_IO_APICS];

/* I/O APIC entries */
struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];

/* MP IRQ source entries */
struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];

/* # of MP IRQ source entries */
int mp_irq_entries;

#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
int mp_bus_id_to_type[MAX_MP_BUSSES];
#endif

DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);

int skip_ioapic_setup;

static int __init parse_noapic(char *str)
{
	/* disable IO-APIC */
	disable_ioapic_setup();
	return 0;
}
early_param("noapic", parse_noapic);
struct irq_cfg {
	unsigned int irq;
#ifdef CONFIG_HAVE_SPARSE_IRQ
	struct irq_cfg *next;
#endif
	struct irq_pin_list *irq_2_pin;
	cpumask_t domain;
	cpumask_t old_domain;
	unsigned move_cleanup_count;
	u8 vector;
	u8 move_in_progress : 1;
};
/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
static struct irq_cfg irq_cfg_legacy[] __initdata = {
	[0]  = { .irq =  0, .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR,  },
	[1]  = { .irq =  1, .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR,  },
	[2]  = { .irq =  2, .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR,  },
	[3]  = { .irq =  3, .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR,  },
	[4]  = { .irq =  4, .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR,  },
	[5]  = { .irq =  5, .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR,  },
	[6]  = { .irq =  6, .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR,  },
	[7]  = { .irq =  7, .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR,  },
	[8]  = { .irq =  8, .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR,  },
	[9]  = { .irq =  9, .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR,  },
	[10] = { .irq = 10, .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, },
	[11] = { .irq = 11, .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, },
	[12] = { .irq = 12, .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, },
	[13] = { .irq = 13, .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, },
	[14] = { .irq = 14, .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, },
	[15] = { .irq = 15, .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
};

static struct irq_cfg irq_cfg_init = { .irq = -1U, };
static void init_one_irq_cfg(struct irq_cfg *cfg)
{
	memcpy(cfg, &irq_cfg_init, sizeof(struct irq_cfg));
}

static struct irq_cfg *irq_cfgx;

/*
 * Protect the irq_cfgx_free freelist:
 */
static DEFINE_SPINLOCK(irq_cfg_lock);
#ifdef CONFIG_HAVE_SPARSE_IRQ
static struct irq_cfg *irq_cfgx_free;
#endif

static void __init init_work(void *data)
{
	struct dyn_array *da = data;
	struct irq_cfg *cfg;
	int legacy_count;
	int i;

	cfg = *da->name;

	memcpy(cfg, irq_cfg_legacy, sizeof(irq_cfg_legacy));

	legacy_count = sizeof(irq_cfg_legacy)/sizeof(irq_cfg_legacy[0]);
	for (i = legacy_count; i < *da->nr; i++)
		init_one_irq_cfg(&cfg[i]);

#ifdef CONFIG_HAVE_SPARSE_IRQ
	for (i = 1; i < *da->nr; i++)
		cfg[i-1].next = &cfg[i];

	irq_cfgx_free = &irq_cfgx[legacy_count];
	irq_cfgx[legacy_count - 1].next = NULL;
#endif
}
#ifdef CONFIG_HAVE_SPARSE_IRQ
/* need to be bigger than size of irq_cfg_legacy */
static int nr_irq_cfg = 32;

static int __init parse_nr_irq_cfg(char *arg)
{
	if (arg)
		nr_irq_cfg = simple_strtoul(arg, NULL, 0);
	return 0;
}

early_param("nr_irq_cfg", parse_nr_irq_cfg);

#define for_each_irq_cfg(irqX, cfg)	\
	for (cfg = irq_cfgx, irqX = cfg->irq; cfg; cfg = cfg->next, irqX = cfg ? cfg->irq : -1U)
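/*
 * Note: with CONFIG_HAVE_SPARSE_IRQ the irq_cfg entries form a singly
 * linked list (chained together via ->next in init_work() above), so
 * this iterator walks ->next pointers; the !CONFIG_HAVE_SPARSE_IRQ
 * variant further down simply indexes the flat irq_cfgx[] array.
 */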
DEFINE_DYN_ARRAY(irq_cfgx, sizeof(struct irq_cfg), nr_irq_cfg, PAGE_SIZE, init_work);
204 static struct irq_cfg
*irq_cfg(unsigned int irq
)
219 static struct irq_cfg
*irq_cfg_alloc(unsigned int irq
)
221 struct irq_cfg
*cfg
, *cfg_pri
;
226 cfg_pri
= cfg
= irq_cfgx
;
236 spin_lock_irqsave(&irq_cfg_lock
, flags
);
237 if (!irq_cfgx_free
) {
239 unsigned long total_bytes
;
241 * we run out of pre-allocate ones, allocate more
243 printk(KERN_DEBUG
"try to get more irq_cfg %d\n", nr_irq_cfg
);
245 total_bytes
= sizeof(struct irq_cfg
) * nr_irq_cfg
;
247 cfg
= kzalloc(total_bytes
, GFP_ATOMIC
);
249 cfg
= __alloc_bootmem_nopanic(total_bytes
, PAGE_SIZE
, 0);
252 panic("please boot with nr_irq_cfg= %d\n", count
* 2);
255 printk(KERN_DEBUG
"irq_irq ==> [%#lx - %#lx]\n", phys
, phys
+ total_bytes
);
257 for (i
= 0; i
< nr_irq_cfg
; i
++)
258 init_one_irq_cfg(&cfg
[i
]);
260 for (i
= 1; i
< nr_irq_cfg
; i
++)
261 cfg
[i
-1].next
= &cfg
[i
];
267 irq_cfgx_free
= irq_cfgx_free
->next
;
275 spin_unlock_irqrestore(&irq_cfg_lock
, flags
);
277 printk(KERN_DEBUG
"found new irq_cfg for irq %d\n", cfg
->irq
);
278 #ifdef CONFIG_HAVE_SPARSE_IRQ_DEBUG
280 /* dump the results */
283 unsigned long bytes
= sizeof(struct irq_cfg
);
285 printk(KERN_DEBUG
"=========================== %d\n", irq
);
286 printk(KERN_DEBUG
"irq_cfg dump after get that for %d\n", irq
);
287 for_each_irq_cfg(cfg
) {
289 printk(KERN_DEBUG
"irq_cfg %d ==> [%#lx - %#lx]\n", cfg
->irq
, phys
, phys
+ bytes
);
291 printk(KERN_DEBUG
"===========================\n");
#else

#define for_each_irq_cfg(irq, cfg)		\
	for (irq = 0, cfg = &irq_cfgx[irq]; irq < nr_irqs; irq++, cfg = &irq_cfgx[irq])

DEFINE_DYN_ARRAY(irq_cfgx, sizeof(struct irq_cfg), nr_irqs, PAGE_SIZE, init_work);

struct irq_cfg *irq_cfg(unsigned int irq)
{
	return &irq_cfgx[irq];
}

struct irq_cfg *irq_cfg_alloc(unsigned int irq)
/*
 * This is performance-critical, we want to do it O(1)
 *
 * the indexing order of this array favors 1:1 mappings
 * between pins and IRQs.
 */

struct irq_pin_list {
	int apic, pin;
	struct irq_pin_list *next;
};

static struct irq_pin_list *irq_2_pin_head;
/* fill one page ? */
static int nr_irq_2_pin = 0x100;
static struct irq_pin_list *irq_2_pin_ptr;
static void __init irq_2_pin_init_work(void *data)
{
	struct dyn_array *da = data;
	struct irq_pin_list *pin;
	int i;

	pin = *da->name;

	for (i = 1; i < *da->nr; i++)
		pin[i-1].next = &pin[i];

	irq_2_pin_ptr = &pin[0];
}
DEFINE_DYN_ARRAY(irq_2_pin_head, sizeof(struct irq_pin_list), nr_irq_2_pin, PAGE_SIZE, irq_2_pin_init_work);
static struct irq_pin_list *get_one_free_irq_2_pin(void)
{
	struct irq_pin_list *pin;
	int i;

	pin = irq_2_pin_ptr;
	if (pin) {
		irq_2_pin_ptr = pin->next;
		return pin;
	}

	/*
	 * we ran out of pre-allocated ones, allocate more
	 */
	printk(KERN_DEBUG "try to get more irq_2_pin %d\n", nr_irq_2_pin);

	pin = kzalloc(sizeof(struct irq_pin_list)*nr_irq_2_pin,
			GFP_ATOMIC);
	pin = __alloc_bootmem_nopanic(sizeof(struct irq_pin_list) *
			nr_irq_2_pin, PAGE_SIZE, 0);
	if (!pin)
		panic("can not get more irq_2_pin\n");

	for (i = 1; i < nr_irq_2_pin; i++)
		pin[i-1].next = &pin[i];

	irq_2_pin_ptr = pin->next;

	return pin;
}
struct io_apic {
	unsigned int index;
	unsigned int unused[3];
	unsigned int data;
};

static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
{
	return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
		+ (mp_ioapics[idx].mp_apicaddr & ~PAGE_MASK);
}
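/*
 * io_apic_base() resolves I/O APIC 'idx' through the fixmap: the page
 * at FIX_IO_APIC_BASE_0 + idx is a mapping of that APIC's MMIO page,
 * and the low bits of mp_apicaddr supply the offset within the page,
 * since the register block need not be page aligned.
 */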
static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	return readl(&io_apic->data);
}

static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}
/*
 * Re-write a value: to be used for read-modify-write
 * cycles where the read already set up the index register.
 *
 * Older SiS APIC requires we rewrite the index register
 */
static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);

	writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}
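/*
 * Illustrative read-modify-write of a redirection entry's low word
 * (this is the pattern the __DO_ACTION() macro below expands to):
 *
 *	reg = io_apic_read(apic, 0x10 + pin*2);
 *	reg |= IO_APIC_REDIR_MASKED;
 *	io_apic_modify(apic, 0x10 + pin*2, reg);
 *
 * io_apic_modify() rewrites the index register first, which keeps the
 * SiS parts with the rmw bug (see sis_apic_bug above) working.
 */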
static bool io_apic_level_ack_pending(unsigned int irq)
{
	struct irq_pin_list *entry;
	unsigned long flags;
	struct irq_cfg *cfg = irq_cfg(irq);

	spin_lock_irqsave(&ioapic_lock, flags);
	entry = cfg->irq_2_pin;
	for (;;) {
		unsigned int reg;
		int pin;

		if (!entry)
			break;
		pin = entry->pin;
		reg = io_apic_read(entry->apic, 0x10 + pin*2);
		/* Is the remote IRR bit set? */
		if (reg & IO_APIC_REDIR_REMOTE_IRR) {
			spin_unlock_irqrestore(&ioapic_lock, flags);
			return true;
		}
		if (!entry->next)
			break;
		entry = entry->next;
	}
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return false;
}
union entry_union {
	struct { u32 w1, w2; };
	struct IO_APIC_route_entry entry;
};
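/*
 * w1/w2 alias the two 32-bit halves of the 64-bit redirection table
 * entry: w1 is the word at I/O APIC register 0x10 + 2*pin and w2 the
 * word at 0x11 + 2*pin, as used by ioapic_read_entry() and
 * __ioapic_write_entry() below.
 */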
static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
{
	union entry_union eu;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
	eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return eu.entry;
}
/*
 * When we write a new IO APIC routing entry, we need to write the high
 * word first! If the mask bit in the low word is clear, we will enable
 * the interrupt, and we need to make sure the entry is fully populated
 * before that happens.
 */
static void __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	union entry_union eu;

	eu.entry = e;
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
}
static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	__ioapic_write_entry(apic, pin, e);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}
/*
 * When we mask an IO APIC routing entry, we need to write the low
 * word first, in order to set the mask bit before we change the
 * high word.
 */
static void ioapic_mask_entry(int apic, int pin)
{
	unsigned long flags;
	union entry_union eu = { .entry.mask = 1 };

	spin_lock_irqsave(&ioapic_lock, flags);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}
static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector)
{
	int apic, pin;
	struct irq_cfg *cfg;
	struct irq_pin_list *entry;

	cfg = irq_cfg(irq);
	entry = cfg->irq_2_pin;
	for (;;) {
		unsigned int reg;

		if (!entry)
			break;

		apic = entry->apic;
		pin = entry->pin;
#ifdef CONFIG_INTR_REMAP
		/*
		 * With interrupt-remapping, destination information comes
		 * from the interrupt-remapping table entry.
		 */
		if (!irq_remapped(irq))
			io_apic_write(apic, 0x11 + pin*2, dest);
#else
		io_apic_write(apic, 0x11 + pin*2, dest);
#endif
		reg = io_apic_read(apic, 0x10 + pin*2);
		reg &= ~IO_APIC_REDIR_VECTOR_MASK;
		reg |= vector;
		io_apic_modify(apic, 0x10 + pin*2, reg);
		if (!entry->next)
			break;
		entry = entry->next;
	}
}
static int assign_irq_vector(int irq, cpumask_t mask);

static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
{
	struct irq_cfg *cfg;
	unsigned long flags;
	unsigned int dest;
	cpumask_t tmp;
	struct irq_desc *desc;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	cfg = irq_cfg(irq);
	if (assign_irq_vector(irq, mask))
		return;

	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);
	/*
	 * Only the high 8 bits are valid.
	 */
	dest = SET_APIC_LOGICAL_ID(dest);

	desc = irq_to_desc(irq);
	spin_lock_irqsave(&ioapic_lock, flags);
	__target_IO_APIC_irq(irq, dest, cfg->vector);
	desc->affinity = mask;
	spin_unlock_irqrestore(&ioapic_lock, flags);
}
#endif /* CONFIG_SMP */
/*
 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
 * shared ISA-space IRQs, so we have to support them. We are super
 * fast in the common case, and fast for shared ISA-space IRQs.
 */
static void add_pin_to_irq(unsigned int irq, int apic, int pin)
{
	struct irq_cfg *cfg;
	struct irq_pin_list *entry;

	/* first time this irq is referenced, so allocate a new irq_cfg */
	cfg = irq_cfg_alloc(irq);
	entry = cfg->irq_2_pin;
	if (!entry) {
		entry = get_one_free_irq_2_pin();
		cfg->irq_2_pin = entry;
		entry->apic = apic;
		entry->pin = pin;
		printk(KERN_DEBUG " 0 add_pin_to_irq: irq %d --> apic %d pin %d\n", irq, apic, pin);
		return;
	}

	while (entry->next) {
		/* not again, please */
		if (entry->apic == apic && entry->pin == pin)
			return;

		entry = entry->next;
	}

	entry->next = get_one_free_irq_2_pin();
	entry = entry->next;
	entry->apic = apic;
	entry->pin = pin;
	printk(KERN_DEBUG " x add_pin_to_irq: irq %d --> apic %d pin %d\n", irq, apic, pin);
}
/*
 * Reroute an IRQ to a different pin.
 */
static void __init replace_pin_at_irq(unsigned int irq,
				      int oldapic, int oldpin,
				      int newapic, int newpin)
{
	struct irq_cfg *cfg = irq_cfg(irq);
	struct irq_pin_list *entry = cfg->irq_2_pin;
	int replaced = 0;

	while (entry) {
		if (entry->apic == oldapic && entry->pin == oldpin) {
			entry->apic = newapic;
			entry->pin = newpin;
			replaced = 1;
			/* every one is different, right? */
			break;
		}
		entry = entry->next;
	}

	/* why? call replace before add? */
	if (!replaced)
		add_pin_to_irq(irq, newapic, newpin);
}
#define __DO_ACTION(R, ACTION_ENABLE, ACTION_DISABLE, FINAL)		\
{									\
	int pin;							\
	struct irq_cfg *cfg;						\
	struct irq_pin_list *entry;					\
									\
	cfg = irq_cfg(irq);						\
	entry = cfg->irq_2_pin;						\
	for (;;) {							\
		unsigned int reg;					\
		if (!entry)						\
			break;						\
		pin = entry->pin;					\
		reg = io_apic_read(entry->apic, 0x10 + R + pin*2);	\
		reg ACTION_DISABLE;					\
		reg ACTION_ENABLE;					\
		io_apic_modify(entry->apic, 0x10 + R + pin*2, reg);	\
		FINAL;							\
		if (!entry->next)					\
			break;						\
		entry = entry->next;					\
	}								\
}

#define DO_ACTION(name,R, ACTION_ENABLE, ACTION_DISABLE, FINAL)	\
									\
	static void name##_IO_APIC_irq (unsigned int irq)		\
	__DO_ACTION(R, ACTION_ENABLE, ACTION_DISABLE, FINAL)

DO_ACTION(__unmask, 0, |= 0, &= ~IO_APIC_REDIR_MASKED, )
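/*
 * For reference, the DO_ACTION() line above expands to a function
 *
 *	static void __unmask_IO_APIC_irq(unsigned int irq)
 *
 * which walks every (apic, pin) attached to 'irq' and clears
 * IO_APIC_REDIR_MASKED in the low word of each redirection entry,
 * i.e. unmasks the interrupt at every pin that can deliver it.
 */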
/*
 * Synchronize the IO-APIC and the CPU by doing
 * a dummy read from the IO-APIC
 */
static inline void io_apic_sync(unsigned int apic)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	readl(&io_apic->data);
}

#ifdef CONFIG_X86_64
DO_ACTION(__mask, 0, |= IO_APIC_REDIR_MASKED, &= ~0, io_apic_sync(entry->apic))
#else
DO_ACTION(__mask, 0, |= IO_APIC_REDIR_MASKED, &= ~0, )
#endif

/* mask = 1, trigger = 0 */
DO_ACTION(__mask_and_edge, 0, |= IO_APIC_REDIR_MASKED, &= ~IO_APIC_REDIR_LEVEL_TRIGGER, )

/* mask = 0, trigger = 1 */
DO_ACTION(__unmask_and_level, 0, |= IO_APIC_REDIR_LEVEL_TRIGGER, &= ~IO_APIC_REDIR_MASKED, )
static void mask_IO_APIC_irq(unsigned int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	__mask_IO_APIC_irq(irq);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void unmask_IO_APIC_irq(unsigned int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	__unmask_IO_APIC_irq(irq);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}
static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
{
	struct IO_APIC_route_entry entry;

	/* Check delivery_mode to be sure we're not clearing an SMI pin */
	entry = ioapic_read_entry(apic, pin);
	if (entry.delivery_mode == dest_SMI)
		return;
	/*
	 * Disable it in the IO-APIC irq-routing table:
	 */
	ioapic_mask_entry(apic, pin);
}

static void clear_IO_APIC (void)
{
	int apic, pin;

	for (apic = 0; apic < nr_ioapics; apic++)
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
			clear_IO_APIC_pin(apic, pin);
}
#if !defined(CONFIG_SMP) && defined(CONFIG_X86_32)
void send_IPI_self(int vector)
{
	unsigned int cfg;

	apic_wait_icr_idle();
	cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | APIC_DEST_LOGICAL;
	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	apic_write(APIC_ICR, cfg);
}
#endif /* !CONFIG_SMP && CONFIG_X86_32*/
#ifdef CONFIG_X86_32
/*
 * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
 * specific CPU-side IRQs.
 */

#define MAX_PIRQS 8
static int pirq_entries[MAX_PIRQS];
static int pirqs_enabled;

static int __init ioapic_pirq_setup(char *str)
{
	int i, max;
	int ints[MAX_PIRQS+1];

	get_options(str, ARRAY_SIZE(ints), ints);

	for (i = 0; i < MAX_PIRQS; i++)
		pirq_entries[i] = -1;

	pirqs_enabled = 1;
	apic_printk(APIC_VERBOSE, KERN_INFO
			"PIRQ redirection, working around broken MP-BIOS.\n");
	max = MAX_PIRQS;
	if (ints[0] < MAX_PIRQS)
		max = ints[0];

	for (i = 0; i < max; i++) {
		apic_printk(APIC_VERBOSE, KERN_DEBUG
				"... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
		/*
		 * PIRQs are mapped upside down, usually.
		 */
		pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
	}
	return 1;
}

__setup("pirq=", ioapic_pirq_setup);
#endif /* CONFIG_X86_32 */
802 #ifdef CONFIG_INTR_REMAP
803 /* I/O APIC RTE contents at the OS boot up */
804 static struct IO_APIC_route_entry
*early_ioapic_entries
[MAX_IO_APICS
];
807 * Saves and masks all the unmasked IO-APIC RTE's
809 int save_mask_IO_APIC_setup(void)
811 union IO_APIC_reg_01 reg_01
;
816 * The number of IO-APIC IRQ registers (== #pins):
818 for (apic
= 0; apic
< nr_ioapics
; apic
++) {
819 spin_lock_irqsave(&ioapic_lock
, flags
);
820 reg_01
.raw
= io_apic_read(apic
, 1);
821 spin_unlock_irqrestore(&ioapic_lock
, flags
);
822 nr_ioapic_registers
[apic
] = reg_01
.bits
.entries
+1;
825 for (apic
= 0; apic
< nr_ioapics
; apic
++) {
826 early_ioapic_entries
[apic
] =
827 kzalloc(sizeof(struct IO_APIC_route_entry
) *
828 nr_ioapic_registers
[apic
], GFP_KERNEL
);
829 if (!early_ioapic_entries
[apic
])
833 for (apic
= 0; apic
< nr_ioapics
; apic
++)
834 for (pin
= 0; pin
< nr_ioapic_registers
[apic
]; pin
++) {
835 struct IO_APIC_route_entry entry
;
837 entry
= early_ioapic_entries
[apic
][pin
] =
838 ioapic_read_entry(apic
, pin
);
841 ioapic_write_entry(apic
, pin
, entry
);
void restore_IO_APIC_setup(void)
{
	int apic, pin;

	for (apic = 0; apic < nr_ioapics; apic++)
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
			ioapic_write_entry(apic, pin,
					   early_ioapic_entries[apic][pin]);
}

void reinit_intr_remapped_IO_APIC(int intr_remapping)
{
	/*
	 * for now plain restore of previous settings.
	 * TBD: In the case of OS enabling interrupt-remapping,
	 * IO-APIC RTE's need to be setup to point to interrupt-remapping
	 * table entries. for now, do a plain restore, and wait for
	 * the setup_IO_APIC_irqs() to do proper initialization.
	 */
	restore_IO_APIC_setup();
}
#endif
/*
 * Find the IRQ entry number of a certain pin.
 */
static int find_irq_entry(int apic, int pin, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++)
		if (mp_irqs[i].mp_irqtype == type &&
		    (mp_irqs[i].mp_dstapic == mp_ioapics[apic].mp_apicid ||
		     mp_irqs[i].mp_dstapic == MP_APIC_ALL) &&
		    mp_irqs[i].mp_dstirq == pin)
			return i;

	return -1;
}
/*
 * Find the pin to which IRQ[irq] (ISA) is connected
 */
static int __init find_isa_irq_pin(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].mp_srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].mp_irqtype == type) &&
		    (mp_irqs[i].mp_srcbusirq == irq))

			return mp_irqs[i].mp_dstirq;
	}
	return -1;
}
static int __init find_isa_irq_apic(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].mp_srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].mp_irqtype == type) &&
		    (mp_irqs[i].mp_srcbusirq == irq))
			break;
	}
	if (i < mp_irq_entries) {
		int apic;
		for(apic = 0; apic < nr_ioapics; apic++) {
			if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic)
				return apic;
		}
	}

	return -1;
}
/*
 * Find a specific PCI IRQ entry.
 * Not an __init, possibly needed by modules
 */
static int pin_2_irq(int idx, int apic, int pin);

int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
{
	int apic, i, best_guess = -1;

	apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
		bus, slot, pin);
	if (test_bit(bus, mp_bus_not_pci)) {
		apic_printk(APIC_VERBOSE, "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
		return -1;
	}
	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].mp_srcbus;

		for (apic = 0; apic < nr_ioapics; apic++)
			if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic ||
			    mp_irqs[i].mp_dstapic == MP_APIC_ALL)
				break;

		if (!test_bit(lbus, mp_bus_not_pci) &&
		    !mp_irqs[i].mp_irqtype &&
		    (bus == lbus) &&
		    (slot == ((mp_irqs[i].mp_srcbusirq >> 2) & 0x1f))) {
			int irq = pin_2_irq(i,apic,mp_irqs[i].mp_dstirq);

			if (!(apic || IO_APIC_IRQ(irq)))
				continue;

			if (pin == (mp_irqs[i].mp_srcbusirq & 3))
				return irq;
			/*
			 * Use the first all-but-pin matching entry as a
			 * best-guess fuzzy result for broken mptables.
			 */
			if (best_guess < 0)
				best_guess = irq;
		}
	}
	return best_guess;
}

EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
/*
 * EISA Edge/Level control register, ELCR
 */
static int EISA_ELCR(unsigned int irq)
{
	if (irq < 16) {
		unsigned int port = 0x4d0 + (irq >> 3);
		return (inb(port) >> (irq & 7)) & 1;
	}
	apic_printk(APIC_VERBOSE, KERN_INFO
			"Broken MPtable reports ISA irq %d\n", irq);
	return 0;
}
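/*
 * Worked example: for ISA irq 10 this reads port 0x4d0 + (10 >> 3) =
 * 0x4d1 and tests bit (10 & 7) = 2; the second ELCR register carries
 * the trigger bits for irqs 8-15.
 */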
/* ISA interrupts are always polarity zero edge triggered,
 * when listed as conforming in the MP table. */

#define default_ISA_trigger(idx)	(0)
#define default_ISA_polarity(idx)	(0)

/* EISA interrupts are always polarity zero and can be edge or level
 * trigger depending on the ELCR value.  If an interrupt is listed as
 * EISA conforming in the MP table, that means its trigger type must
 * be read in from the ELCR */

#define default_EISA_trigger(idx)	(EISA_ELCR(mp_irqs[idx].mp_srcbusirq))
#define default_EISA_polarity(idx)	default_ISA_polarity(idx)

/* PCI interrupts are always polarity one level triggered,
 * when listed as conforming in the MP table. */

#define default_PCI_trigger(idx)	(1)
#define default_PCI_polarity(idx)	(1)

/* MCA interrupts are always polarity zero level triggered,
 * when listed as conforming in the MP table. */

#define default_MCA_trigger(idx)	(1)
#define default_MCA_polarity(idx)	default_ISA_polarity(idx)
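/*
 * Summary of the conforming defaults above (trigger: 0 = edge,
 * 1 = level; polarity: 0 = active high, 1 = active low):
 *
 *	ISA	edge		high
 *	EISA	from ELCR	high
 *	PCI	level		low
 *	MCA	level		high
 */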
1020 static int MPBIOS_polarity(int idx
)
1022 int bus
= mp_irqs
[idx
].mp_srcbus
;
1026 * Determine IRQ line polarity (high active or low active):
1028 switch (mp_irqs
[idx
].mp_irqflag
& 3)
1030 case 0: /* conforms, ie. bus-type dependent polarity */
1031 if (test_bit(bus
, mp_bus_not_pci
))
1032 polarity
= default_ISA_polarity(idx
);
1034 polarity
= default_PCI_polarity(idx
);
1036 case 1: /* high active */
1041 case 2: /* reserved */
1043 printk(KERN_WARNING
"broken BIOS!!\n");
1047 case 3: /* low active */
1052 default: /* invalid */
1054 printk(KERN_WARNING
"broken BIOS!!\n");
1062 static int MPBIOS_trigger(int idx
)
1064 int bus
= mp_irqs
[idx
].mp_srcbus
;
1068 * Determine IRQ trigger mode (edge or level sensitive):
1070 switch ((mp_irqs
[idx
].mp_irqflag
>>2) & 3)
1072 case 0: /* conforms, ie. bus-type dependent */
1073 if (test_bit(bus
, mp_bus_not_pci
))
1074 trigger
= default_ISA_trigger(idx
);
1076 trigger
= default_PCI_trigger(idx
);
1077 #if defined(CONFIG_EISA) || defined(CONFIG_MCA)
1078 switch (mp_bus_id_to_type
[bus
]) {
1079 case MP_BUS_ISA
: /* ISA pin */
1081 /* set before the switch */
1084 case MP_BUS_EISA
: /* EISA pin */
1086 trigger
= default_EISA_trigger(idx
);
1089 case MP_BUS_PCI
: /* PCI pin */
1091 /* set before the switch */
1094 case MP_BUS_MCA
: /* MCA pin */
1096 trigger
= default_MCA_trigger(idx
);
1101 printk(KERN_WARNING
"broken BIOS!!\n");
1113 case 2: /* reserved */
1115 printk(KERN_WARNING
"broken BIOS!!\n");
1124 default: /* invalid */
1126 printk(KERN_WARNING
"broken BIOS!!\n");
static inline int irq_polarity(int idx)
{
	return MPBIOS_polarity(idx);
}

static inline int irq_trigger(int idx)
{
	return MPBIOS_trigger(idx);
}

int (*ioapic_renumber_irq)(int ioapic, int irq);
static int pin_2_irq(int idx, int apic, int pin)
{
	int irq, i;
	int bus = mp_irqs[idx].mp_srcbus;

	/*
	 * Debugging check, we are in big trouble if this message pops up!
	 */
	if (mp_irqs[idx].mp_dstirq != pin)
		printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");

	if (test_bit(bus, mp_bus_not_pci)) {
		irq = mp_irqs[idx].mp_srcbusirq;
	} else {
		/*
		 * PCI IRQs are mapped in order
		 */
		i = irq = 0;
		while (i < apic)
			irq += nr_ioapic_registers[i++];
		irq += pin;
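		/*
		 * Example (assuming the usual enumeration): with a 24-pin
		 * IO-APIC 0, pin 5 of IO-APIC 1 becomes irq = 24 + 5 = 29;
		 * PCI GSIs are numbered consecutively across the IO-APICs
		 * in discovery order.
		 */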
		/*
		 * For MPS mode, so far only needed by ES7000 platform
		 */
		if (ioapic_renumber_irq)
			irq = ioapic_renumber_irq(apic, irq);
	}

#ifdef CONFIG_X86_32
	/*
	 * PCI IRQ command line redirection. Yes, limits are hardcoded.
	 */
	if ((pin >= 16) && (pin <= 23)) {
		if (pirq_entries[pin-16] != -1) {
			if (!pirq_entries[pin-16]) {
				apic_printk(APIC_VERBOSE, KERN_DEBUG
						"disabling PIRQ%d\n", pin-16);
			} else {
				irq = pirq_entries[pin-16];
				apic_printk(APIC_VERBOSE, KERN_DEBUG
						"using PIRQ%d -> IRQ %d\n",
						pin-16, irq);
			}
		}
	}
#endif
	return irq;
}
void lock_vector_lock(void)
{
	/* Used so that the set of online cpus does not change
	 * during assign_irq_vector.
	 */
	spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
	spin_unlock(&vector_lock);
}
static int __assign_irq_vector(int irq, cpumask_t mask)
{
	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
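	/*
	 * Concretely: the priority level is (vector >> 4), so e.g. all of
	 * 0x30-0x3f share level 3. Allocations are therefore spread across
	 * different 16-vector blocks, and vector 0x80 (the int 0x80 system
	 * call gate) is skipped.
	 */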
	static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
	unsigned int old_vector;
	int cpu;
	struct irq_cfg *cfg;

	cfg = irq_cfg(irq);

	/* Only try and allocate irqs on cpus that are present */
	cpus_and(mask, mask, cpu_online_map);

	if ((cfg->move_in_progress) || cfg->move_cleanup_count)
		return -EBUSY;

	old_vector = cfg->vector;
	if (old_vector) {
		cpumask_t tmp;
		cpus_and(tmp, cfg->domain, mask);
		if (!cpus_empty(tmp))
			return 0;
	}

	for_each_cpu_mask_nr(cpu, mask) {
		cpumask_t domain, new_mask;
		int new_cpu;
		int vector, offset;

		domain = vector_allocation_domain(cpu);
		cpus_and(new_mask, domain, cpu_online_map);

		vector = current_vector;
		offset = current_offset;
next:
		vector += 8;
		if (vector >= first_system_vector) {
			/* If we run out of vectors on large boxen, must share them. */
			offset = (offset + 1) % 8;
			vector = FIRST_DEVICE_VECTOR + offset;
		}
		if (unlikely(current_vector == vector))
			continue;
#ifdef CONFIG_X86_64
		if (vector == IA32_SYSCALL_VECTOR)
			goto next;
#else
		if (vector == SYSCALL_VECTOR)
			goto next;
#endif
		for_each_cpu_mask_nr(new_cpu, new_mask)
			if (per_cpu(vector_irq, new_cpu)[vector] != -1)
				goto next;
		/* Found one! */
		current_vector = vector;
		current_offset = offset;
		if (old_vector) {
			cfg->move_in_progress = 1;
			cfg->old_domain = cfg->domain;
		}
		for_each_cpu_mask_nr(new_cpu, new_mask)
			per_cpu(vector_irq, new_cpu)[vector] = irq;
		cfg->vector = vector;
		cfg->domain = domain;
		return 0;
	}
	return -ENOSPC;
}
static int assign_irq_vector(int irq, cpumask_t mask)
{
	int err;
	unsigned long flags;

	spin_lock_irqsave(&vector_lock, flags);
	err = __assign_irq_vector(irq, mask);
	spin_unlock_irqrestore(&vector_lock, flags);

	return err;
}
static void __clear_irq_vector(int irq)
{
	struct irq_cfg *cfg;
	cpumask_t mask;
	int cpu, vector;

	cfg = irq_cfg(irq);
	BUG_ON(!cfg->vector);

	vector = cfg->vector;
	cpus_and(mask, cfg->domain, cpu_online_map);
	for_each_cpu_mask_nr(cpu, mask)
		per_cpu(vector_irq, cpu)[vector] = -1;

	cfg->vector = 0;
	cpus_clear(cfg->domain);
}
void __setup_vector_irq(int cpu)
{
	/* Initialize vector_irq on a new cpu */
	/* This function must be called with vector_lock held */
	int irq, vector;
	struct irq_cfg *cfg;

	/* Mark the inuse vectors */
	for_each_irq_cfg(irq, cfg) {
		if (!cpu_isset(cpu, cfg->domain))
			continue;
		vector = cfg->vector;
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
	/* Mark the free vectors */
	for (vector = 0; vector < NR_VECTORS; ++vector) {
		irq = per_cpu(vector_irq, cpu)[vector];
		if (irq < 0)
			continue;

		cfg = irq_cfg(irq);
		if (!cpu_isset(cpu, cfg->domain))
			per_cpu(vector_irq, cpu)[vector] = -1;
	}
}
static struct irq_chip ioapic_chip;
#ifdef CONFIG_INTR_REMAP
static struct irq_chip ir_ioapic_chip;
#endif

#define IOAPIC_AUTO	-1
#define IOAPIC_EDGE	0
#define IOAPIC_LEVEL	1
#ifdef CONFIG_X86_32
static inline int IO_APIC_irq_trigger(int irq)
{
	int apic, idx, pin;

	for (apic = 0; apic < nr_ioapics; apic++) {
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
			idx = find_irq_entry(apic, pin, mp_INT);
			if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
				return irq_trigger(idx);
		}
	}
	/*
	 * nonexistent IRQs are edge default
	 */
	return 0;
}
#else
static inline int IO_APIC_irq_trigger(int irq)
{
	return 1;
}
#endif
1375 static void ioapic_register_intr(int irq
, unsigned long trigger
)
1377 struct irq_desc
*desc
;
1379 /* first time to use this irq_desc */
1381 desc
= irq_to_desc(irq
);
1383 desc
= irq_to_desc_alloc(irq
);
1385 if ((trigger
== IOAPIC_AUTO
&& IO_APIC_irq_trigger(irq
)) ||
1386 trigger
== IOAPIC_LEVEL
)
1387 desc
->status
|= IRQ_LEVEL
;
1389 desc
->status
&= ~IRQ_LEVEL
;
1391 #ifdef CONFIG_INTR_REMAP
1392 if (irq_remapped(irq
)) {
1393 desc
->status
|= IRQ_MOVE_PCNTXT
;
1395 set_irq_chip_and_handler_name(irq
, &ir_ioapic_chip
,
1399 set_irq_chip_and_handler_name(irq
, &ir_ioapic_chip
,
1400 handle_edge_irq
, "edge");
1404 if ((trigger
== IOAPIC_AUTO
&& IO_APIC_irq_trigger(irq
)) ||
1405 trigger
== IOAPIC_LEVEL
)
1406 set_irq_chip_and_handler_name(irq
, &ioapic_chip
,
1410 set_irq_chip_and_handler_name(irq
, &ioapic_chip
,
1411 handle_edge_irq
, "edge");
1414 static int setup_ioapic_entry(int apic
, int irq
,
1415 struct IO_APIC_route_entry
*entry
,
1416 unsigned int destination
, int trigger
,
1417 int polarity
, int vector
)
1420 * add it to the IO-APIC irq-routing table:
1422 memset(entry
,0,sizeof(*entry
));
1424 #ifdef CONFIG_INTR_REMAP
1425 if (intr_remapping_enabled
) {
1426 struct intel_iommu
*iommu
= map_ioapic_to_ir(apic
);
1428 struct IR_IO_APIC_route_entry
*ir_entry
=
1429 (struct IR_IO_APIC_route_entry
*) entry
;
1433 panic("No mapping iommu for ioapic %d\n", apic
);
1435 index
= alloc_irte(iommu
, irq
, 1);
1437 panic("Failed to allocate IRTE for ioapic %d\n", apic
);
1439 memset(&irte
, 0, sizeof(irte
));
1442 irte
.dst_mode
= INT_DEST_MODE
;
1443 irte
.trigger_mode
= trigger
;
1444 irte
.dlvry_mode
= INT_DELIVERY_MODE
;
1445 irte
.vector
= vector
;
1446 irte
.dest_id
= IRTE_DEST(destination
);
1448 modify_irte(irq
, &irte
);
1450 ir_entry
->index2
= (index
>> 15) & 0x1;
1452 ir_entry
->format
= 1;
1453 ir_entry
->index
= (index
& 0x7fff);
1457 entry
->delivery_mode
= INT_DELIVERY_MODE
;
1458 entry
->dest_mode
= INT_DEST_MODE
;
1459 entry
->dest
= destination
;
1462 entry
->mask
= 0; /* enable IRQ */
1463 entry
->trigger
= trigger
;
1464 entry
->polarity
= polarity
;
1465 entry
->vector
= vector
;
1467 /* Mask level triggered irqs.
1468 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
1475 static void setup_IO_APIC_irq(int apic
, int pin
, unsigned int irq
,
1476 int trigger
, int polarity
)
1478 struct irq_cfg
*cfg
;
1479 struct IO_APIC_route_entry entry
;
1482 if (!IO_APIC_IRQ(irq
))
1488 if (assign_irq_vector(irq
, mask
))
1491 cpus_and(mask
, cfg
->domain
, mask
);
1493 apic_printk(APIC_VERBOSE
,KERN_DEBUG
1494 "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
1495 "IRQ %d Mode:%i Active:%i)\n",
1496 apic
, mp_ioapics
[apic
].mp_apicid
, pin
, cfg
->vector
,
1497 irq
, trigger
, polarity
);
1500 if (setup_ioapic_entry(mp_ioapics
[apic
].mp_apicid
, irq
, &entry
,
1501 cpu_mask_to_apicid(mask
), trigger
, polarity
,
1503 printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
1504 mp_ioapics
[apic
].mp_apicid
, pin
);
1505 __clear_irq_vector(irq
);
1509 ioapic_register_intr(irq
, trigger
);
1511 disable_8259A_irq(irq
);
1513 ioapic_write_entry(apic
, pin
, entry
);
1516 static void __init
setup_IO_APIC_irqs(void)
1518 int apic
, pin
, idx
, irq
, first_notcon
= 1;
1520 apic_printk(APIC_VERBOSE
, KERN_DEBUG
"init IO_APIC IRQs\n");
1522 for (apic
= 0; apic
< nr_ioapics
; apic
++) {
1523 for (pin
= 0; pin
< nr_ioapic_registers
[apic
]; pin
++) {
1525 idx
= find_irq_entry(apic
,pin
,mp_INT
);
1528 apic_printk(APIC_VERBOSE
, KERN_DEBUG
" IO-APIC (apicid-pin) %d-%d", mp_ioapics
[apic
].mp_apicid
, pin
);
1531 apic_printk(APIC_VERBOSE
, ", %d-%d", mp_ioapics
[apic
].mp_apicid
, pin
);
1534 if (!first_notcon
) {
1535 apic_printk(APIC_VERBOSE
, " not connected.\n");
1539 irq
= pin_2_irq(idx
, apic
, pin
);
1540 #ifdef CONFIG_X86_32
1541 if (multi_timer_check(apic
, irq
))
1544 add_pin_to_irq(irq
, apic
, pin
);
1546 setup_IO_APIC_irq(apic
, pin
, irq
,
1547 irq_trigger(idx
), irq_polarity(idx
));
1552 apic_printk(APIC_VERBOSE
, " not connected.\n");
/*
 * Set up the timer pin, possibly with the 8259A-master behind.
 */
static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
					int vector)
{
	struct IO_APIC_route_entry entry;

#ifdef CONFIG_INTR_REMAP
	if (intr_remapping_enabled)
		return;
#endif

	memset(&entry, 0, sizeof(entry));

	/*
	 * We use logical delivery to get the timer IRQ
	 * to the first CPU.
	 */
	entry.dest_mode = INT_DEST_MODE;
	entry.mask = 1;					/* mask IRQ now */
	entry.dest = cpu_mask_to_apicid(TARGET_CPUS);
	entry.delivery_mode = INT_DELIVERY_MODE;
	entry.polarity = 0;
	entry.trigger = 0;
	entry.vector = vector;

	/*
	 * The timer IRQ doesn't have to know that behind the
	 * scene we may have a 8259A-master in AEOI mode ...
	 */
	set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");

	/*
	 * Add it to the IO-APIC irq-routing table:
	 */
	ioapic_write_entry(apic, pin, entry);
}
1595 __apicdebuginit(void) print_IO_APIC(void)
1598 union IO_APIC_reg_00 reg_00
;
1599 union IO_APIC_reg_01 reg_01
;
1600 union IO_APIC_reg_02 reg_02
;
1601 union IO_APIC_reg_03 reg_03
;
1602 unsigned long flags
;
1603 struct irq_cfg
*cfg
;
1606 if (apic_verbosity
== APIC_QUIET
)
1609 printk(KERN_DEBUG
"number of MP IRQ sources: %d.\n", mp_irq_entries
);
1610 for (i
= 0; i
< nr_ioapics
; i
++)
1611 printk(KERN_DEBUG
"number of IO-APIC #%d registers: %d.\n",
1612 mp_ioapics
[i
].mp_apicid
, nr_ioapic_registers
[i
]);
1615 * We are a bit conservative about what we expect. We have to
1616 * know about every hardware change ASAP.
1618 printk(KERN_INFO
"testing the IO APIC.......................\n");
1620 for (apic
= 0; apic
< nr_ioapics
; apic
++) {
1622 spin_lock_irqsave(&ioapic_lock
, flags
);
1623 reg_00
.raw
= io_apic_read(apic
, 0);
1624 reg_01
.raw
= io_apic_read(apic
, 1);
1625 if (reg_01
.bits
.version
>= 0x10)
1626 reg_02
.raw
= io_apic_read(apic
, 2);
1627 if (reg_01
.bits
.version
>= 0x20)
1628 reg_03
.raw
= io_apic_read(apic
, 3);
1629 spin_unlock_irqrestore(&ioapic_lock
, flags
);
1632 printk(KERN_DEBUG
"IO APIC #%d......\n", mp_ioapics
[apic
].mp_apicid
);
1633 printk(KERN_DEBUG
".... register #00: %08X\n", reg_00
.raw
);
1634 printk(KERN_DEBUG
"....... : physical APIC id: %02X\n", reg_00
.bits
.ID
);
1635 printk(KERN_DEBUG
"....... : Delivery Type: %X\n", reg_00
.bits
.delivery_type
);
1636 printk(KERN_DEBUG
"....... : LTS : %X\n", reg_00
.bits
.LTS
);
1638 printk(KERN_DEBUG
".... register #01: %08X\n", *(int *)®_01
);
1639 printk(KERN_DEBUG
"....... : max redirection entries: %04X\n", reg_01
.bits
.entries
);
1641 printk(KERN_DEBUG
"....... : PRQ implemented: %X\n", reg_01
.bits
.PRQ
);
1642 printk(KERN_DEBUG
"....... : IO APIC version: %04X\n", reg_01
.bits
.version
);
1645 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
1646 * but the value of reg_02 is read as the previous read register
1647 * value, so ignore it if reg_02 == reg_01.
1649 if (reg_01
.bits
.version
>= 0x10 && reg_02
.raw
!= reg_01
.raw
) {
1650 printk(KERN_DEBUG
".... register #02: %08X\n", reg_02
.raw
);
1651 printk(KERN_DEBUG
"....... : arbitration: %02X\n", reg_02
.bits
.arbitration
);
1655 * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
1656 * or reg_03, but the value of reg_0[23] is read as the previous read
1657 * register value, so ignore it if reg_03 == reg_0[12].
1659 if (reg_01
.bits
.version
>= 0x20 && reg_03
.raw
!= reg_02
.raw
&&
1660 reg_03
.raw
!= reg_01
.raw
) {
1661 printk(KERN_DEBUG
".... register #03: %08X\n", reg_03
.raw
);
1662 printk(KERN_DEBUG
"....... : Boot DT : %X\n", reg_03
.bits
.boot_DT
);
1665 printk(KERN_DEBUG
".... IRQ redirection table:\n");
1667 printk(KERN_DEBUG
" NR Dst Mask Trig IRR Pol"
1668 " Stat Dmod Deli Vect: \n");
1670 for (i
= 0; i
<= reg_01
.bits
.entries
; i
++) {
1671 struct IO_APIC_route_entry entry
;
1673 entry
= ioapic_read_entry(apic
, i
);
1675 printk(KERN_DEBUG
" %02x %03X ",
1680 printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
1685 entry
.delivery_status
,
1687 entry
.delivery_mode
,
1692 printk(KERN_DEBUG
"IRQ to pin mappings:\n");
1693 for_each_irq_cfg(irq
, cfg
) {
1694 struct irq_pin_list
*entry
= cfg
->irq_2_pin
;
1697 printk(KERN_DEBUG
"IRQ%d ", irq
);
1699 printk("-> %d:%d", entry
->apic
, entry
->pin
);
1702 entry
= entry
->next
;
1707 printk(KERN_INFO
".................................... done.\n");
1712 __apicdebuginit(void) print_APIC_bitfield(int base
)
1717 if (apic_verbosity
== APIC_QUIET
)
1720 printk(KERN_DEBUG
"0123456789abcdef0123456789abcdef\n" KERN_DEBUG
);
1721 for (i
= 0; i
< 8; i
++) {
1722 v
= apic_read(base
+ i
*0x10);
1723 for (j
= 0; j
< 32; j
++) {
1733 __apicdebuginit(void) print_local_APIC(void *dummy
)
1735 unsigned int v
, ver
, maxlvt
;
1738 if (apic_verbosity
== APIC_QUIET
)
1741 printk("\n" KERN_DEBUG
"printing local APIC contents on CPU#%d/%d:\n",
1742 smp_processor_id(), hard_smp_processor_id());
1743 v
= apic_read(APIC_ID
);
1744 printk(KERN_INFO
"... APIC ID: %08x (%01x)\n", v
, read_apic_id());
1745 v
= apic_read(APIC_LVR
);
1746 printk(KERN_INFO
"... APIC VERSION: %08x\n", v
);
1747 ver
= GET_APIC_VERSION(v
);
1748 maxlvt
= lapic_get_maxlvt();
1750 v
= apic_read(APIC_TASKPRI
);
1751 printk(KERN_DEBUG
"... APIC TASKPRI: %08x (%02x)\n", v
, v
& APIC_TPRI_MASK
);
1753 if (APIC_INTEGRATED(ver
)) { /* !82489DX */
1754 if (!APIC_XAPIC(ver
)) {
1755 v
= apic_read(APIC_ARBPRI
);
1756 printk(KERN_DEBUG
"... APIC ARBPRI: %08x (%02x)\n", v
,
1757 v
& APIC_ARBPRI_MASK
);
1759 v
= apic_read(APIC_PROCPRI
);
1760 printk(KERN_DEBUG
"... APIC PROCPRI: %08x\n", v
);
1764 * Remote read supported only in the 82489DX and local APIC for
1765 * Pentium processors.
1767 if (!APIC_INTEGRATED(ver
) || maxlvt
== 3) {
1768 v
= apic_read(APIC_RRR
);
1769 printk(KERN_DEBUG
"... APIC RRR: %08x\n", v
);
1772 v
= apic_read(APIC_LDR
);
1773 printk(KERN_DEBUG
"... APIC LDR: %08x\n", v
);
1774 if (!x2apic_enabled()) {
1775 v
= apic_read(APIC_DFR
);
1776 printk(KERN_DEBUG
"... APIC DFR: %08x\n", v
);
1778 v
= apic_read(APIC_SPIV
);
1779 printk(KERN_DEBUG
"... APIC SPIV: %08x\n", v
);
1781 printk(KERN_DEBUG
"... APIC ISR field:\n");
1782 print_APIC_bitfield(APIC_ISR
);
1783 printk(KERN_DEBUG
"... APIC TMR field:\n");
1784 print_APIC_bitfield(APIC_TMR
);
1785 printk(KERN_DEBUG
"... APIC IRR field:\n");
1786 print_APIC_bitfield(APIC_IRR
);
1788 if (APIC_INTEGRATED(ver
)) { /* !82489DX */
1789 if (maxlvt
> 3) /* Due to the Pentium erratum 3AP. */
1790 apic_write(APIC_ESR
, 0);
1792 v
= apic_read(APIC_ESR
);
1793 printk(KERN_DEBUG
"... APIC ESR: %08x\n", v
);
1796 icr
= apic_icr_read();
1797 printk(KERN_DEBUG
"... APIC ICR: %08x\n", (u32
)icr
);
1798 printk(KERN_DEBUG
"... APIC ICR2: %08x\n", (u32
)(icr
>> 32));
1800 v
= apic_read(APIC_LVTT
);
1801 printk(KERN_DEBUG
"... APIC LVTT: %08x\n", v
);
1803 if (maxlvt
> 3) { /* PC is LVT#4. */
1804 v
= apic_read(APIC_LVTPC
);
1805 printk(KERN_DEBUG
"... APIC LVTPC: %08x\n", v
);
1807 v
= apic_read(APIC_LVT0
);
1808 printk(KERN_DEBUG
"... APIC LVT0: %08x\n", v
);
1809 v
= apic_read(APIC_LVT1
);
1810 printk(KERN_DEBUG
"... APIC LVT1: %08x\n", v
);
1812 if (maxlvt
> 2) { /* ERR is LVT#3. */
1813 v
= apic_read(APIC_LVTERR
);
1814 printk(KERN_DEBUG
"... APIC LVTERR: %08x\n", v
);
1817 v
= apic_read(APIC_TMICT
);
1818 printk(KERN_DEBUG
"... APIC TMICT: %08x\n", v
);
1819 v
= apic_read(APIC_TMCCT
);
1820 printk(KERN_DEBUG
"... APIC TMCCT: %08x\n", v
);
1821 v
= apic_read(APIC_TDCR
);
1822 printk(KERN_DEBUG
"... APIC TDCR: %08x\n", v
);
1826 __apicdebuginit(void) print_all_local_APICs(void)
1831 for_each_online_cpu(cpu
)
1832 smp_call_function_single(cpu
, print_local_APIC
, NULL
, 1);
1836 __apicdebuginit(void) print_PIC(void)
1839 unsigned long flags
;
1841 if (apic_verbosity
== APIC_QUIET
)
1844 printk(KERN_DEBUG
"\nprinting PIC contents\n");
1846 spin_lock_irqsave(&i8259A_lock
, flags
);
1848 v
= inb(0xa1) << 8 | inb(0x21);
1849 printk(KERN_DEBUG
"... PIC IMR: %04x\n", v
);
1851 v
= inb(0xa0) << 8 | inb(0x20);
1852 printk(KERN_DEBUG
"... PIC IRR: %04x\n", v
);
1856 v
= inb(0xa0) << 8 | inb(0x20);
1860 spin_unlock_irqrestore(&i8259A_lock
, flags
);
1862 printk(KERN_DEBUG
"... PIC ISR: %04x\n", v
);
1864 v
= inb(0x4d1) << 8 | inb(0x4d0);
1865 printk(KERN_DEBUG
"... PIC ELCR: %04x\n", v
);
1868 __apicdebuginit(int) print_all_ICs(void)
1871 print_all_local_APICs();
1877 fs_initcall(print_all_ICs
);
1880 /* Where if anywhere is the i8259 connect in external int mode */
1881 static struct { int pin
, apic
; } ioapic_i8259
= { -1, -1 };
1883 void __init
enable_IO_APIC(void)
1885 union IO_APIC_reg_01 reg_01
;
1886 int i8259_apic
, i8259_pin
;
1888 unsigned long flags
;
1890 #ifdef CONFIG_X86_32
1893 for (i
= 0; i
< MAX_PIRQS
; i
++)
1894 pirq_entries
[i
] = -1;
1898 * The number of IO-APIC IRQ registers (== #pins):
1900 for (apic
= 0; apic
< nr_ioapics
; apic
++) {
1901 spin_lock_irqsave(&ioapic_lock
, flags
);
1902 reg_01
.raw
= io_apic_read(apic
, 1);
1903 spin_unlock_irqrestore(&ioapic_lock
, flags
);
1904 nr_ioapic_registers
[apic
] = reg_01
.bits
.entries
+1;
1906 for(apic
= 0; apic
< nr_ioapics
; apic
++) {
1908 /* See if any of the pins is in ExtINT mode */
1909 for (pin
= 0; pin
< nr_ioapic_registers
[apic
]; pin
++) {
1910 struct IO_APIC_route_entry entry
;
1911 entry
= ioapic_read_entry(apic
, pin
);
1913 /* If the interrupt line is enabled and in ExtInt mode
1914 * I have found the pin where the i8259 is connected.
1916 if ((entry
.mask
== 0) && (entry
.delivery_mode
== dest_ExtINT
)) {
1917 ioapic_i8259
.apic
= apic
;
1918 ioapic_i8259
.pin
= pin
;
1924 /* Look to see what if the MP table has reported the ExtINT */
1925 /* If we could not find the appropriate pin by looking at the ioapic
1926 * the i8259 probably is not connected the ioapic but give the
1927 * mptable a chance anyway.
1929 i8259_pin
= find_isa_irq_pin(0, mp_ExtINT
);
1930 i8259_apic
= find_isa_irq_apic(0, mp_ExtINT
);
1931 /* Trust the MP table if nothing is setup in the hardware */
1932 if ((ioapic_i8259
.pin
== -1) && (i8259_pin
>= 0)) {
1933 printk(KERN_WARNING
"ExtINT not setup in hardware but reported by MP table\n");
1934 ioapic_i8259
.pin
= i8259_pin
;
1935 ioapic_i8259
.apic
= i8259_apic
;
1937 /* Complain if the MP table and the hardware disagree */
1938 if (((ioapic_i8259
.apic
!= i8259_apic
) || (ioapic_i8259
.pin
!= i8259_pin
)) &&
1939 (i8259_pin
>= 0) && (ioapic_i8259
.pin
>= 0))
1941 printk(KERN_WARNING
"ExtINT in hardware and MP table differ\n");
1945 * Do not trust the IO-APIC being empty at bootup
1951 * Not an __init, needed by the reboot code
1953 void disable_IO_APIC(void)
1956 * Clear the IO-APIC before rebooting:
1961 * If the i8259 is routed through an IOAPIC
1962 * Put that IOAPIC in virtual wire mode
1963 * so legacy interrupts can be delivered.
1965 if (ioapic_i8259
.pin
!= -1) {
1966 struct IO_APIC_route_entry entry
;
1968 memset(&entry
, 0, sizeof(entry
));
1969 entry
.mask
= 0; /* Enabled */
1970 entry
.trigger
= 0; /* Edge */
1972 entry
.polarity
= 0; /* High */
1973 entry
.delivery_status
= 0;
1974 entry
.dest_mode
= 0; /* Physical */
1975 entry
.delivery_mode
= dest_ExtINT
; /* ExtInt */
1977 entry
.dest
= read_apic_id();
1980 * Add it to the IO-APIC irq-routing table:
1982 ioapic_write_entry(ioapic_i8259
.apic
, ioapic_i8259
.pin
, entry
);
1985 disconnect_bsp_APIC(ioapic_i8259
.pin
!= -1);
1988 #ifdef CONFIG_X86_32
1990 * function to set the IO-APIC physical IDs based on the
1991 * values stored in the MPC table.
1993 * by Matt Domsch <Matt_Domsch@dell.com> Tue Dec 21 12:25:05 CST 1999
1996 static void __init
setup_ioapic_ids_from_mpc(void)
1998 union IO_APIC_reg_00 reg_00
;
1999 physid_mask_t phys_id_present_map
;
2002 unsigned char old_id
;
2003 unsigned long flags
;
2005 if (x86_quirks
->setup_ioapic_ids
&& x86_quirks
->setup_ioapic_ids())
2009 * Don't check I/O APIC IDs for xAPIC systems. They have
2010 * no meaning without the serial APIC bus.
2012 if (!(boot_cpu_data
.x86_vendor
== X86_VENDOR_INTEL
)
2013 || APIC_XAPIC(apic_version
[boot_cpu_physical_apicid
]))
2016 * This is broken; anything with a real cpu count has to
2017 * circumvent this idiocy regardless.
2019 phys_id_present_map
= ioapic_phys_id_map(phys_cpu_present_map
);
2022 * Set the IOAPIC ID to the value stored in the MPC table.
2024 for (apic
= 0; apic
< nr_ioapics
; apic
++) {
2026 /* Read the register 0 value */
2027 spin_lock_irqsave(&ioapic_lock
, flags
);
2028 reg_00
.raw
= io_apic_read(apic
, 0);
2029 spin_unlock_irqrestore(&ioapic_lock
, flags
);
2031 old_id
= mp_ioapics
[apic
].mp_apicid
;
2033 if (mp_ioapics
[apic
].mp_apicid
>= get_physical_broadcast()) {
2034 printk(KERN_ERR
"BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
2035 apic
, mp_ioapics
[apic
].mp_apicid
);
2036 printk(KERN_ERR
"... fixing up to %d. (tell your hw vendor)\n",
2038 mp_ioapics
[apic
].mp_apicid
= reg_00
.bits
.ID
;
2042 * Sanity check, is the ID really free? Every APIC in a
2043 * system must have a unique ID or we get lots of nice
2044 * 'stuck on smp_invalidate_needed IPI wait' messages.
2046 if (check_apicid_used(phys_id_present_map
,
2047 mp_ioapics
[apic
].mp_apicid
)) {
2048 printk(KERN_ERR
"BIOS bug, IO-APIC#%d ID %d is already used!...\n",
2049 apic
, mp_ioapics
[apic
].mp_apicid
);
2050 for (i
= 0; i
< get_physical_broadcast(); i
++)
2051 if (!physid_isset(i
, phys_id_present_map
))
2053 if (i
>= get_physical_broadcast())
2054 panic("Max APIC ID exceeded!\n");
2055 printk(KERN_ERR
"... fixing up to %d. (tell your hw vendor)\n",
2057 physid_set(i
, phys_id_present_map
);
2058 mp_ioapics
[apic
].mp_apicid
= i
;
2061 tmp
= apicid_to_cpu_present(mp_ioapics
[apic
].mp_apicid
);
2062 apic_printk(APIC_VERBOSE
, "Setting %d in the "
2063 "phys_id_present_map\n",
2064 mp_ioapics
[apic
].mp_apicid
);
2065 physids_or(phys_id_present_map
, phys_id_present_map
, tmp
);
2070 * We need to adjust the IRQ routing table
2071 * if the ID changed.
2073 if (old_id
!= mp_ioapics
[apic
].mp_apicid
)
2074 for (i
= 0; i
< mp_irq_entries
; i
++)
2075 if (mp_irqs
[i
].mp_dstapic
== old_id
)
2076 mp_irqs
[i
].mp_dstapic
2077 = mp_ioapics
[apic
].mp_apicid
;
2080 * Read the right value from the MPC table and
2081 * write it into the ID register.
2083 apic_printk(APIC_VERBOSE
, KERN_INFO
2084 "...changing IO-APIC physical APIC ID to %d ...",
2085 mp_ioapics
[apic
].mp_apicid
);
2087 reg_00
.bits
.ID
= mp_ioapics
[apic
].mp_apicid
;
2088 spin_lock_irqsave(&ioapic_lock
, flags
);
2089 io_apic_write(apic
, 0, reg_00
.raw
);
2090 spin_unlock_irqrestore(&ioapic_lock
, flags
);
2095 spin_lock_irqsave(&ioapic_lock
, flags
);
2096 reg_00
.raw
= io_apic_read(apic
, 0);
2097 spin_unlock_irqrestore(&ioapic_lock
, flags
);
2098 if (reg_00
.bits
.ID
!= mp_ioapics
[apic
].mp_apicid
)
2099 printk("could not set ID!\n");
2101 apic_printk(APIC_VERBOSE
, " ok.\n");
int no_timer_check __initdata;

static int __init notimercheck(char *s)
{
	no_timer_check = 1;
	return 1;
}
__setup("no_timer_check", notimercheck);
/*
 * There is a nasty bug in some older SMP boards, their mptable lies
 * about the timer IRQ. We do the following to work around the situation:
 *
 *	- timer IRQ defaults to IO-APIC IRQ
 *	- if this function detects that timer IRQs are defunct, then we fall
 *	  back to ISA timer IRQs
 */
static int __init timer_irq_works(void)
{
	unsigned long t1 = jiffies;
	unsigned long flags;

	if (no_timer_check)
		return 1;

	local_save_flags(flags);
	local_irq_enable();
	/* Let ten ticks pass... */
	mdelay((10 * 1000) / HZ);
	local_irq_restore(flags);

	/*
	 * Expect a few ticks at least, to be sure some possible
	 * glue logic does not lock up after one or two first
	 * ticks in a non-ExtINT mode.  Also the local APIC
	 * might have cached one ExtINT interrupt.  Finally, at
	 * least one tick may be lost due to delays.
	 */
	if (time_after(jiffies, t1 + 4))
		return 1;
	return 0;
}
2152 * In the SMP+IOAPIC case it might happen that there are an unspecified
2153 * number of pending IRQ events unhandled. These cases are very rare,
2154 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
2155 * better to do it this way as thus we do not have to be aware of
2156 * 'pending' interrupts in the IRQ path, except at this point.
2159 * Edge triggered needs to resend any interrupt
2160 * that was delayed but this is now handled in the device
2165 * Starting up a edge-triggered IO-APIC interrupt is
2166 * nasty - we need to make sure that we get the edge.
2167 * If it is already asserted for some reason, we need
2168 * return 1 to indicate that is was pending.
2170 * This is not complete - we should be able to fake
2171 * an edge even if it isn't on the 8259A...
2174 static unsigned int startup_ioapic_irq(unsigned int irq
)
2176 int was_pending
= 0;
2177 unsigned long flags
;
2179 spin_lock_irqsave(&ioapic_lock
, flags
);
2181 disable_8259A_irq(irq
);
2182 if (i8259A_irq_pending(irq
))
2185 __unmask_IO_APIC_irq(irq
);
2186 spin_unlock_irqrestore(&ioapic_lock
, flags
);
2191 #ifdef CONFIG_X86_64
2192 static int ioapic_retrigger_irq(unsigned int irq
)
2195 struct irq_cfg
*cfg
= irq_cfg(irq
);
2196 unsigned long flags
;
2198 spin_lock_irqsave(&vector_lock
, flags
);
2199 send_IPI_mask(cpumask_of_cpu(first_cpu(cfg
->domain
)), cfg
->vector
);
2200 spin_unlock_irqrestore(&vector_lock
, flags
);
2205 static int ioapic_retrigger_irq(unsigned int irq
)
2207 send_IPI_self(irq_cfg(irq
)->vector
);
2214 * Level and edge triggered IO-APIC interrupts need different handling,
2215 * so we use two separate IRQ descriptors. Edge triggered IRQs can be
2216 * handled with the level-triggered descriptor, but that one has slightly
2217 * more overhead. Level-triggered interrupts cannot be handled with the
2218 * edge-triggered handler, without risking IRQ storms and other ugly
2224 #ifdef CONFIG_INTR_REMAP
2225 static void ir_irq_migration(struct work_struct
*work
);
2227 static DECLARE_DELAYED_WORK(ir_migration_work
, ir_irq_migration
);
2230 * Migrate the IO-APIC irq in the presence of intr-remapping.
2232 * For edge triggered, irq migration is a simple atomic update(of vector
2233 * and cpu destination) of IRTE and flush the hardware cache.
2235 * For level triggered, we need to modify the io-apic RTE aswell with the update
2236 * vector information, along with modifying IRTE with vector and destination.
2237 * So irq migration for level triggered is little bit more complex compared to
2238 * edge triggered migration. But the good news is, we use the same algorithm
2239 * for level triggered migration as we have today, only difference being,
2240 * we now initiate the irq migration from process context instead of the
2241 * interrupt context.
2243 * In future, when we do a directed EOI (combined with cpu EOI broadcast
2244 * suppression) to the IO-APIC, level triggered irq migration will also be
2245 * as simple as edge triggered migration and we can do the irq migration
2246 * with a simple atomic update to IO-APIC RTE.
2248 static void migrate_ioapic_irq(int irq
, cpumask_t mask
)
2250 struct irq_cfg
*cfg
;
2251 struct irq_desc
*desc
;
2252 cpumask_t tmp
, cleanup_mask
;
2254 int modify_ioapic_rte
;
2256 unsigned long flags
;
2258 cpus_and(tmp
, mask
, cpu_online_map
);
2259 if (cpus_empty(tmp
))
2262 if (get_irte(irq
, &irte
))
2265 if (assign_irq_vector(irq
, mask
))
2269 cpus_and(tmp
, cfg
->domain
, mask
);
2270 dest
= cpu_mask_to_apicid(tmp
);
2272 desc
= irq_to_desc(irq
);
2273 modify_ioapic_rte
= desc
->status
& IRQ_LEVEL
;
2274 if (modify_ioapic_rte
) {
2275 spin_lock_irqsave(&ioapic_lock
, flags
);
2276 __target_IO_APIC_irq(irq
, dest
, cfg
->vector
);
2277 spin_unlock_irqrestore(&ioapic_lock
, flags
);
2280 irte
.vector
= cfg
->vector
;
2281 irte
.dest_id
= IRTE_DEST(dest
);
2284 * Modified the IRTE and flushes the Interrupt entry cache.
2286 modify_irte(irq
, &irte
);
2288 if (cfg
->move_in_progress
) {
2289 cpus_and(cleanup_mask
, cfg
->old_domain
, cpu_online_map
);
2290 cfg
->move_cleanup_count
= cpus_weight(cleanup_mask
);
2291 send_IPI_mask(cleanup_mask
, IRQ_MOVE_CLEANUP_VECTOR
);
2292 cfg
->move_in_progress
= 0;
2295 desc
->affinity
= mask
;
2298 static int migrate_irq_remapped_level(int irq
)
2301 struct irq_desc
*desc
= irq_to_desc(irq
);
2303 mask_IO_APIC_irq(irq
);
2305 if (io_apic_level_ack_pending(irq
)) {
2307 * Interrupt in progress. Migrating irq now will change the
2308 * vector information in the IO-APIC RTE and that will confuse
2309 * the EOI broadcast performed by cpu.
2310 * So, delay the irq migration to the next instance.
2312 schedule_delayed_work(&ir_migration_work
, 1);
2316 /* everthing is clear. we have right of way */
2317 migrate_ioapic_irq(irq
, desc
->pending_mask
);
2320 desc
->status
&= ~IRQ_MOVE_PENDING
;
2321 cpus_clear(desc
->pending_mask
);
2324 unmask_IO_APIC_irq(irq
);
2328 static void ir_irq_migration(struct work_struct
*work
)
2331 struct irq_desc
*desc
;
2333 for_each_irq_desc(irq
, desc
) {
2334 if (desc
->status
& IRQ_MOVE_PENDING
) {
2335 unsigned long flags
;
2337 spin_lock_irqsave(&desc
->lock
, flags
);
2338 if (!desc
->chip
->set_affinity
||
2339 !(desc
->status
& IRQ_MOVE_PENDING
)) {
2340 desc
->status
&= ~IRQ_MOVE_PENDING
;
2341 spin_unlock_irqrestore(&desc
->lock
, flags
);
2345 desc
->chip
->set_affinity(irq
, desc
->pending_mask
);
2346 spin_unlock_irqrestore(&desc
->lock
, flags
);
2352 * Migrates the IRQ destination in the process context.
2354 static void set_ir_ioapic_affinity_irq(unsigned int irq
, cpumask_t mask
)
2356 struct irq_desc
*desc
= irq_to_desc(irq
);
2358 if (desc
->status
& IRQ_LEVEL
) {
2359 desc
->status
|= IRQ_MOVE_PENDING
;
2360 desc
->pending_mask
= mask
;
2361 migrate_irq_remapped_level(irq
);
2365 migrate_ioapic_irq(irq
, mask
);
2369 asmlinkage
void smp_irq_move_cleanup_interrupt(void)
2371 unsigned vector
, me
;
2373 #ifdef CONFIG_X86_64
2378 me
= smp_processor_id();
2379 for (vector
= FIRST_EXTERNAL_VECTOR
; vector
< NR_VECTORS
; vector
++) {
2381 struct irq_desc
*desc
;
2382 struct irq_cfg
*cfg
;
2383 irq
= __get_cpu_var(vector_irq
)[vector
];
2385 desc
= irq_to_desc(irq
);
2390 spin_lock(&desc
->lock
);
2391 if (!cfg
->move_cleanup_count
)
2394 if ((vector
== cfg
->vector
) && cpu_isset(me
, cfg
->domain
))
2397 __get_cpu_var(vector_irq
)[vector
] = -1;
2398 cfg
->move_cleanup_count
--;
2400 spin_unlock(&desc
->lock
);
2406 static void irq_complete_move(unsigned int irq
)
2408 struct irq_cfg
*cfg
= irq_cfg(irq
);
2409 unsigned vector
, me
;
2411 if (likely(!cfg
->move_in_progress
))
2414 vector
= ~get_irq_regs()->orig_ax
;
2415 me
= smp_processor_id();
2416 if ((vector
== cfg
->vector
) && cpu_isset(me
, cfg
->domain
)) {
2417 cpumask_t cleanup_mask
;
2419 cpus_and(cleanup_mask
, cfg
->old_domain
, cpu_online_map
);
2420 cfg
->move_cleanup_count
= cpus_weight(cleanup_mask
);
2421 send_IPI_mask(cleanup_mask
, IRQ_MOVE_CLEANUP_VECTOR
);
2422 cfg
->move_in_progress
= 0;
2426 static inline void irq_complete_move(unsigned int irq
) {}
#ifdef CONFIG_INTR_REMAP
static void ack_x2apic_level(unsigned int irq)
{
	ack_x2APIC_irq();
}

static void ack_x2apic_edge(unsigned int irq)
{
	ack_x2APIC_irq();
}
#endif

static void ack_apic_edge(unsigned int irq)
{
	irq_complete_move(irq);
	move_native_irq(irq);
	ack_APIC_irq();
}

#ifdef CONFIG_X86_32
atomic_t irq_mis_count;
#endif

static void ack_apic_level(unsigned int irq)
{
#ifdef CONFIG_X86_32
	unsigned long v;
	int i;
#endif
	int do_unmask_irq = 0;

	irq_complete_move(irq);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	/* If we are moving the irq we need to mask it */
	if (unlikely(irq_to_desc(irq)->status & IRQ_MOVE_PENDING)) {
		do_unmask_irq = 1;
		mask_IO_APIC_irq(irq);
	}
#endif

#ifdef CONFIG_X86_32
	/*
	 * It appears there is an erratum which affects at least version 0x11
	 * of I/O APIC (that's the 82093AA and cores integrated into various
	 * chipsets).  Under certain conditions a level-triggered interrupt is
	 * erroneously delivered as an edge-triggered one but the respective
	 * IRR bit gets set nevertheless.  As a result the I/O unit expects an
	 * EOI message but it will never arrive and further interrupts are
	 * blocked from the source.  The exact reason is so far unknown, but
	 * the phenomenon was observed when two consecutive interrupt requests
	 * from a given source get delivered to the same CPU and the source is
	 * temporarily disabled in between.
	 *
	 * A workaround is to simulate an EOI message manually.  We achieve it
	 * by setting the trigger mode to edge and then to level when the edge
	 * trigger mode gets detected in the TMR of a local APIC for a
	 * level-triggered interrupt.  We mask the source for the time of the
	 * operation to prevent an edge-triggered interrupt escaping meanwhile.
	 * The idea is from Manfred Spraul.  --macro
	 */
	i = irq_cfg(irq)->vector;

	v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
#endif

	/*
	 * We must acknowledge the irq before we move it or the acknowledge will
	 * not propagate properly.
	 */
	ack_APIC_irq();

	/* Now we can move and re-enable the irq */
	if (unlikely(do_unmask_irq)) {
		/* Only migrate the irq if the ack has been received.
		 *
		 * On rare occasions the broadcast level triggered ack gets
		 * delayed going to ioapics, and if we reprogram the
		 * vector while Remote IRR is still set the irq will never
		 * fire again.
		 *
		 * To prevent this scenario we read the Remote IRR bit
		 * of the ioapic.  This has two effects.
		 * - On any sane system the read of the ioapic will
		 *   flush writes (and acks) going to the ioapic from
		 *   this cpu.
		 * - We get to see if the ACK has actually been delivered.
		 *
		 * Based on failed experiments of reprogramming the
		 * ioapic entry from outside of irq context starting
		 * with masking the ioapic entry and then polling until
		 * Remote IRR was clear before reprogramming the
		 * ioapic I don't trust the Remote IRR bit to be
		 * completely accurate.
		 *
		 * However there appears to be no other way to plug
		 * this race, so if the Remote IRR bit is not
		 * accurate and is causing problems then it is a hardware bug
		 * and you can go talk to the chipset vendor about it.
		 */
		if (!io_apic_level_ack_pending(irq))
			move_masked_irq(irq);
		unmask_IO_APIC_irq(irq);
	}

#ifdef CONFIG_X86_32
	if (!(v & (1 << (i & 0x1f)))) {
		atomic_inc(&irq_mis_count);
		spin_lock(&ioapic_lock);
		__mask_and_edge_IO_APIC_irq(irq);
		__unmask_and_level_IO_APIC_irq(irq);
		spin_unlock(&ioapic_lock);
	}
#endif
}
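/*
 * Ordering in ack_apic_level(), for reference: the TMR word for this vector
 * is sampled first, the local APIC is acked next, and only then is the
 * (masked) irq migrated and unmasked; finally, if the TMR shows the level
 * interrupt was latched as an edge, the edge/level trigger flip above is
 * used to simulate the EOI the I/O APIC is still waiting for.
 */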
static struct irq_chip ioapic_chip __read_mostly = {
	.name		= "IO-APIC",
	.startup	= startup_ioapic_irq,
	.mask		= mask_IO_APIC_irq,
	.unmask		= unmask_IO_APIC_irq,
	.ack		= ack_apic_edge,
	.eoi		= ack_apic_level,
#ifdef CONFIG_SMP
	.set_affinity	= set_ioapic_affinity_irq,
#endif
	.retrigger	= ioapic_retrigger_irq,
};

#ifdef CONFIG_INTR_REMAP
static struct irq_chip ir_ioapic_chip __read_mostly = {
	.name		= "IR-IO-APIC",
	.startup	= startup_ioapic_irq,
	.mask		= mask_IO_APIC_irq,
	.unmask		= unmask_IO_APIC_irq,
	.ack		= ack_x2apic_edge,
	.eoi		= ack_x2apic_level,
#ifdef CONFIG_SMP
	.set_affinity	= set_ir_ioapic_affinity_irq,
#endif
	.retrigger	= ioapic_retrigger_irq,
};
#endif
static inline void init_IO_APIC_traps(void)
{
	int irq;
	struct irq_desc *desc;
	struct irq_cfg *cfg;

	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	for_each_irq_cfg(irq, cfg) {
		if (IO_APIC_IRQ(irq) && !cfg->vector) {
			/*
			 * Hmm.. We don't have an entry for this,
			 * so default to an old-fashioned 8259
			 * interrupt if we can..
			 */
			if (irq < 16)
				make_8259A_irq(irq);
			else {
				desc = irq_to_desc(irq);
				/* Strange. Oh, well.. */
				desc->chip = &no_irq_chip;
			}
		}
	}
}
/*
 * The local APIC irq-chip implementation:
 */

static void mask_lapic_irq(unsigned int irq)
{
	unsigned long v;

	v = apic_read(APIC_LVT0);
	apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
}

static void unmask_lapic_irq(unsigned int irq)
{
	unsigned long v;

	v = apic_read(APIC_LVT0);
	apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
}

static void ack_lapic_irq(unsigned int irq)
{
	ack_APIC_irq();
}

static struct irq_chip lapic_chip __read_mostly = {
	.name		= "local-APIC",
	.mask		= mask_lapic_irq,
	.unmask		= unmask_lapic_irq,
	.ack		= ack_lapic_irq,
};

static void lapic_register_intr(int irq)
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);
	desc->status &= ~IRQ_LEVEL;
	set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
				      "edge");
}
static void __init setup_nmi(void)
{
	/*
	 * Dirty trick to enable the NMI watchdog ...
	 * We put the 8259A master into AEOI mode and
	 * unmask on all local APICs LVT0 as NMI.
	 *
	 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
	 * is from Maciej W. Rozycki - so we do not have to EOI from
	 * the NMI handler or the timer interrupt.
	 */
	apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");

	enable_NMI_through_LVT0();

	apic_printk(APIC_VERBOSE, " done.\n");
}
/*
 * This looks a bit hackish but it's about the only one way of sending
 * a few INTA cycles to 8259As and any associated glue logic.  ICR does
 * not support the ExtINT mode, unfortunately.  We need to send these
 * cycles as some i82489DX-based boards have glue logic that keeps the
 * 8259A interrupt line asserted until INTA.  --macro
 */
static inline void __init unlock_ExtINT_logic(void)
{
	int apic, pin, i;
	struct IO_APIC_route_entry entry0, entry1;
	unsigned char save_control, save_freq_select;

	pin = find_isa_irq_pin(8, mp_INT);
	if (pin == -1)
		return;
	apic = find_isa_irq_apic(8, mp_INT);
	if (apic == -1)
		return;

	entry0 = ioapic_read_entry(apic, pin);
	clear_IO_APIC_pin(apic, pin);

	memset(&entry1, 0, sizeof(entry1));

	entry1.dest_mode = 0;			/* physical delivery */
	entry1.mask = 0;			/* unmask IRQ now */
	entry1.dest = hard_smp_processor_id();
	entry1.delivery_mode = dest_ExtINT;
	entry1.polarity = entry0.polarity;

	ioapic_write_entry(apic, pin, entry1);

	save_control = CMOS_READ(RTC_CONTROL);
	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
	CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
		   RTC_FREQ_SELECT);
	CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);

	i = 100;
	while (i-- > 0) {
		mdelay(10);
		if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
			i -= 10;
	}

	CMOS_WRITE(save_control, RTC_CONTROL);
	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
	clear_IO_APIC_pin(apic, pin);

	ioapic_write_entry(apic, pin, entry0);
}
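/*
 * What the above actually does: the IRQ8 pin is temporarily reprogrammed as
 * an ExtINT input aimed at this CPU, and the RTC is switched to a fast
 * periodic rate, so the resulting stream of RTC interrupts produces the INTA
 * cycles that release the stuck 8259A line; afterwards both the RTC state
 * and the original routing entry are restored.
 */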
static int disable_timer_pin_1 __initdata;
/* Actually the next is obsolete, but keep it for paranoid reasons -AK */
static int __init disable_timer_pin_setup(char *arg)
{
	disable_timer_pin_1 = 1;
	return 0;
}
early_param("disable_timer_pin_1", disable_timer_pin_setup);

int timer_through_8259 __initdata;
/*
 * This code may look a bit paranoid, but it's supposed to cooperate with
 * a wide range of boards and BIOS bugs.  Fortunately only the timer IRQ
 * is so screwy.  Thanks to Brian Perkins for testing/hacking this beast
 * fanatically on his truly buggy board.
 *
 * FIXME: really need to revamp this for all platforms.
 */
static inline void __init check_timer(void)
{
	struct irq_cfg *cfg = irq_cfg(0);
	int apic1, pin1, apic2, pin2;
	unsigned long flags;
	unsigned int ver;
	int no_pin1 = 0;

	local_irq_save(flags);

	ver = apic_read(APIC_LVR);
	ver = GET_APIC_VERSION(ver);

	/*
	 * get/set the timer IRQ vector:
	 */
	disable_8259A_irq(0);
	assign_irq_vector(0, TARGET_CPUS);

	/*
	 * As IRQ0 is to be enabled in the 8259A, the virtual
	 * wire has to be disabled in the local APIC.  Also
	 * timer interrupts need to be acknowledged manually in
	 * the 8259A for the i82489DX when using the NMI
	 * watchdog as that APIC treats NMIs as level-triggered.
	 * The AEOI mode will finish them in the 8259A
	 * automatically.
	 */
	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
#ifdef CONFIG_X86_32
	timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver));
#endif

	pin1  = find_isa_irq_pin(0, mp_INT);
	apic1 = find_isa_irq_apic(0, mp_INT);
	pin2  = ioapic_i8259.pin;
	apic2 = ioapic_i8259.apic;

	apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
		    "apic1=%d pin1=%d apic2=%d pin2=%d\n",
		    cfg->vector, apic1, pin1, apic2, pin2);

	/*
	 * Some BIOS writers are clueless and report the ExtINTA
	 * I/O APIC input from the cascaded 8259A as the timer
	 * interrupt input.  So just in case, if only one pin
	 * was found above, try it both directly and through the
	 * 8259A.
	 */
	if (pin1 == -1) {
#ifdef CONFIG_INTR_REMAP
		if (intr_remapping_enabled)
			panic("BIOS bug: timer not connected to IO-APIC");
#endif
		pin1 = pin2;
		apic1 = apic2;
		no_pin1 = 1;
	} else if (pin2 == -1) {
		pin2 = pin1;
		apic2 = apic1;
	}

	if (pin1 != -1) {
		/*
		 * Ok, does IRQ0 through the IOAPIC work?
		 */
		if (no_pin1) {
			add_pin_to_irq(0, apic1, pin1);
			setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
		}
		unmask_IO_APIC_irq(0);
		if (timer_irq_works()) {
			if (nmi_watchdog == NMI_IO_APIC) {
				setup_nmi();
				enable_8259A_irq(0);
			}
			if (disable_timer_pin_1 > 0)
				clear_IO_APIC_pin(0, pin1);
			goto out;
		}
#ifdef CONFIG_INTR_REMAP
		if (intr_remapping_enabled)
			panic("timer doesn't work through Interrupt-remapped IO-APIC");
#endif
		clear_IO_APIC_pin(apic1, pin1);
		apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
			    "8254 timer not connected to IO-APIC\n");

		apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
			    "(IRQ0) through the 8259A ...\n");
		apic_printk(APIC_QUIET, KERN_INFO
			    "..... (found apic %d pin %d) ...\n", apic2, pin2);
		/*
		 * legacy devices should be connected to IO APIC #0
		 */
		replace_pin_at_irq(0, apic1, pin1, apic2, pin2);
		setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
		unmask_IO_APIC_irq(0);
		enable_8259A_irq(0);
		if (timer_irq_works()) {
			apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
			timer_through_8259 = 1;
			if (nmi_watchdog == NMI_IO_APIC) {
				disable_8259A_irq(0);
				setup_nmi();
				enable_8259A_irq(0);
			}
			goto out;
		}
		/*
		 * Cleanup, just in case ...
		 */
		disable_8259A_irq(0);
		clear_IO_APIC_pin(apic2, pin2);
		apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
	}

	if (nmi_watchdog == NMI_IO_APIC) {
		apic_printk(APIC_QUIET, KERN_WARNING "timer doesn't work "
			    "through the IO-APIC - disabling NMI Watchdog!\n");
		nmi_watchdog = NMI_NONE;
	}
#ifdef CONFIG_X86_32
	timer_ack = 0;
#endif

	apic_printk(APIC_QUIET, KERN_INFO
		    "...trying to set up timer as Virtual Wire IRQ...\n");

	lapic_register_intr(0);
	apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector);	/* Fixed mode */
	enable_8259A_irq(0);

	if (timer_irq_works()) {
		apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
		goto out;
	}
	disable_8259A_irq(0);
	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
	apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");

	apic_printk(APIC_QUIET, KERN_INFO
		    "...trying to set up timer as ExtINT IRQ...\n");

	apic_write(APIC_LVT0, APIC_DM_EXTINT);

	unlock_ExtINT_logic();

	if (timer_irq_works()) {
		apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
		goto out;
	}
	apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
	panic("IO-APIC + timer doesn't work!  Boot with apic=debug and send a "
	      "report.  Then try booting with the 'noapic' option.\n");
out:
	local_irq_restore(flags);
}
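/*
 * Summary of the fallback ladder above: IRQ0 is tried straight through the
 * I/O APIC pin, then through the 8259A ExtINT pin on the I/O APIC, then as a
 * local APIC "Virtual Wire" interrupt on LVT0, and finally as a raw ExtINT
 * via LVT0; only if all four attempts fail does the kernel panic and suggest
 * apic=debug / noapic.
 */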
/*
 * Traditionally ISA IRQ2 is the cascade IRQ, and is not available
 * to devices.  However there may be an I/O APIC pin available for
 * this interrupt regardless.  The pin may be left unconnected, but
 * typically it will be reused as an ExtINT cascade interrupt for
 * the master 8259A.  In the MPS case such a pin will normally be
 * reported as an ExtINT interrupt in the MP table.  With ACPI
 * there is no provision for ExtINT interrupts, and in the absence
 * of an override it would be treated as an ordinary ISA I/O APIC
 * interrupt, that is edge-triggered and unmasked by default.  We
 * used to do this, but it caused problems on some systems because
 * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using
 * the same ExtINT cascade interrupt to drive the local APIC of the
 * bootstrap processor.  Therefore we refrain from routing IRQ2 to
 * the I/O APIC in all cases now.  No actual device should request
 * it anyway.  --macro
 */
#define PIC_IRQS	(1 << PIC_CASCADE_IR)

void __init setup_IO_APIC(void)
{
#ifdef CONFIG_X86_32
	enable_IO_APIC();
#else
	/*
	 * calling enable_IO_APIC() is moved to setup_local_APIC for BP
	 */
#endif

	io_apic_irqs = ~PIC_IRQS;

	apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
	/*
	 * Set up IO-APIC IRQ routing.
	 */
#ifdef CONFIG_X86_32
	if (!acpi_ioapic)
		setup_ioapic_ids_from_mpc();
#endif
	setup_IO_APIC_irqs();
	init_IO_APIC_traps();
	check_timer();
}
/*
 * Called after all the initialization is done.  If we didn't find any
 * APIC bugs then we can allow the modify fast path
 */
static int __init io_apic_bug_finalize(void)
{
	if (sis_apic_bug == -1)
		sis_apic_bug = 0;
	return 0;
}

late_initcall(io_apic_bug_finalize);
struct sysfs_ioapic_data {
	struct sys_device dev;
	struct IO_APIC_route_entry entry[0];
};
static struct sysfs_ioapic_data *mp_ioapic_data[MAX_IO_APICS];

static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
{
	struct IO_APIC_route_entry *entry;
	struct sysfs_ioapic_data *data;
	int i;

	data = container_of(dev, struct sysfs_ioapic_data, dev);
	entry = data->entry;
	for (i = 0; i < nr_ioapic_registers[dev->id]; i++, entry++)
		*entry = ioapic_read_entry(dev->id, i);

	return 0;
}

static int ioapic_resume(struct sys_device *dev)
{
	struct IO_APIC_route_entry *entry;
	struct sysfs_ioapic_data *data;
	unsigned long flags;
	union IO_APIC_reg_00 reg_00;
	int i;

	data = container_of(dev, struct sysfs_ioapic_data, dev);
	entry = data->entry;

	spin_lock_irqsave(&ioapic_lock, flags);
	reg_00.raw = io_apic_read(dev->id, 0);
	if (reg_00.bits.ID != mp_ioapics[dev->id].mp_apicid) {
		reg_00.bits.ID = mp_ioapics[dev->id].mp_apicid;
		io_apic_write(dev->id, 0, reg_00.raw);
	}
	spin_unlock_irqrestore(&ioapic_lock, flags);
	for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
		ioapic_write_entry(dev->id, i, entry[i]);

	return 0;
}

static struct sysdev_class ioapic_sysdev_class = {
	.name		= "ioapic",
	.suspend	= ioapic_suspend,
	.resume		= ioapic_resume,
};

static int __init ioapic_init_sysfs(void)
{
	struct sys_device *dev;
	int i, size, error;

	error = sysdev_class_register(&ioapic_sysdev_class);
	if (error)
		return error;

	for (i = 0; i < nr_ioapics; i++) {
		size = sizeof(struct sys_device) + nr_ioapic_registers[i]
			* sizeof(struct IO_APIC_route_entry);
		mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL);
		if (!mp_ioapic_data[i]) {
			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
			continue;
		}
		dev = &mp_ioapic_data[i]->dev;
		dev->id = i;
		dev->cls = &ioapic_sysdev_class;
		error = sysdev_register(dev);
		if (error) {
			kfree(mp_ioapic_data[i]);
			mp_ioapic_data[i] = NULL;
			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
			continue;
		}
	}

	return 0;
}

device_initcall(ioapic_init_sysfs);
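/*
 * The sysdev hooks above give each I/O APIC a suspend/resume image: every
 * redirection table entry is saved into the per-device buffer on suspend,
 * and on resume the APIC ID register is put back to the MP-table value
 * before all saved entries are rewritten.
 */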
/*
 * Dynamic irq allocate and deallocation
 */
unsigned int create_irq_nr(unsigned int irq_want)
{
	/* Allocate an unused irq */
	unsigned int irq;
	unsigned int new;
	unsigned long flags;
	struct irq_cfg *cfg_new;

#ifndef CONFIG_HAVE_SPARSE_IRQ
	irq_want = nr_irqs - 1;
#endif

	irq = 0;
	spin_lock_irqsave(&vector_lock, flags);
	for (new = irq_want; new > 0; new--) {
		if (platform_legacy_irq(new))
			continue;
		cfg_new = irq_cfg(new);
		if (cfg_new && cfg_new->vector != 0)
			continue;
		/* check if need to create one */
		if (!cfg_new)
			cfg_new = irq_cfg_alloc(new);
		if (__assign_irq_vector(new, TARGET_CPUS) == 0)
			irq = new;
		break;
	}
	spin_unlock_irqrestore(&vector_lock, flags);

	if (irq > 0)
		dynamic_irq_init(irq);

	return irq;
}

int create_irq(void)
{
	int irq;

	irq = create_irq_nr(nr_irqs - 1);
	if (irq == 0)
		irq = -1;

	return irq;
}

void destroy_irq(unsigned int irq)
{
	unsigned long flags;

	dynamic_irq_cleanup(irq);

#ifdef CONFIG_INTR_REMAP
	free_irte(irq);
#endif
	spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq);
	spin_unlock_irqrestore(&vector_lock, flags);
}
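/*
 * Illustrative usage sketch (not code from this file): callers such as the
 * MSI and HT setup paths below pair these helpers roughly as
 *
 *	irq = create_irq_nr(irq_want);
 *	if (irq == 0)
 *		return -1;
 *	ret = program_device_for(irq);		(hypothetical helper)
 *	if (ret < 0)
 *		destroy_irq(irq);
 *
 * create_irq_nr() returns 0 when no free irq/vector could be found.
 */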
/*
 * MSI message composition
 */
#ifdef CONFIG_PCI_MSI
static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
{
	struct irq_cfg *cfg;
	int err;
	unsigned dest;
	cpumask_t tmp;

	tmp = TARGET_CPUS;
	err = assign_irq_vector(irq, tmp);
	if (err)
		return err;

	cfg = irq_cfg(irq);
	cpus_and(tmp, cfg->domain, tmp);
	dest = cpu_mask_to_apicid(tmp);

#ifdef CONFIG_INTR_REMAP
	if (irq_remapped(irq)) {
		struct irte irte;
		int ir_index;
		u16 sub_handle;

		ir_index = map_irq_to_irte_handle(irq, &sub_handle);
		BUG_ON(ir_index == -1);

		memset(&irte, 0, sizeof(irte));

		irte.present = 1;
		irte.dst_mode = INT_DEST_MODE;
		irte.trigger_mode = 0; /* edge */
		irte.dlvry_mode = INT_DELIVERY_MODE;
		irte.vector = cfg->vector;
		irte.dest_id = IRTE_DEST(dest);

		modify_irte(irq, &irte);

		msg->address_hi = MSI_ADDR_BASE_HI;
		msg->data = sub_handle;
		msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
				  MSI_ADDR_IR_INDEX1(ir_index) |
				  MSI_ADDR_IR_INDEX2(ir_index);
	} else
#endif
	{
		msg->address_hi = MSI_ADDR_BASE_HI;
		msg->address_lo =
			MSI_ADDR_BASE_LO |
			((INT_DEST_MODE == 0) ?
				MSI_ADDR_DEST_MODE_PHYSICAL :
				MSI_ADDR_DEST_MODE_LOGICAL) |
			((INT_DELIVERY_MODE != dest_LowestPrio) ?
				MSI_ADDR_REDIRECTION_CPU :
				MSI_ADDR_REDIRECTION_LOWPRI) |
			MSI_ADDR_DEST_ID(dest);

		msg->data =
			MSI_DATA_TRIGGER_EDGE |
			MSI_DATA_LEVEL_ASSERT |
			((INT_DELIVERY_MODE != dest_LowestPrio) ?
				MSI_DATA_DELIVERY_FIXED :
				MSI_DATA_DELIVERY_LOWPRI) |
			MSI_DATA_VECTOR(cfg->vector);
	}
	return err;
}
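/*
 * In the non-remapped message built above, the destination APIC ID and the
 * dest/redirection mode live in address_lo while the vector and delivery
 * mode live in the data word; in the remapped case the message only carries
 * the IRTE index and sub_handle, and the real routing information is kept in
 * the interrupt-remapping table entry written via modify_irte().
 */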
#ifdef CONFIG_SMP
static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
{
	struct irq_cfg *cfg;
	struct msi_msg msg;
	unsigned int dest;
	cpumask_t tmp;
	struct irq_desc *desc;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	if (assign_irq_vector(irq, mask))
		return;

	cfg = irq_cfg(irq);
	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	read_msi_msg(irq, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);

	write_msi_msg(irq, &msg);
	desc = irq_to_desc(irq);
	desc->affinity = mask;
}
#ifdef CONFIG_INTR_REMAP
/*
 * Migrate the MSI irq to another cpumask.  This migration is
 * done in the process context using interrupt-remapping hardware.
 */
static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
{
	struct irq_cfg *cfg;
	unsigned int dest;
	cpumask_t tmp, cleanup_mask;
	struct irte irte;
	struct irq_desc *desc;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	if (get_irte(irq, &irte))
		return;

	if (assign_irq_vector(irq, mask))
		return;

	cfg = irq_cfg(irq);
	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	irte.vector = cfg->vector;
	irte.dest_id = IRTE_DEST(dest);

	/*
	 * atomically update the IRTE with the new destination and vector.
	 */
	modify_irte(irq, &irte);

	/*
	 * After this point, all the interrupts will start arriving
	 * at the new destination.  So, time to clean up the previous
	 * vector allocation.
	 */
	if (cfg->move_in_progress) {
		cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
		cfg->move_cleanup_count = cpus_weight(cleanup_mask);
		send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
		cfg->move_in_progress = 0;
	}

	desc = irq_to_desc(irq);
	desc->affinity = mask;
}
#endif
#endif /* CONFIG_SMP */
/*
 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI or MSI-X Capability Structure.
 */
static struct irq_chip msi_chip = {
	.name		= "PCI-MSI",
	.unmask		= unmask_msi_irq,
	.mask		= mask_msi_irq,
	.ack		= ack_apic_edge,
#ifdef CONFIG_SMP
	.set_affinity	= set_msi_irq_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};

#ifdef CONFIG_INTR_REMAP
static struct irq_chip msi_ir_chip = {
	.name		= "IR-PCI-MSI",
	.unmask		= unmask_msi_irq,
	.mask		= mask_msi_irq,
	.ack		= ack_x2apic_edge,
#ifdef CONFIG_SMP
	.set_affinity	= ir_set_msi_irq_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};
/*
 * Map the PCI dev to the corresponding remapping hardware unit
 * and allocate 'nvec' consecutive interrupt-remapping table entries
 * in it.
 */
static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
{
	struct intel_iommu *iommu;
	int index;

	iommu = map_dev_to_ir(dev);
	if (!iommu) {
		printk(KERN_ERR
		       "Unable to map PCI %s to iommu\n", pci_name(dev));
		return -ENOENT;
	}

	index = alloc_irte(iommu, irq, nvec);
	if (index < 0) {
		printk(KERN_ERR
		       "Unable to allocate %d IRTE for PCI %s\n", nvec,
		       pci_name(dev));
		return -ENOSPC;
	}
	return index;
}
#endif
static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc, int irq)
{
	int ret;
	struct msi_msg msg;

	ret = msi_compose_msg(dev, irq, &msg);
	if (ret < 0)
		return ret;

	set_irq_msi(irq, desc);
	write_msi_msg(irq, &msg);

#ifdef CONFIG_INTR_REMAP
	if (irq_remapped(irq)) {
		struct irq_desc *desc = irq_to_desc(irq);
		/*
		 * irq migration in process context
		 */
		desc->status |= IRQ_MOVE_PCNTXT;
		set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge");
	} else
#endif
		set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");

	return 0;
}
static unsigned int build_irq_for_pci_dev(struct pci_dev *dev)
{
	unsigned int irq;

	irq = dev->bus->number;
	irq <<= 8;
	irq |= dev->devfn;
	irq <<= 12;

	return irq;
}
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
{
	unsigned int irq;
	int ret;
	unsigned int irq_want;

	irq_want = build_irq_for_pci_dev(dev) + 0x100;

	irq = create_irq_nr(irq_want);
	if (irq == 0)
		return -1;

#ifdef CONFIG_INTR_REMAP
	if (!intr_remapping_enabled)
		goto no_ir;

	ret = msi_alloc_irte(dev, irq, 1);
	if (ret < 0)
		goto error;
no_ir:
#endif
	ret = setup_msi_irq(dev, desc, irq);
	if (ret < 0) {
		destroy_irq(irq);
		return ret;
	}
	return 0;

#ifdef CONFIG_INTR_REMAP
error:
	destroy_irq(irq);
	return ret;
#endif
}
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	unsigned int irq;
	int ret, sub_handle;
	struct msi_desc *desc;
	unsigned int irq_want;

#ifdef CONFIG_INTR_REMAP
	struct intel_iommu *iommu = NULL;
	int index = 0;
#endif

	irq_want = build_irq_for_pci_dev(dev) + 0x100;
	sub_handle = 0;
	list_for_each_entry(desc, &dev->msi_list, list) {
		irq = create_irq_nr(irq_want--);
		if (irq == 0)
			return -1;
#ifdef CONFIG_INTR_REMAP
		if (!intr_remapping_enabled)
			goto no_ir;

		if (!sub_handle) {
			/*
			 * allocate the consecutive block of IRTE's
			 * for 'nvec'
			 */
			index = msi_alloc_irte(dev, irq, nvec);
			if (index < 0) {
				ret = index;
				goto error;
			}
		} else {
			iommu = map_dev_to_ir(dev);
			if (!iommu) {
				ret = -ENOENT;
				goto error;
			}
			/*
			 * setup the mapping between the irq and the IRTE
			 * base index, the sub_handle pointing to the
			 * appropriate interrupt remap table entry.
			 */
			set_irte_irq(irq, iommu, index, sub_handle);
		}
no_ir:
#endif
		ret = setup_msi_irq(dev, desc, irq);
		if (ret < 0)
			goto error;
		sub_handle++;
	}
	return 0;

error:
	destroy_irq(irq);
	return ret;
}
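/*
 * For multi-message MSI the loop above allocates one block of 'nvec'
 * consecutive IRTEs when it handles the first msi_desc (sub_handle == 0);
 * every further entry only records its own sub_handle against the same base
 * index via set_irte_irq(), so the whole block shares a single remap handle.
 */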
void arch_teardown_msi_irq(unsigned int irq)
{
	destroy_irq(irq);
}
#ifdef CONFIG_SMP
static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
{
	struct irq_cfg *cfg;
	struct msi_msg msg;
	unsigned int dest;
	cpumask_t tmp;
	struct irq_desc *desc;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	if (assign_irq_vector(irq, mask))
		return;

	cfg = irq_cfg(irq);
	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	dmar_msi_read(irq, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);

	dmar_msi_write(irq, &msg);
	desc = irq_to_desc(irq);
	desc->affinity = mask;
}
#endif /* CONFIG_SMP */
struct irq_chip dmar_msi_type = {
	.name		= "DMAR_MSI",
	.unmask		= dmar_msi_unmask,
	.mask		= dmar_msi_mask,
	.ack		= ack_apic_edge,
#ifdef CONFIG_SMP
	.set_affinity	= dmar_msi_set_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};

int arch_setup_dmar_msi(unsigned int irq)
{
	int ret;
	struct msi_msg msg;

	ret = msi_compose_msg(NULL, irq, &msg);
	if (ret < 0)
		return ret;
	dmar_msi_write(irq, &msg);
	set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
				      "edge");
	return 0;
}
#endif /* CONFIG_PCI_MSI */
/*
 * Hypertransport interrupt support
 */
#ifdef CONFIG_HT_IRQ

#ifdef CONFIG_SMP

static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
{
	struct ht_irq_msg msg;
	fetch_ht_irq_msg(irq, &msg);

	msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
	msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);

	msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
	msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);

	write_ht_irq_msg(irq, &msg);
}
static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
{
	struct irq_cfg *cfg;
	unsigned int dest;
	cpumask_t tmp;
	struct irq_desc *desc;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	if (assign_irq_vector(irq, mask))
		return;

	cfg = irq_cfg(irq);
	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	target_ht_irq(irq, dest, cfg->vector);
	desc = irq_to_desc(irq);
	desc->affinity = mask;
}
#endif /* CONFIG_SMP */
static struct irq_chip ht_irq_chip = {
	.name		= "PCI-HT",
	.mask		= mask_ht_irq,
	.unmask		= unmask_ht_irq,
	.ack		= ack_apic_edge,
#ifdef CONFIG_SMP
	.set_affinity	= set_ht_irq_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};
int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
{
	struct irq_cfg *cfg;
	int err;
	cpumask_t tmp;

	tmp = TARGET_CPUS;
	err = assign_irq_vector(irq, tmp);
	if (!err) {
		struct ht_irq_msg msg;
		unsigned dest;

		cfg = irq_cfg(irq);
		cpus_and(tmp, cfg->domain, tmp);
		dest = cpu_mask_to_apicid(tmp);

		msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);

		msg.address_lo =
			HT_IRQ_LOW_BASE |
			HT_IRQ_LOW_DEST_ID(dest) |
			HT_IRQ_LOW_VECTOR(cfg->vector) |
			((INT_DEST_MODE == 0) ?
				HT_IRQ_LOW_DM_PHYSICAL :
				HT_IRQ_LOW_DM_LOGICAL) |
			HT_IRQ_LOW_RQEOI_EDGE |
			((INT_DELIVERY_MODE != dest_LowestPrio) ?
				HT_IRQ_LOW_MT_FIXED :
				HT_IRQ_LOW_MT_ARBITRATED) |
			HT_IRQ_LOW_IRQ_MASKED;

		write_ht_irq_msg(irq, &msg);

		set_irq_chip_and_handler_name(irq, &ht_irq_chip,
					      handle_edge_irq, "edge");
	}
	return err;
}
#endif /* CONFIG_HT_IRQ */
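/*
 * Like MSI, a HyperTransport interrupt is just a message the device writes:
 * the destination APIC ID is split across address_hi/address_lo, the vector
 * and delivery mode are encoded in address_lo, and the interrupt is created
 * masked (HT_IRQ_LOW_IRQ_MASKED) until the ht_irq_chip unmask is called.
 */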
int __init io_apic_get_redir_entries(int ioapic)
{
	union IO_APIC_reg_01 reg_01;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	reg_01.raw = io_apic_read(ioapic, 1);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return reg_01.bits.entries;
}
int __init probe_nr_irqs(void)
{
	int idx;
	int nr = 0;
	int nr_min = NR_IRQS;

	for (idx = 0; idx < nr_ioapics; idx++)
		nr += io_apic_get_redir_entries(idx) + 1;

	/* double it for hotplug and msi and nmi */
	nr <<= 1;

	/* something wrong ? */
	if (nr < nr_min)
		nr = nr_min;

	return nr;
}
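/*
 * The value returned here is only an upper-bound estimate for the irq space:
 * it sums the redirection entries of every I/O APIC, doubles the result to
 * leave headroom for hotplug/MSI/NMI users, and is clamped so it never
 * reports less than the static NR_IRQS minimum.
 */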
/* --------------------------------------------------------------------------
			ACPI-based IOAPIC Configuration
   -------------------------------------------------------------------------- */

#ifdef CONFIG_ACPI

#ifdef CONFIG_X86_32
int __init io_apic_get_unique_id(int ioapic, int apic_id)
{
	union IO_APIC_reg_00 reg_00;
	static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
	physid_mask_t tmp;
	unsigned long flags;
	int i = 0;

	/*
	 * The P4 platform supports up to 256 APIC IDs on two separate APIC
	 * buses (one for LAPICs, one for IOAPICs), where predecessors only
	 * support up to 16 on one shared APIC bus.
	 *
	 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
	 *      advantage of new APIC bus architecture.
	 */

	if (physids_empty(apic_id_map))
		apic_id_map = ioapic_phys_id_map(phys_cpu_present_map);

	spin_lock_irqsave(&ioapic_lock, flags);
	reg_00.raw = io_apic_read(ioapic, 0);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	if (apic_id >= get_physical_broadcast()) {
		printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
			"%d\n", ioapic, apic_id, reg_00.bits.ID);
		apic_id = reg_00.bits.ID;
	}

	/*
	 * Every APIC in a system must have a unique ID or we get lots of nice
	 * 'stuck on smp_invalidate_needed IPI wait' messages.
	 */
	if (check_apicid_used(apic_id_map, apic_id)) {

		for (i = 0; i < get_physical_broadcast(); i++) {
			if (!check_apicid_used(apic_id_map, i))
				break;
		}

		if (i == get_physical_broadcast())
			panic("Max apic_id exceeded!\n");

		printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
			"trying %d\n", ioapic, apic_id, i);

		apic_id = i;
	}

	tmp = apicid_to_cpu_present(apic_id);
	physids_or(apic_id_map, apic_id_map, tmp);

	if (reg_00.bits.ID != apic_id) {
		reg_00.bits.ID = apic_id;

		spin_lock_irqsave(&ioapic_lock, flags);
		io_apic_write(ioapic, 0, reg_00.raw);
		reg_00.raw = io_apic_read(ioapic, 0);
		spin_unlock_irqrestore(&ioapic_lock, flags);

		if (reg_00.bits.ID != apic_id) {
			printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
			return -1;
		}
	}

	apic_printk(APIC_VERBOSE, KERN_INFO
			"IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);

	return apic_id;
}
int __init io_apic_get_version(int ioapic)
{
	union IO_APIC_reg_01 reg_01;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	reg_01.raw = io_apic_read(ioapic, 1);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return reg_01.bits.version;
}
#endif
int io_apic_set_pci_routing(int ioapic, int pin, int irq, int triggering, int polarity)
{
	if (!IO_APIC_IRQ(irq)) {
		apic_printk(APIC_QUIET, KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
			ioapic);
		return -EINVAL;
	}

	/*
	 * IRQs < 16 are already in the irq_2_pin[] map
	 */
	if (irq >= 16)
		add_pin_to_irq(irq, ioapic, pin);

	setup_IO_APIC_irq(ioapic, pin, irq, triggering, polarity);

	return 0;
}

int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
{
	int i;

	if (skip_ioapic_setup)
		return -1;

	for (i = 0; i < mp_irq_entries; i++)
		if (mp_irqs[i].mp_irqtype == mp_INT &&
		    mp_irqs[i].mp_srcbusirq == bus_irq)
			break;
	if (i >= mp_irq_entries)
		return -1;

	*trigger = irq_trigger(i);
	*polarity = irq_polarity(i);
	return 0;
}

#endif /* CONFIG_ACPI */
/*
 * This function currently is only a helper for the i386 smp boot process where
 * we need to reprogram the ioredtbls to cater for the cpus which have come online
 * so the mask in all cases should simply be TARGET_CPUS
 */
#ifdef CONFIG_SMP
void __init setup_ioapic_dest(void)
{
	int pin, ioapic, irq, irq_entry;
	struct irq_cfg *cfg;

	if (skip_ioapic_setup == 1)
		return;

	for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
		for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
			irq_entry = find_irq_entry(ioapic, pin, mp_INT);
			if (irq_entry == -1)
				continue;
			irq = pin_2_irq(irq_entry, ioapic, pin);

			/* setup_IO_APIC_irqs could fail to get vector for some device
			 * when you have too many devices, because at that time only boot
			 * cpu is online.
			 */
			cfg = irq_cfg(irq);
			if (!cfg->vector)
				setup_IO_APIC_irq(ioapic, pin, irq,
						  irq_trigger(irq_entry),
						  irq_polarity(irq_entry));
#ifdef CONFIG_INTR_REMAP
			else if (intr_remapping_enabled)
				set_ir_ioapic_affinity_irq(irq, TARGET_CPUS);
			else
#endif
				set_ioapic_affinity_irq(irq, TARGET_CPUS);
		}
	}
}
#endif
#define IOAPIC_RESOURCE_NAME_SIZE 11

static struct resource *ioapic_resources;

static struct resource * __init ioapic_setup_resources(void)
{
	unsigned long n;
	struct resource *res;
	char *mem;
	int i;

	if (nr_ioapics <= 0)
		return NULL;

	n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
	n *= nr_ioapics;

	mem = alloc_bootmem(n);
	res = (void *)mem;

	if (mem != NULL) {
		mem += sizeof(struct resource) * nr_ioapics;

		for (i = 0; i < nr_ioapics; i++) {
			res[i].name = mem;
			res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
			sprintf(mem, "IOAPIC %u", i);
			mem += IOAPIC_RESOURCE_NAME_SIZE;
		}
	}

	ioapic_resources = res;

	return res;
}
void __init ioapic_init_mappings(void)
{
	unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
	struct resource *ioapic_res;
	int i;

	ioapic_res = ioapic_setup_resources();
	for (i = 0; i < nr_ioapics; i++) {
		if (smp_found_config) {
			ioapic_phys = mp_ioapics[i].mp_apicaddr;
#ifdef CONFIG_X86_32
			if (!ioapic_phys) {
				printk(KERN_ERR
				       "WARNING: bogus zero IO-APIC "
				       "address found in MPTABLE, "
				       "disabling IO/APIC support!\n");
				smp_found_config = 0;
				skip_ioapic_setup = 1;
				goto fake_ioapic_page;
			}
#endif
		} else {
#ifdef CONFIG_X86_32
fake_ioapic_page:
#endif
			ioapic_phys = (unsigned long)
				alloc_bootmem_pages(PAGE_SIZE);
			ioapic_phys = __pa(ioapic_phys);
		}
		set_fixmap_nocache(idx, ioapic_phys);
		apic_printk(APIC_VERBOSE,
			    "mapped IOAPIC to %08lx (%08lx)\n",
			    __fix_to_virt(idx), ioapic_phys);
		idx++;

		if (ioapic_res != NULL) {
			ioapic_res->start = ioapic_phys;
			ioapic_res->end = ioapic_phys + (4 * 1024) - 1;
			ioapic_res++;
		}
	}
}
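/*
 * Each I/O APIC register window is only 4 KiB, so the loop above maps one
 * fixmap slot per controller (starting at FIX_IO_APIC_BASE_0) with caching
 * disabled, and records the same physical range in the resource table that
 * ioapic_insert_resources() below publishes under the iomem resource tree.
 */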
static int __init ioapic_insert_resources(void)
{
	int i;
	struct resource *r = ioapic_resources;

	if (!r) {
		printk(KERN_ERR
		       "IO APIC resources could not be allocated.\n");
		return -1;
	}

	for (i = 0; i < nr_ioapics; i++) {
		insert_resource(&iomem_resource, r);
		r++;
	}

	return 0;
}

/* Insert the IO APIC resources after PCI initialization has occurred to handle
 * IO APICs that are mapped in on a BAR in PCI space. */
late_initcall(ioapic_insert_resources);