irq: make irqs in kernel stat use per_cpu_dyn_array
arch/x86/kernel/io_apic_64.c

/*
 * Intel IO-APIC support for multi-Pentium hosts.
 *
 * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
 *
 * Many thanks to Stig Venaas for trying out countless experimental
 * patches and reporting/debugging problems patiently!
 *
 * (c) 1999, Multiple IO-APIC support, developed by
 * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
 * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
 * further tested and cleaned up by Zach Brown <zab@redhat.com>
 * and Ingo Molnar <mingo@redhat.com>
 *
 * Fixes
 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
 *					thanks to Eric Gilmore and Rolf G. Tews
 *					for testing these extensively
 *	Paul Diefenbaugh	:	Added full ACPI support
 */

#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/mc146818rtc.h>
#include <linux/acpi.h>
#include <linux/sysdev.h>
#include <linux/msi.h>
#include <linux/htirq.h>
#include <linux/dmar.h>
#include <linux/jiffies.h>
#ifdef CONFIG_ACPI
#include <acpi/acpi_bus.h>
#endif
#include <linux/bootmem.h>

#include <asm/idle.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/acpi.h>
#include <asm/dma.h>
#include <asm/i8259.h>
#include <asm/nmi.h>
#include <asm/msidef.h>
#include <asm/hypertransport.h>
#include <asm/irq_remapping.h>

#include <mach_ipi.h>
#include <mach_apic.h>

#define __apicdebuginit(type) static type __init

struct irq_cfg {
	cpumask_t domain;
	cpumask_t old_domain;
	unsigned move_cleanup_count;
	u8 vector;
	u8 move_in_progress : 1;
};

/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
static struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = {
	[0]  = { .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR,  },
	[1]  = { .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR,  },
	[2]  = { .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR,  },
	[3]  = { .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR,  },
	[4]  = { .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR,  },
	[5]  = { .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR,  },
	[6]  = { .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR,  },
	[7]  = { .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR,  },
	[8]  = { .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR,  },
	[9]  = { .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR,  },
	[10] = { .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, },
	[11] = { .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, },
	[12] = { .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, },
	[13] = { .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, },
	[14] = { .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, },
	[15] = { .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
};

static int assign_irq_vector(int irq, cpumask_t mask);

int first_system_vector = 0xfe;

char system_vectors[NR_VECTORS] = { [0 ... NR_VECTORS-1] = SYS_VECTOR_FREE };

int sis_apic_bug; /* not actually supported, dummy for compile */

static int no_timer_check;

static int disable_timer_pin_1 __initdata;

int timer_through_8259 __initdata;

/* Where, if anywhere, the i8259 is connected in external int mode */
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };

static DEFINE_SPINLOCK(ioapic_lock);
static DEFINE_SPINLOCK(vector_lock);

/*
 * # of IRQ routing registers
 */
int nr_ioapic_registers[MAX_IO_APICS];

/* I/O APIC RTE contents at OS boot */
struct IO_APIC_route_entry *early_ioapic_entries[MAX_IO_APICS];

/* I/O APIC entries */
struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];
int nr_ioapics;

/* MP IRQ source entries */
struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];

/* # of MP IRQ source entries */
int mp_irq_entries;

DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);

/*
 * Rough estimate of how many shared IRQs there are; can
 * be changed at any time.
 */
#define MAX_PLUS_SHARED_IRQS NR_IRQS
#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)

int pin_map_size = PIN_MAP_SIZE;

/*
 * This is performance-critical, we want to do it O(1)
 *
 * the indexing order of this array favors 1:1 mappings
 * between pins and IRQs.
 */
static struct irq_pin_list {
	short apic, pin, next;
} irq_2_pin[PIN_MAP_SIZE];

struct io_apic {
	unsigned int index;
	unsigned int unused[3];
	unsigned int data;
};

static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
{
	return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
		+ (mp_ioapics[idx].mp_apicaddr & ~PAGE_MASK);
}

static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	return readl(&io_apic->data);
}

static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}

/*
 * Re-write a value: to be used for read-modify-write
 * cycles where the read already set up the index register.
 */
static inline void io_apic_modify(unsigned int apic, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(value, &io_apic->data);
}
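
/*
 * Illustration (follows from the accessors above): the I/O APIC is
 * programmed through a two-register window -- write the register number
 * into 'index', then read or write 'data'.  For example, fetching the low
 * word of the redirection entry for pin 2 is roughly
 * io_apic_read(apic, 0x10 + 2*2): register 0x10 is the first RTE and each
 * pin owns two consecutive 32-bit registers.
 */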
static bool io_apic_level_ack_pending(unsigned int irq)
{
	struct irq_pin_list *entry;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	entry = irq_2_pin + irq;
	for (;;) {
		unsigned int reg;
		int pin;

		pin = entry->pin;
		if (pin == -1)
			break;
		reg = io_apic_read(entry->apic, 0x10 + pin*2);
		/* Is the remote IRR bit set? */
		if (reg & IO_APIC_REDIR_REMOTE_IRR) {
			spin_unlock_irqrestore(&ioapic_lock, flags);
			return true;
		}
		if (!entry->next)
			break;
		entry = irq_2_pin + entry->next;
	}
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return false;
}
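
/*
 * For reference: on a level-triggered pin the IO-APIC sets Remote IRR
 * (IO_APIC_REDIR_REMOTE_IRR, bit 14 of the RTE low word read above) when
 * it delivers the interrupt and clears it when the EOI broadcast arrives,
 * so a set bit here means the acknowledge is still in flight.
 */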
/*
 * Synchronize the IO-APIC and the CPU by doing
 * a dummy read from the IO-APIC
 */
static inline void io_apic_sync(unsigned int apic)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	readl(&io_apic->data);
}

#define __DO_ACTION(R, ACTION, FINAL)					\
									\
{									\
	int pin;							\
	struct irq_pin_list *entry = irq_2_pin + irq;			\
									\
	BUG_ON(irq >= nr_irqs);						\
	for (;;) {							\
		unsigned int reg;					\
		pin = entry->pin;					\
		if (pin == -1)						\
			break;						\
		reg = io_apic_read(entry->apic, 0x10 + R + pin*2);	\
		reg ACTION;						\
		io_apic_modify(entry->apic, reg);			\
		FINAL;							\
		if (!entry->next)					\
			break;						\
		entry = irq_2_pin + entry->next;			\
	}								\
}

union entry_union {
	struct { u32 w1, w2; };
	struct IO_APIC_route_entry entry;
};

static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
{
	union entry_union eu;
	unsigned long flags;
	spin_lock_irqsave(&ioapic_lock, flags);
	eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
	eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
	spin_unlock_irqrestore(&ioapic_lock, flags);
	return eu.entry;
}
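
/*
 * Sketch of the layout: a redirection table entry is 64 bits wide while
 * the I/O APIC exposes only 32-bit registers, so entry_union overlays the
 * struct with two 32-bit words -- w1 maps to register 0x10 + 2*pin
 * (vector, delivery mode, mask, trigger, Remote IRR) and w2 to
 * 0x11 + 2*pin (the destination field in its top bits).
 */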
/*
 * When we write a new IO APIC routing entry, we need to write the high
 * word first! If the mask bit in the low word is clear, we will enable
 * the interrupt, and we need to make sure the entry is fully populated
 * before that happens.
 */
static void
__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	union entry_union eu;
	eu.entry = e;
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
}

static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	unsigned long flags;
	spin_lock_irqsave(&ioapic_lock, flags);
	__ioapic_write_entry(apic, pin, e);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}

/*
 * When we mask an IO APIC routing entry, we need to write the low
 * word first, in order to set the mask bit before we change the
 * high bits!
 */
static void ioapic_mask_entry(int apic, int pin)
{
	unsigned long flags;
	union entry_union eu = { .entry.mask = 1 };

	spin_lock_irqsave(&ioapic_lock, flags);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}

#ifdef CONFIG_SMP
static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector)
{
	int apic, pin;
	struct irq_pin_list *entry = irq_2_pin + irq;

	BUG_ON(irq >= nr_irqs);
	for (;;) {
		unsigned int reg;
		apic = entry->apic;
		pin = entry->pin;
		if (pin == -1)
			break;
		/*
		 * With interrupt-remapping, destination information comes
		 * from the interrupt-remapping table entry.
		 */
		if (!irq_remapped(irq))
			io_apic_write(apic, 0x11 + pin*2, dest);
		reg = io_apic_read(apic, 0x10 + pin*2);
		reg &= ~IO_APIC_REDIR_VECTOR_MASK;
		reg |= vector;
		io_apic_modify(apic, reg);
		if (!entry->next)
			break;
		entry = irq_2_pin + entry->next;
	}
}

static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
{
	struct irq_cfg *cfg = irq_cfg + irq;
	unsigned long flags;
	unsigned int dest;
	cpumask_t tmp;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	if (assign_irq_vector(irq, mask))
		return;

	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	/*
	 * Only the high 8 bits are valid.
	 */
	dest = SET_APIC_LOGICAL_ID(dest);

	spin_lock_irqsave(&ioapic_lock, flags);
	__target_IO_APIC_irq(irq, dest, cfg->vector);
	irq_desc[irq].affinity = mask;
	spin_unlock_irqrestore(&ioapic_lock, flags);
}
#endif

/*
 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
 * shared ISA-space IRQs, so we have to support them. We are super
 * fast in the common case, and fast for shared ISA-space IRQs.
 */
int first_free_entry = NR_IRQS;
static void add_pin_to_irq(unsigned int irq, int apic, int pin)
{
	struct irq_pin_list *entry = irq_2_pin + irq;

	BUG_ON(irq >= nr_irqs);
	while (entry->next)
		entry = irq_2_pin + entry->next;

	if (entry->pin != -1) {
		entry->next = first_free_entry;
		entry = irq_2_pin + entry->next;
		if (++first_free_entry >= pin_map_size)
			panic("io_apic.c: ran out of irq_2_pin entries!");
	}
	entry->apic = apic;
	entry->pin = pin;
}
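
/*
 * Example of the resulting structure: if GSI 19 is shared by pin 19 of
 * IO-APIC 0 and pin 3 of IO-APIC 1, then irq_2_pin[19] holds {0, 19} and
 * its ->next indexes a spill entry (allocated from first_free_entry,
 * which starts past the 1:1 region at NR_IRQS) holding {1, 3}.
 */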
/*
 * Reroute an IRQ to a different pin.
 */
static void __init replace_pin_at_irq(unsigned int irq,
				      int oldapic, int oldpin,
				      int newapic, int newpin)
{
	struct irq_pin_list *entry = irq_2_pin + irq;

	while (1) {
		if (entry->apic == oldapic && entry->pin == oldpin) {
			entry->apic = newapic;
			entry->pin = newpin;
		}
		if (!entry->next)
			break;
		entry = irq_2_pin + entry->next;
	}
}

#define DO_ACTION(name, R, ACTION, FINAL)				\
									\
	static void name##_IO_APIC_irq(unsigned int irq)		\
	__DO_ACTION(R, ACTION, FINAL)

/* mask = 1 */
DO_ACTION(__mask, 0, |= IO_APIC_REDIR_MASKED, io_apic_sync(entry->apic))

/* mask = 0 */
DO_ACTION(__unmask, 0, &= ~IO_APIC_REDIR_MASKED, )
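
/*
 * For reference, the first DO_ACTION() above expands to roughly:
 *
 *	static void __mask_IO_APIC_irq(unsigned int irq)
 *	{
 *		for each (apic, pin) chained from irq_2_pin[irq]:
 *			reg = io_apic_read(apic, 0x10 + pin*2);
 *			reg |= IO_APIC_REDIR_MASKED;
 *			io_apic_modify(apic, reg);
 *			io_apic_sync(apic);	// post the write
 *	}
 *
 * __unmask_IO_APIC_irq() is the same walk, clearing the mask bit and
 * with no FINAL step.
 */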
static void mask_IO_APIC_irq(unsigned int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	__mask_IO_APIC_irq(irq);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void unmask_IO_APIC_irq(unsigned int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	__unmask_IO_APIC_irq(irq);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
{
	struct IO_APIC_route_entry entry;

	/* Check delivery_mode to be sure we're not clearing an SMI pin */
	entry = ioapic_read_entry(apic, pin);
	if (entry.delivery_mode == dest_SMI)
		return;
	/*
	 * Disable it in the IO-APIC irq-routing table:
	 */
	ioapic_mask_entry(apic, pin);
}

static void clear_IO_APIC(void)
{
	int apic, pin;

	for (apic = 0; apic < nr_ioapics; apic++)
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
			clear_IO_APIC_pin(apic, pin);
}

/*
 * Saves and masks all the unmasked IO-APIC RTE's
 */
int save_mask_IO_APIC_setup(void)
{
	union IO_APIC_reg_01 reg_01;
	unsigned long flags;
	int apic, pin;

	/*
	 * The number of IO-APIC IRQ registers (== #pins):
	 */
	for (apic = 0; apic < nr_ioapics; apic++) {
		spin_lock_irqsave(&ioapic_lock, flags);
		reg_01.raw = io_apic_read(apic, 1);
		spin_unlock_irqrestore(&ioapic_lock, flags);
		nr_ioapic_registers[apic] = reg_01.bits.entries+1;
	}

	for (apic = 0; apic < nr_ioapics; apic++) {
		early_ioapic_entries[apic] =
			kzalloc(sizeof(struct IO_APIC_route_entry) *
				nr_ioapic_registers[apic], GFP_KERNEL);
		if (!early_ioapic_entries[apic])
			return -ENOMEM;
	}

	for (apic = 0; apic < nr_ioapics; apic++)
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
			struct IO_APIC_route_entry entry;

			entry = early_ioapic_entries[apic][pin] =
				ioapic_read_entry(apic, pin);
			if (!entry.mask) {
				entry.mask = 1;
				ioapic_write_entry(apic, pin, entry);
			}
		}
	return 0;
}
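
/*
 * Note: save_mask_IO_APIC_setup() above and restore_IO_APIC_setup() below
 * appear intended as a bracketing pair around a mode switch such as
 * enabling interrupt remapping: snapshot and mask every RTE, flip the
 * mode, then write the saved entries back.
 * reinit_intr_remapped_IO_APIC() further down currently just does the
 * plain restore.
 */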
void restore_IO_APIC_setup(void)
{
	int apic, pin;

	for (apic = 0; apic < nr_ioapics; apic++)
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
			ioapic_write_entry(apic, pin,
					   early_ioapic_entries[apic][pin]);
}

void reinit_intr_remapped_IO_APIC(int intr_remapping)
{
	/*
	 * for now plain restore of previous settings.
	 * TBD: In the case of OS enabling interrupt-remapping,
	 * IO-APIC RTE's need to be setup to point to interrupt-remapping
	 * table entries. for now, do a plain restore, and wait for
	 * the setup_IO_APIC_irqs() to do proper initialization.
	 */
	restore_IO_APIC_setup();
}

int skip_ioapic_setup;
int ioapic_force;

static int __init parse_noapic(char *str)
{
	disable_ioapic_setup();
	return 0;
}
early_param("noapic", parse_noapic);

/* Actually the next is obsolete, but keep it for paranoid reasons -AK */
static int __init disable_timer_pin_setup(char *arg)
{
	disable_timer_pin_1 = 1;
	return 1;
}
__setup("disable_timer_pin_1", disable_timer_pin_setup);


/*
 * Find the IRQ entry number of a certain pin.
 */
static int find_irq_entry(int apic, int pin, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++)
		if (mp_irqs[i].mp_irqtype == type &&
		    (mp_irqs[i].mp_dstapic == mp_ioapics[apic].mp_apicid ||
		     mp_irqs[i].mp_dstapic == MP_APIC_ALL) &&
		    mp_irqs[i].mp_dstirq == pin)
			return i;

	return -1;
}

/*
 * Find the pin to which IRQ[irq] (ISA) is connected
 */
static int __init find_isa_irq_pin(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].mp_srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].mp_irqtype == type) &&
		    (mp_irqs[i].mp_srcbusirq == irq))

			return mp_irqs[i].mp_dstirq;
	}
	return -1;
}

static int __init find_isa_irq_apic(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].mp_srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].mp_irqtype == type) &&
		    (mp_irqs[i].mp_srcbusirq == irq))
			break;
	}
	if (i < mp_irq_entries) {
		int apic;
		for (apic = 0; apic < nr_ioapics; apic++) {
			if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic)
				return apic;
		}
	}

	return -1;
}

/*
 * Find a specific PCI IRQ entry.
 * Not an __init, possibly needed by modules
 */
static int pin_2_irq(int idx, int apic, int pin);

int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
{
	int apic, i, best_guess = -1;

	apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
		    bus, slot, pin);
	if (test_bit(bus, mp_bus_not_pci)) {
		apic_printk(APIC_VERBOSE, "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
		return -1;
	}
	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].mp_srcbus;

		for (apic = 0; apic < nr_ioapics; apic++)
			if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic ||
			    mp_irqs[i].mp_dstapic == MP_APIC_ALL)
				break;

		if (!test_bit(lbus, mp_bus_not_pci) &&
		    !mp_irqs[i].mp_irqtype &&
		    (bus == lbus) &&
		    (slot == ((mp_irqs[i].mp_srcbusirq >> 2) & 0x1f))) {
			int irq = pin_2_irq(i, apic, mp_irqs[i].mp_dstirq);

			if (!(apic || IO_APIC_IRQ(irq)))
				continue;

			if (pin == (mp_irqs[i].mp_srcbusirq & 3))
				return irq;
			/*
			 * Use the first all-but-pin matching entry as a
			 * best-guess fuzzy result for broken mptables.
			 */
			if (best_guess < 0)
				best_guess = irq;
		}
	}
	BUG_ON(best_guess >= nr_irqs);
	return best_guess;
}

/* ISA interrupts are always polarity zero edge triggered,
 * when listed as conforming in the MP table. */

#define default_ISA_trigger(idx)	(0)
#define default_ISA_polarity(idx)	(0)

/* PCI interrupts are always polarity one level triggered,
 * when listed as conforming in the MP table. */

#define default_PCI_trigger(idx)	(1)
#define default_PCI_polarity(idx)	(1)

static int MPBIOS_polarity(int idx)
{
	int bus = mp_irqs[idx].mp_srcbus;
	int polarity;

	/*
	 * Determine IRQ line polarity (high active or low active):
	 */
	switch (mp_irqs[idx].mp_irqflag & 3)
	{
		case 0: /* conforms, ie. bus-type dependent polarity */
			if (test_bit(bus, mp_bus_not_pci))
				polarity = default_ISA_polarity(idx);
			else
				polarity = default_PCI_polarity(idx);
			break;
		case 1: /* high active */
		{
			polarity = 0;
			break;
		}
		case 2: /* reserved */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			polarity = 1;
			break;
		}
		case 3: /* low active */
		{
			polarity = 1;
			break;
		}
		default: /* invalid */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			polarity = 1;
			break;
		}
	}
	return polarity;
}

static int MPBIOS_trigger(int idx)
{
	int bus = mp_irqs[idx].mp_srcbus;
	int trigger;

	/*
	 * Determine IRQ trigger mode (edge or level sensitive):
	 */
	switch ((mp_irqs[idx].mp_irqflag>>2) & 3)
	{
		case 0: /* conforms, ie. bus-type dependent */
			if (test_bit(bus, mp_bus_not_pci))
				trigger = default_ISA_trigger(idx);
			else
				trigger = default_PCI_trigger(idx);
			break;
		case 1: /* edge */
		{
			trigger = 0;
			break;
		}
		case 2: /* reserved */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			trigger = 1;
			break;
		}
		case 3: /* level */
		{
			trigger = 1;
			break;
		}
		default: /* invalid */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			trigger = 0;
			break;
		}
	}
	return trigger;
}

static inline int irq_polarity(int idx)
{
	return MPBIOS_polarity(idx);
}

static inline int irq_trigger(int idx)
{
	return MPBIOS_trigger(idx);
}
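
/*
 * The MP-table irqflag encoding decoded above: bits 1:0 are polarity
 * (00 conforming, 01 active-high, 11 active-low) and bits 3:2 are trigger
 * mode (00 conforming, 01 edge, 11 level); "conforming" defers to the bus
 * default, i.e. edge/high for ISA and level/low for PCI per the macros
 * above.
 */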
static int pin_2_irq(int idx, int apic, int pin)
{
	int irq, i;
	int bus = mp_irqs[idx].mp_srcbus;

	/*
	 * Debugging check, we are in big trouble if this message pops up!
	 */
	if (mp_irqs[idx].mp_dstirq != pin)
		printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");

	if (test_bit(bus, mp_bus_not_pci)) {
		irq = mp_irqs[idx].mp_srcbusirq;
	} else {
		/*
		 * PCI IRQs are mapped in order
		 */
		i = irq = 0;
		while (i < apic)
			irq += nr_ioapic_registers[i++];
		irq += pin;
	}
	BUG_ON(irq >= nr_irqs);
	return irq;
}
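
/*
 * Worked example: with two 24-pin IO-APICs, a PCI interrupt on apic 1,
 * pin 3 gets irq = nr_ioapic_registers[0] + 3 = 27, while an ISA source
 * simply keeps its bus irq number from the MP table.
 */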
void lock_vector_lock(void)
{
	/* Used so that the set of online cpus does not change
	 * during assign_irq_vector.
	 */
	spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
	spin_unlock(&vector_lock);
}

static int __assign_irq_vector(int irq, cpumask_t mask)
{
	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
	unsigned int old_vector;
	int cpu;
	struct irq_cfg *cfg;

	BUG_ON((unsigned)irq >= nr_irqs);
	cfg = &irq_cfg[irq];

	/* Only try and allocate irqs on cpus that are present */
	cpus_and(mask, mask, cpu_online_map);

	if ((cfg->move_in_progress) || cfg->move_cleanup_count)
		return -EBUSY;

	old_vector = cfg->vector;
	if (old_vector) {
		cpumask_t tmp;
		cpus_and(tmp, cfg->domain, mask);
		if (!cpus_empty(tmp))
			return 0;
	}

	for_each_cpu_mask_nr(cpu, mask) {
		cpumask_t domain, new_mask;
		int new_cpu;
		int vector, offset;

		domain = vector_allocation_domain(cpu);
		cpus_and(new_mask, domain, cpu_online_map);

		vector = current_vector;
		offset = current_offset;
next:
		vector += 8;
		if (vector >= first_system_vector) {
			/* If we run out of vectors on large boxen, must share them. */
			offset = (offset + 1) % 8;
			vector = FIRST_DEVICE_VECTOR + offset;
		}
		if (unlikely(current_vector == vector))
			continue;
		if (vector == IA32_SYSCALL_VECTOR)
			goto next;
		for_each_cpu_mask_nr(new_cpu, new_mask)
			if (per_cpu(vector_irq, new_cpu)[vector] != -1)
				goto next;
		/* Found one! */
		current_vector = vector;
		current_offset = offset;
		if (old_vector) {
			cfg->move_in_progress = 1;
			cfg->old_domain = cfg->domain;
		}
		for_each_cpu_mask_nr(new_cpu, new_mask)
			per_cpu(vector_irq, new_cpu)[vector] = irq;
		cfg->vector = vector;
		cfg->domain = domain;
		return 0;
	}
	return -ENOSPC;
}
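
/*
 * Worked example of the stepping above: the search advances in strides of
 * 8 vectors, so consecutive allocations land in different priority levels
 * (the local APIC derives the level as vector >> 4, i.e. 16 vectors per
 * level); once the walk hits first_system_vector it wraps back to
 * FIRST_DEVICE_VECTOR with the next of the 8 possible offsets, filling a
 * second vector in each level, and so on.
 */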
static int assign_irq_vector(int irq, cpumask_t mask)
{
	int err;
	unsigned long flags;

	spin_lock_irqsave(&vector_lock, flags);
	err = __assign_irq_vector(irq, mask);
	spin_unlock_irqrestore(&vector_lock, flags);
	return err;
}

static void __clear_irq_vector(int irq)
{
	struct irq_cfg *cfg;
	cpumask_t mask;
	int cpu, vector;

	BUG_ON((unsigned)irq >= nr_irqs);
	cfg = &irq_cfg[irq];
	BUG_ON(!cfg->vector);

	vector = cfg->vector;
	cpus_and(mask, cfg->domain, cpu_online_map);
	for_each_cpu_mask_nr(cpu, mask)
		per_cpu(vector_irq, cpu)[vector] = -1;

	cfg->vector = 0;
	cpus_clear(cfg->domain);
}

void __setup_vector_irq(int cpu)
{
	/* Initialize vector_irq on a new cpu */
	/* This function must be called with vector_lock held */
	int irq, vector;

	/* Mark the inuse vectors */
	for (irq = 0; irq < nr_irqs; ++irq) {
		if (!cpu_isset(cpu, irq_cfg[irq].domain))
			continue;
		vector = irq_cfg[irq].vector;
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
	/* Mark the free vectors */
	for (vector = 0; vector < NR_VECTORS; ++vector) {
		irq = per_cpu(vector_irq, cpu)[vector];
		if (irq < 0)
			continue;
		if (!cpu_isset(cpu, irq_cfg[irq].domain))
			per_cpu(vector_irq, cpu)[vector] = -1;
	}
}
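
/*
 * After this runs, per_cpu(vector_irq, cpu) is a NR_VECTORS-sized map
 * from vector number back to irq for this cpu, with -1 marking free
 * vectors -- effectively the inverse of irq_cfg[irq].vector restricted
 * to irqs whose domain includes this cpu.
 */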
static struct irq_chip ioapic_chip;
#ifdef CONFIG_INTR_REMAP
static struct irq_chip ir_ioapic_chip;
#endif

static void ioapic_register_intr(int irq, unsigned long trigger)
{
	if (trigger)
		irq_desc[irq].status |= IRQ_LEVEL;
	else
		irq_desc[irq].status &= ~IRQ_LEVEL;

#ifdef CONFIG_INTR_REMAP
	if (irq_remapped(irq)) {
		irq_desc[irq].status |= IRQ_MOVE_PCNTXT;
		if (trigger)
			set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
						      handle_fasteoi_irq,
						      "fasteoi");
		else
			set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
						      handle_edge_irq, "edge");
		return;
	}
#endif
	if (trigger)
		set_irq_chip_and_handler_name(irq, &ioapic_chip,
					      handle_fasteoi_irq,
					      "fasteoi");
	else
		set_irq_chip_and_handler_name(irq, &ioapic_chip,
					      handle_edge_irq, "edge");
}

static int setup_ioapic_entry(int apic, int irq,
			      struct IO_APIC_route_entry *entry,
			      unsigned int destination, int trigger,
			      int polarity, int vector)
{
	/*
	 * add it to the IO-APIC irq-routing table:
	 */
	memset(entry, 0, sizeof(*entry));

#ifdef CONFIG_INTR_REMAP
	if (intr_remapping_enabled) {
		struct intel_iommu *iommu = map_ioapic_to_ir(apic);
		struct irte irte;
		struct IR_IO_APIC_route_entry *ir_entry =
			(struct IR_IO_APIC_route_entry *) entry;
		int index;

		if (!iommu)
			panic("No mapping iommu for ioapic %d\n", apic);

		index = alloc_irte(iommu, irq, 1);
		if (index < 0)
			panic("Failed to allocate IRTE for ioapic %d\n", apic);

		memset(&irte, 0, sizeof(irte));

		irte.present = 1;
		irte.dst_mode = INT_DEST_MODE;
		irte.trigger_mode = trigger;
		irte.dlvry_mode = INT_DELIVERY_MODE;
		irte.vector = vector;
		irte.dest_id = IRTE_DEST(destination);

		modify_irte(irq, &irte);

		ir_entry->index2 = (index >> 15) & 0x1;
		ir_entry->zero = 0;
		ir_entry->format = 1;
		ir_entry->index = (index & 0x7fff);
	} else
#endif
	{
		entry->delivery_mode = INT_DELIVERY_MODE;
		entry->dest_mode = INT_DEST_MODE;
		entry->dest = destination;
	}

	entry->mask = 0;	/* enable IRQ */
	entry->trigger = trigger;
	entry->polarity = polarity;
	entry->vector = vector;

	/* Mask level triggered irqs.
	 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
	 */
	if (trigger)
		entry->mask = 1;
	return 0;
}

static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq,
			      int trigger, int polarity)
{
	struct irq_cfg *cfg = irq_cfg + irq;
	struct IO_APIC_route_entry entry;
	cpumask_t mask;

	if (!IO_APIC_IRQ(irq))
		return;

	mask = TARGET_CPUS;
	if (assign_irq_vector(irq, mask))
		return;

	cpus_and(mask, cfg->domain, mask);

	apic_printk(APIC_VERBOSE, KERN_DEBUG
		    "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
		    "IRQ %d Mode:%i Active:%i)\n",
		    apic, mp_ioapics[apic].mp_apicid, pin, cfg->vector,
		    irq, trigger, polarity);

	if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry,
			       cpu_mask_to_apicid(mask), trigger, polarity,
			       cfg->vector)) {
		printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
		       mp_ioapics[apic].mp_apicid, pin);
		__clear_irq_vector(irq);
		return;
	}

	ioapic_register_intr(irq, trigger);
	if (irq < 16)
		disable_8259A_irq(irq);

	ioapic_write_entry(apic, pin, entry);
}

static void __init setup_IO_APIC_irqs(void)
{
	int apic, pin, idx, irq, first_notcon = 1;

	apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");

	for (apic = 0; apic < nr_ioapics; apic++) {
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {

			idx = find_irq_entry(apic, pin, mp_INT);
			if (idx == -1) {
				if (first_notcon) {
					apic_printk(APIC_VERBOSE, KERN_DEBUG " IO-APIC (apicid-pin) %d-%d", mp_ioapics[apic].mp_apicid, pin);
					first_notcon = 0;
				} else
					apic_printk(APIC_VERBOSE, ", %d-%d", mp_ioapics[apic].mp_apicid, pin);
				continue;
			}
			if (!first_notcon) {
				apic_printk(APIC_VERBOSE, " not connected.\n");
				first_notcon = 1;
			}

			irq = pin_2_irq(idx, apic, pin);
			add_pin_to_irq(irq, apic, pin);

			setup_IO_APIC_irq(apic, pin, irq,
					  irq_trigger(idx), irq_polarity(idx));
		}
	}

	if (!first_notcon)
		apic_printk(APIC_VERBOSE, " not connected.\n");
}

/*
 * Set up the timer pin, possibly with the 8259A master behind.
 */
static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
					int vector)
{
	struct IO_APIC_route_entry entry;

	if (intr_remapping_enabled)
		return;

	memset(&entry, 0, sizeof(entry));

	/*
	 * We use logical delivery to get the timer IRQ
	 * to the first CPU.
	 */
	entry.dest_mode = INT_DEST_MODE;
	entry.mask = 1;					/* mask IRQ now */
	entry.dest = cpu_mask_to_apicid(TARGET_CPUS);
	entry.delivery_mode = INT_DELIVERY_MODE;
	entry.polarity = 0;
	entry.trigger = 0;
	entry.vector = vector;

	/*
	 * The timer IRQ doesn't have to know that behind the
	 * scene we may have an 8259A master in AEOI mode ...
	 */
	set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");

	/*
	 * Add it to the IO-APIC irq-routing table:
	 */
	ioapic_write_entry(apic, pin, entry);
}

__apicdebuginit(void) print_IO_APIC(void)
{
	int apic, i;
	union IO_APIC_reg_00 reg_00;
	union IO_APIC_reg_01 reg_01;
	union IO_APIC_reg_02 reg_02;
	unsigned long flags;

	if (apic_verbosity == APIC_QUIET)
		return;

	printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
	for (i = 0; i < nr_ioapics; i++)
		printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
		       mp_ioapics[i].mp_apicid, nr_ioapic_registers[i]);

	/*
	 * We are a bit conservative about what we expect. We have to
	 * know about every hardware change ASAP.
	 */
	printk(KERN_INFO "testing the IO APIC.......................\n");

	for (apic = 0; apic < nr_ioapics; apic++) {

		spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(apic, 0);
		reg_01.raw = io_apic_read(apic, 1);
		if (reg_01.bits.version >= 0x10)
			reg_02.raw = io_apic_read(apic, 2);
		spin_unlock_irqrestore(&ioapic_lock, flags);

		printk("\n");
		printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mp_apicid);
		printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
		printk(KERN_DEBUG ".......    : physical APIC id: %02X\n", reg_00.bits.ID);

		printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
		printk(KERN_DEBUG ".......     : max redirection entries: %04X\n", reg_01.bits.entries);

		printk(KERN_DEBUG ".......     : PRQ implemented: %X\n", reg_01.bits.PRQ);
		printk(KERN_DEBUG ".......     : IO APIC version: %04X\n", reg_01.bits.version);

		if (reg_01.bits.version >= 0x10) {
			printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
			printk(KERN_DEBUG ".......     : arbitration: %02X\n", reg_02.bits.arbitration);
		}

		printk(KERN_DEBUG ".... IRQ redirection table:\n");

		printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
		       " Stat Dmod Deli Vect:\n");

		for (i = 0; i <= reg_01.bits.entries; i++) {
			struct IO_APIC_route_entry entry;

			entry = ioapic_read_entry(apic, i);

			printk(KERN_DEBUG " %02x %03X ",
			       i,
			       entry.dest
			);

			printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
			       entry.mask,
			       entry.trigger,
			       entry.irr,
			       entry.polarity,
			       entry.delivery_status,
			       entry.dest_mode,
			       entry.delivery_mode,
			       entry.vector
			);
		}
	}
	printk(KERN_DEBUG "IRQ to pin mappings:\n");
	for (i = 0; i < nr_irqs; i++) {
		struct irq_pin_list *entry = irq_2_pin + i;
		if (entry->pin < 0)
			continue;
		printk(KERN_DEBUG "IRQ%d ", i);
		for (;;) {
			printk("-> %d:%d", entry->apic, entry->pin);
			if (!entry->next)
				break;
			entry = irq_2_pin + entry->next;
		}
		printk("\n");
	}

	printk(KERN_INFO ".................................... done.\n");

	return;
}

__apicdebuginit(void) print_APIC_bitfield(int base)
{
	unsigned int v;
	int i, j;

	if (apic_verbosity == APIC_QUIET)
		return;

	printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
	for (i = 0; i < 8; i++) {
		v = apic_read(base + i*0x10);
		for (j = 0; j < 32; j++) {
			if (v & (1<<j))
				printk("1");
			else
				printk("0");
		}
		printk("\n");
	}
}

__apicdebuginit(void) print_local_APIC(void *dummy)
{
	unsigned int v, ver, maxlvt;
	unsigned long icr;

	if (apic_verbosity == APIC_QUIET)
		return;

	printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
	       smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	printk(KERN_INFO "... APIC ID:      %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	printk(KERN_INFO "... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	v = apic_read(APIC_ARBPRI);
	printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
	       v & APIC_ARBPRI_MASK);
	v = apic_read(APIC_PROCPRI);
	printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);

	v = apic_read(APIC_EOI);
	printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
	v = apic_read(APIC_RRR);
	printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
	v = apic_read(APIC_LDR);
	printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
	v = apic_read(APIC_DFR);
	printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
	v = apic_read(APIC_SPIV);
	printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);

	printk(KERN_DEBUG "... APIC ISR field:\n");
	print_APIC_bitfield(APIC_ISR);
	printk(KERN_DEBUG "... APIC TMR field:\n");
	print_APIC_bitfield(APIC_TMR);
	printk(KERN_DEBUG "... APIC IRR field:\n");
	print_APIC_bitfield(APIC_IRR);

	v = apic_read(APIC_ESR);
	printk(KERN_DEBUG "... APIC ESR: %08x\n", v);

	icr = apic_icr_read();
	printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
	printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {			/* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {			/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
	printk("\n");
}

__apicdebuginit(void) print_all_local_APICs(void)
{
	on_each_cpu(print_local_APIC, NULL, 1);
}

__apicdebuginit(void) print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (apic_verbosity == APIC_QUIET)
		return;

	printk(KERN_DEBUG "\nprinting PIC contents\n");

	spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	printk(KERN_DEBUG "... PIC  IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	printk(KERN_DEBUG "... PIC  IRR: %04x\n", v);

	outb(0x0b, 0xa0);
	outb(0x0b, 0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	outb(0x0a, 0xa0);
	outb(0x0a, 0x20);

	spin_unlock_irqrestore(&i8259A_lock, flags);

	printk(KERN_DEBUG "... PIC  ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
}

__apicdebuginit(int) print_all_ICs(void)
{
	print_PIC();
	print_all_local_APICs();
	print_IO_APIC();

	return 0;
}

fs_initcall(print_all_ICs);

void __init enable_IO_APIC(void)
{
	union IO_APIC_reg_01 reg_01;
	int i8259_apic, i8259_pin;
	int i, apic;
	unsigned long flags;

	for (i = 0; i < pin_map_size; i++) {
		irq_2_pin[i].pin = -1;
		irq_2_pin[i].next = 0;
	}

	/*
	 * The number of IO-APIC IRQ registers (== #pins):
	 */
	for (apic = 0; apic < nr_ioapics; apic++) {
		spin_lock_irqsave(&ioapic_lock, flags);
		reg_01.raw = io_apic_read(apic, 1);
		spin_unlock_irqrestore(&ioapic_lock, flags);
		nr_ioapic_registers[apic] = reg_01.bits.entries+1;
	}
	for (apic = 0; apic < nr_ioapics; apic++) {
		int pin;
		/* See if any of the pins is in ExtINT mode */
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
			struct IO_APIC_route_entry entry;
			entry = ioapic_read_entry(apic, pin);

			/* If the interrupt line is enabled and in ExtInt mode
			 * I have found the pin where the i8259 is connected.
			 */
			if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
				ioapic_i8259.apic = apic;
				ioapic_i8259.pin = pin;
				goto found_i8259;
			}
		}
	}
 found_i8259:
	/* See if the MP table has reported the ExtINT */
	i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
	i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
	/* Trust the MP table if nothing is setup in the hardware */
	if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
		printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
		ioapic_i8259.pin = i8259_pin;
		ioapic_i8259.apic = i8259_apic;
	}
	/* Complain if the MP table and the hardware disagree */
	if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
	    (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
	{
		printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
	}

	/*
	 * Do not trust the IO-APIC being empty at bootup
	 */
	clear_IO_APIC();
}

/*
 * Not an __init, needed by the reboot code
 */
void disable_IO_APIC(void)
{
	/*
	 * Clear the IO-APIC before rebooting:
	 */
	clear_IO_APIC();

	/*
	 * If the i8259 is routed through an IOAPIC,
	 * put that IOAPIC in virtual wire mode
	 * so legacy interrupts can be delivered.
	 */
	if (ioapic_i8259.pin != -1) {
		struct IO_APIC_route_entry entry;

		memset(&entry, 0, sizeof(entry));
		entry.mask            = 0; /* Enabled */
		entry.trigger         = 0; /* Edge */
		entry.irr             = 0;
		entry.polarity        = 0; /* High */
		entry.delivery_status = 0;
		entry.dest_mode       = 0; /* Physical */
		entry.delivery_mode   = dest_ExtINT; /* ExtInt */
		entry.vector          = 0;
		entry.dest            = read_apic_id();

		/*
		 * Add it to the IO-APIC irq-routing table:
		 */
		ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
	}

	disconnect_bsp_APIC(ioapic_i8259.pin != -1);
}

/*
 * There is a nasty bug in some older SMP boards, their mptable lies
 * about the timer IRQ. We do the following to work around the situation:
 *
 *	- timer IRQ defaults to IO-APIC IRQ
 *	- if this function detects that timer IRQs are defunct, then we fall
 *	  back to ISA timer IRQs
 */
static int __init timer_irq_works(void)
{
	unsigned long t1 = jiffies;
	unsigned long flags;

	local_save_flags(flags);
	local_irq_enable();
	/* Let ten ticks pass... */
	mdelay((10 * 1000) / HZ);
	local_irq_restore(flags);

	/*
	 * Expect a few ticks at least, to be sure some possible
	 * glue logic does not lock up after one or two first
	 * ticks in a non-ExtINT mode. Also the local APIC
	 * might have cached one ExtINT interrupt. Finally, at
	 * least one tick may be lost due to delays.
	 */

	/* jiffies wrap? */
	if (time_after(jiffies, t1 + 4))
		return 1;
	return 0;
}
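
/*
 * For example, at HZ=250 the mdelay above busy-waits 40ms, i.e. ten timer
 * periods; requiring t1 + 4 jiffies to have elapsed tolerates the cached
 * ExtINT interrupt and the few lost ticks mentioned above while still
 * distinguishing a dead timer line.
 */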

/*
 * In the SMP+IOAPIC case it might happen that there are an unspecified
 * number of pending IRQ events unhandled. These cases are very rare,
 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
 * better to do it this way as thus we do not have to be aware of
 * 'pending' interrupts in the IRQ path, except at this point.
 */
/*
 * Edge triggered needs to resend any interrupt
 * that was delayed but this is now handled in the device
 * independent code.
 */

/*
 * Starting up an edge-triggered IO-APIC interrupt is
 * nasty - we need to make sure that we get the edge.
 * If it is already asserted for some reason, we need
 * to return 1 to indicate that it was pending.
 *
 * This is not complete - we should be able to fake
 * an edge even if it isn't on the 8259A...
 */
static unsigned int startup_ioapic_irq(unsigned int irq)
{
	int was_pending = 0;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	if (irq < 16) {
		disable_8259A_irq(irq);
		if (i8259A_irq_pending(irq))
			was_pending = 1;
	}
	__unmask_IO_APIC_irq(irq);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return was_pending;
}

static int ioapic_retrigger_irq(unsigned int irq)
{
	struct irq_cfg *cfg = &irq_cfg[irq];
	unsigned long flags;

	spin_lock_irqsave(&vector_lock, flags);
	send_IPI_mask(cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector);
	spin_unlock_irqrestore(&vector_lock, flags);

	return 1;
}

/*
 * Level and edge triggered IO-APIC interrupts need different handling,
 * so we use two separate IRQ descriptors. Edge triggered IRQs can be
 * handled with the level-triggered descriptor, but that one has slightly
 * more overhead. Level-triggered interrupts cannot be handled with the
 * edge-triggered handler, without risking IRQ storms and other ugly
 * races.
 */

#ifdef CONFIG_SMP

#ifdef CONFIG_INTR_REMAP
static void ir_irq_migration(struct work_struct *work);

static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration);

/*
 * Migrate the IO-APIC irq in the presence of intr-remapping.
 *
 * For an edge triggered irq, migration is a simple atomic update (of the
 * vector and cpu destination) of the IRTE and a flush of the hardware
 * cache.
 *
 * For level triggered irqs, we need to modify the io-apic RTE as well
 * with the updated vector information, along with modifying the IRTE
 * with the vector and destination. So irq migration for level triggered
 * irqs is a little more complex than for edge triggered ones. The good
 * news is that we use the same algorithm for level triggered migration
 * as we have today; the only difference is that we now initiate the irq
 * migration from process context instead of interrupt context.
 *
 * In the future, when we do a directed EOI (combined with cpu EOI
 * broadcast suppression) to the IO-APIC, level triggered irq migration
 * will also be as simple as edge triggered migration and we can do the
 * irq migration with a simple atomic update to the IO-APIC RTE.
 */
static void migrate_ioapic_irq(int irq, cpumask_t mask)
{
	struct irq_cfg *cfg = irq_cfg + irq;
	struct irq_desc *desc = irq_desc + irq;
	cpumask_t tmp, cleanup_mask;
	struct irte irte;
	int modify_ioapic_rte = desc->status & IRQ_LEVEL;
	unsigned int dest;
	unsigned long flags;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	if (get_irte(irq, &irte))
		return;

	if (assign_irq_vector(irq, mask))
		return;

	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	if (modify_ioapic_rte) {
		spin_lock_irqsave(&ioapic_lock, flags);
		__target_IO_APIC_irq(irq, dest, cfg->vector);
		spin_unlock_irqrestore(&ioapic_lock, flags);
	}

	irte.vector = cfg->vector;
	irte.dest_id = IRTE_DEST(dest);

	/*
	 * Modify the IRTE and flush the interrupt entry cache.
	 */
	modify_irte(irq, &irte);

	if (cfg->move_in_progress) {
		cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
		cfg->move_cleanup_count = cpus_weight(cleanup_mask);
		send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
		cfg->move_in_progress = 0;
	}

	irq_desc[irq].affinity = mask;
}

static int migrate_irq_remapped_level(int irq)
{
	int ret = -1;

	mask_IO_APIC_irq(irq);

	if (io_apic_level_ack_pending(irq)) {
		/*
		 * Interrupt in progress. Migrating the irq now will change
		 * the vector information in the IO-APIC RTE and that will
		 * confuse the EOI broadcast performed by the cpu.
		 * So, delay the irq migration to the next instance.
		 */
		schedule_delayed_work(&ir_migration_work, 1);
		goto unmask;
	}

	/* everything is clear. we have right of way */
	migrate_ioapic_irq(irq, irq_desc[irq].pending_mask);

	ret = 0;
	irq_desc[irq].status &= ~IRQ_MOVE_PENDING;
	cpus_clear(irq_desc[irq].pending_mask);

unmask:
	unmask_IO_APIC_irq(irq);
	return ret;
}

static void ir_irq_migration(struct work_struct *work)
{
	int irq;

	for (irq = 0; irq < nr_irqs; irq++) {
		struct irq_desc *desc = irq_desc + irq;
		if (desc->status & IRQ_MOVE_PENDING) {
			unsigned long flags;

			spin_lock_irqsave(&desc->lock, flags);
			if (!desc->chip->set_affinity ||
			    !(desc->status & IRQ_MOVE_PENDING)) {
				desc->status &= ~IRQ_MOVE_PENDING;
				spin_unlock_irqrestore(&desc->lock, flags);
				continue;
			}

			desc->chip->set_affinity(irq,
						 irq_desc[irq].pending_mask);
			spin_unlock_irqrestore(&desc->lock, flags);
		}
	}
}

/*
 * Migrates the IRQ destination in the process context.
 */
static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
{
	if (irq_desc[irq].status & IRQ_LEVEL) {
		irq_desc[irq].status |= IRQ_MOVE_PENDING;
		irq_desc[irq].pending_mask = mask;
		migrate_irq_remapped_level(irq);
		return;
	}

	migrate_ioapic_irq(irq, mask);
}
#endif

asmlinkage void smp_irq_move_cleanup_interrupt(void)
{
	unsigned vector, me;
	ack_APIC_irq();
	exit_idle();
	irq_enter();

	me = smp_processor_id();
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		unsigned int irq;
		struct irq_desc *desc;
		struct irq_cfg *cfg;
		irq = __get_cpu_var(vector_irq)[vector];
		if (irq >= nr_irqs)
			continue;

		desc = irq_desc + irq;
		cfg = irq_cfg + irq;
		spin_lock(&desc->lock);
		if (!cfg->move_cleanup_count)
			goto unlock;

		if ((vector == cfg->vector) && cpu_isset(me, cfg->domain))
			goto unlock;

		__get_cpu_var(vector_irq)[vector] = -1;
		cfg->move_cleanup_count--;
unlock:
		spin_unlock(&desc->lock);
	}

	irq_exit();
}

static void irq_complete_move(unsigned int irq)
{
	struct irq_cfg *cfg = irq_cfg + irq;
	unsigned vector, me;

	if (likely(!cfg->move_in_progress))
		return;

	vector = ~get_irq_regs()->orig_ax;
	me = smp_processor_id();
	if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) {
		cpumask_t cleanup_mask;

		cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
		cfg->move_cleanup_count = cpus_weight(cleanup_mask);
		send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
		cfg->move_in_progress = 0;
	}
}
#else
static inline void irq_complete_move(unsigned int irq) {}
#endif
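
/*
 * Note on irq_complete_move(): the x86-64 interrupt entry code is
 * understood to store the vector as its bitwise complement in orig_ax,
 * which is why ~get_irq_regs()->orig_ax recovers it above; the cleanup
 * IPI is only sent once an interrupt arrives through the *new*
 * vector/cpu, showing the reprogrammed RTE has taken effect.
 */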

#ifdef CONFIG_INTR_REMAP
static void ack_x2apic_level(unsigned int irq)
{
	ack_x2APIC_irq();
}

static void ack_x2apic_edge(unsigned int irq)
{
	ack_x2APIC_irq();
}
#endif

static void ack_apic_edge(unsigned int irq)
{
	irq_complete_move(irq);
	move_native_irq(irq);
	ack_APIC_irq();
}

static void ack_apic_level(unsigned int irq)
{
	int do_unmask_irq = 0;

	irq_complete_move(irq);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	/* If we are moving the irq we need to mask it */
	if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
		do_unmask_irq = 1;
		mask_IO_APIC_irq(irq);
	}
#endif

	/*
	 * We must acknowledge the irq before we move it or the acknowledge will
	 * not propagate properly.
	 */
	ack_APIC_irq();

	/* Now we can move and re-enable the irq */
	if (unlikely(do_unmask_irq)) {
		/* Only migrate the irq if the ack has been received.
		 *
		 * On rare occasions the broadcast level triggered ack gets
		 * delayed going to ioapics, and if we reprogram the
		 * vector while Remote IRR is still set the irq will never
		 * fire again.
		 *
		 * To prevent this scenario we read the Remote IRR bit
		 * of the ioapic. This has two effects.
		 * - On any sane system the read of the ioapic will
		 *   flush writes (and acks) going to the ioapic from
		 *   this cpu.
		 * - We get to see if the ACK has actually been delivered.
		 *
		 * Based on failed experiments of reprogramming the
		 * ioapic entry from outside of irq context starting
		 * with masking the ioapic entry and then polling until
		 * Remote IRR was clear before reprogramming the
		 * ioapic I don't trust the Remote IRR bit to be
		 * completely accurate.
		 *
		 * However there appears to be no other way to plug
		 * this race, so if the Remote IRR bit is not
		 * accurate and is causing problems then it is a hardware bug
		 * and you can go talk to the chipset vendor about it.
		 */
		if (!io_apic_level_ack_pending(irq))
			move_masked_irq(irq);
		unmask_IO_APIC_irq(irq);
	}
}
f29bd1ba
IM
1826static struct irq_chip ioapic_chip __read_mostly = {
1827 .name = "IO-APIC",
04b9267b
EB
1828 .startup = startup_ioapic_irq,
1829 .mask = mask_IO_APIC_irq,
1830 .unmask = unmask_IO_APIC_irq,
0be6652f
EB
1831 .ack = ack_apic_edge,
1832 .eoi = ack_apic_level,
54d5d424 1833#ifdef CONFIG_SMP
04b9267b 1834 .set_affinity = set_ioapic_affinity_irq,
54d5d424 1835#endif
04b9267b 1836 .retrigger = ioapic_retrigger_irq,
1da177e4
LT
1837};
1838
89027d35
SS
1839#ifdef CONFIG_INTR_REMAP
1840static struct irq_chip ir_ioapic_chip __read_mostly = {
1841 .name = "IR-IO-APIC",
1842 .startup = startup_ioapic_irq,
1843 .mask = mask_IO_APIC_irq,
1844 .unmask = unmask_IO_APIC_irq,
1845 .ack = ack_x2apic_edge,
1846 .eoi = ack_x2apic_level,
1847#ifdef CONFIG_SMP
1848 .set_affinity = set_ir_ioapic_affinity_irq,
1849#endif
1850 .retrigger = ioapic_retrigger_irq,
1851};
1852#endif
1853
1da177e4
LT
1854static inline void init_IO_APIC_traps(void)
1855{
1856 int irq;
1857
1858 /*
1859 * NOTE! The local APIC isn't very good at handling
1860 * multiple interrupts at the same interrupt level.
1861 * As the interrupt level is determined by taking the
1862 * vector number and shifting that right by 4, we
1863 * want to spread these out a bit so that they don't
1864 * all fall in the same interrupt level.
1865 *
1866 * Also, we've got to be careful not to trash gate
1867 * 0x80, because int 0x80 is hm, kind of importantish. ;)
1868 */
0799e432 1869 for (irq = 0; irq < nr_irqs ; irq++) {
addfc66b 1870 if (IO_APIC_IRQ(irq) && !irq_cfg[irq].vector) {
1da177e4
LT
1871 /*
1872 * Hmm.. We don't have an entry for this,
1873 * so default to an old-fashioned 8259
1874 * interrupt if we can..
1875 */
1876 if (irq < 16)
1877 make_8259A_irq(irq);
1878 else
1879 /* Strange. Oh, well.. */
f29bd1ba 1880 irq_desc[irq].chip = &no_irq_chip;
1da177e4
LT
1881 }
1882 }
1883}
1884
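/*
 * A one-line illustration of the "interrupt level" mentioned in the comment
 * above; vector_to_level() is a hypothetical helper, not part of this file.
 * Vectors sharing the upper nibble compete for the same local APIC priority
 * level:
 */
static inline unsigned int __maybe_unused vector_to_level(u8 vector)
{
        return vector >> 4;     /* e.g. vectors 0x30-0x3f are all level 3 */
}
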
static void unmask_lapic_irq(unsigned int irq)
{
        unsigned long v;

        v = apic_read(APIC_LVT0);
        apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
}

static void mask_lapic_irq(unsigned int irq)
{
        unsigned long v;

        v = apic_read(APIC_LVT0);
        apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
}

static void ack_lapic_irq(unsigned int irq)
{
        ack_APIC_irq();
}

static struct irq_chip lapic_chip __read_mostly = {
        .name           = "local-APIC",
        .mask           = mask_lapic_irq,
        .unmask         = unmask_lapic_irq,
        .ack            = ack_lapic_irq,
};

static void lapic_register_intr(int irq)
{
        irq_desc[irq].status &= ~IRQ_LEVEL;
        set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
                                      "edge");
}

static void __init setup_nmi(void)
{
        /*
         * Dirty trick to enable the NMI watchdog ...
         * We put the 8259A master into AEOI mode and
         * unmask on all local APICs LVT0 as NMI.
         *
         * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
         * is from Maciej W. Rozycki - so we do not have to EOI from
         * the NMI handler or the timer interrupt.
         */
        printk(KERN_INFO "activating NMI Watchdog ...");

        enable_NMI_through_LVT0();

        printk(" done.\n");
}

/*
 * This looks a bit hackish but it's about the only way of sending
 * a few INTA cycles to 8259As and any associated glue logic. ICR does
 * not support the ExtINT mode, unfortunately. We need to send these
 * cycles as some i82489DX-based boards have glue logic that keeps the
 * 8259A interrupt line asserted until INTA. --macro
 */
static inline void __init unlock_ExtINT_logic(void)
{
        int apic, pin, i;
        struct IO_APIC_route_entry entry0, entry1;
        unsigned char save_control, save_freq_select;

        pin = find_isa_irq_pin(8, mp_INT);
        apic = find_isa_irq_apic(8, mp_INT);
        if (pin == -1)
                return;

        entry0 = ioapic_read_entry(apic, pin);

        clear_IO_APIC_pin(apic, pin);

        memset(&entry1, 0, sizeof(entry1));

        entry1.dest_mode = 0;                   /* physical delivery */
        entry1.mask = 0;                        /* unmask IRQ now */
        entry1.dest = hard_smp_processor_id();
        entry1.delivery_mode = dest_ExtINT;
        entry1.polarity = entry0.polarity;
        entry1.trigger = 0;
        entry1.vector = 0;

        ioapic_write_entry(apic, pin, entry1);

        save_control = CMOS_READ(RTC_CONTROL);
        save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
        CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
                   RTC_FREQ_SELECT);
        CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);

        i = 100;
        while (i-- > 0) {
                mdelay(10);
                if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
                        i -= 10;
        }

        CMOS_WRITE(save_control, RTC_CONTROL);
        CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
        clear_IO_APIC_pin(apic, pin);

        ioapic_write_entry(apic, pin, entry0);
}

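/*
 * For reference (rtc_rate_to_hz() is a hypothetical helper, not part of
 * this file): the rate select value 0x6 written above follows the MC146818
 * periodic-interrupt formula freq = 32768 >> (rate - 1), valid for rates
 * 3..15, giving the 1024 Hz that the polling loop in unlock_ExtINT_logic()
 * relies on:
 */
static inline unsigned int __maybe_unused rtc_rate_to_hz(unsigned int rate)
{
        return 32768 >> (rate - 1);     /* rate 0x6 -> 1024 Hz */
}
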
/*
 * This code may look a bit paranoid, but it's supposed to cooperate with
 * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
 * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
 * fanatically on his truly buggy board.
 *
 * FIXME: really need to revamp this for modern platforms only.
 */
static inline void __init check_timer(void)
{
        struct irq_cfg *cfg = irq_cfg + 0;
        int apic1, pin1, apic2, pin2;
        unsigned long flags;
        int no_pin1 = 0;

        local_irq_save(flags);

        /*
         * get/set the timer IRQ vector:
         */
        disable_8259A_irq(0);
        assign_irq_vector(0, TARGET_CPUS);

        /*
         * As IRQ0 is to be enabled in the 8259A, the virtual
         * wire has to be disabled in the local APIC.
         */
        apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
        init_8259A(1);

        pin1  = find_isa_irq_pin(0, mp_INT);
        apic1 = find_isa_irq_apic(0, mp_INT);
        pin2  = ioapic_i8259.pin;
        apic2 = ioapic_i8259.apic;

        apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
                    "apic1=%d pin1=%d apic2=%d pin2=%d\n",
                    cfg->vector, apic1, pin1, apic2, pin2);

        /*
         * Some BIOS writers are clueless and report the ExtINTA
         * I/O APIC input from the cascaded 8259A as the timer
         * interrupt input. So just in case, if only one pin
         * was found above, try it both directly and through the
         * 8259A.
         */
        if (pin1 == -1) {
                if (intr_remapping_enabled)
                        panic("BIOS bug: timer not connected to IO-APIC");
                pin1 = pin2;
                apic1 = apic2;
                no_pin1 = 1;
        } else if (pin2 == -1) {
                pin2 = pin1;
                apic2 = apic1;
        }

        if (pin1 != -1) {
                /*
                 * Ok, does IRQ0 through the IOAPIC work?
                 */
                if (no_pin1) {
                        add_pin_to_irq(0, apic1, pin1);
                        setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
                }
                unmask_IO_APIC_irq(0);
                if (!no_timer_check && timer_irq_works()) {
                        if (nmi_watchdog == NMI_IO_APIC) {
                                setup_nmi();
                                enable_8259A_irq(0);
                        }
                        if (disable_timer_pin_1 > 0)
                                clear_IO_APIC_pin(0, pin1);
                        goto out;
                }
                if (intr_remapping_enabled)
                        panic("timer doesn't work through Interrupt-remapped IO-APIC");
                clear_IO_APIC_pin(apic1, pin1);
                if (!no_pin1)
                        apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
                                    "8254 timer not connected to IO-APIC\n");

                apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
                            "(IRQ0) through the 8259A ...\n");
                apic_printk(APIC_QUIET, KERN_INFO
                            "..... (found apic %d pin %d) ...\n", apic2, pin2);
                /*
                 * legacy devices should be connected to IO APIC #0
                 */
                replace_pin_at_irq(0, apic1, pin1, apic2, pin2);
                setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
                unmask_IO_APIC_irq(0);
                enable_8259A_irq(0);
                if (timer_irq_works()) {
                        apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
                        timer_through_8259 = 1;
                        if (nmi_watchdog == NMI_IO_APIC) {
                                disable_8259A_irq(0);
                                setup_nmi();
                                enable_8259A_irq(0);
                        }
                        goto out;
                }
                /*
                 * Cleanup, just in case ...
                 */
                disable_8259A_irq(0);
                clear_IO_APIC_pin(apic2, pin2);
                apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
        }

        if (nmi_watchdog == NMI_IO_APIC) {
                apic_printk(APIC_QUIET, KERN_WARNING "timer doesn't work "
                            "through the IO-APIC - disabling NMI Watchdog!\n");
                nmi_watchdog = NMI_NONE;
        }

        apic_printk(APIC_QUIET, KERN_INFO
                    "...trying to set up timer as Virtual Wire IRQ...\n");

        lapic_register_intr(0);
        apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector);     /* Fixed mode */
        enable_8259A_irq(0);

        if (timer_irq_works()) {
                apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
                goto out;
        }
        disable_8259A_irq(0);
        apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
        apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");

        apic_printk(APIC_QUIET, KERN_INFO
                    "...trying to set up timer as ExtINT IRQ...\n");

        init_8259A(0);
        make_8259A_irq(0);
        apic_write(APIC_LVT0, APIC_DM_EXTINT);

        unlock_ExtINT_logic();

        if (timer_irq_works()) {
                apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
                goto out;
        }
        apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
        panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
              "report. Then try booting with the 'noapic' option.\n");
out:
        local_irq_restore(flags);
}

static int __init notimercheck(char *s)
{
        no_timer_check = 1;
        return 1;
}
__setup("no_timer_check", notimercheck);

/*
 * Traditionally ISA IRQ2 is the cascade IRQ, and is not available
 * to devices. However there may be an I/O APIC pin available for
 * this interrupt regardless. The pin may be left unconnected, but
 * typically it will be reused as an ExtINT cascade interrupt for
 * the master 8259A. In the MPS case such a pin will normally be
 * reported as an ExtINT interrupt in the MP table. With ACPI
 * there is no provision for ExtINT interrupts, and in the absence
 * of an override it would be treated as an ordinary ISA I/O APIC
 * interrupt, that is edge-triggered and unmasked by default. We
 * used to do this, but it caused problems on some systems because
 * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using
 * the same ExtINT cascade interrupt to drive the local APIC of the
 * bootstrap processor. Therefore we refrain from routing IRQ2 to
 * the I/O APIC in all cases now. No actual device should request
 * it anyway. --macro
 */
#define PIC_IRQS        (1<<2)

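/*
 * Illustrative only: with io_apic_irqs = ~PIC_IRQS (set below), IRQ2 is the
 * one ISA interrupt not routed through the I/O APIC. A hypothetical bit
 * test over the low 16 lines, mirroring what the IO_APIC_IRQ() macro checks
 * for legacy IRQs; isa_irq_via_ioapic() is not part of this file:
 */
static inline int __maybe_unused isa_irq_via_ioapic(int irq)
{
        return irq < 16 && (io_apic_irqs & (1 << irq)) != 0;
}
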
void __init setup_IO_APIC(void)
{
        /*
         * calling enable_IO_APIC() is moved to setup_local_APIC for BP
         */
        io_apic_irqs = ~PIC_IRQS;

        apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");

        sync_Arb_IDs();
        setup_IO_APIC_irqs();
        init_IO_APIC_traps();
        check_timer();
}

struct sysfs_ioapic_data {
        struct sys_device dev;
        struct IO_APIC_route_entry entry[0];
};
static struct sysfs_ioapic_data *mp_ioapic_data[MAX_IO_APICS];

static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
{
        struct IO_APIC_route_entry *entry;
        struct sysfs_ioapic_data *data;
        int i;

        data = container_of(dev, struct sysfs_ioapic_data, dev);
        entry = data->entry;
        for (i = 0; i < nr_ioapic_registers[dev->id]; i++, entry++)
                *entry = ioapic_read_entry(dev->id, i);

        return 0;
}

static int ioapic_resume(struct sys_device *dev)
{
        struct IO_APIC_route_entry *entry;
        struct sysfs_ioapic_data *data;
        unsigned long flags;
        union IO_APIC_reg_00 reg_00;
        int i;

        data = container_of(dev, struct sysfs_ioapic_data, dev);
        entry = data->entry;

        spin_lock_irqsave(&ioapic_lock, flags);
        reg_00.raw = io_apic_read(dev->id, 0);
        if (reg_00.bits.ID != mp_ioapics[dev->id].mp_apicid) {
                reg_00.bits.ID = mp_ioapics[dev->id].mp_apicid;
                io_apic_write(dev->id, 0, reg_00.raw);
        }
        spin_unlock_irqrestore(&ioapic_lock, flags);
        for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
                ioapic_write_entry(dev->id, i, entry[i]);

        return 0;
}

static struct sysdev_class ioapic_sysdev_class = {
        .name = "ioapic",
        .suspend = ioapic_suspend,
        .resume = ioapic_resume,
};

static int __init ioapic_init_sysfs(void)
{
        struct sys_device *dev;
        int i, size, error;

        error = sysdev_class_register(&ioapic_sysdev_class);
        if (error)
                return error;

        for (i = 0; i < nr_ioapics; i++) {
                size = sizeof(struct sys_device) + nr_ioapic_registers[i]
                        * sizeof(struct IO_APIC_route_entry);
                mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL);
                if (!mp_ioapic_data[i]) {
                        printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
                        continue;
                }
                dev = &mp_ioapic_data[i]->dev;
                dev->id = i;
                dev->cls = &ioapic_sysdev_class;
                error = sysdev_register(dev);
                if (error) {
                        kfree(mp_ioapic_data[i]);
                        mp_ioapic_data[i] = NULL;
                        printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
                        continue;
                }
        }

        return 0;
}

device_initcall(ioapic_init_sysfs);

/*
 * Dynamic irq allocation and deallocation
 */
int create_irq(void)
{
        /* Allocate an unused irq */
        int irq;
        int new;
        unsigned long flags;

        irq = -ENOSPC;
        spin_lock_irqsave(&vector_lock, flags);
        for (new = (nr_irqs - 1); new >= 0; new--) {
                if (platform_legacy_irq(new))
                        continue;
                if (irq_cfg[new].vector != 0)
                        continue;
                if (__assign_irq_vector(new, TARGET_CPUS) == 0)
                        irq = new;
                break;
        }
        spin_unlock_irqrestore(&vector_lock, flags);

        if (irq >= 0) {
                dynamic_irq_init(irq);
        }
        return irq;
}

void destroy_irq(unsigned int irq)
{
        unsigned long flags;

        dynamic_irq_cleanup(irq);

#ifdef CONFIG_INTR_REMAP
        free_irte(irq);
#endif
        spin_lock_irqsave(&vector_lock, flags);
        __clear_irq_vector(irq);
        spin_unlock_irqrestore(&vector_lock, flags);
}

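/*
 * A minimal usage sketch for the pair above (example_alloc_and_free_irq()
 * is a hypothetical caller, not part of this file): create_irq() hands back
 * a free irq number with a vector already assigned, or a negative errno
 * when nothing is available.
 */
static int __maybe_unused example_alloc_and_free_irq(void)
{
        int irq = create_irq();

        if (irq < 0)
                return irq;             /* typically -ENOSPC */

        /* ... bind a chip/handler and use the irq here ... */

        destroy_irq(irq);               /* releases the vector again */
        return 0;
}
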
/*
 * MSI message composition
 */
#ifdef CONFIG_PCI_MSI
static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
{
        struct irq_cfg *cfg = irq_cfg + irq;
        int err;
        unsigned dest;
        cpumask_t tmp;

        tmp = TARGET_CPUS;
        err = assign_irq_vector(irq, tmp);
        if (err)
                return err;

        cpus_and(tmp, cfg->domain, tmp);
        dest = cpu_mask_to_apicid(tmp);

#ifdef CONFIG_INTR_REMAP
        if (irq_remapped(irq)) {
                struct irte irte;
                int ir_index;
                u16 sub_handle;

                ir_index = map_irq_to_irte_handle(irq, &sub_handle);
                BUG_ON(ir_index == -1);

                memset(&irte, 0, sizeof(irte));

                irte.present = 1;
                irte.dst_mode = INT_DEST_MODE;
                irte.trigger_mode = 0;  /* edge */
                irte.dlvry_mode = INT_DELIVERY_MODE;
                irte.vector = cfg->vector;
                irte.dest_id = IRTE_DEST(dest);

                modify_irte(irq, &irte);

                msg->address_hi = MSI_ADDR_BASE_HI;
                msg->data = sub_handle;
                msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
                                  MSI_ADDR_IR_SHV |
                                  MSI_ADDR_IR_INDEX1(ir_index) |
                                  MSI_ADDR_IR_INDEX2(ir_index);
        } else
#endif
        {
                msg->address_hi = MSI_ADDR_BASE_HI;
                msg->address_lo =
                        MSI_ADDR_BASE_LO |
                        ((INT_DEST_MODE == 0) ?
                                MSI_ADDR_DEST_MODE_PHYSICAL:
                                MSI_ADDR_DEST_MODE_LOGICAL) |
                        ((INT_DELIVERY_MODE != dest_LowestPrio) ?
                                MSI_ADDR_REDIRECTION_CPU:
                                MSI_ADDR_REDIRECTION_LOWPRI) |
                        MSI_ADDR_DEST_ID(dest);

                msg->data =
                        MSI_DATA_TRIGGER_EDGE |
                        MSI_DATA_LEVEL_ASSERT |
                        ((INT_DELIVERY_MODE != dest_LowestPrio) ?
                                MSI_DATA_DELIVERY_FIXED:
                                MSI_DATA_DELIVERY_LOWPRI) |
                        MSI_DATA_VECTOR(cfg->vector);
        }
        return err;
}

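/*
 * For orientation (msi_addr_for_apicid() is a hypothetical helper, not part
 * of this file): in the non-remapped branch above, with physical destination
 * mode and fixed delivery, the composed message reduces to the classic x86
 * MSI layout, address = 0xfee00000 | (apicid << 12) and data = vector:
 */
static inline u32 __maybe_unused msi_addr_for_apicid(u8 apicid)
{
        return MSI_ADDR_BASE_LO | MSI_ADDR_DEST_ID(apicid);
}
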
#ifdef CONFIG_SMP
static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
{
        struct irq_cfg *cfg = irq_cfg + irq;
        struct msi_msg msg;
        unsigned int dest;
        cpumask_t tmp;

        cpus_and(tmp, mask, cpu_online_map);
        if (cpus_empty(tmp))
                return;

        if (assign_irq_vector(irq, mask))
                return;

        cpus_and(tmp, cfg->domain, mask);
        dest = cpu_mask_to_apicid(tmp);

        read_msi_msg(irq, &msg);

        msg.data &= ~MSI_DATA_VECTOR_MASK;
        msg.data |= MSI_DATA_VECTOR(cfg->vector);
        msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
        msg.address_lo |= MSI_ADDR_DEST_ID(dest);

        write_msi_msg(irq, &msg);
        irq_desc[irq].affinity = mask;
}

#ifdef CONFIG_INTR_REMAP
/*
 * Migrate the MSI irq to another cpumask. This migration is
 * done in the process context using interrupt-remapping hardware.
 */
static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
{
        struct irq_cfg *cfg = irq_cfg + irq;
        unsigned int dest;
        cpumask_t tmp, cleanup_mask;
        struct irte irte;

        cpus_and(tmp, mask, cpu_online_map);
        if (cpus_empty(tmp))
                return;

        if (get_irte(irq, &irte))
                return;

        if (assign_irq_vector(irq, mask))
                return;

        cpus_and(tmp, cfg->domain, mask);
        dest = cpu_mask_to_apicid(tmp);

        irte.vector = cfg->vector;
        irte.dest_id = IRTE_DEST(dest);

        /*
         * atomically update the IRTE with the new destination and vector.
         */
        modify_irte(irq, &irte);

        /*
         * After this point, all the interrupts will start arriving
         * at the new destination. So, time to cleanup the previous
         * vector allocation.
         */
        if (cfg->move_in_progress) {
                cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
                cfg->move_cleanup_count = cpus_weight(cleanup_mask);
                send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
                cfg->move_in_progress = 0;
        }

        irq_desc[irq].affinity = mask;
}
#endif
#endif /* CONFIG_SMP */

/*
 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI or MSI-X Capability Structure.
 */
static struct irq_chip msi_chip = {
        .name           = "PCI-MSI",
        .unmask         = unmask_msi_irq,
        .mask           = mask_msi_irq,
        .ack            = ack_apic_edge,
#ifdef CONFIG_SMP
        .set_affinity   = set_msi_irq_affinity,
#endif
        .retrigger      = ioapic_retrigger_irq,
};

#ifdef CONFIG_INTR_REMAP
static struct irq_chip msi_ir_chip = {
        .name           = "IR-PCI-MSI",
        .unmask         = unmask_msi_irq,
        .mask           = mask_msi_irq,
        .ack            = ack_x2apic_edge,
#ifdef CONFIG_SMP
        .set_affinity   = ir_set_msi_irq_affinity,
#endif
        .retrigger      = ioapic_retrigger_irq,
};

/*
 * Map the PCI dev to the corresponding remapping hardware unit
 * and allocate 'nvec' consecutive interrupt-remapping table entries
 * in it.
 */
static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
{
        struct intel_iommu *iommu;
        int index;

        iommu = map_dev_to_ir(dev);
        if (!iommu) {
                printk(KERN_ERR
                       "Unable to map PCI %s to iommu\n", pci_name(dev));
                return -ENOENT;
        }

        index = alloc_irte(iommu, irq, nvec);
        if (index < 0) {
                printk(KERN_ERR
                       "Unable to allocate %d IRTE for PCI %s\n", nvec,
                       pci_name(dev));
                return -ENOSPC;
        }
        return index;
}
#endif

static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc, int irq)
{
        int ret;
        struct msi_msg msg;

        ret = msi_compose_msg(dev, irq, &msg);
        if (ret < 0)
                return ret;

        set_irq_msi(irq, desc);
        write_msi_msg(irq, &msg);

#ifdef CONFIG_INTR_REMAP
        if (irq_remapped(irq)) {
                struct irq_desc *desc = irq_desc + irq;
                /*
                 * irq migration in process context
                 */
                desc->status |= IRQ_MOVE_PCNTXT;
                set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge");
        } else
#endif
                set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");

        return 0;
}

int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
{
        int irq, ret;

        irq = create_irq();
        if (irq < 0)
                return irq;

#ifdef CONFIG_INTR_REMAP
        if (!intr_remapping_enabled)
                goto no_ir;

        ret = msi_alloc_irte(dev, irq, 1);
        if (ret < 0)
                goto error;
no_ir:
#endif
        ret = setup_msi_irq(dev, desc, irq);
        if (ret < 0) {
                destroy_irq(irq);
                return ret;
        }
        return 0;

#ifdef CONFIG_INTR_REMAP
error:
        destroy_irq(irq);
        return ret;
#endif
}

int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
        int irq, ret, sub_handle;
        struct msi_desc *desc;
#ifdef CONFIG_INTR_REMAP
        struct intel_iommu *iommu = NULL;
        int index = 0;
#endif

        sub_handle = 0;
        list_for_each_entry(desc, &dev->msi_list, list) {
                irq = create_irq();
                if (irq < 0)
                        return irq;
#ifdef CONFIG_INTR_REMAP
                if (!intr_remapping_enabled)
                        goto no_ir;

                if (!sub_handle) {
                        /*
                         * allocate the consecutive block of IRTE's
                         * for 'nvec'
                         */
                        index = msi_alloc_irte(dev, irq, nvec);
                        if (index < 0) {
                                ret = index;
                                goto error;
                        }
                } else {
                        iommu = map_dev_to_ir(dev);
                        if (!iommu) {
                                ret = -ENOENT;
                                goto error;
                        }
                        /*
                         * setup the mapping between the irq and the IRTE
                         * base index, the sub_handle pointing to the
                         * appropriate interrupt remap table entry.
                         */
                        set_irte_irq(irq, iommu, index, sub_handle);
                }
no_ir:
#endif
                ret = setup_msi_irq(dev, desc, irq);
                if (ret < 0)
                        goto error;
                sub_handle++;
        }
        return 0;

error:
        destroy_irq(irq);
        return ret;
}

void arch_teardown_msi_irq(unsigned int irq)
{
        destroy_irq(irq);
}

#ifdef CONFIG_DMAR
#ifdef CONFIG_SMP
static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
{
        struct irq_cfg *cfg = irq_cfg + irq;
        struct msi_msg msg;
        unsigned int dest;
        cpumask_t tmp;

        cpus_and(tmp, mask, cpu_online_map);
        if (cpus_empty(tmp))
                return;

        if (assign_irq_vector(irq, mask))
                return;

        cpus_and(tmp, cfg->domain, mask);
        dest = cpu_mask_to_apicid(tmp);

        dmar_msi_read(irq, &msg);

        msg.data &= ~MSI_DATA_VECTOR_MASK;
        msg.data |= MSI_DATA_VECTOR(cfg->vector);
        msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
        msg.address_lo |= MSI_ADDR_DEST_ID(dest);

        dmar_msi_write(irq, &msg);
        irq_desc[irq].affinity = mask;
}
#endif /* CONFIG_SMP */

struct irq_chip dmar_msi_type = {
        .name           = "DMAR_MSI",
        .unmask         = dmar_msi_unmask,
        .mask           = dmar_msi_mask,
        .ack            = ack_apic_edge,
#ifdef CONFIG_SMP
        .set_affinity   = dmar_msi_set_affinity,
#endif
        .retrigger      = ioapic_retrigger_irq,
};

int arch_setup_dmar_msi(unsigned int irq)
{
        int ret;
        struct msi_msg msg;

        ret = msi_compose_msg(NULL, irq, &msg);
        if (ret < 0)
                return ret;
        dmar_msi_write(irq, &msg);
        set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
                                      "edge");
        return 0;
}
#endif

#endif /* CONFIG_PCI_MSI */
/*
 * Hypertransport interrupt support
 */
#ifdef CONFIG_HT_IRQ

#ifdef CONFIG_SMP

static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
{
        struct ht_irq_msg msg;
        fetch_ht_irq_msg(irq, &msg);

        msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
        msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);

        msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
        msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);

        write_ht_irq_msg(irq, &msg);
}

static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
{
        struct irq_cfg *cfg = irq_cfg + irq;
        unsigned int dest;
        cpumask_t tmp;

        cpus_and(tmp, mask, cpu_online_map);
        if (cpus_empty(tmp))
                return;

        if (assign_irq_vector(irq, mask))
                return;

        cpus_and(tmp, cfg->domain, mask);
        dest = cpu_mask_to_apicid(tmp);

        target_ht_irq(irq, dest, cfg->vector);
        irq_desc[irq].affinity = mask;
}
#endif

static struct irq_chip ht_irq_chip = {
        .name           = "PCI-HT",
        .mask           = mask_ht_irq,
        .unmask         = unmask_ht_irq,
        .ack            = ack_apic_edge,
#ifdef CONFIG_SMP
        .set_affinity   = set_ht_irq_affinity,
#endif
        .retrigger      = ioapic_retrigger_irq,
};

int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
{
        struct irq_cfg *cfg = irq_cfg + irq;
        int err;
        cpumask_t tmp;

        tmp = TARGET_CPUS;
        err = assign_irq_vector(irq, tmp);
        if (!err) {
                struct ht_irq_msg msg;
                unsigned dest;

                cpus_and(tmp, cfg->domain, tmp);
                dest = cpu_mask_to_apicid(tmp);

                msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);

                msg.address_lo =
                        HT_IRQ_LOW_BASE |
                        HT_IRQ_LOW_DEST_ID(dest) |
                        HT_IRQ_LOW_VECTOR(cfg->vector) |
                        ((INT_DEST_MODE == 0) ?
                                HT_IRQ_LOW_DM_PHYSICAL :
                                HT_IRQ_LOW_DM_LOGICAL) |
                        HT_IRQ_LOW_RQEOI_EDGE |
                        ((INT_DELIVERY_MODE != dest_LowestPrio) ?
                                HT_IRQ_LOW_MT_FIXED :
                                HT_IRQ_LOW_MT_ARBITRATED) |
                        HT_IRQ_LOW_IRQ_MASKED;

                write_ht_irq_msg(irq, &msg);

                set_irq_chip_and_handler_name(irq, &ht_irq_chip,
                                              handle_edge_irq, "edge");
        }
        return err;
}
#endif /* CONFIG_HT_IRQ */

/* --------------------------------------------------------------------------
                        ACPI-based IOAPIC Configuration
   -------------------------------------------------------------------------- */

#ifdef CONFIG_ACPI

#define IO_APIC_MAX_ID          0xFE

int __init io_apic_get_redir_entries(int ioapic)
{
        union IO_APIC_reg_01 reg_01;
        unsigned long flags;

        spin_lock_irqsave(&ioapic_lock, flags);
        reg_01.raw = io_apic_read(ioapic, 1);
        spin_unlock_irqrestore(&ioapic_lock, flags);

        return reg_01.bits.entries;
}

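/*
 * Worth noting (io_apic_pin_count() is a hypothetical helper, not part of
 * this file): register 01 reports the index of the highest redirection
 * entry, so the number of pins on an I/O APIC is one more than the value
 * returned above:
 */
static inline int __maybe_unused io_apic_pin_count(int ioapic)
{
        return io_apic_get_redir_entries(ioapic) + 1;
}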

int io_apic_set_pci_routing(int ioapic, int pin, int irq, int triggering, int polarity)
{
        if (!IO_APIC_IRQ(irq)) {
                apic_printk(APIC_QUIET, KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
                            ioapic);
                return -EINVAL;
        }

        /*
         * IRQs < 16 are already in the irq_2_pin[] map
         */
        if (irq >= 16)
                add_pin_to_irq(irq, ioapic, pin);

        setup_IO_APIC_irq(ioapic, pin, irq, triggering, polarity);

        return 0;
}

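/*
 * A hedged usage sketch (example_route_pci_gsi() and all values are
 * hypothetical, not from this file): ACPI callers conventionally route a
 * PCI interrupt as level-triggered (triggering = 1) and active-low
 * (polarity = 1), here GSI 20 on ioapic 0, pin 20:
 */
static int __maybe_unused example_route_pci_gsi(void)
{
        /* ioapic 0, pin 20, irq 20, level triggered, active low */
        return io_apic_set_pci_routing(0, 20, 20, 1, 1);
}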

int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
{
        int i;

        if (skip_ioapic_setup)
                return -1;

        for (i = 0; i < mp_irq_entries; i++)
                if (mp_irqs[i].mp_irqtype == mp_INT &&
                    mp_irqs[i].mp_srcbusirq == bus_irq)
                        break;
        if (i >= mp_irq_entries)
                return -1;

        *trigger = irq_trigger(i);
        *polarity = irq_polarity(i);
        return 0;
}

#endif /* CONFIG_ACPI */

/*
 * This function is currently only a helper for the i386 SMP boot process,
 * where we need to reprogram the ioredtbls to cater for the CPUs which
 * have come online, so the mask in all cases should simply be TARGET_CPUS.
 */
#ifdef CONFIG_SMP
void __init setup_ioapic_dest(void)
{
        int pin, ioapic, irq, irq_entry;

        if (skip_ioapic_setup == 1)
                return;

        for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
                for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
                        irq_entry = find_irq_entry(ioapic, pin, mp_INT);
                        if (irq_entry == -1)
                                continue;
                        irq = pin_2_irq(irq_entry, ioapic, pin);

                        /* setup_IO_APIC_irqs could fail to get a vector for
                         * some device when you have too many devices, because
                         * at that time only the boot cpu is online.
                         */
                        if (!irq_cfg[irq].vector)
                                setup_IO_APIC_irq(ioapic, pin, irq,
                                                  irq_trigger(irq_entry),
                                                  irq_polarity(irq_entry));
#ifdef CONFIG_INTR_REMAP
                        else if (intr_remapping_enabled)
                                set_ir_ioapic_affinity_irq(irq, TARGET_CPUS);
#endif
                        else
                                set_ioapic_affinity_irq(irq, TARGET_CPUS);
                }

        }
}
#endif

#define IOAPIC_RESOURCE_NAME_SIZE 11

static struct resource *ioapic_resources;

static struct resource * __init ioapic_setup_resources(void)
{
        unsigned long n;
        struct resource *res;
        char *mem;
        int i;

        if (nr_ioapics <= 0)
                return NULL;

        n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
        n *= nr_ioapics;

        mem = alloc_bootmem(n);
        res = (void *)mem;

        if (mem != NULL) {
                mem += sizeof(struct resource) * nr_ioapics;

                for (i = 0; i < nr_ioapics; i++) {
                        res[i].name = mem;
                        res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
                        sprintf(mem, "IOAPIC %u", i);
                        mem += IOAPIC_RESOURCE_NAME_SIZE;
                }
        }

        ioapic_resources = res;

        return res;
}

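/*
 * Illustrative layout note (name_for_ioapic() is hypothetical, not part of
 * this file): the bootmem block above packs the struct resource array first
 * and the 11-byte "IOAPIC %u" names after it, so a name pointer can be
 * recovered from the array base alone:
 */
static inline char * __maybe_unused name_for_ioapic(struct resource *res, int i)
{
        return (char *)(res + nr_ioapics) + i * IOAPIC_RESOURCE_NAME_SIZE;
}
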
void __init ioapic_init_mappings(void)
{
        unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
        struct resource *ioapic_res;
        int i;

        ioapic_res = ioapic_setup_resources();
        for (i = 0; i < nr_ioapics; i++) {
                if (smp_found_config) {
                        ioapic_phys = mp_ioapics[i].mp_apicaddr;
                } else {
                        ioapic_phys = (unsigned long)
                                alloc_bootmem_pages(PAGE_SIZE);
                        ioapic_phys = __pa(ioapic_phys);
                }
                set_fixmap_nocache(idx, ioapic_phys);
                apic_printk(APIC_VERBOSE,
                            "mapped IOAPIC to %016lx (%016lx)\n",
                            __fix_to_virt(idx), ioapic_phys);
                idx++;

                if (ioapic_res != NULL) {
                        ioapic_res->start = ioapic_phys;
                        ioapic_res->end = ioapic_phys + (4 * 1024) - 1;
                        ioapic_res++;
                }
        }
}

static int __init ioapic_insert_resources(void)
{
        int i;
        struct resource *r = ioapic_resources;

        if (!r) {
                printk(KERN_ERR
                       "IO APIC resources could not be allocated.\n");
                return -1;
        }

        for (i = 0; i < nr_ioapics; i++) {
                insert_resource(&iomem_resource, r);
                r++;
        }

        return 0;
}

/* Insert the IO APIC resources after PCI initialization has occurred to
 * handle IO APICs that are mapped in on a BAR in PCI space. */
late_initcall(ioapic_insert_resources);