x86: add irq_cfg in io_apic_64.c
arch/x86/kernel/io_apic_64.c
/*
 * Intel IO-APIC support for multi-Pentium hosts.
 *
 * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
 *
 * Many thanks to Stig Venaas for trying out countless experimental
 * patches and reporting/debugging problems patiently!
 *
 * (c) 1999, Multiple IO-APIC support, developed by
 * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
 * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
 * further tested and cleaned up by Zach Brown <zab@redhat.com>
 * and Ingo Molnar <mingo@redhat.com>
 *
 * Fixes
 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
 *					thanks to Eric Gilmore
 *					and Rolf G. Tews
 *					for testing these extensively
 *	Paul Diefenbaugh	:	Added full ACPI support
 */

#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/mc146818rtc.h>
#include <linux/acpi.h>
#include <linux/sysdev.h>
#include <linux/msi.h>
#include <linux/htirq.h>
#include <linux/dmar.h>
#include <linux/jiffies.h>
#ifdef CONFIG_ACPI
#include <acpi/acpi_bus.h>
#endif
#include <linux/bootmem.h>

#include <asm/idle.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/acpi.h>
#include <asm/dma.h>
#include <asm/i8259.h>
#include <asm/nmi.h>
#include <asm/msidef.h>
#include <asm/hypertransport.h>
#include <asm/irq_remapping.h>

#include <mach_ipi.h>
#include <mach_apic.h>

#define __apicdebuginit(type) static type __init

struct irq_cfg;

struct irq_cfg {
	unsigned int irq;
	struct irq_cfg *next;
	cpumask_t domain;
	cpumask_t old_domain;
	unsigned move_cleanup_count;
	u8 vector;
	u8 move_in_progress : 1;
};

/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
static struct irq_cfg irq_cfg_legacy[] __initdata = {
	[0]  = { .irq =  0, .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR,  },
	[1]  = { .irq =  1, .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR,  },
	[2]  = { .irq =  2, .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR,  },
	[3]  = { .irq =  3, .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR,  },
	[4]  = { .irq =  4, .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR,  },
	[5]  = { .irq =  5, .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR,  },
	[6]  = { .irq =  6, .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR,  },
	[7]  = { .irq =  7, .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR,  },
	[8]  = { .irq =  8, .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR,  },
	[9]  = { .irq =  9, .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR,  },
	[10] = { .irq = 10, .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, },
	[11] = { .irq = 11, .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, },
	[12] = { .irq = 12, .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, },
	[13] = { .irq = 13, .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, },
	[14] = { .irq = 14, .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, },
	[15] = { .irq = 15, .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
};

static struct irq_cfg irq_cfg_init = { .irq = -1U, };
/* needs to be bigger than the size of irq_cfg_legacy */
static int nr_irq_cfg = 32;

static int __init parse_nr_irq_cfg(char *arg)
{
	if (arg) {
		nr_irq_cfg = simple_strtoul(arg, NULL, 0);
		if (nr_irq_cfg < 32)
			nr_irq_cfg = 32;
	}
	return 0;
}

early_param("nr_irq_cfg", parse_nr_irq_cfg);

static void init_one_irq_cfg(struct irq_cfg *cfg)
{
	memcpy(cfg, &irq_cfg_init, sizeof(struct irq_cfg));
}

static void __init init_work(void *data)
{
	struct dyn_array *da = data;
	struct irq_cfg *cfg;
	int i;

	cfg = *da->name;

	memcpy(cfg, irq_cfg_legacy, sizeof(irq_cfg_legacy));

	i = sizeof(irq_cfg_legacy)/sizeof(irq_cfg_legacy[0]);
	for (; i < *da->nr; i++)
		init_one_irq_cfg(&cfg[i]);

	for (i = 1; i < *da->nr; i++)
		cfg[i-1].next = &cfg[i];
}

static struct irq_cfg *irq_cfgx;
DEFINE_DYN_ARRAY(irq_cfgx, sizeof(struct irq_cfg), nr_irq_cfg, PAGE_SIZE, init_work);

static struct irq_cfg *irq_cfg(unsigned int irq)
{
	struct irq_cfg *cfg;

	BUG_ON(irq == -1U);

	cfg = &irq_cfgx[0];
	while (cfg) {
		if (cfg->irq == irq)
			return cfg;

		if (cfg->irq == -1U)
			return NULL;

		cfg = cfg->next;
	}

	return NULL;
}

static struct irq_cfg *irq_cfg_alloc(unsigned int irq)
{
	struct irq_cfg *cfg, *cfg_pri;
	int i;
	int count = 0;

	BUG_ON(irq == -1U);

	cfg_pri = cfg = &irq_cfgx[0];
	while (cfg) {
		if (cfg->irq == irq)
			return cfg;

		if (cfg->irq == -1U) {
			cfg->irq = irq;
			return cfg;
		}
		cfg_pri = cfg;
		cfg = cfg->next;
		count++;
	}

	/*
	 * we ran out of the pre-allocated ones, allocate more
	 */
	printk(KERN_DEBUG "try to get more irq_cfg %d\n", nr_irq_cfg);

	if (after_bootmem)
		cfg = kzalloc(sizeof(struct irq_cfg)*nr_irq_cfg, GFP_ATOMIC);
	else
		cfg = __alloc_bootmem_nopanic(sizeof(struct irq_cfg)*nr_irq_cfg, PAGE_SIZE, 0);

	if (!cfg)
		panic("please boot with nr_irq_cfg= %d\n", count * 2);

	for (i = 0; i < nr_irq_cfg; i++)
		init_one_irq_cfg(&cfg[i]);

	for (i = 1; i < nr_irq_cfg; i++)
		cfg[i-1].next = &cfg[i];

	cfg->irq = irq;
	cfg_pri->next = cfg;

	return cfg;
}
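
/*
 * Illustrative note (not part of the original change): irq_cfg() and
 * irq_cfg_alloc() walk the same singly linked chain of irq_cfg slots.
 * A rough sketch of how the two differ, assuming IRQ 100 has not been
 * seen before:
 *
 *	struct irq_cfg *cfg;
 *
 *	cfg = irq_cfg(100);		// NULL: lookup never extends the chain
 *	cfg = irq_cfg_alloc(100);	// claims the first free slot (irq == -1U),
 *					// or appends nr_irq_cfg fresh slots
 *	cfg = irq_cfg(100);		// now finds the slot just claimed
 */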

static int assign_irq_vector(int irq, cpumask_t mask);

int first_system_vector = 0xfe;

char system_vectors[NR_VECTORS] = { [0 ... NR_VECTORS-1] = SYS_VECTOR_FREE};

int sis_apic_bug; /* not actually supported, dummy for compile */

static int no_timer_check;

static int disable_timer_pin_1 __initdata;

int timer_through_8259 __initdata;

/* Where, if anywhere, is the i8259 connected in external int mode */
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };

static DEFINE_SPINLOCK(ioapic_lock);
static DEFINE_SPINLOCK(vector_lock);

/*
 * # of IRQ routing registers
 */
int nr_ioapic_registers[MAX_IO_APICS];

/* I/O APIC RTE contents at the OS boot up */
struct IO_APIC_route_entry *early_ioapic_entries[MAX_IO_APICS];

/* I/O APIC entries */
struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];
int nr_ioapics;

/* MP IRQ source entries */
struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];

/* # of MP IRQ source entries */
int mp_irq_entries;

DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);

/*
 * Rough estimation of how many shared IRQs there are, can
 * be changed anytime.
 */

int pin_map_size;

/*
 * This is performance-critical, we want to do it O(1)
 *
 * the indexing order of this array favors 1:1 mappings
 * between pins and IRQs.
 */

static struct irq_pin_list {
	short apic, pin;
	int next;
} *irq_2_pin;

DEFINE_DYN_ARRAY(irq_2_pin, sizeof(struct irq_pin_list), pin_map_size, sizeof(struct irq_pin_list), NULL);

struct io_apic {
	unsigned int index;
	unsigned int unused[3];
	unsigned int data;
};

static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
{
	return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
		+ (mp_ioapics[idx].mp_apicaddr & ~PAGE_MASK);
}

static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	return readl(&io_apic->data);
}

static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}

/*
 * Re-write a value: to be used for read-modify-write
 * cycles where the read already set up the index register.
 */
static inline void io_apic_modify(unsigned int apic, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(value, &io_apic->data);
}
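
/*
 * Illustrative note (not from the original change): the IO-APIC is
 * programmed through an index/data window, which is what the accessors
 * above encode.  Redirection table entry N lives at indirect registers
 * 0x10 + 2*N (low dword) and 0x11 + 2*N (high dword), so e.g. reading
 * both halves of RTE 3 looks like:
 *
 *	low  = io_apic_read(apic, 0x10 + 2*3);
 *	high = io_apic_read(apic, 0x11 + 2*3);
 */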

static bool io_apic_level_ack_pending(unsigned int irq)
{
	struct irq_pin_list *entry;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	entry = irq_2_pin + irq;
	for (;;) {
		unsigned int reg;
		int pin;

		pin = entry->pin;
		if (pin == -1)
			break;
		reg = io_apic_read(entry->apic, 0x10 + pin*2);
		/* Is the remote IRR bit set? */
		if (reg & IO_APIC_REDIR_REMOTE_IRR) {
			spin_unlock_irqrestore(&ioapic_lock, flags);
			return true;
		}
		if (!entry->next)
			break;
		entry = irq_2_pin + entry->next;
	}
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return false;
}

/*
 * Synchronize the IO-APIC and the CPU by doing
 * a dummy read from the IO-APIC
 */
static inline void io_apic_sync(unsigned int apic)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	readl(&io_apic->data);
}

#define __DO_ACTION(R, ACTION, FINAL)					\
									\
{									\
	int pin;							\
	struct irq_pin_list *entry = irq_2_pin + irq;			\
									\
	BUG_ON(irq >= nr_irqs);						\
	for (;;) {							\
		unsigned int reg;					\
		pin = entry->pin;					\
		if (pin == -1)						\
			break;						\
		reg = io_apic_read(entry->apic, 0x10 + R + pin*2);	\
		reg ACTION;						\
		io_apic_modify(entry->apic, reg);			\
		FINAL;							\
		if (!entry->next)					\
			break;						\
		entry = irq_2_pin + entry->next;			\
	}								\
}

union entry_union {
	struct { u32 w1, w2; };
	struct IO_APIC_route_entry entry;
};

static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
{
	union entry_union eu;
	unsigned long flags;
	spin_lock_irqsave(&ioapic_lock, flags);
	eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
	eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
	spin_unlock_irqrestore(&ioapic_lock, flags);
	return eu.entry;
}

/*
 * When we write a new IO APIC routing entry, we need to write the high
 * word first! If the mask bit in the low word is clear, we will enable
 * the interrupt, and we need to make sure the entry is fully populated
 * before that happens.
 */
static void
__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	union entry_union eu;
	eu.entry = e;
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
}

static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	unsigned long flags;
	spin_lock_irqsave(&ioapic_lock, flags);
	__ioapic_write_entry(apic, pin, e);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}

/*
 * When we mask an IO APIC routing entry, we need to write the low
 * word first, in order to set the mask bit before we change the
 * high bits!
 */
static void ioapic_mask_entry(int apic, int pin)
{
	unsigned long flags;
	union entry_union eu = { .entry.mask = 1 };

	spin_lock_irqsave(&ioapic_lock, flags);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}
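
/*
 * Illustrative note (not from the original change): the two ordering
 * rules above are mirror images.  The mask bit sits in the low dword,
 * so a sketch of the safe sequences for RTE N is:
 *
 *	enable:	write 0x11 + 2*N (dest) first, then 0x10 + 2*N (unmasked low)
 *	mask:	write 0x10 + 2*N (masked low) first, then 0x11 + 2*N
 *
 * Either way the entry is never live while half-written.
 */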

#ifdef CONFIG_SMP
static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector)
{
	int apic, pin;
	struct irq_pin_list *entry = irq_2_pin + irq;

	BUG_ON(irq >= nr_irqs);
	for (;;) {
		unsigned int reg;
		apic = entry->apic;
		pin = entry->pin;
		if (pin == -1)
			break;
		/*
		 * With interrupt-remapping, the destination information
		 * comes from the interrupt-remapping table entry.
		 */
		if (!irq_remapped(irq))
			io_apic_write(apic, 0x11 + pin*2, dest);
		reg = io_apic_read(apic, 0x10 + pin*2);
		reg &= ~IO_APIC_REDIR_VECTOR_MASK;
		reg |= vector;
		io_apic_modify(apic, reg);
		if (!entry->next)
			break;
		entry = irq_2_pin + entry->next;
	}
}

static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
{
	struct irq_cfg *cfg = irq_cfg(irq);
	unsigned long flags;
	unsigned int dest;
	cpumask_t tmp;
	struct irq_desc *desc;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	if (assign_irq_vector(irq, mask))
		return;

	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	/*
	 * Only the high 8 bits are valid.
	 */
	dest = SET_APIC_LOGICAL_ID(dest);

	desc = irq_to_desc(irq);
	spin_lock_irqsave(&ioapic_lock, flags);
	__target_IO_APIC_irq(irq, dest, cfg->vector);
	desc->affinity = mask;
	spin_unlock_irqrestore(&ioapic_lock, flags);
}
#endif

/*
 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
 * shared ISA-space IRQs, so we have to support them. We are super
 * fast in the common case, and fast for shared ISA-space IRQs.
 */
int first_free_entry;
static void add_pin_to_irq(unsigned int irq, int apic, int pin)
{
	struct irq_pin_list *entry = irq_2_pin + irq;

	BUG_ON(irq >= nr_irqs);
	irq_cfg_alloc(irq);

	while (entry->next)
		entry = irq_2_pin + entry->next;

	if (entry->pin != -1) {
		entry->next = first_free_entry;
		entry = irq_2_pin + entry->next;
		if (++first_free_entry >= pin_map_size)
			panic("io_apic.c: ran out of irq_2_pin entries!");
	}
	entry->apic = apic;
	entry->pin = pin;
}

/*
 * Reroute an IRQ to a different pin.
 */
static void __init replace_pin_at_irq(unsigned int irq,
				      int oldapic, int oldpin,
				      int newapic, int newpin)
{
	struct irq_pin_list *entry = irq_2_pin + irq;

	while (1) {
		if (entry->apic == oldapic && entry->pin == oldpin) {
			entry->apic = newapic;
			entry->pin = newpin;
		}
		if (!entry->next)
			break;
		entry = irq_2_pin + entry->next;
	}
}

#define DO_ACTION(name, R, ACTION, FINAL)			\
								\
	static void name##_IO_APIC_irq(unsigned int irq)	\
	__DO_ACTION(R, ACTION, FINAL)

/* mask = 1 */
DO_ACTION(__mask, 0, |= IO_APIC_REDIR_MASKED, io_apic_sync(entry->apic))

/* mask = 0 */
DO_ACTION(__unmask, 0, &= ~IO_APIC_REDIR_MASKED, )

static void mask_IO_APIC_irq(unsigned int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	__mask_IO_APIC_irq(irq);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void unmask_IO_APIC_irq(unsigned int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	__unmask_IO_APIC_irq(irq);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
{
	struct IO_APIC_route_entry entry;

	/* Check delivery_mode to be sure we're not clearing an SMI pin */
	entry = ioapic_read_entry(apic, pin);
	if (entry.delivery_mode == dest_SMI)
		return;
	/*
	 * Disable it in the IO-APIC irq-routing table:
	 */
	ioapic_mask_entry(apic, pin);
}

static void clear_IO_APIC(void)
{
	int apic, pin;

	for (apic = 0; apic < nr_ioapics; apic++)
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
			clear_IO_APIC_pin(apic, pin);
}

/*
 * Saves and masks all the unmasked IO-APIC RTE's
 */
int save_mask_IO_APIC_setup(void)
{
	union IO_APIC_reg_01 reg_01;
	unsigned long flags;
	int apic, pin;

	/*
	 * The number of IO-APIC IRQ registers (== #pins):
	 */
	for (apic = 0; apic < nr_ioapics; apic++) {
		spin_lock_irqsave(&ioapic_lock, flags);
		reg_01.raw = io_apic_read(apic, 1);
		spin_unlock_irqrestore(&ioapic_lock, flags);
		nr_ioapic_registers[apic] = reg_01.bits.entries+1;
	}

	for (apic = 0; apic < nr_ioapics; apic++) {
		early_ioapic_entries[apic] =
			kzalloc(sizeof(struct IO_APIC_route_entry) *
				nr_ioapic_registers[apic], GFP_KERNEL);
		if (!early_ioapic_entries[apic])
			return -ENOMEM;
	}

	for (apic = 0; apic < nr_ioapics; apic++)
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
			struct IO_APIC_route_entry entry;

			entry = early_ioapic_entries[apic][pin] =
				ioapic_read_entry(apic, pin);
			if (!entry.mask) {
				entry.mask = 1;
				ioapic_write_entry(apic, pin, entry);
			}
		}
	return 0;
}

void restore_IO_APIC_setup(void)
{
	int apic, pin;

	for (apic = 0; apic < nr_ioapics; apic++)
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
			ioapic_write_entry(apic, pin,
					   early_ioapic_entries[apic][pin]);
}

void reinit_intr_remapped_IO_APIC(int intr_remapping)
{
	/*
	 * For now, do a plain restore of the previous settings.
	 * TBD: when the OS enables interrupt-remapping, the IO-APIC
	 * RTE's need to be set up to point to the interrupt-remapping
	 * table entries.  Until then, restore and wait for
	 * setup_IO_APIC_irqs() to do the proper initialization.
	 */
	restore_IO_APIC_setup();
}

int skip_ioapic_setup;
int ioapic_force;

static int __init parse_noapic(char *str)
{
	disable_ioapic_setup();
	return 0;
}
early_param("noapic", parse_noapic);

/* Actually the next is obsolete, but keep it for paranoid reasons -AK */
static int __init disable_timer_pin_setup(char *arg)
{
	disable_timer_pin_1 = 1;
	return 1;
}
__setup("disable_timer_pin_1", disable_timer_pin_setup);

/*
 * Find the IRQ entry number of a certain pin.
 */
static int find_irq_entry(int apic, int pin, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++)
		if (mp_irqs[i].mp_irqtype == type &&
		    (mp_irqs[i].mp_dstapic == mp_ioapics[apic].mp_apicid ||
		     mp_irqs[i].mp_dstapic == MP_APIC_ALL) &&
		    mp_irqs[i].mp_dstirq == pin)
			return i;

	return -1;
}

/*
 * Find the pin to which IRQ[irq] (ISA) is connected
 */
static int __init find_isa_irq_pin(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].mp_srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].mp_irqtype == type) &&
		    (mp_irqs[i].mp_srcbusirq == irq))

			return mp_irqs[i].mp_dstirq;
	}
	return -1;
}

static int __init find_isa_irq_apic(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].mp_srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].mp_irqtype == type) &&
		    (mp_irqs[i].mp_srcbusirq == irq))
			break;
	}
	if (i < mp_irq_entries) {
		int apic;
		for (apic = 0; apic < nr_ioapics; apic++) {
			if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic)
				return apic;
		}
	}

	return -1;
}

/*
 * Find a specific PCI IRQ entry.
 * Not an __init, possibly needed by modules
 */
static int pin_2_irq(int idx, int apic, int pin);

int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
{
	int apic, i, best_guess = -1;

	apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
		    bus, slot, pin);
	if (test_bit(bus, mp_bus_not_pci)) {
		apic_printk(APIC_VERBOSE, "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
		return -1;
	}
	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].mp_srcbus;

		for (apic = 0; apic < nr_ioapics; apic++)
			if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic ||
			    mp_irqs[i].mp_dstapic == MP_APIC_ALL)
				break;

		if (!test_bit(lbus, mp_bus_not_pci) &&
		    !mp_irqs[i].mp_irqtype &&
		    (bus == lbus) &&
		    (slot == ((mp_irqs[i].mp_srcbusirq >> 2) & 0x1f))) {
			int irq = pin_2_irq(i, apic, mp_irqs[i].mp_dstirq);

			if (!(apic || IO_APIC_IRQ(irq)))
				continue;

			if (pin == (mp_irqs[i].mp_srcbusirq & 3))
				return irq;
			/*
			 * Use the first all-but-pin matching entry as a
			 * best-guess fuzzy result for broken mptables.
			 */
			if (best_guess < 0)
				best_guess = irq;
		}
	}
	BUG_ON(best_guess >= nr_irqs);
	return best_guess;
}

/* ISA interrupts are always polarity zero edge triggered,
 * when listed as conforming in the MP table. */

#define default_ISA_trigger(idx)	(0)
#define default_ISA_polarity(idx)	(0)

/* PCI interrupts are always polarity one level triggered,
 * when listed as conforming in the MP table. */

#define default_PCI_trigger(idx)	(1)
#define default_PCI_polarity(idx)	(1)

static int MPBIOS_polarity(int idx)
{
	int bus = mp_irqs[idx].mp_srcbus;
	int polarity;

	/*
	 * Determine IRQ line polarity (high active or low active):
	 */
	switch (mp_irqs[idx].mp_irqflag & 3)
	{
		case 0: /* conforms, i.e. bus-type dependent polarity */
			if (test_bit(bus, mp_bus_not_pci))
				polarity = default_ISA_polarity(idx);
			else
				polarity = default_PCI_polarity(idx);
			break;
		case 1: /* high active */
		{
			polarity = 0;
			break;
		}
		case 2: /* reserved */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			polarity = 1;
			break;
		}
		case 3: /* low active */
		{
			polarity = 1;
			break;
		}
		default: /* invalid */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			polarity = 1;
			break;
		}
	}
	return polarity;
}

static int MPBIOS_trigger(int idx)
{
	int bus = mp_irqs[idx].mp_srcbus;
	int trigger;

	/*
	 * Determine IRQ trigger mode (edge or level sensitive):
	 */
	switch ((mp_irqs[idx].mp_irqflag>>2) & 3)
	{
		case 0: /* conforms, i.e. bus-type dependent */
			if (test_bit(bus, mp_bus_not_pci))
				trigger = default_ISA_trigger(idx);
			else
				trigger = default_PCI_trigger(idx);
			break;
		case 1: /* edge */
		{
			trigger = 0;
			break;
		}
		case 2: /* reserved */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			trigger = 1;
			break;
		}
		case 3: /* level */
		{
			trigger = 1;
			break;
		}
		default: /* invalid */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			trigger = 0;
			break;
		}
	}
	return trigger;
}

static inline int irq_polarity(int idx)
{
	return MPBIOS_polarity(idx);
}

static inline int irq_trigger(int idx)
{
	return MPBIOS_trigger(idx);
}
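
/*
 * Illustrative note (not from the original change): the MP table packs
 * both attributes into mp_irqflag - bits 0-1 are the polarity, bits 2-3
 * the trigger mode, which is why the decoders above use "& 3" and
 * ">> 2".  For example, mp_irqflag == 0x0f decodes as:
 *
 *	polarity = 0x0f & 3        = 3  (low active)
 *	trigger  = (0x0f >> 2) & 3 = 3  (level)
 */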

static int pin_2_irq(int idx, int apic, int pin)
{
	int irq, i;
	int bus = mp_irqs[idx].mp_srcbus;

	/*
	 * Debugging check, we are in big trouble if this message pops up!
	 */
	if (mp_irqs[idx].mp_dstirq != pin)
		printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");

	if (test_bit(bus, mp_bus_not_pci)) {
		irq = mp_irqs[idx].mp_srcbusirq;
	} else {
		/*
		 * PCI IRQs are mapped in order
		 */
		i = irq = 0;
		while (i < apic)
			irq += nr_ioapic_registers[i++];
		irq += pin;
	}
	BUG_ON(irq >= nr_irqs);
	return irq;
}
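
/*
 * Illustrative note (not from the original change): "mapped in order"
 * means a PCI IRQ number is the pin index offset by the sizes of all
 * preceding IO-APICs.  With a 24-pin IO-APIC 0, pin 5 of apic 1 gives:
 *
 *	irq = nr_ioapic_registers[0] + 5 = 24 + 5 = 29
 */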

void lock_vector_lock(void)
{
	/* Used to ensure that the online set of cpus does not change
	 * during assign_irq_vector.
	 */
	spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
	spin_unlock(&vector_lock);
}

static int __assign_irq_vector(int irq, cpumask_t mask)
{
	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
	unsigned int old_vector;
	int cpu;
	struct irq_cfg *cfg;

	BUG_ON((unsigned)irq >= nr_irqs);
	cfg = irq_cfg(irq);

	/* Only try and allocate irqs on cpus that are present */
	cpus_and(mask, mask, cpu_online_map);

	if ((cfg->move_in_progress) || cfg->move_cleanup_count)
		return -EBUSY;

	old_vector = cfg->vector;
	if (old_vector) {
		cpumask_t tmp;
		cpus_and(tmp, cfg->domain, mask);
		if (!cpus_empty(tmp))
			return 0;
	}

	for_each_cpu_mask_nr(cpu, mask) {
		cpumask_t domain, new_mask;
		int new_cpu;
		int vector, offset;

		domain = vector_allocation_domain(cpu);
		cpus_and(new_mask, domain, cpu_online_map);

		vector = current_vector;
		offset = current_offset;
next:
		vector += 8;
		if (vector >= first_system_vector) {
			/* If we run out of vectors on large boxen, must share them. */
			offset = (offset + 1) % 8;
			vector = FIRST_DEVICE_VECTOR + offset;
		}
		if (unlikely(current_vector == vector))
			continue;
		if (vector == IA32_SYSCALL_VECTOR)
			goto next;
		for_each_cpu_mask_nr(new_cpu, new_mask)
			if (per_cpu(vector_irq, new_cpu)[vector] != -1)
				goto next;
		/* Found one! */
		current_vector = vector;
		current_offset = offset;
		if (old_vector) {
			cfg->move_in_progress = 1;
			cfg->old_domain = cfg->domain;
		}
		for_each_cpu_mask_nr(new_cpu, new_mask)
			per_cpu(vector_irq, new_cpu)[vector] = irq;
		cfg->vector = vector;
		cfg->domain = domain;
		return 0;
	}
	return -ENOSPC;
}
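
/*
 * Illustrative note (not from the original change): the local APIC
 * treats vector >> 4 as the interrupt level, and each level spans 16
 * vectors, so stepping by 8 puts at most two consecutive allocations
 * into the same level.  E.g. starting from vector 0x41 the search
 * visits 0x49 (level 4), 0x51 (level 5), 0x59 (level 5), 0x61 (level
 * 6), ... wrapping at first_system_vector with a new offset in 0..7.
 */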

static int assign_irq_vector(int irq, cpumask_t mask)
{
	int err;
	unsigned long flags;

	spin_lock_irqsave(&vector_lock, flags);
	err = __assign_irq_vector(irq, mask);
	spin_unlock_irqrestore(&vector_lock, flags);
	return err;
}

static void __clear_irq_vector(int irq)
{
	struct irq_cfg *cfg;
	cpumask_t mask;
	int cpu, vector;

	BUG_ON((unsigned)irq >= nr_irqs);
	cfg = irq_cfg(irq);
	BUG_ON(!cfg->vector);

	vector = cfg->vector;
	cpus_and(mask, cfg->domain, cpu_online_map);
	for_each_cpu_mask_nr(cpu, mask)
		per_cpu(vector_irq, cpu)[vector] = -1;

	cfg->vector = 0;
	cpus_clear(cfg->domain);
}

void __setup_vector_irq(int cpu)
{
	/* Initialize vector_irq on a new cpu */
	/* This function must be called with vector_lock held */
	int irq, vector;

	/* Mark the inuse vectors */
	for (irq = 0; irq < nr_irqs; ++irq) {
		struct irq_cfg *cfg = irq_cfg(irq);

		if (!cpu_isset(cpu, cfg->domain))
			continue;
		vector = cfg->vector;
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
	/* Mark the free vectors */
	for (vector = 0; vector < NR_VECTORS; ++vector) {
		struct irq_cfg *cfg;

		irq = per_cpu(vector_irq, cpu)[vector];
		if (irq < 0)
			continue;

		cfg = irq_cfg(irq);
		if (!cpu_isset(cpu, cfg->domain))
			per_cpu(vector_irq, cpu)[vector] = -1;
	}
}
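
/*
 * Illustrative note (not from the original change): vector_irq is the
 * per-cpu inverse of irq_cfg - it maps a vector back to an irq number
 * (or -1).  After the two passes above, any cpu in cfg->domain satisfies:
 *
 *	per_cpu(vector_irq, cpu)[irq_cfg(irq)->vector] == irq
 *
 * which is what the vectored interrupt entry path relies on.
 */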

static struct irq_chip ioapic_chip;
#ifdef CONFIG_INTR_REMAP
static struct irq_chip ir_ioapic_chip;
#endif

static void ioapic_register_intr(int irq, unsigned long trigger)
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);
	if (trigger)
		desc->status |= IRQ_LEVEL;
	else
		desc->status &= ~IRQ_LEVEL;

#ifdef CONFIG_INTR_REMAP
	if (irq_remapped(irq)) {
		desc->status |= IRQ_MOVE_PCNTXT;
		if (trigger)
			set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
						      handle_fasteoi_irq,
						      "fasteoi");
		else
			set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
						      handle_edge_irq, "edge");
		return;
	}
#endif
	if (trigger)
		set_irq_chip_and_handler_name(irq, &ioapic_chip,
					      handle_fasteoi_irq,
					      "fasteoi");
	else
		set_irq_chip_and_handler_name(irq, &ioapic_chip,
					      handle_edge_irq, "edge");
}

static int setup_ioapic_entry(int apic, int irq,
			      struct IO_APIC_route_entry *entry,
			      unsigned int destination, int trigger,
			      int polarity, int vector)
{
	/*
	 * add it to the IO-APIC irq-routing table:
	 */
	memset(entry, 0, sizeof(*entry));

#ifdef CONFIG_INTR_REMAP
	if (intr_remapping_enabled) {
		struct intel_iommu *iommu = map_ioapic_to_ir(apic);
		struct irte irte;
		struct IR_IO_APIC_route_entry *ir_entry =
			(struct IR_IO_APIC_route_entry *) entry;
		int index;

		if (!iommu)
			panic("No mapping iommu for ioapic %d\n", apic);

		index = alloc_irte(iommu, irq, 1);
		if (index < 0)
			panic("Failed to allocate IRTE for ioapic %d\n", apic);

		memset(&irte, 0, sizeof(irte));

		irte.present = 1;
		irte.dst_mode = INT_DEST_MODE;
		irte.trigger_mode = trigger;
		irte.dlvry_mode = INT_DELIVERY_MODE;
		irte.vector = vector;
		irte.dest_id = IRTE_DEST(destination);

		modify_irte(irq, &irte);

		ir_entry->index2 = (index >> 15) & 0x1;
		ir_entry->zero = 0;
		ir_entry->format = 1;
		ir_entry->index = (index & 0x7fff);
	} else
#endif
	{
		entry->delivery_mode = INT_DELIVERY_MODE;
		entry->dest_mode = INT_DEST_MODE;
		entry->dest = destination;
	}

	entry->mask = 0;	/* enable IRQ */
	entry->trigger = trigger;
	entry->polarity = polarity;
	entry->vector = vector;

	/* Mask level triggered irqs.
	 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
	 */
	if (trigger)
		entry->mask = 1;
	return 0;
}

static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq,
			      int trigger, int polarity)
{
	struct irq_cfg *cfg;
	struct IO_APIC_route_entry entry;
	cpumask_t mask;

	if (!IO_APIC_IRQ(irq))
		return;

	cfg = irq_cfg(irq);

	mask = TARGET_CPUS;
	if (assign_irq_vector(irq, mask))
		return;

	cpus_and(mask, cfg->domain, mask);

	apic_printk(APIC_VERBOSE, KERN_DEBUG
		    "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
		    "IRQ %d Mode:%i Active:%i)\n",
		    apic, mp_ioapics[apic].mp_apicid, pin, cfg->vector,
		    irq, trigger, polarity);

	if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry,
			       cpu_mask_to_apicid(mask), trigger, polarity,
			       cfg->vector)) {
		printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
		       mp_ioapics[apic].mp_apicid, pin);
		__clear_irq_vector(irq);
		return;
	}

	ioapic_register_intr(irq, trigger);
	if (irq < 16)
		disable_8259A_irq(irq);

	ioapic_write_entry(apic, pin, entry);
}

static void __init setup_IO_APIC_irqs(void)
{
	int apic, pin, idx, irq, first_notcon = 1;

	apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");

	for (apic = 0; apic < nr_ioapics; apic++) {
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {

			idx = find_irq_entry(apic, pin, mp_INT);
			if (idx == -1) {
				if (first_notcon) {
					apic_printk(APIC_VERBOSE, KERN_DEBUG " IO-APIC (apicid-pin) %d-%d", mp_ioapics[apic].mp_apicid, pin);
					first_notcon = 0;
				} else
					apic_printk(APIC_VERBOSE, ", %d-%d", mp_ioapics[apic].mp_apicid, pin);
				continue;
			}
			if (!first_notcon) {
				apic_printk(APIC_VERBOSE, " not connected.\n");
				first_notcon = 1;
			}

			irq = pin_2_irq(idx, apic, pin);
			add_pin_to_irq(irq, apic, pin);

			setup_IO_APIC_irq(apic, pin, irq,
					  irq_trigger(idx), irq_polarity(idx));
		}
	}

	if (!first_notcon)
		apic_printk(APIC_VERBOSE, " not connected.\n");
}

/*
 * Set up the timer pin, possibly with the 8259A-master behind.
 */
static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
					int vector)
{
	struct IO_APIC_route_entry entry;

	if (intr_remapping_enabled)
		return;

	memset(&entry, 0, sizeof(entry));

	/*
	 * We use logical delivery to get the timer IRQ
	 * to the first CPU.
	 */
	entry.dest_mode = INT_DEST_MODE;
	entry.mask = 1;		/* mask IRQ now */
	entry.dest = cpu_mask_to_apicid(TARGET_CPUS);
	entry.delivery_mode = INT_DELIVERY_MODE;
	entry.polarity = 0;
	entry.trigger = 0;
	entry.vector = vector;

	/*
	 * The timer IRQ doesn't have to know that behind the
	 * scene we may have an 8259A-master in AEOI mode ...
	 */
	set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");

	/*
	 * Add it to the IO-APIC irq-routing table:
	 */
	ioapic_write_entry(apic, pin, entry);
}

__apicdebuginit(void) print_IO_APIC(void)
{
	int apic, i;
	union IO_APIC_reg_00 reg_00;
	union IO_APIC_reg_01 reg_01;
	union IO_APIC_reg_02 reg_02;
	unsigned long flags;

	if (apic_verbosity == APIC_QUIET)
		return;

	printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
	for (i = 0; i < nr_ioapics; i++)
		printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
		       mp_ioapics[i].mp_apicid, nr_ioapic_registers[i]);

	/*
	 * We are a bit conservative about what we expect. We have to
	 * know about every hardware change ASAP.
	 */
	printk(KERN_INFO "testing the IO APIC.......................\n");

	for (apic = 0; apic < nr_ioapics; apic++) {

		spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(apic, 0);
		reg_01.raw = io_apic_read(apic, 1);
		if (reg_01.bits.version >= 0x10)
			reg_02.raw = io_apic_read(apic, 2);
		spin_unlock_irqrestore(&ioapic_lock, flags);

		printk("\n");
		printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mp_apicid);
		printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
		printk(KERN_DEBUG ".......    : physical APIC id: %02X\n", reg_00.bits.ID);

		printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
		printk(KERN_DEBUG ".......     : max redirection entries: %04X\n", reg_01.bits.entries);

		printk(KERN_DEBUG ".......     : PRQ implemented: %X\n", reg_01.bits.PRQ);
		printk(KERN_DEBUG ".......     : IO APIC version: %04X\n", reg_01.bits.version);

		if (reg_01.bits.version >= 0x10) {
			printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
			printk(KERN_DEBUG ".......     : arbitration: %02X\n", reg_02.bits.arbitration);
		}

		printk(KERN_DEBUG ".... IRQ redirection table:\n");

		printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
		       " Stat Dmod Deli Vect:\n");

		for (i = 0; i <= reg_01.bits.entries; i++) {
			struct IO_APIC_route_entry entry;

			entry = ioapic_read_entry(apic, i);

			printk(KERN_DEBUG " %02x %03X ",
			       i,
			       entry.dest
			);

			printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
			       entry.mask,
			       entry.trigger,
			       entry.irr,
			       entry.polarity,
			       entry.delivery_status,
			       entry.dest_mode,
			       entry.delivery_mode,
			       entry.vector
			);
		}
	}
	printk(KERN_DEBUG "IRQ to pin mappings:\n");
	for (i = 0; i < nr_irqs; i++) {
		struct irq_pin_list *entry = irq_2_pin + i;
		if (entry->pin < 0)
			continue;
		printk(KERN_DEBUG "IRQ%d ", i);
		for (;;) {
			printk("-> %d:%d", entry->apic, entry->pin);
			if (!entry->next)
				break;
			entry = irq_2_pin + entry->next;
		}
		printk("\n");
	}

	printk(KERN_INFO ".................................... done.\n");

	return;
}
__apicdebuginit(void) print_APIC_bitfield(int base)
{
	unsigned int v;
	int i, j;

	if (apic_verbosity == APIC_QUIET)
		return;

	printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
	for (i = 0; i < 8; i++) {
		v = apic_read(base + i*0x10);
		for (j = 0; j < 32; j++) {
			if (v & (1<<j))
				printk("1");
			else
				printk("0");
		}
		printk("\n");
	}
}
__apicdebuginit(void) print_local_APIC(void *dummy)
{
	unsigned int v, ver, maxlvt;
	unsigned long icr;

	if (apic_verbosity == APIC_QUIET)
		return;

	printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
	       smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	printk(KERN_INFO "... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	v = apic_read(APIC_ARBPRI);
	printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
	       v & APIC_ARBPRI_MASK);
	v = apic_read(APIC_PROCPRI);
	printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);

	v = apic_read(APIC_EOI);
	printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
	v = apic_read(APIC_RRR);
	printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
	v = apic_read(APIC_LDR);
	printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
	v = apic_read(APIC_DFR);
	printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
	v = apic_read(APIC_SPIV);
	printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);

	printk(KERN_DEBUG "... APIC ISR field:\n");
	print_APIC_bitfield(APIC_ISR);
	printk(KERN_DEBUG "... APIC TMR field:\n");
	print_APIC_bitfield(APIC_TMR);
	printk(KERN_DEBUG "... APIC IRR field:\n");
	print_APIC_bitfield(APIC_IRR);

	v = apic_read(APIC_ESR);
	printk(KERN_DEBUG "... APIC ESR: %08x\n", v);

	icr = apic_icr_read();
	printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
	printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) { /* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) { /* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
	printk("\n");
}
__apicdebuginit(void) print_all_local_APICs(void)
{
	on_each_cpu(print_local_APIC, NULL, 1);
}

__apicdebuginit(void) print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (apic_verbosity == APIC_QUIET)
		return;

	printk(KERN_DEBUG "\nprinting PIC contents\n");

	spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	printk(KERN_DEBUG "... PIC IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	printk(KERN_DEBUG "... PIC IRR: %04x\n", v);

	outb(0x0b, 0xa0);
	outb(0x0b, 0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	outb(0x0a, 0xa0);
	outb(0x0a, 0x20);

	spin_unlock_irqrestore(&i8259A_lock, flags);

	printk(KERN_DEBUG "... PIC ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
}

__apicdebuginit(int) print_all_ICs(void)
{
	print_PIC();
	print_all_local_APICs();
	print_IO_APIC();

	return 0;
}

fs_initcall(print_all_ICs);

void __init enable_IO_APIC(void)
{
	union IO_APIC_reg_01 reg_01;
	int i8259_apic, i8259_pin;
	int i, apic;
	unsigned long flags;

	for (i = 0; i < pin_map_size; i++) {
		irq_2_pin[i].pin = -1;
		irq_2_pin[i].next = 0;
	}

	/*
	 * The number of IO-APIC IRQ registers (== #pins):
	 */
	for (apic = 0; apic < nr_ioapics; apic++) {
		spin_lock_irqsave(&ioapic_lock, flags);
		reg_01.raw = io_apic_read(apic, 1);
		spin_unlock_irqrestore(&ioapic_lock, flags);
		nr_ioapic_registers[apic] = reg_01.bits.entries+1;
	}
	for (apic = 0; apic < nr_ioapics; apic++) {
		int pin;
		/* See if any of the pins is in ExtINT mode */
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
			struct IO_APIC_route_entry entry;
			entry = ioapic_read_entry(apic, pin);

			/* If the interrupt line is enabled and in ExtInt mode
			 * I have found the pin where the i8259 is connected.
			 */
			if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
				ioapic_i8259.apic = apic;
				ioapic_i8259.pin = pin;
				goto found_i8259;
			}
		}
	}
 found_i8259:
	/* Look to see if the MP table has reported the ExtINT */
	i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
	i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
	/* Trust the MP table if nothing is setup in the hardware */
	if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
		printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
		ioapic_i8259.pin = i8259_pin;
		ioapic_i8259.apic = i8259_apic;
	}
	/* Complain if the MP table and the hardware disagree */
	if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
	    (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
	{
		printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
	}

	/*
	 * Do not trust the IO-APIC being empty at bootup
	 */
	clear_IO_APIC();
}

/*
 * Not an __init, needed by the reboot code
 */
void disable_IO_APIC(void)
{
	/*
	 * Clear the IO-APIC before rebooting:
	 */
	clear_IO_APIC();

	/*
	 * If the i8259 is routed through an IOAPIC
	 * Put that IOAPIC in virtual wire mode
	 * so legacy interrupts can be delivered.
	 */
	if (ioapic_i8259.pin != -1) {
		struct IO_APIC_route_entry entry;

		memset(&entry, 0, sizeof(entry));
		entry.mask            = 0; /* Enabled */
		entry.trigger         = 0; /* Edge */
		entry.irr             = 0;
		entry.polarity        = 0; /* High */
		entry.delivery_status = 0;
		entry.dest_mode       = 0; /* Physical */
		entry.delivery_mode   = dest_ExtINT; /* ExtInt */
		entry.vector          = 0;
		entry.dest            = read_apic_id();

		/*
		 * Add it to the IO-APIC irq-routing table:
		 */
		ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
	}

	disconnect_bsp_APIC(ioapic_i8259.pin != -1);
}

/*
 * There is a nasty bug in some older SMP boards, their mptable lies
 * about the timer IRQ. We do the following to work around the situation:
 *
 *	- timer IRQ defaults to IO-APIC IRQ
 *	- if this function detects that timer IRQs are defunct, then we fall
 *	  back to ISA timer IRQs
 */
static int __init timer_irq_works(void)
{
	unsigned long t1 = jiffies;
	unsigned long flags;

	local_save_flags(flags);
	local_irq_enable();
	/* Let ten ticks pass... */
	mdelay((10 * 1000) / HZ);
	local_irq_restore(flags);

	/*
	 * Expect a few ticks at least, to be sure some possible
	 * glue logic does not lock up after one or two first
	 * ticks in a non-ExtINT mode. Also the local APIC
	 * might have cached one ExtINT interrupt. Finally, at
	 * least one tick may be lost due to delays.
	 */

	/* jiffies wrap? */
	if (time_after(jiffies, t1 + 4))
		return 1;
	return 0;
}
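
/*
 * Illustrative note (not from the original change): the arithmetic
 * above is deliberately generous.  mdelay((10 * 1000) / HZ) busy-waits
 * for ten ticks' worth of time, but the check only demands that more
 * than four ticks actually arrived (time_after(jiffies, t1 + 4)), to
 * tolerate a cached ExtINT, sluggish glue logic and a few lost ticks.
 */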

/*
 * In the SMP+IOAPIC case it might happen that there are an unspecified
 * number of pending IRQ events unhandled. These cases are very rare,
 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
 * better to do it this way as thus we do not have to be aware of
 * 'pending' interrupts in the IRQ path, except at this point.
 */
/*
 * Edge triggered needs to resend any interrupt
 * that was delayed but this is now handled in the device
 * independent code.
 */

/*
 * Starting up an edge-triggered IO-APIC interrupt is
 * nasty - we need to make sure that we get the edge.
 * If it is already asserted for some reason, we need to
 * return 1 to indicate that it was pending.
 *
 * This is not complete - we should be able to fake
 * an edge even if it isn't on the 8259A...
 */

static unsigned int startup_ioapic_irq(unsigned int irq)
{
	int was_pending = 0;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	if (irq < 16) {
		disable_8259A_irq(irq);
		if (i8259A_irq_pending(irq))
			was_pending = 1;
	}
	__unmask_IO_APIC_irq(irq);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return was_pending;
}

static int ioapic_retrigger_irq(unsigned int irq)
{
	struct irq_cfg *cfg = irq_cfg(irq);
	unsigned long flags;

	spin_lock_irqsave(&vector_lock, flags);
	send_IPI_mask(cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector);
	spin_unlock_irqrestore(&vector_lock, flags);

	return 1;
}

/*
 * Level and edge triggered IO-APIC interrupts need different handling,
 * so we use two separate IRQ descriptors. Edge triggered IRQs can be
 * handled with the level-triggered descriptor, but that one has slightly
 * more overhead. Level-triggered interrupts cannot be handled with the
 * edge-triggered handler, without risking IRQ storms and other ugly
 * races.
 */

#ifdef CONFIG_SMP

#ifdef CONFIG_INTR_REMAP
static void ir_irq_migration(struct work_struct *work);

static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration);

/*
 * Migrate the IO-APIC irq in the presence of intr-remapping.
 *
 * For edge triggered, irq migration is a simple atomic update (of vector
 * and cpu destination) of the IRTE and a flush of the hardware cache.
 *
 * For level triggered, we need to modify the io-apic RTE as well with the
 * updated vector information, along with modifying the IRTE with vector
 * and destination.  So irq migration for level triggered is a little bit
 * more complex compared to edge triggered migration.  But the good news
 * is, we use the same algorithm for level triggered migration as we have
 * today, the only difference being that we now initiate the irq migration
 * from process context instead of the interrupt context.
 *
 * In the future, when we do a directed EOI (combined with cpu EOI
 * broadcast suppression) to the IO-APIC, level triggered irq migration
 * will also be as simple as edge triggered migration and we can do the
 * irq migration with a simple atomic update to the IO-APIC RTE.
 */
static void migrate_ioapic_irq(int irq, cpumask_t mask)
{
	struct irq_cfg *cfg;
	struct irq_desc *desc;
	cpumask_t tmp, cleanup_mask;
	struct irte irte;
	int modify_ioapic_rte;
	unsigned int dest;
	unsigned long flags;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	if (get_irte(irq, &irte))
		return;

	if (assign_irq_vector(irq, mask))
		return;

	cfg = irq_cfg(irq);
	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	desc = irq_to_desc(irq);
	modify_ioapic_rte = desc->status & IRQ_LEVEL;
	if (modify_ioapic_rte) {
		spin_lock_irqsave(&ioapic_lock, flags);
		__target_IO_APIC_irq(irq, dest, cfg->vector);
		spin_unlock_irqrestore(&ioapic_lock, flags);
	}

	irte.vector = cfg->vector;
	irte.dest_id = IRTE_DEST(dest);

	/*
	 * Modify the IRTE and flush the interrupt entry cache.
	 */
	modify_irte(irq, &irte);

	if (cfg->move_in_progress) {
		cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
		cfg->move_cleanup_count = cpus_weight(cleanup_mask);
		send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
		cfg->move_in_progress = 0;
	}

	desc->affinity = mask;
}

static int migrate_irq_remapped_level(int irq)
{
	int ret = -1;
	struct irq_desc *desc = irq_to_desc(irq);

	mask_IO_APIC_irq(irq);

	if (io_apic_level_ack_pending(irq)) {
		/*
		 * Interrupt in progress. Migrating the irq now will change
		 * the vector information in the IO-APIC RTE and that will
		 * confuse the EOI broadcast performed by the cpu.
		 * So, delay the irq migration to the next instance.
		 */
		schedule_delayed_work(&ir_migration_work, 1);
		goto unmask;
	}

	/* everything is clear. we have right of way */
	migrate_ioapic_irq(irq, desc->pending_mask);

	ret = 0;
	desc->status &= ~IRQ_MOVE_PENDING;
	cpus_clear(desc->pending_mask);

unmask:
	unmask_IO_APIC_irq(irq);
	return ret;
}

static void ir_irq_migration(struct work_struct *work)
{
	int irq;

	for (irq = 0; irq < nr_irqs; irq++) {
		struct irq_desc *desc = irq_to_desc(irq);
		if (desc->status & IRQ_MOVE_PENDING) {
			unsigned long flags;

			spin_lock_irqsave(&desc->lock, flags);
			if (!desc->chip->set_affinity ||
			    !(desc->status & IRQ_MOVE_PENDING)) {
				desc->status &= ~IRQ_MOVE_PENDING;
				spin_unlock_irqrestore(&desc->lock, flags);
				continue;
			}

			desc->chip->set_affinity(irq, desc->pending_mask);
			spin_unlock_irqrestore(&desc->lock, flags);
		}
	}
}

/*
 * Migrates the IRQ destination in the process context.
 */
static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc->status & IRQ_LEVEL) {
		desc->status |= IRQ_MOVE_PENDING;
		desc->pending_mask = mask;
		migrate_irq_remapped_level(irq);
		return;
	}

	migrate_ioapic_irq(irq, mask);
}
#endif

asmlinkage void smp_irq_move_cleanup_interrupt(void)
{
	unsigned vector, me;
	ack_APIC_irq();
	exit_idle();
	irq_enter();

	me = smp_processor_id();
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		unsigned int irq;
		struct irq_desc *desc;
		struct irq_cfg *cfg;
		irq = __get_cpu_var(vector_irq)[vector];
		if (irq >= nr_irqs)
			continue;

		desc = irq_to_desc(irq);
		cfg = irq_cfg(irq);
		spin_lock(&desc->lock);
		if (!cfg->move_cleanup_count)
			goto unlock;

		if ((vector == cfg->vector) && cpu_isset(me, cfg->domain))
			goto unlock;

		__get_cpu_var(vector_irq)[vector] = -1;
		cfg->move_cleanup_count--;
unlock:
		spin_unlock(&desc->lock);
	}

	irq_exit();
}

static void irq_complete_move(unsigned int irq)
{
	struct irq_cfg *cfg = irq_cfg(irq);
	unsigned vector, me;

	if (likely(!cfg->move_in_progress))
		return;

	vector = ~get_irq_regs()->orig_ax;
	me = smp_processor_id();
	if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) {
		cpumask_t cleanup_mask;

		cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
		cfg->move_cleanup_count = cpus_weight(cleanup_mask);
		send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
		cfg->move_in_progress = 0;
	}
}
#else
static inline void irq_complete_move(unsigned int irq) {}
#endif
89027d35
SS
#ifdef CONFIG_INTR_REMAP
static void ack_x2apic_level(unsigned int irq)
{
	ack_x2APIC_irq();
}

static void ack_x2apic_edge(unsigned int irq)
{
	ack_x2APIC_irq();
}
#endif

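/*
 * Edge irqs carry no Remote IRR handshake, so the ack path only has to
 * finish any pending vector move before the EOI.
 */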
static void ack_apic_edge(unsigned int irq)
{
	irq_complete_move(irq);
	move_native_irq(irq);
	ack_APIC_irq();
}

static void ack_apic_level(unsigned int irq)
{
	int do_unmask_irq = 0;

	irq_complete_move(irq);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	/* If we are moving the irq we need to mask it */
	if (unlikely(irq_to_desc(irq)->status & IRQ_MOVE_PENDING)) {
		do_unmask_irq = 1;
		mask_IO_APIC_irq(irq);
	}
#endif

	/*
	 * We must acknowledge the irq before we move it or the acknowledge will
	 * not propagate properly.
	 */
	ack_APIC_irq();

	/* Now we can move and re-enable the irq */
	if (unlikely(do_unmask_irq)) {
		/* Only migrate the irq if the ack has been received.
		 *
		 * On rare occasions the broadcast level triggered ack gets
		 * delayed going to ioapics, and if we reprogram the
		 * vector while Remote IRR is still set the irq will never
		 * fire again.
		 *
		 * To prevent this scenario we read the Remote IRR bit
		 * of the ioapic. This has two effects.
		 * - On any sane system the read of the ioapic will
		 *   flush writes (and acks) going to the ioapic from
		 *   this cpu.
		 * - We get to see if the ACK has actually been delivered.
		 *
		 * Based on failed experiments of reprogramming the
		 * ioapic entry from outside of irq context starting
		 * with masking the ioapic entry and then polling until
		 * Remote IRR was clear before reprogramming the
		 * ioapic I don't trust the Remote IRR bit to be
		 * completely accurate.
		 *
		 * However there appears to be no other way to plug
		 * this race, so if the Remote IRR bit is not
		 * accurate and is causing problems then it is a hardware bug
		 * and you can go talk to the chipset vendor about it.
		 */
		if (!io_apic_level_ack_pending(irq))
			move_masked_irq(irq);
		unmask_IO_APIC_irq(irq);
	}
}

static struct irq_chip ioapic_chip __read_mostly = {
	.name		= "IO-APIC",
	.startup	= startup_ioapic_irq,
	.mask		= mask_IO_APIC_irq,
	.unmask		= unmask_IO_APIC_irq,
	.ack		= ack_apic_edge,
	.eoi		= ack_apic_level,
#ifdef CONFIG_SMP
	.set_affinity	= set_ioapic_affinity_irq,
#endif
	.retrigger	= ioapic_retrigger_irq,
};

#ifdef CONFIG_INTR_REMAP
static struct irq_chip ir_ioapic_chip __read_mostly = {
	.name		= "IR-IO-APIC",
	.startup	= startup_ioapic_irq,
	.mask		= mask_IO_APIC_irq,
	.unmask		= unmask_IO_APIC_irq,
	.ack		= ack_x2apic_edge,
	.eoi		= ack_x2apic_level,
#ifdef CONFIG_SMP
	.set_affinity	= set_ir_ioapic_affinity_irq,
#endif
	.retrigger	= ioapic_retrigger_irq,
};
#endif

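/*
 * Give irqs that never got an IO-APIC vector a sane fallback: route
 * legacy irqs back to the 8259A and mark everything else chip-less.
 */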
static inline void init_IO_APIC_traps(void)
{
	int irq;
	struct irq_desc *desc;

	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	for (irq = 0; irq < nr_irqs; irq++) {
		struct irq_cfg *cfg;

		cfg = irq_cfg(irq);
		if (IO_APIC_IRQ(irq) && !cfg->vector) {
			/*
			 * Hmm.. We don't have an entry for this,
			 * so default to an old-fashioned 8259
			 * interrupt if we can..
			 */
			if (irq < 16)
				make_8259A_irq(irq);
			else {
				desc = irq_to_desc(irq);
				/* Strange. Oh, well.. */
				desc->chip = &no_irq_chip;
			}
		}
	}
}

static void unmask_lapic_irq(unsigned int irq)
{
	unsigned long v;

	v = apic_read(APIC_LVT0);
	apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
}

static void mask_lapic_irq(unsigned int irq)
{
	unsigned long v;

	v = apic_read(APIC_LVT0);
	apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
}

static void ack_lapic_irq(unsigned int irq)
{
	ack_APIC_irq();
}

static struct irq_chip lapic_chip __read_mostly = {
	.name		= "local-APIC",
	.mask		= mask_lapic_irq,
	.unmask		= unmask_lapic_irq,
	.ack		= ack_lapic_irq,
};

static void lapic_register_intr(int irq)
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);
	desc->status &= ~IRQ_LEVEL;
	set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
				      "edge");
}

static void __init setup_nmi(void)
{
	/*
	 * Dirty trick to enable the NMI watchdog ...
	 * We put the 8259A master into AEOI mode and
	 * unmask on all local APICs LVT0 as NMI.
	 *
	 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
	 * is from Maciej W. Rozycki - so we do not have to EOI from
	 * the NMI handler or the timer interrupt.
	 */
	printk(KERN_INFO "activating NMI Watchdog ...");

	enable_NMI_through_LVT0();

	printk(" done.\n");
}

/*
 * This looks a bit hackish but it's about the only way of sending
 * a few INTA cycles to 8259As and any associated glue logic. ICR does
 * not support the ExtINT mode, unfortunately. We need to send these
 * cycles as some i82489DX-based boards have glue logic that keeps the
 * 8259A interrupt line asserted until INTA. --macro
 */
static inline void __init unlock_ExtINT_logic(void)
{
	int apic, pin, i;
	struct IO_APIC_route_entry entry0, entry1;
	unsigned char save_control, save_freq_select;

	pin = find_isa_irq_pin(8, mp_INT);
	apic = find_isa_irq_apic(8, mp_INT);
	if (pin == -1)
		return;

	entry0 = ioapic_read_entry(apic, pin);

	clear_IO_APIC_pin(apic, pin);

	memset(&entry1, 0, sizeof(entry1));

	entry1.dest_mode = 0;			/* physical delivery */
	entry1.mask = 0;			/* unmask IRQ now */
	entry1.dest = hard_smp_processor_id();
	entry1.delivery_mode = dest_ExtINT;
	entry1.polarity = entry0.polarity;
	entry1.trigger = 0;
	entry1.vector = 0;

	ioapic_write_entry(apic, pin, entry1);

	save_control = CMOS_READ(RTC_CONTROL);
	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
	CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
		   RTC_FREQ_SELECT);
	CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);

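	/*
	 * The RTC now generates ~1kHz periodic interrupts through the
	 * ExtINT entry programmed above. Poll RTC_PF for up to a second;
	 * each tick observed shortens the wait and delivers the INTA
	 * cycles this function exists to provoke.
	 */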
	i = 100;
	while (i-- > 0) {
		mdelay(10);
		if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
			i -= 10;
	}

	CMOS_WRITE(save_control, RTC_CONTROL);
	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
	clear_IO_APIC_pin(apic, pin);

	ioapic_write_entry(apic, pin, entry0);
}

/*
 * This code may look a bit paranoid, but it's supposed to cooperate with
 * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
 * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
 * fanatically on his truly buggy board.
 *
 * FIXME: really need to revamp this for modern platforms only.
 */
static inline void __init check_timer(void)
{
	struct irq_cfg *cfg = irq_cfg(0);
	int apic1, pin1, apic2, pin2;
	unsigned long flags;
	int no_pin1 = 0;

	local_irq_save(flags);

	/*
	 * get/set the timer IRQ vector:
	 */
	disable_8259A_irq(0);
	assign_irq_vector(0, TARGET_CPUS);

	/*
	 * As IRQ0 is to be enabled in the 8259A, the virtual
	 * wire has to be disabled in the local APIC.
	 */
	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
	init_8259A(1);

	pin1  = find_isa_irq_pin(0, mp_INT);
	apic1 = find_isa_irq_apic(0, mp_INT);
	pin2  = ioapic_i8259.pin;
	apic2 = ioapic_i8259.apic;

	apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
		    "apic1=%d pin1=%d apic2=%d pin2=%d\n",
		    cfg->vector, apic1, pin1, apic2, pin2);

	/*
	 * Some BIOS writers are clueless and report the ExtINTA
	 * I/O APIC input from the cascaded 8259A as the timer
	 * interrupt input. So just in case, if only one pin
	 * was found above, try it both directly and through the
	 * 8259A.
	 */
	if (pin1 == -1) {
		if (intr_remapping_enabled)
			panic("BIOS bug: timer not connected to IO-APIC");
		pin1 = pin2;
		apic1 = apic2;
		no_pin1 = 1;
	} else if (pin2 == -1) {
		pin2 = pin1;
		apic2 = apic1;
	}

	if (pin1 != -1) {
		/*
		 * Ok, does IRQ0 through the IOAPIC work?
		 */
		if (no_pin1) {
			add_pin_to_irq(0, apic1, pin1);
			setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
		}
		unmask_IO_APIC_irq(0);
		if (!no_timer_check && timer_irq_works()) {
			if (nmi_watchdog == NMI_IO_APIC) {
				setup_nmi();
				enable_8259A_irq(0);
			}
			if (disable_timer_pin_1 > 0)
				clear_IO_APIC_pin(0, pin1);
			goto out;
		}
		if (intr_remapping_enabled)
			panic("timer doesn't work through Interrupt-remapped IO-APIC");
		clear_IO_APIC_pin(apic1, pin1);
		if (!no_pin1)
			apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
				    "8254 timer not connected to IO-APIC\n");

		apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
			    "(IRQ0) through the 8259A ...\n");
		apic_printk(APIC_QUIET, KERN_INFO
			    "..... (found apic %d pin %d) ...\n", apic2, pin2);
		/*
		 * legacy devices should be connected to IO APIC #0
		 */
		replace_pin_at_irq(0, apic1, pin1, apic2, pin2);
		setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
		unmask_IO_APIC_irq(0);
		enable_8259A_irq(0);
		if (timer_irq_works()) {
			apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
			timer_through_8259 = 1;
			if (nmi_watchdog == NMI_IO_APIC) {
				disable_8259A_irq(0);
				setup_nmi();
				enable_8259A_irq(0);
			}
			goto out;
		}
		/*
		 * Cleanup, just in case ...
		 */
		disable_8259A_irq(0);
		clear_IO_APIC_pin(apic2, pin2);
		apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
	}

	if (nmi_watchdog == NMI_IO_APIC) {
		apic_printk(APIC_QUIET, KERN_WARNING "timer doesn't work "
			    "through the IO-APIC - disabling NMI Watchdog!\n");
		nmi_watchdog = NMI_NONE;
	}

	apic_printk(APIC_QUIET, KERN_INFO
		    "...trying to set up timer as Virtual Wire IRQ...\n");

	lapic_register_intr(0);
	apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector);	/* Fixed mode */
	enable_8259A_irq(0);

	if (timer_irq_works()) {
		apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
		goto out;
	}
	disable_8259A_irq(0);
	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
	apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");

	apic_printk(APIC_QUIET, KERN_INFO
		    "...trying to set up timer as ExtINT IRQ...\n");

	init_8259A(0);
	make_8259A_irq(0);
	apic_write(APIC_LVT0, APIC_DM_EXTINT);

	unlock_ExtINT_logic();

	if (timer_irq_works()) {
		apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
		goto out;
	}
	apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
	panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
	      "report. Then try booting with the 'noapic' option.\n");
out:
	local_irq_restore(flags);
}

static int __init notimercheck(char *s)
{
	no_timer_check = 1;
	return 1;
}
__setup("no_timer_check", notimercheck);

/*
 * Traditionally ISA IRQ2 is the cascade IRQ, and is not available
 * to devices. However there may be an I/O APIC pin available for
 * this interrupt regardless. The pin may be left unconnected, but
 * typically it will be reused as an ExtINT cascade interrupt for
 * the master 8259A. In the MPS case such a pin will normally be
 * reported as an ExtINT interrupt in the MP table. With ACPI
 * there is no provision for ExtINT interrupts, and in the absence
 * of an override it would be treated as an ordinary ISA I/O APIC
 * interrupt, that is edge-triggered and unmasked by default. We
 * used to do this, but it caused problems on some systems because
 * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using
 * the same ExtINT cascade interrupt to drive the local APIC of the
 * bootstrap processor. Therefore we refrain from routing IRQ2 to
 * the I/O APIC in all cases now. No actual device should request
 * it anyway. --macro
 */
#define PIC_IRQS	(1<<2)

void __init setup_IO_APIC(void)
{
	/*
	 * Calling enable_IO_APIC() has been moved to setup_local_APIC()
	 * for the boot processor.
	 */
	io_apic_irqs = ~PIC_IRQS;

	apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");

	sync_Arb_IDs();
	setup_IO_APIC_irqs();
	init_IO_APIC_traps();
	check_timer();
}

struct sysfs_ioapic_data {
	struct sys_device dev;
	struct IO_APIC_route_entry entry[0];
};
static struct sysfs_ioapic_data *mp_ioapic_data[MAX_IO_APICS];

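/*
 * Save every RTE at suspend time so resume can restore the complete
 * routing state (and the IO-APIC ID) after a power transition.
 */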
static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
{
	struct IO_APIC_route_entry *entry;
	struct sysfs_ioapic_data *data;
	int i;

	data = container_of(dev, struct sysfs_ioapic_data, dev);
	entry = data->entry;
	for (i = 0; i < nr_ioapic_registers[dev->id]; i++, entry++)
		*entry = ioapic_read_entry(dev->id, i);

	return 0;
}

static int ioapic_resume(struct sys_device *dev)
{
	struct IO_APIC_route_entry *entry;
	struct sysfs_ioapic_data *data;
	unsigned long flags;
	union IO_APIC_reg_00 reg_00;
	int i;

	data = container_of(dev, struct sysfs_ioapic_data, dev);
	entry = data->entry;

	spin_lock_irqsave(&ioapic_lock, flags);
	reg_00.raw = io_apic_read(dev->id, 0);
	if (reg_00.bits.ID != mp_ioapics[dev->id].mp_apicid) {
		reg_00.bits.ID = mp_ioapics[dev->id].mp_apicid;
		io_apic_write(dev->id, 0, reg_00.raw);
	}
	spin_unlock_irqrestore(&ioapic_lock, flags);
	for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
		ioapic_write_entry(dev->id, i, entry[i]);

	return 0;
}

static struct sysdev_class ioapic_sysdev_class = {
	.name		= "ioapic",
	.suspend	= ioapic_suspend,
	.resume		= ioapic_resume,
};

static int __init ioapic_init_sysfs(void)
{
	struct sys_device *dev;
	int i, size, error;

	error = sysdev_class_register(&ioapic_sysdev_class);
	if (error)
		return error;

	for (i = 0; i < nr_ioapics; i++) {
		size = sizeof(struct sys_device) + nr_ioapic_registers[i]
			* sizeof(struct IO_APIC_route_entry);
		mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL);
		if (!mp_ioapic_data[i]) {
			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
			continue;
		}
		dev = &mp_ioapic_data[i]->dev;
		dev->id = i;
		dev->cls = &ioapic_sysdev_class;
		error = sysdev_register(dev);
		if (error) {
			kfree(mp_ioapic_data[i]);
			mp_ioapic_data[i] = NULL;
			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
			continue;
		}
	}

	return 0;
}

device_initcall(ioapic_init_sysfs);

/*
 * Dynamic irq allocation and deallocation
 */
int create_irq(void)
{
	/* Allocate an unused irq */
	int irq;
	int new;
	unsigned long flags;
	struct irq_cfg *cfg_new;

	irq = -ENOSPC;
	spin_lock_irqsave(&vector_lock, flags);
	for (new = (nr_irqs - 1); new >= 0; new--) {
		if (platform_legacy_irq(new))
			continue;
		cfg_new = irq_cfg(new);
		if (cfg_new && cfg_new->vector != 0)
			continue;
		/* check if need to create one */
		if (!cfg_new)
			cfg_new = irq_cfg_alloc(new);
		if (__assign_irq_vector(new, TARGET_CPUS) == 0)
			irq = new;
		break;
	}
	spin_unlock_irqrestore(&vector_lock, flags);

	if (irq >= 0) {
		dynamic_irq_init(irq);
	}
	return irq;
}

void destroy_irq(unsigned int irq)
{
	unsigned long flags;

	dynamic_irq_cleanup(irq);

#ifdef CONFIG_INTR_REMAP
	free_irte(irq);
#endif
	spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq);
	spin_unlock_irqrestore(&vector_lock, flags);
}

/*
 * MSI message composition
 */
#ifdef CONFIG_PCI_MSI
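/*
 * Compose the MSI address/data pair: the address selects the
 * destination (dest mode + APIC id), the data selects delivery mode
 * and vector. With interrupt remapping the message carries an IRTE
 * index instead of the destination itself.
 */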
static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
{
	struct irq_cfg *cfg;
	int err;
	unsigned dest;
	cpumask_t tmp;

	tmp = TARGET_CPUS;
	err = assign_irq_vector(irq, tmp);
	if (err)
		return err;

	cfg = irq_cfg(irq);
	cpus_and(tmp, cfg->domain, tmp);
	dest = cpu_mask_to_apicid(tmp);

#ifdef CONFIG_INTR_REMAP
	if (irq_remapped(irq)) {
		struct irte irte;
		int ir_index;
		u16 sub_handle;

		ir_index = map_irq_to_irte_handle(irq, &sub_handle);
		BUG_ON(ir_index == -1);

		memset(&irte, 0, sizeof(irte));

		irte.present = 1;
		irte.dst_mode = INT_DEST_MODE;
		irte.trigger_mode = 0;	/* edge */
		irte.dlvry_mode = INT_DELIVERY_MODE;
		irte.vector = cfg->vector;
		irte.dest_id = IRTE_DEST(dest);

		modify_irte(irq, &irte);

		msg->address_hi = MSI_ADDR_BASE_HI;
		msg->data = sub_handle;
		msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
				  MSI_ADDR_IR_SHV |
				  MSI_ADDR_IR_INDEX1(ir_index) |
				  MSI_ADDR_IR_INDEX2(ir_index);
	} else
#endif
	{
		msg->address_hi = MSI_ADDR_BASE_HI;
		msg->address_lo =
			MSI_ADDR_BASE_LO |
			((INT_DEST_MODE == 0) ?
				MSI_ADDR_DEST_MODE_PHYSICAL:
				MSI_ADDR_DEST_MODE_LOGICAL) |
			((INT_DELIVERY_MODE != dest_LowestPrio) ?
				MSI_ADDR_REDIRECTION_CPU:
				MSI_ADDR_REDIRECTION_LOWPRI) |
			MSI_ADDR_DEST_ID(dest);

		msg->data =
			MSI_DATA_TRIGGER_EDGE |
			MSI_DATA_LEVEL_ASSERT |
			((INT_DELIVERY_MODE != dest_LowestPrio) ?
				MSI_DATA_DELIVERY_FIXED:
				MSI_DATA_DELIVERY_LOWPRI) |
			MSI_DATA_VECTOR(cfg->vector);
	}
	return err;
}

#ifdef CONFIG_SMP
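/*
 * Retarget an MSI: pick a vector on the new cpu set, then rewrite the
 * vector and destination id fields of the device's MSI message.
 */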
static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
{
	struct irq_cfg *cfg;
	struct msi_msg msg;
	unsigned int dest;
	cpumask_t tmp;
	struct irq_desc *desc;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	if (assign_irq_vector(irq, mask))
		return;

	cfg = irq_cfg(irq);
	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	read_msi_msg(irq, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);

	write_msi_msg(irq, &msg);
	desc = irq_to_desc(irq);
	desc->affinity = mask;
}

#ifdef CONFIG_INTR_REMAP
/*
 * Migrate the MSI irq to another cpumask. This migration is
 * done in the process context using interrupt-remapping hardware.
 */
static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
{
	struct irq_cfg *cfg;
	unsigned int dest;
	cpumask_t tmp, cleanup_mask;
	struct irte irte;
	struct irq_desc *desc;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	if (get_irte(irq, &irte))
		return;

	if (assign_irq_vector(irq, mask))
		return;

	cfg = irq_cfg(irq);
	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	irte.vector = cfg->vector;
	irte.dest_id = IRTE_DEST(dest);

	/*
	 * atomically update the IRTE with the new destination and vector.
	 */
	modify_irte(irq, &irte);

	/*
	 * After this point, all the interrupts will start arriving
	 * at the new destination. So, time to cleanup the previous
	 * vector allocation.
	 */
	if (cfg->move_in_progress) {
		cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
		cfg->move_cleanup_count = cpus_weight(cleanup_mask);
		send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
		cfg->move_in_progress = 0;
	}

	desc = irq_to_desc(irq);
	desc->affinity = mask;
}
#endif
#endif /* CONFIG_SMP */

/*
 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI or MSI-X Capability Structure.
 */
static struct irq_chip msi_chip = {
	.name		= "PCI-MSI",
	.unmask		= unmask_msi_irq,
	.mask		= mask_msi_irq,
	.ack		= ack_apic_edge,
#ifdef CONFIG_SMP
	.set_affinity	= set_msi_irq_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};

#ifdef CONFIG_INTR_REMAP
static struct irq_chip msi_ir_chip = {
	.name		= "IR-PCI-MSI",
	.unmask		= unmask_msi_irq,
	.mask		= mask_msi_irq,
	.ack		= ack_x2apic_edge,
#ifdef CONFIG_SMP
	.set_affinity	= ir_set_msi_irq_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};

/*
 * Map the PCI dev to the corresponding remapping hardware unit
 * and allocate 'nvec' consecutive interrupt-remapping table entries
 * in it.
 */
static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
{
	struct intel_iommu *iommu;
	int index;

	iommu = map_dev_to_ir(dev);
	if (!iommu) {
		printk(KERN_ERR
		       "Unable to map PCI %s to iommu\n", pci_name(dev));
		return -ENOENT;
	}

	index = alloc_irte(iommu, irq, nvec);
	if (index < 0) {
		printk(KERN_ERR
		       "Unable to allocate %d IRTE for PCI %s\n", nvec,
		       pci_name(dev));
		return -ENOSPC;
	}
	return index;
}
#endif

static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc, int irq)
{
	int ret;
	struct msi_msg msg;

	ret = msi_compose_msg(dev, irq, &msg);
	if (ret < 0)
		return ret;

	set_irq_msi(irq, desc);
	write_msi_msg(irq, &msg);

#ifdef CONFIG_INTR_REMAP
	if (irq_remapped(irq)) {
		struct irq_desc *desc = irq_to_desc(irq);
		/*
		 * irq migration in process context
		 */
		desc->status |= IRQ_MOVE_PCNTXT;
		set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge");
	} else
#endif
		set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");

	return 0;
}

int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
{
	int irq, ret;

	irq = create_irq();
	if (irq < 0)
		return irq;

#ifdef CONFIG_INTR_REMAP
	if (!intr_remapping_enabled)
		goto no_ir;

	ret = msi_alloc_irte(dev, irq, 1);
	if (ret < 0)
		goto error;
no_ir:
#endif
	ret = setup_msi_irq(dev, desc, irq);
	if (ret < 0) {
		destroy_irq(irq);
		return ret;
	}
	return 0;

#ifdef CONFIG_INTR_REMAP
error:
	destroy_irq(irq);
	return ret;
#endif
}
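
/*
 * Multi-MSI: the first vector allocates a consecutive block of IRTEs;
 * the remaining vectors reuse that block, with sub_handle selecting
 * the entry inside it.
 */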
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	int irq, ret, sub_handle;
	struct msi_desc *desc;
#ifdef CONFIG_INTR_REMAP
	struct intel_iommu *iommu = NULL;
	int index = 0;
#endif

	sub_handle = 0;
	list_for_each_entry(desc, &dev->msi_list, list) {
		irq = create_irq();
		if (irq < 0)
			return irq;
#ifdef CONFIG_INTR_REMAP
		if (!intr_remapping_enabled)
			goto no_ir;

		if (!sub_handle) {
			/*
			 * allocate the consecutive block of IRTE's
			 * for 'nvec'
			 */
			index = msi_alloc_irte(dev, irq, nvec);
			if (index < 0) {
				ret = index;
				goto error;
			}
		} else {
			iommu = map_dev_to_ir(dev);
			if (!iommu) {
				ret = -ENOENT;
				goto error;
			}
			/*
			 * setup the mapping between the irq and the IRTE
			 * base index, the sub_handle pointing to the
			 * appropriate interrupt remap table entry.
			 */
			set_irte_irq(irq, iommu, index, sub_handle);
		}
no_ir:
#endif
		ret = setup_msi_irq(dev, desc, irq);
		if (ret < 0)
			goto error;
		sub_handle++;
	}
	return 0;

error:
	destroy_irq(irq);
	return ret;
}

void arch_teardown_msi_irq(unsigned int irq)
{
	destroy_irq(irq);
}

#ifdef CONFIG_DMAR
#ifdef CONFIG_SMP
static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
{
	struct irq_cfg *cfg;
	struct msi_msg msg;
	unsigned int dest;
	cpumask_t tmp;
	struct irq_desc *desc;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	if (assign_irq_vector(irq, mask))
		return;

	cfg = irq_cfg(irq);
	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	dmar_msi_read(irq, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);

	dmar_msi_write(irq, &msg);
	desc = irq_to_desc(irq);
	desc->affinity = mask;
}
#endif /* CONFIG_SMP */

struct irq_chip dmar_msi_type = {
	.name		= "DMAR_MSI",
	.unmask		= dmar_msi_unmask,
	.mask		= dmar_msi_mask,
	.ack		= ack_apic_edge,
#ifdef CONFIG_SMP
	.set_affinity	= dmar_msi_set_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};

int arch_setup_dmar_msi(unsigned int irq)
{
	int ret;
	struct msi_msg msg;

	ret = msi_compose_msg(NULL, irq, &msg);
	if (ret < 0)
		return ret;
	dmar_msi_write(irq, &msg);
	set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
				      "edge");
	return 0;
}
#endif

#endif /* CONFIG_PCI_MSI */
/*
 * Hypertransport interrupt support
 */
#ifdef CONFIG_HT_IRQ

#ifdef CONFIG_SMP

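/*
 * Point an HT irq at a new destination by patching the vector and
 * destination id fields of the HT irq message.
 */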
static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
{
	struct ht_irq_msg msg;
	fetch_ht_irq_msg(irq, &msg);

	msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
	msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);

	msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
	msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);

	write_ht_irq_msg(irq, &msg);
}

static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
{
	struct irq_cfg *cfg;
	unsigned int dest;
	cpumask_t tmp;
	struct irq_desc *desc;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	if (assign_irq_vector(irq, mask))
		return;

	cfg = irq_cfg(irq);
	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	target_ht_irq(irq, dest, cfg->vector);
	desc = irq_to_desc(irq);
	desc->affinity = mask;
}
#endif

static struct irq_chip ht_irq_chip = {
	.name		= "PCI-HT",
	.mask		= mask_ht_irq,
	.unmask		= unmask_ht_irq,
	.ack		= ack_apic_edge,
#ifdef CONFIG_SMP
	.set_affinity	= set_ht_irq_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};

int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
{
	struct irq_cfg *cfg;
	int err;
	cpumask_t tmp;

	tmp = TARGET_CPUS;
	err = assign_irq_vector(irq, tmp);
	if (!err) {
		struct ht_irq_msg msg;
		unsigned dest;

		cfg = irq_cfg(irq);
		cpus_and(tmp, cfg->domain, tmp);
		dest = cpu_mask_to_apicid(tmp);

		msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);

		msg.address_lo =
			HT_IRQ_LOW_BASE |
			HT_IRQ_LOW_DEST_ID(dest) |
			HT_IRQ_LOW_VECTOR(cfg->vector) |
			((INT_DEST_MODE == 0) ?
				HT_IRQ_LOW_DM_PHYSICAL :
				HT_IRQ_LOW_DM_LOGICAL) |
			HT_IRQ_LOW_RQEOI_EDGE |
			((INT_DELIVERY_MODE != dest_LowestPrio) ?
				HT_IRQ_LOW_MT_FIXED :
				HT_IRQ_LOW_MT_ARBITRATED) |
			HT_IRQ_LOW_IRQ_MASKED;

		write_ht_irq_msg(irq, &msg);

		set_irq_chip_and_handler_name(irq, &ht_irq_chip,
					      handle_edge_irq, "edge");
	}
	return err;
}
#endif /* CONFIG_HT_IRQ */

/* --------------------------------------------------------------------------
			ACPI-based IOAPIC Configuration
   -------------------------------------------------------------------------- */

#ifdef CONFIG_ACPI

#define IO_APIC_MAX_ID		0xFE

int __init io_apic_get_redir_entries(int ioapic)
{
	union IO_APIC_reg_01 reg_01;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	reg_01.raw = io_apic_read(ioapic, 1);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return reg_01.bits.entries;
}

int io_apic_set_pci_routing(int ioapic, int pin, int irq, int triggering, int polarity)
{
	if (!IO_APIC_IRQ(irq)) {
		apic_printk(APIC_QUIET, KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
			    ioapic);
		return -EINVAL;
	}

	/*
	 * IRQs < 16 are already in the irq_2_pin[] map
	 */
	if (irq >= 16)
		add_pin_to_irq(irq, ioapic, pin);

	setup_IO_APIC_irq(ioapic, pin, irq, triggering, polarity);

	return 0;
}

int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
{
	int i;

	if (skip_ioapic_setup)
		return -1;

	for (i = 0; i < mp_irq_entries; i++)
		if (mp_irqs[i].mp_irqtype == mp_INT &&
		    mp_irqs[i].mp_srcbusirq == bus_irq)
			break;
	if (i >= mp_irq_entries)
		return -1;

	*trigger = irq_trigger(i);
	*polarity = irq_polarity(i);
	return 0;
}

#endif /* CONFIG_ACPI */

/*
 * This function is currently only a helper for the i386 smp boot process,
 * where we need to reprogram the ioredtbls to cater for the cpus which
 * have come online, so the mask in all cases should simply be TARGET_CPUS.
 */
#ifdef CONFIG_SMP
void __init setup_ioapic_dest(void)
{
	int pin, ioapic, irq, irq_entry;
	struct irq_cfg *cfg;

	if (skip_ioapic_setup == 1)
		return;

	for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
		for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
			irq_entry = find_irq_entry(ioapic, pin, mp_INT);
			if (irq_entry == -1)
				continue;
			irq = pin_2_irq(irq_entry, ioapic, pin);

			/* setup_IO_APIC_irqs could fail to get a vector for
			 * some device when you have too many devices, because
			 * at that time only the boot cpu is online.
			 */
			cfg = irq_cfg(irq);
			if (!cfg->vector)
				setup_IO_APIC_irq(ioapic, pin, irq,
						  irq_trigger(irq_entry),
						  irq_polarity(irq_entry));
#ifdef CONFIG_INTR_REMAP
			else if (intr_remapping_enabled)
				set_ir_ioapic_affinity_irq(irq, TARGET_CPUS);
#endif
			else
				set_ioapic_affinity_irq(irq, TARGET_CPUS);
		}
	}
}
#endif

#define IOAPIC_RESOURCE_NAME_SIZE 11

static struct resource *ioapic_resources;

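/*
 * Allocate one bootmem block holding all the resource structs followed
 * by their name strings; each res[i].name points into the string area
 * carved out after the array.
 */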
static struct resource * __init ioapic_setup_resources(void)
{
	unsigned long n;
	struct resource *res;
	char *mem;
	int i;

	if (nr_ioapics <= 0)
		return NULL;

	n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
	n *= nr_ioapics;

	mem = alloc_bootmem(n);
	res = (void *)mem;

	if (mem != NULL) {
		mem += sizeof(struct resource) * nr_ioapics;

		for (i = 0; i < nr_ioapics; i++) {
			res[i].name = mem;
			res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
			sprintf(mem, "IOAPIC %u", i);
			mem += IOAPIC_RESOURCE_NAME_SIZE;
		}
	}

	ioapic_resources = res;

	return res;
}
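
/*
 * Map each IO-APIC's registers through a dedicated fixmap slot; when
 * there is no MP table, a dummy bootmem page is allocated so the
 * fixmap still points at harmless memory.
 */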
void __init ioapic_init_mappings(void)
{
	unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
	struct resource *ioapic_res;
	int i;

	ioapic_res = ioapic_setup_resources();
	for (i = 0; i < nr_ioapics; i++) {
		if (smp_found_config) {
			ioapic_phys = mp_ioapics[i].mp_apicaddr;
		} else {
			ioapic_phys = (unsigned long)
				alloc_bootmem_pages(PAGE_SIZE);
			ioapic_phys = __pa(ioapic_phys);
		}
		set_fixmap_nocache(idx, ioapic_phys);
		apic_printk(APIC_VERBOSE,
			    "mapped IOAPIC to %016lx (%016lx)\n",
			    __fix_to_virt(idx), ioapic_phys);
		idx++;

		if (ioapic_res != NULL) {
			ioapic_res->start = ioapic_phys;
			ioapic_res->end = ioapic_phys + (4 * 1024) - 1;
			ioapic_res++;
		}
	}
}

static int __init ioapic_insert_resources(void)
{
	int i;
	struct resource *r = ioapic_resources;

	if (!r) {
		printk(KERN_ERR
		       "IO APIC resources could not be allocated.\n");
		return -1;
	}

	for (i = 0; i < nr_ioapics; i++) {
		insert_resource(&iomem_resource, r);
		r++;
	}

	return 0;
}

/* Insert the IO APIC resources after PCI initialization has occurred to handle
 * IO APICs that are mapped in on a BAR in PCI space. */
late_initcall(ioapic_insert_resources);