x86: order variables in io_apic_xx.c
[deliverable/linux.git] / arch / x86 / kernel / io_apic_32.c
CommitLineData
1da177e4
LT
1/*
2 * Intel IO-APIC support for multi-Pentium hosts.
3 *
4 * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
5 *
6 * Many thanks to Stig Venaas for trying out countless experimental
7 * patches and reporting/debugging problems patiently!
8 *
9 * (c) 1999, Multiple IO-APIC support, developed by
10 * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
11 * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
12 * further tested and cleaned up by Zach Brown <zab@redhat.com>
13 * and Ingo Molnar <mingo@redhat.com>
14 *
15 * Fixes
16 * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
17 * thanks to Eric Gilmore
18 * and Rolf G. Tews
19 * for testing these extensively
20 * Paul Diefenbaugh : Added full ACPI support
21 */
22
23#include <linux/mm.h>
1da177e4
LT
24#include <linux/interrupt.h>
25#include <linux/init.h>
26#include <linux/delay.h>
27#include <linux/sched.h>
f3294a33 28#include <linux/bootmem.h>
1da177e4
LT
29#include <linux/mc146818rtc.h>
30#include <linux/compiler.h>
31#include <linux/acpi.h>
129f6946 32#include <linux/module.h>
1da177e4 33#include <linux/sysdev.h>
2d3fcc1c 34#include <linux/pci.h>
3b7d1921 35#include <linux/msi.h>
95d77884 36#include <linux/htirq.h>
7dfb7103 37#include <linux/freezer.h>
f26d6a2b 38#include <linux/kthread.h>
1d16b53e 39#include <linux/jiffies.h> /* time_after() */
54d5d424 40
1da177e4
LT
41#include <asm/io.h>
42#include <asm/smp.h>
43#include <asm/desc.h>
44#include <asm/timer.h>
306e440d 45#include <asm/i8259.h>
3e4ff115 46#include <asm/nmi.h>
2d3fcc1c 47#include <asm/msidef.h>
8b955b0d 48#include <asm/hypertransport.h>
a4dbc34d 49#include <asm/setup.h>
1da177e4 50
497c9a19 51#include <mach_ipi.h>
1da177e4 52#include <mach_apic.h>
874c4fe3 53#include <mach_apicdef.h>
1da177e4 54
32f71aff
MR
55#define __apicdebuginit(type) static type __init
56
1da177e4
LT
57/*
58 * Is the SiS APIC rmw bug present ?
59 * -1 = don't know, 0 = no, 1 = yes
60 */
61int sis_apic_bug = -1;
62
efa2559f
YL
63static DEFINE_SPINLOCK(ioapic_lock);
64static DEFINE_SPINLOCK(vector_lock);
65
301e6190 66int first_free_entry;
efa2559f
YL
67/*
68 * Rough estimation of how many shared IRQs there are, can
69 * be changed anytime.
70 */
71int pin_map_size;
72
1da177e4
LT
73/*
74 * # of IRQ routing registers
75 */
76int nr_ioapic_registers[MAX_IO_APICS];
77
9f640ccb 78/* I/O APIC entries */
ec2cd0a2 79struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];
9f640ccb
AS
80int nr_ioapics;
81
584f734d 82/* MP IRQ source entries */
2fddb6e2 83struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
584f734d
AS
84
85/* # of MP IRQ source entries */
86int mp_irq_entries;
87
8732fc4b
AS
88#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
89int mp_bus_id_to_type[MAX_MP_BUSSES];
90#endif
91
92DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
93
efa2559f
YL
94int skip_ioapic_setup;
95
/* Handler for the "noapic" boot parameter: skip IO-APIC setup entirely. */
static int __init parse_noapic(char *arg)
{
	/* disable IO-APIC */
	disable_ioapic_setup();
	return 0;
}
early_param("noapic", parse_noapic);
66759a01 103
da51a821 104struct irq_cfg;
0f978f45 105struct irq_pin_list;
a1420f39 106struct irq_cfg {
da51a821
YL
107 unsigned int irq;
108 struct irq_cfg *next;
0f978f45 109 struct irq_pin_list *irq_2_pin;
497c9a19
YL
110 cpumask_t domain;
111 cpumask_t old_domain;
112 unsigned move_cleanup_count;
a1420f39 113 u8 vector;
497c9a19 114 u8 move_in_progress : 1;
a1420f39
YL
115};
116
117
118/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
119static struct irq_cfg irq_cfg_legacy[] __initdata = {
497c9a19
YL
120 [0] = { .irq = 0, .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR, },
121 [1] = { .irq = 1, .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR, },
122 [2] = { .irq = 2, .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR, },
123 [3] = { .irq = 3, .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR, },
124 [4] = { .irq = 4, .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR, },
125 [5] = { .irq = 5, .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR, },
126 [6] = { .irq = 6, .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR, },
127 [7] = { .irq = 7, .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR, },
128 [8] = { .irq = 8, .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR, },
129 [9] = { .irq = 9, .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR, },
130 [10] = { .irq = 10, .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, },
131 [11] = { .irq = 11, .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, },
132 [12] = { .irq = 12, .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, },
133 [13] = { .irq = 13, .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, },
134 [14] = { .irq = 14, .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, },
135 [15] = { .irq = 15, .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
a1420f39
YL
136};
137
da51a821
YL
138static struct irq_cfg irq_cfg_init = { .irq = -1U, };
139/* need to be biger than size of irq_cfg_legacy */
140static int nr_irq_cfg = 32;
141
142static int __init parse_nr_irq_cfg(char *arg)
143{
144 if (arg) {
145 nr_irq_cfg = simple_strtoul(arg, NULL, 0);
146 if (nr_irq_cfg < 32)
147 nr_irq_cfg = 32;
148 }
149 return 0;
150}
151
152early_param("nr_irq_cfg", parse_nr_irq_cfg);
153
154static void init_one_irq_cfg(struct irq_cfg *cfg)
155{
156 memcpy(cfg, &irq_cfg_init, sizeof(struct irq_cfg));
157}
158
159static struct irq_cfg *irq_cfgx;
160static struct irq_cfg *irq_cfgx_free;
a1420f39
YL
/*
 * dyn_array init callback for irq_cfgx: seed the array with the legacy
 * (ISA) irq_cfg entries, mark the rest unused, chain everything into a
 * singly linked list, and split it into the in-use part (legacy) and
 * the free list (irq_cfgx_free).
 */
static void __init init_work(void *data)
{
	struct dyn_array *da = data;
	struct irq_cfg *cfg;
	int legacy_count;
	int i;

	cfg = *da->name;

	/* first legacy_count slots come pre-configured from the table */
	memcpy(cfg, irq_cfg_legacy, sizeof(irq_cfg_legacy));

	legacy_count = sizeof(irq_cfg_legacy)/sizeof(irq_cfg_legacy[0]);
	for (i = legacy_count; i < *da->nr; i++)
		init_one_irq_cfg(&cfg[i]);

	/* chain every slot, then cut the list after the legacy entries */
	for (i = 1; i < *da->nr; i++)
		cfg[i-1].next = &cfg[i];

	irq_cfgx_free = &irq_cfgx[legacy_count];
	irq_cfgx[legacy_count - 1].next = NULL;
}
182
da51a821
YL
183#define for_each_irq_cfg(cfg) \
184 for (cfg = irq_cfgx; cfg; cfg = cfg->next)
185
186DEFINE_DYN_ARRAY(irq_cfgx, sizeof(struct irq_cfg), nr_irq_cfg, PAGE_SIZE, init_work);
a1420f39
YL
187
188static struct irq_cfg *irq_cfg(unsigned int irq)
189{
da51a821
YL
190 struct irq_cfg *cfg;
191
192 cfg = irq_cfgx;
193 while (cfg) {
194 if (cfg->irq == irq)
195 return cfg;
196
197 cfg = cfg->next;
198 }
199
200 return NULL;
201}
202
/*
 * Find the irq_cfg for @irq, allocating one from the free list if it
 * does not exist yet.  When the free list is exhausted, a fresh batch
 * of nr_irq_cfg entries is allocated (kzalloc after bootmem is gone,
 * bootmem before that) and chained onto the free list.
 */
static struct irq_cfg *irq_cfg_alloc(unsigned int irq)
{
	struct irq_cfg *cfg, *cfg_pri;
	int i;
	int count = 0;

	/* walk the in-use list; remember the tail for appending below */
	cfg_pri = cfg = irq_cfgx;
	while (cfg) {
		if (cfg->irq == irq)
			return cfg;

		cfg_pri = cfg;
		cfg = cfg->next;
		count++;
	}

	if (!irq_cfgx_free) {
		unsigned long phys;
		unsigned long total_bytes;
		/*
		 * we run out of pre-allocate ones, allocate more
		 */
		printk(KERN_DEBUG "try to get more irq_cfg %d\n", nr_irq_cfg);

		total_bytes = sizeof(struct irq_cfg) * nr_irq_cfg;
		/* GFP_ATOMIC: may be called with spinlocks held / irqs off */
		if (after_bootmem)
			cfg = kzalloc(total_bytes, GFP_ATOMIC);
		else
			cfg = __alloc_bootmem_nopanic(total_bytes, PAGE_SIZE, 0);

		if (!cfg)
			panic("please boot with nr_irq_cfg= %d\n", count * 2);

		phys = __pa(cfg);
		printk(KERN_DEBUG "irq_irq ==> [%#lx - %#lx]\n", phys, phys + total_bytes);

		/* initialize the new batch and chain it into a free list */
		for (i = 0; i < nr_irq_cfg; i++)
			init_one_irq_cfg(&cfg[i]);

		for (i = 1; i < nr_irq_cfg; i++)
			cfg[i-1].next = &cfg[i];

		irq_cfgx_free = cfg;
	}

	/* pop one entry off the free list and append it to the in-use list */
	cfg = irq_cfgx_free;
	irq_cfgx_free = irq_cfgx_free->next;
	cfg->next = NULL;
	if (cfg_pri)
		cfg_pri->next = cfg;
	else
		irq_cfgx = cfg;
	cfg->irq = irq;
	printk(KERN_DEBUG "found new irq_cfg for irq %d\n", cfg->irq);

#ifdef CONFIG_HAVE_SPARSE_IRQ_DEBUG
	{
		/* dump the results */
		struct irq_cfg *cfg;
		unsigned long phys;
		unsigned long bytes = sizeof(struct irq_cfg);

		printk(KERN_DEBUG "=========================== %d\n", irq);
		printk(KERN_DEBUG "irq_cfg dump after get that for %d\n", irq);
		for_each_irq_cfg(cfg) {
			phys = __pa(cfg);
			printk(KERN_DEBUG "irq_cfg %d ==> [%#lx - %#lx]\n", cfg->irq, phys, phys + bytes);
		}
		printk(KERN_DEBUG "===========================\n");
	}
#endif
	return cfg;
}
276
1da177e4
LT
277/*
278 * This is performance-critical, we want to do it O(1)
279 *
280 * the indexing order of this array favors 1:1 mappings
281 * between pins and IRQs.
282 */
283
0f978f45
YL
284struct irq_pin_list {
285 int apic, pin;
286 struct irq_pin_list *next;
287};
288
289static struct irq_pin_list *irq_2_pin_head;
290/* fill one page ? */
291static int nr_irq_2_pin = 0x100;
292static struct irq_pin_list *irq_2_pin_ptr;
/*
 * dyn_array init callback for irq_2_pin_head: chain all pre-allocated
 * irq_pin_list entries into the free list headed by irq_2_pin_ptr.
 */
static void __init irq_2_pin_init_work(void *data)
{
	struct dyn_array *da = data;
	struct irq_pin_list *pin;
	int i;

	pin = *da->name;

	for (i = 1; i < *da->nr; i++)
		pin[i-1].next = &pin[i];

	irq_2_pin_ptr = &pin[0];
}
DEFINE_DYN_ARRAY(irq_2_pin_head, sizeof(struct irq_pin_list), nr_irq_2_pin, PAGE_SIZE, irq_2_pin_init_work);
307
/*
 * Take one irq_pin_list entry off the free list; when the list is
 * empty, allocate another batch of nr_irq_2_pin entries and refill it.
 * Panics if no memory can be obtained.
 */
static struct irq_pin_list *get_one_free_irq_2_pin(void)
{
	struct irq_pin_list *pin;
	int i;

	pin = irq_2_pin_ptr;

	/* fast path: free list is not empty */
	if (pin) {
		irq_2_pin_ptr = pin->next;
		pin->next = NULL;
		return pin;
	}

	/*
	 * we run out of pre-allocate ones, allocate more
	 */
	printk(KERN_DEBUG "try to get more irq_2_pin %d\n", nr_irq_2_pin);

	/* GFP_ATOMIC: may run with interrupts disabled */
	if (after_bootmem)
		pin = kzalloc(sizeof(struct irq_pin_list)*nr_irq_2_pin,
				GFP_ATOMIC);
	else
		pin = __alloc_bootmem_nopanic(sizeof(struct irq_pin_list) *
				nr_irq_2_pin, PAGE_SIZE, 0);

	if (!pin)
		panic("can not get more irq_2_pin\n");

	/* chain the new batch, hand out the first entry */
	for (i = 1; i < nr_irq_2_pin; i++)
		pin[i-1].next = &pin[i];

	irq_2_pin_ptr = pin->next;
	pin->next = NULL;

	return pin;
}
1da177e4 344
130fe05d
LT
345struct io_apic {
346 unsigned int index;
347 unsigned int unused[3];
348 unsigned int data;
349};
350
/*
 * Return the fixmap-mapped MMIO base of IO-APIC @idx, including the
 * sub-page offset of its physical address from the MP table.
 */
static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
{
	return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
		+ (mp_ioapics[idx].mp_apicaddr & ~PAGE_MASK);
}
356
/* Indirect IO-APIC register read: select @reg via the index register,
 * then read it back through the data window. */
static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	return readl(&io_apic->data);
}
363
/* Indirect IO-APIC register write: select @reg, then write @value. */
static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}
370
371/*
372 * Re-write a value: to be used for read-modify-write
373 * cycles where the read already set up the index register.
374 *
375 * Older SiS APIC requires we rewrite the index register
376 */
static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
{
	volatile struct io_apic __iomem *io_apic = io_apic_base(apic);
	/* buggy SiS APICs lose the index between accesses: re-select it */
	if (sis_apic_bug)
		writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}
384
cf4c6a2f
AK
385union entry_union {
386 struct { u32 w1, w2; };
387 struct IO_APIC_route_entry entry;
388};
389
/* Read both 32-bit words of one redirection table entry, atomically
 * with respect to other IO-APIC accesses (ioapic_lock held). */
static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
{
	union entry_union eu;
	unsigned long flags;
	spin_lock_irqsave(&ioapic_lock, flags);
	eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
	eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
	spin_unlock_irqrestore(&ioapic_lock, flags);
	return eu.entry;
}
400
f9dadfa7
LT
401/*
402 * When we write a new IO APIC routing entry, we need to write the high
403 * word first! If the mask bit in the low word is clear, we will enable
404 * the interrupt, and we need to make sure the entry is fully populated
405 * before that happens.
406 */
d15512f4
AK
/* Write one RTE, high word first (see comment above); caller holds
 * ioapic_lock. */
static void
__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	union entry_union eu;
	eu.entry = e;
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
}
415
/* Locked wrapper around __ioapic_write_entry(). */
static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	unsigned long flags;
	spin_lock_irqsave(&ioapic_lock, flags);
	__ioapic_write_entry(apic, pin, e);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}
423
424/*
425 * When we mask an IO APIC routing entry, we need to write the low
426 * word first, in order to set the mask bit before we change the
427 * high bits!
428 */
/* Mask one RTE: low word (containing the mask bit) is written first
 * so the pin is disabled before the high word changes. */
static void ioapic_mask_entry(int apic, int pin)
{
	unsigned long flags;
	union entry_union eu = { .entry.mask = 1 };

	spin_lock_irqsave(&ioapic_lock, flags);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}
439
497c9a19
YL
440#ifdef CONFIG_SMP
/*
 * Retarget every RTE wired to @irq: write the new destination into the
 * high word and splice the new vector into the low word.  @dest must
 * already be in register layout (shifted by SET_APIC_LOGICAL_ID).
 * Caller holds ioapic_lock.
 */
static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector)
{
	int apic, pin;
	struct irq_cfg *cfg;
	struct irq_pin_list *entry;

	cfg = irq_cfg(irq);
	entry = cfg->irq_2_pin;
	for (;;) {
		unsigned int reg;

		if (!entry)
			break;

		apic = entry->apic;
		pin = entry->pin;
		io_apic_write(apic, 0x11 + pin*2, dest);
		/* read-modify-write the vector field of the low word */
		reg = io_apic_read(apic, 0x10 + pin*2);
		reg &= ~IO_APIC_REDIR_VECTOR_MASK;
		reg |= vector;
		io_apic_modify(apic, 0x10 + pin *2, reg);
		if (!entry->next)
			break;
		entry = entry->next;
	}
}
efa2559f
YL
467
468static int assign_irq_vector(int irq, cpumask_t mask);
469
497c9a19
YL
/*
 * irq_chip .set_affinity handler: allocate a vector reachable from
 * @mask, then reprogram the IO-APIC entries to the new destination.
 */
static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
{
	struct irq_cfg *cfg;
	unsigned long flags;
	unsigned int dest;
	cpumask_t tmp;

	cfg = irq_cfg(irq);

	/* ignore requests that target no online cpu */
	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	if (assign_irq_vector(irq, mask))
		return;

	cpus_and(tmp, cfg->domain, mask);

	dest = cpu_mask_to_apicid(tmp);
	/*
	 * Only the high 8 bits are valid.
	 */
	dest = SET_APIC_LOGICAL_ID(dest);

	spin_lock_irqsave(&ioapic_lock, flags);
	__target_IO_APIC_irq(irq, dest, cfg->vector);
	irq_to_desc(irq)->affinity = mask;
	spin_unlock_irqrestore(&ioapic_lock, flags);
}
499
500#endif /* CONFIG_SMP */
501
1da177e4
LT
502/*
503 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
504 * shared ISA-space IRQs, so we have to support them. We are super
505 * fast in the common case, and fast for shared ISA-space IRQs.
506 */
507static void add_pin_to_irq(unsigned int irq, int apic, int pin)
508{
0f978f45
YL
509 struct irq_cfg *cfg;
510 struct irq_pin_list *entry;
511
512 /* first time to refer irq_cfg, so with new */
513 cfg = irq_cfg_alloc(irq);
514 entry = cfg->irq_2_pin;
515 if (!entry) {
516 entry = get_one_free_irq_2_pin();
517 cfg->irq_2_pin = entry;
518 entry->apic = apic;
519 entry->pin = pin;
520 printk(KERN_DEBUG " 0 add_pin_to_irq: irq %d --> apic %d pin %d\n", irq, apic, pin);
521 return;
522 }
1da177e4 523
0f978f45
YL
524 while (entry->next) {
525 /* not again, please */
526 if (entry->apic == apic && entry->pin == pin)
527 return;
1da177e4 528
0f978f45 529 entry = entry->next;
1da177e4 530 }
0f978f45
YL
531
532 entry->next = get_one_free_irq_2_pin();
533 entry = entry->next;
1da177e4
LT
534 entry->apic = apic;
535 entry->pin = pin;
0f978f45 536 printk(KERN_DEBUG " x add_pin_to_irq: irq %d --> apic %d pin %d\n", irq, apic, pin);
1da177e4
LT
537}
538
539/*
540 * Reroute an IRQ to a different pin.
541 */
/*
 * Reroute an IRQ: rewrite the first (oldapic, oldpin) node in the
 * irq's pin list to (newapic, newpin); if no such node exists, just
 * add the new pin.
 */
static void __init replace_pin_at_irq(unsigned int irq,
				      int oldapic, int oldpin,
				      int newapic, int newpin)
{
	struct irq_cfg *cfg = irq_cfg(irq);
	struct irq_pin_list *entry = cfg->irq_2_pin;
	int replaced = 0;

	while (entry) {
		if (entry->apic == oldapic && entry->pin == oldpin) {
			entry->apic = newapic;
			entry->pin = newpin;
			replaced = 1;
			/* every one is different, right? */
			break;
		}
		entry = entry->next;
	}

	/* why? call replace before add? */
	if (!replaced)
		add_pin_to_irq(irq, newapic, newpin);
}
565
36062448 566static void __modify_IO_APIC_irq(unsigned int irq, unsigned long enable, unsigned long disable)
1da177e4 567{
0f978f45
YL
568 struct irq_cfg *cfg;
569 struct irq_pin_list *entry;
1da177e4
LT
570 unsigned int pin, reg;
571
0f978f45
YL
572 cfg = irq_cfg(irq);
573 entry = cfg->irq_2_pin;
1da177e4 574 for (;;) {
0f978f45 575 if (!entry)
1da177e4 576 break;
0f978f45 577 pin = entry->pin;
1da177e4
LT
578 reg = io_apic_read(entry->apic, 0x10 + pin*2);
579 reg &= ~disable;
580 reg |= enable;
581 io_apic_modify(entry->apic, 0x10 + pin*2, reg);
582 if (!entry->next)
583 break;
0f978f45 584 entry = entry->next;
1da177e4
LT
585 }
586}
587
588/* mask = 1 */
/* mask = 1: disable delivery on every pin of @irq */
static void __mask_IO_APIC_irq(unsigned int irq)
{
	__modify_IO_APIC_irq(irq, IO_APIC_REDIR_MASKED, 0);
}
593
594/* mask = 0 */
/* mask = 0: re-enable delivery on every pin of @irq */
static void __unmask_IO_APIC_irq(unsigned int irq)
{
	__modify_IO_APIC_irq(irq, 0, IO_APIC_REDIR_MASKED);
}
599
600/* mask = 1, trigger = 0 */
/* mask = 1, trigger = 0: mask the pin and force it to edge mode */
static void __mask_and_edge_IO_APIC_irq(unsigned int irq)
{
	__modify_IO_APIC_irq(irq, IO_APIC_REDIR_MASKED,
				IO_APIC_REDIR_LEVEL_TRIGGER);
}
606
607/* mask = 0, trigger = 1 */
/* mask = 0, trigger = 1: restore level mode and unmask the pin */
static void __unmask_and_level_IO_APIC_irq(unsigned int irq)
{
	__modify_IO_APIC_irq(irq, IO_APIC_REDIR_LEVEL_TRIGGER,
				IO_APIC_REDIR_MASKED);
}
613
/* Locked wrapper: mask @irq at the IO-APIC. */
static void mask_IO_APIC_irq(unsigned int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	__mask_IO_APIC_irq(irq);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}
622
/* Locked wrapper: unmask @irq at the IO-APIC. */
static void unmask_IO_APIC_irq(unsigned int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	__unmask_IO_APIC_irq(irq);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}
631
/* Disable one redirection entry, unless it routes an SMI. */
static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
{
	struct IO_APIC_route_entry entry;

	/* Check delivery_mode to be sure we're not clearing an SMI pin */
	entry = ioapic_read_entry(apic, pin);
	if (entry.delivery_mode == dest_SMI)
		return;

	/*
	 * Disable it in the IO-APIC irq-routing table:
	 */
	ioapic_mask_entry(apic, pin);
}
646
36062448 647static void clear_IO_APIC(void)
1da177e4
LT
648{
649 int apic, pin;
650
651 for (apic = 0; apic < nr_ioapics; apic++)
652 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
653 clear_IO_APIC_pin(apic, pin);
654}
655
1da177e4 656#ifndef CONFIG_SMP
/* UP fallback: deliver @vector to the local APIC itself via the ICR. */
void send_IPI_self(int vector)
{
	unsigned int cfg;

	/*
	 * Wait for idle.
	 */
	apic_wait_icr_idle();
	cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | APIC_DEST_LOGICAL;
	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	apic_write(APIC_ICR, cfg);
}
671#endif /* !CONFIG_SMP */
672
673
674/*
675 * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
676 * specific CPU-side IRQs.
677 */
678
679#define MAX_PIRQS 8
680static int pirq_entries [MAX_PIRQS];
681static int pirqs_enabled;
1da177e4 682
1da177e4
LT
/*
 * "pirq=" boot parameter parser: hand-redirect PIRQ0-7 (IO-APIC pins
 * 16-23) to specific IRQs for broken MP BIOSes.  Entries are stored
 * reversed because PIRQs are usually mapped upside down.
 */
static int __init ioapic_pirq_setup(char *str)
{
	int i, max;
	int ints[MAX_PIRQS+1];

	get_options(str, ARRAY_SIZE(ints), ints);

	/* -1 == no redirection for this PIRQ */
	for (i = 0; i < MAX_PIRQS; i++)
		pirq_entries[i] = -1;

	pirqs_enabled = 1;
	apic_printk(APIC_VERBOSE, KERN_INFO
			"PIRQ redirection, working around broken MP-BIOS.\n");
	max = MAX_PIRQS;
	if (ints[0] < MAX_PIRQS)
		max = ints[0];

	for (i = 0; i < max; i++) {
		apic_printk(APIC_VERBOSE, KERN_DEBUG
				"... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
		/*
		 * PIRQs are mapped upside down, usually.
		 */
		pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
	}
	return 1;
}

__setup("pirq=", ioapic_pirq_setup);
712
713/*
714 * Find the IRQ entry number of a certain pin.
715 */
716static int find_irq_entry(int apic, int pin, int type)
717{
718 int i;
719
720 for (i = 0; i < mp_irq_entries; i++)
2fddb6e2
AS
721 if (mp_irqs[i].mp_irqtype == type &&
722 (mp_irqs[i].mp_dstapic == mp_ioapics[apic].mp_apicid ||
723 mp_irqs[i].mp_dstapic == MP_APIC_ALL) &&
724 mp_irqs[i].mp_dstirq == pin)
1da177e4
LT
725 return i;
726
727 return -1;
728}
729
730/*
731 * Find the pin to which IRQ[irq] (ISA) is connected
732 */
/*
 * Find the IO-APIC pin to which ISA IRQ @irq of @type is connected;
 * returns the destination pin, or -1 if there is no such entry.
 */
static int __init find_isa_irq_pin(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].mp_srcbus;

		/* only non-PCI (ISA-like) buses qualify */
		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].mp_irqtype == type) &&
		    (mp_irqs[i].mp_srcbusirq == irq))

			return mp_irqs[i].mp_dstirq;
	}
	return -1;
}
748
fcfd636a
EB
/*
 * Find which IO-APIC the ISA IRQ @irq of @type is wired to; returns
 * the IO-APIC index, or -1 if no MP-table entry matches.
 */
static int __init find_isa_irq_apic(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].mp_srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].mp_irqtype == type) &&
		    (mp_irqs[i].mp_srcbusirq == irq))
			break;
	}
	if (i < mp_irq_entries) {
		int apic;
		/* map the entry's destination APIC id back to an index */
		for (apic = 0; apic < nr_ioapics; apic++) {
			if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic)
				return apic;
		}
	}

	return -1;
}
771
1da177e4
LT
772/*
773 * Find a specific PCI IRQ entry.
774 * Not an __init, possibly needed by modules
775 */
776static int pin_2_irq(int idx, int apic, int pin);
777
/*
 * Find the IRQ for a PCI device's (bus, slot, pin).  An exact pin
 * match wins; otherwise the first entry matching bus+slot is kept as
 * a best guess for broken mptables.  Returns -1 on failure.
 */
int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
{
	int apic, i, best_guess = -1;

	apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, "
		"slot:%d, pin:%d.\n", bus, slot, pin);
	if (test_bit(bus, mp_bus_not_pci)) {
		printk(KERN_WARNING "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
		return -1;
	}
	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].mp_srcbus;

		/* resolve the entry's destination APIC to an index */
		for (apic = 0; apic < nr_ioapics; apic++)
			if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic ||
			    mp_irqs[i].mp_dstapic == MP_APIC_ALL)
				break;

		/* PCI bus, INT-type entry, matching bus and device slot */
		if (!test_bit(lbus, mp_bus_not_pci) &&
		    !mp_irqs[i].mp_irqtype &&
		    (bus == lbus) &&
		    (slot == ((mp_irqs[i].mp_srcbusirq >> 2) & 0x1f))) {
			int irq = pin_2_irq(i, apic, mp_irqs[i].mp_dstirq);

			if (!(apic || IO_APIC_IRQ(irq)))
				continue;

			if (pin == (mp_irqs[i].mp_srcbusirq & 3))
				return irq;
			/*
			 * Use the first all-but-pin matching entry as a
			 * best-guess fuzzy result for broken mptables.
			 */
			if (best_guess < 0)
				best_guess = irq;
		}
	}
	return best_guess;
}
EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
1da177e4 818
c0a282c2 819#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
1da177e4
LT
820/*
821 * EISA Edge/Level control register, ELCR
822 */
/*
 * Read the ELCR trigger bit for @irq from I/O ports 0x4d0/0x4d1:
 * 1 = level triggered, 0 = edge.  IRQs >= 16 indicate a broken table.
 */
static int EISA_ELCR(unsigned int irq)
{
	if (irq < 16) {
		unsigned int port = 0x4d0 + (irq >> 3);
		return (inb(port) >> (irq & 7)) & 1;
	}
	apic_printk(APIC_VERBOSE, KERN_INFO
			"Broken MPtable reports ISA irq %d\n", irq);
	return 0;
}
c0a282c2 833#endif
1da177e4 834
6728801d
AS
835/* ISA interrupts are always polarity zero edge triggered,
836 * when listed as conforming in the MP table. */
837
838#define default_ISA_trigger(idx) (0)
839#define default_ISA_polarity(idx) (0)
840
1da177e4
LT
841/* EISA interrupts are always polarity zero and can be edge or level
842 * trigger depending on the ELCR value. If an interrupt is listed as
843 * EISA conforming in the MP table, that means its trigger type must
844 * be read in from the ELCR */
845
2fddb6e2 846#define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mp_srcbusirq))
6728801d 847#define default_EISA_polarity(idx) default_ISA_polarity(idx)
1da177e4
LT
848
849/* PCI interrupts are always polarity one level triggered,
850 * when listed as conforming in the MP table. */
851
852#define default_PCI_trigger(idx) (1)
853#define default_PCI_polarity(idx) (1)
854
855/* MCA interrupts are always polarity zero level triggered,
856 * when listed as conforming in the MP table. */
857
858#define default_MCA_trigger(idx) (1)
6728801d 859#define default_MCA_polarity(idx) default_ISA_polarity(idx)
1da177e4 860
61fd47e0 861static int MPBIOS_polarity(int idx)
1da177e4 862{
2fddb6e2 863 int bus = mp_irqs[idx].mp_srcbus;
1da177e4
LT
864 int polarity;
865
866 /*
867 * Determine IRQ line polarity (high active or low active):
868 */
3de352bb 869 switch (mp_irqs[idx].mp_irqflag & 3) {
36062448 870 case 0: /* conforms, ie. bus-type dependent polarity */
1da177e4 871 {
36062448
PC
872 polarity = test_bit(bus, mp_bus_not_pci)?
873 default_ISA_polarity(idx):
874 default_PCI_polarity(idx);
875 break;
876 }
877 case 1: /* high active */
878 {
879 polarity = 0;
880 break;
881 }
882 case 2: /* reserved */
883 {
884 printk(KERN_WARNING "broken BIOS!!\n");
885 polarity = 1;
886 break;
887 }
888 case 3: /* low active */
889 {
890 polarity = 1;
891 break;
892 }
893 default: /* invalid */
894 {
895 printk(KERN_WARNING "broken BIOS!!\n");
896 polarity = 1;
897 break;
898 }
1da177e4
LT
899 }
900 return polarity;
901}
902
/*
 * Decode the trigger-mode bits (bits 2-3 of mp_irqflag) of MP-table
 * entry @idx: 0 = edge, 1 = level.  "Conforming" entries fall back to
 * the bus default (ISA edge, PCI level; EISA read from the ELCR, MCA
 * level when those buses are configured in).
 */
static int MPBIOS_trigger(int idx)
{
	int bus = mp_irqs[idx].mp_srcbus;
	int trigger;

	/*
	 * Determine IRQ trigger mode (edge or level sensitive):
	 */
	switch ((mp_irqs[idx].mp_irqflag>>2) & 3) {
	case 0: /* conforms, ie. bus-type dependent */
	{
		trigger = test_bit(bus, mp_bus_not_pci)?
				default_ISA_trigger(idx):
				default_PCI_trigger(idx);
#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
		/* EISA/MCA need the exact bus type, not just PCI-or-not */
		switch (mp_bus_id_to_type[bus]) {
		case MP_BUS_ISA: /* ISA pin */
		{
			/* set before the switch */
			break;
		}
		case MP_BUS_EISA: /* EISA pin */
		{
			trigger = default_EISA_trigger(idx);
			break;
		}
		case MP_BUS_PCI: /* PCI pin */
		{
			/* set before the switch */
			break;
		}
		case MP_BUS_MCA: /* MCA pin */
		{
			trigger = default_MCA_trigger(idx);
			break;
		}
		default:
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			trigger = 1;
			break;
		}
		}
#endif
		break;
	}
	case 1: /* edge */
	{
		trigger = 0;
		break;
	}
	case 2: /* reserved */
	{
		printk(KERN_WARNING "broken BIOS!!\n");
		trigger = 1;
		break;
	}
	case 3: /* level */
	{
		trigger = 1;
		break;
	}
	default: /* invalid */
	{
		printk(KERN_WARNING "broken BIOS!!\n");
		trigger = 0;
		break;
	}
	}
	return trigger;
}
974
/* Thin wrapper: polarity of MP-table entry @idx. */
static inline int irq_polarity(int idx)
{
	return MPBIOS_polarity(idx);
}
979
/* Thin wrapper: trigger mode of MP-table entry @idx. */
static inline int irq_trigger(int idx)
{
	return MPBIOS_trigger(idx);
}
984
efa2559f 985int (*ioapic_renumber_irq)(int ioapic, int irq);
1da177e4
LT
/*
 * Map MP-table entry @idx delivered on (@apic, @pin) to a Linux IRQ
 * number.  ISA-like buses use the source bus IRQ directly; PCI pins
 * are numbered sequentially across all IO-APICs.  Honors the "pirq="
 * command-line redirection for pins 16-23.
 */
static int pin_2_irq(int idx, int apic, int pin)
{
	int irq, i;
	int bus = mp_irqs[idx].mp_srcbus;

	/*
	 * Debugging check, we are in big trouble if this message pops up!
	 */
	if (mp_irqs[idx].mp_dstirq != pin)
		printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");

	if (test_bit(bus, mp_bus_not_pci))
		irq = mp_irqs[idx].mp_srcbusirq;
	else {
		/*
		 * PCI IRQs are mapped in order
		 */
		i = irq = 0;
		while (i < apic)
			irq += nr_ioapic_registers[i++];
		irq += pin;

		/*
		 * For MPS mode, so far only needed by ES7000 platform
		 */
		if (ioapic_renumber_irq)
			irq = ioapic_renumber_irq(apic, irq);
	}

	/*
	 * PCI IRQ command line redirection. Yes, limits are hardcoded.
	 */
	if ((pin >= 16) && (pin <= 23)) {
		if (pirq_entries[pin-16] != -1) {
			if (!pirq_entries[pin-16]) {
				apic_printk(APIC_VERBOSE, KERN_DEBUG
						"disabling PIRQ%d\n", pin-16);
			} else {
				irq = pirq_entries[pin-16];
				apic_printk(APIC_VERBOSE, KERN_DEBUG
						"using PIRQ%d -> IRQ %d\n",
						pin-16, irq);
			}
		}
	}
	return irq;
}
1033
497c9a19
YL
void lock_vector_lock(void)
{
	/* Used so that the online set of cpus does not change
	 * during assign_irq_vector.
	 */
	spin_lock(&vector_lock);
}
1da177e4 1041
/* Release the lock taken by lock_vector_lock(). */
void unlock_vector_lock(void)
{
	spin_unlock(&vector_lock);
}
1da177e4 1046
497c9a19
YL
/*
 * Allocate an interrupt vector for @irq deliverable to some cpu in
 * @mask.  Vectors are handed out in steps of 8, wrapping through the
 * device-vector range and skipping SYSCALL_VECTOR.  Returns 0 on
 * success, -EBUSY while a previous move is still in flight, -ENOSPC
 * when no free vector exists.  Caller must hold vector_lock.
 */
static int __assign_irq_vector(int irq, cpumask_t mask)
{
	static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
	unsigned int old_vector;
	int cpu;
	struct irq_cfg *cfg;

	cfg = irq_cfg(irq);

	/* Only try and allocate irqs on cpus that are present */
	cpus_and(mask, mask, cpu_online_map);

	if ((cfg->move_in_progress) || cfg->move_cleanup_count)
		return -EBUSY;

	/* current vector already reaches a cpu in the mask: nothing to do */
	old_vector = cfg->vector;
	if (old_vector) {
		cpumask_t tmp;
		cpus_and(tmp, cfg->domain, mask);
		if (!cpus_empty(tmp))
			return 0;
	}

	for_each_cpu_mask_nr(cpu, mask) {
		cpumask_t domain, new_mask;
		int new_cpu;
		int vector, offset;

		domain = vector_allocation_domain(cpu);
		cpus_and(new_mask, domain, cpu_online_map);

		vector = current_vector;
		offset = current_offset;
next:
		vector += 8;
		if (vector >= first_system_vector) {
			/* If we run out of vectors on large boxen, must share them. */
			offset = (offset + 1) % 8;
			vector = FIRST_DEVICE_VECTOR + offset;
		}
		/* wrapped all the way around: this domain is full */
		if (unlikely(current_vector == vector))
			continue;
		if (vector == SYSCALL_VECTOR)
			goto next;

		/* vector must be free on every cpu in the domain */
		for_each_cpu_mask_nr(new_cpu, new_mask)
			if (per_cpu(vector_irq, new_cpu)[vector] != -1)
				goto next;
		/* Found one! */
		current_vector = vector;
		current_offset = offset;
		if (old_vector) {
			/* start the migration away from the old domain */
			cfg->move_in_progress = 1;
			cfg->old_domain = cfg->domain;
		}
		printk(KERN_DEBUG "assign_irq_vector: irq %d vector %#x cpu ", irq, vector);
		for_each_cpu_mask_nr(new_cpu, new_mask) {
			per_cpu(vector_irq, new_cpu)[vector] = irq;
			printk(KERN_CONT " %d ", new_cpu);
		}
		printk(KERN_CONT "\n");
		cfg->vector = vector;
		cfg->domain = domain;
		return 0;
	}
	return -ENOSPC;
}
1114
/* Locked wrapper around __assign_irq_vector(). */
static int assign_irq_vector(int irq, cpumask_t mask)
{
	int err;
	unsigned long flags;

	spin_lock_irqsave(&vector_lock, flags);
	err = __assign_irq_vector(irq, mask);
	spin_unlock_irqrestore(&vector_lock, flags);

	return err;
}
1126
/*
 * Release @irq's vector: clear the per-cpu vector_irq slots on every
 * online cpu in its domain and reset the irq_cfg.  BUGs if no vector
 * was assigned.  Caller must hold vector_lock.
 */
static void __clear_irq_vector(int irq)
{
	struct irq_cfg *cfg;
	cpumask_t mask;
	int cpu, vector;

	cfg = irq_cfg(irq);
	BUG_ON(!cfg->vector);

	vector = cfg->vector;
	cpus_and(mask, cfg->domain, cpu_online_map);
	for_each_cpu_mask_nr(cpu, mask)
		per_cpu(vector_irq, cpu)[vector] = -1;

	cfg->vector = 0;
	cpus_clear(cfg->domain);
}
1144
/*
 * Populate the per-cpu vector_irq[] table for a CPU that is coming
 * online.  This function must be called with vector_lock held.
 */
void __setup_vector_irq(int cpu)
{
	/* Initialize vector_irq on a new cpu */
	int irq, vector;
	struct irq_cfg *cfg;

	/* Mark the inuse vectors: every irq whose domain includes us. */
	for_each_irq_cfg(cfg) {
		if (!cpu_isset(cpu, cfg->domain))
			continue;
		vector = cfg->vector;
		irq = cfg->irq;
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
	/*
	 * Mark the free vectors: drop stale entries pointing at irqs
	 * whose domain no longer includes this CPU.
	 */
	for (vector = 0; vector < NR_VECTORS; ++vector) {
		irq = per_cpu(vector_irq, cpu)[vector];
		if (irq < 0)
			continue;

		cfg = irq_cfg(irq);
		if (!cpu_isset(cpu, cfg->domain))
			per_cpu(vector_irq, cpu)[vector] = -1;
	}
}
3fde6900 1171
f5b9ed7a 1172static struct irq_chip ioapic_chip;
1da177e4
LT
1173
1174#define IOAPIC_AUTO -1
1175#define IOAPIC_EDGE 0
1176#define IOAPIC_LEVEL 1
1177
1d025192
YL
/*
 * Look up the trigger mode for @irq by scanning every IO-APIC pin's
 * MP-table interrupt entry.  Returns the MP-table trigger value for
 * the first matching pin, or 0 (edge) if the irq is not found.
 */
static inline int IO_APIC_irq_trigger(int irq)
{
	int apic, idx, pin;

	for (apic = 0; apic < nr_ioapics; apic++) {
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
			idx = find_irq_entry(apic, pin, mp_INT);
			if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
				return irq_trigger(idx);
		}
	}
	/*
	 * nonexistent IRQs are edge default
	 */
	return 0;
}
1194
/*
 * Bind @irq to the IO-APIC irq_chip with the flow handler matching its
 * trigger mode: fasteoi for level, edge handler otherwise.  With
 * trigger == IOAPIC_AUTO the mode is probed from the MP table.
 */
static void ioapic_register_intr(int irq, unsigned long trigger)
{
	struct irq_desc *desc;

	/* first time to use this irq_desc: legacy irqs are pre-allocated */
	if (irq < 16)
		desc = irq_to_desc(irq);
	else
		desc = irq_to_desc_alloc(irq);

	if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
	    trigger == IOAPIC_LEVEL) {
		desc->status |= IRQ_LEVEL;
		set_irq_chip_and_handler_name(irq, &ioapic_chip,
					      handle_fasteoi_irq, "fasteoi");
	} else {
		desc->status &= ~IRQ_LEVEL;
		set_irq_chip_and_handler_name(irq, &ioapic_chip,
					      handle_edge_irq, "edge");
	}
}
1216
497c9a19
YL
/*
 * Fill in an IO-APIC routing entry for @irq.  The entry is zeroed and
 * then programmed with the platform delivery/destination modes, the
 * target APIC @destination, trigger, polarity and @vector.  Level
 * triggered entries start masked.  Always returns 0 here; the return
 * code exists so callers can share error handling with other variants.
 */
static int setup_ioapic_entry(int apic, int irq,
			      struct IO_APIC_route_entry *entry,
			      unsigned int destination, int trigger,
			      int polarity, int vector)
{
	/*
	 * add it to the IO-APIC irq-routing table:
	 */
	memset(entry, 0, sizeof(*entry));

	entry->delivery_mode = INT_DELIVERY_MODE;
	entry->dest_mode = INT_DEST_MODE;
	entry->dest = destination;

	entry->mask = 0;			/* enable IRQ */
	entry->trigger = trigger;
	entry->polarity = polarity;
	entry->vector = vector;

	/* Mask level triggered irqs.
	 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
	 */
	if (trigger)
		entry->mask = 1;

	return 0;
}
1244
/*
 * Wire up one IO-APIC pin: allocate a vector for @irq, build the
 * routing entry targeting the allocated domain, register the irq_chip
 * handler and finally write the entry to the IO-APIC.  Legacy irqs
 * (< 16) are detached from the 8259A on success.
 */
static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq,
			      int trigger, int polarity)
{
	struct irq_cfg *cfg;
	struct IO_APIC_route_entry entry;
	cpumask_t mask;

	if (!IO_APIC_IRQ(irq))
		return;

	cfg = irq_cfg(irq);

	mask = TARGET_CPUS;
	if (assign_irq_vector(irq, mask))
		return;

	/* restrict the destination to CPUs actually in the vector domain */
	cpus_and(mask, cfg->domain, mask);

	apic_printk(APIC_VERBOSE, KERN_DEBUG
		    "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
		    "IRQ %d Mode:%i Active:%i)\n",
		    apic, mp_ioapics[apic].mp_apicid, pin, cfg->vector,
		    irq, trigger, polarity);

	if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry,
			       cpu_mask_to_apicid(mask), trigger, polarity,
			       cfg->vector)) {
		printk("Failed to setup ioapic entry for ioapic  %d, pin %d\n",
		       mp_ioapics[apic].mp_apicid, pin);
		__clear_irq_vector(irq);	/* roll back the allocation */
		return;
	}

	ioapic_register_intr(irq, trigger);
	if (irq < 16)
		disable_8259A_irq(irq);

	ioapic_write_entry(apic, pin, entry);
}
1285
/*
 * Walk every pin of every IO-APIC and set up the irqs that have an
 * MP-table interrupt entry.  Pins without an entry are reported as
 * "not connected"; first_notcon tracks whether we are in the middle
 * of printing such a run so the list is emitted on one line.
 */
static void __init setup_IO_APIC_irqs(void)
{
	int apic, pin, idx, irq, first_notcon = 1;

	apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");

	for (apic = 0; apic < nr_ioapics; apic++) {
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {

			idx = find_irq_entry(apic, pin, mp_INT);
			if (idx == -1) {
				/* no route for this pin: append to the list */
				if (first_notcon) {
					apic_printk(APIC_VERBOSE, KERN_DEBUG " IO-APIC (apicid-pin) %d-%d", mp_ioapics[apic].mp_apicid, pin);
					first_notcon = 0;
				} else
					apic_printk(APIC_VERBOSE, ", %d-%d", mp_ioapics[apic].mp_apicid, pin);
				continue;
			}
			/* close any pending "not connected" line */
			if (!first_notcon) {
				apic_printk(APIC_VERBOSE, " not connected.\n");
				first_notcon = 1;
			}

			irq = pin_2_irq(idx, apic, pin);

			/* skip pins claimed by the multi-timer quirk */
			if (multi_timer_check(apic, irq))
				continue;

			add_pin_to_irq(irq, apic, pin);

			setup_IO_APIC_irq(apic, pin, irq,
					  irq_trigger(idx), irq_polarity(idx));
		}
	}

	if (!first_notcon)
		apic_printk(APIC_VERBOSE, " not connected.\n");
}
1324
1325/*
f7633ce5 1326 * Set up the timer pin, possibly with the 8259A-master behind.
1da177e4 1327 */
f7633ce5
MR
/*
 * Set up the timer pin, possibly with the 8259A-master behind.
 * Programs an edge-triggered, initially-masked routing entry for the
 * timer on (@apic, @pin) delivering @vector to the boot target CPUs.
 */
static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
					int vector)
{
	struct IO_APIC_route_entry entry;

	memset(&entry, 0, sizeof(entry));

	/*
	 * We use logical delivery to get the timer IRQ
	 * to the first CPU.
	 */
	entry.dest_mode = INT_DEST_MODE;
	entry.mask = 1;					/* mask IRQ now */
	entry.dest = cpu_mask_to_apicid(TARGET_CPUS);
	entry.delivery_mode = INT_DELIVERY_MODE;
	entry.polarity = 0;
	entry.trigger = 0;
	entry.vector = vector;

	/*
	 * The timer IRQ doesn't have to know that behind the
	 * scene we may have a 8259A-master in AEOI mode ...
	 */
	ioapic_register_intr(0, IOAPIC_EDGE);

	/*
	 * Add it to the IO-APIC irq-routing table:
	 */
	ioapic_write_entry(apic, pin, entry);
}
1358
32f71aff
MR
1359
1360__apicdebuginit(void) print_IO_APIC(void)
1da177e4
LT
1361{
1362 int apic, i;
1363 union IO_APIC_reg_00 reg_00;
1364 union IO_APIC_reg_01 reg_01;
1365 union IO_APIC_reg_02 reg_02;
1366 union IO_APIC_reg_03 reg_03;
1367 unsigned long flags;
0f978f45 1368 struct irq_cfg *cfg;
1da177e4
LT
1369
1370 if (apic_verbosity == APIC_QUIET)
1371 return;
1372
36062448 1373 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
1da177e4
LT
1374 for (i = 0; i < nr_ioapics; i++)
1375 printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
ec2cd0a2 1376 mp_ioapics[i].mp_apicid, nr_ioapic_registers[i]);
1da177e4
LT
1377
1378 /*
1379 * We are a bit conservative about what we expect. We have to
1380 * know about every hardware change ASAP.
1381 */
1382 printk(KERN_INFO "testing the IO APIC.......................\n");
1383
1384 for (apic = 0; apic < nr_ioapics; apic++) {
1385
1386 spin_lock_irqsave(&ioapic_lock, flags);
1387 reg_00.raw = io_apic_read(apic, 0);
1388 reg_01.raw = io_apic_read(apic, 1);
1389 if (reg_01.bits.version >= 0x10)
1390 reg_02.raw = io_apic_read(apic, 2);
1391 if (reg_01.bits.version >= 0x20)
1392 reg_03.raw = io_apic_read(apic, 3);
1393 spin_unlock_irqrestore(&ioapic_lock, flags);
1394
ec2cd0a2 1395 printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mp_apicid);
1da177e4
LT
1396 printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
1397 printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
1398 printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
1399 printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS);
1da177e4
LT
1400
1401 printk(KERN_DEBUG ".... register #01: %08X\n", reg_01.raw);
1402 printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
1da177e4
LT
1403
1404 printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
1405 printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);
1da177e4
LT
1406
1407 /*
1408 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
1409 * but the value of reg_02 is read as the previous read register
1410 * value, so ignore it if reg_02 == reg_01.
1411 */
1412 if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
1413 printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
1414 printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
1da177e4
LT
1415 }
1416
1417 /*
1418 * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
1419 * or reg_03, but the value of reg_0[23] is read as the previous read
1420 * register value, so ignore it if reg_03 == reg_0[12].
1421 */
1422 if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
1423 reg_03.raw != reg_01.raw) {
1424 printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
1425 printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT);
1da177e4
LT
1426 }
1427
1428 printk(KERN_DEBUG ".... IRQ redirection table:\n");
1429
d83e94ac
YL
1430 printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
1431 " Stat Dmod Deli Vect: \n");
1da177e4
LT
1432
1433 for (i = 0; i <= reg_01.bits.entries; i++) {
1434 struct IO_APIC_route_entry entry;
1435
cf4c6a2f 1436 entry = ioapic_read_entry(apic, i);
1da177e4 1437
d83e94ac 1438 printk(KERN_DEBUG " %02x %02X ", i, entry.dest);
1da177e4
LT
1439
1440 printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
1441 entry.mask,
1442 entry.trigger,
1443 entry.irr,
1444 entry.polarity,
1445 entry.delivery_status,
1446 entry.dest_mode,
1447 entry.delivery_mode,
1448 entry.vector
1449 );
1450 }
1451 }
1da177e4 1452 printk(KERN_DEBUG "IRQ to pin mappings:\n");
0f978f45
YL
1453 for_each_irq_cfg(cfg) {
1454 struct irq_pin_list *entry = cfg->irq_2_pin;
1455 if (!entry)
1da177e4 1456 continue;
ace80ab7 1457 printk(KERN_DEBUG "IRQ%d ", i);
1da177e4
LT
1458 for (;;) {
1459 printk("-> %d:%d", entry->apic, entry->pin);
1460 if (!entry->next)
1461 break;
0f978f45 1462 entry = entry->next;
1da177e4
LT
1463 }
1464 printk("\n");
1465 }
1466
1467 printk(KERN_INFO ".................................... done.\n");
1468
1469 return;
1470}
1471
32f71aff 1472__apicdebuginit(void) print_APIC_bitfield(int base)
1da177e4
LT
1473{
1474 unsigned int v;
1475 int i, j;
1476
1477 if (apic_verbosity == APIC_QUIET)
1478 return;
1479
1480 printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
1481 for (i = 0; i < 8; i++) {
1482 v = apic_read(base + i*0x10);
1483 for (j = 0; j < 32; j++) {
1484 if (v & (1<<j))
1485 printk("1");
1486 else
1487 printk("0");
1488 }
1489 printk("\n");
1490 }
1491}
1492
32f71aff 1493__apicdebuginit(void) print_local_APIC(void *dummy)
1da177e4
LT
1494{
1495 unsigned int v, ver, maxlvt;
7ab6af7a 1496 u64 icr;
1da177e4
LT
1497
1498 if (apic_verbosity == APIC_QUIET)
1499 return;
1500
1501 printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
1502 smp_processor_id(), hard_smp_processor_id());
66823114 1503 v = apic_read(APIC_ID);
05f2d12c 1504 printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v,
4c9961d5 1505 GET_APIC_ID(v));
1da177e4
LT
1506 v = apic_read(APIC_LVR);
1507 printk(KERN_INFO "... APIC VERSION: %08x\n", v);
1508 ver = GET_APIC_VERSION(v);
e05d723f 1509 maxlvt = lapic_get_maxlvt();
1da177e4
LT
1510
1511 v = apic_read(APIC_TASKPRI);
1512 printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
1513
1514 if (APIC_INTEGRATED(ver)) { /* !82489DX */
1515 v = apic_read(APIC_ARBPRI);
1516 printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
1517 v & APIC_ARBPRI_MASK);
1518 v = apic_read(APIC_PROCPRI);
1519 printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
1520 }
1521
1522 v = apic_read(APIC_EOI);
1523 printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
1524 v = apic_read(APIC_RRR);
1525 printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
1526 v = apic_read(APIC_LDR);
1527 printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
1528 v = apic_read(APIC_DFR);
1529 printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
1530 v = apic_read(APIC_SPIV);
1531 printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
1532
1533 printk(KERN_DEBUG "... APIC ISR field:\n");
1534 print_APIC_bitfield(APIC_ISR);
1535 printk(KERN_DEBUG "... APIC TMR field:\n");
1536 print_APIC_bitfield(APIC_TMR);
1537 printk(KERN_DEBUG "... APIC IRR field:\n");
1538 print_APIC_bitfield(APIC_IRR);
1539
1540 if (APIC_INTEGRATED(ver)) { /* !82489DX */
1541 if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
1542 apic_write(APIC_ESR, 0);
1543 v = apic_read(APIC_ESR);
1544 printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
1545 }
1546
7ab6af7a
HS
1547 icr = apic_icr_read();
1548 printk(KERN_DEBUG "... APIC ICR: %08x\n", icr);
1549 printk(KERN_DEBUG "... APIC ICR2: %08x\n", icr >> 32);
1da177e4
LT
1550
1551 v = apic_read(APIC_LVTT);
1552 printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
1553
1554 if (maxlvt > 3) { /* PC is LVT#4. */
1555 v = apic_read(APIC_LVTPC);
1556 printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
1557 }
1558 v = apic_read(APIC_LVT0);
1559 printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
1560 v = apic_read(APIC_LVT1);
1561 printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
1562
1563 if (maxlvt > 2) { /* ERR is LVT#3. */
1564 v = apic_read(APIC_LVTERR);
1565 printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
1566 }
1567
1568 v = apic_read(APIC_TMICT);
1569 printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
1570 v = apic_read(APIC_TMCCT);
1571 printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
1572 v = apic_read(APIC_TDCR);
1573 printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
1574 printk("\n");
1575}
1576
/* Run print_local_APIC() on every online CPU, waiting for completion. */
__apicdebuginit(void) print_all_local_APICs(void)
{
	on_each_cpu(print_local_APIC, NULL, 1);
}
1581
/*
 * Dump the 8259A PIC state: IMR, IRR, ISR and the ELCR edge/level
 * control registers.  The ISR is read by switching the read register
 * to ISR via OCW3 (0x0b) and back to IRR (0x0a); that port sequence
 * must not be reordered.
 */
__apicdebuginit(void) print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (apic_verbosity == APIC_QUIET)
		return;

	printk(KERN_DEBUG "\nprinting PIC contents\n");

	spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	printk(KERN_DEBUG "... PIC  IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	printk(KERN_DEBUG "... PIC  IRR: %04x\n", v);

	/* OCW3: select ISR for reading on both PICs */
	outb(0x0b, 0xa0);
	outb(0x0b, 0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	/* OCW3: restore IRR read mode */
	outb(0x0a, 0xa0);
	outb(0x0a, 0x20);

	spin_unlock_irqrestore(&i8259A_lock, flags);

	printk(KERN_DEBUG "... PIC  ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
}
1613
32f71aff
MR
/*
 * Dump all interrupt controllers (PIC, all local APICs, IO-APICs).
 * Registered below as an fs_initcall so the state is printed once
 * during boot; always returns 0 to satisfy the initcall signature.
 */
__apicdebuginit(int) print_all_ICs(void)
{
	print_PIC();
	print_all_local_APICs();
	print_IO_APIC();

	return 0;
}

fs_initcall(print_all_ICs);
1624
1da177e4 1625
efa2559f
YL
1626/* Where if anywhere is the i8259 connect in external int mode */
1627static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
1628
1da177e4
LT
/*
 * Early IO-APIC bring-up: count the pins of each IO-APIC, locate the
 * pin (if any) through which the 8259A delivers ExtINT (hardware scan
 * first, MP table as fallback), and clear all routing entries so we
 * start from a known state.
 */
static void __init enable_IO_APIC(void)
{
	union IO_APIC_reg_01 reg_01;
	int i8259_apic, i8259_pin;
	int i, apic;
	unsigned long flags;

	/* default all PIRQ entries to "unset" unless the user configured them */
	if (!pirqs_enabled)
		for (i = 0; i < MAX_PIRQS; i++)
			pirq_entries[i] = -1;

	/*
	 * The number of IO-APIC IRQ registers (== #pins):
	 */
	for (apic = 0; apic < nr_ioapics; apic++) {
		spin_lock_irqsave(&ioapic_lock, flags);
		reg_01.raw = io_apic_read(apic, 1);
		spin_unlock_irqrestore(&ioapic_lock, flags);
		nr_ioapic_registers[apic] = reg_01.bits.entries+1;
	}
	for (apic = 0; apic < nr_ioapics; apic++) {
		int pin;
		/* See if any of the pins is in ExtINT mode */
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
			struct IO_APIC_route_entry entry;
			entry = ioapic_read_entry(apic, pin);

			/* If the interrupt line is enabled and in ExtInt mode
			 * I have found the pin where the i8259 is connected.
			 */
			if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
				ioapic_i8259.apic = apic;
				ioapic_i8259.pin  = pin;
				goto found_i8259;
			}
		}
	}
 found_i8259:
	/* Look to see what if the MP table has reported the ExtINT */
	/* If we could not find the appropriate pin by looking at the ioapic
	 * the i8259 probably is not connected the ioapic but give the
	 * mptable a chance anyway.
	 */
	i8259_pin  = find_isa_irq_pin(0, mp_ExtINT);
	i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
	/* Trust the MP table if nothing is setup in the hardware */
	if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
		printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
		ioapic_i8259.pin  = i8259_pin;
		ioapic_i8259.apic = i8259_apic;
	}
	/* Complain if the MP table and the hardware disagree */
	if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
	    (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
	{
		printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
	}

	/*
	 * Do not trust the IO-APIC being empty at bootup
	 */
	clear_IO_APIC();
}
1693
1694/*
1695 * Not an __init, needed by the reboot code
1696 */
/*
 * Shut down the IO-APIC for reboot: clear all routing entries, and if
 * the 8259A is routed through an IO-APIC pin, reprogram that pin as a
 * virtual-wire ExtINT to the boot CPU so legacy interrupts keep
 * working.  Not an __init, needed by the reboot code.
 */
void disable_IO_APIC(void)
{
	/*
	 * Clear the IO-APIC before rebooting:
	 */
	clear_IO_APIC();

	/*
	 * If the i8259 is routed through an IOAPIC
	 * Put that IOAPIC in virtual wire mode
	 * so legacy interrupts can be delivered.
	 */
	if (ioapic_i8259.pin != -1) {
		struct IO_APIC_route_entry entry;

		memset(&entry, 0, sizeof(entry));
		entry.mask            = 0; /* Enabled */
		entry.trigger         = 0; /* Edge */
		entry.irr             = 0;
		entry.polarity        = 0; /* High */
		entry.delivery_status = 0;
		entry.dest_mode       = 0; /* Physical */
		entry.delivery_mode   = dest_ExtINT; /* ExtInt */
		entry.vector          = 0;
		entry.dest            = read_apic_id();

		/*
		 * Add it to the IO-APIC irq-routing table:
		 */
		ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
	}
	disconnect_bsp_APIC(ioapic_i8259.pin != -1);
}
1730
1731/*
1732 * function to set the IO-APIC physical IDs based on the
1733 * values stored in the MPC table.
1734 *
1735 * by Matt Domsch <Matt_Domsch@dell.com> Tue Dec 21 12:25:05 CST 1999
1736 */
1737
1da177e4
LT
/*
 * function to set the IO-APIC physical IDs based on the
 * values stored in the MPC table.
 *
 * Resolves duplicate/out-of-range IDs against the map of physical
 * APIC IDs already present, patches the MP irq routing entries when
 * an ID had to change, writes the final ID into each IO-APIC's
 * register 0 and reads it back as a sanity check.
 *
 * by Matt Domsch <Matt_Domsch@dell.com>  Tue Dec 21 12:25:05 CST 1999
 */
static void __init setup_ioapic_ids_from_mpc(void)
{
	union IO_APIC_reg_00 reg_00;
	physid_mask_t phys_id_present_map;
	int apic;
	int i;
	unsigned char old_id;
	unsigned long flags;

	/* platform quirk hook may take over this job entirely */
	if (x86_quirks->setup_ioapic_ids && x86_quirks->setup_ioapic_ids())
		return;

	/*
	 * Don't check I/O APIC IDs for xAPIC systems.  They have
	 * no meaning without the serial APIC bus.
	 */
	if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		|| APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
		return;
	/*
	 * This is broken; anything with a real cpu count has to
	 * circumvent this idiocy regardless.
	 */
	phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map);

	/*
	 * Set the IOAPIC ID to the value stored in the MPC table.
	 */
	for (apic = 0; apic < nr_ioapics; apic++) {

		/* Read the register 0 value */
		spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(apic, 0);
		spin_unlock_irqrestore(&ioapic_lock, flags);

		old_id = mp_ioapics[apic].mp_apicid;

		/* out-of-range ID from BIOS: fall back to the hardware value */
		if (mp_ioapics[apic].mp_apicid >= get_physical_broadcast()) {
			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
				apic, mp_ioapics[apic].mp_apicid);
			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
				reg_00.bits.ID);
			mp_ioapics[apic].mp_apicid = reg_00.bits.ID;
		}

		/*
		 * Sanity check, is the ID really free? Every APIC in a
		 * system must have a unique ID or we get lots of nice
		 * 'stuck on smp_invalidate_needed IPI wait' messages.
		 */
		if (check_apicid_used(phys_id_present_map,
					mp_ioapics[apic].mp_apicid)) {
			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
				apic, mp_ioapics[apic].mp_apicid);
			/* pick the first free physical ID instead */
			for (i = 0; i < get_physical_broadcast(); i++)
				if (!physid_isset(i, phys_id_present_map))
					break;
			if (i >= get_physical_broadcast())
				panic("Max APIC ID exceeded!\n");
			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
				i);
			physid_set(i, phys_id_present_map);
			mp_ioapics[apic].mp_apicid = i;
		} else {
			physid_mask_t tmp;
			tmp = apicid_to_cpu_present(mp_ioapics[apic].mp_apicid);
			apic_printk(APIC_VERBOSE, "Setting %d in the "
					"phys_id_present_map\n",
					mp_ioapics[apic].mp_apicid);
			physids_or(phys_id_present_map, phys_id_present_map, tmp);
		}


		/*
		 * We need to adjust the IRQ routing table
		 * if the ID changed.
		 */
		if (old_id != mp_ioapics[apic].mp_apicid)
			for (i = 0; i < mp_irq_entries; i++)
				if (mp_irqs[i].mp_dstapic == old_id)
					mp_irqs[i].mp_dstapic
						= mp_ioapics[apic].mp_apicid;

		/*
		 * Read the right value from the MPC table and
		 * write it into the ID register.
		 */
		apic_printk(APIC_VERBOSE, KERN_INFO
			"...changing IO-APIC physical APIC ID to %d ...",
			mp_ioapics[apic].mp_apicid);

		reg_00.bits.ID = mp_ioapics[apic].mp_apicid;
		spin_lock_irqsave(&ioapic_lock, flags);
		io_apic_write(apic, 0, reg_00.raw);
		spin_unlock_irqrestore(&ioapic_lock, flags);

		/*
		 * Sanity check
		 */
		spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(apic, 0);
		spin_unlock_irqrestore(&ioapic_lock, flags);
		if (reg_00.bits.ID != mp_ioapics[apic].mp_apicid)
			printk("could not set ID!\n");
		else
			apic_printk(APIC_VERBOSE, " ok.\n");
	}
}
1da177e4 1846
/* Set via "no_timer_check" on the command line to skip timer_irq_works(). */
int no_timer_check __initdata;

static int __init notimercheck(char *s)
{
	no_timer_check = 1;
	return 1;
}
__setup("no_timer_check", notimercheck);
1855
1da177e4
LT
1856/*
1857 * There is a nasty bug in some older SMP boards, their mptable lies
1858 * about the timer IRQ. We do the following to work around the situation:
1859 *
1860 * - timer IRQ defaults to IO-APIC IRQ
1861 * - if this function detects that timer IRQs are defunct, then we fall
1862 * back to ISA timer IRQs
1863 */
/*
 * There is a nasty bug in some older SMP boards, their mptable lies
 * about the timer IRQ. We do the following to work around the situation:
 *
 *	- timer IRQ defaults to IO-APIC IRQ
 *	- if this function detects that timer IRQs are defunct, then we fall
 *	  back to ISA timer IRQs
 *
 * Returns 1 when the timer appears to tick (or when checking is
 * disabled via "no_timer_check"), 0 otherwise.
 */
static int __init timer_irq_works(void)
{
	unsigned long t1 = jiffies;
	unsigned long flags;

	if (no_timer_check)
		return 1;

	/* enable interrupts just for the measurement window */
	local_save_flags(flags);
	local_irq_enable();
	/* Let ten ticks pass... */
	mdelay((10 * 1000) / HZ);
	local_irq_restore(flags);

	/*
	 * Expect a few ticks at least, to be sure some possible
	 * glue logic does not lock up after one or two first
	 * ticks in a non-ExtINT mode.  Also the local APIC
	 * might have cached one ExtINT interrupt.  Finally, at
	 * least one tick may be lost due to delays.
	 */
	if (time_after(jiffies, t1 + 4))
		return 1;

	return 0;
}
1890
1891/*
1892 * In the SMP+IOAPIC case it might happen that there are an unspecified
1893 * number of pending IRQ events unhandled. These cases are very rare,
1894 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
1895 * better to do it this way as thus we do not have to be aware of
1896 * 'pending' interrupts in the IRQ path, except at this point.
1897 */
1898/*
1899 * Edge triggered needs to resend any interrupt
1900 * that was delayed but this is now handled in the device
1901 * independent code.
1902 */
1903
1904/*
f5b9ed7a
IM
1905 * Startup quirk:
1906 *
1da177e4
LT
1907 * Starting up a edge-triggered IO-APIC interrupt is
1908 * nasty - we need to make sure that we get the edge.
1909 * If it is already asserted for some reason, we need
1910 * return 1 to indicate that is was pending.
1911 *
1912 * This is not complete - we should be able to fake
1913 * an edge even if it isn't on the 8259A...
f5b9ed7a
IM
1914 *
1915 * (We do this for level-triggered IRQs too - it cannot hurt.)
1da177e4 1916 */
/*
 * Startup quirk:
 *
 * Starting up a edge-triggered IO-APIC interrupt is
 * nasty - we need to make sure that we get the edge.
 * If it is already asserted for some reason, we need
 * return 1 to indicate that is was pending.
 *
 * This is not complete - we should be able to fake
 * an edge even if it isn't on the 8259A...
 *
 * (We do this for level-triggered IRQs too - it cannot hurt.)
 */
static unsigned int startup_ioapic_irq(unsigned int irq)
{
	int was_pending = 0;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	if (irq < 16) {
		/* legacy irq: take it away from the 8259A, note pending state */
		disable_8259A_irq(irq);
		if (i8259A_irq_pending(irq))
			was_pending = 1;
	}
	__unmask_IO_APIC_irq(irq);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return was_pending;
}
1933
ace80ab7 1934static int ioapic_retrigger_irq(unsigned int irq)
1da177e4 1935{
a1420f39 1936 send_IPI_self(irq_cfg(irq)->vector);
c0ad90a3
IM
1937
1938 return 1;
1939}
1940
497c9a19
YL
#ifdef CONFIG_SMP
/*
 * IRQ_MOVE_CLEANUP_VECTOR handler: after an irq has migrated to a new
 * vector/CPU, the old CPUs receive this IPI and release their stale
 * per-cpu vector_irq[] slots, decrementing move_cleanup_count.
 */
asmlinkage void smp_irq_move_cleanup_interrupt(void)
{
	unsigned vector, me;
	ack_APIC_irq();
	irq_enter();

	me = smp_processor_id();
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		unsigned int irq;
		struct irq_desc *desc;
		struct irq_cfg *cfg;
		irq = __get_cpu_var(vector_irq)[vector];

		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		cfg = irq_cfg(irq);
		spin_lock(&desc->lock);
		if (!cfg->move_cleanup_count)
			goto unlock;

		/* this vector is still the irq's live vector here: keep it */
		if ((vector == cfg->vector) && cpu_isset(me, cfg->domain))
			goto unlock;

		__get_cpu_var(vector_irq)[vector] = -1;
		cfg->move_cleanup_count--;
unlock:
		spin_unlock(&desc->lock);
	}

	irq_exit();
}
1975
/*
 * Finish a pending irq migration: once the first interrupt arrives on
 * the new vector/CPU, tell the CPUs of the old domain (via
 * IRQ_MOVE_CLEANUP_VECTOR IPIs) to release their stale vector slots.
 * Called from the ack handlers in interrupt context.
 */
static void irq_complete_move(unsigned int irq)
{
	struct irq_cfg *cfg = irq_cfg(irq);
	unsigned vector, me;

	if (likely(!cfg->move_in_progress))
		return;

	/* orig_ax holds the (negated) vector this interrupt arrived on */
	vector = ~get_irq_regs()->orig_ax;
	me = smp_processor_id();
	if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) {
		cpumask_t cleanup_mask;

		cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
		cfg->move_cleanup_count = cpus_weight(cleanup_mask);
		send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
		cfg->move_in_progress = 0;
	}
}
#else
/* UP build: irq migration cannot happen, nothing to complete. */
static inline void irq_complete_move(unsigned int irq) {}
#endif
1998
1d025192
YL
/*
 * Edge-triggered ack: finish any pending vector migration, migrate the
 * irq if requested, then EOI the local APIC.  The order matters —
 * migration must be settled before the EOI.
 */
static void ack_apic_edge(unsigned int irq)
{
	irq_complete_move(irq);
	move_native_irq(irq);
	ack_APIC_irq();
}
2005
/* Count of level irqs mistakenly latched as edge (see erratum below). */
atomic_t irq_mis_count;

static void ack_apic_level(unsigned int irq)
{
	unsigned long v;
	int i;

	irq_complete_move(irq);
	move_native_irq(irq);
/*
 * It appears there is an erratum which affects at least version 0x11
 * of I/O APIC (that's the 82093AA and cores integrated into various
 * chipsets).  Under certain conditions a level-triggered interrupt is
 * erroneously delivered as edge-triggered one but the respective IRR
 * bit gets set nevertheless.  As a result the I/O unit expects an EOI
 * message but it will never arrive and further interrupts are blocked
 * from the source.  The exact reason is so far unknown, but the
 * phenomenon was observed when two consecutive interrupt requests
 * from a given source get delivered to the same CPU and the source is
 * temporarily disabled in between.
 *
 * A workaround is to simulate an EOI message manually.  We achieve it
 * by setting the trigger mode to edge and then to level when the edge
 * trigger mode gets detected in the TMR of a local APIC for a
 * level-triggered interrupt.  We mask the source for the time of the
 * operation to prevent an edge-triggered interrupt escaping meanwhile.
 * The idea is from Manfred Spraul.  --macro
 */
	i = irq_cfg(irq)->vector;

	/* sample the TMR bit for this vector *before* the EOI */
	v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));

	ack_APIC_irq();

	if (!(v & (1 << (i & 0x1f)))) {
		/* TMR says edge: the erratum hit, do the manual EOI dance */
		atomic_inc(&irq_mis_count);
		spin_lock(&ioapic_lock);
		__mask_and_edge_IO_APIC_irq(irq);
		__unmask_and_level_IO_APIC_irq(irq);
		spin_unlock(&ioapic_lock);
	}
}
2047
f5b9ed7a
IM
/* irq_chip for interrupts routed through an IO-APIC. */
static struct irq_chip ioapic_chip __read_mostly = {
	.name		= "IO-APIC",
	.startup	= startup_ioapic_irq,
	.mask		= mask_IO_APIC_irq,
	.unmask		= unmask_IO_APIC_irq,
	.ack		= ack_apic_edge,	/* edge-triggered flow */
	.eoi		= ack_apic_level,	/* level (fasteoi) flow */
#ifdef CONFIG_SMP
	.set_affinity	= set_ioapic_affinity_irq,
#endif
	.retrigger	= ioapic_retrigger_irq,
};
2060
1da177e4
LT
2061
/*
 * Give every irq that is supposed to go through the IO-APIC but never
 * got a vector a sane fallback: legacy irqs (< 16) fall back to the
 * 8259A, everything else gets no_irq_chip.
 */
static inline void init_IO_APIC_traps(void)
{
	int irq;
	struct irq_desc *desc;
	struct irq_cfg *cfg;

	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	for_each_irq_cfg(cfg) {
		irq = cfg->irq;
		if (IO_APIC_IRQ(irq) && !cfg->vector) {
			/*
			 * Hmm.. We don't have an entry for this,
			 * so default to an old-fashioned 8259
			 * interrupt if we can..
			 */
			if (irq < 16)
				make_8259A_irq(irq);
			else {
				desc = irq_to_desc(irq);
				/* Strange. Oh, well.. */
				desc->chip = &no_irq_chip;
			}
		}
	}
}
2097
f5b9ed7a
IM
2098/*
2099 * The local APIC irq-chip implementation:
2100 */
1da177e4 2101
36062448 2102static void mask_lapic_irq(unsigned int irq)
1da177e4
LT
2103{
2104 unsigned long v;
2105
2106 v = apic_read(APIC_LVT0);
593f4a78 2107 apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
1da177e4
LT
2108}
2109
36062448 2110static void unmask_lapic_irq(unsigned int irq)
1da177e4 2111{
f5b9ed7a 2112 unsigned long v;
1da177e4 2113
f5b9ed7a 2114 v = apic_read(APIC_LVT0);
593f4a78 2115 apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
f5b9ed7a 2116}
1da177e4 2117
1d025192
YL
/* EOI an interrupt delivered directly through the local APIC. */
static void ack_lapic_irq(unsigned int irq)
{
	ack_APIC_irq();
}
2122
/*
 * The local APIC irq-chip implementation: used when an interrupt is
 * driven straight off a local APIC LVT entry instead of an IO-APIC.
 */
static struct irq_chip lapic_chip __read_mostly = {
	.name		= "local-APIC",
	.mask		= mask_lapic_irq,
	.unmask		= unmask_lapic_irq,
	.ack		= ack_lapic_irq,
};
2129
/*
 * Bind @irq to the local-APIC irq_chip with the edge flow handler
 * (local APIC LVT interrupts are edge-triggered here).
 */
static void lapic_register_intr(int irq)
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);
	desc->status &= ~IRQ_LEVEL;
	set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
				      "edge");
}
2139
e9427101 2140static void __init setup_nmi(void)
1da177e4
LT
2141{
2142 /*
36062448 2143 * Dirty trick to enable the NMI watchdog ...
1da177e4
LT
2144 * We put the 8259A master into AEOI mode and
2145 * unmask on all local APICs LVT0 as NMI.
2146 *
2147 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
2148 * is from Maciej W. Rozycki - so we do not have to EOI from
2149 * the NMI handler or the timer interrupt.
36062448 2150 */
1da177e4
LT
2151 apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");
2152
e9427101 2153 enable_NMI_through_LVT0();
1da177e4
LT
2154
2155 apic_printk(APIC_VERBOSE, " done.\n");
2156}
2157
/*
 * This looks a bit hackish but it's about the only one way of sending
 * a few INTA cycles to 8259As and any associated glue logic.  ICR does
 * not support the ExtINT mode, unfortunately.  We need to send these
 * cycles as some i82489DX-based boards have glue logic that keeps the
 * 8259A interrupt line asserted until INTA.  --macro
 */
static inline void __init unlock_ExtINT_logic(void)
{
	int apic, pin, i;
	struct IO_APIC_route_entry entry0, entry1;
	unsigned char save_control, save_freq_select;

	/* The RTC (ISA IRQ 8) pin is used as the INTA generator. */
	pin = find_isa_irq_pin(8, mp_INT);
	if (pin == -1) {
		WARN_ON_ONCE(1);
		return;
	}
	apic = find_isa_irq_apic(8, mp_INT);
	if (apic == -1) {
		WARN_ON_ONCE(1);
		return;
	}

	/* Save the original routing entry and quiesce the pin. */
	entry0 = ioapic_read_entry(apic, pin);
	clear_IO_APIC_pin(apic, pin);

	/* Temporary entry: deliver the RTC pin as ExtINT to this CPU. */
	memset(&entry1, 0, sizeof(entry1));

	entry1.dest_mode = 0;			/* physical delivery */
	entry1.mask = 0;			/* unmask IRQ now */
	entry1.dest = hard_smp_processor_id();
	entry1.delivery_mode = dest_ExtINT;
	entry1.polarity = entry0.polarity;
	entry1.trigger = 0;
	entry1.vector = 0;

	ioapic_write_entry(apic, pin, entry1);

	/*
	 * Program the RTC for periodic interrupts so it keeps generating
	 * INTA cycles while we wait below; save CMOS state to restore.
	 */
	save_control = CMOS_READ(RTC_CONTROL);
	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
	CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
		   RTC_FREQ_SELECT);
	CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);

	/* Wait up to ~1s, advancing faster whenever a periodic tick fires. */
	i = 100;
	while (i-- > 0) {
		mdelay(10);
		if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
			i -= 10;
	}

	/* Restore the RTC, quiesce the pin and put the old entry back. */
	CMOS_WRITE(save_control, RTC_CONTROL);
	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
	clear_IO_APIC_pin(apic, pin);

	ioapic_write_entry(apic, pin, entry0);
}
2216
/*
 * "disable_timer_pin_1" boot parameter: when set, check_timer() clears
 * IO-APIC pin 1 after the timer test succeeds.
 */
static int disable_timer_pin_1 __initdata;

static int __init parse_disable_timer_pin_1(char *arg)
{
	disable_timer_pin_1 = 1;
	return 0;
}
early_param("disable_timer_pin_1", parse_disable_timer_pin_1);

/* Set by check_timer() when IRQ0 ends up routed through the 8259A. */
int timer_through_8259 __initdata;
1da177e4
LT
/*
 * This code may look a bit paranoid, but it's supposed to cooperate with
 * a wide range of boards and BIOS bugs.  Fortunately only the timer IRQ
 * is so screwy.  Thanks to Brian Perkins for testing/hacking this beast
 * fanatically on his truly buggy board.
 *
 * Probe order: IRQ0 through the IO-APIC pin, then through the 8259A
 * cascade pin, then local-APIC virtual wire, then 8259A ExtINT.  The
 * first configuration where timer_irq_works() succeeds is kept; if all
 * fail we panic.
 */
static inline void __init check_timer(void)
{
	struct irq_cfg *cfg = irq_cfg(0);
	int apic1, pin1, apic2, pin2;
	int no_pin1 = 0;
	unsigned int ver;
	unsigned long flags;

	local_irq_save(flags);

	ver = apic_read(APIC_LVR);
	ver = GET_APIC_VERSION(ver);

	/*
	 * get/set the timer IRQ vector:
	 */
	disable_8259A_irq(0);
	assign_irq_vector(0, TARGET_CPUS);

	/*
	 * As IRQ0 is to be enabled in the 8259A, the virtual
	 * wire has to be disabled in the local APIC.  Also
	 * timer interrupts need to be acknowledged manually in
	 * the 8259A for the i82489DX when using the NMI
	 * watchdog as that APIC treats NMIs as level-triggered.
	 * The AEOI mode will finish them in the 8259A
	 * automatically.
	 */
	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
	init_8259A(1);
	timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver));

	/* Candidate 1: direct 8254 pin; candidate 2: 8259A cascade pin. */
	pin1 = find_isa_irq_pin(0, mp_INT);
	apic1 = find_isa_irq_apic(0, mp_INT);
	pin2 = ioapic_i8259.pin;
	apic2 = ioapic_i8259.apic;

	apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
		    "apic1=%d pin1=%d apic2=%d pin2=%d\n",
		    cfg->vector, apic1, pin1, apic2, pin2);

	/*
	 * Some BIOS writers are clueless and report the ExtINTA
	 * I/O APIC input from the cascaded 8259A as the timer
	 * interrupt input.  So just in case, if only one pin
	 * was found above, try it both directly and through the
	 * 8259A.
	 */
	if (pin1 == -1) {
		pin1 = pin2;
		apic1 = apic2;
		no_pin1 = 1;
	} else if (pin2 == -1) {
		pin2 = pin1;
		apic2 = apic1;
	}

	if (pin1 != -1) {
		/*
		 * Ok, does IRQ0 through the IOAPIC work?
		 */
		if (no_pin1) {
			add_pin_to_irq(0, apic1, pin1);
			setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
		}
		unmask_IO_APIC_irq(0);
		if (timer_irq_works()) {
			if (nmi_watchdog == NMI_IO_APIC) {
				setup_nmi();
				enable_8259A_irq(0);
			}
			if (disable_timer_pin_1 > 0)
				clear_IO_APIC_pin(0, pin1);
			goto out;
		}
		clear_IO_APIC_pin(apic1, pin1);
		if (!no_pin1)
			apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
				    "8254 timer not connected to IO-APIC\n");

		apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
			    "(IRQ0) through the 8259A ...\n");
		apic_printk(APIC_QUIET, KERN_INFO
			    "..... (found apic %d pin %d) ...\n", apic2, pin2);
		/*
		 * legacy devices should be connected to IO APIC #0
		 */
		replace_pin_at_irq(0, apic1, pin1, apic2, pin2);
		setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
		unmask_IO_APIC_irq(0);
		enable_8259A_irq(0);
		if (timer_irq_works()) {
			apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
			timer_through_8259 = 1;
			if (nmi_watchdog == NMI_IO_APIC) {
				disable_8259A_irq(0);
				setup_nmi();
				enable_8259A_irq(0);
			}
			goto out;
		}
		/*
		 * Cleanup, just in case ...
		 */
		disable_8259A_irq(0);
		clear_IO_APIC_pin(apic2, pin2);
		apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
	}

	/* IO-APIC delivery failed entirely: the watchdog cannot work. */
	if (nmi_watchdog == NMI_IO_APIC) {
		apic_printk(APIC_QUIET, KERN_WARNING "timer doesn't work "
			    "through the IO-APIC - disabling NMI Watchdog!\n");
		nmi_watchdog = NMI_NONE;
	}
	timer_ack = 0;

	apic_printk(APIC_QUIET, KERN_INFO
		    "...trying to set up timer as Virtual Wire IRQ...\n");

	lapic_register_intr(0);
	apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector);	/* Fixed mode */
	enable_8259A_irq(0);

	if (timer_irq_works()) {
		apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
		goto out;
	}
	disable_8259A_irq(0);
	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
	apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");

	apic_printk(APIC_QUIET, KERN_INFO
		    "...trying to set up timer as ExtINT IRQ...\n");

	init_8259A(0);
	make_8259A_irq(0);
	apic_write(APIC_LVT0, APIC_DM_EXTINT);

	unlock_ExtINT_logic();

	if (timer_irq_works()) {
		apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
		goto out;
	}
	apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
	panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
	      "report. Then try booting with the 'noapic' option.\n");
out:
	local_irq_restore(flags);
}
2384
2385/*
af174783
MR
2386 * Traditionally ISA IRQ2 is the cascade IRQ, and is not available
2387 * to devices. However there may be an I/O APIC pin available for
2388 * this interrupt regardless. The pin may be left unconnected, but
2389 * typically it will be reused as an ExtINT cascade interrupt for
2390 * the master 8259A. In the MPS case such a pin will normally be
2391 * reported as an ExtINT interrupt in the MP table. With ACPI
2392 * there is no provision for ExtINT interrupts, and in the absence
2393 * of an override it would be treated as an ordinary ISA I/O APIC
2394 * interrupt, that is edge-triggered and unmasked by default. We
2395 * used to do this, but it caused problems on some systems because
2396 * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using
2397 * the same ExtINT cascade interrupt to drive the local APIC of the
2398 * bootstrap processor. Therefore we refrain from routing IRQ2 to
2399 * the I/O APIC in all cases now. No actual device should request
2400 * it anyway. --macro
1da177e4
LT
2401 */
2402#define PIC_IRQS (1 << PIC_CASCADE_IR)
2403
2404void __init setup_IO_APIC(void)
2405{
2406 enable_IO_APIC();
2407
af174783 2408 io_apic_irqs = ~PIC_IRQS;
1da177e4
LT
2409
2410 printk("ENABLING IO-APIC IRQs\n");
2411
2412 /*
2413 * Set up IO-APIC IRQ routing.
2414 */
2415 if (!acpi_ioapic)
2416 setup_ioapic_ids_from_mpc();
2417 sync_Arb_IDs();
2418 setup_IO_APIC_irqs();
2419 init_IO_APIC_traps();
1e4c85f9 2420 check_timer();
1da177e4
LT
2421}
2422
/*
 * Called after all the initialization is done. If we didnt find any
 * APIC bugs then we can allow the modify fast path
 */

static int __init io_apic_bug_finalize(void)
{
	/* -1 is the "undetermined" sentinel; settle it to "no SiS bug". */
	if (sis_apic_bug == -1)
		sis_apic_bug = 0;
	return 0;
}

late_initcall(io_apic_bug_finalize);
2436
/*
 * Per-IOAPIC suspend/resume state: the sysdev plus a saved copy of all
 * redirection-table entries.  entry[0] is a GNU zero-length trailing
 * array; the real size is allocated in ioapic_init_sysfs().
 */
struct sysfs_ioapic_data {
	struct sys_device dev;
	struct IO_APIC_route_entry entry[0];
};
static struct sysfs_ioapic_data *mp_ioapic_data[MAX_IO_APICS];
1da177e4 2442
438510f6 2443static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
1da177e4
LT
2444{
2445 struct IO_APIC_route_entry *entry;
2446 struct sysfs_ioapic_data *data;
1da177e4 2447 int i;
36062448 2448
1da177e4
LT
2449 data = container_of(dev, struct sysfs_ioapic_data, dev);
2450 entry = data->entry;
36062448 2451 for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
cf4c6a2f 2452 entry[i] = ioapic_read_entry(dev->id, i);
1da177e4
LT
2453
2454 return 0;
2455}
2456
2457static int ioapic_resume(struct sys_device *dev)
2458{
2459 struct IO_APIC_route_entry *entry;
2460 struct sysfs_ioapic_data *data;
2461 unsigned long flags;
2462 union IO_APIC_reg_00 reg_00;
2463 int i;
36062448 2464
1da177e4
LT
2465 data = container_of(dev, struct sysfs_ioapic_data, dev);
2466 entry = data->entry;
2467
2468 spin_lock_irqsave(&ioapic_lock, flags);
2469 reg_00.raw = io_apic_read(dev->id, 0);
ec2cd0a2
AS
2470 if (reg_00.bits.ID != mp_ioapics[dev->id].mp_apicid) {
2471 reg_00.bits.ID = mp_ioapics[dev->id].mp_apicid;
1da177e4
LT
2472 io_apic_write(dev->id, 0, reg_00.raw);
2473 }
1da177e4 2474 spin_unlock_irqrestore(&ioapic_lock, flags);
36062448 2475 for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
cf4c6a2f 2476 ioapic_write_entry(dev->id, i, entry[i]);
1da177e4
LT
2477
2478 return 0;
2479}
2480
/* sysdev class hooking IOAPIC state save/restore into suspend/resume. */
static struct sysdev_class ioapic_sysdev_class = {
	.name = "ioapic",
	.suspend = ioapic_suspend,
	.resume = ioapic_resume,
};
2486
2487static int __init ioapic_init_sysfs(void)
2488{
36062448 2489 struct sys_device *dev;
1da177e4
LT
2490 int i, size, error = 0;
2491
2492 error = sysdev_class_register(&ioapic_sysdev_class);
2493 if (error)
2494 return error;
2495
36062448
PC
2496 for (i = 0; i < nr_ioapics; i++) {
2497 size = sizeof(struct sys_device) + nr_ioapic_registers[i]
1da177e4 2498 * sizeof(struct IO_APIC_route_entry);
25556c16 2499 mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL);
1da177e4
LT
2500 if (!mp_ioapic_data[i]) {
2501 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
2502 continue;
2503 }
1da177e4 2504 dev = &mp_ioapic_data[i]->dev;
36062448 2505 dev->id = i;
1da177e4
LT
2506 dev->cls = &ioapic_sysdev_class;
2507 error = sysdev_register(dev);
2508 if (error) {
2509 kfree(mp_ioapic_data[i]);
2510 mp_ioapic_data[i] = NULL;
2511 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
2512 continue;
2513 }
2514 }
2515
2516 return 0;
2517}
2518
2519device_initcall(ioapic_init_sysfs);
2520
/*
 * Dynamic irq allocate and deallocation
 */

/*
 * Allocate an unused irq number.  Returns the irq, or 0 on failure.
 * The search runs downward from nr_irqs - 1 under vector_lock, skipping
 * legacy irqs and irqs that already own a vector.
 */
unsigned int create_irq_nr(unsigned int irq_want)
{
	/* Allocate an unused irq */
	unsigned int irq, new;
	unsigned long flags;
	struct irq_cfg *cfg_new;

#ifndef CONFIG_HAVE_SPARSE_IRQ
	/* only can use bus/dev/fn.. when per_cpu vector is used */
	irq_want = nr_irqs - 1;
#endif
	/*
	 * NOTE(review): with sparse irqs, irq_want is never read after this
	 * point - the search below always starts at nr_irqs - 1.  Verify
	 * whether the hint was meant to seed the loop.
	 */

	irq = 0;
	spin_lock_irqsave(&vector_lock, flags);
	for (new = (nr_irqs - 1); new > 0; new--) {
		if (platform_legacy_irq(new))
			continue;
		cfg_new = irq_cfg(new);
		if (cfg_new && cfg_new->vector != 0)
			continue;
		if (!cfg_new)
			cfg_new = irq_cfg_alloc(new);
		/*
		 * Only the first free candidate is tried: if the vector
		 * assignment fails, the loop breaks and 0 is returned
		 * instead of probing lower irq numbers.
		 */
		if (__assign_irq_vector(new, TARGET_CPUS) == 0)
			irq = new;
		break;
	}
	spin_unlock_irqrestore(&vector_lock, flags);

	if (irq > 0) {
		dynamic_irq_init(irq);
	}
	return irq;
}
2557
/*
 * Allocate an unused irq with no caller-provided hint; pass the highest
 * possible irq number to create_irq_nr().
 */
int create_irq(void)
{
	return create_irq_nr(nr_irqs - 1);
}
2562
3fc471ed
EB
/*
 * Release a dynamically allocated irq: tear down the descriptor state,
 * then free its vector under vector_lock.
 */
void destroy_irq(unsigned int irq)
{
	unsigned long flags;

	dynamic_irq_cleanup(irq);

	spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq);
	spin_unlock_irqrestore(&vector_lock, flags);
}
3fc471ed 2573
2d3fcc1c 2574/*
27b46d76 2575 * MSI message composition
2d3fcc1c
EB
2576 */
2577#ifdef CONFIG_PCI_MSI
/*
 * Compose the MSI address/data message for @irq: assign a vector for
 * TARGET_CPUS and encode destination, delivery mode and vector into the
 * message fields.  Returns 0 on success or the vector-assignment error.
 * @pdev is currently unused.
 */
static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
{
	struct irq_cfg *cfg;
	int err;
	unsigned dest;
	cpumask_t tmp;

	tmp = TARGET_CPUS;
	err = assign_irq_vector(irq, tmp);
	if (err)
		return err;

	/* Destination APIC id from the cpus the vector actually covers. */
	cfg = irq_cfg(irq);
	cpus_and(tmp, cfg->domain, tmp);
	dest = cpu_mask_to_apicid(tmp);

	/* Address: base, dest mode (phys/logical), redirection hint, dest. */
	msg->address_hi = MSI_ADDR_BASE_HI;
	msg->address_lo =
		MSI_ADDR_BASE_LO |
		((INT_DEST_MODE == 0) ?
			MSI_ADDR_DEST_MODE_PHYSICAL:
			MSI_ADDR_DEST_MODE_LOGICAL) |
		((INT_DELIVERY_MODE != dest_LowestPrio) ?
			MSI_ADDR_REDIRECTION_CPU:
			MSI_ADDR_REDIRECTION_LOWPRI) |
		MSI_ADDR_DEST_ID(dest);

	/* Data: edge trigger, assert, delivery mode and the vector. */
	msg->data =
		MSI_DATA_TRIGGER_EDGE |
		MSI_DATA_LEVEL_ASSERT |
		((INT_DELIVERY_MODE != dest_LowestPrio) ?
			MSI_DATA_DELIVERY_FIXED:
			MSI_DATA_DELIVERY_LOWPRI) |
		MSI_DATA_VECTOR(cfg->vector);

	return err;
}
2615
3b7d1921
EB
2616#ifdef CONFIG_SMP
/*
 * Retarget an MSI irq to the cpus in @mask: reassign the vector, then
 * rewrite the vector and destination-id fields of the device's MSI
 * message.  Silently ignores masks with no online cpu or a failed
 * vector assignment.
 */
static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
{
	struct irq_cfg *cfg;
	struct msi_msg msg;
	unsigned int dest;
	cpumask_t tmp;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	if (assign_irq_vector(irq, mask))
		return;

	cfg = irq_cfg(irq);
	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	/* Read-modify-write: only vector and destination id change. */
	read_msi_msg(irq, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);

	write_msi_msg(irq, &msg);
	irq_to_desc(irq)->affinity = mask;
}
3b7d1921 2645#endif /* CONFIG_SMP */
2d3fcc1c 2646
3b7d1921
EB
/*
 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI or MSI-X Capability Structure.
 */
static struct irq_chip msi_chip = {
	.name		= "PCI-MSI",
	.unmask		= unmask_msi_irq,
	.mask		= mask_msi_irq,
	.ack		= ack_apic_edge,
#ifdef CONFIG_SMP
	.set_affinity	= set_msi_irq_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};
2661
1d025192
YL
2662
2663static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc, int irq)
2664{
2665 int ret;
2666 struct msi_msg msg;
2667
2668 ret = msi_compose_msg(dev, irq, &msg);
2669 if (ret < 0)
2670 return ret;
2671
2672 set_irq_msi(irq, desc);
2673 write_msi_msg(irq, &msg);
2674
2675 set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");
2676
2677 return 0;
2678}
2679
199751d7
YL
2680static unsigned int build_irq_for_pci_dev(struct pci_dev *dev)
2681{
2682 unsigned int irq;
2683
2684 irq = dev->bus->number;
2685 irq <<= 8;
2686 irq |= dev->devfn;
2687 irq <<= 12;
2688
2689 return irq;
2690}
2691
/*
 * Arch hook for MSI setup: allocate an irq (hinted from bus/devfn) and
 * program the MSI message.  Returns 0 on success, -1 if no irq could be
 * allocated, or the negative setup error (irq is freed again then).
 */
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
{
	unsigned int hint = build_irq_for_pci_dev(dev) + 0x100;
	int irq = create_irq_nr(hint);
	int err;

	if (irq == 0)
		return -1;

	err = setup_msi_irq(dev, desc, irq);
	if (err >= 0)
		return 0;

	destroy_irq(irq);
	return err;
}
2713
/* Arch hook for MSI teardown: release the irq allocated at setup. */
void arch_teardown_msi_irq(unsigned int irq)
{
	destroy_irq(irq);
}
2718
2d3fcc1c
EB
2719#endif /* CONFIG_PCI_MSI */
2720
8b955b0d
EB
2721/*
2722 * Hypertransport interrupt support
2723 */
2724#ifdef CONFIG_HT_IRQ
2725
2726#ifdef CONFIG_SMP
2727
/*
 * Rewrite the destination APIC id and vector of an already-programmed
 * Hypertransport irq message (read-modify-write; other fields kept).
 */
static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
{
	struct ht_irq_msg msg;
	fetch_ht_irq_msg(irq, &msg);

	/* Clear old vector/destination bits in both message halves. */
	msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
	msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);

	msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
	msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);

	write_ht_irq_msg(irq, &msg);
}
2741
/*
 * Retarget a Hypertransport irq to the cpus in @mask: reassign the
 * vector, then patch the HT message via target_ht_irq().  Silently
 * ignores masks with no online cpu or a failed vector assignment
 * (mirrors set_msi_irq_affinity above).
 */
static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
{
	struct irq_cfg *cfg;
	unsigned int dest;
	cpumask_t tmp;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	if (assign_irq_vector(irq, mask))
		return;

	cfg = irq_cfg(irq);
	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	target_ht_irq(irq, dest, cfg->vector);
	irq_to_desc(irq)->affinity = mask;
}
2762#endif
2763
/* irq_chip for Hypertransport interrupts (edge-acked via the APIC). */
static struct irq_chip ht_irq_chip = {
	.name		= "PCI-HT",
	.mask		= mask_ht_irq,
	.unmask		= unmask_ht_irq,
	.ack		= ack_apic_edge,
#ifdef CONFIG_SMP
	.set_affinity	= set_ht_irq_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};
2774
2775int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
2776{
497c9a19
YL
2777 struct irq_cfg *cfg;
2778 int err;
2779 cpumask_t tmp;
8b955b0d 2780
497c9a19
YL
2781 tmp = TARGET_CPUS;
2782 err = assign_irq_vector(irq, tmp);
2783 if ( !err) {
ec68307c 2784 struct ht_irq_msg msg;
8b955b0d 2785 unsigned dest;
8b955b0d 2786
497c9a19
YL
2787 cfg = irq_cfg(irq);
2788 cpus_and(tmp, cfg->domain, tmp);
8b955b0d
EB
2789 dest = cpu_mask_to_apicid(tmp);
2790
ec68307c 2791 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
8b955b0d 2792
ec68307c
EB
2793 msg.address_lo =
2794 HT_IRQ_LOW_BASE |
8b955b0d 2795 HT_IRQ_LOW_DEST_ID(dest) |
497c9a19 2796 HT_IRQ_LOW_VECTOR(cfg->vector) |
8b955b0d
EB
2797 ((INT_DEST_MODE == 0) ?
2798 HT_IRQ_LOW_DM_PHYSICAL :
2799 HT_IRQ_LOW_DM_LOGICAL) |
2800 HT_IRQ_LOW_RQEOI_EDGE |
2801 ((INT_DELIVERY_MODE != dest_LowestPrio) ?
2802 HT_IRQ_LOW_MT_FIXED :
2803 HT_IRQ_LOW_MT_ARBITRATED) |
2804 HT_IRQ_LOW_IRQ_MASKED;
2805
ec68307c 2806 write_ht_irq_msg(irq, &msg);
8b955b0d 2807
a460e745
IM
2808 set_irq_chip_and_handler_name(irq, &ht_irq_chip,
2809 handle_edge_irq, "edge");
8b955b0d 2810 }
497c9a19 2811 return err;
8b955b0d
EB
2812}
2813#endif /* CONFIG_HT_IRQ */
2814
1da177e4 2815/* --------------------------------------------------------------------------
36062448 2816 ACPI-based IOAPIC Configuration
1da177e4
LT
2817 -------------------------------------------------------------------------- */
2818
888ba6c6 2819#ifdef CONFIG_ACPI
1da177e4 2820
36062448 2821int __init io_apic_get_unique_id(int ioapic, int apic_id)
1da177e4
LT
2822{
2823 union IO_APIC_reg_00 reg_00;
2824 static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
2825 physid_mask_t tmp;
2826 unsigned long flags;
2827 int i = 0;
2828
2829 /*
36062448
PC
2830 * The P4 platform supports up to 256 APIC IDs on two separate APIC
2831 * buses (one for LAPICs, one for IOAPICs), where predecessors only
1da177e4 2832 * supports up to 16 on one shared APIC bus.
36062448 2833 *
1da177e4
LT
2834 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
2835 * advantage of new APIC bus architecture.
2836 */
2837
2838 if (physids_empty(apic_id_map))
2839 apic_id_map = ioapic_phys_id_map(phys_cpu_present_map);
2840
2841 spin_lock_irqsave(&ioapic_lock, flags);
2842 reg_00.raw = io_apic_read(ioapic, 0);
2843 spin_unlock_irqrestore(&ioapic_lock, flags);
2844
2845 if (apic_id >= get_physical_broadcast()) {
2846 printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
2847 "%d\n", ioapic, apic_id, reg_00.bits.ID);
2848 apic_id = reg_00.bits.ID;
2849 }
2850
2851 /*
36062448 2852 * Every APIC in a system must have a unique ID or we get lots of nice
1da177e4
LT
2853 * 'stuck on smp_invalidate_needed IPI wait' messages.
2854 */
2855 if (check_apicid_used(apic_id_map, apic_id)) {
2856
2857 for (i = 0; i < get_physical_broadcast(); i++) {
2858 if (!check_apicid_used(apic_id_map, i))
2859 break;
2860 }
2861
2862 if (i == get_physical_broadcast())
2863 panic("Max apic_id exceeded!\n");
2864
2865 printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
2866 "trying %d\n", ioapic, apic_id, i);
2867
2868 apic_id = i;
36062448 2869 }
1da177e4
LT
2870
2871 tmp = apicid_to_cpu_present(apic_id);
2872 physids_or(apic_id_map, apic_id_map, tmp);
2873
2874 if (reg_00.bits.ID != apic_id) {
2875 reg_00.bits.ID = apic_id;
2876
2877 spin_lock_irqsave(&ioapic_lock, flags);
2878 io_apic_write(ioapic, 0, reg_00.raw);
2879 reg_00.raw = io_apic_read(ioapic, 0);
2880 spin_unlock_irqrestore(&ioapic_lock, flags);
2881
2882 /* Sanity check */
6070f9ec
AD
2883 if (reg_00.bits.ID != apic_id) {
2884 printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
2885 return -1;
2886 }
1da177e4
LT
2887 }
2888
2889 apic_printk(APIC_VERBOSE, KERN_INFO
2890 "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);
2891
2892 return apic_id;
2893}
2894
2895
36062448 2896int __init io_apic_get_version(int ioapic)
1da177e4
LT
2897{
2898 union IO_APIC_reg_01 reg_01;
2899 unsigned long flags;
2900
2901 spin_lock_irqsave(&ioapic_lock, flags);
2902 reg_01.raw = io_apic_read(ioapic, 1);
2903 spin_unlock_irqrestore(&ioapic_lock, flags);
2904
2905 return reg_01.bits.version;
2906}
2907
2908
36062448 2909int __init io_apic_get_redir_entries(int ioapic)
1da177e4
LT
2910{
2911 union IO_APIC_reg_01 reg_01;
2912 unsigned long flags;
2913
2914 spin_lock_irqsave(&ioapic_lock, flags);
2915 reg_01.raw = io_apic_read(ioapic, 1);
2916 spin_unlock_irqrestore(&ioapic_lock, flags);
2917
2918 return reg_01.bits.entries;
2919}
2920
2921
/*
 * Register an ACPI-discovered IO-APIC pin -> irq routing and program
 * the redirection entry.  Returns 0 on success, -EINVAL for irqs that
 * the IO-APIC cannot handle.
 */
int io_apic_set_pci_routing(int ioapic, int pin, int irq, int triggering, int polarity)
{
	if (!IO_APIC_IRQ(irq)) {
		printk(KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
			ioapic);
		return -EINVAL;
	}

	/*
	 * IRQs < 16 are already in the irq_2_pin[] map
	 */
	if (irq >= 16)
		add_pin_to_irq(irq, ioapic, pin);

	setup_IO_APIC_irq(ioapic, pin, irq, triggering, polarity);

	return 0;
}
2940
61fd47e0
SL
2941int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
2942{
2943 int i;
2944
2945 if (skip_ioapic_setup)
2946 return -1;
2947
2948 for (i = 0; i < mp_irq_entries; i++)
2fddb6e2
AS
2949 if (mp_irqs[i].mp_irqtype == mp_INT &&
2950 mp_irqs[i].mp_srcbusirq == bus_irq)
61fd47e0
SL
2951 break;
2952 if (i >= mp_irq_entries)
2953 return -1;
2954
2955 *trigger = irq_trigger(i);
2956 *polarity = irq_polarity(i);
2957 return 0;
2958}
2959
888ba6c6 2960#endif /* CONFIG_ACPI */
1a3f239d 2961
497c9a19
YL
2962/*
2963 * This function currently is only a helper for the i386 smp boot process where
2964 * we need to reprogram the ioredtbls to cater for the cpus which have come online
2965 * so mask in all cases should simply be TARGET_CPUS
2966 */
2967#ifdef CONFIG_SMP
2968void __init setup_ioapic_dest(void)
2969{
2970 int pin, ioapic, irq, irq_entry;
2971 struct irq_cfg *cfg;
2972 struct irq_desc *desc;
2973
2974 if (skip_ioapic_setup == 1)
2975 return;
2976
2977 for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
2978 for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
2979 irq_entry = find_irq_entry(ioapic, pin, mp_INT);
2980 if (irq_entry == -1)
2981 continue;
2982 irq = pin_2_irq(irq_entry, ioapic, pin);
2983
2984 /* setup_IO_APIC_irqs could fail to get vector for some device
2985 * when you have too many devices, because at that time only boot
2986 * cpu is online.
2987 */
2988 cfg = irq_cfg(irq);
2989 if (!cfg->vector)
2990 setup_IO_APIC_irq(ioapic, pin, irq,
2991 irq_trigger(irq_entry),
2992 irq_polarity(irq_entry));
2993 else {
2994 desc = irq_to_desc(irq);
2995 set_ioapic_affinity_irq(irq, TARGET_CPUS);
2996 }
2997 }
2998
2999 }
3000}
3001#endif
3002
/*
 * Map each IO-APIC's register window into the fixmap area.  With a
 * valid MP table the physical address comes from mp_ioapics[]; a bogus
 * zero address disables IO-APIC support and falls through to mapping a
 * dummy bootmem page so the fixmap slot stays valid.
 */
void __init ioapic_init_mappings(void)
{
	unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
	int i;

	for (i = 0; i < nr_ioapics; i++) {
		if (smp_found_config) {
			ioapic_phys = mp_ioapics[i].mp_apicaddr;
			if (!ioapic_phys) {
				printk(KERN_ERR
				       "WARNING: bogus zero IO-APIC "
				       "address found in MPTABLE, "
				       "disabling IO/APIC support!\n");
				smp_found_config = 0;
				skip_ioapic_setup = 1;
				goto fake_ioapic_page;
			}
		} else {
fake_ioapic_page:
			/* No usable address: map a zeroed placeholder page. */
			ioapic_phys = (unsigned long)
				alloc_bootmem_pages(PAGE_SIZE);
			ioapic_phys = __pa(ioapic_phys);
		}
		set_fixmap_nocache(idx, ioapic_phys);
		printk(KERN_DEBUG "mapped IOAPIC to %08lx (%08lx)\n",
		       __fix_to_virt(idx), ioapic_phys);
		idx++;
	}
}
3032
This page took 0.656496 seconds and 5 git commands to generate.