/*
 * Local APIC related interfaces to support IOAPIC, MSI, HT_IRQ etc.
 *
 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 *	Moved from arch/x86/kernel/apic/io_apic.c.
 * Jiang Liu <jiang.liu@linux.intel.com>
 *	Enable support of hierarchical irqdomains
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <asm/irqdomain.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/i8259.h>
#include <asm/desc.h>
#include <asm/irq_remapping.h>
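
/*
 * Per-interrupt state maintained by this file: @cfg carries the assigned
 * vector and the cached destination APIC ID, @domain is the set of cpus
 * the vector is currently programmed on, and @old_domain tracks cpus that
 * still need their stale vector entry cleaned up after a migration.
 */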
struct apic_chip_data {
	struct irq_cfg		cfg;
	cpumask_var_t		domain;
	cpumask_var_t		old_domain;
	u8			move_in_progress : 1;
};

struct irq_domain *x86_vector_domain;
EXPORT_SYMBOL_GPL(x86_vector_domain);
static DEFINE_RAW_SPINLOCK(vector_lock);
static cpumask_var_t vector_cpumask, vector_searchmask, searched_cpumask;
static struct irq_chip lapic_controller;
#ifdef CONFIG_X86_IO_APIC
static struct apic_chip_data *legacy_irq_data[NR_IRQS_LEGACY];
#endif
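
/*
 * vector_lock protects the per-cpu vector_irq[] tables and the three
 * static scratch cpumasks above, which serve as temporaries for the
 * vector search in __assign_irq_vector().
 */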
void lock_vector_lock(void)
{
	/* Used to ensure that the online set of cpus does not change
	 * during assign_irq_vector.
	 */
	raw_spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
	raw_spin_unlock(&vector_lock);
}

static struct apic_chip_data *apic_chip_data(struct irq_data *irq_data)
{
	if (!irq_data)
		return NULL;

	while (irq_data->parent_data)
		irq_data = irq_data->parent_data;

	return irq_data->chip_data;
}

struct irq_cfg *irqd_cfg(struct irq_data *irq_data)
{
	struct apic_chip_data *data = apic_chip_data(irq_data);

	return data ? &data->cfg : NULL;
}
EXPORT_SYMBOL_GPL(irqd_cfg);

struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irqd_cfg(irq_get_irq_data(irq));
}

static struct apic_chip_data *alloc_apic_chip_data(int node)
{
	struct apic_chip_data *data;

	data = kzalloc_node(sizeof(*data), GFP_KERNEL, node);
	if (!data)
		return NULL;
	if (!zalloc_cpumask_var_node(&data->domain, GFP_KERNEL, node))
		goto out_data;
	if (!zalloc_cpumask_var_node(&data->old_domain, GFP_KERNEL, node))
		goto out_domain;
	return data;
out_domain:
	free_cpumask_var(data->domain);
out_data:
	kfree(data);
	return NULL;
}

static void free_apic_chip_data(struct apic_chip_data *data)
{
	if (data) {
		free_cpumask_var(data->domain);
		free_cpumask_var(data->old_domain);
		kfree(data);
	}
}

static int __assign_irq_vector(int irq, struct apic_chip_data *d,
			       const struct cpumask *mask)
{
	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
	static int current_offset = VECTOR_OFFSET_START % 16;
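	/*
	 * Example: vectors 0x31 and 0x41 share offset 1 but sit in
	 * priority classes 3 and 4 (vector >> 4), which is why the search
	 * below advances in steps of 16 and only bumps the offset once a
	 * full sweep of classes has been exhausted.
	 */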
	int cpu, vector;

	if (d->move_in_progress)
		return -EBUSY;

	/* Only try and allocate irqs on cpus that are present */
	cpumask_clear(d->old_domain);
	cpumask_clear(searched_cpumask);
	cpu = cpumask_first_and(mask, cpu_online_mask);
	while (cpu < nr_cpu_ids) {
		int new_cpu, offset;

		/* Get the possible target cpus for @mask/@cpu from the apic */
		apic->vector_allocation_domain(cpu, vector_cpumask, mask);

		/*
		 * Clear the offline cpus from @vector_cpumask for searching
		 * and verify whether the result overlaps with @mask. If true,
		 * then the call to apic->cpu_mask_to_apicid_and() will
		 * succeed as well. If not, no point in trying to find a
		 * vector in this mask.
		 */
		cpumask_and(vector_searchmask, vector_cpumask, cpu_online_mask);
		if (!cpumask_intersects(vector_searchmask, mask))
			goto next_cpu;

		if (cpumask_subset(vector_cpumask, d->domain)) {
			if (cpumask_equal(vector_cpumask, d->domain))
				goto success;
			/*
			 * Mark the cpus which are no longer in the mask for
			 * cleanup.
			 */
			cpumask_andnot(d->old_domain, d->domain, vector_cpumask);
			vector = d->cfg.vector;
			goto update;
		}

		vector = current_vector;
		offset = current_offset;
next:
		vector += 16;
		if (vector >= first_system_vector) {
			offset = (offset + 1) % 16;
			vector = FIRST_EXTERNAL_VECTOR + offset;
		}

		/* If the search wrapped around, try the next cpu */
		if (unlikely(current_vector == vector))
			goto next_cpu;

		if (test_bit(vector, used_vectors))
			goto next;

		for_each_cpu(new_cpu, vector_searchmask) {
			if (!IS_ERR_OR_NULL(per_cpu(vector_irq, new_cpu)[vector]))
				goto next;
		}
		/* Found one! */
		current_vector = vector;
		current_offset = offset;
		/* Schedule the old vector for cleanup on all cpus */
		if (d->cfg.vector)
			cpumask_copy(d->old_domain, d->domain);
		for_each_cpu(new_cpu, vector_searchmask)
			per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq);
		goto update;

next_cpu:
		/*
		 * We exclude the current @vector_cpumask from the requested
		 * @mask and try again with the next online cpu in the
		 * result. We cannot modify @mask, so we use @vector_cpumask
		 * as a temporary buffer here as it will be reassigned when
		 * calling apic->vector_allocation_domain() above.
		 */
		cpumask_or(searched_cpumask, searched_cpumask, vector_cpumask);
		cpumask_andnot(vector_cpumask, mask, searched_cpumask);
		cpu = cpumask_first_and(vector_cpumask, cpu_online_mask);
		continue;
	}
	return -ENOSPC;

update:
	/* Cleanup required ? */
	d->move_in_progress =
		cpumask_intersects(d->old_domain, cpu_online_mask);
	d->cfg.vector = vector;
	cpumask_copy(d->domain, vector_cpumask);
success:
	/*
	 * Cache destination APIC IDs into cfg->dest_apicid. This cannot fail
	 * as we already established, that mask & d->domain & cpu_online_mask
	 * is not empty.
	 */
	BUG_ON(apic->cpu_mask_to_apicid_and(mask, d->domain,
					    &d->cfg.dest_apicid));
	return 0;
}

static int assign_irq_vector(int irq, struct apic_chip_data *data,
			     const struct cpumask *mask)
{
	int err;
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	err = __assign_irq_vector(irq, data, mask);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return err;
}
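
/*
 * Vector placement policy: honour an explicitly requested mask first,
 * then prefer cpus on the device's NUMA node, and finally fall back to
 * the apic's default target cpus.
 */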
static int assign_irq_vector_policy(int irq, int node,
				    struct apic_chip_data *data,
				    struct irq_alloc_info *info)
{
	if (info && info->mask)
		return assign_irq_vector(irq, data, info->mask);
	if (node != NUMA_NO_NODE &&
	    assign_irq_vector(irq, data, cpumask_of_node(node)) == 0)
		return 0;
	return assign_irq_vector(irq, data, apic->target_cpus());
}

static void clear_irq_vector(int irq, struct apic_chip_data *data)
{
	struct irq_desc *desc;
	int cpu, vector;

	BUG_ON(!data->cfg.vector);

	vector = data->cfg.vector;
	for_each_cpu_and(cpu, data->domain, cpu_online_mask)
		per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;

	data->cfg.vector = 0;
	cpumask_clear(data->domain);

	if (likely(!data->move_in_progress))
		return;

	desc = irq_to_desc(irq);
	for_each_cpu_and(cpu, data->old_domain, cpu_online_mask) {
		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
		     vector++) {
			if (per_cpu(vector_irq, cpu)[vector] != desc)
				continue;
			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
			break;
		}
	}
	data->move_in_progress = 0;
}

void init_irq_alloc_info(struct irq_alloc_info *info,
			 const struct cpumask *mask)
{
	memset(info, 0, sizeof(*info));
	info->mask = mask;
}

void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
{
	if (src)
		*dst = *src;
	else
		memset(dst, 0, sizeof(*dst));
}
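
/*
 * irq_domain callbacks: the hierarchical irqdomain core invokes these
 * when interrupts are allocated to or released from this vector domain.
 */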
static void x86_vector_free_irqs(struct irq_domain *domain,
				 unsigned int virq, unsigned int nr_irqs)
{
	struct apic_chip_data *apic_data;
	struct irq_data *irq_data;
	unsigned long flags;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(x86_vector_domain, virq + i);
		if (irq_data && irq_data->chip_data) {
			raw_spin_lock_irqsave(&vector_lock, flags);
			clear_irq_vector(virq + i, irq_data->chip_data);
			apic_data = irq_data->chip_data;
			irq_domain_reset_irq_data(irq_data);
			raw_spin_unlock_irqrestore(&vector_lock, flags);
			free_apic_chip_data(apic_data);
#ifdef CONFIG_X86_IO_APIC
			if (virq + i < nr_legacy_irqs())
				legacy_irq_data[virq + i] = NULL;
#endif
		}
	}
}

static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	struct apic_chip_data *data;
	struct irq_data *irq_data;
	int i, err, node;

	if (disable_apic)
		return -ENXIO;

	/* Currently vector allocator can't guarantee contiguous allocations */
	if ((info->flags & X86_IRQ_ALLOC_CONTIGUOUS_VECTORS) && nr_irqs > 1)
		return -ENOSYS;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(domain, virq + i);
		BUG_ON(!irq_data);
		node = irq_data_get_node(irq_data);
#ifdef	CONFIG_X86_IO_APIC
		if (virq + i < nr_legacy_irqs() && legacy_irq_data[virq + i])
			data = legacy_irq_data[virq + i];
		else
#endif
			data = alloc_apic_chip_data(node);
		if (!data) {
			err = -ENOMEM;
			goto error;
		}

		irq_data->chip = &lapic_controller;
		irq_data->chip_data = data;
		irq_data->hwirq = virq + i;
		err = assign_irq_vector_policy(virq + i, node, data, info);
		if (err)
			goto error;
	}

	return 0;

error:
	x86_vector_free_irqs(domain, virq, i + 1);
	return err;
}

static const struct irq_domain_ops x86_vector_domain_ops = {
	.alloc	= x86_vector_alloc_irqs,
	.free	= x86_vector_free_irqs,
};

int __init arch_probe_nr_irqs(void)
{
	int nr;

	if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
		nr_irqs = NR_VECTORS * nr_cpu_ids;

	nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
	/*
	 * Room for MSI and HT dynamic irqs.
	 */
	if (gsi_top <= NR_IRQS_LEGACY)
		nr += 8 * nr_cpu_ids;
	else
		nr += gsi_top * 16;
#endif
	if (nr < nr_irqs)
		nr_irqs = nr;

	/*
	 * We don't know if PIC is present at this point so we need to do
	 * probe() to get the right number of legacy IRQs.
	 */
	return legacy_pic->probe();
}

#ifdef CONFIG_X86_IO_APIC
static void init_legacy_irqs(void)
{
	int i, node = cpu_to_node(0);
	struct apic_chip_data *data;

	/*
	 * For legacy IRQs, start with assigning irq0 to irq15 to
	 * ISA_IRQ_VECTOR(i) for all cpus.
	 */
	for (i = 0; i < nr_legacy_irqs(); i++) {
		data = legacy_irq_data[i] = alloc_apic_chip_data(node);
		BUG_ON(!data);

		data->cfg.vector = ISA_IRQ_VECTOR(i);
		cpumask_setall(data->domain);
		irq_set_chip_data(i, data);
	}
}
#else
static void init_legacy_irqs(void) { }
#endif
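
/*
 * Register the vector domain as the root of the x86 irqdomain hierarchy;
 * the IOAPIC, MSI and HT irqdomains are stacked on top of it and
 * delegate vector management to it.
 */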
int __init arch_early_irq_init(void)
{
	init_legacy_irqs();

	x86_vector_domain = irq_domain_add_tree(NULL, &x86_vector_domain_ops,
						NULL);
	BUG_ON(x86_vector_domain == NULL);
	irq_set_default_host(x86_vector_domain);

	arch_init_msi_domain(x86_vector_domain);
	arch_init_htirq_domain(x86_vector_domain);

	BUG_ON(!alloc_cpumask_var(&vector_cpumask, GFP_KERNEL));
	BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));
	BUG_ON(!alloc_cpumask_var(&searched_cpumask, GFP_KERNEL));

	return arch_early_ioapic_init();
}

/* Initialize vector_irq on a new cpu */
static void __setup_vector_irq(int cpu)
{
	struct apic_chip_data *data;
	struct irq_desc *desc;
	int irq, vector;

	/* Mark the inuse vectors */
	for_each_irq_desc(irq, desc) {
		struct irq_data *idata = irq_desc_get_irq_data(desc);

		data = apic_chip_data(idata);
		if (!data || !cpumask_test_cpu(cpu, data->domain))
			continue;
		vector = data->cfg.vector;
		per_cpu(vector_irq, cpu)[vector] = desc;
	}
	/* Mark the free vectors */
	for (vector = 0; vector < NR_VECTORS; ++vector) {
		desc = per_cpu(vector_irq, cpu)[vector];
		if (IS_ERR_OR_NULL(desc))
			continue;

		data = apic_chip_data(irq_desc_get_irq_data(desc));
		if (!cpumask_test_cpu(cpu, data->domain))
			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
	}
}

/*
 * Setup the vector to irq mappings. Must be called with vector_lock held.
 */
void setup_vector_irq(int cpu)
{
	int irq;

	lockdep_assert_held(&vector_lock);
	/*
	 * On most of the platforms, legacy PIC delivers the interrupts on the
	 * boot cpu. But there are certain platforms where PIC interrupts are
	 * delivered to multiple cpus. If the legacy IRQ is handled by the
	 * legacy PIC, for the new cpu that is coming online, setup the static
	 * legacy vector to irq mapping:
	 */
	for (irq = 0; irq < nr_legacy_irqs(); irq++)
		per_cpu(vector_irq, cpu)[ISA_IRQ_VECTOR(irq)] = irq_to_desc(irq);

	__setup_vector_irq(cpu);
}

static int apic_retrigger_irq(struct irq_data *irq_data)
{
	struct apic_chip_data *data = apic_chip_data(irq_data);
	unsigned long flags;
	int cpu;

	raw_spin_lock_irqsave(&vector_lock, flags);
	cpu = cpumask_first_and(data->domain, cpu_online_mask);
	apic->send_IPI_mask(cpumask_of(cpu), data->cfg.vector);
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	return 1;
}
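
/*
 * Note: apic_retrigger_irq() above replays an interrupt by sending an IPI
 * with the assigned vector to the first online cpu in the interrupt's
 * domain.
 */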
void apic_ack_edge(struct irq_data *data)
{
	irq_complete_move(irqd_cfg(data));
	irq_move_irq(data);
	ack_APIC_irq();
}

static int apic_set_affinity(struct irq_data *irq_data,
			     const struct cpumask *dest, bool force)
{
	struct apic_chip_data *data = irq_data->chip_data;
	int err, irq = irq_data->irq;

	if (!config_enabled(CONFIG_SMP))
		return -EPERM;

	if (!cpumask_intersects(dest, cpu_online_mask))
		return -EINVAL;

	err = assign_irq_vector(irq, data, dest);
	return err ? err : IRQ_SET_MASK_OK;
}

static struct irq_chip lapic_controller = {
	.irq_ack		= apic_ack_edge,
	.irq_set_affinity	= apic_set_affinity,
	.irq_retrigger		= apic_retrigger_irq,
};
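
/*
 * Vector migration happens in two stages: the new vector and destination
 * are programmed first, while the old vector entries stay valid until the
 * interrupt has actually arrived on the new destination; only then is a
 * cleanup IPI sent to the cpus in old_domain to release the old vector.
 */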
#ifdef CONFIG_SMP
static void __send_cleanup_vector(struct apic_chip_data *data)
{
	cpumask_var_t cleanup_mask;

	if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
		unsigned int i;

		for_each_cpu_and(i, data->old_domain, cpu_online_mask)
			apic->send_IPI_mask(cpumask_of(i),
					    IRQ_MOVE_CLEANUP_VECTOR);
	} else {
		cpumask_and(cleanup_mask, data->old_domain, cpu_online_mask);
		apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
		free_cpumask_var(cleanup_mask);
	}
	data->move_in_progress = 0;
}

void send_cleanup_vector(struct irq_cfg *cfg)
{
	struct apic_chip_data *data;

	data = container_of(cfg, struct apic_chip_data, cfg);
	if (data->move_in_progress)
		__send_cleanup_vector(data);
}

asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
{
	unsigned vector, me;

	entering_ack_irq();

	/* Prevent vectors vanishing under us */
	raw_spin_lock(&vector_lock);

	me = smp_processor_id();
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		struct apic_chip_data *data;
		struct irq_desc *desc;
		unsigned int irr;

	retry:
		desc = __this_cpu_read(vector_irq[vector]);
		if (IS_ERR_OR_NULL(desc))
			continue;
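
		/*
		 * desc->lock is taken with trylock plus retry to avoid an
		 * ABBA deadlock: the locking order elsewhere is desc->lock
		 * first, then vector_lock, but here vector_lock is already
		 * held.
		 */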
		if (!raw_spin_trylock(&desc->lock)) {
			raw_spin_unlock(&vector_lock);
			cpu_relax();
			raw_spin_lock(&vector_lock);
			goto retry;
		}

		data = apic_chip_data(irq_desc_get_irq_data(desc));
		if (!data)
			goto unlock;

		/*
		 * Check if the irq migration is in progress. If so, we
		 * haven't received the cleanup request yet for this irq.
		 */
		if (data->move_in_progress)
			goto unlock;

		if (vector == data->cfg.vector &&
		    cpumask_test_cpu(me, data->domain))
			goto unlock;

		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		/*
		 * Check if the vector that needs to be cleaned up is
		 * registered at the cpu's IRR. If so, then this is not
		 * the best time to clean it up. Clean it up in the next
		 * attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
		 * to myself.
		 */
		if (irr & (1 << (vector % 32))) {
			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
			goto unlock;
		}
		__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
unlock:
		raw_spin_unlock(&desc->lock);
	}

	raw_spin_unlock(&vector_lock);

	exiting_irq();
}

static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
{
	unsigned me;
	struct apic_chip_data *data;

	data = container_of(cfg, struct apic_chip_data, cfg);
	if (likely(!data->move_in_progress))
		return;

	me = smp_processor_id();
	if (vector == data->cfg.vector && cpumask_test_cpu(me, data->domain))
		__send_cleanup_vector(data);
}

void irq_complete_move(struct irq_cfg *cfg)
{
	__irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
}
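
/*
 * irq_complete_move() recovers the vector number from pt_regs: the x86
 * interrupt entry stubs store the bitwise-complemented vector in orig_ax,
 * so ~orig_ax yields the vector this interrupt arrived on.
 */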
void irq_force_complete_move(int irq)
{
	struct irq_cfg *cfg = irq_cfg(irq);

	if (cfg)
		__irq_complete_move(cfg, cfg->vector);
}
#endif

static void __init print_APIC_field(int base)
{
	int i;

	printk(KERN_DEBUG);

	for (i = 0; i < 8; i++)
		pr_cont("%08x", apic_read(base + i*0x10));

	pr_cont("\n");
}

static void __init print_local_APIC(void *dummy)
{
	unsigned int i, v, ver, maxlvt;
	u64 icr;

	pr_debug("printing local APIC contents on CPU#%d/%d:\n",
		 smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	pr_info("... APIC ID:      %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	pr_info("... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	pr_debug("... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		if (!APIC_XAPIC(ver)) {
			v = apic_read(APIC_ARBPRI);
			pr_debug("... APIC ARBPRI: %08x (%02x)\n",
				 v, v & APIC_ARBPRI_MASK);
		}
		v = apic_read(APIC_PROCPRI);
		pr_debug("... APIC PROCPRI: %08x\n", v);
	}

	/*
	 * Remote read supported only in the 82489DX and local APIC for
	 * Pentium processors.
	 */
	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
		v = apic_read(APIC_RRR);
		pr_debug("... APIC RRR: %08x\n", v);
	}

	v = apic_read(APIC_LDR);
	pr_debug("... APIC LDR: %08x\n", v);
	if (!x2apic_enabled()) {
		v = apic_read(APIC_DFR);
		pr_debug("... APIC DFR: %08x\n", v);
	}
	v = apic_read(APIC_SPIV);
	pr_debug("... APIC SPIV: %08x\n", v);

	pr_debug("... APIC ISR field:\n");
	print_APIC_field(APIC_ISR);
	pr_debug("... APIC TMR field:\n");
	print_APIC_field(APIC_TMR);
	pr_debug("... APIC IRR field:\n");
	print_APIC_field(APIC_IRR);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		/* Due to the Pentium erratum 3AP. */
		if (maxlvt > 3)
			apic_write(APIC_ESR, 0);

		v = apic_read(APIC_ESR);
		pr_debug("... APIC ESR: %08x\n", v);
	}

	icr = apic_icr_read();
	pr_debug("... APIC ICR: %08x\n", (u32)icr);
	pr_debug("... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	pr_debug("... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {
		/* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		pr_debug("... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	pr_debug("... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	pr_debug("... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {
		/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		pr_debug("... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	pr_debug("... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	pr_debug("... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	pr_debug("... APIC TDCR: %08x\n", v);

	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
		v = apic_read(APIC_EFEAT);
		maxlvt = (v >> 16) & 0xff;
		pr_debug("... APIC EFEAT: %08x\n", v);
		v = apic_read(APIC_ECTRL);
		pr_debug("... APIC ECTRL: %08x\n", v);
		for (i = 0; i < maxlvt; i++) {
			v = apic_read(APIC_EILVTn(i));
			pr_debug("... APIC EILVT%d: %08x\n", i, v);
		}
	}
	pr_cont("\n");
}

static void __init print_local_APICs(int maxcpu)
{
	int cpu;

	if (!maxcpu)
		return;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu >= maxcpu)
			break;
		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
	}
	preempt_enable();
}

static void __init print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (!nr_legacy_irqs())
		return;

	pr_debug("\nprinting PIC contents\n");

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	pr_debug("... PIC  IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	pr_debug("... PIC  IRR: %04x\n", v);
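
	/*
	 * OCW3 0x0b switches the 8259A status readback to the in-service
	 * register; 0x0a switches it back to the request register.
	 */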
	outb(0x0b, 0xa0);
	outb(0x0b, 0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	outb(0x0a, 0xa0);
	outb(0x0a, 0x20);

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	pr_debug("... PIC  ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	pr_debug("... PIC ELCR: %04x\n", v);
}
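
/*
 * Usage: boot with "show_lapic=all" to dump every cpu's local APIC, or
 * "show_lapic=<n>" to limit the dump to the first n cpus; the default
 * of 1 dumps only the boot cpu.
 */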
static int show_lapic __initdata = 1;
static __init int setup_show_lapic(char *arg)
{
	int num = -1;

	if (strcmp(arg, "all") == 0) {
		show_lapic = CONFIG_NR_CPUS;
	} else {
		get_option(&arg, &num);
		if (num >= 0)
			show_lapic = num;
	}

	return 1;
}
__setup("show_lapic=", setup_show_lapic);

static int __init print_ICs(void)
{
	if (apic_verbosity == APIC_QUIET)
		return 0;

	print_PIC();

	/* don't print out if apic is not there */
	if (!cpu_has_apic && !apic_from_smp_config())
		return 0;

	print_local_APICs(show_lapic);
	print_IO_APICs();

	return 0;
}

late_initcall(print_ICs);