/*
 * Local APIC related interfaces to support IOAPIC, MSI, HT_IRQ etc.
 *
 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 *	Moved from arch/x86/kernel/apic/io_apic.c.
 * Jiang Liu <jiang.liu@linux.intel.com>
 *	Enable support of hierarchical irqdomains
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <asm/irqdomain.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/i8259.h>
#include <asm/desc.h>
#include <asm/irq_remapping.h>
struct apic_chip_data {
	struct irq_cfg		cfg;
	cpumask_var_t		domain;
	cpumask_var_t		old_domain;
	u8			move_in_progress : 1;
};
struct irq_domain *x86_vector_domain;
EXPORT_SYMBOL_GPL(x86_vector_domain);
static DEFINE_RAW_SPINLOCK(vector_lock);
static cpumask_var_t vector_cpumask, vector_searchmask, searched_cpumask;
static struct irq_chip lapic_controller;
#ifdef CONFIG_X86_IO_APIC
static struct apic_chip_data *legacy_irq_data[NR_IRQS_LEGACY];
#endif
void lock_vector_lock(void)
{
	/*
	 * Ensure that the set of online cpus does not change during
	 * assign_irq_vector().
	 */
	raw_spin_lock(&vector_lock);
}
void unlock_vector_lock(void)
{
	raw_spin_unlock(&vector_lock);
}
static struct apic_chip_data *apic_chip_data(struct irq_data *irq_data)
{
	if (!irq_data)
		return NULL;

	while (irq_data->parent_data)
		irq_data = irq_data->parent_data;

	return irq_data->chip_data;
}
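/*
 * Editorial note (not in the original file): with hierarchical
 * irqdomains, the irq_data seen by e.g. the IO-APIC or MSI layer is
 * stacked on top of a parent irq_data owned by the vector domain.
 * Walking ->parent_data to the bottom of the stack is what recovers the
 * vector domain's per-interrupt state:
 *
 *	MSI/IO-APIC irq_data -> vector domain irq_data
 *	                        (->chip_data == struct apic_chip_data)
 */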
struct irq_cfg *irqd_cfg(struct irq_data *irq_data)
{
	struct apic_chip_data *data = apic_chip_data(irq_data);

	return data ? &data->cfg : NULL;
}
EXPORT_SYMBOL_GPL(irqd_cfg);
struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irqd_cfg(irq_get_irq_data(irq));
}
static struct apic_chip_data *alloc_apic_chip_data(int node)
{
	struct apic_chip_data *data;

	data = kzalloc_node(sizeof(*data), GFP_KERNEL, node);
	if (!data)
		return NULL;
	if (!zalloc_cpumask_var_node(&data->domain, GFP_KERNEL, node))
		goto out_data;
	if (!zalloc_cpumask_var_node(&data->old_domain, GFP_KERNEL, node))
		goto out_domain;
	return data;
out_domain:
	free_cpumask_var(data->domain);
out_data:
	kfree(data);
	return NULL;
}
static void free_apic_chip_data(struct apic_chip_data *data)
{
	if (data) {
		free_cpumask_var(data->domain);
		free_cpumask_var(data->old_domain);
		kfree(data);
	}
}
static int __assign_irq_vector(int irq, struct apic_chip_data *d,
			       const struct cpumask *mask)
{
	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
	static int current_offset = VECTOR_OFFSET_START % 16;
	int cpu, vector;

	if (d->move_in_progress)
		return -EBUSY;

	/* Only try and allocate irqs on cpus that are present */
	cpumask_clear(d->old_domain);
	cpumask_clear(searched_cpumask);
	cpu = cpumask_first_and(mask, cpu_online_mask);
	while (cpu < nr_cpu_ids) {
		int new_cpu, offset;

		/* Get the possible target cpus for @mask/@cpu from the apic */
		apic->vector_allocation_domain(cpu, vector_cpumask, mask);

		/*
		 * Clear the offline cpus from @vector_cpumask for searching
		 * and verify whether the result overlaps with @mask. If true,
		 * then the call to apic->cpu_mask_to_apicid_and() will
		 * succeed as well. If not, no point in trying to find a
		 * vector in this mask.
		 */
		cpumask_and(vector_searchmask, vector_cpumask, cpu_online_mask);
		if (!cpumask_intersects(vector_searchmask, mask))
			goto next_cpu;

		if (cpumask_subset(vector_cpumask, d->domain)) {
			if (cpumask_equal(vector_cpumask, d->domain))
				goto success;
			/*
			 * Mark the cpus which are no longer in the mask for
			 * cleanup.
			 */
			cpumask_andnot(d->old_domain, d->domain, vector_cpumask);
			vector = d->cfg.vector;
			goto update;
		}

		vector = current_vector;
		offset = current_offset;
next:
		vector += 16;
		if (vector >= first_system_vector) {
			offset = (offset + 1) % 16;
			vector = FIRST_EXTERNAL_VECTOR + offset;
		}

		/* If the search wrapped around, try the next cpu */
		if (unlikely(current_vector == vector))
			goto next_cpu;

		if (test_bit(vector, used_vectors))
			goto next;

		for_each_cpu(new_cpu, vector_searchmask) {
			if (!IS_ERR_OR_NULL(per_cpu(vector_irq, new_cpu)[vector]))
				goto next;
		}
		/* Found one! */
		current_vector = vector;
		current_offset = offset;
		/* Schedule the old vector for cleanup on all cpus */
		if (d->cfg.vector)
			cpumask_copy(d->old_domain, d->domain);
		for_each_cpu(new_cpu, vector_searchmask)
			per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq);
		goto update;

next_cpu:
		/*
		 * We exclude the current @vector_cpumask from the requested
		 * @mask and try again with the next online cpu in the
		 * result. We cannot modify @mask, so we use @vector_cpumask
		 * as a temporary buffer here as it will be reassigned when
		 * calling apic->vector_allocation_domain() above.
		 */
		cpumask_or(searched_cpumask, searched_cpumask, vector_cpumask);
		cpumask_andnot(vector_cpumask, mask, searched_cpumask);
		cpu = cpumask_first_and(vector_cpumask, cpu_online_mask);
		continue;
	}
	return -ENOSPC;

update:
	/*
	 * Exclude offline cpus from the cleanup mask and set the
	 * move_in_progress flag when the result is not empty.
	 */
	cpumask_and(d->old_domain, d->old_domain, cpu_online_mask);
	d->move_in_progress = !cpumask_empty(d->old_domain);
	d->cfg.vector = vector;
	cpumask_copy(d->domain, vector_cpumask);
success:
	/*
	 * Cache destination APIC IDs into cfg->dest_apicid. This cannot fail
	 * as we already established, that mask & d->domain & cpu_online_mask
	 * is not empty.
	 */
	BUG_ON(apic->cpu_mask_to_apicid_and(mask, d->domain,
					    &d->cfg.dest_apicid));
	return 0;
}
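/*
 * Editorial sketch (added for exposition, not part of the original file):
 * the local APIC derives an interrupt's priority level from the upper
 * nibble of its vector, which is why the search above steps by 16 --
 * consecutive allocations land in different priority levels.
 */
static inline unsigned int vector_to_prio_level(unsigned int vector)
{
	/* e.g. vector 0x61 -> level 0x6, vector 0x71 -> level 0x7 */
	return vector >> 4;
}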
static int assign_irq_vector(int irq, struct apic_chip_data *data,
			     const struct cpumask *mask)
{
	int err;
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	err = __assign_irq_vector(irq, data, mask);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return err;
}
static int assign_irq_vector_policy(int irq, int node,
				    struct apic_chip_data *data,
				    struct irq_alloc_info *info)
{
	if (info && info->mask)
		return assign_irq_vector(irq, data, info->mask);
	if (node != NUMA_NO_NODE &&
	    assign_irq_vector(irq, data, cpumask_of_node(node)) == 0)
		return 0;
	return assign_irq_vector(irq, data, apic->target_cpus());
}
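/*
 * Editorial note (not in the original file): the policy above is a
 * three-step fallback -- an explicit mask supplied by the caller wins,
 * then the cpus local to @node, then apic->target_cpus(). A hypothetical
 * caller with no placement constraints would simply pass a NULL info:
 *
 *	err = assign_irq_vector_policy(irq, NUMA_NO_NODE, data, NULL);
 */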
static void clear_irq_vector(int irq, struct apic_chip_data *data)
{
	struct irq_desc *desc;
	int cpu, vector;

	BUG_ON(!data->cfg.vector);

	vector = data->cfg.vector;
	for_each_cpu_and(cpu, data->domain, cpu_online_mask)
		per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;

	data->cfg.vector = 0;
	cpumask_clear(data->domain);

	if (likely(!data->move_in_progress))
		return;

	desc = irq_to_desc(irq);
	for_each_cpu_and(cpu, data->old_domain, cpu_online_mask) {
		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
		     vector++) {
			if (per_cpu(vector_irq, cpu)[vector] != desc)
				continue;
			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
			break;
		}
	}
	data->move_in_progress = 0;
}
void init_irq_alloc_info(struct irq_alloc_info *info,
			 const struct cpumask *mask)
{
	memset(info, 0, sizeof(*info));
	info->mask = mask;
}

void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
{
	if (src)
		*dst = *src;
	else
		memset(dst, 0, sizeof(*dst));
}
static void x86_vector_free_irqs(struct irq_domain *domain,
				 unsigned int virq, unsigned int nr_irqs)
{
	struct apic_chip_data *apic_data;
	struct irq_data *irq_data;
	unsigned long flags;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(x86_vector_domain, virq + i);
		if (irq_data && irq_data->chip_data) {
			raw_spin_lock_irqsave(&vector_lock, flags);
			clear_irq_vector(virq + i, irq_data->chip_data);
			apic_data = irq_data->chip_data;
			irq_domain_reset_irq_data(irq_data);
			raw_spin_unlock_irqrestore(&vector_lock, flags);
			free_apic_chip_data(apic_data);
#ifdef CONFIG_X86_IO_APIC
			if (virq + i < nr_legacy_irqs())
				legacy_irq_data[virq + i] = NULL;
#endif
		}
	}
}
static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	struct apic_chip_data *data;
	struct irq_data *irq_data;
	int i, err, node;

	if (disable_apic)
		return -ENXIO;

	/* Currently vector allocator can't guarantee contiguous allocations */
	if ((info->flags & X86_IRQ_ALLOC_CONTIGUOUS_VECTORS) && nr_irqs > 1)
		return -ENOSYS;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(domain, virq + i);
		BUG_ON(!irq_data);
		node = irq_data_get_node(irq_data);
#ifdef CONFIG_X86_IO_APIC
		if (virq + i < nr_legacy_irqs() && legacy_irq_data[virq + i])
			data = legacy_irq_data[virq + i];
		else
#endif
			data = alloc_apic_chip_data(node);
		if (!data) {
			err = -ENOMEM;
			goto error;
		}

		irq_data->chip = &lapic_controller;
		irq_data->chip_data = data;
		irq_data->hwirq = virq + i;
		err = assign_irq_vector_policy(virq + i, node, data, info);
		if (err)
			goto error;
	}

	return 0;

error:
	x86_vector_free_irqs(domain, virq, i + 1);
	return err;
}
static const struct irq_domain_ops x86_vector_domain_ops = {
	.alloc	= x86_vector_alloc_irqs,
	.free	= x86_vector_free_irqs,
};
int __init arch_probe_nr_irqs(void)
{
	int nr;

	if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
		nr_irqs = NR_VECTORS * nr_cpu_ids;

	nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
	/*
	 * for MSI and HT dyn irq
	 */
	if (gsi_top <= NR_IRQS_LEGACY)
		nr += 8 * nr_cpu_ids;
	else
		nr += gsi_top * 16;
#endif
	if (nr < nr_irqs)
		nr_irqs = nr;

	/*
	 * We don't know if PIC is present at this point so we need to do
	 * probe() to get the right number of legacy IRQs.
	 */
	return legacy_pic->probe();
}
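/*
 * Editorial worked example (illustrative numbers, not from the original
 * file): with nr_cpu_ids == 4, gsi_top == 16 and 16 legacy IRQs,
 *
 *	nr = (16 + 16) + 8 * 4 = 64
 *
 * and, because gsi_top <= NR_IRQS_LEGACY, another 8 * 4 = 32 is added
 * for MSI/HT dynamic irqs, so nr_irqs is capped at 96 if that is below
 * its current value.
 */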
#ifdef CONFIG_X86_IO_APIC
static void init_legacy_irqs(void)
{
	int i, node = cpu_to_node(0);
	struct apic_chip_data *data;

	/*
	 * For legacy IRQ's, start with assigning irq0 to irq15 to
	 * ISA_IRQ_VECTOR(i) for all cpu's.
	 */
	for (i = 0; i < nr_legacy_irqs(); i++) {
		data = legacy_irq_data[i] = alloc_apic_chip_data(node);
		BUG_ON(!data);

		data->cfg.vector = ISA_IRQ_VECTOR(i);
		cpumask_setall(data->domain);
		irq_set_chip_data(i, data);
	}
}
#else
static void init_legacy_irqs(void) { }
#endif
int __init arch_early_irq_init(void)
{
	init_legacy_irqs();

	x86_vector_domain = irq_domain_add_tree(NULL, &x86_vector_domain_ops,
						NULL);
	BUG_ON(x86_vector_domain == NULL);
	irq_set_default_host(x86_vector_domain);

	arch_init_msi_domain(x86_vector_domain);
	arch_init_htirq_domain(x86_vector_domain);

	BUG_ON(!alloc_cpumask_var(&vector_cpumask, GFP_KERNEL));
	BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));
	BUG_ON(!alloc_cpumask_var(&searched_cpumask, GFP_KERNEL));

	return arch_early_ioapic_init();
}
/* Initialize vector_irq on a new cpu */
static void __setup_vector_irq(int cpu)
{
	struct apic_chip_data *data;
	struct irq_desc *desc;
	int irq, vector;

	/* Mark the inuse vectors */
	for_each_irq_desc(irq, desc) {
		struct irq_data *idata = irq_desc_get_irq_data(desc);

		data = apic_chip_data(idata);
		if (!data || !cpumask_test_cpu(cpu, data->domain))
			continue;
		vector = data->cfg.vector;
		per_cpu(vector_irq, cpu)[vector] = desc;
	}
	/* Mark the free vectors */
	for (vector = 0; vector < NR_VECTORS; ++vector) {
		desc = per_cpu(vector_irq, cpu)[vector];
		if (IS_ERR_OR_NULL(desc))
			continue;

		data = apic_chip_data(irq_desc_get_irq_data(desc));
		if (!cpumask_test_cpu(cpu, data->domain))
			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
	}
}
/*
 * Setup the vector to irq mappings. Must be called with vector_lock held.
 */
void setup_vector_irq(int cpu)
{
	int irq;

	lockdep_assert_held(&vector_lock);
	/*
	 * On most of the platforms, legacy PIC delivers the interrupts on the
	 * boot cpu. But there are certain platforms where PIC interrupts are
	 * delivered to multiple cpu's. If the legacy IRQ is handled by the
	 * legacy PIC, for the new cpu that is coming online, setup the static
	 * legacy vector to irq mapping:
	 */
	for (irq = 0; irq < nr_legacy_irqs(); irq++)
		per_cpu(vector_irq, cpu)[ISA_IRQ_VECTOR(irq)] = irq_to_desc(irq);

	__setup_vector_irq(cpu);
}
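/*
 * Editorial sketch (not in the original file): each cpu owns a
 * NR_VECTORS-sized vector_irq table mapping a hardware vector to the
 * irq_desc that handles it. Conceptually, the low-level entry code
 * resolves an incoming vector roughly like this hypothetical helper:
 */
static inline struct irq_desc *__vector_to_desc_sketch(unsigned int vector)
{
	/* entries are irq_desc pointers or markers such as VECTOR_UNUSED */
	return __this_cpu_read(vector_irq[vector]);
}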
static int apic_retrigger_irq(struct irq_data *irq_data)
{
	struct apic_chip_data *data = apic_chip_data(irq_data);
	unsigned long flags;
	int cpu;

	raw_spin_lock_irqsave(&vector_lock, flags);
	cpu = cpumask_first_and(data->domain, cpu_online_mask);
	apic->send_IPI_mask(cpumask_of(cpu), data->cfg.vector);
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	return 1;
}
void apic_ack_edge(struct irq_data *data)
{
	irq_complete_move(irqd_cfg(data));
	irq_move_irq(data);
	ack_APIC_irq();
}
static int apic_set_affinity(struct irq_data *irq_data,
			     const struct cpumask *dest, bool force)
{
	struct apic_chip_data *data = irq_data->chip_data;
	int err, irq = irq_data->irq;

	if (!config_enabled(CONFIG_SMP))
		return -EPERM;

	if (!cpumask_intersects(dest, cpu_online_mask))
		return -EINVAL;

	err = assign_irq_vector(irq, data, dest);
	return err ? err : IRQ_SET_MASK_OK;
}
static struct irq_chip lapic_controller = {
	.irq_ack		= apic_ack_edge,
	.irq_set_affinity	= apic_set_affinity,
	.irq_retrigger		= apic_retrigger_irq,
};
static void __send_cleanup_vector(struct apic_chip_data *data)
{
	cpumask_var_t cleanup_mask;

	raw_spin_lock(&vector_lock);
	data->move_in_progress = 0;
	if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
		unsigned int i;

		for_each_cpu_and(i, data->old_domain, cpu_online_mask)
			apic->send_IPI_mask(cpumask_of(i),
					    IRQ_MOVE_CLEANUP_VECTOR);
	} else {
		cpumask_and(cleanup_mask, data->old_domain, cpu_online_mask);
		apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
		free_cpumask_var(cleanup_mask);
	}
	raw_spin_unlock(&vector_lock);
}
void send_cleanup_vector(struct irq_cfg *cfg)
{
	struct apic_chip_data *data;

	data = container_of(cfg, struct apic_chip_data, cfg);
	if (data->move_in_progress)
		__send_cleanup_vector(data);
}
asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
{
	unsigned vector, me;

	entering_ack_irq();

	/* Prevent vectors vanishing under us */
	raw_spin_lock(&vector_lock);

	me = smp_processor_id();
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		struct apic_chip_data *data;
		struct irq_desc *desc;
		unsigned int irr;

	retry:
		desc = __this_cpu_read(vector_irq[vector]);
		if (IS_ERR_OR_NULL(desc))
			continue;

		if (!raw_spin_trylock(&desc->lock)) {
			raw_spin_unlock(&vector_lock);
			cpu_relax();
			raw_spin_lock(&vector_lock);
			goto retry;
		}

		data = apic_chip_data(irq_desc_get_irq_data(desc));
		if (!data)
			goto unlock;

		/*
		 * Check if the irq migration is in progress. If so, we
		 * haven't received the cleanup request yet for this irq.
		 */
		if (data->move_in_progress)
			goto unlock;

		if (vector == data->cfg.vector &&
		    cpumask_test_cpu(me, data->domain))
			goto unlock;

		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		/*
		 * Check if the vector that needs to be cleaned up is
		 * registered at the cpu's IRR. If so, then this is not
		 * the best time to clean it up. Let's clean it up in the
		 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
		 * to myself.
		 */
		if (irr & (1 << (vector % 32))) {
			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
			goto unlock;
		}
		__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
	unlock:
		raw_spin_unlock(&desc->lock);
	}

	raw_spin_unlock(&vector_lock);

	exiting_irq();
}
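/*
 * Editorial sketch (not in the original file): the IRR is exposed as
 * eight 32-bit registers spaced 0x10 apart in the APIC register space,
 * so the read above locates vector V at register APIC_IRR +
 * (V / 32) * 0x10, bit V % 32. A hypothetical helper making that
 * explicit:
 */
static inline bool __vector_pending_in_irr_sketch(unsigned int vector)
{
	unsigned int irr = apic_read(APIC_IRR + (vector / 32) * 0x10);

	return irr & (1U << (vector % 32));
}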
static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
{
	unsigned me;
	struct apic_chip_data *data;

	data = container_of(cfg, struct apic_chip_data, cfg);
	if (likely(!data->move_in_progress))
		return;

	me = smp_processor_id();
	if (vector == data->cfg.vector && cpumask_test_cpu(me, data->domain))
		__send_cleanup_vector(data);
}
void irq_complete_move(struct irq_cfg *cfg)
{
	__irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
}
void irq_force_complete_move(int irq)
{
	struct irq_cfg *cfg = irq_cfg(irq);

	if (cfg)
		__irq_complete_move(cfg, cfg->vector);
}
static void __init print_APIC_field(int base)
{
	int i;

	printk(KERN_DEBUG);

	for (i = 0; i < 8; i++)
		pr_cont("%08x", apic_read(base + i*0x10));

	pr_cont("\n");
}
static void __init print_local_APIC(void *dummy)
{
	unsigned int i, v, ver, maxlvt;
	u64 icr;

	pr_debug("printing local APIC contents on CPU#%d/%d:\n",
		 smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	pr_info("... APIC ID:      %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	pr_info("... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	pr_debug("... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		if (!APIC_XAPIC(ver)) {
			v = apic_read(APIC_ARBPRI);
			pr_debug("... APIC ARBPRI: %08x (%02x)\n",
				 v, v & APIC_ARBPRI_MASK);
		}
		v = apic_read(APIC_PROCPRI);
		pr_debug("... APIC PROCPRI: %08x\n", v);
	}

	/*
	 * Remote read supported only in the 82489DX and local APIC for
	 * Pentium processors.
	 */
	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
		v = apic_read(APIC_RRR);
		pr_debug("... APIC RRR: %08x\n", v);
	}

	v = apic_read(APIC_LDR);
	pr_debug("... APIC LDR: %08x\n", v);
	if (!x2apic_enabled()) {
		v = apic_read(APIC_DFR);
		pr_debug("... APIC DFR: %08x\n", v);
	}
	v = apic_read(APIC_SPIV);
	pr_debug("... APIC SPIV: %08x\n", v);

	pr_debug("... APIC ISR field:\n");
	print_APIC_field(APIC_ISR);
	pr_debug("... APIC TMR field:\n");
	print_APIC_field(APIC_TMR);
	pr_debug("... APIC IRR field:\n");
	print_APIC_field(APIC_IRR);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		/* Due to the Pentium erratum 3AP. */
		if (maxlvt > 3)
			apic_write(APIC_ESR, 0);

		v = apic_read(APIC_ESR);
		pr_debug("... APIC ESR: %08x\n", v);
	}

	icr = apic_icr_read();
	pr_debug("... APIC ICR: %08x\n", (u32)icr);
	pr_debug("... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	pr_debug("... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {
		/* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		pr_debug("... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	pr_debug("... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	pr_debug("... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {
		/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		pr_debug("... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	pr_debug("... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	pr_debug("... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	pr_debug("... APIC TDCR: %08x\n", v);

	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
		v = apic_read(APIC_EFEAT);
		maxlvt = (v >> 16) & 0xff;
		pr_debug("... APIC EFEAT: %08x\n", v);
		v = apic_read(APIC_ECTRL);
		pr_debug("... APIC ECTRL: %08x\n", v);
		for (i = 0; i < maxlvt; i++) {
			v = apic_read(APIC_EILVTn(i));
			pr_debug("... APIC EILVT%d: %08x\n", i, v);
		}
	}
	pr_cont("\n");
}
static void __init print_local_APICs(int maxcpu)
{
	int cpu;

	if (!maxcpu)
		return;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu >= maxcpu)
			break;
		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
	}
	preempt_enable();
}
static void __init print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (!nr_legacy_irqs())
		return;

	pr_debug("\nprinting PIC contents\n");

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	pr_debug("... PIC  IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	pr_debug("... PIC  IRR: %04x\n", v);

	/* OCW3: select the ISR for the next reads, then switch back to IRR */
	outb(0x0b, 0xa0);
	outb(0x0b, 0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	outb(0x0a, 0xa0);
	outb(0x0a, 0x20);

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	pr_debug("... PIC  ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	pr_debug("... PIC ELCR: %04x\n", v);
}
static int show_lapic __initdata = 1;
static __init int setup_show_lapic(char *arg)
{
	int num = -1;

	if (strcmp(arg, "all") == 0) {
		show_lapic = CONFIG_NR_CPUS;
	} else {
		get_option(&arg, &num);
		if (num >= 1)
			show_lapic = num;
	}

	return 1;
}
__setup("show_lapic=", setup_show_lapic);
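/*
 * Editorial usage note (not in the original file): boot with
 * "show_lapic=all" to dump every cpu's local APIC, or "show_lapic=<n>"
 * to limit the dump to the first n cpus (the default is 1).
 */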
static int __init print_ICs(void)
{
	if (apic_verbosity == APIC_QUIET)
		return 0;

	print_PIC();

	/* don't print out if apic is not there */
	if (!cpu_has_apic && !apic_from_smp_config())
		return 0;

	print_local_APICs(show_lapic);
	print_IO_APICs();

	return 0;
}

late_initcall(print_ICs);