/*
 * Local APIC related interfaces to support IOAPIC, MSI, HT_IRQ etc.
 *
 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 *	Moved from arch/x86/kernel/apic/io_apic.c.
 * Jiang Liu <jiang.liu@linux.intel.com>
 *	Enable support of hierarchical irqdomains
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <asm/irqdomain.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/i8259.h>
#include <asm/desc.h>
#include <asm/irq_remapping.h>
struct apic_chip_data {
	struct irq_cfg		cfg;		/* vector + cached destination APIC ID */
	cpumask_var_t		domain;		/* cpus the vector is installed on */
	cpumask_var_t		old_domain;	/* cpus still referencing the old vector */
	u8			move_in_progress : 1;
};
struct irq_domain *x86_vector_domain;
EXPORT_SYMBOL_GPL(x86_vector_domain);
static DEFINE_RAW_SPINLOCK(vector_lock);
static cpumask_var_t vector_cpumask, vector_searchmask, searched_cpumask;
static struct irq_chip lapic_controller;
#ifdef CONFIG_X86_IO_APIC
static struct apic_chip_data *legacy_irq_data[NR_IRQS_LEGACY];
#endif
void lock_vector_lock(void)
{
	/*
	 * Prevent the set of online cpus from changing while
	 * assign_irq_vector() is running.
	 */
	raw_spin_lock(&vector_lock);
}
void unlock_vector_lock(void)
{
	raw_spin_unlock(&vector_lock);
}
static struct apic_chip_data *apic_chip_data(struct irq_data *irq_data)
{
	/* Walk to the irq_data of the root vector domain */
	while (irq_data->parent_data)
		irq_data = irq_data->parent_data;

	return irq_data->chip_data;
}
struct irq_cfg *irqd_cfg(struct irq_data *irq_data)
{
	struct apic_chip_data *data = apic_chip_data(irq_data);

	return data ? &data->cfg : NULL;
}
EXPORT_SYMBOL_GPL(irqd_cfg);
struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irqd_cfg(irq_get_irq_data(irq));
}
static struct apic_chip_data *alloc_apic_chip_data(int node)
{
	struct apic_chip_data *data;

	data = kzalloc_node(sizeof(*data), GFP_KERNEL, node);
	if (!data)
		return NULL;
	if (!zalloc_cpumask_var_node(&data->domain, GFP_KERNEL, node))
		goto out_data;
	if (!zalloc_cpumask_var_node(&data->old_domain, GFP_KERNEL, node))
		goto out_domain;
	return data;
out_domain:
	free_cpumask_var(data->domain);
out_data:
	kfree(data);
	return NULL;
}
static void free_apic_chip_data(struct apic_chip_data *data)
{
	if (data) {
		free_cpumask_var(data->domain);
		free_cpumask_var(data->old_domain);
		kfree(data);
	}
}
static int __assign_irq_vector(int irq, struct apic_chip_data *d,
			       const struct cpumask *mask)
{
	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
	static int current_offset = VECTOR_OFFSET_START % 16;
	int cpu, vector;

	/*
	 * If there is still a move in progress or the previous move has not
	 * been cleaned up completely, tell the caller to come back later.
	 */
	if (d->move_in_progress ||
	    cpumask_intersects(d->old_domain, cpu_online_mask))
		return -EBUSY;

	/* Only try to allocate irqs on cpus that are present */
	cpumask_clear(d->old_domain);
	cpumask_clear(searched_cpumask);
	cpu = cpumask_first_and(mask, cpu_online_mask);
	while (cpu < nr_cpu_ids) {
		int new_cpu, offset;

		/* Get the possible target cpus for @mask/@cpu from the apic */
		apic->vector_allocation_domain(cpu, vector_cpumask, mask);

		/*
		 * Clear the offline cpus from @vector_cpumask for searching
		 * and verify whether the result overlaps with @mask. If true,
		 * then the call to apic->cpu_mask_to_apicid_and() will
		 * succeed as well. If not, no point in trying to find a
		 * vector in this mask.
		 */
		cpumask_and(vector_searchmask, vector_cpumask, cpu_online_mask);
		if (!cpumask_intersects(vector_searchmask, mask))
			goto next_cpu;

		if (cpumask_subset(vector_cpumask, d->domain)) {
			if (cpumask_equal(vector_cpumask, d->domain))
				goto success;
			/*
			 * Mark the cpus which are no longer in the mask for
			 * cleanup.
			 */
			cpumask_andnot(d->old_domain, d->domain,
				       vector_cpumask);
			vector = d->cfg.vector;
			goto update;
		}

		vector = current_vector;
		offset = current_offset;
next:
		vector += 16;
		if (vector >= first_system_vector) {
			offset = (offset + 1) % 16;
			vector = FIRST_EXTERNAL_VECTOR + offset;
		}

		/* If the search wrapped around, try the next cpu */
		if (unlikely(current_vector == vector))
			goto next_cpu;

		if (test_bit(vector, used_vectors))
			goto next;

		for_each_cpu(new_cpu, vector_searchmask) {
			if (!IS_ERR_OR_NULL(per_cpu(vector_irq, new_cpu)[vector]))
				goto next;
		}
		/* Found one! */
		current_vector = vector;
		current_offset = offset;
		/* Schedule the old vector for cleanup on all cpus */
		if (d->cfg.vector)
			cpumask_copy(d->old_domain, d->domain);
		for_each_cpu(new_cpu, vector_searchmask)
			per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq);
		goto update;

next_cpu:
		/*
		 * We exclude the current @vector_cpumask from the requested
		 * @mask and try again with the next online cpu in the
		 * result. We cannot modify @mask, so we use @vector_cpumask
		 * as a temporary buffer here as it will be reassigned when
		 * calling apic->vector_allocation_domain() above.
		 */
		cpumask_or(searched_cpumask, searched_cpumask, vector_cpumask);
		cpumask_andnot(vector_cpumask, mask, searched_cpumask);
		cpu = cpumask_first_and(vector_cpumask, cpu_online_mask);
		continue;
	}
	return -ENOSPC;

update:
	/*
	 * Exclude offline cpus from the cleanup mask and set the
	 * move_in_progress flag when the result is not empty.
	 */
	cpumask_and(d->old_domain, d->old_domain, cpu_online_mask);
	d->move_in_progress = !cpumask_empty(d->old_domain);
	d->cfg.vector = vector;
	cpumask_copy(d->domain, vector_cpumask);
success:
	/*
	 * Cache destination APIC IDs into cfg->dest_apicid. This cannot fail
	 * as we already established that mask & d->domain & cpu_online_mask
	 * is not empty.
	 */
	BUG_ON(apic->cpu_mask_to_apicid_and(mask, d->domain,
					    &d->cfg.dest_apicid));
	return 0;
}
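/*
 * A worked example of the search above, assuming FIRST_EXTERNAL_VECTOR
 * is 0x20 and the offset starts at 1: the candidates are 0x21, 0x31,
 * 0x41, ... in steps of 16, so the low nibble (@offset) stays fixed
 * while the priority level (vector >> 4) changes; 0x41 lands in level
 * 4. Only when the scan wraps does the offset rotate (to 2, yielding
 * 0x22, 0x32, ...). Vectors with low nibble 0 include 0x80, which is
 * why the used_vectors test above protects the syscall gate when it
 * is in use.
 */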
static int assign_irq_vector(int irq, struct apic_chip_data *data,
			     const struct cpumask *mask)
{
	unsigned long flags;
	int err;

	raw_spin_lock_irqsave(&vector_lock, flags);
	err = __assign_irq_vector(irq, data, mask);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return err;
}
static int assign_irq_vector_policy(int irq, int node,
				    struct apic_chip_data *data,
				    struct irq_alloc_info *info)
{
	if (info && info->mask)
		return assign_irq_vector(irq, data, info->mask);
	if (node != NUMA_NO_NODE &&
	    assign_irq_vector(irq, data, cpumask_of_node(node)) == 0)
		return 0;
	return assign_irq_vector(irq, data, apic->target_cpus());
}
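/*
 * Allocation policy: an explicit mask supplied by the caller wins,
 * then the cpus of the requested NUMA node are tried, and the apic
 * driver's default target cpus serve as the final fallback.
 */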
static void clear_irq_vector(int irq, struct apic_chip_data *data)
{
	struct irq_desc *desc;
	int cpu, vector;

	BUG_ON(!data->cfg.vector);

	vector = data->cfg.vector;
	for_each_cpu_and(cpu, data->domain, cpu_online_mask)
		per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;

	data->cfg.vector = 0;
	cpumask_clear(data->domain);

	/*
	 * If move is in progress or the old_domain mask is not empty,
	 * i.e. the cleanup IPI has not been processed yet, we need to remove
	 * the old references to desc from all cpus vector tables.
	 */
	if (!data->move_in_progress && cpumask_empty(data->old_domain))
		return;

	desc = irq_to_desc(irq);
	for_each_cpu_and(cpu, data->old_domain, cpu_online_mask) {
		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
		     vector++) {
			if (per_cpu(vector_irq, cpu)[vector] != desc)
				continue;
			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
			break;
		}
	}
	data->move_in_progress = 0;
}
void init_irq_alloc_info(struct irq_alloc_info *info,
			 const struct cpumask *mask)
{
	memset(info, 0, sizeof(*info));
	info->mask = mask;
}
void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
{
	if (src)
		*dst = *src;
	else
		memset(dst, 0, sizeof(*dst));
}
static void x86_vector_free_irqs(struct irq_domain *domain,
				 unsigned int virq, unsigned int nr_irqs)
{
	struct apic_chip_data *apic_data;
	struct irq_data *irq_data;
	unsigned long flags;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(x86_vector_domain, virq + i);
		if (irq_data && irq_data->chip_data) {
			raw_spin_lock_irqsave(&vector_lock, flags);
			clear_irq_vector(virq + i, irq_data->chip_data);
			apic_data = irq_data->chip_data;
			irq_domain_reset_irq_data(irq_data);
			raw_spin_unlock_irqrestore(&vector_lock, flags);
			free_apic_chip_data(apic_data);
#ifdef CONFIG_X86_IO_APIC
			if (virq + i < nr_legacy_irqs())
				legacy_irq_data[virq + i] = NULL;
#endif
		}
	}
}
static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	struct apic_chip_data *data;
	struct irq_data *irq_data;
	int i, err, node;

	if (disable_apic)
		return -ENXIO;

	/* Currently vector allocator can't guarantee contiguous allocations */
	if ((info->flags & X86_IRQ_ALLOC_CONTIGUOUS_VECTORS) && nr_irqs > 1)
		return -ENOSYS;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(domain, virq + i);
		BUG_ON(!irq_data);
		node = irq_data_get_node(irq_data);
#ifdef CONFIG_X86_IO_APIC
		if (virq + i < nr_legacy_irqs() && legacy_irq_data[virq + i])
			data = legacy_irq_data[virq + i];
		else
#endif
			data = alloc_apic_chip_data(node);
		if (!data) {
			err = -ENOMEM;
			goto error;
		}

		irq_data->chip = &lapic_controller;
		irq_data->chip_data = data;
		irq_data->hwirq = virq + i;
		err = assign_irq_vector_policy(virq + i, node, data, info);
		if (err)
			goto error;
	}

	return 0;

error:
	x86_vector_free_irqs(domain, virq, i + 1);
	return err;
}
static const struct irq_domain_ops x86_vector_domain_ops = {
	.alloc	= x86_vector_alloc_irqs,
	.free	= x86_vector_free_irqs,
};
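/*
 * x86_vector_domain is the root of the irqdomain hierarchy; IOAPIC,
 * MSI and HT domains are created with it as their parent, so their
 * .alloc callbacks delegate vector allocation here. A minimal sketch
 * of such a child .alloc, for illustration only:
 *
 *	static int child_alloc(struct irq_domain *d, unsigned int virq,
 *			       unsigned int nr_irqs, void *arg)
 *	{
 *		int ret;
 *
 *		// First get vectors from the parent (this file)
 *		ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, arg);
 *		if (ret < 0)
 *			return ret;
 *		// ... then program the child hardware (RTE, MSI msg, ...)
 *		return 0;
 *	}
 */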
int __init arch_probe_nr_irqs(void)
{
	int nr;

	if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
		nr_irqs = NR_VECTORS * nr_cpu_ids;

	nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
	/*
	 * for MSI and HT dynamic irqs
	 */
	if (gsi_top <= NR_IRQS_LEGACY)
		nr += 8 * nr_cpu_ids;
	else
		nr += gsi_top * 16;
#endif
	if (nr < nr_irqs)
		nr_irqs = nr;

	/*
	 * We don't know if the PIC is present at this point, so we need to do
	 * probe() to get the right number of legacy IRQs.
	 */
	return legacy_pic->probe();
}
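/*
 * Sizing example, assuming gsi_top == 24, 16 legacy irqs and 8
 * possible cpus: nr = (24 + 16) + 8 * 8 = 104, and since gsi_top
 * exceeds NR_IRQS_LEGACY the MSI/HT estimate adds 24 * 16 = 384,
 * giving 488. nr_irqs is lowered to that estimate only if it is
 * smaller than the NR_VECTORS * nr_cpu_ids cap applied first.
 */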
#ifdef CONFIG_X86_IO_APIC
static void init_legacy_irqs(void)
{
	int i, node = cpu_to_node(0);
	struct apic_chip_data *data;

	/*
	 * For legacy IRQs, start with assigning irq0 to irq15 to
	 * ISA_IRQ_VECTOR(i) for all cpus.
	 */
	for (i = 0; i < nr_legacy_irqs(); i++) {
		data = legacy_irq_data[i] = alloc_apic_chip_data(node);
		BUG_ON(!data);

		data->cfg.vector = ISA_IRQ_VECTOR(i);
		cpumask_setall(data->domain);
		irq_set_chip_data(i, data);
	}
}
#else
static void init_legacy_irqs(void) { }
#endif
int __init arch_early_irq_init(void)
{
	init_legacy_irqs();

	x86_vector_domain = irq_domain_add_tree(NULL, &x86_vector_domain_ops,
						NULL);
	BUG_ON(x86_vector_domain == NULL);
	irq_set_default_host(x86_vector_domain);

	arch_init_msi_domain(x86_vector_domain);
	arch_init_htirq_domain(x86_vector_domain);

	BUG_ON(!alloc_cpumask_var(&vector_cpumask, GFP_KERNEL));
	BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));
	BUG_ON(!alloc_cpumask_var(&searched_cpumask, GFP_KERNEL));

	return arch_early_ioapic_init();
}
/* Initialize vector_irq on a new cpu */
static void __setup_vector_irq(int cpu)
{
	struct apic_chip_data *data;
	struct irq_desc *desc;
	int irq, vector;

	/* Mark the inuse vectors */
	for_each_irq_desc(irq, desc) {
		struct irq_data *idata = irq_desc_get_irq_data(desc);

		data = apic_chip_data(idata);
		if (!data || !cpumask_test_cpu(cpu, data->domain))
			continue;
		vector = data->cfg.vector;
		per_cpu(vector_irq, cpu)[vector] = desc;
	}
	/* Mark the free vectors */
	for (vector = 0; vector < NR_VECTORS; ++vector) {
		desc = per_cpu(vector_irq, cpu)[vector];
		if (IS_ERR_OR_NULL(desc))
			continue;

		data = apic_chip_data(irq_desc_get_irq_data(desc));
		if (!cpumask_test_cpu(cpu, data->domain))
			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
	}
}
/*
 * Setup the vector to irq mappings. Must be called with vector_lock held.
 */
void setup_vector_irq(int cpu)
{
	int irq;

	lockdep_assert_held(&vector_lock);
	/*
	 * On most platforms the legacy PIC delivers its interrupts on the
	 * boot cpu, but there are platforms where PIC interrupts are
	 * delivered to multiple cpus. If the legacy IRQ is handled by the
	 * legacy PIC, set up the static legacy vector to irq mapping for
	 * the new cpu that is coming online:
	 */
	for (irq = 0; irq < nr_legacy_irqs(); irq++)
		per_cpu(vector_irq, cpu)[ISA_IRQ_VECTOR(irq)] = irq_to_desc(irq);

	__setup_vector_irq(cpu);
}
static int apic_retrigger_irq(struct irq_data *irq_data)
{
	struct apic_chip_data *data = apic_chip_data(irq_data);
	unsigned long flags;
	int cpu;

	raw_spin_lock_irqsave(&vector_lock, flags);
	cpu = cpumask_first_and(data->domain, cpu_online_mask);
	apic->send_IPI_mask(cpumask_of(cpu), data->cfg.vector);
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	return 1;
}
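/*
 * apic_ack_edge() runs when the interrupt has already arrived on the
 * new vector/cpu after a migration, so calling irq_complete_move()
 * here is what eventually kicks off cleanup of the old vector before
 * the local APIC is acked.
 */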
void apic_ack_edge(struct irq_data *data)
{
	irq_complete_move(irqd_cfg(data));
	irq_move_irq(data);
	ack_APIC_irq();
}
static int apic_set_affinity(struct irq_data *irq_data,
			     const struct cpumask *dest, bool force)
{
	struct apic_chip_data *data = irq_data->chip_data;
	int err, irq = irq_data->irq;

	if (!config_enabled(CONFIG_SMP))
		return -EPERM;

	if (!cpumask_intersects(dest, cpu_online_mask))
		return -EINVAL;

	err = assign_irq_vector(irq, data, dest);
	return err ? err : IRQ_SET_MASK_OK;
}
static struct irq_chip lapic_controller = {
	.irq_ack		= apic_ack_edge,
	.irq_set_affinity	= apic_set_affinity,
	.irq_retrigger		= apic_retrigger_irq,
};
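/*
 * lapic_controller is the irq_chip of the root vector domain. The
 * chips of the stacked domains (IOAPIC, MSI, HT) usually reach these
 * callbacks via helpers like irq_chip_ack_parent() and
 * irq_chip_retrigger_hierarchy(), so .irq_ack and .irq_retrigger
 * serve the whole hierarchy.
 */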
#ifdef CONFIG_SMP
static void __send_cleanup_vector(struct apic_chip_data *data)
{
	raw_spin_lock(&vector_lock);
	cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);
	data->move_in_progress = 0;
	if (!cpumask_empty(data->old_domain))
		apic->send_IPI_mask(data->old_domain, IRQ_MOVE_CLEANUP_VECTOR);
	raw_spin_unlock(&vector_lock);
}
void send_cleanup_vector(struct irq_cfg *cfg)
{
	struct apic_chip_data *data;

	data = container_of(cfg, struct apic_chip_data, cfg);
	if (data->move_in_progress)
		__send_cleanup_vector(data);
}
asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
{
	unsigned vector, me;

	entering_ack_irq();

	/* Prevent vectors vanishing under us */
	raw_spin_lock(&vector_lock);

	me = smp_processor_id();
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		struct apic_chip_data *data;
		struct irq_desc *desc;
		unsigned int irr;

retry:
		desc = __this_cpu_read(vector_irq[vector]);
		if (IS_ERR_OR_NULL(desc))
			continue;

		if (!raw_spin_trylock(&desc->lock)) {
			raw_spin_unlock(&vector_lock);
			cpu_relax();
			raw_spin_lock(&vector_lock);
			goto retry;
		}

		data = apic_chip_data(irq_desc_get_irq_data(desc));
		if (!data)
			goto unlock;

		/*
		 * Nothing to cleanup if irq migration is in progress
		 * or this cpu is not set in the cleanup mask.
		 */
		if (data->move_in_progress ||
		    !cpumask_test_cpu(me, data->old_domain))
			goto unlock;

		/*
		 * We have two cases to handle here:
		 * 1) vector is unchanged but the target mask got reduced
		 * 2) vector and the target mask have changed
		 *
		 * #1 is obvious, but in #2 we have two vectors with the same
		 * irq descriptor: the old and the new vector. So we need to
		 * make sure that we only cleanup the old vector. The new
		 * vector has the current @vector number in the config and
		 * this cpu is part of the target mask. We'd better leave that
		 * one alone.
		 */
		if (vector == data->cfg.vector &&
		    cpumask_test_cpu(me, data->domain))
			goto unlock;

		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		/*
		 * Check if the vector that needs to be cleaned up is
		 * registered at the cpu's IRR. If so, then this is not
		 * the best time to clean it up. Let's clean it up in the
		 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
		 * to this cpu.
		 */
		if (irr & (1 << (vector % 32))) {
			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
			goto unlock;
		}
		__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
		cpumask_clear_cpu(me, data->old_domain);
unlock:
		raw_spin_unlock(&desc->lock);
	}

	raw_spin_unlock(&vector_lock);

	exiting_irq();
}
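/*
 * Example for the IRR lookup above: the IRR is an array of 8 32-bit
 * registers spaced 0x10 apart. Vector 0x61 (97) selects register
 * 97 / 32 == 3, i.e. offset 0x30 from APIC_IRR, and bit 97 % 32 == 1
 * within it.
 */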
static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
{
	unsigned me;
	struct apic_chip_data *data;

	data = container_of(cfg, struct apic_chip_data, cfg);
	if (likely(!data->move_in_progress))
		return;

	me = smp_processor_id();
	if (vector == data->cfg.vector && cpumask_test_cpu(me, data->domain))
		__send_cleanup_vector(data);
}
void irq_complete_move(struct irq_cfg *cfg)
{
	__irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
}
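/*
 * The interrupt entry code stores the complemented vector number in
 * pt_regs->orig_ax, so ~orig_ax above recovers the vector currently
 * being serviced; __irq_complete_move() compares it against the new
 * cfg->vector to decide whether the move is complete.
 */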
/*
 * Called with @desc->lock held and interrupts disabled.
 */
void irq_force_complete_move(struct irq_desc *desc)
{
	struct irq_data *irqdata = irq_desc_get_irq_data(desc);
	struct apic_chip_data *data = apic_chip_data(irqdata);
	struct irq_cfg *cfg = data ? &data->cfg : NULL;

	if (!cfg)
		return;

	__irq_complete_move(cfg, cfg->vector);

	/*
	 * This is tricky. If the cleanup of @data->old_domain has not been
	 * done yet, then the following setaffinity call will fail with
	 * -EBUSY. This can leave the interrupt in a stale state.
	 *
	 * The cleanup cannot make progress because we hold @desc->lock. So in
	 * case @data->old_domain is not yet cleaned up, we need to drop the
	 * lock and acquire it again. @desc cannot go away, because the
	 * hotplug code holds the sparse irq lock.
	 */
	raw_spin_lock(&vector_lock);
	/* Clean out all offline cpus (including ourself) first. */
	cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);
	while (!cpumask_empty(data->old_domain)) {
		raw_spin_unlock(&vector_lock);
		raw_spin_unlock(&desc->lock);
		cpu_relax();
		raw_spin_lock(&desc->lock);
		/*
		 * Reevaluate apic_chip_data. It might have been cleared after
		 * we dropped @desc->lock.
		 */
		data = apic_chip_data(irqdata);

		raw_spin_lock(&vector_lock);
	}
	raw_spin_unlock(&vector_lock);
}
#endif
static void __init print_APIC_field(int base)
{
	int i;

	printk(KERN_DEBUG);

	for (i = 0; i < 8; i++)
		pr_cont("%08x", apic_read(base + i * 0x10));

	pr_cont("\n");
}
static void __init print_local_APIC(void *dummy)
{
	unsigned int i, v, ver, maxlvt;
	u64 icr;

	pr_debug("printing local APIC contents on CPU#%d/%d:\n",
		 smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	pr_info("... APIC ID:      %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	pr_info("... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	pr_debug("... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		if (!APIC_XAPIC(ver)) {
			v = apic_read(APIC_ARBPRI);
			pr_debug("... APIC ARBPRI: %08x (%02x)\n",
				 v, v & APIC_ARBPRI_MASK);
		}
		v = apic_read(APIC_PROCPRI);
		pr_debug("... APIC PROCPRI: %08x\n", v);
	}

	/*
	 * Remote read supported only in the 82489DX and local APIC for
	 * Pentium processors.
	 */
	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
		v = apic_read(APIC_RRR);
		pr_debug("... APIC RRR: %08x\n", v);
	}

	v = apic_read(APIC_LDR);
	pr_debug("... APIC LDR: %08x\n", v);
	if (!x2apic_enabled()) {
		v = apic_read(APIC_DFR);
		pr_debug("... APIC DFR: %08x\n", v);
	}
	v = apic_read(APIC_SPIV);
	pr_debug("... APIC SPIV: %08x\n", v);

	pr_debug("... APIC ISR field:\n");
	print_APIC_field(APIC_ISR);
	pr_debug("... APIC TMR field:\n");
	print_APIC_field(APIC_TMR);
	pr_debug("... APIC IRR field:\n");
	print_APIC_field(APIC_IRR);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		/* Due to the Pentium erratum 3AP. */
		if (maxlvt > 3)
			apic_write(APIC_ESR, 0);

		v = apic_read(APIC_ESR);
		pr_debug("... APIC ESR: %08x\n", v);
	}

	icr = apic_icr_read();
	pr_debug("... APIC ICR: %08x\n", (u32)icr);
	pr_debug("... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	pr_debug("... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {
		/* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		pr_debug("... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	pr_debug("... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	pr_debug("... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {
		/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		pr_debug("... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	pr_debug("... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	pr_debug("... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	pr_debug("... APIC TDCR: %08x\n", v);

	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
		v = apic_read(APIC_EFEAT);
		maxlvt = (v >> 16) & 0xff;
		pr_debug("... APIC EFEAT: %08x\n", v);
		v = apic_read(APIC_ECTRL);
		pr_debug("... APIC ECTRL: %08x\n", v);
		for (i = 0; i < maxlvt; i++) {
			v = apic_read(APIC_EILVTn(i));
			pr_debug("... APIC EILVT%d: %08x\n", i, v);
		}
	}
	pr_cont("\n");
}
static void __init print_local_APICs(int maxcpu)
{
	int cpu;

	if (!maxcpu)
		return;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu >= maxcpu)
			break;
		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
	}
	preempt_enable();
}
static void __init print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (!nr_legacy_irqs())
		return;

	pr_debug("\nprinting PIC contents\n");

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	pr_debug("... PIC  IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	pr_debug("... PIC  IRR: %04x\n", v);

	/* OCW3: select ISR for reading, then switch back to IRR */
	outb(0x0b, 0xa0);
	outb(0x0b, 0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	outb(0x0a, 0xa0);
	outb(0x0a, 0x20);

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	pr_debug("... PIC  ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	pr_debug("... PIC ELCR: %04x\n", v);
}
static int show_lapic __initdata = 1;
static __init int setup_show_lapic(char *arg)
{
	int num = -1;

	if (strcmp(arg, "all") == 0) {
		show_lapic = CONFIG_NR_CPUS;
	} else {
		get_option(&arg, &num);
		if (num >= 0)
			show_lapic = num;
	}

	return 1;
}
__setup("show_lapic=", setup_show_lapic);
static int __init print_ICs(void)
{
	if (apic_verbosity == APIC_QUIET)
		return 0;

	print_PIC();

	/* don't print out if apic is not there */
	if (!cpu_has_apic && !apic_from_smp_config())
		return 0;

	print_local_APICs(show_lapic);
	print_IO_APICs();

	return 0;
}

late_initcall(print_ICs);