/*
 * Local APIC related interfaces to support IOAPIC, MSI, HT_IRQ etc.
 *
 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 *      Moved from arch/x86/kernel/apic/io_apic.c.
 * Jiang Liu <jiang.liu@linux.intel.com>
 *      Enable support of hierarchical irqdomains
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <asm/irqdomain.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/i8259.h>
#include <asm/desc.h>
#include <asm/irq_remapping.h>
struct apic_chip_data {
        struct irq_cfg          cfg;
        cpumask_var_t           domain;
        cpumask_var_t           old_domain;
        u8                      move_in_progress : 1;
};

struct irq_domain *x86_vector_domain;
EXPORT_SYMBOL_GPL(x86_vector_domain);
static DEFINE_RAW_SPINLOCK(vector_lock);
static cpumask_var_t vector_cpumask, vector_searchmask, searched_cpumask;
static struct irq_chip lapic_controller;
#ifdef CONFIG_X86_IO_APIC
static struct apic_chip_data *legacy_irq_data[NR_IRQS_LEGACY];
#endif
void lock_vector_lock(void)
{
        /*
         * Used to ensure that the set of online cpus does not change
         * during assign_irq_vector.
         */
        raw_spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
        raw_spin_unlock(&vector_lock);
}
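/*
 * Illustrative usage sketch only; the real call sites live in the cpu
 * bringup code, not in this file. A hotplug path that must observe a
 * stable online mask while per-cpu vector tables are populated would
 * bracket the work roughly as:
 *
 *      lock_vector_lock();
 *      setup_vector_irq(smp_processor_id());
 *      set_cpu_online(smp_processor_id(), true);
 *      unlock_vector_lock();
 *
 * The exact ordering here is an assumption for illustration.
 */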
static struct apic_chip_data *apic_chip_data(struct irq_data *irq_data)
{
        if (!irq_data)
                return NULL;

        while (irq_data->parent_data)
                irq_data = irq_data->parent_data;

        return irq_data->chip_data;
}
struct irq_cfg *irqd_cfg(struct irq_data *irq_data)
{
        struct apic_chip_data *data = apic_chip_data(irq_data);

        return data ? &data->cfg : NULL;
}
EXPORT_SYMBOL_GPL(irqd_cfg);
struct irq_cfg *irq_cfg(unsigned int irq)
{
        return irqd_cfg(irq_get_irq_data(irq));
}
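/*
 * Usage sketch for the lookup helpers above (hypothetical caller, for
 * illustration only): given a Linux irq number, the assigned vector and
 * destination can be read back as
 *
 *      struct irq_cfg *cfg = irq_cfg(irq);
 *
 *      if (cfg)
 *              pr_debug("irq %u -> vector 0x%02x apicid %u\n",
 *                       irq, cfg->vector, cfg->dest_apicid);
 *
 * A NULL return means no vector domain data is attached to @irq.
 */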
static struct apic_chip_data *alloc_apic_chip_data(int node)
{
        struct apic_chip_data *data;

        data = kzalloc_node(sizeof(*data), GFP_KERNEL, node);
        if (!data)
                return NULL;
        if (!zalloc_cpumask_var_node(&data->domain, GFP_KERNEL, node))
                goto out_data;
        if (!zalloc_cpumask_var_node(&data->old_domain, GFP_KERNEL, node))
                goto out_domain;
        return data;
out_domain:
        free_cpumask_var(data->domain);
out_data:
        kfree(data);
        return NULL;
}
static void free_apic_chip_data(struct apic_chip_data *data)
{
        if (data) {
                free_cpumask_var(data->domain);
                free_cpumask_var(data->old_domain);
                kfree(data);
        }
}
static int __assign_irq_vector(int irq, struct apic_chip_data *d,
                               const struct cpumask *mask)
{
        /*
         * NOTE! The local APIC isn't very good at handling
         * multiple interrupts at the same interrupt level.
         * As the interrupt level is determined by taking the
         * vector number and shifting that right by 4, we
         * want to spread these out a bit so that they don't
         * all fall in the same interrupt level.
         *
         * Also, we've got to be careful not to trash gate
         * 0x80, because int 0x80 is hm, kind of importantish. ;)
         */
        static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
        static int current_offset = VECTOR_OFFSET_START % 16;
        int cpu, vector;

        /*
         * If there is still a move in progress or the previous move has not
         * been cleaned up completely, tell the caller to come back later.
         */
        if (d->move_in_progress ||
            cpumask_intersects(d->old_domain, cpu_online_mask))
                return -EBUSY;

        /* Only try and allocate irqs on cpus that are present */
        cpumask_clear(d->old_domain);
        cpumask_clear(searched_cpumask);
        cpu = cpumask_first_and(mask, cpu_online_mask);
        while (cpu < nr_cpu_ids) {
                int new_cpu, offset;

                /* Get the possible target cpus for @mask/@cpu from the apic */
                apic->vector_allocation_domain(cpu, vector_cpumask, mask);

                /*
                 * Clear the offline cpus from @vector_cpumask for searching
                 * and verify whether the result overlaps with @mask. If true,
                 * then the call to apic->cpu_mask_to_apicid_and() will
                 * succeed as well. If not, no point in trying to find a
                 * vector in this mask.
                 */
                cpumask_and(vector_searchmask, vector_cpumask, cpu_online_mask);
                if (!cpumask_intersects(vector_searchmask, mask))
                        goto next_cpu;

                if (cpumask_subset(vector_cpumask, d->domain)) {
                        if (cpumask_equal(vector_cpumask, d->domain))
                                goto success;
                        /*
                         * Mark the cpus which are no longer in the mask for
                         * cleanup.
                         */
                        cpumask_andnot(d->old_domain, d->domain, vector_cpumask);
                        vector = d->cfg.vector;
                        goto update;
                }

                vector = current_vector;
                offset = current_offset;
next:
                vector += 16;
                if (vector >= first_system_vector) {
                        offset = (offset + 1) % 16;
                        vector = FIRST_EXTERNAL_VECTOR + offset;
                }

                /* If the search wrapped around, try the next cpu */
                if (unlikely(current_vector == vector))
                        goto next_cpu;

                if (test_bit(vector, used_vectors))
                        goto next;

                for_each_cpu(new_cpu, vector_searchmask) {
                        if (!IS_ERR_OR_NULL(per_cpu(vector_irq, new_cpu)[vector]))
                                goto next;
                }

                current_vector = vector;
                current_offset = offset;
                /* Schedule the old vector for cleanup on all cpus */
                if (d->cfg.vector)
                        cpumask_copy(d->old_domain, d->domain);
                for_each_cpu(new_cpu, vector_searchmask)
                        per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq);
                goto update;

next_cpu:
                /*
                 * We exclude the current @vector_cpumask from the requested
                 * @mask and try again with the next online cpu in the
                 * result. We cannot modify @mask, so we use @vector_cpumask
                 * as a temporary buffer here as it will be reassigned when
                 * calling apic->vector_allocation_domain() above.
                 */
                cpumask_or(searched_cpumask, searched_cpumask, vector_cpumask);
                cpumask_andnot(vector_cpumask, mask, searched_cpumask);
                cpu = cpumask_first_and(vector_cpumask, cpu_online_mask);
        }
        return -ENOSPC;

update:
        /*
         * Exclude offline cpus from the cleanup mask and set the
         * move_in_progress flag when the result is not empty.
         */
        cpumask_and(d->old_domain, d->old_domain, cpu_online_mask);
        d->move_in_progress = !cpumask_empty(d->old_domain);
        d->cfg.old_vector = d->move_in_progress ? d->cfg.vector : 0;
        d->cfg.vector = vector;
        cpumask_copy(d->domain, vector_cpumask);
success:
        /*
         * Cache destination APIC IDs into cfg->dest_apicid. This cannot fail
         * as we already established that mask & d->domain & cpu_online_mask
         * is not empty.
         */
        BUG_ON(apic->cpu_mask_to_apicid_and(mask, d->domain,
                                            &d->cfg.dest_apicid));
        return 0;
}
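/*
 * Worked example of the search above, assuming the usual values of
 * FIRST_EXTERNAL_VECTOR == 0x20 and a first_system_vector near 0xef
 * (both are configuration dependent): the priority level of a vector is
 * vector >> 4, so stepping by 16 visits one vector per level; with
 * current_offset == 1 the candidates are 0x31, 0x41, 0x51, ..., 0xe1.
 * Only when that column is exhausted does the offset rotate via
 * (offset + 1) % 16 and the scan restart at FIRST_EXTERNAL_VECTOR +
 * offset, filling the next column. The net effect is that successive
 * allocations land on different priority levels before any level is
 * reused.
 */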
static int assign_irq_vector(int irq, struct apic_chip_data *data,
                             const struct cpumask *mask)
{
        int err;
        unsigned long flags;

        raw_spin_lock_irqsave(&vector_lock, flags);
        err = __assign_irq_vector(irq, data, mask);
        raw_spin_unlock_irqrestore(&vector_lock, flags);
        return err;
}
static int assign_irq_vector_policy(int irq, int node,
                                    struct apic_chip_data *data,
                                    struct irq_alloc_info *info)
{
        if (info && info->mask)
                return assign_irq_vector(irq, data, info->mask);
        if (node != NUMA_NO_NODE &&
            assign_irq_vector(irq, data, cpumask_of_node(node)) == 0)
                return 0;
        return assign_irq_vector(irq, data, apic->target_cpus());
}
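/*
 * The policy above falls back in a fixed order. For illustration: an
 * allocation for a device on NUMA node 1 with no explicit mask in @info
 * first tries cpumask_of_node(1) and only then the default
 * apic->target_cpus() set. A caller that needs a specific placement
 * passes it via info->mask, which short-circuits both fallbacks.
 */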
static void clear_irq_vector(int irq, struct apic_chip_data *data)
{
        struct irq_desc *desc;
        int cpu, vector;

        BUG_ON(!data->cfg.vector);

        vector = data->cfg.vector;
        for_each_cpu_and(cpu, data->domain, cpu_online_mask)
                per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;

        data->cfg.vector = 0;
        cpumask_clear(data->domain);

        /*
         * If a move is in progress or the old_domain mask is not empty,
         * i.e. the cleanup IPI has not been processed yet, we need to remove
         * the old references to desc from all cpus' vector tables.
         */
        if (!data->move_in_progress && cpumask_empty(data->old_domain))
                return;

        desc = irq_to_desc(irq);
        for_each_cpu_and(cpu, data->old_domain, cpu_online_mask) {
                for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
                     vector++) {
                        if (per_cpu(vector_irq, cpu)[vector] != desc)
                                continue;
                        per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
                        break;
                }
        }
        data->move_in_progress = 0;
}
void init_irq_alloc_info(struct irq_alloc_info *info,
                         const struct cpumask *mask)
{
        memset(info, 0, sizeof(*info));
        info->mask = mask;
}
void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
{
        if (src)
                *dst = *src;
        else
                memset(dst, 0, sizeof(*dst));
}
static void x86_vector_free_irqs(struct irq_domain *domain,
                                 unsigned int virq, unsigned int nr_irqs)
{
        struct apic_chip_data *apic_data;
        struct irq_data *irq_data;
        unsigned long flags;
        int i;

        for (i = 0; i < nr_irqs; i++) {
                irq_data = irq_domain_get_irq_data(x86_vector_domain, virq + i);
                if (irq_data && irq_data->chip_data) {
                        raw_spin_lock_irqsave(&vector_lock, flags);
                        clear_irq_vector(virq + i, irq_data->chip_data);
                        apic_data = irq_data->chip_data;
                        irq_domain_reset_irq_data(irq_data);
                        raw_spin_unlock_irqrestore(&vector_lock, flags);
                        free_apic_chip_data(apic_data);
#ifdef CONFIG_X86_IO_APIC
                        if (virq + i < nr_legacy_irqs())
                                legacy_irq_data[virq + i] = NULL;
#endif
                }
        }
}
static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
                                 unsigned int nr_irqs, void *arg)
{
        struct irq_alloc_info *info = arg;
        struct apic_chip_data *data;
        struct irq_data *irq_data;
        int i, err, node;

        if (disable_apic)
                return -ENXIO;

        /* Currently the vector allocator can't guarantee contiguous allocations */
        if ((info->flags & X86_IRQ_ALLOC_CONTIGUOUS_VECTORS) && nr_irqs > 1)
                return -ENOSYS;

        for (i = 0; i < nr_irqs; i++) {
                irq_data = irq_domain_get_irq_data(domain, virq + i);
                BUG_ON(!irq_data);
                node = irq_data_get_node(irq_data);
#ifdef CONFIG_X86_IO_APIC
                if (virq + i < nr_legacy_irqs() && legacy_irq_data[virq + i])
                        data = legacy_irq_data[virq + i];
                else
#endif
                        data = alloc_apic_chip_data(node);
                if (!data) {
                        err = -ENOMEM;
                        goto error;
                }

                irq_data->chip = &lapic_controller;
                irq_data->chip_data = data;
                irq_data->hwirq = virq + i;
                err = assign_irq_vector_policy(virq + i, node, data, info);
                if (err)
                        goto error;
        }

        return 0;

error:
        x86_vector_free_irqs(domain, virq, i + 1);
        return err;
}
static const struct irq_domain_ops x86_vector_domain_ops = {
        .alloc  = x86_vector_alloc_irqs,
        .free   = x86_vector_free_irqs,
};
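/*
 * For illustration: the ioapic, MSI and HT domains stack on top of this
 * domain via the hierarchical irqdomain API. A child domain's .alloc
 * typically delegates to this allocator first and then programs its own
 * hardware; a sketch (not an exact call site from this file):
 *
 *      err = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, info);
 *      if (err < 0)
 *              return err;
 *      ... program ioapic/MSI specific routing for virq..virq+nr_irqs-1 ...
 */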
int __init arch_probe_nr_irqs(void)
{
        int nr;

        if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
                nr_irqs = NR_VECTORS * nr_cpu_ids;

        nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
        /*
         * for MSI and HT dyn irq
         */
        if (gsi_top <= NR_IRQS_LEGACY)
                nr += 8 * nr_cpu_ids;
        else
                nr += gsi_top * 16;
#endif
        if (nr < nr_irqs)
                nr_irqs = nr;

        /*
         * We don't know if the PIC is present at this point, so we need to do
         * probe() to get the right number of legacy IRQs.
         */
        return legacy_pic->probe();
}
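/*
 * Arithmetic sketch for the estimate above (hypothetical machine, for
 * illustration only): with gsi_top = 40, 16 legacy irqs and 8 possible
 * cpus, nr = (40 + 16) + 8 * 8 = 120. Since gsi_top > NR_IRQS_LEGACY
 * here, the MSI/HT case adds gsi_top * 16 = 640, giving 760; nr_irqs is
 * then lowered to this value only if it currently exceeds it.
 */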
#ifdef CONFIG_X86_IO_APIC
static void init_legacy_irqs(void)
{
        int i, node = cpu_to_node(0);
        struct apic_chip_data *data;

        /*
         * For legacy IRQs, start with assigning irq0 to irq15 to
         * ISA_IRQ_VECTOR(i) for all cpus.
         */
        for (i = 0; i < nr_legacy_irqs(); i++) {
                data = legacy_irq_data[i] = alloc_apic_chip_data(node);
                BUG_ON(!data);

                data->cfg.vector = ISA_IRQ_VECTOR(i);
                cpumask_setall(data->domain);
                irq_set_chip_data(i, data);
        }
}
#else
static void init_legacy_irqs(void) { }
#endif
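/*
 * With the conventional ISA_IRQ_VECTOR() layout (an assumption about the
 * surrounding headers, not defined in this file), the loop above pins
 * IRQ0..IRQ15 to one vector apiece in a fixed block, e.g. IRQ0 -> 0x30,
 * IRQ1 -> 0x31, and so on. cpumask_setall() marks the mapping valid on
 * every cpu, matching the static per-cpu entries that setup_vector_irq()
 * installs below.
 */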
int __init arch_early_irq_init(void)
{
        init_legacy_irqs();

        x86_vector_domain = irq_domain_add_tree(NULL, &x86_vector_domain_ops,
                                                NULL);
        BUG_ON(x86_vector_domain == NULL);
        irq_set_default_host(x86_vector_domain);

        arch_init_msi_domain(x86_vector_domain);
        arch_init_htirq_domain(x86_vector_domain);

        BUG_ON(!alloc_cpumask_var(&vector_cpumask, GFP_KERNEL));
        BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));
        BUG_ON(!alloc_cpumask_var(&searched_cpumask, GFP_KERNEL));

        return arch_early_ioapic_init();
}
/* Initialize vector_irq on a new cpu */
static void __setup_vector_irq(int cpu)
{
        struct apic_chip_data *data;
        struct irq_desc *desc;
        int irq, vector;

        /* Mark the inuse vectors */
        for_each_irq_desc(irq, desc) {
                struct irq_data *idata = irq_desc_get_irq_data(desc);

                data = apic_chip_data(idata);
                if (!data || !cpumask_test_cpu(cpu, data->domain))
                        continue;
                vector = data->cfg.vector;
                per_cpu(vector_irq, cpu)[vector] = desc;
        }
        /* Mark the free vectors */
        for (vector = 0; vector < NR_VECTORS; ++vector) {
                desc = per_cpu(vector_irq, cpu)[vector];
                if (IS_ERR_OR_NULL(desc))
                        continue;

                data = apic_chip_data(irq_desc_get_irq_data(desc));
                if (!cpumask_test_cpu(cpu, data->domain))
                        per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
        }
}
/*
 * Setup the vector to irq mappings. Must be called with vector_lock held.
 */
void setup_vector_irq(int cpu)
{
        int irq;

        lockdep_assert_held(&vector_lock);
        /*
         * On most platforms the legacy PIC delivers its interrupts on the
         * boot cpu, but there are platforms where PIC interrupts are
         * delivered to multiple cpus. If the legacy IRQ is handled by the
         * legacy PIC, then for the new cpu that is coming online, set up
         * the static legacy vector to irq mapping:
         */
        for (irq = 0; irq < nr_legacy_irqs(); irq++)
                per_cpu(vector_irq, cpu)[ISA_IRQ_VECTOR(irq)] = irq_to_desc(irq);

        __setup_vector_irq(cpu);
}
static int apic_retrigger_irq(struct irq_data *irq_data)
{
        struct apic_chip_data *data = apic_chip_data(irq_data);
        unsigned long flags;
        int cpu;

        raw_spin_lock_irqsave(&vector_lock, flags);
        cpu = cpumask_first_and(data->domain, cpu_online_mask);
        apic->send_IPI_mask(cpumask_of(cpu), data->cfg.vector);
        raw_spin_unlock_irqrestore(&vector_lock, flags);

        return 1;
}
void apic_ack_edge(struct irq_data *data)
{
        irq_complete_move(irqd_cfg(data));
        irq_move_irq(data);
        ack_APIC_irq();
}
static int apic_set_affinity(struct irq_data *irq_data,
                             const struct cpumask *dest, bool force)
{
        struct apic_chip_data *data = irq_data->chip_data;
        int err, irq = irq_data->irq;

        if (!config_enabled(CONFIG_SMP))
                return -EPERM;

        if (!cpumask_intersects(dest, cpu_online_mask))
                return -EINVAL;

        err = assign_irq_vector(irq, data, dest);
        return err ? err : IRQ_SET_MASK_OK;
}
static struct irq_chip lapic_controller = {
        .irq_ack                = apic_ack_edge,
        .irq_set_affinity       = apic_set_affinity,
        .irq_retrigger          = apic_retrigger_irq,
};
static void __send_cleanup_vector(struct apic_chip_data *data)
{
        raw_spin_lock(&vector_lock);
        cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);
        data->move_in_progress = 0;
        if (!cpumask_empty(data->old_domain))
                apic->send_IPI_mask(data->old_domain, IRQ_MOVE_CLEANUP_VECTOR);
        raw_spin_unlock(&vector_lock);
}
void send_cleanup_vector(struct irq_cfg *cfg)
{
        struct apic_chip_data *data;

        data = container_of(cfg, struct apic_chip_data, cfg);
        if (data->move_in_progress)
                __send_cleanup_vector(data);
}
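/*
 * Timeline sketch of a vector move, tying the pieces together:
 *
 *  1. assign_irq_vector() picks a new vector/domain; the cpus that fall
 *     out of the mask are parked in old_domain and move_in_progress is
 *     set.
 *  2. The first interrupt arriving on the new vector reaches
 *     irq_complete_move()/send_cleanup_vector(), which IPIs
 *     IRQ_MOVE_CLEANUP_VECTOR to the cpus in old_domain.
 *  3. smp_irq_move_cleanup_interrupt() below then releases the old
 *     vector on each of those cpus, once it is no longer pending in
 *     their IRR.
 */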
asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
{
        unsigned vector, me;

        entering_ack_irq();

        /* Prevent vectors vanishing under us */
        raw_spin_lock(&vector_lock);

        me = smp_processor_id();
        for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
                struct apic_chip_data *data;
                struct irq_desc *desc;
                unsigned int irr;

retry:
                desc = __this_cpu_read(vector_irq[vector]);
                if (IS_ERR_OR_NULL(desc))
                        continue;

                if (!raw_spin_trylock(&desc->lock)) {
                        raw_spin_unlock(&vector_lock);
                        cpu_relax();
                        raw_spin_lock(&vector_lock);
                        goto retry;
                }

                data = apic_chip_data(irq_desc_get_irq_data(desc));
                if (!data)
                        goto unlock;

                /*
                 * Nothing to cleanup if irq migration is in progress
                 * or this cpu is not set in the cleanup mask.
                 */
                if (data->move_in_progress ||
                    !cpumask_test_cpu(me, data->old_domain))
                        goto unlock;

                /*
                 * We have two cases to handle here:
                 * 1) vector is unchanged but the target mask got reduced
                 * 2) vector and the target mask have changed
                 *
                 * #1 is obvious, but in #2 we have two vectors with the same
                 * irq descriptor: the old and the new vector. So we need to
                 * make sure that we only clean up the old vector. The new
                 * vector has the current @vector number in the config and
                 * this cpu is part of the target mask. We better leave that
                 * one alone.
                 */
                if (vector == data->cfg.vector &&
                    cpumask_test_cpu(me, data->domain))
                        goto unlock;

                irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
                /*
                 * Check if the vector that needs to be cleaned up is
                 * registered in the cpu's IRR. If so, then this is not
                 * the best time to clean it up. Clean it up in the
                 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
                 * to myself.
                 */
                if (irr & (1 << (vector % 32))) {
                        apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
                        goto unlock;
                }
                __this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
                cpumask_clear_cpu(me, data->old_domain);
unlock:
                raw_spin_unlock(&desc->lock);
        }

        raw_spin_unlock(&vector_lock);

        exiting_irq();
}
static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
{
        unsigned me;
        struct apic_chip_data *data;

        data = container_of(cfg, struct apic_chip_data, cfg);
        if (likely(!data->move_in_progress))
                return;

        me = smp_processor_id();
        if (vector == data->cfg.vector && cpumask_test_cpu(me, data->domain))
                __send_cleanup_vector(data);
}
void irq_complete_move(struct irq_cfg *cfg)
{
        __irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
}
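/*
 * Note on the ~ above: the interrupt entry stubs store the inverted
 * vector number in pt_regs->orig_ax, so that valid hardware vectors are
 * negative and cannot be confused with syscall numbers. Inverting it
 * again recovers the vector, and the truncation to unsigned discards
 * the sign-extended upper bits.
 */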
/*
 * Called from fixup_irqs() with @desc->lock held and interrupts disabled.
 */
void irq_force_complete_move(struct irq_desc *desc)
{
        struct irq_data *irqdata = irq_desc_get_irq_data(desc);
        struct apic_chip_data *data = apic_chip_data(irqdata);
        struct irq_cfg *cfg = data ? &data->cfg : NULL;
        unsigned int cpu;

        if (!cfg)
                return;

        /*
         * This is tricky. If the cleanup of @data->old_domain has not been
         * done yet, then the following setaffinity call will fail with
         * -EBUSY. This can leave the interrupt in a stale state.
         *
         * All CPUs are stuck in stop machine with interrupts disabled so
         * calling __irq_complete_move() would be completely pointless.
         */
        raw_spin_lock(&vector_lock);
        /*
         * Clean out all offline cpus (including the outgoing one) from the
         * old_domain mask.
         */
        cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);

        /*
         * If move_in_progress is cleared and the old_domain mask is empty,
         * then there is nothing to clean up. fixup_irqs() will take care of
         * the stale vectors on the outgoing cpu.
         */
        if (!data->move_in_progress && cpumask_empty(data->old_domain)) {
                raw_spin_unlock(&vector_lock);
                return;
        }

        /*
         * 1) The interrupt is in move_in_progress state. That means that we
         *    have not seen an interrupt since the io_apic was reprogrammed to
         *    the new vector.
         *
         * 2) The interrupt has fired on the new vector, but the cleanup IPIs
         *    have not been processed yet.
         */
        if (data->move_in_progress) {
                /*
                 * In theory there is a race:
                 *
                 * set_ioapic(new_vector) <-- Interrupt is raised before update
                 *                            is effective, i.e. it's raised on
                 *                            the old vector.
                 *
                 * So if the target cpu cannot handle that interrupt before
                 * the old vector is cleaned up, we get a spurious interrupt
                 * and in the worst case the ioapic irq line becomes stale.
                 *
                 * But in case of cpu hotplug this should be a non-issue
                 * because if the affinity update happens right before all
                 * cpus rendezvous in stop machine, there is no way that the
                 * interrupt can be blocked on the target cpu because all cpus
                 * loop first with interrupts enabled in stop machine, so the
                 * old vector is not yet cleaned up when the interrupt fires.
                 *
                 * So the only way to run into this issue is if the delivery
                 * of the interrupt on the apic/system bus would be delayed
                 * beyond the point where the target cpu disables interrupts
                 * in stop machine. I doubt that it can happen, but at least
                 * there is a theoretical chance. Virtualization might be
                 * able to expose this, but AFAICT the IOAPIC emulation is not
                 * as stupid as the real hardware.
                 *
                 * Anyway, there is nothing we can do about that at this point
                 * w/o refactoring the whole fixup_irq() business completely.
                 * We print at least the irq number and the old vector number,
                 * so we have the necessary information when a problem in that
                 * area arises.
                 */
                pr_warn("IRQ fixup: irq %d move in progress, old vector %d\n",
                        irqdata->irq, cfg->old_vector);
        }
        /*
         * If old_domain is not empty, then other cpus still have the irq
         * descriptor set in their vector array. Clean it up.
         */
        for_each_cpu(cpu, data->old_domain)
                per_cpu(vector_irq, cpu)[cfg->old_vector] = VECTOR_UNUSED;

        /* Clean up the leftovers of the (half finished) move */
        cpumask_clear(data->old_domain);
        data->move_in_progress = 0;
        raw_spin_unlock(&vector_lock);
}
static void __init print_APIC_field(int base)
{
        int i;

        printk(KERN_DEBUG);

        for (i = 0; i < 8; i++)
                pr_cont("%08x", apic_read(base + i * 0x10));

        pr_cont("\n");
}
static void __init print_local_APIC(void *dummy)
{
        unsigned int i, v, ver, maxlvt;
        u64 icr;

        pr_debug("printing local APIC contents on CPU#%d/%d:\n",
                 smp_processor_id(), hard_smp_processor_id());
        v = apic_read(APIC_ID);
        pr_info("... APIC ID:      %08x (%01x)\n", v, read_apic_id());
        v = apic_read(APIC_LVR);
        pr_info("... APIC VERSION: %08x\n", v);
        ver = GET_APIC_VERSION(v);
        maxlvt = lapic_get_maxlvt();

        v = apic_read(APIC_TASKPRI);
        pr_debug("... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

        /* !82489DX */
        if (APIC_INTEGRATED(ver)) {
                if (!APIC_XAPIC(ver)) {
                        v = apic_read(APIC_ARBPRI);
                        pr_debug("... APIC ARBPRI: %08x (%02x)\n",
                                 v, v & APIC_ARBPRI_MASK);
                }
                v = apic_read(APIC_PROCPRI);
                pr_debug("... APIC PROCPRI: %08x\n", v);
        }

        /*
         * Remote read supported only in the 82489DX and local APIC for
         * Pentium processors.
         */
        if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
                v = apic_read(APIC_RRR);
                pr_debug("... APIC RRR: %08x\n", v);
        }

        v = apic_read(APIC_LDR);
        pr_debug("... APIC LDR: %08x\n", v);
        if (!x2apic_enabled()) {
                v = apic_read(APIC_DFR);
                pr_debug("... APIC DFR: %08x\n", v);
        }
        v = apic_read(APIC_SPIV);
        pr_debug("... APIC SPIV: %08x\n", v);

        pr_debug("... APIC ISR field:\n");
        print_APIC_field(APIC_ISR);
        pr_debug("... APIC TMR field:\n");
        print_APIC_field(APIC_TMR);
        pr_debug("... APIC IRR field:\n");
        print_APIC_field(APIC_IRR);

        /* !82489DX */
        if (APIC_INTEGRATED(ver)) {
                /* Due to the Pentium erratum 3AP. */
                if (maxlvt > 3)
                        apic_write(APIC_ESR, 0);

                v = apic_read(APIC_ESR);
                pr_debug("... APIC ESR: %08x\n", v);
        }

        icr = apic_icr_read();
        pr_debug("... APIC ICR: %08x\n", (u32)icr);
        pr_debug("... APIC ICR2: %08x\n", (u32)(icr >> 32));

        v = apic_read(APIC_LVTT);
        pr_debug("... APIC LVTT: %08x\n", v);

        if (maxlvt > 3) {
                /* PC is LVT#4. */
                v = apic_read(APIC_LVTPC);
                pr_debug("... APIC LVTPC: %08x\n", v);
        }
        v = apic_read(APIC_LVT0);
        pr_debug("... APIC LVT0: %08x\n", v);
        v = apic_read(APIC_LVT1);
        pr_debug("... APIC LVT1: %08x\n", v);

        if (maxlvt > 2) {
                /* ERR is LVT#3. */
                v = apic_read(APIC_LVTERR);
                pr_debug("... APIC LVTERR: %08x\n", v);
        }

        v = apic_read(APIC_TMICT);
        pr_debug("... APIC TMICT: %08x\n", v);
        v = apic_read(APIC_TMCCT);
        pr_debug("... APIC TMCCT: %08x\n", v);
        v = apic_read(APIC_TDCR);
        pr_debug("... APIC TDCR: %08x\n", v);

        if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
                v = apic_read(APIC_EFEAT);
                maxlvt = (v >> 16) & 0xff;
                pr_debug("... APIC EFEAT: %08x\n", v);
                v = apic_read(APIC_ECTRL);
                pr_debug("... APIC ECTRL: %08x\n", v);
                for (i = 0; i < maxlvt; i++) {
                        v = apic_read(APIC_EILVTn(i));
                        pr_debug("... APIC EILVT%d: %08x\n", i, v);
                }
        }
        pr_cont("\n");
}
static void __init print_local_APICs(int maxcpu)
{
        int cpu;

        if (!maxcpu)
                return;

        preempt_disable();
        for_each_online_cpu(cpu) {
                if (cpu >= maxcpu)
                        break;
                smp_call_function_single(cpu, print_local_APIC, NULL, 1);
        }
        preempt_enable();
}
static void __init print_PIC(void)
{
        unsigned int v;
        unsigned long flags;

        if (!nr_legacy_irqs())
                return;

        pr_debug("\nprinting PIC contents\n");

        raw_spin_lock_irqsave(&i8259A_lock, flags);

        v = inb(0xa1) << 8 | inb(0x21);
        pr_debug("... PIC  IMR: %04x\n", v);

        v = inb(0xa0) << 8 | inb(0x20);
        pr_debug("... PIC  IRR: %04x\n", v);

        /* OCW3 0x0b: select ISR for the next read from the status port */
        outb(0x0b, 0xa0);
        outb(0x0b, 0x20);
        v = inb(0xa0) << 8 | inb(0x20);
        /* OCW3 0x0a: switch back to IRR reads */
        outb(0x0a, 0xa0);
        outb(0x0a, 0x20);

        raw_spin_unlock_irqrestore(&i8259A_lock, flags);

        pr_debug("... PIC  ISR: %04x\n", v);

        v = inb(0x4d1) << 8 | inb(0x4d0);
        pr_debug("... PIC ELCR: %04x\n", v);
}
static int show_lapic __initdata = 1;
static __init int setup_show_lapic(char *arg)
{
        int num = -1;

        if (strcmp(arg, "all") == 0) {
                show_lapic = CONFIG_NR_CPUS;
        } else {
                get_option(&arg, &num);
                if (num >= 0)
                        show_lapic = num;
        }

        return 1;
}
__setup("show_lapic=", setup_show_lapic);
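/*
 * Usage example for the boot parameter above: "show_lapic=2" limits the
 * dump to the first two cpus, "show_lapic=all" raises the limit to
 * CONFIG_NR_CPUS, and the default prints only cpu 0. This only matters
 * when print_ICs() below actually runs, i.e. when apic_verbosity is
 * above APIC_QUIET.
 */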
static int __init print_ICs(void)
{
        if (apic_verbosity == APIC_QUIET)
                return 0;

        print_PIC();

        /* don't print out if apic is not there */
        if (!cpu_has_apic && !apic_from_smp_config())
                return 0;

        print_local_APICs(show_lapic);
        print_IO_APICs();

        return 0;
}

late_initcall(print_ICs);