/*
 * Common interrupt code for 32 and 64 bit
 */
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/of.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/ftrace.h>
#include <linux/delay.h>
#include <linux/export.h>

#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/mce.h>
#include <asm/hw_irq.h>
#include <asm/desc.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/irq_vectors.h>
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);

atomic_t irq_err_count;

/* Function pointer for generic interrupt vector handling */
void (*x86_platform_ipi_callback)(void) = NULL;
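
/*
 * Illustrative registration (hypothetical handler name, not from this
 * file): platform code installs its handler by assigning the pointer
 * directly, e.g.
 *
 *	x86_platform_ipi_callback = my_platform_handler;
 *
 * smp_x86_platform_ipi() below then invokes it on every
 * X86_PLATFORM_IPI_VECTOR interrupt.
 */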

/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves.
 */
void ack_bad_irq(unsigned int irq)
{
	if (printk_ratelimit())
		pr_err("unexpected IRQ trap at vector %02x\n", irq);

	/*
	 * Currently unexpected vectors happen only on SMP and APIC.
	 * We _must_ ack these because every local APIC has only N
	 * irq slots per priority level, and a 'hanging, unacked' IRQ
	 * holds up an irq slot - in excessive cases (when multiple
	 * unexpected vectors occur) that might lock up the APIC
	 * completely.
	 * But only ack when the APIC is enabled -AK
	 */
	ack_APIC_irq();
}
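
/*
 * Note: ack_APIC_irq() issues the end-of-interrupt write to the local
 * APIC's EOI register, which releases the in-service slot described above.
 */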

#define irq_stats(x)		(&per_cpu(irq_stat, x))
/*
 * /proc/interrupts printing for arch specific interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
	seq_puts(p, "  Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
	seq_puts(p, "  Local timer interrupts\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
	seq_puts(p, "  Spurious interrupts\n");
	seq_printf(p, "%*s: ", prec, "PMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
	seq_puts(p, "  Performance monitoring interrupts\n");
	seq_printf(p, "%*s: ", prec, "IWI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs);
	seq_puts(p, "  IRQ work interrupts\n");
	seq_printf(p, "%*s: ", prec, "RTR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->icr_read_retry_count);
	seq_puts(p, "  APIC ICR read retries\n");
#endif
	if (x86_platform_ipi_callback) {
		seq_printf(p, "%*s: ", prec, "PLT");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis);
		seq_puts(p, "  Platform interrupts\n");
	}
	seq_printf(p, "%*s: ", prec, "RES");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
	seq_puts(p, "  Rescheduling interrupts\n");
	seq_printf(p, "%*s: ", prec, "CAL");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count -
					irq_stats(j)->irq_tlb_count);
	seq_puts(p, "  Function call interrupts\n");
	seq_printf(p, "%*s: ", prec, "TLB");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
	seq_puts(p, "  TLB shootdowns\n");
#ifdef CONFIG_X86_THERMAL_VECTOR
	seq_printf(p, "%*s: ", prec, "TRM");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
	seq_puts(p, "  Thermal event interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	seq_printf(p, "%*s: ", prec, "THR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
	seq_puts(p, "  Threshold APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE
	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_exception_count, j));
	seq_puts(p, "  Machine check exceptions\n");
	seq_printf(p, "%*s: ", prec, "MCP");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
	seq_puts(p, "  Machine check polls\n");
#endif
#if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN)
	seq_printf(p, "%*s: ", prec, "HYP");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
	seq_puts(p, "  Hypervisor callback interrupts\n");
#endif
	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
	seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
#endif
#ifdef CONFIG_HAVE_KVM
	seq_printf(p, "%*s: ", prec, "PIN");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->kvm_posted_intr_ipis);
	seq_puts(p, "  Posted-interrupt notification event\n");

	seq_printf(p, "%*s: ", prec, "PIW");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ",
			   irq_stats(j)->kvm_posted_intr_wakeup_ipis);
	seq_puts(p, "  Posted-interrupt wakeup event\n");
#endif
	return 0;
}

/*
 * /proc/stat helpers.
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = irq_stats(cpu)->__nmi_count;

#ifdef CONFIG_X86_LOCAL_APIC
	sum += irq_stats(cpu)->apic_timer_irqs;
	sum += irq_stats(cpu)->irq_spurious_count;
	sum += irq_stats(cpu)->apic_perf_irqs;
	sum += irq_stats(cpu)->apic_irq_work_irqs;
	sum += irq_stats(cpu)->icr_read_retry_count;
#endif
	if (x86_platform_ipi_callback)
		sum += irq_stats(cpu)->x86_platform_ipis;
	sum += irq_stats(cpu)->irq_resched_count;
	sum += irq_stats(cpu)->irq_call_count;
#ifdef CONFIG_X86_THERMAL_VECTOR
	sum += irq_stats(cpu)->irq_thermal_count;
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	sum += irq_stats(cpu)->irq_threshold_count;
#endif
#ifdef CONFIG_X86_MCE
	sum += per_cpu(mce_exception_count, cpu);
	sum += per_cpu(mce_poll_count, cpu);
#endif
	return sum;
}
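
/*
 * Note: fs/proc/stat.c folds arch_irq_stat_cpu() into each CPU's interrupt
 * total and adds arch_irq_stat() once for the system-wide "intr" line.
 */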

u64 arch_irq_stat(void)
{
	u64 sum = atomic_read(&irq_err_count);
	return sum;
}

/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
__visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	/* high bit used in ret_from_ code  */
	unsigned vector = ~regs->orig_ax;
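	/*
	 * The interrupt entry stubs effectively store ~vector in orig_ax,
	 * so a hardware interrupt always looks like a negative orig_ax and
	 * cannot be confused with a syscall number; the complement above
	 * recovers the original vector.
	 */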
	unsigned irq;

	irq_enter();
	exit_idle();

	irq = __this_cpu_read(vector_irq[vector]);

	if (!handle_irq(irq, regs)) {
		ack_APIC_irq();

		if (irq != VECTOR_RETRIGGERED) {
			pr_emerg_ratelimited("%s: %d.%d No irq handler for vector (irq %d)\n",
					     __func__, smp_processor_id(),
					     vector, irq);
		} else {
			__this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
		}
	}

	irq_exit();

	set_irq_regs(old_regs);
	return 1;
}
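
/*
 * vector_irq[] holds VECTOR_UNDEFINED for unallocated vectors and
 * VECTOR_RETRIGGERED for vectors fixup_irqs() re-sent to a new target CPU,
 * so one stray interrupt on a retriggered vector is expected and is
 * silently cleared above rather than reported.
 */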

/*
 * Handler for X86_PLATFORM_IPI_VECTOR.
 */
void __smp_x86_platform_ipi(void)
{
	inc_irq_stat(x86_platform_ipis);

	if (x86_platform_ipi_callback)
		x86_platform_ipi_callback();
}

__visible void smp_x86_platform_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_ack_irq();
	__smp_x86_platform_ipi();
	exiting_irq();
	set_irq_regs(old_regs);
}
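
/*
 * entering_ack_irq() acks the APIC and sets up interrupt context
 * (irq_enter()); exiting_irq() is the matching irq_exit(). Every
 * system-vector handler below follows this same bracket.
 */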

#ifdef CONFIG_HAVE_KVM
static void dummy_handler(void) {}
static void (*kvm_posted_intr_wakeup_handler)(void) = dummy_handler;

void kvm_set_posted_intr_wakeup_handler(void (*handler)(void))
{
	if (handler)
		kvm_posted_intr_wakeup_handler = handler;
	else
		kvm_posted_intr_wakeup_handler = dummy_handler;
}
EXPORT_SYMBOL_GPL(kvm_set_posted_intr_wakeup_handler);
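
/*
 * Illustrative use (hypothetical caller, not part of this file): the KVM
 * module would install its wakeup callback on load and clear it on unload:
 *
 *	kvm_set_posted_intr_wakeup_handler(pi_wakeup_handler);
 *	...
 *	kvm_set_posted_intr_wakeup_handler(NULL);
 *
 * The dummy_handler default means smp_kvm_posted_intr_wakeup_ipi() never
 * has to test the pointer for NULL.
 */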

/*
 * Handler for POSTED_INTERRUPT_VECTOR.
 */
__visible void smp_kvm_posted_intr_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_ack_irq();
	inc_irq_stat(kvm_posted_intr_ipis);
	exiting_irq();
	set_irq_regs(old_regs);
}

/*
 * Handler for POSTED_INTERRUPT_WAKEUP_VECTOR.
 */
__visible void smp_kvm_posted_intr_wakeup_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_ack_irq();
	inc_irq_stat(kvm_posted_intr_wakeup_ipis);
	kvm_posted_intr_wakeup_handler();
	exiting_irq();
	set_irq_regs(old_regs);
}
#endif

__visible void smp_trace_x86_platform_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_ack_irq();
	trace_x86_platform_ipi_entry(X86_PLATFORM_IPI_VECTOR);
	__smp_x86_platform_ipi();
	trace_x86_platform_ipi_exit(X86_PLATFORM_IPI_VECTOR);
	exiting_irq();
	set_irq_regs(old_regs);
}

EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);

#ifdef CONFIG_HOTPLUG_CPU

/* These two declarations are only used in check_irq_vectors_for_cpu_disable()
 * below, which is protected by stop_machine().  Putting them on the stack
 * results in a stack frame overflow.  Dynamically allocating could result in a
 * failure so declare these two cpumasks as global.
 */
static struct cpumask affinity_new, online_new;

/*
 * This cpu is going to be removed and its vectors migrated to the remaining
 * online cpus.  Check to see if there are enough vectors in the remaining cpus.
 * This function is protected by stop_machine().
 */
int check_irq_vectors_for_cpu_disable(void)
{
	int irq, cpu;
	unsigned int this_cpu, vector, this_count, count;
	struct irq_desc *desc;
	struct irq_data *data;

	this_cpu = smp_processor_id();
	cpumask_copy(&online_new, cpu_online_mask);
	cpumask_clear_cpu(this_cpu, &online_new);

	this_count = 0;
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		irq = __this_cpu_read(vector_irq[vector]);
		if (irq >= 0) {
			desc = irq_to_desc(irq);
			if (!desc)
				continue;

			data = irq_desc_get_irq_data(desc);
			cpumask_copy(&affinity_new, data->affinity);
			cpumask_clear_cpu(this_cpu, &affinity_new);

			/* Do not count inactive or per-cpu irqs. */
			if (!irq_has_action(irq) || irqd_is_per_cpu(data))
				continue;

			/*
			 * A single irq may be mapped to multiple
			 * cpu's vector_irq[] (for example IOAPIC cluster
			 * mode).  In this case we have two
			 * possibilities:
			 *
			 * 1) the resulting affinity mask is empty; that is
			 * the down'd cpu is the last cpu in the irq's
			 * affinity mask, or
			 *
			 * 2) the resulting affinity mask is no longer
			 * a subset of the online cpus but the affinity
			 * mask is not zero; that is the down'd cpu is the
			 * last online cpu in a user set affinity mask.
			 */
			if (cpumask_empty(&affinity_new) ||
			    !cpumask_subset(&affinity_new, &online_new))
				this_count++;
		}
	}

	count = 0;
	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		/*
		 * We scan from FIRST_EXTERNAL_VECTOR to first system
		 * vector. If the vector is marked in the used vectors
		 * bitmap or an irq is assigned to it, we don't count
		 * it as available.
		 */
		for (vector = FIRST_EXTERNAL_VECTOR;
		     vector < first_system_vector; vector++) {
			if (!test_bit(vector, used_vectors) &&
			    per_cpu(vector_irq, cpu)[vector] < 0)
				count++;
		}
	}

	if (count < this_count) {
		pr_warn("CPU %d disable failed: CPU has %u vectors assigned and there are only %u available.\n",
			this_cpu, this_count, count);
		return -ERANGE;
	}

	return 0;
}
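
/*
 * Worked example: if the CPU going down owns three vectors whose affinity
 * masks would end up empty or entirely offline without it (this_count == 3),
 * but the surviving CPUs only have two unassigned, non-system vectors
 * between them (count == 2), the check above fails the hot-unplug with
 * -ERANGE instead of stranding an interrupt.
 */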

/* A cpu has been removed from cpu_online_mask.  Reset irq affinities. */
void fixup_irqs(void)
{
	unsigned int irq, vector;
	static int warned;
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;
	int ret;

	for_each_irq_desc(irq, desc) {
		int break_affinity = 0;
		int set_affinity = 1;
		const struct cpumask *affinity;

		if (!desc)
			continue;
		if (irq == 2)
			continue;

		/* interrupts are disabled at this point */
		raw_spin_lock(&desc->lock);

		data = irq_desc_get_irq_data(desc);
		affinity = data->affinity;
		if (!irq_has_action(irq) || irqd_is_per_cpu(data) ||
		    cpumask_subset(affinity, cpu_online_mask)) {
			raw_spin_unlock(&desc->lock);
			continue;
		}

		/*
		 * Complete the irq move. This cpu is going down and for
		 * non intr-remapping case, we can't wait till this interrupt
		 * arrives at this cpu before completing the irq move.
		 */
		irq_force_complete_move(irq);

		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
			break_affinity = 1;
			affinity = cpu_online_mask;
		}

		chip = irq_data_get_irq_chip(data);
		if (!irqd_can_move_in_process_context(data) && chip->irq_mask)
			chip->irq_mask(data);

		if (chip->irq_set_affinity) {
			ret = chip->irq_set_affinity(data, affinity, true);
			if (ret == -ENOSPC)
				pr_crit("IRQ %d set affinity failed because there are no available vectors.  The device assigned to this IRQ is unstable.\n", irq);
		} else {
			if (!(warned++))
				set_affinity = 0;
		}

		/*
		 * We unmask if the irq was not marked masked by the
		 * core code. That respects the lazy irq disable
		 * behaviour.
		 */
		if (!irqd_can_move_in_process_context(data) &&
		    !irqd_irq_masked(data) && chip->irq_unmask)
			chip->irq_unmask(data);

		raw_spin_unlock(&desc->lock);

		if (break_affinity && set_affinity)
			pr_notice("Broke affinity for irq %i\n", irq);
		else if (!set_affinity)
			pr_notice("Cannot set affinity for irq %i\n", irq);
	}

	/*
	 * We can remove mdelay() and then send spurious interrupts to
	 * new cpu targets for all the irqs that were handled previously by
	 * this cpu. While it works, I have seen spurious interrupt messages
	 * (nothing wrong but still...).
	 *
	 * So for now, retain mdelay(1) and check the IRR and then send those
	 * interrupts to new targets as this cpu is already offlined...
	 */
	mdelay(1);

	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		unsigned int irr;

		if (__this_cpu_read(vector_irq[vector]) <= VECTOR_UNDEFINED)
			continue;
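
		/*
		 * The local APIC exposes IRR as eight 32-bit registers
		 * spaced 0x10 apart, one per group of 32 vectors: register
		 * (vector / 32) holds bit (vector % 32), hence the offset
		 * and mask arithmetic below.
		 */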
		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		if (irr & (1 << (vector % 32))) {
			irq = __this_cpu_read(vector_irq[vector]);

			desc = irq_to_desc(irq);
			data = irq_desc_get_irq_data(desc);
			chip = irq_data_get_irq_chip(data);
			raw_spin_lock(&desc->lock);
			if (chip->irq_retrigger) {
				chip->irq_retrigger(data);
				__this_cpu_write(vector_irq[vector], VECTOR_RETRIGGERED);
			}
			raw_spin_unlock(&desc->lock);
		}
		if (__this_cpu_read(vector_irq[vector]) != VECTOR_RETRIGGERED)
			__this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
	}
}
#endif