/*
 * Common interrupt code for 32 and 64 bit
 */
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/ftrace.h>
#include <linux/delay.h>
#include <linux/export.h>

#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/mce.h>
#include <asm/hw_irq.h>
#include <asm/desc.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/irq_vectors.h>
atomic_t irq_err_count;

/* Function pointer for generic interrupt vector handling */
void (*x86_platform_ipi_callback)(void) = NULL;
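
/*
 * Platform code installs its handler by assigning this pointer directly;
 * the vector itself is delivered through smp_x86_platform_ipi() below.
 */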

/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves.
 */
void ack_bad_irq(unsigned int irq)
{
	if (printk_ratelimit())
		pr_err("unexpected IRQ trap at vector %02x\n", irq);

	/*
	 * Currently unexpected vectors happen only on SMP and APIC.
	 * We _must_ ack these because every local APIC has only N
	 * irq slots per priority level, and a 'hanging, unacked' IRQ
	 * holds up an irq slot - in excessive cases (when multiple
	 * unexpected vectors occur) that might lock up the APIC
	 * completely.
	 * But only ack when the APIC is enabled -AK
	 */
	ack_APIC_irq();
}
#define irq_stats(x)		(&per_cpu(irq_stat, x))
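/*
 * irq_stat is the per-cpu irq_cpustat_t bookkeeping structure, so
 * irq_stats(x) resolves to a pointer to CPU x's interrupt counters.
 */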
/*
 * /proc/interrupts printing for arch specific interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

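	/*
	 * 'p' is the seq_file behind /proc/interrupts; 'prec' is the label
	 * field width chosen by the generic show_interrupts() code, so these
	 * arch rows line up with the per-irq rows above them.
	 */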
	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
	seq_puts(p, "  Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
	seq_puts(p, "  Local timer interrupts\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
	seq_puts(p, "  Spurious interrupts\n");
	seq_printf(p, "%*s: ", prec, "PMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
	seq_puts(p, "  Performance monitoring interrupts\n");
	seq_printf(p, "%*s: ", prec, "IWI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs);
	seq_puts(p, "  IRQ work interrupts\n");
	seq_printf(p, "%*s: ", prec, "RTR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->icr_read_retry_count);
	seq_puts(p, "  APIC ICR read retries\n");
#endif
	if (x86_platform_ipi_callback) {
		seq_printf(p, "%*s: ", prec, "PLT");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis);
		seq_puts(p, "  Platform interrupts\n");
	}
	seq_printf(p, "%*s: ", prec, "RES");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
	seq_puts(p, "  Rescheduling interrupts\n");
	seq_printf(p, "%*s: ", prec, "CAL");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count -
					irq_stats(j)->irq_tlb_count);
	seq_puts(p, "  Function call interrupts\n");
	seq_printf(p, "%*s: ", prec, "TLB");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
	seq_puts(p, "  TLB shootdowns\n");
#ifdef CONFIG_X86_THERMAL_VECTOR
	seq_printf(p, "%*s: ", prec, "TRM");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
	seq_puts(p, "  Thermal event interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	seq_printf(p, "%*s: ", prec, "THR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
	seq_puts(p, "  Threshold APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE
	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_exception_count, j));
	seq_puts(p, "  Machine check exceptions\n");
	seq_printf(p, "%*s: ", prec, "MCP");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
	seq_puts(p, "  Machine check polls\n");
#endif
#if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN)
	seq_printf(p, "%*s: ", prec, "HYP");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
	seq_puts(p, "  Hypervisor callback interrupts\n");
#endif
	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
	seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
#endif
	return 0;
}
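
/*
 * /proc/stat helpers: arch_irq_stat_cpu() feeds the per-cpu "intr" counts
 * and arch_irq_stat() the global total, on top of what the generic code
 * already accounts.
 */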
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = irq_stats(cpu)->__nmi_count;

#ifdef CONFIG_X86_LOCAL_APIC
	sum += irq_stats(cpu)->apic_timer_irqs;
	sum += irq_stats(cpu)->irq_spurious_count;
	sum += irq_stats(cpu)->apic_perf_irqs;
	sum += irq_stats(cpu)->apic_irq_work_irqs;
	sum += irq_stats(cpu)->icr_read_retry_count;
#endif
	if (x86_platform_ipi_callback)
		sum += irq_stats(cpu)->x86_platform_ipis;
	sum += irq_stats(cpu)->irq_resched_count;
	sum += irq_stats(cpu)->irq_call_count;
#ifdef CONFIG_X86_THERMAL_VECTOR
	sum += irq_stats(cpu)->irq_thermal_count;
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	sum += irq_stats(cpu)->irq_threshold_count;
#endif
#ifdef CONFIG_X86_MCE
	sum += per_cpu(mce_exception_count, cpu);
	sum += per_cpu(mce_poll_count, cpu);
#endif
	return sum;
}
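
/*
 * arch_irq_stat() only contributes the global error counter; the per-cpu
 * counts above are folded in via arch_irq_stat_cpu().
 */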
u64 arch_irq_stat(void)
{
	u64 sum = atomic_read(&irq_err_count);

	return sum;
}

/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
__visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	/* high bit used in ret_from_ code */
	unsigned vector = ~regs->orig_ax;
	unsigned irq;

	irq_enter();
	exit_idle();

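	/*
	 * vector_irq[] is this cpu's vector-to-irq translation table; the
	 * entry stub stored the one's complement of the vector number in
	 * orig_ax (keeping the value negative to distinguish it from a
	 * syscall), which the ~regs->orig_ax above undoes.
	 */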
	irq = __this_cpu_read(vector_irq[vector]);

	if (!handle_irq(irq, regs)) {
		ack_APIC_irq();

		if (irq != VECTOR_RETRIGGERED) {
			pr_emerg_ratelimited("%s: %d.%d No irq handler for vector (irq %d)\n",
					     __func__, smp_processor_id(),
					     vector, irq);
		} else {
			__this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
		}
	}

	irq_exit();

	set_irq_regs(old_regs);
	return 1;
}
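
/*
 * vector_irq[] slots at or below VECTOR_UNDEFINED are unused;
 * VECTOR_RETRIGGERED marks vectors that fixup_irqs() already re-sent to a
 * new target cpu, which is why do_IRQ() stays quiet about them above.
 */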

/*
 * Handler for X86_PLATFORM_IPI_VECTOR.
 */
void __smp_x86_platform_ipi(void)
{
	inc_irq_stat(x86_platform_ipis);

	if (x86_platform_ipi_callback)
		x86_platform_ipi_callback();
}

__visible void smp_x86_platform_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_ack_irq();
	__smp_x86_platform_ipi();
	exiting_irq();
	set_irq_regs(old_regs);
}

#ifdef CONFIG_HAVE_KVM
/*
 * Handler for POSTED_INTERRUPT_VECTOR.
 */
__visible void smp_kvm_posted_intr_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	ack_APIC_irq();

	irq_enter();

	exit_idle();

	inc_irq_stat(kvm_posted_intr_ipis);

	irq_exit();

	set_irq_regs(old_regs);
}
#endif
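
/*
 * The trace variant below is presumably what the trace IDT points at when
 * irq vector tracepoints are enabled: same body as smp_x86_platform_ipi()
 * plus entry/exit tracepoints.
 */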
__visible void smp_trace_x86_platform_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_ack_irq();
	trace_x86_platform_ipi_entry(X86_PLATFORM_IPI_VECTOR);
	__smp_x86_platform_ipi();
	trace_x86_platform_ipi_exit(X86_PLATFORM_IPI_VECTOR);
	exiting_irq();
	set_irq_regs(old_regs);
}

EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);

#ifdef CONFIG_HOTPLUG_CPU

/* These two declarations are only used in check_irq_vectors_for_cpu_disable()
 * below, which is protected by stop_machine(). Putting them on the stack
 * results in a stack frame overflow. Dynamically allocating could result in a
 * failure so declare these two cpumasks as global.
 */
static struct cpumask affinity_new, online_new;

/*
 * This cpu is going to be removed and its vectors migrated to the remaining
 * online cpus. Check to see if there are enough vectors in the remaining
 * online cpus. This function is protected by stop_machine().
 */
int check_irq_vectors_for_cpu_disable(void)
{
	int irq, cpu;
	unsigned int this_cpu, vector, this_count, count;
	struct irq_desc *desc;
	struct irq_data *data;

	this_cpu = smp_processor_id();
	cpumask_copy(&online_new, cpu_online_mask);
	cpumask_clear_cpu(this_cpu, &online_new);

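	/*
	 * First count how many of this cpu's vectors must find a new home
	 * (this_count), then how many free vectors the surviving cpus offer
	 * (count); the offline is refused if demand exceeds supply.
	 */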
	this_count = 0;
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		irq = __this_cpu_read(vector_irq[vector]);
		if (irq >= 0) {
			desc = irq_to_desc(irq);
			if (!desc)
				continue;

			data = irq_desc_get_irq_data(desc);
			cpumask_copy(&affinity_new, data->affinity);
			cpumask_clear_cpu(this_cpu, &affinity_new);

			/* Do not count inactive or per-cpu irqs. */
			if (!irq_has_action(irq) || irqd_is_per_cpu(data))
				continue;

			/*
			 * A single irq may be mapped to multiple
			 * cpu's vector_irq[] (for example IOAPIC cluster
			 * mode). In this case we have two
			 * possibilities:
			 *
			 * 1) the resulting affinity mask is empty; that is
			 * the down'd cpu is the last cpu in the irq's
			 * affinity mask, or
			 *
			 * 2) the resulting affinity mask is no longer
			 * a subset of the online cpus but the affinity
			 * mask is not zero; that is the down'd cpu is the
			 * last online cpu in a user set affinity mask.
			 */
			if (cpumask_empty(&affinity_new) ||
			    !cpumask_subset(&affinity_new, &online_new))
				this_count++;
		}
	}
	count = 0;
	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		/*
		 * We scan from FIRST_EXTERNAL_VECTOR to first system
		 * vector. If the vector is marked in the used vectors
		 * bitmap or an irq is assigned to it, we don't count
		 * it as available.
		 */
		for (vector = FIRST_EXTERNAL_VECTOR;
		     vector < first_system_vector; vector++) {
			if (!test_bit(vector, used_vectors) &&
			    per_cpu(vector_irq, cpu)[vector] < 0)
				count++;
		}
	}
	if (count < this_count) {
		pr_warn("CPU %d disable failed: CPU has %u vectors assigned and there are only %u available.\n",
			this_cpu, this_count, count);
		return -ERANGE;
	}

	return 0;
}
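
/*
 * A non-zero return makes the caller (presumably native_cpu_disable())
 * abort the hot-unplug before this cpu drops out of cpu_online_mask.
 */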

/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
void fixup_irqs(void)
{
	unsigned int irq, vector;
	static int warned;
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;
	int ret;

	for_each_irq_desc(irq, desc) {
		int break_affinity = 0;
		int set_affinity = 1;
		const struct cpumask *affinity;

		if (!desc)
			continue;
		if (irq == 2)
			continue;

		/* interrupts are disabled at this point */
		raw_spin_lock(&desc->lock);

		data = irq_desc_get_irq_data(desc);
		affinity = data->affinity;
		if (!irq_has_action(irq) || irqd_is_per_cpu(data) ||
		    cpumask_subset(affinity, cpu_online_mask)) {
			raw_spin_unlock(&desc->lock);
			continue;
		}

		/*
		 * Complete the irq move. This cpu is going down and for
		 * non intr-remapping case, we can't wait till this interrupt
		 * arrives at this cpu before completing the irq move.
		 */
		irq_force_complete_move(irq);

		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
			break_affinity = 1;
			affinity = cpu_online_mask;
		}

		chip = irq_data_get_irq_chip(data);
		if (!irqd_can_move_in_process_context(data) && chip->irq_mask)
			chip->irq_mask(data);

		if (chip->irq_set_affinity) {
			ret = chip->irq_set_affinity(data, affinity, true);
			if (ret == -ENOSPC)
				pr_crit("IRQ %d set affinity failed because there are no available vectors. The device assigned to this IRQ is unstable.\n", irq);
		} else {
			if (!(warned++))
				set_affinity = 0;
		}

		/*
		 * We unmask if the irq was not marked masked by the
		 * core code. That respects the lazy irq disable
		 * behaviour.
		 */
		if (!irqd_can_move_in_process_context(data) &&
		    !irqd_irq_masked(data) && chip->irq_unmask)
			chip->irq_unmask(data);

		raw_spin_unlock(&desc->lock);

		if (break_affinity && set_affinity)
			pr_notice("Broke affinity for irq %i\n", irq);
		else if (!set_affinity)
			pr_notice("Cannot set affinity for irq %i\n", irq);
	}

	/*
	 * We can remove mdelay() and then send spurious interrupts to
	 * new cpu targets for all the irqs that were handled previously by
	 * this cpu. While it works, I have seen spurious interrupt messages
	 * (nothing wrong but still...).
	 *
	 * So for now, retain mdelay(1) and check the IRR and then send those
	 * interrupts to new targets as this cpu is already offlined...
	 */
	mdelay(1);

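	/*
	 * Any vector still pending in this cpu's IRR was in flight to the
	 * old target; re-inject it at its new destination via
	 * chip->irq_retrigger and mark the slot VECTOR_RETRIGGERED so a
	 * stale delivery here does not trigger the "No irq handler" splat.
	 */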
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		unsigned int irr;

		if (__this_cpu_read(vector_irq[vector]) <= VECTOR_UNDEFINED)
			continue;

		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		if (irr & (1 << (vector % 32))) {
			irq = __this_cpu_read(vector_irq[vector]);

			desc = irq_to_desc(irq);
			data = irq_desc_get_irq_data(desc);
			chip = irq_data_get_irq_chip(data);
			raw_spin_lock(&desc->lock);
			if (chip->irq_retrigger) {
				chip->irq_retrigger(data);
				__this_cpu_write(vector_irq[vector], VECTOR_RETRIGGERED);
			}
			raw_spin_unlock(&desc->lock);
		}
		if (__this_cpu_read(vector_irq[vector]) != VECTOR_RETRIGGERED)
			__this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
	}
}
#endif