/*
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86-specific interrupt
 * entry, irq-stacks and irq statistics code. All the remaining
 * irq logic is done by the generic kernel/irq/ code and
 * by the x86-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/delay.h>

#include <asm/uaccess.h>
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);
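/*
 * irq_stat holds the per-CPU interrupt counters (apic_timer_irqs,
 * irq_resched_count, ...) reported by show_interrupts() and the
 * arch_irq_stat helpers below; irq_regs caches the pt_regs of the
 * interrupt currently being serviced, maintained via set_irq_regs()
 * in do_IRQ().
 */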
/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves.
 */
void ack_bad_irq(unsigned int irq)
{
	printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq);

#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Currently unexpected vectors happen only on SMP and APIC.
	 * We _must_ ack these because every local APIC has only N
	 * irq slots per priority level, and a 'hanging, unacked' IRQ
	 * holds up an irq slot - in excessive cases (when multiple
	 * unexpected vectors occur) that might lock up the APIC
	 * completely.
	 * But only ack when the APIC is enabled -AK
	 */
	if (cpu_has_apic)
		ack_APIC_irq();
#endif
}
#ifdef CONFIG_DEBUG_STACKOVERFLOW
/* Debugging check for stack overflow: is there less than 1KB free? */
static int check_stack_overflow(void)
{
	long sp;

	/* Mask %esp to get its offset within the THREAD_SIZE-aligned stack. */
	__asm__ __volatile__("andl %%esp,%0" :
			     "=r" (sp) : "0" (THREAD_SIZE - 1));

	/* Warn when %esp has come within STACK_WARN bytes of thread_info. */
	return sp < (sizeof(struct thread_info) + STACK_WARN);
}

static void print_stack_overflow(void)
{
	printk(KERN_WARNING "low stack detected by irq handler\n");
}

#else
static inline int check_stack_overflow(void) { return 0; }
static inline void print_stack_overflow(void) { }
#endif
#ifdef CONFIG_4KSTACKS
/*
 * per-CPU IRQ handling contexts (thread information and stack)
 */
union irq_ctx {
	struct thread_info	tinfo;
	u32			stack[THREAD_SIZE/sizeof(u32)];
};

static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;

static char softirq_stack[NR_CPUS * THREAD_SIZE]
		__attribute__((__section__(".bss.page_aligned")));

static char hardirq_stack[NR_CPUS * THREAD_SIZE]
		__attribute__((__section__(".bss.page_aligned")));
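/*
 * call_on_stack() switches %esp to the given stack, calls func there and
 * then restores the original stack pointer; it is used below to run
 * print_stack_overflow() and __do_softirq() on the per-CPU irq stacks.
 */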
static void call_on_stack(void *func, void *stack)
{
	asm volatile("xchgl	%%ebx,%%esp	\n"	/* switch to the new stack */
		     "call	*%%edi		\n"	/* call func */
		     "movl	%%ebx,%%esp	\n"	/* switch back */
		     : "=b" (stack)
		     : "0" (stack),
		       "D" (func)
		     : "memory", "cc", "edx", "ecx", "eax");
}
static inline int
execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
{
	union irq_ctx *curctx, *irqctx;
	u32 *isp, arg1, arg2;

	curctx = (union irq_ctx *) current_thread_info();
	irqctx = hardirq_ctx[smp_processor_id()];
	/*
	 * this is where we switch to the IRQ stack. However, if we are
	 * already using the IRQ stack (because we interrupted a hardirq
	 * handler) we can't do that and just have to keep using the
	 * current stack (which is the irq stack already after all)
	 */
	if (unlikely(curctx == irqctx))
		return 0;

	/* build the stack frame on the IRQ stack */
	isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
	irqctx->tinfo.task = curctx->tinfo.task;
	irqctx->tinfo.previous_esp = current_stack_pointer;
	/*
	 * Copy the softirq bits in preempt_count so that the
	 * softirq checks work in the hardirq context.
	 */
	irqctx->tinfo.preempt_count =
		(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
		(curctx->tinfo.preempt_count & SOFTIRQ_MASK);
	if (unlikely(overflow))
		call_on_stack(print_stack_overflow, isp);
	/* Call desc->handle_irq(irq, desc) with %esp switched to the irq stack. */
	asm volatile("xchgl	%%ebx,%%esp	\n"
		     "call	*%%edi		\n"
		     "movl	%%ebx,%%esp	\n"
		     : "=a" (arg1), "=d" (arg2), "=b" (isp)
		     :  "0" (irq),   "1" (desc),  "2" (isp),
			"D" (desc->handle_irq)
		     : "memory", "cc", "ecx");
	return 1;
}
/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void __cpuinit irq_ctx_init(int cpu)
{
	union irq_ctx *irqctx;

	if (hardirq_ctx[cpu])
		return;
	irqctx = (union irq_ctx *) &hardirq_stack[cpu * THREAD_SIZE];
	irqctx->tinfo.task		= NULL;
	irqctx->tinfo.exec_domain	= NULL;
	irqctx->tinfo.cpu		= cpu;
	irqctx->tinfo.preempt_count	= HARDIRQ_OFFSET;
	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);

	hardirq_ctx[cpu] = irqctx;

	irqctx = (union irq_ctx *) &softirq_stack[cpu * THREAD_SIZE];
	irqctx->tinfo.task		= NULL;
	irqctx->tinfo.exec_domain	= NULL;
	irqctx->tinfo.cpu		= cpu;
	irqctx->tinfo.preempt_count	= 0;
	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);

	softirq_ctx[cpu] = irqctx;

	printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
	       cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
}
void irq_ctx_exit(int cpu)
{
	hardirq_ctx[cpu] = NULL;
}
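/*
 * With 4K stacks the architecture provides its own do_softirq() entry so
 * that softirq processing runs on the dedicated per-CPU softirq stack
 * (softirq_ctx) instead of the small task stack.
 */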
asmlinkage void do_softirq(void)
{
	unsigned long flags;
	struct thread_info *curctx;
	union irq_ctx *irqctx;
	u32 *isp;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		curctx = current_thread_info();
		irqctx = softirq_ctx[smp_processor_id()];
		irqctx->tinfo.task = curctx->task;
		irqctx->tinfo.previous_esp = current_stack_pointer;

		/* build the stack frame on the softirq stack */
		isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));

		call_on_stack(__do_softirq, isp);
		/*
		 * Shouldn't happen, we returned above if in_interrupt():
		 */
		WARN_ON_ONCE(softirq_count());
	}

	local_irq_restore(flags);
}
#else
static inline int
execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) { return 0; }
#endif
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	/* high bit used in ret_from_ code */
	int overflow, irq = ~regs->orig_ax;
	struct irq_desc *desc = irq_desc + irq;

	if (unlikely((unsigned)irq >= NR_IRQS)) {
		printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
					__func__, irq);
		BUG();
	}

	old_regs = set_irq_regs(regs);
	irq_enter();
	overflow = check_stack_overflow();

	if (!execute_on_irq_stack(overflow, desc, irq)) {
		if (unlikely(overflow))
			print_stack_overflow();
		desc->handle_irq(irq, desc);
	}

	irq_exit();
	set_irq_regs(old_regs);
	return 1;
}
/*
 * Interrupt statistics:
 */

atomic_t irq_err_count;
/*
 * /proc/interrupts printing:
 */

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction *action;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%-8d", j);
		seq_putc(p, '\n');
	}
	if (i < NR_IRQS) {
		unsigned any_count = 0;

		spin_lock_irqsave(&irq_desc[i].lock, flags);
#ifndef CONFIG_SMP
		any_count = kstat_irqs(i);
#else
		for_each_online_cpu(j)
			any_count |= kstat_cpu(j).irqs[i];
#endif
		action = irq_desc[i].action;
		if (!action && !any_count)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
		seq_printf(p, " %8s", irq_desc[i].chip->name);
		seq_printf(p, "-%-8s", irq_desc[i].name);
		if (action) {
			seq_printf(p, "  %s", action->name);
			while ((action = action->next) != NULL)
				seq_printf(p, ", %s", action->name);
		}

		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == NR_IRQS) {
		seq_printf(p, "NMI: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", nmi_count(j));
		seq_printf(p, "  Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
		seq_printf(p, "LOC: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
				per_cpu(irq_stat, j).apic_timer_irqs);
		seq_printf(p, "  Local timer interrupts\n");
#endif
#ifdef CONFIG_SMP
		seq_printf(p, "RES: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
				per_cpu(irq_stat, j).irq_resched_count);
		seq_printf(p, "  Rescheduling interrupts\n");
		seq_printf(p, "CAL: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
				per_cpu(irq_stat, j).irq_call_count);
		seq_printf(p, "  function call interrupts\n");
		seq_printf(p, "TLB: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
				per_cpu(irq_stat, j).irq_tlb_count);
		seq_printf(p, "  TLB shootdowns\n");
#endif
#ifdef CONFIG_X86_MCE
		seq_printf(p, "TRM: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
				per_cpu(irq_stat, j).irq_thermal_count);
		seq_printf(p, "  Thermal event interrupts\n");
#endif
#ifdef CONFIG_X86_LOCAL_APIC
		seq_printf(p, "SPU: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
				per_cpu(irq_stat, j).irq_spurious_count);
		seq_printf(p, "  Spurious interrupts\n");
#endif
		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
		seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
#endif
	}
	return 0;
}
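/*
 * /proc/stat helpers: the per-CPU and global totals below are summed into
 * the "intr" line of /proc/stat.
 */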
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = nmi_count(cpu);
#ifdef CONFIG_X86_LOCAL_APIC
	sum += per_cpu(irq_stat, cpu).apic_timer_irqs;
#endif
#ifdef CONFIG_SMP
	sum += per_cpu(irq_stat, cpu).irq_resched_count;
	sum += per_cpu(irq_stat, cpu).irq_call_count;
	sum += per_cpu(irq_stat, cpu).irq_tlb_count;
#endif
#ifdef CONFIG_X86_MCE
	sum += per_cpu(irq_stat, cpu).irq_thermal_count;
#endif
#ifdef CONFIG_X86_LOCAL_APIC
	sum += per_cpu(irq_stat, cpu).irq_spurious_count;
#endif
	return sum;
}
u64 arch_irq_stat(void)
{
	u64 sum = atomic_read(&irq_err_count);

#ifdef CONFIG_X86_IO_APIC
	sum += atomic_read(&irq_mis_count);
#endif
	return sum;
}
#ifdef CONFIG_HOTPLUG_CPU
#include <mach_apic.h>

/* Re-route interrupt affinities so they only target CPUs in 'map'. */
void fixup_irqs(cpumask_t map)
{
	unsigned int irq;
	static int warned;
	for (irq = 0; irq < NR_IRQS; irq++) {
		cpumask_t mask;

		if (irq == 2)
			continue;

		cpus_and(mask, irq_desc[irq].affinity, map);
		if (any_online_cpu(mask) == NR_CPUS) {
			printk("Breaking affinity for irq %i\n", irq);
			mask = map;
		}
		if (irq_desc[irq].chip->set_affinity)
			irq_desc[irq].chip->set_affinity(irq, mask);
		else if (irq_desc[irq].action && !(warned++))
			printk("Cannot set affinity for irq %i\n", irq);
	}
420 /* Ingo Molnar says: "after the IO-APIC masks have been redirected
421 [note the nop - the interrupt-enable boundary on x86 is two
422 instructions from sti] - to flush out pending hardirqs and
423 IPIs. After this point nothing is supposed to reach this CPU." */
424 __asm__
__volatile__("sti; nop; cli");
427 /* That doesn't seem sufficient. Give it 1ms. */