/*
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86-specific interrupt
 * entry, irq-stacks and irq statistics code. All the remaining
 * irq logic is done by the generic kernel/irq/ code and
 * by the x86-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */

#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/percpu.h>

#include <asm/apic.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);

#ifdef CONFIG_DEBUG_STACKOVERFLOW
/* Debugging check for stack overflow: is there less than 1KB free? */
static int check_stack_overflow(void)
{
	long sp;

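	/*
	 * Masking %esp with (THREAD_SIZE - 1) leaves only the stack
	 * pointer's offset inside the current THREAD_SIZE-aligned stack
	 * area, so a small result means %esp has grown down close to the
	 * struct thread_info that sits at the bottom of the stack.
	 */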
	__asm__ __volatile__("andl %%esp,%0" :
			     "=r" (sp) : "0" (THREAD_SIZE - 1));

	return sp < (sizeof(struct thread_info) + STACK_WARN);
}

static void print_stack_overflow(void)
{
	printk(KERN_WARNING "low stack detected by irq handler\n");
	dump_stack();
}

#else
static inline int check_stack_overflow(void) { return 0; }
static inline void print_stack_overflow(void) { }
#endif

/*
 * per-CPU IRQ handling contexts (thread information and stack)
 */
union irq_ctx {
	struct thread_info	tinfo;
	u32			stack[THREAD_SIZE/sizeof(u32)];
} __attribute__((aligned(THREAD_SIZE)));
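
/*
 * The IRQ stacks mirror the layout of an ordinary kernel thread stack:
 * a struct thread_info at the bottom of a THREAD_SIZE-aligned block,
 * with the stack growing down towards it. That way current_thread_info(),
 * which masks %esp with ~(THREAD_SIZE - 1), keeps resolving to the right
 * thread_info while a handler runs on one of these stacks.
 */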

static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);

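/*
 * Call a function on another stack: %ebx is loaded with the new stack
 * pointer, xchgl swaps it with %esp, the function is called through
 * %edi, and the original stack pointer is restored from %ebx when the
 * call returns. The remaining caller-saved registers are listed as
 * clobbers because the called function may modify them.
 */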
static void call_on_stack(void *func, void *stack)
{
	asm volatile("xchgl	%%ebx,%%esp	\n"
		     "call	*%%edi		\n"
		     "movl	%%ebx,%%esp	\n"
		     : "=b" (stack)
		     : "0" (stack),
		       "D"(func)
		     : "memory", "cc", "edx", "ecx", "eax");
}

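/*
 * Switch to this CPU's hardirq stack and run the handler there.
 * Returns 0 without doing anything if we are already on the hardirq
 * stack, in which case the caller invokes the handler on the current
 * stack instead.
 */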
static inline int
execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
{
	union irq_ctx *curctx, *irqctx;
	u32 *isp, arg1, arg2;

	curctx = (union irq_ctx *) current_thread_info();
	irqctx = __get_cpu_var(hardirq_ctx);

	/*
	 * this is where we switch to the IRQ stack. However, if we are
	 * already using the IRQ stack (because we interrupted a hardirq
	 * handler) we can't do that and just have to keep using the
	 * current stack (which is the irq stack already after all)
	 */
	if (unlikely(curctx == irqctx))
		return 0;

	/* build the stack frame on the IRQ stack */
	isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
	irqctx->tinfo.task = curctx->tinfo.task;
	irqctx->tinfo.previous_esp = current_stack_pointer;

	/*
	 * Copy the softirq bits in preempt_count so that the
	 * softirq checks work in the hardirq context.
	 */
	irqctx->tinfo.preempt_count =
		(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
		(curctx->tinfo.preempt_count & SOFTIRQ_MASK);

	if (unlikely(overflow))
		call_on_stack(print_stack_overflow, isp);

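	/*
	 * Like call_on_stack(), but with the handler's arguments set up
	 * first: the constraints place irq in %eax and desc in %edx to
	 * match the regparm(3) calling convention used by the 32-bit
	 * kernel, so desc->handle_irq() receives them as its arguments.
	 */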
	asm volatile("xchgl	%%ebx,%%esp	\n"
		     "call	*%%edi		\n"
		     "movl	%%ebx,%%esp	\n"
		     : "=a" (arg1), "=d" (arg2), "=b" (isp)
		     :  "0" (irq),   "1" (desc),  "2" (isp),
			"D" (desc->handle_irq)
		     : "memory", "cc", "ecx");
	return 1;
}

/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void __cpuinit irq_ctx_init(int cpu)
{
	union irq_ctx *irqctx;

	if (per_cpu(hardirq_ctx, cpu))
		return;

	irqctx = (union irq_ctx *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER);
	irqctx->tinfo.task		= NULL;
	irqctx->tinfo.exec_domain	= NULL;
	irqctx->tinfo.cpu		= cpu;
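	/*
	 * While code runs on this stack, current_thread_info() resolves
	 * to this thread_info, so its preempt_count is what in_irq() and
	 * friends see there; preset the hardirq bit accordingly.
	 */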
	irqctx->tinfo.preempt_count	= HARDIRQ_OFFSET;
	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);

	per_cpu(hardirq_ctx, cpu) = irqctx;

	irqctx = (union irq_ctx *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER);
	irqctx->tinfo.task		= NULL;
	irqctx->tinfo.exec_domain	= NULL;
	irqctx->tinfo.cpu		= cpu;
	irqctx->tinfo.preempt_count	= 0;
	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);

	per_cpu(softirq_ctx, cpu) = irqctx;

	printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
	       cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
}

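/*
 * Run any pending softirqs on the dedicated per-CPU softirq stack
 * rather than on whatever task stack happens to be current, keeping
 * the softirq footprint off process stacks.
 */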
asmlinkage void do_softirq(void)
{
	unsigned long flags;
	struct thread_info *curctx;
	union irq_ctx *irqctx;
	u32 *isp;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		curctx = current_thread_info();
		irqctx = __get_cpu_var(softirq_ctx);
		irqctx->tinfo.task = curctx->task;
		irqctx->tinfo.previous_esp = current_stack_pointer;

		/* build the stack frame on the softirq stack */
		isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));

		call_on_stack(__do_softirq, isp);
		/*
		 * Shouldn't happen, we returned above if in_interrupt():
		 */
		WARN_ON_ONCE(softirq_count());
	}

	local_irq_restore(flags);
}

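/*
 * Returns false if there is no irq_desc for this irq so the caller can
 * report the unexpected vector; otherwise dispatches the handler,
 * switching to the per-CPU hardirq stack when possible.
 */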
bool handle_irq(unsigned irq, struct pt_regs *regs)
{
	struct irq_desc *desc;
	int overflow;

	overflow = check_stack_overflow();

	desc = irq_to_desc(irq);
	if (unlikely(!desc))
		return false;

	if (!execute_on_irq_stack(overflow, desc, irq)) {
		if (unlikely(overflow))
			print_stack_overflow();
		desc->handle_irq(irq, desc);
	}

	return true;
}