/*
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86-specific interrupt
 * entry, irq-stacks and irq statistics code. All the remaining
 * irq logic is done by the generic kernel/irq/ code and
 * by the x86-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */

#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/percpu.h>

#include <asm/apic.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);

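/*
 * irq_regs caches a pointer to the interrupted context's pt_regs for
 * the duration of an interrupt, so that handlers can reach it through
 * get_irq_regs() instead of having it passed down every call chain.
 */
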
#ifdef CONFIG_DEBUG_STACKOVERFLOW
/* Debugging check for stack overflow: is there less than 1KB free? */
static int check_stack_overflow(void)
{
        long sp;

        __asm__ __volatile__("andl %%esp,%0" :
                             "=r" (sp) : "0" (THREAD_SIZE - 1));

        return sp < (sizeof(struct thread_info) + STACK_WARN);
}
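
/*
 * The andl above masks %esp with THREAD_SIZE - 1, leaving the offset
 * of the stack pointer within the THREAD_SIZE-aligned thread stack.
 * A worked example, assuming THREAD_SIZE == 8192 (so STACK_WARN ==
 * THREAD_SIZE / 8 == 1024): with %esp == 0xc12fe1f0 the mask yields
 * sp == 0x1f0, i.e. %esp sits only 496 bytes above the base of the
 * stack region where struct thread_info lives, so the check fires.
 */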

static void print_stack_overflow(void)
{
        printk(KERN_WARNING "low stack detected by irq handler\n");
        dump_stack();
}

#else
static inline int check_stack_overflow(void) { return 0; }
static inline void print_stack_overflow(void) { }
#endif

#ifdef CONFIG_4KSTACKS
/*
 * per-CPU IRQ handling contexts (thread information and stack)
 */
union irq_ctx {
        struct thread_info      tinfo;
        u32                     stack[THREAD_SIZE/sizeof(u32)];
} __attribute__((aligned(PAGE_SIZE)));
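
/*
 * Layout note: tinfo occupies the lowest addresses of the union and
 * the stack grows down from the top of the same region, mirroring a
 * normal task stack. Since THREAD_SIZE == PAGE_SIZE under
 * CONFIG_4KSTACKS, the PAGE_SIZE alignment is what keeps the
 * mask-%esp trick behind current_thread_info() working while code
 * runs on one of these stacks.
 */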

static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);

static DEFINE_PER_CPU_PAGE_ALIGNED(union irq_ctx, hardirq_stack);
static DEFINE_PER_CPU_PAGE_ALIGNED(union irq_ctx, softirq_stack);

static void call_on_stack(void *func, void *stack)
{
        asm volatile("xchgl     %%ebx,%%esp     \n"
                     "call      *%%edi          \n"
                     "movl      %%ebx,%%esp     \n"
                     : "=b" (stack)
                     : "0" (stack),
                       "D"(func)
                     : "memory", "cc", "edx", "ecx", "eax");
}
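
/*
 * In the asm above, xchgl swaps %esp with the new stack top held in
 * %ebx, call then runs func on that stack, and movl restores the
 * original %esp that the xchgl parked in %ebx. %ebx works as the
 * carrier because it is callee-saved on i386 and so survives the
 * call; the caller-saved registers %eax, %ecx and %edx are declared
 * as clobbers instead.
 */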

static inline int
execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
{
        union irq_ctx *curctx, *irqctx;
        u32 *isp, arg1, arg2;

        curctx = (union irq_ctx *) current_thread_info();
        irqctx = __get_cpu_var(hardirq_ctx);

        /*
         * this is where we switch to the IRQ stack. However, if we are
         * already using the IRQ stack (because we interrupted a hardirq
         * handler) we can't do that and just have to keep using the
         * current stack (which is the irq stack already after all)
         */
        if (unlikely(curctx == irqctx))
                return 0;

        /* build the stack frame on the IRQ stack */
        isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
        irqctx->tinfo.task = curctx->tinfo.task;
        irqctx->tinfo.previous_esp = current_stack_pointer;

        /*
         * Copy the softirq bits in preempt_count so that the
         * softirq checks work in the hardirq context.
         */
        irqctx->tinfo.preempt_count =
                (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
                (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
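
        /*
         * Example: if the interrupt arrived while the task was inside
         * a softirq, the copy above carries the SOFTIRQ bits over, so
         * in_softirq() still reports true while we run on the hardirq
         * stack, without disturbing this stack's own hardirq
         * accounting.
         */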

        if (unlikely(overflow))
                call_on_stack(print_stack_overflow, isp);

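        /*
         * The constraints below preload %eax with irq and %edx with
         * desc before the indirect call; assuming the usual 32-bit
         * kernel build with -mregparm=3, that is exactly where
         * desc->handle_irq(irq, desc) expects its two arguments.
         */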
        asm volatile("xchgl     %%ebx,%%esp     \n"
                     "call      *%%edi          \n"
                     "movl      %%ebx,%%esp     \n"
                     : "=a" (arg1), "=d" (arg2), "=b" (isp)
                     : "0" (irq), "1" (desc), "2" (isp),
                       "D" (desc->handle_irq)
                     : "memory", "cc", "ecx");
        return 1;
}

/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void __cpuinit irq_ctx_init(int cpu)
{
        union irq_ctx *irqctx;

        if (per_cpu(hardirq_ctx, cpu))
                return;

        irqctx = &per_cpu(hardirq_stack, cpu);
        irqctx->tinfo.task              = NULL;
        irqctx->tinfo.exec_domain       = NULL;
        irqctx->tinfo.cpu               = cpu;
        irqctx->tinfo.preempt_count     = HARDIRQ_OFFSET;
        irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);
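
        /*
         * Presetting preempt_count to HARDIRQ_OFFSET makes in_irq()
         * report true whenever code runs on this stack; the softirq
         * stack below starts at 0 instead, because __do_softirq()
         * performs its own softirq accounting.
         */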

        per_cpu(hardirq_ctx, cpu) = irqctx;

        irqctx = &per_cpu(softirq_stack, cpu);
        irqctx->tinfo.task              = NULL;
        irqctx->tinfo.exec_domain       = NULL;
        irqctx->tinfo.cpu               = cpu;
        irqctx->tinfo.preempt_count     = 0;
        irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);

        per_cpu(softirq_ctx, cpu) = irqctx;

        printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
               cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
}

void irq_ctx_exit(int cpu)
{
        per_cpu(hardirq_ctx, cpu) = NULL;
}

asmlinkage void do_softirq(void)
{
        unsigned long flags;
        struct thread_info *curctx;
        union irq_ctx *irqctx;
        u32 *isp;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        if (local_softirq_pending()) {
                curctx = current_thread_info();
                irqctx = __get_cpu_var(softirq_ctx);
                irqctx->tinfo.task = curctx->task;
                irqctx->tinfo.previous_esp = current_stack_pointer;
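
                /*
                 * previous_esp records where the interrupted stack
                 * left off, which is what lets a backtrace taken on
                 * this stack be chained back to the frames of the
                 * interrupted context.
                 */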

                /* build the stack frame on the softirq stack */
                isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));

                call_on_stack(__do_softirq, isp);
                /*
                 * Shouldn't happen, we returned above if in_interrupt():
                 */
                WARN_ON_ONCE(softirq_count());
        }

        local_irq_restore(flags);
}

#else
static inline int
execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) { return 0; }
#endif

bool handle_irq(unsigned irq, struct pt_regs *regs)
{
        struct irq_desc *desc;
        int overflow;

        overflow = check_stack_overflow();

        desc = irq_to_desc(irq);
        if (unlikely(!desc))
                return false;

        if (!execute_on_irq_stack(overflow, desc, irq)) {
                if (unlikely(overflow))
                        print_stack_overflow();
                desc->handle_irq(irq, desc);
        }

        return true;
}
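
/*
 * Returning false tells the do_IRQ() caller that no descriptor exists
 * for this vector, i.e. the interrupt was unexpected and nothing here
 * could handle it.
 */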

#ifdef CONFIG_HOTPLUG_CPU

/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
void fixup_irqs(void)
{
        unsigned int irq;
        struct irq_desc *desc;

        for_each_irq_desc(irq, desc) {
                const struct cpumask *affinity;

                if (!desc)
                        continue;
                if (irq == 2)
                        continue;
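
                /*
                 * irq 2 is skipped above because it is the cascade
                 * input from the slave i8259 PIC; it never carries a
                 * real handler and its affinity must stay untouched.
                 */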

                affinity = desc->affinity;
                if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
                        printk("Breaking affinity for irq %i\n", irq);
                        affinity = cpu_all_mask;
                }
                if (desc->chip->set_affinity)
                        desc->chip->set_affinity(irq, affinity);
                else if (desc->action)
                        printk_once("Cannot set affinity for irq %i\n", irq);
        }

#if 0
        barrier();
        /* Ingo Molnar says: "after the IO-APIC masks have been redirected
           [note the nop - the interrupt-enable boundary on x86 is two
           instructions from sti] - to flush out pending hardirqs and
           IPIs. After this point nothing is supposed to reach this CPU." */
        __asm__ __volatile__("sti; nop; cli");
        barrier();
#else
        /* That doesn't seem sufficient. Give it 1ms. */
        local_irq_enable();
        mdelay(1);
        local_irq_disable();
#endif
}
#endif