arch/x86/kernel/irq_32.c
/*
 * linux/arch/i386/kernel/irq.c
 *
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86-specific interrupt
 * entry, irq-stacks and irq statistics code. All the remaining
 * irq logic is done by the generic kernel/irq/ code and
 * by the x86-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */

#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/delay.h>

#include <asm/apic.h>
#include <asm/uaccess.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);

/*
 * What should we do if we get a hw irq event on an illegal vector?
 * Each architecture has to answer this itself.
 */
void ack_bad_irq(unsigned int irq)
{
        printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq);

#ifdef CONFIG_X86_LOCAL_APIC
        /*
         * Currently unexpected vectors happen only on SMP and APIC.
         * We _must_ ack these because every local APIC has only N
         * irq slots per priority level, and a 'hanging, unacked' IRQ
         * holds up an irq slot - in excessive cases (when multiple
         * unexpected vectors occur) that might lock up the APIC
         * completely.
         * But only ack when the APIC is enabled -AK
         */
        if (cpu_has_apic)
                ack_APIC_irq();
#endif
}

#ifdef CONFIG_4KSTACKS
/*
 * per-CPU IRQ handling contexts (thread information and stack);
 * the union mirrors a regular task stack: the thread_info sits
 * at the bottom and the stack itself grows down from the top.
 */
union irq_ctx {
        struct thread_info tinfo;
        u32 stack[THREAD_SIZE/sizeof(u32)];
};

static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
#endif

/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
fastcall unsigned int do_IRQ(struct pt_regs *regs)
{
        struct pt_regs *old_regs;
        /* the IRQ stubs push ~irq; the high bit is used in the ret_from_ code */
        int irq = ~regs->orig_eax;
        struct irq_desc *desc = irq_desc + irq;
#ifdef CONFIG_4KSTACKS
        union irq_ctx *curctx, *irqctx;
        u32 *isp;
#endif

        if (unlikely((unsigned)irq >= NR_IRQS)) {
                printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
                                        __FUNCTION__, irq);
                BUG();
        }

        old_regs = set_irq_regs(regs);
        irq_enter();
#ifdef CONFIG_DEBUG_STACKOVERFLOW
        /* Debugging check for stack overflow: is there less than 1KB free? */
        {
                long esp;

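                /*
                 * Masking %esp with THREAD_SIZE-1 yields the offset into
                 * the current THREAD_SIZE-aligned stack; thread_info lives
                 * at the stack's low end, so a small offset means the
                 * stack is nearly exhausted.
                 */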
                __asm__ __volatile__("andl %%esp,%0" :
                                        "=r" (esp) : "0" (THREAD_SIZE - 1));
                if (unlikely(esp < (sizeof(struct thread_info) + STACK_WARN))) {
                        printk(KERN_WARNING "do_IRQ: stack overflow: %ld\n",
                               esp - sizeof(struct thread_info));
                        dump_stack();
                }
        }
#endif

#ifdef CONFIG_4KSTACKS

        curctx = (union irq_ctx *) current_thread_info();
        irqctx = hardirq_ctx[smp_processor_id()];

        /*
         * this is where we switch to the IRQ stack. However, if we are
         * already using the IRQ stack (because we interrupted a hardirq
         * handler) we can't do that and just have to keep using the
         * current stack (which is the irq stack already after all)
         */
        if (curctx != irqctx) {
                int arg1, arg2, ebx;

                /* build the stack frame on the IRQ stack */
                isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
                irqctx->tinfo.task = curctx->tinfo.task;
                irqctx->tinfo.previous_esp = current_stack_pointer;

                /*
                 * Copy the softirq bits in preempt_count so that the
                 * softirq checks work in the hardirq context.
                 */
                irqctx->tinfo.preempt_count =
                        (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
                        (curctx->tinfo.preempt_count & SOFTIRQ_MASK);

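                /*
                 * Call desc->handle_irq(irq, desc) on the IRQ stack: the
                 * two arguments travel in %eax and %edx (the kernel's
                 * regparm calling convention), the handler address sits
                 * in %edi, and %ebx carries the new stack pointer, which
                 * xchgl swaps into %esp around the call.
                 */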
                asm volatile(
                        "       xchgl  %%ebx,%%esp      \n"
                        "       call   *%%edi           \n"
                        "       movl   %%ebx,%%esp      \n"
                        : "=a" (arg1), "=d" (arg2), "=b" (ebx)
                        :  "0" (irq),   "1" (desc),  "2" (isp),
                           "D" (desc->handle_irq)
                        :  "memory", "cc"
                );
        } else
#endif
                desc->handle_irq(irq, desc);

        irq_exit();
        set_irq_regs(old_regs);
        return 1;
}

#ifdef CONFIG_4KSTACKS

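/*
 * One THREAD_SIZE stack per possible CPU for each of softirq and
 * hardirq processing, carved out of page-aligned BSS and indexed
 * by CPU number in irq_ctx_init().
 */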
static char softirq_stack[NR_CPUS * THREAD_SIZE]
                __attribute__((__section__(".bss.page_aligned")));

static char hardirq_stack[NR_CPUS * THREAD_SIZE]
                __attribute__((__section__(".bss.page_aligned")));

/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void irq_ctx_init(int cpu)
{
        union irq_ctx *irqctx;

        if (hardirq_ctx[cpu])
                return;

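        /*
         * Seed the hardirq context's thread_info: preempt_count starts
         * at HARDIRQ_OFFSET, so code running on this stack already
         * reads as being in hardirq context.
         */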
        irqctx = (union irq_ctx*) &hardirq_stack[cpu*THREAD_SIZE];
        irqctx->tinfo.task         = NULL;
        irqctx->tinfo.exec_domain  = NULL;
        irqctx->tinfo.cpu          = cpu;
        irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
        irqctx->tinfo.addr_limit   = MAKE_MM_SEG(0);

        hardirq_ctx[cpu] = irqctx;

        irqctx = (union irq_ctx*) &softirq_stack[cpu*THREAD_SIZE];
        irqctx->tinfo.task         = NULL;
        irqctx->tinfo.exec_domain  = NULL;
        irqctx->tinfo.cpu          = cpu;
        irqctx->tinfo.preempt_count = 0;
        irqctx->tinfo.addr_limit   = MAKE_MM_SEG(0);

        softirq_ctx[cpu] = irqctx;

        printk("CPU %u irqstacks, hard=%p soft=%p\n",
               cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
}

void irq_ctx_exit(int cpu)
{
        hardirq_ctx[cpu] = NULL;
}

extern asmlinkage void __do_softirq(void);

asmlinkage void do_softirq(void)
{
        unsigned long flags;
        struct thread_info *curctx;
        union irq_ctx *irqctx;
        u32 *isp;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        if (local_softirq_pending()) {
                curctx = current_thread_info();
                irqctx = softirq_ctx[smp_processor_id()];
                irqctx->tinfo.task = curctx->task;
                irqctx->tinfo.previous_esp = current_stack_pointer;

                /* build the stack frame on the softirq stack */
                isp = (u32*) ((char*)irqctx + sizeof(*irqctx));

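                /*
                 * Run __do_softirq() on the softirq stack: %ebx holds
                 * the new stack pointer and is swapped with %esp around
                 * the call; %eax, %ecx and %edx are clobbered as
                 * caller-saved registers.
                 */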
                asm volatile(
                        "       xchgl   %%ebx,%%esp     \n"
                        "       call    __do_softirq    \n"
                        "       movl    %%ebx,%%esp     \n"
                        : "=b"(isp)
                        : "0"(isp)
                        : "memory", "cc", "edx", "ecx", "eax"
                );
                /*
                 * Shouldn't happen, we returned above if in_interrupt():
                 */
                WARN_ON_ONCE(softirq_count());
        }

        local_irq_restore(flags);
}
#endif

/*
 * Interrupt statistics:
 */

atomic_t irq_err_count;

/*
 * /proc/interrupts printing:
 */

int show_interrupts(struct seq_file *p, void *v)
{
        int i = *(loff_t *) v, j;
        struct irqaction *action;
        unsigned long flags;

        if (i == 0) {
                seq_printf(p, "           ");
                for_each_online_cpu(j)
                        seq_printf(p, "CPU%-8d", j);
                seq_putc(p, '\n');
        }

        if (i < NR_IRQS) {
                spin_lock_irqsave(&irq_desc[i].lock, flags);
                action = irq_desc[i].action;
                if (!action)
                        goto skip;
                seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
                seq_printf(p, "%10u ", kstat_irqs(i));
#else
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
                seq_printf(p, " %8s", irq_desc[i].chip->name);
                seq_printf(p, "-%-8s", irq_desc[i].name);
                seq_printf(p, "  %s", action->name);

                for (action = action->next; action; action = action->next)
                        seq_printf(p, ", %s", action->name);

                seq_putc(p, '\n');
skip:
                spin_unlock_irqrestore(&irq_desc[i].lock, flags);
        } else if (i == NR_IRQS) {
                seq_printf(p, "NMI: ");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", nmi_count(j));
                seq_putc(p, '\n');
#ifdef CONFIG_X86_LOCAL_APIC
                seq_printf(p, "LOC: ");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ",
                                per_cpu(irq_stat,j).apic_timer_irqs);
                seq_putc(p, '\n');
#endif
                seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
                seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
#endif
        }
        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
#include <mach_apic.h>

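/*
 * When a CPU goes offline, steer every IRQ whose affinity mask would
 * otherwise be left without an online CPU towards the CPUs remaining
 * in 'map'; IRQ2 (the cascade) is never touched.
 */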
void fixup_irqs(cpumask_t map)
{
        unsigned int irq;
        static int warned;

        for (irq = 0; irq < NR_IRQS; irq++) {
                cpumask_t mask;
                if (irq == 2)
                        continue;

                cpus_and(mask, irq_desc[irq].affinity, map);
                if (any_online_cpu(mask) == NR_CPUS) {
                        printk("Breaking affinity for irq %i\n", irq);
                        mask = map;
                }
                if (irq_desc[irq].chip->set_affinity)
                        irq_desc[irq].chip->set_affinity(irq, mask);
                else if (irq_desc[irq].action && !(warned++))
                        printk("Cannot set affinity for irq %i\n", irq);
        }

#if 0
        barrier();
        /* Ingo Molnar says: "after the IO-APIC masks have been redirected
           [note the nop - the interrupt-enable boundary on x86 is two
           instructions from sti] - to flush out pending hardirqs and
           IPIs. After this point nothing is supposed to reach this CPU." */
        __asm__ __volatile__("sti; nop; cli");
        barrier();
#else
        /* That doesn't seem sufficient. Give it 1ms. */
        local_irq_enable();
        mdelay(1);
        local_irq_disable();
#endif
}
#endif