Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/arch/i386/kernel/irq.c | |
3 | * | |
4 | * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar | |
5 | * | |
6 | * This file contains the lowest level x86-specific interrupt | |
7 | * entry, irq-stacks and irq statistics code. All the remaining | |
8 | * irq logic is done by the generic kernel/irq/ code and | |
9 | * by the x86-specific irq controller code. (e.g. i8259.c and | |
10 | * io_apic.c.) | |
11 | */ | |
12 | ||
1da177e4 LT |
13 | #include <linux/module.h> |
14 | #include <linux/seq_file.h> | |
15 | #include <linux/interrupt.h> | |
16 | #include <linux/kernel_stat.h> | |
f3705136 ZM |
17 | #include <linux/notifier.h> |
18 | #include <linux/cpu.h> | |
19 | #include <linux/delay.h> | |
1da177e4 | 20 | |
e05d723f TG |
21 | #include <asm/apic.h> |
22 | #include <asm/uaccess.h> | |
23 | ||
f34e3b61 | 24 | DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); |
1da177e4 LT |
25 | EXPORT_PER_CPU_SYMBOL(irq_stat); |
26 | ||
7c3576d2 JF |
27 | DEFINE_PER_CPU(struct pt_regs *, irq_regs); |
28 | EXPORT_PER_CPU_SYMBOL(irq_regs); | |
29 | ||
1da177e4 LT |
30 | /* |
31 | * 'what should we do if we get a hw irq event on an illegal vector'. | |
32 | * each architecture has to answer this themselves. | |
33 | */ | |
34 | void ack_bad_irq(unsigned int irq) | |
35 | { | |
e05d723f TG |
36 | printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq); |
37 | ||
38 | #ifdef CONFIG_X86_LOCAL_APIC | |
39 | /* | |
40 | * Currently unexpected vectors happen only on SMP and APIC. | |
41 | * We _must_ ack these because every local APIC has only N | |
42 | * irq slots per priority level, and a 'hanging, unacked' IRQ | |
43 | * holds up an irq slot - in excessive cases (when multiple | |
44 | * unexpected vectors occur) that might lock up the APIC | |
45 | * completely. | |
46 | * But only ack when the APIC is enabled -AK | |
47 | */ | |
48 | if (cpu_has_apic) | |
49 | ack_APIC_irq(); | |
1da177e4 | 50 | #endif |
e05d723f | 51 | } |
1da177e4 LT |
52 | |
#ifdef CONFIG_4KSTACKS
/*
 * Per-CPU IRQ handling contexts: a thread_info overlaid on the base
 * of a THREAD_SIZE-sized stack, one context each for hardirq and
 * softirq processing.
 */
union irq_ctx {
	struct thread_info tinfo;
	u32 stack[THREAD_SIZE/sizeof(u32)];
};

/* Read-mostly after boot: written once at setup, then only read. */
static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
#endif
65 | ||
66 | /* | |
67 | * do_IRQ handles all normal device IRQ's (the special | |
68 | * SMP cross-CPU interrupts have their own specific | |
69 | * handlers). | |
70 | */ | |
71 | fastcall unsigned int do_IRQ(struct pt_regs *regs) | |
72 | { | |
7d12e780 | 73 | struct pt_regs *old_regs; |
19eadf98 RR |
74 | /* high bit used in ret_from_ code */ |
75 | int irq = ~regs->orig_eax; | |
f5b9ed7a | 76 | struct irq_desc *desc = irq_desc + irq; |
1da177e4 LT |
77 | #ifdef CONFIG_4KSTACKS |
78 | union irq_ctx *curctx, *irqctx; | |
79 | u32 *isp; | |
80 | #endif | |
81 | ||
a052b68b AM |
82 | if (unlikely((unsigned)irq >= NR_IRQS)) { |
83 | printk(KERN_EMERG "%s: cannot handle IRQ %d\n", | |
84 | __FUNCTION__, irq); | |
85 | BUG(); | |
86 | } | |
87 | ||
7d12e780 | 88 | old_regs = set_irq_regs(regs); |
1da177e4 LT |
89 | irq_enter(); |
90 | #ifdef CONFIG_DEBUG_STACKOVERFLOW | |
91 | /* Debugging check for stack overflow: is there less than 1KB free? */ | |
92 | { | |
93 | long esp; | |
94 | ||
95 | __asm__ __volatile__("andl %%esp,%0" : | |
96 | "=r" (esp) : "0" (THREAD_SIZE - 1)); | |
97 | if (unlikely(esp < (sizeof(struct thread_info) + STACK_WARN))) { | |
98 | printk("do_IRQ: stack overflow: %ld\n", | |
99 | esp - sizeof(struct thread_info)); | |
100 | dump_stack(); | |
101 | } | |
102 | } | |
103 | #endif | |
104 | ||
105 | #ifdef CONFIG_4KSTACKS | |
106 | ||
107 | curctx = (union irq_ctx *) current_thread_info(); | |
108 | irqctx = hardirq_ctx[smp_processor_id()]; | |
109 | ||
110 | /* | |
111 | * this is where we switch to the IRQ stack. However, if we are | |
112 | * already using the IRQ stack (because we interrupted a hardirq | |
113 | * handler) we can't do that and just have to keep using the | |
114 | * current stack (which is the irq stack already after all) | |
115 | */ | |
116 | if (curctx != irqctx) { | |
7d12e780 | 117 | int arg1, arg2, ebx; |
1da177e4 LT |
118 | |
119 | /* build the stack frame on the IRQ stack */ | |
120 | isp = (u32*) ((char*)irqctx + sizeof(*irqctx)); | |
121 | irqctx->tinfo.task = curctx->tinfo.task; | |
122 | irqctx->tinfo.previous_esp = current_stack_pointer; | |
123 |