/*
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86-specific interrupt
 * entry, irq-stacks and irq statistics code. All the remaining
 * irq logic is done by the generic kernel/irq/ code and
 * by the x86-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/percpu.h>
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);
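/*
 * Note: irq_regs caches a pointer to the interrupted context's
 * pt_regs so handlers can reach it via get_irq_regs() without
 * threading it through every call chain.
 */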
#ifdef CONFIG_DEBUG_STACKOVERFLOW
/* Debugging check for stack overflow: is there less than 1KB free? */
static int check_stack_overflow(void)
{
	long sp;

	__asm__ __volatile__("andl %%esp,%0" :
			     "=r" (sp) : "0" (THREAD_SIZE - 1));

	return sp < (sizeof(struct thread_info) + STACK_WARN);
}
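/*
 * The "andl" above masks %esp with THREAD_SIZE - 1, yielding the
 * stack pointer's offset inside the THREAD_SIZE-aligned thread
 * stack.  Stacks grow down, so an offset approaching the
 * thread_info at the bottom means we are nearly out of room.
 */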
static void print_stack_overflow(void)
{
	printk(KERN_WARNING "low stack detected by irq handler\n");
}

#else
static inline int check_stack_overflow(void) { return 0; }
static inline void print_stack_overflow(void) { }
#endif
/*
 * per-CPU IRQ handling contexts (thread information and stack)
 */
union irq_ctx {
	struct thread_info	tinfo;
	u32			stack[THREAD_SIZE/sizeof(u32)];
} __attribute__((aligned(THREAD_SIZE)));
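/*
 * Layout note: tinfo sits at the bottom of the area and the stack
 * grows down from the top, exactly mirroring an ordinary task
 * stack; code that derives thread_info by masking the stack
 * pointer keeps working on the IRQ stack.
 */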
static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
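/*
 * call_on_stack(): switch %esp to the supplied stack, call func,
 * then restore the original %esp.  The xchgl leaves the old stack
 * pointer in %ebx so the movl after the call can put it back.
 */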
static void call_on_stack(void *func, void *stack)
{
	asm volatile("xchgl	%%ebx,%%esp	\n"
		     "call	*%%edi		\n"
		     "movl	%%ebx,%%esp	\n"
		     : "=b" (stack)
		     : "0" (stack),
		       "D"(func)
		     : "memory", "cc", "edx", "ecx", "eax");
}
static inline int
execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
{
	union irq_ctx *curctx, *irqctx;
	u32 *isp, arg1, arg2;

	curctx = (union irq_ctx *) current_thread_info();
	irqctx = __get_cpu_var(hardirq_ctx);
	/*
	 * this is where we switch to the IRQ stack. However, if we are
	 * already using the IRQ stack (because we interrupted a hardirq
	 * handler) we can't do that and just have to keep using the
	 * current stack (which is the irq stack already after all)
	 */
	if (unlikely(curctx == irqctx))
		return 0;
	/* build the stack frame on the IRQ stack */
	isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
	irqctx->tinfo.task = curctx->tinfo.task;
	irqctx->tinfo.previous_esp = current_stack_pointer;
	/*
	 * Copy the softirq bits in preempt_count so that the
	 * softirq checks work in the hardirq context.
	 */
	irqctx->tinfo.preempt_count =
		(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
		(curctx->tinfo.preempt_count & SOFTIRQ_MASK);
	if (unlikely(overflow))
		call_on_stack(print_stack_overflow, isp);
	asm volatile("xchgl	%%ebx,%%esp	\n"
		     "call	*%%edi		\n"
		     "movl	%%ebx,%%esp	\n"
		     : "=a" (arg1), "=d" (arg2), "=b" (isp)
		     :  "0" (irq),   "1" (desc),  "2" (isp),
			"D" (desc->handle_irq)
		     : "memory", "cc", "ecx");
	return 1;
}
/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void __cpuinit irq_ctx_init(int cpu)
{
	union irq_ctx *irqctx;

	if (per_cpu(hardirq_ctx, cpu))
		return;

	irqctx = (union irq_ctx *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER);
	irqctx->tinfo.task		= NULL;
	irqctx->tinfo.exec_domain	= NULL;
	irqctx->tinfo.cpu		= cpu;
	irqctx->tinfo.preempt_count	= HARDIRQ_OFFSET;
	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);

	per_cpu(hardirq_ctx, cpu) = irqctx;
	irqctx = (union irq_ctx *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER);
	irqctx->tinfo.task		= NULL;
	irqctx->tinfo.exec_domain	= NULL;
	irqctx->tinfo.cpu		= cpu;
	irqctx->tinfo.preempt_count	= 0;
	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);

	per_cpu(softirq_ctx, cpu) = irqctx;
	printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
	       cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
}
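/*
 * THREAD_FLAGS and THREAD_ORDER come from <asm/thread_info.h>, so
 * each IRQ stack is allocated and sized exactly like an ordinary
 * task stack (THREAD_SIZE bytes).  An addr_limit of MAKE_MM_SEG(0)
 * means user-space accesses from these contexts fail unless a
 * handler explicitly widens the limit with set_fs().
 */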
asmlinkage void do_softirq(void)
{
	unsigned long flags;
	struct thread_info *curctx;
	union irq_ctx *irqctx;
	u32 *isp;

	if (in_interrupt())
		return;

	local_irq_save(flags);
	if (local_softirq_pending()) {
		curctx = current_thread_info();
		irqctx = __get_cpu_var(softirq_ctx);
		irqctx->tinfo.task = curctx->task;
		irqctx->tinfo.previous_esp = current_stack_pointer;
		/* build the stack frame on the softirq stack */
		isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));

		call_on_stack(__do_softirq, isp);
		/*
		 * Shouldn't happen, we returned above if in_interrupt():
		 */
		WARN_ON_ONCE(softirq_count());
	}
	local_irq_restore(flags);
}
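/*
 * Running __do_softirq() on its own per-CPU stack bounds worst-case
 * stack depth: softirq work no longer piles on top of whatever task
 * stack happened to be current when the softirqs were raised.
 */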
bool handle_irq(unsigned irq, struct pt_regs *regs)
{
	struct irq_desc *desc;
	int overflow;

	overflow = check_stack_overflow();

	desc = irq_to_desc(irq);
	if (unlikely(!desc))
		return false;
	if (!execute_on_irq_stack(overflow, desc, irq)) {
		if (unlikely(overflow))
			print_stack_overflow();
		desc->handle_irq(irq, desc);
	}

	return true;
}
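/*
 * handle_irq() is invoked from the architecture's common do_IRQ()
 * path; a false return tells the caller that the vector had no
 * descriptor, so it can report the spurious interrupt.
 */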