/*
 *	linux/arch/i386/kernel/irq.c
 *
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86-specific interrupt
 * entry, irq-stacks and irq statistics code. All the remaining
 * irq logic is done by the generic kernel/irq/ code and
 * by the x86-specific irq controller code (e.g. i8259.c and
 * io_apic.c).
 */
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/delay.h>

#include <asm/apic.h>
#include <asm/uaccess.h>
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);
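/*
 * irq_regs holds, per CPU, the pt_regs of the interrupt currently
 * being serviced; do_IRQ() installs it via set_irq_regs() below so
 * that handlers can reach the interrupted context via get_irq_regs().
 */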
/*
 * 'what should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this itself.
 */
void ack_bad_irq(unsigned int irq)
{
	printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq);
#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Currently unexpected vectors happen only on SMP and APIC.
	 * We _must_ ack these because every local APIC has only N
	 * irq slots per priority level, and a 'hanging, unacked' IRQ
	 * holds up an irq slot - in excessive cases (when multiple
	 * unexpected vectors occur) that might lock up the APIC
	 * completely.
	 * But only ack when the APIC is enabled. -AK
	 */
	if (cpu_has_apic)
		ack_APIC_irq();
#endif
}
#ifdef CONFIG_4KSTACKS
/*
 * per-CPU IRQ handling contexts (thread information and stack)
 */
union irq_ctx {
	struct thread_info	tinfo;
	u32			stack[THREAD_SIZE/sizeof(u32)];
};
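/*
 * Layout note: the union overlays a struct thread_info at the bottom
 * of each THREAD_SIZE-aligned irq stack, mirroring the layout of a
 * normal task stack, so the current_thread_info() mask-the-stack-
 * pointer trick keeps working while we run on an irq stack.
 */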
static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
#endif
/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
fastcall unsigned int do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	/* high bit used in ret_from_ code */
	int irq = ~regs->orig_eax;
	struct irq_desc *desc = irq_desc + irq;
#ifdef CONFIG_4KSTACKS
	union irq_ctx *curctx, *irqctx;
	u32 *isp;
#endif
	if (unlikely((unsigned)irq >= NR_IRQS)) {
		printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
					__FUNCTION__, irq);
		BUG();
	}

	old_regs = set_irq_regs(regs);
	irq_enter();
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Debugging check for stack overflow: is there less than 1KB free? */
	{
		long esp;

		__asm__ __volatile__("andl %%esp,%0" :
					"=r" (esp) : "0" (THREAD_SIZE - 1));
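		/*
		 * Masking esp with THREAD_SIZE - 1 leaves esp's offset
		 * within the THREAD_SIZE-aligned stack, i.e. the number
		 * of bytes still free below the current stack pointer.
		 */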
		if (unlikely(esp < (sizeof(struct thread_info) + STACK_WARN))) {
			printk("do_IRQ: stack overflow: %ld\n",
				esp - sizeof(struct thread_info));
			dump_stack();
		}
	}
#endif
#ifdef CONFIG_4KSTACKS

	curctx = (union irq_ctx *) current_thread_info();
	irqctx = hardirq_ctx[smp_processor_id()];

	/*
	 * This is where we switch to the IRQ stack. However, if we are
	 * already using the IRQ stack (because we interrupted a hardirq
	 * handler) we can't do that and just have to keep using the
	 * current stack (which is the irq stack already after all).
	 */
	if (curctx != irqctx) {
		int arg1, arg2, ebx;
		/* build the stack frame on the IRQ stack */
		isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
		irqctx->tinfo.task = curctx->tinfo.task;
		irqctx->tinfo.previous_esp = current_stack_pointer;

		/*
		 * Copy the softirq bits in preempt_count so that the
		 * softirq checks work in the hardirq context.
		 */
		irqctx->tinfo.preempt_count =
			(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
			(curctx->tinfo.preempt_count & SOFTIRQ_MASK);
133 " xchgl %%ebx,%%esp \n"
135 " movl %%ebx,%%esp \n"
136 : "=a" (arg1
), "=d" (arg2
), "=b" (ebx
)
137 : "0" (irq
), "1" (desc
), "2" (isp
),
138 "D" (desc
->handle_irq
)
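		/*
		 * ebx enters holding isp, so the xchgl swaps us onto the
		 * hardirq stack for the duration of the call and back off
		 * it afterwards; eax/edx carry irq/desc, the two register
		 * arguments the fastcall handler expects.
		 */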
	} else
#endif
		desc->handle_irq(irq, desc);
	irq_exit();
	set_irq_regs(old_regs);
	return 1;
}
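/*
 * With CONFIG_4KSTACKS, task stacks shrink from 8KB to 4KB, so deep
 * interrupt nesting can no longer be absorbed by the task stack; the
 * dedicated per-CPU hardirq/softirq stacks below take that load
 * instead.
 */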
#ifdef CONFIG_4KSTACKS

static char softirq_stack[NR_CPUS * THREAD_SIZE]
		__attribute__((__section__(".bss.page_aligned")));

static char hardirq_stack[NR_CPUS * THREAD_SIZE]
		__attribute__((__section__(".bss.page_aligned")));
/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void irq_ctx_init(int cpu)
{
	union irq_ctx *irqctx;

	if (hardirq_ctx[cpu])
		return;

	irqctx = (union irq_ctx *) &hardirq_stack[cpu*THREAD_SIZE];
	irqctx->tinfo.task		= NULL;
	irqctx->tinfo.exec_domain	= NULL;
	irqctx->tinfo.cpu		= cpu;
	irqctx->tinfo.preempt_count	= HARDIRQ_OFFSET;
	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);

	hardirq_ctx[cpu] = irqctx;

	irqctx = (union irq_ctx *) &softirq_stack[cpu*THREAD_SIZE];
	irqctx->tinfo.task		= NULL;
	irqctx->tinfo.exec_domain	= NULL;
	irqctx->tinfo.cpu		= cpu;
	irqctx->tinfo.preempt_count	= 0;
	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);

	softirq_ctx[cpu] = irqctx;

	printk("CPU %u irqstacks, hard=%p soft=%p\n",
	       cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
}
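/*
 * The hardirq context starts life with HARDIRQ_OFFSET in its
 * preempt_count: since preempt_count lives in thread_info, code
 * running on that stack always sees itself in hardirq context.
 * The softirq context starts at 0 and lets __do_softirq() do its
 * own softirq accounting.
 */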
void irq_ctx_exit(int cpu)
{
	hardirq_ctx[cpu] = NULL;
}
extern asmlinkage void __do_softirq(void);
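/*
 * This do_softirq() overrides the generic kernel/softirq.c entry
 * point: it runs the pending softirqs on the per-CPU softirq stack
 * instead of whatever (possibly nearly full 4KB) stack we happen to
 * be on, using the same xchgl stack-switch trick as do_IRQ().
 */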
asmlinkage void do_softirq(void)
{
	unsigned long flags;
	struct thread_info *curctx;
	union irq_ctx *irqctx;
	u32 *isp;
	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		curctx = current_thread_info();
		irqctx = softirq_ctx[smp_processor_id()];
		irqctx->tinfo.task = curctx->task;
		irqctx->tinfo.previous_esp = current_stack_pointer;

		/* build the stack frame on the softirq stack */
		isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
219 " xchgl %%ebx,%%esp \n"
220 " call __do_softirq \n"
221 " movl %%ebx,%%esp \n"
224 : "memory", "cc", "edx", "ecx", "eax"
227 * Shouldnt happen, we returned above if in_interrupt():
229 WARN_ON_ONCE(softirq_count());
	local_irq_restore(flags);
}

EXPORT_SYMBOL(do_softirq);
#endif
/*
 * Interrupt statistics:
 */

atomic_t irq_err_count;

/*
 * /proc/interrupts printing:
 */

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction *action;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%-8d", j);
		seq_putc(p, '\n');
	}
	if (i < NR_IRQS) {
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
		seq_printf(p, " %8s", irq_desc[i].chip->name);
		seq_printf(p, "-%-8s", irq_desc[i].name);
		seq_printf(p, "  %s", action->name);

		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == NR_IRQS) {
		seq_printf(p, "NMI: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", nmi_count(j));
		seq_putc(p, '\n');
#ifdef CONFIG_X86_LOCAL_APIC
		seq_printf(p, "LOC: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
				per_cpu(irq_stat, j).apic_timer_irqs);
		seq_putc(p, '\n');
#endif
		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
		seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
#endif
	}
	return 0;
}
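/*
 * On a two-CPU box the resulting /proc/interrupts looks roughly like
 * this (illustrative values, not taken from the source):
 *
 *            CPU0       CPU1
 *   0:    1234567          0   IO-APIC-edge      timer
 *   1:       1023        911   IO-APIC-edge      i8042
 * NMI:          0          0
 * LOC:    1234001    1233998
 * ERR:          0
 */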
#ifdef CONFIG_HOTPLUG_CPU
#include <mach_apic.h>
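/*
 * Called from the CPU hot-unplug path, with interrupts disabled, to
 * steer every IRQ away from the CPU going offline; "map" is the set
 * of CPUs that will remain online.
 */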
void fixup_irqs(cpumask_t map)
{
	unsigned int irq;
	static int warned;
	for (irq = 0; irq < NR_IRQS; irq++) {
		cpumask_t mask;

		/* IRQ2 is the i8259 cascade and cannot be re-targeted */
		if (irq == 2)
			continue;
		cpus_and(mask, irq_desc[irq].affinity, map);
		if (any_online_cpu(mask) == NR_CPUS) {
			printk("Breaking affinity for irq %i\n", irq);
			mask = map;
		}
		if (irq_desc[irq].chip->set_affinity)
			irq_desc[irq].chip->set_affinity(irq, mask);
		else if (irq_desc[irq].action && !(warned++))
			printk("Cannot set affinity for irq %i\n", irq);
	}
#if 0
	barrier();
	/* Ingo Molnar says: "after the IO-APIC masks have been redirected
	   [note the nop - the interrupt-enable boundary on x86 is two
	   instructions from sti] - to flush out pending hardirqs and
	   IPIs. After this point nothing is supposed to reach this CPU." */
	__asm__ __volatile__("sti; nop; cli");

	/* That doesn't seem sufficient. Give it 1ms. */
	local_irq_enable();
	mdelay(1);
	local_irq_disable();
#endif
}
#endif