x86: unify native_init_IRQ() in irqinit_{32,64}.c
arch/x86/kernel/irqinit_32.c
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/sysdev.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/delay.h>

#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/timer.h>
#include <asm/pgtable.h>
#include <asm/desc.h>
#include <asm/apic.h>
#include <asm/setup.h>
#include <asm/i8259.h>
#include <asm/traps.h>

#ifdef CONFIG_X86_32
/*
 * Note that on a 486, we don't want to do a SIGFPE on an irq13
 * as the irq is unreliable, and exception 16 works correctly
 * (i.e. as explained in the intel literature). On a 386, you
 * can't use exception 16 due to bad IBM design, so we have to
 * rely on the less exact irq13.
 *
 * Careful.. Not only is IRQ13 unreliable, it also leads to
 * races. IBM designers who came up with it should be shot.
 */

static irqreturn_t math_error_irq(int cpl, void *dev_id)
{
        outb(0, 0xF0);
        if (ignore_fpu_irq || !boot_cpu_data.hard_math)
                return IRQ_NONE;
        math_error((void __user *)get_irq_regs()->ip);
        return IRQ_HANDLED;
}

/*
 * New motherboards sometimes make IRQ 13 be a PCI interrupt,
 * so allow interrupt sharing.
 */
static struct irqaction fpu_irq = {
        .handler = math_error_irq,
        .name = "fpu",
};
#endif

/*
 * IRQ2 is the cascade interrupt to the second interrupt controller.
 */
static struct irqaction irq2 = {
        .handler = no_action,
        .name = "cascade",
};

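/*
 * Per-CPU map from IDT vector to IRQ number.  The 16 legacy ISA IRQs
 * start out bound to their fixed IRQ0_VECTOR..IRQ15_VECTOR slots; -1
 * means no IRQ is currently routed through that vector on this CPU.
 */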
DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
        [0 ... IRQ0_VECTOR - 1] = -1,
        [IRQ0_VECTOR] = 0,
        [IRQ1_VECTOR] = 1,
        [IRQ2_VECTOR] = 2,
        [IRQ3_VECTOR] = 3,
        [IRQ4_VECTOR] = 4,
        [IRQ5_VECTOR] = 5,
        [IRQ6_VECTOR] = 6,
        [IRQ7_VECTOR] = 7,
        [IRQ8_VECTOR] = 8,
        [IRQ9_VECTOR] = 9,
        [IRQ10_VECTOR] = 10,
        [IRQ11_VECTOR] = 11,
        [IRQ12_VECTOR] = 12,
        [IRQ13_VECTOR] = 13,
        [IRQ14_VECTOR] = 14,
        [IRQ15_VECTOR] = 15,
        [IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1
};

int vector_used_by_percpu_irq(unsigned int vector)
{
        int cpu;

        for_each_online_cpu(cpu) {
                if (per_cpu(vector_irq, cpu)[vector] != -1)
                        return 1;
        }

        return 0;
}

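/*
 * Set up the boot CPU's local APIC (when one is configured) and the
 * i8259A PIC, and mark the 16 legacy IRQ descriptors as disabled,
 * action-less, and handled by the level-type "XT" flow through the
 * i8259A irq_chip.
 */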
static void __init init_ISA_irqs(void)
{
        int i;

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
        init_bsp_APIC();
#endif
        init_8259A(0);

        /*
         * 16 old-style INTA-cycle interrupts:
         */
        for (i = 0; i < NR_IRQS_LEGACY; i++) {
                struct irq_desc *desc = irq_to_desc(i);

                desc->status = IRQ_DISABLED;
                desc->action = NULL;
                desc->depth = 1;

                set_irq_chip_and_handler_name(i, &i8259A_chip,
                                              handle_level_irq, "XT");
        }
}

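/*
 * The declaration below makes init_IRQ() a weak alias of native_init_IRQ():
 * unless some other object file supplies a strong init_IRQ() definition,
 * both names resolve to the same code, while a strong definition quietly
 * takes precedence at link time.
 */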
/* Overridden in paravirt.c */
void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));

static void __init smp_intr_init(void)
{
#ifdef CONFIG_SMP
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
        /*
         * The reschedule interrupt is a CPU-to-CPU reschedule-helper
         * IPI, driven by wakeup.
         */
        alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);

        /* IPIs for invalidation */
        alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+0, invalidate_interrupt0);
        alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+1, invalidate_interrupt1);
        alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+2, invalidate_interrupt2);
        alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+3, invalidate_interrupt3);
        alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+4, invalidate_interrupt4);
        alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+5, invalidate_interrupt5);
        alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+6, invalidate_interrupt6);
        alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+7, invalidate_interrupt7);

        /* IPI for generic function call */
        alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);

        /* IPI for generic single function call */
        alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
                        call_function_single_interrupt);

        /* Low priority IPI to cleanup after moving an irq */
        set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
        set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors);
#endif
#endif /* CONFIG_SMP */
}

static void __init apic_intr_init(void)
{
        smp_intr_init();

#ifdef CONFIG_X86_LOCAL_APIC
        /* self generated IPI for local APIC timer */
        alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);

        /* generic IPI for platform specific use */
        alloc_intr_gate(GENERIC_INTERRUPT_VECTOR, generic_interrupt);

        /* IPI vectors for APIC spurious and error interrupts */
        alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
        alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
#endif

#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_MCE_P4THERMAL)
        /* thermal monitor LVT interrupt */
        alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
#endif
}

#ifdef CONFIG_X86_32
/**
 * x86_quirk_pre_intr_init - initialisation prior to setting up interrupt vectors
 *
 * Description:
 *      Perform any necessary interrupt initialisation prior to setting up
 *      the "ordinary" interrupt call gates. For legacy reasons, the ISA
 *      interrupts should be initialised here if the machine emulates a PC
 *      in any way.
 **/
static void __init x86_quirk_pre_intr_init(void)
{
        if (x86_quirks->arch_pre_intr_init) {
                if (x86_quirks->arch_pre_intr_init())
                        return;
        }
        init_ISA_irqs();
}
#endif

void __init native_init_IRQ(void)
{
        int i;

#ifdef CONFIG_X86_32
        /* Execute any quirks before the call gates are initialised: */
        x86_quirk_pre_intr_init();
#else
        init_ISA_irqs();
#endif

        /*
         * Cover the whole vector space, no vector can escape
         * us. (some of these will be overridden and become
         * 'special' SMP interrupts)
         */
        for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) {
#ifdef CONFIG_X86_32
                /* SYSCALL_VECTOR was reserved in trap_init. */
                if (i != SYSCALL_VECTOR)
                        set_intr_gate(i, interrupt[i-FIRST_EXTERNAL_VECTOR]);
#else
                /* IA32_SYSCALL_VECTOR was reserved in trap_init. */
                if (i != IA32_SYSCALL_VECTOR)
                        set_intr_gate(i, interrupt[i-FIRST_EXTERNAL_VECTOR]);
#endif
        }

        apic_intr_init();

        if (!acpi_ioapic)
                setup_irq(2, &irq2);

#ifdef CONFIG_X86_32
        /*
         * Call quirks after call gates are initialised (usually add in
         * the architecture specific gates):
         */
        x86_quirk_intr_init();

        /*
         * External FPU? Set up irq13 if so, for
         * original braindamaged IBM FERR coupling.
         */
        if (boot_cpu_data.hard_math && !cpu_has_fpu)
                setup_irq(FPU_IRQ, &fpu_irq);

        irq_ctx_init(smp_processor_id());
#endif
}