[PATCH] genirq: ia64 irq: Dynamic irq support
[deliverable/linux.git] / arch / ia64 / kernel / irq_ia64.c
1 /*
2 * linux/arch/ia64/kernel/irq_ia64.c
3 *
4 * Copyright (C) 1998-2001 Hewlett-Packard Co
5 * Stephane Eranian <eranian@hpl.hp.com>
6 * David Mosberger-Tang <davidm@hpl.hp.com>
7 *
8 * 6/10/99: Updated to bring in sync with x86 version to facilitate
9 * support for SMP and different interrupt controllers.
10 *
11 * 09/15/00 Goutham Rao <goutham.rao@intel.com> Implemented pci_irq_to_vector
12 * PCI to vector allocation routine.
13 * 04/14/2004 Ashok Raj <ashok.raj@intel.com>
14 * Added CPU Hotplug handling for IPF.
15 */
16
17 #include <linux/module.h>
18
19 #include <linux/jiffies.h>
20 #include <linux/errno.h>
21 #include <linux/init.h>
22 #include <linux/interrupt.h>
23 #include <linux/ioport.h>
24 #include <linux/kernel_stat.h>
25 #include <linux/slab.h>
26 #include <linux/ptrace.h>
27 #include <linux/random.h> /* for rand_initialize_irq() */
28 #include <linux/signal.h>
29 #include <linux/smp.h>
30 #include <linux/smp_lock.h>
31 #include <linux/threads.h>
32 #include <linux/bitops.h>
33 #include <linux/irq.h>
34
35 #include <asm/delay.h>
36 #include <asm/intrinsics.h>
37 #include <asm/io.h>
38 #include <asm/hw_irq.h>
39 #include <asm/machvec.h>
40 #include <asm/pgtable.h>
41 #include <asm/system.h>
42
43 #ifdef CONFIG_PERFMON
44 # include <asm/perfmon.h>
45 #endif
46
/* Set to 1 to enable the low-stack-space check in ia64_handle_irq(). */
#define IRQ_DEBUG	0

/* These can be overridden in platform_irq_init */
int ia64_first_device_vector = IA64_DEF_FIRST_DEVICE_VECTOR;
int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;

/* default base addr of IPI table */
void __iomem *ipi_base_addr = ((void __iomem *)
			       (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));

/*
 * Legacy IRQ to IA-64 vector translation table.
 */
__u8 isa_irq_to_vector_map[16] = {
	/* 8259 IRQ translation, first 16 entries */
	0x2f, 0x20, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
	0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21
};
EXPORT_SYMBOL(isa_irq_to_vector_map);

/* Allocation bitmap for device vectors: bit N == vector IA64_FIRST_DEVICE_VECTOR + N in use. */
static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_MAX_DEVICE_VECTORS)];
68
69 int
70 assign_irq_vector (int irq)
71 {
72 int pos, vector;
73 again:
74 pos = find_first_zero_bit(ia64_vector_mask, IA64_NUM_DEVICE_VECTORS);
75 vector = IA64_FIRST_DEVICE_VECTOR + pos;
76 if (vector > IA64_LAST_DEVICE_VECTOR)
77 return -ENOSPC;
78 if (test_and_set_bit(pos, ia64_vector_mask))
79 goto again;
80 return vector;
81 }
82
83 void
84 free_irq_vector (int vector)
85 {
86 int pos;
87
88 if (vector < IA64_FIRST_DEVICE_VECTOR || vector > IA64_LAST_DEVICE_VECTOR)
89 return;
90
91 pos = vector - IA64_FIRST_DEVICE_VECTOR;
92 if (!test_and_clear_bit(pos, ia64_vector_mask))
93 printk(KERN_WARNING "%s: double free!\n", __FUNCTION__);
94 }
95
96 int
97 reserve_irq_vector (int vector)
98 {
99 int pos;
100
101 if (vector < IA64_FIRST_DEVICE_VECTOR ||
102 vector > IA64_LAST_DEVICE_VECTOR)
103 return -EINVAL;
104
105 pos = vector - IA64_FIRST_DEVICE_VECTOR;
106 return test_and_set_bit(pos, ia64_vector_mask);
107 }
108
109 /*
110 * Dynamic irq allocate and deallocation for MSI
111 */
112 int create_irq(void)
113 {
114 int vector = assign_irq_vector(AUTO_ASSIGN);
115
116 if (vector >= 0)
117 dynamic_irq_init(vector);
118
119 return vector;
120 }
121
/*
 * Tear down a dynamically-allocated irq: reset its descriptor, then
 * return the vector to the allocator (on ia64 the irq number doubles as
 * the vector number, so it is passed straight to free_irq_vector()).
 */
void destroy_irq(unsigned int irq)
{
	dynamic_irq_cleanup(irq);
	free_irq_vector(irq);
}
127
#ifdef CONFIG_SMP
/* Reschedule IPIs need no handler dispatch: they are EOI'd but not routed
 * through __do_IRQ().  On UP no reschedule IPI can occur. */
# define IS_RESCHEDULE(vec)	(vec == IA64_IPI_RESCHEDULE)
#else
# define IS_RESCHEDULE(vec)	(0)
#endif
133 /*
134 * That's where the IVT branches when we get an external
135 * interrupt. This branches to the correct hardware IRQ handler via
136 * function ptr.
137 */
138 void
139 ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
140 {
141 unsigned long saved_tpr;
142
143 #if IRQ_DEBUG
144 {
145 unsigned long bsp, sp;
146
147 /*
148 * Note: if the interrupt happened while executing in
149 * the context switch routine (ia64_switch_to), we may
150 * get a spurious stack overflow here. This is
151 * because the register and the memory stack are not
152 * switched atomically.
153 */
154 bsp = ia64_getreg(_IA64_REG_AR_BSP);
155 sp = ia64_getreg(_IA64_REG_SP);
156
157 if ((sp - bsp) < 1024) {
158 static unsigned char count;
159 static long last_time;
160
161 if (jiffies - last_time > 5*HZ)
162 count = 0;
163 if (++count < 5) {
164 last_time = jiffies;
165 printk("ia64_handle_irq: DANGER: less than "
166 "1KB of free stack space!!\n"
167 "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
168 }
169 }
170 }
171 #endif /* IRQ_DEBUG */
172
173 /*
174 * Always set TPR to limit maximum interrupt nesting depth to
175 * 16 (without this, it would be ~240, which could easily lead
176 * to kernel stack overflows).
177 */
178 irq_enter();
179 saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
180 ia64_srlz_d();
181 while (vector != IA64_SPURIOUS_INT_VECTOR) {
182 if (!IS_RESCHEDULE(vector)) {
183 ia64_setreg(_IA64_REG_CR_TPR, vector);
184 ia64_srlz_d();
185
186 __do_IRQ(local_vector_to_irq(vector), regs);
187
188 /*
189 * Disable interrupts and send EOI:
190 */
191 local_irq_disable();
192 ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
193 }
194 ia64_eoi();
195 vector = ia64_get_ivr();
196 }
197 /*
198 * This must be done *after* the ia64_eoi(). For example, the keyboard softirq
199 * handler needs to be able to wait for further keyboard interrupts, which can't
200 * come through until ia64_eoi() has been done.
201 */
202 irq_exit();
203 }
204
#ifdef CONFIG_HOTPLUG_CPU
/*
 * This function emulates a interrupt processing when a cpu is about to be
 * brought down.
 */
void ia64_process_pending_intr(void)
{
	ia64_vector vector;
	unsigned long saved_tpr;
	extern unsigned int vectors_in_migration[NR_IRQS];

	vector = ia64_get_ivr();

	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();

	/*
	 * Perform normal interrupt style processing
	 */
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		if (!IS_RESCHEDULE(vector)) {
			/* Hoisted: vector is invariant here, so translate once. */
			int irq = local_vector_to_irq(vector);

			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			/*
			 * Now try calling normal ia64_handle_irq as it would have got called
			 * from a real intr handler. Try passing null for pt_regs, hopefully
			 * it will work. I hope it works!.
			 * Probably could shared code.
			 */
			vectors_in_migration[irq] = 0;
			__do_IRQ(irq, NULL);

			/*
			 * Disable interrupts and send EOI
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	irq_exit();
}
#endif
251
252
#ifdef CONFIG_SMP
/* IPI dispatcher, defined in smp.c. */
extern irqreturn_t handle_IPI (int irq, void *dev_id, struct pt_regs *regs);

/* irqaction registered on the IPI vector by init_IRQ(); runs with
 * interrupts disabled (IRQF_DISABLED). */
static struct irqaction ipi_irqaction = {
	.handler =	handle_IPI,
	.flags =	IRQF_DISABLED,
	.name =		"IPI"
};
#endif
262
263 void
264 register_percpu_irq (ia64_vector vec, struct irqaction *action)
265 {
266 irq_desc_t *desc;
267 unsigned int irq;
268
269 for (irq = 0; irq < NR_IRQS; ++irq)
270 if (irq_to_vector(irq) == vec) {
271 desc = irq_desc + irq;
272 desc->status |= IRQ_PER_CPU;
273 desc->chip = &irq_type_ia64_lsapic;
274 if (action)
275 setup_irq(irq, action);
276 }
277 }
278
/*
 * Arch-level interrupt init: claim the spurious vector (no handler),
 * hook the IPI vector on SMP, set up perfmon's per-CPU interrupt when
 * configured, then hand off to the platform for machine-specific setup.
 */
void __init
init_IRQ (void)
{
	register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
#ifdef CONFIG_SMP
	register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
#endif
#ifdef CONFIG_PERFMON
	pfm_init_percpu();
#endif
	/* Must run last: platforms may override the device-vector range. */
	platform_irq_init();
}
291
292 void
293 ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect)
294 {
295 void __iomem *ipi_addr;
296 unsigned long ipi_data;
297 unsigned long phys_cpu_id;
298
299 #ifdef CONFIG_SMP
300 phys_cpu_id = cpu_physical_id(cpu);
301 #else
302 phys_cpu_id = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff;
303 #endif
304
305 /*
306 * cpu number is in 8bit ID and 8bit EID
307 */
308
309 ipi_data = (delivery_mode << 8) | (vector & 0xff);
310 ipi_addr = ipi_base_addr + ((phys_cpu_id << 4) | ((redirect & 1) << 3));
311
312 writeq(ipi_data, ipi_addr);
313 }
This page took 0.043344 seconds and 5 git commands to generate.