/*
 * Derived from arch/i386/kernel/irq.c
 *	Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 *	Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 *	Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 *	Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQ's should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU.  If a bit is set, the
 * interrupt is _enabled_.  As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask.  I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
 */
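
/*
 * Illustrative sketch of the mask handling described above (not part
 * of this file; "siu_mask_reg" and "cached_irq_mask" usage here are
 * placeholders for the 8xx platform code): with IRQ0 in bit 0 counted
 * from the most-significant end, masking interrupt "irq" is roughly
 *
 *	cached_irq_mask &= ~(1 << (31 - irq));
 *	out_be32(&siu_mask_reg, cached_irq_mask);
 *
 * so the cached word can be stuffed straight into the SIU mask
 * register, as the comment says.
 */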

#undef DEBUG

#include <linux/module.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/debugfs.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#endif

int __irq_offset_value;
static int ppc_spurious_interrupts;

#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64
EXPORT_SYMBOL(irq_desc);

int distribute_irqs = 1;

static inline notrace unsigned long get_hard_enabled(void)
{
	unsigned long enabled;

	__asm__ __volatile__("lbz %0,%1(13)"
	: "=r" (enabled) : "i" (offsetof(struct paca_struct, hard_enabled)));

	return enabled;
}

static inline notrace void set_soft_enabled(unsigned long enable)
{
	__asm__ __volatile__("stb %0,%1(13)"
	: : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
}
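
/*
 * Descriptive note (summarizing the code below): 64-bit PowerPC uses a
 * lazy interrupt-disable scheme.  local_irq_disable() only clears
 * paca->soft_enabled; the hardware EE bit stays on until an interrupt
 * actually arrives, at which point the low-level entry code clears
 * paca->hard_enabled and hard-disables.  Roughly:
 *
 *	soft=1, hard=1:	interrupts fully enabled
 *	soft=0, hard=1:	"disabled", nothing arrived yet
 *	soft=0, hard=0:	an interrupt came in and was deferred; the
 *			restore path must hard-enable and replay it
 *
 * raw_local_irq_restore() below is that restore path.
 */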

notrace void raw_local_irq_restore(unsigned long en)
{
	/*
	 * get_paca()->soft_enabled = en;
	 * Is it ever valid to use local_irq_restore(0) when soft_enabled is 1?
	 * That was allowed before, and in such a case we do need to take care
	 * that gcc will set soft_enabled directly via r13, not choose to use
	 * an intermediate register, lest we're preempted to a different cpu.
	 */
	set_soft_enabled(en);
	if (!en)
		return;

	if (firmware_has_feature(FW_FEATURE_ISERIES)) {
		/*
		 * Do we need to disable preemption here?  Not really: in the
		 * unlikely event that we're preempted to a different cpu in
		 * between getting r13, loading its lppaca_ptr, and loading
		 * its any_int, we might call iseries_handle_interrupts without
		 * an interrupt pending on the new cpu, but that's no disaster,
		 * is it?  And the business of preempting us off the old cpu
		 * would itself involve a local_irq_restore which handles the
		 * interrupt to that cpu.
		 *
		 * But use "local_paca->lppaca_ptr" instead of "get_lppaca()"
		 * to avoid any preemption checking added into get_paca().
		 */
		if (local_paca->lppaca_ptr->int_dword.any_int)
			iseries_handle_interrupts();
	}

	/*
	 * if (get_paca()->hard_enabled) return;
	 * But again we need to take care that gcc gets hard_enabled directly
	 * via r13, not choose to use an intermediate register, lest we're
	 * preempted to a different cpu in between the two instructions.
	 */
	if (get_hard_enabled())
		return;

	/*
	 * Need to hard-enable interrupts here.  Since currently disabled,
	 * no need to take further asm precautions against preemption; but
	 * use local_paca instead of get_paca() to avoid preemption checking.
	 */
	local_paca->hard_enabled = en;
	if ((int)mfspr(SPRN_DEC) < 0)
		mtspr(SPRN_DEC, 1);

	/*
	 * Force the delivery of pending soft-disabled interrupts on PS3.
	 * Any HV call will have this side effect.
	 */
	if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
		u64 tmp;
		lv1_get_version_info(&tmp);
	}

	__hard_irq_enable();
}
EXPORT_SYMBOL(raw_local_irq_restore);
#endif /* CONFIG_PPC64 */

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *)v, j;
	struct irqaction *action;
	struct irq_desc *desc;
	unsigned long flags;

	if (i == 0) {
		seq_puts(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d       ", j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		desc = get_irq_desc(i);
		spin_lock_irqsave(&desc->lock, flags);
		action = desc->action;
		if (!action || !action->handler)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifdef CONFIG_SMP
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#else
		seq_printf(p, "%10u ", kstat_irqs(i));
#endif /* CONFIG_SMP */
		if (desc->chip)
			seq_printf(p, " %s ", desc->chip->typename);
		else
			seq_puts(p, "  None      ");
		seq_printf(p, "%s", (desc->status & IRQ_LEVEL) ? "Level " : "Edge  ");
		seq_printf(p, "    %s", action->name);
		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);
		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&desc->lock, flags);
	} else if (i == NR_IRQS) {
#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
		if (tau_initialized) {
			seq_puts(p, "TAU: ");
			for_each_online_cpu(j)
				seq_printf(p, "%10u ", tau_interrupts(j));
			seq_puts(p, "  PowerPC             Thermal Assist (cpu temp)\n");
		}
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */
		seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
	}
	return 0;
}
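
/*
 * Illustrative /proc/interrupts layout produced by the code above
 * (values and names are made up):
 *
 *	           CPU0       CPU1
 *	 16:      12345       6789  MPIC   Level     eth0
 *	BAD:          0
 */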

#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(cpumask_t map)
{
	unsigned int irq;
	static int warned;

	for_each_irq(irq) {
		cpumask_t mask;

		if (irq_desc[irq].status & IRQ_PER_CPU)
			continue;

		cpumask_and(&mask, irq_desc[irq].affinity, &map);
		if (any_online_cpu(mask) == NR_CPUS) {
			printk("Breaking affinity for irq %i\n", irq);
			mask = map;
		}
		if (irq_desc[irq].chip->set_affinity)
			irq_desc[irq].chip->set_affinity(irq, &mask);
		else if (irq_desc[irq].action && !(warned++))
			printk("Cannot set affinity for irq %i\n", irq);
	}

	local_irq_enable();
	mdelay(1);
	local_irq_disable();
}
#endif

void do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned int irq;
#ifdef CONFIG_IRQSTACKS
	struct thread_info *curtp, *irqtp;
#endif

	irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Debugging check for stack overflow: is there less than 2KB free? */
	{
		long sp;

		sp = __get_SP() & (THREAD_SIZE-1);

		if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
			printk("do_IRQ: stack overflow: %ld\n",
				sp - sizeof(struct thread_info));
			dump_stack();
		}
	}
#endif

	/*
	 * Every platform is required to implement ppc_md.get_irq.
	 * This function will either return an irq number or NO_IRQ to
	 * indicate there are no more pending.
	 * The value NO_IRQ_IGNORE is for buggy hardware and means that this
	 * IRQ has already been handled. -- Tom
	 */
	irq = ppc_md.get_irq();

	if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) {
#ifdef CONFIG_IRQSTACKS
		/* Switch to the irq stack to handle this */
		curtp = current_thread_info();
		irqtp = hardirq_ctx[smp_processor_id()];
		if (curtp != irqtp) {
			struct irq_desc *desc = irq_desc + irq;
			void *handler = desc->handle_irq;
			unsigned long saved_sp_limit = current->thread.ksp_limit;
			if (handler == NULL)
				handler = &__do_IRQ;
			irqtp->task = curtp->task;
			irqtp->flags = 0;

			/* Copy the softirq bits in preempt_count so that the
			 * softirq checks work in the hardirq context.
			 */
			irqtp->preempt_count =
				(irqtp->preempt_count & ~SOFTIRQ_MASK) |
				(curtp->preempt_count & SOFTIRQ_MASK);

			current->thread.ksp_limit = (unsigned long)irqtp +
				_ALIGN_UP(sizeof(struct thread_info), 16);
			call_handle_irq(irq, desc, irqtp, handler);
			current->thread.ksp_limit = saved_sp_limit;
			irqtp->task = NULL;

			/* Set any flag that may have been set on the
			 * alternate stack
			 */
			if (irqtp->flags)
				set_bits(irqtp->flags, &curtp->flags);
		} else
#endif
			generic_handle_irq(irq);
	} else if (irq != NO_IRQ_IGNORE)
		/* That's not SMP safe ... but who cares ? */
		ppc_spurious_interrupts++;

	irq_exit();
	set_irq_regs(old_regs);

#ifdef CONFIG_PPC_ISERIES
	if (firmware_has_feature(FW_FEATURE_ISERIES) &&
			get_lppaca()->int_dword.fields.decr_int) {
		get_lppaca()->int_dword.fields.decr_int = 0;
		/* Signal a fake decrementer interrupt */
		timer_interrupt(regs);
	}
#endif
}
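
/*
 * Call-flow summary (descriptive only): exception entry -> do_IRQ()
 * -> ppc_md.get_irq() -> switch to the per-cpu hardirq stack ->
 * desc->handle_irq() or __do_IRQ() -> driver handlers -> irq_exit(),
 * which may in turn run softirqs via do_softirq() further down.
 */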

void __init init_IRQ(void)
{
	if (ppc_md.init_IRQ)
		ppc_md.init_IRQ();

	exc_lvl_ctx_init();

	irq_ctx_init();
}

#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
struct thread_info *critirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *dbgirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;

void exc_lvl_ctx_init(void)
{
	struct thread_info *tp;
	int i;

	for_each_possible_cpu(i) {
		memset((void *)critirq_ctx[i], 0, THREAD_SIZE);
		tp = critirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = 0;

#ifdef CONFIG_BOOKE
		memset((void *)dbgirq_ctx[i], 0, THREAD_SIZE);
		tp = dbgirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = 0;

		memset((void *)mcheckirq_ctx[i], 0, THREAD_SIZE);
		tp = mcheckirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = HARDIRQ_OFFSET;
#endif
	}
}
#endif

#ifdef CONFIG_IRQSTACKS
struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;

void irq_ctx_init(void)
{
	struct thread_info *tp;
	int i;

	for_each_possible_cpu(i) {
		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
		tp = softirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = 0;

		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
		tp = hardirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = HARDIRQ_OFFSET;
	}
}

static inline void do_softirq_onstack(void)
{
	struct thread_info *curtp, *irqtp;
	unsigned long saved_sp_limit = current->thread.ksp_limit;

	curtp = current_thread_info();
	irqtp = softirq_ctx[smp_processor_id()];
	irqtp->task = curtp->task;
	current->thread.ksp_limit = (unsigned long)irqtp +
				    _ALIGN_UP(sizeof(struct thread_info), 16);
	call_do_softirq(irqtp);
	current->thread.ksp_limit = saved_sp_limit;
	irqtp->task = NULL;
}

#else
#define do_softirq_onstack()	__do_softirq()
#endif /* CONFIG_IRQSTACKS */

void do_softirq(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending())
		do_softirq_onstack();

	local_irq_restore(flags);
}

/*
 * IRQ controller and virtual interrupts
 */
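
/*
 * Descriptive note: the code below decouples Linux interrupt numbers
 * ("virq") from hardware ones ("hwirq").  Each interrupt controller
 * registers a struct irq_host; irq_map[] records the (host, hwirq)
 * pair behind every virq, and the reverse direction is served by a
 * linear table, a radix tree, or a linear scan of irq_map[],
 * depending on the host's revmap_type.
 */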

static LIST_HEAD(irq_hosts);
static DEFINE_SPINLOCK(irq_big_lock);
static unsigned int revmap_trees_allocated;
static DEFINE_MUTEX(revmap_trees_mutex);
struct irq_map_entry irq_map[NR_IRQS];
static unsigned int irq_virq_count = NR_IRQS;
static struct irq_host *irq_default_host;

irq_hw_number_t virq_to_hw(unsigned int virq)
{
	return irq_map[virq].hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);

static int default_irq_host_match(struct irq_host *h, struct device_node *np)
{
	return h->of_node != NULL && h->of_node == np;
}

struct irq_host *irq_alloc_host(struct device_node *of_node,
				unsigned int revmap_type,
				unsigned int revmap_arg,
				struct irq_host_ops *ops,
				irq_hw_number_t inval_irq)
{
	struct irq_host *host;
	unsigned int size = sizeof(struct irq_host);
	unsigned int i;
	unsigned int *rmap;
	unsigned long flags;

	/* Allocate structure and revmap table if using linear mapping */
	if (revmap_type == IRQ_HOST_MAP_LINEAR)
		size += revmap_arg * sizeof(unsigned int);
	host = zalloc_maybe_bootmem(size, GFP_KERNEL);
	if (host == NULL)
		return NULL;

	/* Fill structure */
	host->revmap_type = revmap_type;
	host->inval_irq = inval_irq;
	host->ops = ops;
	host->of_node = of_node_get(of_node);

	if (host->ops->match == NULL)
		host->ops->match = default_irq_host_match;

	spin_lock_irqsave(&irq_big_lock, flags);

	/* If it's a legacy controller, check for duplicates and
	 * mark it as allocated (we use irq 0 host pointer for that)
	 */
	if (revmap_type == IRQ_HOST_MAP_LEGACY) {
		if (irq_map[0].host != NULL) {
			spin_unlock_irqrestore(&irq_big_lock, flags);
			/* If we are early boot, we can't free the structure,
			 * too bad...
			 * this will be fixed once slab is made available early
			 * instead of the current cruft
			 */
			if (mem_init_done)
				kfree(host);
			return NULL;
		}
		irq_map[0].host = host;
	}

	list_add(&host->link, &irq_hosts);
	spin_unlock_irqrestore(&irq_big_lock, flags);

	/* Additional setups per revmap type */
	switch (revmap_type) {
	case IRQ_HOST_MAP_LEGACY:
		/* 0 is always the invalid number for legacy */
		host->inval_irq = 0;
		/* setup us as the host for all legacy interrupts */
		for (i = 1; i < NUM_ISA_INTERRUPTS; i++) {
			irq_map[i].hwirq = i;
			smp_wmb();
			irq_map[i].host = host;
			smp_wmb();

			/* Clear norequest flags */
			get_irq_desc(i)->status &= ~IRQ_NOREQUEST;

			/* Legacy flags are left to default at this point,
			 * one can then use irq_create_mapping() to
			 * explicitly change them
			 */
			ops->map(host, i, i);
		}
		break;
	case IRQ_HOST_MAP_LINEAR:
		rmap = (unsigned int *)(host + 1);
		for (i = 0; i < revmap_arg; i++)
			rmap[i] = NO_IRQ;
		host->revmap_data.linear.size = revmap_arg;
		smp_wmb();
		host->revmap_data.linear.revmap = rmap;
		break;
	default:
		break;
	}

	pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host);

	return host;
}
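
/*
 * Usage sketch (illustrative only; the "mypic" names are placeholders):
 * a PIC driver with 64 hardware sources would typically register
 * itself with something like
 *
 *	static struct irq_host_ops mypic_host_ops = {
 *		.map   = mypic_host_map,
 *		.xlate = mypic_host_xlate,
 *	};
 *
 *	host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, 64,
 *			      &mypic_host_ops, 0);
 *
 * passing 0 as the invalid hardware number.
 */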

struct irq_host *irq_find_host(struct device_node *node)
{
	struct irq_host *h, *found = NULL;
	unsigned long flags;

	/* We might want to match the legacy controller last since
	 * it might potentially be set to match all interrupts in
	 * the absence of a device node. This isn't a problem so far
	 * yet though...
	 */
	spin_lock_irqsave(&irq_big_lock, flags);
	list_for_each_entry(h, &irq_hosts, link)
		if (h->ops->match(h, node)) {
			found = h;
			break;
		}
	spin_unlock_irqrestore(&irq_big_lock, flags);
	return found;
}
EXPORT_SYMBOL_GPL(irq_find_host);

void irq_set_default_host(struct irq_host *host)
{
	pr_debug("irq: Default host set to @0x%p\n", host);

	irq_default_host = host;
}

void irq_set_virq_count(unsigned int count)
{
	pr_debug("irq: Trying to set virq count to %d\n", count);

	BUG_ON(count < NUM_ISA_INTERRUPTS);
	if (count < NR_IRQS)
		irq_virq_count = count;
}

static int irq_setup_virq(struct irq_host *host, unsigned int virq,
			  irq_hw_number_t hwirq)
{
	/* Clear IRQ_NOREQUEST flag */
	get_irq_desc(virq)->status &= ~IRQ_NOREQUEST;

	/* map it */
	smp_wmb();
	irq_map[virq].hwirq = hwirq;
	smp_mb();

	if (host->ops->map(host, virq, hwirq)) {
		pr_debug("irq: -> mapping failed, freeing\n");
		irq_free_virt(virq, 1);
		return -1;
	}

	return 0;
}

unsigned int irq_create_direct_mapping(struct irq_host *host)
{
	unsigned int virq;

	if (host == NULL)
		host = irq_default_host;

	BUG_ON(host == NULL);
	WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP);

	virq = irq_alloc_virt(host, 1, 0);
	if (virq == NO_IRQ) {
		pr_debug("irq: create_direct virq allocation failed\n");
		return NO_IRQ;
	}

	pr_debug("irq: create_direct obtained virq %d\n", virq);

	if (irq_setup_virq(host, virq, virq))
		return NO_IRQ;

	return virq;
}

unsigned int irq_create_mapping(struct irq_host *host,
				irq_hw_number_t hwirq)
{
	unsigned int virq, hint;

	pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq);

	/* Look for default host if necessary */
	if (host == NULL)
		host = irq_default_host;
	if (host == NULL) {
		printk(KERN_WARNING "irq_create_mapping called for"
		       " NULL host, hwirq=%lx\n", hwirq);
		WARN_ON(1);
		return NO_IRQ;
	}
	pr_debug("irq: -> using host @%p\n", host);

	/* Check if mapping already exists, if it does, call
	 * host->ops->map() to update the flags
	 */
	virq = irq_find_mapping(host, hwirq);
	if (virq != NO_IRQ) {
		if (host->ops->remap)
			host->ops->remap(host, virq, hwirq);
		pr_debug("irq: -> existing mapping on virq %d\n", virq);
		return virq;
	}

	/* Get a virtual interrupt number */
	if (host->revmap_type == IRQ_HOST_MAP_LEGACY) {
		/* Handle legacy */
		virq = (unsigned int)hwirq;
		if (virq == 0 || virq >= NUM_ISA_INTERRUPTS)
			return NO_IRQ;
		return virq;
	} else {
		/* Allocate a virtual interrupt number */
		hint = hwirq % irq_virq_count;
		virq = irq_alloc_virt(host, 1, hint);
		if (virq == NO_IRQ) {
			pr_debug("irq: -> virq allocation failed\n");
			return NO_IRQ;
		}
	}
	pr_debug("irq: -> obtained virq %d\n", virq);

	if (irq_setup_virq(host, virq, hwirq))
		return NO_IRQ;

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);
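
/*
 * Usage sketch (illustrative; the handler and device names are made
 * up): once a host exists, a driver that knows the hardware number
 * can do
 *
 *	virq = irq_create_mapping(host, hwirq);
 *	if (virq != NO_IRQ)
 *		ret = request_irq(virq, my_handler, 0, "mydev", dev);
 *
 * Device-tree users normally go through irq_of_parse_and_map() below
 * instead of calling this directly.
 */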

unsigned int irq_create_of_mapping(struct device_node *controller,
				   u32 *intspec, unsigned int intsize)
{
	struct irq_host *host;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	unsigned int virq;

	if (controller == NULL)
		host = irq_default_host;
	else
		host = irq_find_host(controller);
	if (host == NULL) {
		printk(KERN_WARNING "irq: no irq host found for %s !\n",
		       controller->full_name);
		return NO_IRQ;
	}

	/* If host has no translation, then we assume interrupt line */
	if (host->ops->xlate == NULL)
		hwirq = intspec[0];
	else {
		if (host->ops->xlate(host, controller, intspec, intsize,
				     &hwirq, &type))
			return NO_IRQ;
	}

	/* Create mapping */
	virq = irq_create_mapping(host, hwirq);
	if (virq == NO_IRQ)
		return virq;

	/* Set type if specified and different than the current one */
	if (type != IRQ_TYPE_NONE &&
	    type != (get_irq_desc(virq)->status & IRQF_TRIGGER_MASK))
		set_irq_type(virq, type);
	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);

unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
{
	struct of_irq oirq;

	if (of_irq_map_one(dev, index, &oirq))
		return NO_IRQ;

	return irq_create_of_mapping(oirq.controller, oirq.specifier,
				     oirq.size);
}
EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
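
/*
 * Typical device-tree consumer (illustrative):
 *
 *	virq = irq_of_parse_and_map(node, 0);
 *
 * which resolves entry 0 of the node's interrupt specifier through the
 * controller's xlate hook and returns a virq ready for request_irq().
 */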

void irq_dispose_mapping(unsigned int virq)
{
	struct irq_host *host;
	irq_hw_number_t hwirq;

	if (virq == NO_IRQ)
		return;

	host = irq_map[virq].host;
	WARN_ON(host == NULL);
	if (host == NULL)
		return;

	/* Never unmap legacy interrupts */
	if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
		return;

	/* remove chip and handler */
	set_irq_chip_and_handler(virq, NULL, NULL);

	/* Make sure it's completed */
	synchronize_irq(virq);

	/* Tell the PIC about it */
	if (host->ops->unmap)
		host->ops->unmap(host, virq);
	smp_mb();

	/* Clear reverse map */
	hwirq = irq_map[virq].hwirq;
	switch (host->revmap_type) {
	case IRQ_HOST_MAP_LINEAR:
		if (hwirq < host->revmap_data.linear.size)
			host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
		break;
	case IRQ_HOST_MAP_TREE:
		/*
		 * Check if radix tree allocated yet, if not then nothing to
		 * remove.
		 */
		smp_rmb();
		if (revmap_trees_allocated < 1)
			break;
		mutex_lock(&revmap_trees_mutex);
		radix_tree_delete(&host->revmap_data.tree, hwirq);
		mutex_unlock(&revmap_trees_mutex);
		break;
	}

	/* Destroy map */
	smp_mb();
	irq_map[virq].hwirq = host->inval_irq;

	/* Set some flags */
	get_irq_desc(virq)->status |= IRQ_NOREQUEST;

	/* Free it */
	irq_free_virt(virq, 1);
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);

unsigned int irq_find_mapping(struct irq_host *host,
			      irq_hw_number_t hwirq)
{
	unsigned int i;
	unsigned int hint = hwirq % irq_virq_count;

	/* Look for default host if necessary */
	if (host == NULL)
		host = irq_default_host;
	if (host == NULL)
		return NO_IRQ;

	/* legacy -> bail early */
	if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
		return hwirq;

	/* Slow path does a linear search of the map */
	if (hint < NUM_ISA_INTERRUPTS)
		hint = NUM_ISA_INTERRUPTS;
	i = hint;
	do {
		if (irq_map[i].host == host &&
		    irq_map[i].hwirq == hwirq)
			return i;
		i++;
		if (i >= irq_virq_count)
			i = NUM_ISA_INTERRUPTS;
	} while (i != hint);
	return NO_IRQ;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);

unsigned int irq_radix_revmap_lookup(struct irq_host *host,
				     irq_hw_number_t hwirq)
{
	struct irq_map_entry *ptr;
	unsigned int virq;

	WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);

	/*
	 * Check if the radix tree exists and has been initialized.
	 * If not, we fall back to slow mode.
	 */
	if (revmap_trees_allocated < 2)
		return irq_find_mapping(host, hwirq);

	/* Now try to resolve */
	/*
	 * No rcu_read_lock(ing) needed, the ptr returned can't go under us
	 * as it's referencing an entry in the static irq_map table.
	 */
	ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq);

	/*
	 * If found in radix tree, then fine.
	 * Else fall back to linear lookup - this should not happen in practice
	 * as it means that we failed to insert the node in the radix tree.
	 */
	if (ptr)
		virq = ptr - irq_map;
	else
		virq = irq_find_mapping(host, hwirq);

	return virq;
}

void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
			     irq_hw_number_t hwirq)
{
	WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);

	/*
	 * Check if the radix tree exists yet.
	 * If not, then the irq will be inserted into the tree when it gets
	 * initialized.
	 */
	smp_rmb();
	if (revmap_trees_allocated < 1)
		return;

	if (virq != NO_IRQ) {
		mutex_lock(&revmap_trees_mutex);
		radix_tree_insert(&host->revmap_data.tree, hwirq,
				  &irq_map[virq]);
		mutex_unlock(&revmap_trees_mutex);
	}
}

unsigned int irq_linear_revmap(struct irq_host *host,
			       irq_hw_number_t hwirq)
{
	unsigned int *revmap;

	WARN_ON(host->revmap_type != IRQ_HOST_MAP_LINEAR);

	/* Check revmap bounds */
	if (unlikely(hwirq >= host->revmap_data.linear.size))
		return irq_find_mapping(host, hwirq);

	/* Check if revmap was allocated */
	revmap = host->revmap_data.linear.revmap;
	if (unlikely(revmap == NULL))
		return irq_find_mapping(host, hwirq);

	/* Fill up revmap with slow path if no mapping found */
	if (unlikely(revmap[hwirq] == NO_IRQ))
		revmap[hwirq] = irq_find_mapping(host, hwirq);

	return revmap[hwirq];
}
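
/*
 * Intended use (descriptive): a cascaded PIC's handler translates
 * hardware to virtual numbers on its fast path with
 *
 *	virq = irq_linear_revmap(host, hwirq);		for linear hosts
 *	virq = irq_radix_revmap_lookup(host, hwirq);	for tree hosts
 *
 * Both fall back to irq_find_mapping()'s linear scan while the
 * reverse map has not been populated yet.
 */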

unsigned int irq_alloc_virt(struct irq_host *host,
			    unsigned int count,
			    unsigned int hint)
{
	unsigned long flags;
	unsigned int i, j, found = NO_IRQ;

	if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS))
		return NO_IRQ;

	spin_lock_irqsave(&irq_big_lock, flags);

	/* Use hint for 1 interrupt if any */
	if (count == 1 && hint >= NUM_ISA_INTERRUPTS &&
	    hint < irq_virq_count && irq_map[hint].host == NULL) {
		found = hint;
		goto hint_found;
	}

	/* Look for count consecutive numbers in the allocatable
	 * (non-legacy) space
	 */
	for (i = NUM_ISA_INTERRUPTS, j = 0; i < irq_virq_count; i++) {
		if (irq_map[i].host != NULL)
			j = 0;
		else
			j++;

		if (j == count) {
			found = i - count + 1;
			break;
		}
	}
	if (found == NO_IRQ) {
		spin_unlock_irqrestore(&irq_big_lock, flags);
		return NO_IRQ;
	}
 hint_found:
	for (i = found; i < (found + count); i++) {
		irq_map[i].hwirq = host->inval_irq;
		smp_wmb();
		irq_map[i].host = host;
	}
	spin_unlock_irqrestore(&irq_big_lock, flags);
	return found;
}

void irq_free_virt(unsigned int virq, unsigned int count)
{
	unsigned long flags;
	unsigned int i;

	WARN_ON(virq < NUM_ISA_INTERRUPTS);
	WARN_ON(count == 0 || (virq + count) > irq_virq_count);

	spin_lock_irqsave(&irq_big_lock, flags);
	for (i = virq; i < (virq + count); i++) {
		struct irq_host *host;

		if (i < NUM_ISA_INTERRUPTS ||
		    (virq + count) > irq_virq_count)
			continue;

		host = irq_map[i].host;
		irq_map[i].hwirq = host->inval_irq;
		smp_wmb();
		irq_map[i].host = NULL;
	}
	spin_unlock_irqrestore(&irq_big_lock, flags);
}

void irq_early_init(void)
{
	unsigned int i;

	for (i = 0; i < NR_IRQS; i++)
		get_irq_desc(i)->status |= IRQ_NOREQUEST;
}

/* We need to create the radix trees late */
static int irq_late_init(void)
{
	struct irq_host *h;
	unsigned int i;

	/*
	 * No mutual exclusion with respect to accessors of the tree is needed
	 * here as the synchronization is done via the state variable
	 * revmap_trees_allocated.
	 */
	list_for_each_entry(h, &irq_hosts, link) {
		if (h->revmap_type == IRQ_HOST_MAP_TREE)
			INIT_RADIX_TREE(&h->revmap_data.tree, GFP_KERNEL);
	}

	/*
	 * Make sure the radix trees inits are visible before setting
	 * the flag
	 */
	smp_wmb();
	revmap_trees_allocated = 1;

	/*
	 * Insert the reverse mapping for those interrupts already present
	 * in irq_map[].
	 */
	mutex_lock(&revmap_trees_mutex);
	for (i = 0; i < irq_virq_count; i++) {
		if (irq_map[i].host &&
		    (irq_map[i].host->revmap_type == IRQ_HOST_MAP_TREE))
			radix_tree_insert(&irq_map[i].host->revmap_data.tree,
					  irq_map[i].hwirq, &irq_map[i]);
	}
	mutex_unlock(&revmap_trees_mutex);

	/*
	 * Make sure the radix trees insertions are visible before setting
	 * the flag
	 */
	smp_wmb();
	revmap_trees_allocated = 2;

	return 0;
}
arch_initcall(irq_late_init);

#ifdef CONFIG_VIRQ_DEBUG
static int virq_debug_show(struct seq_file *m, void *private)
{
	unsigned long flags;
	struct irq_desc *desc;
	const char *p;
	char none[] = "none";
	int i;

	seq_printf(m, "%-5s  %-7s  %-15s  %s\n", "virq", "hwirq",
		   "chip name", "host name");

	for (i = 1; i < NR_IRQS; i++) {
		desc = get_irq_desc(i);
		spin_lock_irqsave(&desc->lock, flags);

		if (desc->action && desc->action->handler) {
			seq_printf(m, "%5d  ", i);
			seq_printf(m, "0x%05lx  ", virq_to_hw(i));

			if (desc->chip && desc->chip->typename)
				p = desc->chip->typename;
			else
				p = none;
			seq_printf(m, "%-15s  ", p);

			if (irq_map[i].host && irq_map[i].host->of_node)
				p = irq_map[i].host->of_node->full_name;
			else
				p = none;
			seq_printf(m, "%s\n", p);
		}

		spin_unlock_irqrestore(&desc->lock, flags);
	}

	return 0;
}

static int virq_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, virq_debug_show, inode->i_private);
}

static const struct file_operations virq_debug_fops = {
	.open = virq_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init irq_debugfs_init(void)
{
	if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root,
				NULL, &virq_debug_fops) == NULL)
		return -ENOMEM;

	return 0;
}
__initcall(irq_debugfs_init);
#endif /* CONFIG_VIRQ_DEBUG */
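
/*
 * Sample "virq_mapping" debugfs output (values and names illustrative):
 *
 *	virq   hwirq    chip name        host name
 *	   16  0x00009  MPIC             /interrupt-controller@40000
 */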

#ifdef CONFIG_PPC64
static int __init setup_noirqdistrib(char *str)
{
	distribute_irqs = 0;
	return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */