/*
 * linux/arch/arm/kernel/irq.c
 *
 * Copyright (C) 1992 Linus Torvalds
 * Modifications for ARM processor Copyright (C) 1995-2000 Russell King.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them.  Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * IRQs are in fact implemented a bit like signal handlers for the kernel.
 * Naturally it's not a 1:1 relation, but there are similarities.
 */
#include <linux/config.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/proc_fs.h>

#include <asm/irq.h>
#include <asm/system.h>
#include <asm/mach/irq.h>

/*
 * Maximum IRQ count.  Currently, this is arbitrary.  However, it should
 * not be set too low to prevent false triggering.  Conversely, if it
 * is set too high, then you could miss a stuck IRQ.
 *
 * Maybe we ought to set a timer and re-enable the IRQ at a later time?
 */
#define MAX_IRQ_CNT	100000
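
/*
 * For scale (HZ is configuration-dependent): at HZ=100 a jiffy is
 * 10ms, so hitting MAX_IRQ_CNT within one jiffy means roughly ten
 * million interrupts per second from a single source.
 */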

static int noirqdebug;
static volatile unsigned long irq_err_count;
static DEFINE_SPINLOCK(irq_controller_lock);
static LIST_HEAD(irq_pending);

struct irqdesc irq_desc[NR_IRQS];
void (*init_arch_irq)(void) __initdata = NULL;

/*
 * No architecture-specific irq_finish function defined in arm/arch/irqs.h.
 */
#ifndef irq_finish
#define irq_finish(irq) do { } while (0)
#endif
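
/*
 * Illustrative only: the #ifndef above means any earlier definition of
 * irq_finish() wins, so a platform wanting per-IRQ completion work
 * could provide something like
 *
 *	#define irq_finish(irq)	do { my_soc_eoi(irq); } while (0)
 *
 * where my_soc_eoi() is a made-up name for a SoC-specific
 * end-of-interrupt hook.
 */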

/*
 * Dummy mask/unmask handler
 */
void dummy_mask_unmask_irq(unsigned int irq)
{
}

irqreturn_t no_action(int irq, void *dev_id, struct pt_regs *regs)
{
	return IRQ_NONE;
}

void do_bad_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
	irq_err_count += 1;
	printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
}

static struct irqchip bad_chip = {
	.ack	= dummy_mask_unmask_irq,
	.mask	= dummy_mask_unmask_irq,
	.unmask	= dummy_mask_unmask_irq,
};

static struct irqdesc bad_irq_desc = {
	.chip		= &bad_chip,
	.handle		= do_bad_IRQ,
	.pend		= LIST_HEAD_INIT(bad_irq_desc.pend),
	.disable_depth	= 1,
};

#ifdef CONFIG_SMP
void synchronize_irq(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;

	while (desc->running)
		barrier();
}
EXPORT_SYMBOL(synchronize_irq);

#define smp_set_running(desc)	do { desc->running = 1; } while (0)
#define smp_clear_running(desc)	do { desc->running = 0; } while (0)
#else
#define smp_set_running(desc)	do { } while (0)
#define smp_clear_running(desc)	do { } while (0)
#endif

/**
 *	disable_irq_nosync - disable an irq without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and disables
 *	are nested.  We do this lazily.
 *
 *	This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	desc->disable_depth++;
	list_del_init(&desc->pend);
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and disables
 *	are nested.  This function waits for any pending IRQ
 *	handlers for this interrupt to complete before returning.
 *	If you use this function while holding a resource the IRQ
 *	handler may need, you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;

	disable_irq_nosync(irq);
	if (desc->action)
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
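
/*
 * A sketch of the nested depth accounting, assuming a requested IRQ
 * line that starts at disable_depth == 0:
 *
 *	disable_irq(irq);	depth 0 -> 1, handlers kept off
 *	disable_irq(irq);	depth 1 -> 2
 *	enable_irq(irq);	depth 2 -> 1, still disabled
 *	enable_irq(irq);	depth 1 -> 0, line unmasked again
 */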

/**
 *	enable_irq - enable interrupt handling on an irq
 *	@irq: Interrupt to enable
 *
 *	Re-enables the processing of interrupts on this IRQ line.
 *	Note that this may call the interrupt handler, so you may
 *	get unexpected results if you hold IRQs disabled.
 *
 *	This function may be called from IRQ context.
 */
void enable_irq(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	if (unlikely(!desc->disable_depth)) {
		printk("enable_irq(%u) unbalanced from %p\n", irq,
			__builtin_return_address(0));
	} else if (!--desc->disable_depth) {
		desc->probing = 0;
		desc->chip->unmask(irq);

		/*
		 * If the interrupt is waiting to be processed,
		 * try to re-run it.  We can't directly run it
		 * from here since the caller might be in an
		 * interrupt-protected region.
		 */
		if (desc->pending && list_empty(&desc->pend)) {
			desc->pending = 0;
			if (!desc->chip->retrigger ||
			    desc->chip->retrigger(irq))
				list_add(&desc->pend, &irq_pending);
		}
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
EXPORT_SYMBOL(enable_irq);

/*
 * Enable wake on selected irq
 */
void enable_irq_wake(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	if (desc->chip->wake)
		desc->chip->wake(irq, 1);
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
EXPORT_SYMBOL(enable_irq_wake);

void disable_irq_wake(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	if (desc->chip->wake)
		desc->chip->wake(irq, 0);
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
EXPORT_SYMBOL(disable_irq_wake);

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, cpu;
	struct irqaction *action;
	unsigned long flags;

	if (i == 0) {
		char cpuname[12];

		seq_printf(p, "    ");
		for_each_present_cpu(cpu) {
			sprintf(cpuname, "CPU%d", cpu);
			seq_printf(p, " %10s", cpuname);
		}
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		spin_lock_irqsave(&irq_controller_lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto unlock;

		seq_printf(p, "%3d: ", i);
		for_each_present_cpu(cpu)
			seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[i]);
		seq_printf(p, "  %s", action->name);
		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
unlock:
		spin_unlock_irqrestore(&irq_controller_lock, flags);
	} else if (i == NR_IRQS) {
#ifdef CONFIG_ARCH_ACORN
		show_fiq_list(p, v);
#endif
#ifdef CONFIG_SMP
		show_ipi_list(p);
#endif
		seq_printf(p, "Err: %10lu\n", irq_err_count);
	}
	return 0;
}
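
/*
 * show_interrupts() backs /proc/interrupts.  The output is shaped
 * roughly like this (counts and names are illustrative only):
 *
 *	           CPU0
 *	 19:     271822   timer
 *	 23:       1842   serial
 *	Err:          0
 */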

/*
 * IRQ lock detection.
 *
 * Hopefully, this should get us out of a few locked situations.
 * However, it may take a while for this to happen, since we need
 * a large number of IRQs to appear in the same jiffy with the
 * same instruction pointer (or within 2 instructions).
 */
static int check_irq_lock(struct irqdesc *desc, int irq, struct pt_regs *regs)
{
	unsigned long instr_ptr = instruction_pointer(regs);

	if (desc->lck_jif == jiffies &&
	    desc->lck_pc >= instr_ptr && desc->lck_pc < instr_ptr + 8) {
		desc->lck_cnt += 1;

		if (desc->lck_cnt > MAX_IRQ_CNT) {
			printk(KERN_ERR "IRQ LOCK: IRQ%d is locking the system, disabled\n", irq);
			return 1;
		}
	} else {
		desc->lck_cnt = 0;
		desc->lck_pc  = instruction_pointer(regs);
		desc->lck_jif = jiffies;
	}
	return 0;
}

static void
report_bad_irq(unsigned int irq, struct pt_regs *regs, struct irqdesc *desc, int ret)
{
	static int count = 100;
	struct irqaction *action;

	if (!count || noirqdebug)
		return;

	count--;

	if (ret != IRQ_HANDLED && ret != IRQ_NONE) {
		printk("irq%u: bogus retval mask %x\n", irq, ret);
	} else {
		printk("irq%u: nobody cared\n", irq);
	}
	show_regs(regs);
	dump_stack();
	printk(KERN_ERR "handlers:");
	action = desc->action;
	do {
		printk("\n" KERN_ERR "[<%p>]", action->handler);
		print_symbol(" (%s)", (unsigned long)action->handler);
		action = action->next;
	} while (action);
	printk("\n");
}

static int
__do_irq(unsigned int irq, struct irqaction *action, struct pt_regs *regs)
{
	unsigned int status;
	int ret, retval = 0;

	spin_unlock(&irq_controller_lock);

	if (!(action->flags & SA_INTERRUPT))
		local_irq_enable();

	status = 0;
	do {
		ret = action->handler(irq, action->dev_id, regs);
		if (ret == IRQ_HANDLED)
			status |= action->flags;
		retval |= ret;
		action = action->next;
	} while (action);

	if (status & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);

	spin_lock_irq(&irq_controller_lock);

	return retval;
}

/*
 * This is for software-decoded IRQs.  The caller is expected to
 * handle the ack, clear, mask and unmask issues.
 */
void
do_simple_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
	struct irqaction *action;
	const unsigned int cpu = smp_processor_id();

	desc->triggered = 1;

	kstat_cpu(cpu).irqs[irq]++;

	smp_set_running(desc);

	action = desc->action;
	if (action) {
		int ret = __do_irq(irq, action, regs);
		if (ret != IRQ_HANDLED)
			report_bad_irq(irq, regs, desc, ret);
	}

	smp_clear_running(desc);
}

/*
 * Most edge-triggered IRQ implementations seem to take a broken
 * approach to this.  Hence the complexity.
 */
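/*
 * The approach taken here: an edge that arrives while the IRQ is
 * masked or already being serviced cannot be re-asserted by the
 * hardware, so it is latched in desc->pending and replayed later,
 * either via chip->retrigger() or the irq_pending list (see
 * enable_irq() and do_pending_irqs()).
 */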
void
do_edge_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
	const unsigned int cpu = smp_processor_id();

	desc->triggered = 1;

	/*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ.  Instead, turn on the
	 * hardware masks.
	 */
	if (unlikely(desc->running || desc->disable_depth))
		goto running;

	/*
	 * Acknowledge and clear the IRQ, but don't mask it.
	 */
	desc->chip->ack(irq);

	/*
	 * Mark the IRQ currently in progress.
	 */
	desc->running = 1;

	kstat_cpu(cpu).irqs[irq]++;

	do {
		struct irqaction *action;

		action = desc->action;
		if (!action)
			break;

		if (desc->pending && !desc->disable_depth) {
			desc->pending = 0;
			desc->chip->unmask(irq);
		}

		__do_irq(irq, action, regs);
	} while (desc->pending && !desc->disable_depth);

	desc->running = 0;

	/*
	 * If we were disabled or freed, shut down the handler.
	 */
	if (likely(desc->action && !check_irq_lock(desc, irq, regs)))
		return;

running:
	/*
	 * We got another IRQ while this one was masked or
	 * currently running.  Delay it.
	 */
	desc->pending = 1;
	desc->chip->mask(irq);
	desc->chip->ack(irq);
}

/*
 * Level-based IRQ handler.  Nice and simple.
 */
void
do_level_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
	struct irqaction *action;
	const unsigned int cpu = smp_processor_id();

	desc->triggered = 1;

	/*
	 * Acknowledge, clear _AND_ disable the interrupt.
	 */
	desc->chip->ack(irq);

	if (likely(!desc->disable_depth)) {
		kstat_cpu(cpu).irqs[irq]++;

		smp_set_running(desc);

		/*
		 * Return with this interrupt masked if no action
		 */
		action = desc->action;
		if (action) {
			int ret = __do_irq(irq, desc->action, regs);

			if (ret != IRQ_HANDLED)
				report_bad_irq(irq, regs, desc, ret);

			if (likely(!desc->disable_depth &&
				   !check_irq_lock(desc, irq, regs)))
				desc->chip->unmask(irq);
		}

		smp_clear_running(desc);
	}
}

static void do_pending_irqs(struct pt_regs *regs)
{
	struct list_head head, *l, *n;

	do {
		struct irqdesc *desc;

		/*
		 * First, take the pending interrupts off the list.
		 * The act of calling the handlers may add some IRQs
		 * back onto the list.
		 */
		head = irq_pending;
		INIT_LIST_HEAD(&irq_pending);
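		/*
		 * 'head' is a by-value copy, so its list neighbours
		 * still point back at irq_pending; repoint them at head.
		 */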
		head.next->prev = &head;
		head.prev->next = &head;

		/*
		 * Now run each entry.  We must delete it from our
		 * list before calling the handler.
		 */
		list_for_each_safe(l, n, &head) {
			desc = list_entry(l, struct irqdesc, pend);
			list_del_init(&desc->pend);
			desc->handle(desc - irq_desc, desc, regs);
		}

		/*
		 * The list must be empty.
		 */
		BUG_ON(!list_empty(&head));
	} while (!list_empty(&irq_pending));
}

/*
 * asm_do_IRQ handles all hardware IRQs.  Decoded IRQs should not
 * come via this function.  Instead, they should provide their
 * own 'handler'.
 */
asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct irqdesc *desc = irq_desc + irq;

	/*
	 * Some hardware gives randomly wrong interrupts.  Rather
	 * than crashing, do something sensible.
	 */
	if (irq >= NR_IRQS)
		desc = &bad_irq_desc;

	irq_enter();
	spin_lock(&irq_controller_lock);
	desc->handle(irq, desc, regs);

	/*
	 * Now re-run any pending interrupts.
	 */
	if (!list_empty(&irq_pending))
		do_pending_irqs(regs);

	irq_finish(irq);

	spin_unlock(&irq_controller_lock);
	irq_exit();
}

void __set_irq_handler(unsigned int irq, irq_handler_t handle, int is_chained)
{
	struct irqdesc *desc;
	unsigned long flags;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR "Trying to install handler for IRQ%d\n", irq);
		return;
	}

	if (handle == NULL)
		handle = do_bad_IRQ;

	desc = irq_desc + irq;

	if (is_chained && desc->chip == &bad_chip)
		printk(KERN_WARNING "Trying to install chained handler for IRQ%d\n", irq);

	spin_lock_irqsave(&irq_controller_lock, flags);
	if (handle == do_bad_IRQ) {
		desc->chip->mask(irq);
		desc->chip->ack(irq);
		desc->disable_depth = 1;
	}
	desc->handle = handle;
	if (handle != do_bad_IRQ && is_chained) {
		desc->valid = 0;
		desc->probe_ok = 0;
		desc->disable_depth = 0;
		desc->chip->unmask(irq);
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}

void set_irq_chip(unsigned int irq, struct irqchip *chip)
{
	struct irqdesc *desc;
	unsigned long flags;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR "Trying to install chip for IRQ%d\n", irq);
		return;
	}

	if (chip == NULL)
		chip = &bad_chip;

	desc = irq_desc + irq;
	spin_lock_irqsave(&irq_controller_lock, flags);
	desc->chip = chip;
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}

int set_irq_type(unsigned int irq, unsigned int type)
{
	struct irqdesc *desc;
	unsigned long flags;
	int ret = -ENXIO;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq);
		return -ENODEV;
	}

	desc = irq_desc + irq;
	if (desc->chip->type) {
		spin_lock_irqsave(&irq_controller_lock, flags);
		ret = desc->chip->type(irq, type);
		spin_unlock_irqrestore(&irq_controller_lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL(set_irq_type);

void set_irq_flags(unsigned int irq, unsigned int iflags)
{
	struct irqdesc *desc;
	unsigned long flags;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR "Trying to set irq flags for IRQ%d\n", irq);
		return;
	}

	desc = irq_desc + irq;
	spin_lock_irqsave(&irq_controller_lock, flags);
	desc->valid = (iflags & IRQF_VALID) != 0;
	desc->probe_ok = (iflags & IRQF_PROBE) != 0;
	desc->noautoenable = (iflags & IRQF_NOAUTOEN) != 0;
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
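
/*
 * Illustrative only: platform IRQ setup code typically wires a source
 * up with set_irq_chip(), a handler, and set_irq_flags() together
 * (set_irq_handler() in asm/mach/irq.h wraps __set_irq_handler()
 * above), along the lines of:
 *
 *	set_irq_chip(irq, &my_chip);	my_chip is a made-up name
 *	set_irq_handler(irq, do_level_IRQ);
 *	set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
 */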

int setup_irq(unsigned int irq, struct irqaction *new)
{
	int shared = 0;
	struct irqaction *old, **p;
	unsigned long flags;
	struct irqdesc *desc;

	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, so we want to call it
		 * first, outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is loaded without actually installing a new
		 * handler, but is this really a problem?  Only the
		 * sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	desc = irq_desc + irq;
	spin_lock_irqsave(&irq_controller_lock, flags);
	p = &desc->action;
	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & new->flags & SA_SHIRQ)) {
			spin_unlock_irqrestore(&irq_controller_lock, flags);
			return -EBUSY;
		}

		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	*p = new;

	if (!shared) {
		desc->probing = 0;
		desc->running = 0;
		desc->pending = 0;
		desc->disable_depth = 1;
		if (!desc->noautoenable) {
			desc->disable_depth = 0;
			desc->chip->unmask(irq);
		}
	}

	spin_unlock_irqrestore(&irq_controller_lock, flags);
	return 0;
}

/**
 *	request_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs
 *	@irq_flags: Interrupt type flags
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling.  From the point this
 *	call is made your handler function may be invoked.  Since
 *	your handler function must clear any interrupt the board
 *	raises, you must take care both to initialise your hardware
 *	and to set up the interrupt handler in the right order.
 *
 *	Dev_id must be globally unique.  Normally the address of the
 *	device data structure is used as the cookie.  Since the handler
 *	receives this value it makes sense to use it.
 *
 *	If your interrupt is shared you must pass a non-NULL dev_id
 *	as this is required when freeing the interrupt.
 *
 *	Flags:
 *
 *	SA_SHIRQ		Interrupt is shared
 *
 *	SA_INTERRUPT		Disable local interrupts while processing
 *
 *	SA_SAMPLE_RANDOM	The interrupt can be used for entropy
 */
int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
		unsigned long irq_flags, const char *devname, void *dev_id)
{
	int retval;
	struct irqaction *action;

	if (irq >= NR_IRQS || !irq_desc[irq].valid || !handler ||
	    (irq_flags & SA_SHIRQ && !dev_id))
		return -EINVAL;

	action = kmalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = irq_flags;
	cpus_clear(action->mask);
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

	retval = setup_irq(irq, action);

	if (retval)
		kfree(action);
	return retval;
}

EXPORT_SYMBOL(request_irq);
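
/*
 * Illustrative only (all names made up): a driver would typically
 * claim its interrupt like this, after its hardware is initialised:
 *
 *	static irqreturn_t my_isr(int irq, void *dev_id, struct pt_regs *regs)
 *	{
 *		struct my_dev *dev = dev_id;
 *		...
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_irq(dev->irq, my_isr, SA_SHIRQ, "mydev", dev);
 *	if (err)
 *		goto fail;
 */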

/**
 *	free_irq - free an interrupt
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler.  The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function.
 *
 *	This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction *action, **p;
	unsigned long flags;

	if (irq >= NR_IRQS || !irq_desc[irq].valid) {
		printk(KERN_ERR "Trying to free IRQ%d\n", irq);
		dump_stack();
		return;
	}

	spin_lock_irqsave(&irq_controller_lock, flags);
	for (p = &irq_desc[irq].action; (action = *p) != NULL; p = &action->next) {
		if (action->dev_id != dev_id)
			continue;

		/* Found it - now free it */
		*p = action->next;
		break;
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);

	if (!action) {
		printk(KERN_ERR "Trying to free already-free IRQ%d\n", irq);
		dump_stack();
	} else {
		synchronize_irq(irq);
		kfree(action);
	}
}

EXPORT_SYMBOL(free_irq);

static DECLARE_MUTEX(probe_sem);

/* Start the interrupt probing.  Unlike other architectures,
 * we don't return a mask of interrupts from probe_irq_on,
 * but return the number of interrupts enabled for the probe.
 * The interrupts which have been enabled for probing are
 * instead recorded in the irq_desc structure.
 */
unsigned long probe_irq_on(void)
{
	unsigned int i, irqs = 0;
	unsigned long delay;

	down(&probe_sem);

	/*
	 * first snaffle up any unassigned but
	 * probe-able interrupts
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		if (!irq_desc[i].probe_ok || irq_desc[i].action)
			continue;

		irq_desc[i].probing = 1;
		irq_desc[i].triggered = 0;
		if (irq_desc[i].chip->type)
			irq_desc[i].chip->type(i, IRQT_PROBE);
		irq_desc[i].chip->unmask(i);
		irqs += 1;
	}
	spin_unlock_irq(&irq_controller_lock);

	/*
	 * wait for spurious interrupts to mask themselves out again
	 */
	for (delay = jiffies + HZ/10; time_before(jiffies, delay); )
		/* min 100ms delay */;

	/*
	 * now filter out any obviously spurious interrupts
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		if (irq_desc[i].probing && irq_desc[i].triggered) {
			irq_desc[i].probing = 0;
			irqs -= 1;
		}
	}
	spin_unlock_irq(&irq_controller_lock);

	return irqs;
}

EXPORT_SYMBOL(probe_irq_on);

unsigned int probe_irq_mask(unsigned long irqs)
{
	unsigned int mask = 0, i;

	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < 16 && i < NR_IRQS; i++)
		if (irq_desc[i].probing && irq_desc[i].triggered)
			mask |= 1 << i;
	spin_unlock_irq(&irq_controller_lock);

	up(&probe_sem);

	return mask;
}
EXPORT_SYMBOL(probe_irq_mask);

/*
 * Possible return values:
 *  >= 0 - interrupt number
 *    -1 - no interrupt/many interrupts
 */
int probe_irq_off(unsigned long irqs)
{
	unsigned int i;
	int irq_found = NO_IRQ;

	/*
	 * look at the interrupts, and find exactly one
	 * of those we were probing that has been triggered
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		if (irq_desc[i].probing &&
		    irq_desc[i].triggered) {
			if (irq_found != NO_IRQ) {
				irq_found = NO_IRQ;
				goto out;
			}
			irq_found = i;
		}
	}

	if (irq_found == -1)
		irq_found = NO_IRQ;
out:
	spin_unlock_irq(&irq_controller_lock);

	up(&probe_sem);

	return irq_found;
}

EXPORT_SYMBOL(probe_irq_off);
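
/*
 * Illustrative probing sequence (driver-side sketch):
 *
 *	irqs = probe_irq_on();
 *	... coax the device into raising its interrupt ...
 *	irq = probe_irq_off(irqs);	NO_IRQ if none or ambiguous
 */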

#ifdef CONFIG_SMP
static void route_irq(struct irqdesc *desc, unsigned int irq, unsigned int cpu)
{
	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->cpu, cpu);

	spin_lock_irq(&irq_controller_lock);
	desc->cpu = cpu;
	desc->chip->set_cpu(desc, irq, cpu);
	spin_unlock_irq(&irq_controller_lock);
}

#ifdef CONFIG_PROC_FS
static int
irq_affinity_read_proc(char *page, char **start, off_t off, int count,
		       int *eof, void *data)
{
	struct irqdesc *desc = irq_desc + ((int)data);
	int len = cpumask_scnprintf(page, count, desc->affinity);

	if (count - len < 2)
		return -EINVAL;
	page[len++] = '\n';
	page[len] = '\0';

	return len;
}

static int
irq_affinity_write_proc(struct file *file, const char __user *buffer,
			unsigned long count, void *data)
{
	unsigned int irq = (unsigned int)data;
	struct irqdesc *desc = irq_desc + irq;
	cpumask_t affinity, tmp;
	int ret = -EIO;

	if (!desc->chip->set_cpu)
		goto out;

	ret = cpumask_parse(buffer, count, affinity);
	if (ret)
		goto out;

	cpus_and(tmp, affinity, cpu_online_map);
	if (cpus_empty(tmp)) {
		ret = -EINVAL;
		goto out;
	}

	desc->affinity = affinity;
	route_irq(desc, irq, first_cpu(tmp));
	ret = count;

out:
	return ret;
}
#endif
#endif

void __init init_irq_proc(void)
{
#if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS)
	struct proc_dir_entry *dir;
	int irq;

	dir = proc_mkdir("irq", 0);
	if (!dir)
		return;

	for (irq = 0; irq < NR_IRQS; irq++) {
		struct proc_dir_entry *entry;
		struct irqdesc *desc;
		char name[16];

		desc = irq_desc + irq;
		memset(name, 0, sizeof(name));
		snprintf(name, sizeof(name) - 1, "%u", irq);

		desc->procdir = proc_mkdir(name, dir);
		if (!desc->procdir)
			continue;

		entry = create_proc_entry("smp_affinity", 0600, desc->procdir);
		if (entry) {
			entry->nlink = 1;
			entry->data = (void *)irq;
			entry->read_proc = irq_affinity_read_proc;
			entry->write_proc = irq_affinity_write_proc;
		}
	}
#endif
}

void __init init_IRQ(void)
{
	struct irqdesc *desc;
	extern void init_dma(void);
	int irq;

#ifdef CONFIG_SMP
	bad_irq_desc.affinity = CPU_MASK_ALL;
	bad_irq_desc.cpu = smp_processor_id();
#endif

	for (irq = 0, desc = irq_desc; irq < NR_IRQS; irq++, desc++) {
		*desc = bad_irq_desc;
		INIT_LIST_HEAD(&desc->pend);
	}

	init_arch_irq();
	init_dma();
}

static int __init noirqdebug_setup(char *str)
{
	noirqdebug = 1;
	return 1;
}

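
/*
 * Booting with "noirqdebug" on the kernel command line suppresses the
 * report_bad_irq() diagnostics above.
 */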
__setup("noirqdebug", noirqdebug_setup);