/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "internals.h"
/**
 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned int state;

	if (!desc)
		return;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section. This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (desc->istate & IRQS_INPROGRESS)
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		state = desc->istate;
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (state & IRQS_INPROGRESS);

	/*
	 * We made sure that no hardirq handler is running. Now verify
	 * that no threaded handlers are active.
	 */
	wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
}
EXPORT_SYMBOL(synchronize_irq);
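
/*
 * Usage sketch (illustrative only, not part of this file): a driver that
 * tears down a resource its handler touches typically quiesces the source
 * first and then waits for running handlers before freeing the resource.
 * The foo_* names below are hypothetical.
 *
 *	foo_hw_mask_irq(foo);		// stop the device raising the IRQ
 *	synchronize_irq(foo->irq);	// wait out handlers on other CPUs
 *	kfree(foo->rx_buffer);		// now safe: no handler can touch it
 */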
#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

/**
 * irq_can_set_affinity - Check if the affinity of a given irq can be set
 * @irq: Interrupt to check
 */
int irq_can_set_affinity(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irqd_can_balance(&desc->irq_data) ||
	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
		return 0;

	return 1;
}
/**
 * irq_set_thread_affinity - Notify irq threads to adjust affinity
 * @desc: irq descriptor which has affinity changed
 *
 * We just set IRQTF_AFFINITY and delegate the affinity setting
 * to the interrupt thread itself. We can not call
 * set_cpus_allowed_ptr() here as we hold desc->lock and this
 * code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action = desc->action;

	while (action) {
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
		action = action->next;
	}
}
#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline bool irq_can_move_pcntxt(struct irq_desc *desc)
{
	return desc->status & IRQ_MOVE_PCNTXT;
}

static inline bool irq_move_pending(struct irq_desc *desc)
{
	return irqd_is_setaffinity_pending(&desc->irq_data);
}

static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
{
	cpumask_copy(desc->pending_mask, mask);
}

static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
{
	cpumask_copy(mask, desc->pending_mask);
}
#else
static inline bool irq_can_move_pcntxt(struct irq_desc *desc) { return true; }
static inline bool irq_move_pending(struct irq_desc *desc) { return false; }
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
#endif
/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq: Interrupt to set affinity
 * @mask: cpumask of the target CPUs
 */
int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_chip *chip = desc->irq_data.chip;
	unsigned long flags;
	int ret = 0;

	if (!chip->irq_set_affinity)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	if (irq_can_move_pcntxt(desc)) {
		ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
		switch (ret) {
		case IRQ_SET_MASK_OK:
			cpumask_copy(desc->irq_data.affinity, mask);
		case IRQ_SET_MASK_OK_NOCOPY:
			irq_set_thread_affinity(desc);
			ret = 0;
		}
	} else {
		irqd_set_move_pending(&desc->irq_data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		schedule_work(&desc->affinity_notify->work);
	}
	irq_compat_set_affinity(desc);
	irqd_set(&desc->irq_data, IRQD_AFFINITY_SET);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return ret;
}
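
/*
 * Usage sketch (illustrative only): pin an interrupt to CPU 2, or spread
 * it over a caller-built mask. The foo->irq and foo->preferred_cpus names
 * below are hypothetical.
 *
 *	irq_set_affinity(foo->irq, cpumask_of(2));
 *
 *	cpumask_var_t mask;
 *	if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
 *		cpumask_and(mask, cpu_online_mask, foo->preferred_cpus);
 *		irq_set_affinity(foo->irq, mask);
 *		free_cpumask_var(mask);
 *	}
 */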
int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc->affinity_hint = m;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
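
/*
 * Usage sketch (illustrative only): a multiqueue driver can publish the
 * preferred CPU for each queue vector via the hint (it is exposed as
 * /proc/irq/<n>/affinity_hint for tools like irqbalance) and should clear
 * it again before freeing the interrupt. The vec->* names are hypothetical.
 *
 *	irq_set_affinity_hint(vec->irq, cpumask_of(vec->cpu));
 *	...
 *	irq_set_affinity_hint(vec->irq, NULL);
 *	free_irq(vec->irq, vec);
 */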
static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(desc))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}
/**
 * irq_set_affinity_notifier - control notification of IRQ affinity changes
 * @irq: Interrupt for which to enable/disable notification
 * @notify: Context for notification, or %NULL to disable
 *          notification. Function pointers must be initialised;
 *          the other fields will be initialised by this function.
 *
 * Must be called in process context. Notification may only be enabled
 * after the IRQ is allocated and must be disabled before the IRQ is
 * freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_affinity_notify *old_notify;
	unsigned long flags;

	/* The release function is promised process context */
	might_sleep();

	if (!desc)
		return -EINVAL;

	/* Complete initialisation of *notify */
	if (notify) {
		notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	old_notify = desc->affinity_notify;
	desc->affinity_notify = notify;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (old_notify)
		kref_put(&old_notify->kref, old_notify->release);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
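
/*
 * Usage sketch (illustrative only): a driver embeds a struct
 * irq_affinity_notify, fills in the two callbacks and registers it;
 * passing NULL unregisters it, which must happen before free_irq().
 * The foo_* names are hypothetical.
 *
 *	static void foo_notify(struct irq_affinity_notify *notify,
 *			       const cpumask_t *mask)
 *	{
 *		struct foo_dev *foo = container_of(notify, struct foo_dev,
 *						   affinity_notify);
 *		// re-steer completion work to the new CPU set
 *	}
 *
 *	static void foo_release(struct kref *ref)
 *	{
 *		// drop the reference held by the notifier machinery
 *	}
 *
 *	foo->affinity_notify.notify = foo_notify;
 *	foo->affinity_notify.release = foo_release;
 *	irq_set_affinity_notifier(foo->irq, &foo->affinity_notify);
 *	...
 *	irq_set_affinity_notifier(foo->irq, NULL);
 */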
#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
static int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct cpumask *set = irq_default_affinity;
	int ret;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!irq_can_set_affinity(irq))
		return 0;

	/*
	 * Preserve a userspace affinity setup, but make sure that
	 * one of the targets is online.
	 */
	if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_data.affinity,
				       cpu_online_mask))
			set = desc->irq_data.affinity;
		else {
			irq_compat_clr_affinity(desc);
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
		}
	}

	cpumask_and(mask, cpu_online_mask, set);
	ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
	switch (ret) {
	case IRQ_SET_MASK_OK:
		cpumask_copy(desc->irq_data.affinity, mask);
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_set_thread_affinity(desc);
	}
	return 0;
}
#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
{
	return irq_select_affinity(irq);
}
#endif
/*
 * Called when affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = setup_affinity(irq, desc, mask);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

#else /* !CONFIG_SMP */

static inline int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
	return 0;
}
#endif /* CONFIG_SMP */
void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
{
	if (suspend) {
		if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
			return;
		desc->istate |= IRQS_SUSPENDED;
	}

	if (!desc->depth++)
		irq_disable(desc);
}
/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and Enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc)
		return;

	chip_bus_lock(desc);
	raw_spin_lock_irqsave(&desc->lock, flags);
	__disable_irq(desc, irq, false);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL(disable_irq_nosync);
/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return;

	disable_irq_nosync(irq);
	if (desc->action)
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
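
/*
 * Usage sketch (illustrative only): disable_irq()/enable_irq() nest, so a
 * driver can bracket a critical reconfiguration even if other paths also
 * disable the line. disable_irq() may sleep waiting for handlers; use
 * disable_irq_nosync() from atomic context. The foo_* names are
 * hypothetical.
 *
 *	disable_irq(foo->irq);		// waits for running handlers
 *	foo_reprogram_hw(foo);		// handler cannot run concurrently
 *	enable_irq(foo->irq);		// re-enabled on the matching call
 */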
void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
{
	if (resume) {
		if (!(desc->istate & IRQS_SUSPENDED)) {
			if (!desc->action)
				return;
			if (!(desc->action->flags & IRQF_FORCE_RESUME))
				return;
			/* Pretend that it got disabled ! */
			desc->depth++;
		}
		desc->istate &= ~IRQS_SUSPENDED;
	}

	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
		break;
	case 1: {
		if (desc->istate & IRQS_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		desc->status |= IRQ_NOPROBE;
		irq_enable(desc);
		check_irq_resend(desc, irq);
		/* fall-through */
	}
	default:
		desc->depth--;
	}
}
/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context only when
 * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
void enable_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc)
		return;

	if (WARN(!desc->irq_data.chip,
		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		return;

	chip_bus_lock(desc);
	raw_spin_lock_irqsave(&desc->lock, flags);
	__enable_irq(desc, irq, false);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL(enable_irq);
static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (desc->irq_data.chip->irq_set_wake)
		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

	return ret;
}
/**
 * irq_set_irq_wake - control irq power management wakeup
 * @irq: interrupt to control
 * @on: enable/disable power management wakeup
 *
 * Enable/disable power management wakeup mode, which is
 * disabled by default. Enables and disables must match,
 * just as they match for non-wakeup mode support.
 *
 * Wakeup mode lets this IRQ wake the system from sleep
 * states like "suspend to RAM".
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret = 0;

	/* wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	chip_bus_lock(desc);
	raw_spin_lock_irqsave(&desc->lock, flags);
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				desc->istate |= IRQS_WAKEUP;
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				desc->istate &= ~IRQS_WAKEUP;
		}
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(desc);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);
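
/*
 * Usage sketch (illustrative only): a driver whose interrupt should wake
 * the system from suspend arms wakeup in its suspend hook and disarms it
 * on resume; enables and disables must balance. Drivers usually go through
 * the enable_irq_wake()/disable_irq_wake() wrappers. The foo_* names are
 * hypothetical.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(foo->irq);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			disable_irq_wake(foo->irq);
 *		return 0;
 *	}
 */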
/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	if (!desc)
		return 0;

	if (desc->status & IRQ_NOREQUEST)
		return 0;

	raw_spin_lock_irqsave(&desc->lock, flags);
	action = desc->action;
	if (action)
		if (irqflags & action->flags & IRQF_SHARED)
			action = NULL;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return !action;
}
void compat_irq_chip_set_default_handler(struct irq_desc *desc)
{
	/*
	 * If the architecture still has not overridden
	 * the flow handler then zap the default. This
	 * should catch incorrect flow-type setting.
	 */
	if (desc->handle_irq == &handle_bad_irq)
		desc->handle_irq = NULL;
}
int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
		      unsigned long flags)
{
	int ret;
	struct irq_chip *chip = desc->irq_data.chip;

	if (!chip || !chip->irq_set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n", irq,
			 chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	/* caller masked out all except trigger mode flags */
	ret = chip->irq_set_type(&desc->irq_data, flags);

	if (ret)
		pr_err("setting trigger mode %lu for irq %u failed (%pF)\n",
		       flags, irq, chip->irq_set_type);
	else {
		if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
			flags |= IRQ_LEVEL;
		/* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
		desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
		desc->status |= flags;

		if (chip != desc->irq_data.chip)
			irq_chip_set_defaults(desc->irq_data.chip);
	}

	return ret;
}
/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}
static int irq_wait_for_interrupt(struct irqaction *action)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
	}
	return -1;
}
/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler finished. unmask if the interrupt has not been disabled and
 * is marked MASKED.
 */
static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
{
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be we need to protect us against
	 * the following scenario:
	 *
	 * The thread is faster done than the hard interrupt handler
	 * on the other CPU. If we unmask the irq line then the
	 * interrupt can come in again and masks the line, leaves due
	 * to IRQS_INPROGRESS and the irq line is masked forever.
	 */
	if (unlikely(desc->istate & IRQS_INPROGRESS)) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

	if (!(desc->istate & IRQS_DISABLED) && (desc->istate & IRQS_MASKED)) {
		irq_compat_clr_masked(desc);
		desc->istate &= ~IRQS_MASKED;
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	}
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}
#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
	cpumask_copy(mask, desc->irq_data.affinity);
	raw_spin_unlock_irq(&desc->lock);

	set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif
/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	static const struct sched_param param = {
		.sched_priority = MAX_USER_RT_PRIO/2,
	};
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	int wake, oneshot = desc->istate & IRQS_ONESHOT;

	sched_setscheduler(current, SCHED_FIFO, &param);
	current->irqaction = action;

	while (!irq_wait_for_interrupt(action)) {

		irq_thread_check_affinity(desc, action);

		atomic_inc(&desc->threads_active);

		raw_spin_lock_irq(&desc->lock);
		if (unlikely(desc->istate & IRQS_DISABLED)) {
			/*
			 * CHECKME: We might need a dedicated
			 * IRQ_THREAD_PENDING flag here, which
			 * retriggers the thread in check_irq_resend()
			 * but AFAICT IRQS_PENDING should be fine as it
			 * retriggers the interrupt itself --- tglx
			 */
			irq_compat_set_pending(desc);
			desc->istate |= IRQS_PENDING;
			raw_spin_unlock_irq(&desc->lock);
		} else {
			raw_spin_unlock_irq(&desc->lock);

			action->thread_fn(action->irq, action->dev_id);

			if (oneshot)
				irq_finalize_oneshot(action->irq, desc);
		}

		wake = atomic_dec_and_test(&desc->threads_active);

		if (wake && waitqueue_active(&desc->wait_for_threads))
			wake_up(&desc->wait_for_threads);
	}

	/*
	 * Clear irqaction. Otherwise exit_irq_thread() would make
	 * fuzz about an active irq thread going into nirvana.
	 */
	current->irqaction = NULL;
	return 0;
}
/*
 * Called from do_exit()
 */
void exit_irq_thread(void)
{
	struct task_struct *tsk = current;

	if (!tsk->irqaction)
		return;

	printk(KERN_ERR
	       "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);

	/*
	 * Set the THREAD DIED flag to prevent further wakeups of the
	 * soon to be gone threaded handler.
	 */
	set_bit(IRQTF_DIED, &tsk->irqaction->flags);
}
/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	const char *old_name = NULL;
	unsigned long flags;
	int ret, nested, shared = 0;
	cpumask_var_t mask;

	if (!desc)
		return -EINVAL;

	if (desc->irq_data.chip == &no_irq_chip)
		return -ENOSYS;
	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & IRQF_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem,
		 * only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/* Oneshot interrupts are not allowed with shared */
	if ((new->flags & IRQF_ONESHOT) && (new->flags & IRQF_SHARED))
		return -EINVAL;

	/*
	 * Check whether the interrupt nests into another interrupt
	 * thread.
	 */
	nested = desc->status & IRQ_NESTED_THREAD;
	if (nested) {
		if (!new->thread_fn)
			return -EINVAL;
		/*
		 * Replace the primary handler which was provided from
		 * the driver for non nested interrupt handling by the
		 * dummy function which warns when called.
		 */
		new->handler = irq_nested_primary_handler;
	}

	/*
	 * Create a handler thread when a thread function is supplied
	 * and the interrupt does not nest into another interrupt
	 * thread.
	 */
	if (new->thread_fn && !nested) {
		struct task_struct *t;

		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
				   new->name);
		if (IS_ERR(t))
			return PTR_ERR(t);
		/*
		 * We keep the reference to the task struct even if
		 * the thread dies to avoid that the interrupt code
		 * references an already freed task_struct.
		 */
		get_task_struct(t);
		new->thread = t;
	}

	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto out_thread;
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match.
		 */
		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) {
			old_name = old->name;
			goto mismatch;
		}

		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;

		/* add new interrupt at end of irq queue */
		do {
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}

	if (!shared) {
		irq_chip_set_defaults(desc->irq_data.chip);

		init_waitqueue_head(&desc->wait_for_threads);

		/* Setup the type (level, edge polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc, irq,
					new->flags & IRQF_TRIGGER_MASK);

			if (ret)
				goto out_mask;
		} else
			compat_irq_chip_set_default_handler(desc);

		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED |
				  IRQS_INPROGRESS | IRQS_ONESHOT |
				  IRQS_WAITING);

		if (new->flags & IRQF_PERCPU) {
			irqd_set(&desc->irq_data, IRQD_PER_CPU);
			irq_settings_set_per_cpu(desc);
		}

		if (new->flags & IRQF_ONESHOT)
			desc->istate |= IRQS_ONESHOT;

		if (!(desc->status & IRQ_NOAUTOEN))
			irq_startup(desc);
		else
			/* Undo nested disables: */
			desc->depth = 1;

		/* Exclude IRQ from balancing if requested */
		if (new->flags & IRQF_NOBALANCING) {
			irq_settings_set_no_balancing(desc);
			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
		}

		/* Set default affinity mask once everything is setup */
		setup_affinity(irq, desc, mask);
	} else if ((new->flags & IRQF_TRIGGER_MASK)
			&& (new->flags & IRQF_TRIGGER_MASK)
				!= (desc->status & IRQ_TYPE_SENSE_MASK)) {
		/* hope the handler works with the actual trigger mode... */
		pr_warning("IRQ %d uses trigger mode %d; requested %d\n",
			   irq, (int)(desc->status & IRQ_TYPE_SENSE_MASK),
			   (int)(new->flags & IRQF_TRIGGER_MASK));
	}

	new->irq = irq;
	*old_ptr = new;

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
		__enable_irq(desc, irq, false);
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	/*
	 * Strictly no need to wake it up, but hung_task complains
	 * when no hard interrupt wakes the thread up.
	 */
	if (new->thread)
		wake_up_process(new->thread);

	register_irq_proc(irq, desc);
	register_handler_proc(irq, new);

	return 0;

mismatch:
#ifdef CONFIG_DEBUG_SHIRQ
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq);
		if (old_name)
			printk(KERN_ERR "current handler: %s\n", old_name);
		dump_stack();
	}
#endif
	ret = -EBUSY;

out_mask:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	free_cpumask_var(mask);

out_thread:
	if (new->thread) {
		struct task_struct *t = new->thread;

		new->thread = NULL;
		if (likely(!test_bit(IRQTF_DIED, &new->thread_flags)))
			kthread_stop(t);
		put_task_struct(t);
	}
	return ret;
}
/**
 * setup_irq - setup an interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
	int retval;
	struct irq_desc *desc = irq_to_desc(irq);

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, act);
	chip_bus_sync_unlock(desc);

	return retval;
}
EXPORT_SYMBOL_GPL(setup_irq);
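
/*
 * Usage sketch (illustrative only): early boot code that cannot use
 * request_irq() yet can install a statically allocated irqaction. The
 * names below are hypothetical.
 *
 *	static struct irqaction foo_timer_irqaction = {
 *		.handler = foo_timer_interrupt,
 *		.flags	 = IRQF_TIMER,
 *		.name	 = "foo_timer",
 *	};
 *
 *	void __init foo_time_init(void)
 *	{
 *		setup_irq(FOO_TIMER_IRQ, &foo_timer_irqaction);
 *	}
 */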
/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			raw_spin_unlock_irqrestore(&desc->lock, flags);

			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	/* Currently used only by UML, might disappear one day: */
#ifdef CONFIG_IRQ_RELEASE_METHOD
	if (desc->irq_data.chip->release)
		desc->irq_data.chip->release(irq, dev_id);
#endif

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action)
		irq_shutdown(desc);

#ifdef CONFIG_SMP
	/* make sure affinity_hint is cleaned up */
	if (WARN_ON_ONCE(desc->affinity_hint))
		desc->affinity_hint = NULL;
#endif

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	/* Make sure it's not being used on another CPU: */
	synchronize_irq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even now it's being freed, so let's make sure that
	 * is so by doing an extra call to the handler ....
	 *
	 * ( We do this after actually deregistering it, to make sure that a
	 *   'real' IRQ doesn't run in parallel with our fake. )
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif

	if (action->thread) {
		if (!test_bit(IRQTF_DIED, &action->thread_flags))
			kthread_stop(action->thread);
		put_task_struct(action->thread);
	}

	return action;
}
/**
 * remove_irq - free an interrupt
 * @irq: Interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_irq(unsigned int irq, struct irqaction *act)
{
	__free_irq(irq, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);
/**
 * free_irq - free an interrupt allocated with request_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler. The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function. The function
 * does not return until any executing interrupts for this IRQ
 * have completed.
 *
 * This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return;

#ifdef CONFIG_SMP
	if (WARN_ON(desc->affinity_notify))
		desc->affinity_notify = NULL;
#endif

	chip_bus_lock(desc);
	kfree(__free_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL(free_irq);
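
/*
 * Usage sketch (illustrative only): the dev_id passed to free_irq() must
 * be the same cookie that was passed to request_irq(); on a shared line it
 * is what selects the right action to remove. The foo names are
 * hypothetical.
 *
 *	ret = request_irq(foo->irq, foo_interrupt, IRQF_SHARED, "foo", foo);
 *	...
 *	free_irq(foo->irq, foo);	// same cookie selects this handler
 */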
/**
 * request_threaded_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	     Primary handler for threaded interrupts
 *	     If NULL and thread_fn != NULL the default
 *	     primary handler is installed
 * @thread_fn: Function called from the irq handler thread
 *	       If NULL, no irq thread is created
 * @irqflags: Interrupt type flags
 * @devname: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. From the point this
 * call is made your handler function may be invoked. Since
 * your handler function must clear any interrupt the board
 * raises, you must take care both to initialise your hardware
 * and to set up the interrupt handler in the right order.
 *
 * If you want to set up a threaded irq handler for your device
 * then you need to supply @handler and @thread_fn. @handler is
 * still called in hard interrupt context and has to check
 * whether the interrupt originates from the device. If yes it
 * needs to disable the interrupt on the device and return
 * IRQ_WAKE_THREAD which will wake up the handler thread and run
 * @thread_fn. This split handler design is necessary to support
 * shared interrupts.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If your interrupt is shared you must pass a non NULL dev_id
 * as this is required when freeing the interrupt.
 *
 * Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_SAMPLE_RANDOM	The interrupt can be used for entropy
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
			 irq_handler_t thread_fn, unsigned long irqflags,
			 const char *devname, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 */
	if ((irqflags & IRQF_SHARED) && !dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (desc->status & IRQ_NOREQUEST)
		return -EINVAL;

	if (!handler) {
		if (!thread_fn)
			return -EINVAL;
		handler = irq_default_primary_handler;
	}

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->thread_fn = thread_fn;
	action->flags = irqflags;
	action->name = devname;
	action->dev_id = dev_id;

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, action);
	chip_bus_sync_unlock(desc);

	if (retval)
		kfree(action);

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
	if (!retval && (irqflags & IRQF_SHARED)) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure....
		 * We disable the irq to make sure that a 'real' IRQ doesn't
		 * run in parallel with our fake.
		 */
		unsigned long flags;

		disable_irq(irq);
		local_irq_save(flags);

		handler(irq, dev_id);

		local_irq_restore(flags);
		enable_irq(irq);
	}
#endif
	return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
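
/*
 * Usage sketch (illustrative only): the split primary/threaded pattern this
 * function documents. The primary handler runs in hard interrupt context,
 * quiets the device and returns IRQ_WAKE_THREAD; the threaded handler does
 * the slow work. All foo_* names are hypothetical.
 *
 *	static irqreturn_t foo_quick_check(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		if (!foo_irq_is_ours(foo))	// shared line: check the source
 *			return IRQ_NONE;
 *		foo_mask_device_irq(foo);	// stop it re-asserting
 *		return IRQ_WAKE_THREAD;
 *	}
 *
 *	static irqreturn_t foo_thread_fn(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		foo_process_events(foo);	// may sleep, take mutexes, ...
 *		foo_unmask_device_irq(foo);
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_threaded_irq(foo->irq, foo_quick_check, foo_thread_fn,
 *				   IRQF_SHARED, "foo", foo);
 */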
/**
 * request_any_context_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	     Threaded handler for threaded interrupts.
 * @flags: Interrupt type flags
 * @name: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. It selects either a
 * hardirq or threaded handling method depending on the
 * context.
 *
 * On failure, it returns a negative value. On success,
 * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
			    unsigned long flags, const char *name, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret;

	if (!desc)
		return -EINVAL;

	if (desc->status & IRQ_NESTED_THREAD) {
		ret = request_threaded_irq(irq, NULL, handler,
					   flags, name, dev_id);
		return !ret ? IRQC_IS_NESTED : ret;
	}

	ret = request_irq(irq, handler, flags, name, dev_id);
	return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);
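
/*
 * Usage sketch (illustrative only): a driver (for example for a device
 * hanging off a GPIO expander behind I2C) that does not know whether its
 * parent interrupt is nested/threaded can let the core pick the handling
 * method. The foo names are hypothetical.
 *
 *	ret = request_any_context_irq(foo->irq, foo_handler, 0, "foo", foo);
 *	if (ret < 0)
 *		return ret;
 *	// ret is IRQC_IS_HARDIRQ or IRQC_IS_NESTED; both mean success
 */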