[PATCH] genirq: cleanup: remove fastcall
kernel/irq/handle.c
/*
 * linux/kernel/irq/handle.c
 *
 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
 *
 * This file contains the core interrupt handling code.
 */

#include <linux/irq.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include "internals.h"

/*
 * Linux has a controller-independent interrupt architecture.
 * Every controller has a 'controller-template' that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller; a request_irq() usage sketch follows the
 * irq_desc[] array below.
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic or
 * having to touch the generic code.
 *
 * Controller mappings for all interrupt sources:
 */
irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
	[0 ... NR_IRQS-1] = {
		.status = IRQ_DISABLED,
		.chip = &no_irq_type,
		.lock = SPIN_LOCK_UNLOCKED,
#ifdef CONFIG_SMP
		.affinity = CPU_MASK_ALL
#endif
	}
};

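/*
 * Illustrative sketch, not part of this file: the driver-visible side of
 * the layer described above.  A driver only calls request_irq(); which
 * controller chip acks and ends the line is hidden behind irq_desc[].
 * The irq number, the "foo" name and the counter are made up for the
 * example.
 */
static unsigned long foo_events;	/* hypothetical per-device state */

static irqreturn_t foo_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	unsigned long *events = dev_id;	/* cookie passed to request_irq() */

	(*events)++;			/* pretend the device was serviced */
	return IRQ_HANDLED;		/* IRQ_NONE would mean "not ours" */
}

static int foo_setup_irq(unsigned int irq)
{
	/* called from the driver's init path; "foo" shows up in /proc/interrupts */
	return request_irq(irq, foo_interrupt, 0, "foo", &foo_events);
}
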
/*
 * Generic 'no controller' code
 */
static void end_none(unsigned int irq) { }
static void enable_none(unsigned int irq) { }
static void disable_none(unsigned int irq) { }
static void shutdown_none(unsigned int irq) { }
static unsigned int startup_none(unsigned int irq) { return 0; }

static void ack_none(unsigned int irq)
{
	/*
	 * 'What should we do if we get a hw irq event on an illegal vector?'
	 * Each architecture has to answer this for itself.
	 */
	ack_bad_irq(irq);
}

struct hw_interrupt_type no_irq_type = {
	.typename = "none",
	.startup = startup_none,
	.shutdown = shutdown_none,
	.enable = enable_none,
	.disable = disable_none,
	.ack = ack_none,
	.end = end_none,
	.set_affinity = NULL
};

/*
 * Special, empty irq handler:
 */
irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs)
{
	return IRQ_NONE;
}

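/*
 * Illustrative sketch, not part of this file: no_action is typically
 * installed for interrupt lines that must be claimed but never carry
 * real work for a handler, e.g. the cascade line between two chained
 * PIC-style controllers.  The function and the cascade_irq parameter
 * below are made up for the example.
 */
static int example_claim_cascade(unsigned int cascade_irq)
{
	/* the handler always returns IRQ_NONE; the line is just reserved */
	return request_irq(cascade_irq, no_action, 0, "cascade", NULL);
}
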
/*
 * Have got an event to handle:
 */
irqreturn_t handle_IRQ_event(unsigned int irq, struct pt_regs *regs,
			     struct irqaction *action)
{
	irqreturn_t ret, retval = IRQ_NONE;
	unsigned int status = 0;

	if (!(action->flags & SA_INTERRUPT))
		local_irq_enable();

	do {
		ret = action->handler(irq, action->dev_id, regs);
		if (ret == IRQ_HANDLED)
			status |= action->flags;
		retval |= ret;
		action = action->next;
	} while (action);

	if (status & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	local_irq_disable();

	return retval;
}

/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	irq_desc_t *desc = irq_desc + irq;
	struct irqaction *action;
	unsigned int status;

	kstat_this_cpu.irqs[irq]++;
	if (CHECK_IRQ_PER_CPU(desc->status)) {
		irqreturn_t action_ret;

		/*
		 * No locking required for CPU-local interrupts:
		 */
		if (desc->chip->ack)
			desc->chip->ack(irq);
		action_ret = handle_IRQ_event(irq, regs, desc->action);
		desc->chip->end(irq);
		return 1;
	}

	spin_lock(&desc->lock);
	if (desc->chip->ack)
		desc->chip->ack(irq);
	/*
	 * REPLAY is when Linux resends an IRQ that was dropped earlier
	 * WAITING is used by probe to mark irqs that are being tested
	 */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	status |= IRQ_PENDING; /* we _want_ to handle it */

	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
	action = NULL;
	if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
		action = desc->action;
		status &= ~IRQ_PENDING; /* we commit to handling */
		status |= IRQ_INPROGRESS; /* we are handling it */
	}
	desc->status = status;

	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	 * Since we set PENDING, if another processor is handling
	 * a different instance of this same irq, the other processor
	 * will take care of it.
	 */
	if (unlikely(!action))
		goto out;

	/*
	 * Edge triggered interrupts need to remember
	 * pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in do_IRQ
	 * or in the handler. But the code here only handles the _second_
	 * instance of the irq, not the third or fourth. So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
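	/*
	 * Concrete example of the replay handled by the loop below:
	 * CPU0 is running the handler (IRQ_INPROGRESS set, desc->lock
	 * dropped) when the device raises the same irq again.  CPU1
	 * enters __do_IRQ(), takes desc->lock, sees IRQ_INPROGRESS,
	 * leaves IRQ_PENDING set and exits via 'out'.  When CPU0
	 * retakes the lock it notices IRQ_PENDING, clears it and runs
	 * the handler once more, so the second event is not lost.
	 */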
	for (;;) {
		irqreturn_t action_ret;

		spin_unlock(&desc->lock);

		action_ret = handle_IRQ_event(irq, regs, action);

		spin_lock(&desc->lock);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret, regs);
		if (likely(!(desc->status & IRQ_PENDING)))
			break;
		desc->status &= ~IRQ_PENDING;
	}
	desc->status &= ~IRQ_INPROGRESS;

out:
	/*
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
	 */
	desc->chip->end(irq);
	spin_unlock(&desc->lock);

	return 1;
}

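/*
 * Illustrative sketch, not part of this file: __do_IRQ() is what an
 * architecture's low-level entry code calls once it has decoded which
 * irq line fired.  The function name below is made up; real arch code
 * also translates the hardware vector and saves/restores more state.
 */
static unsigned int example_arch_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	unsigned int ret;

	irq_enter();			/* enter hardirq context accounting */
	ret = __do_IRQ(irq, regs);
	irq_exit();			/* may run pending softirqs on the way out */

	return ret;
}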