/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
 *
 * This file contains driver APIs to the irq subsystem.
 */

#include <linux/config.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>

#include "internals.h"

#ifdef CONFIG_SMP

cpumask_t irq_affinity[NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };

#if defined (CONFIG_GENERIC_PENDING_IRQ) || defined (CONFIG_IRQBALANCE)
cpumask_t __cacheline_aligned pending_irq_cpumask[NR_IRQS];
#endif

/**
 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
        struct irq_desc *desc = irq_desc + irq;

        if (irq >= NR_IRQS)
                return;

        while (desc->status & IRQ_INPROGRESS)
                cpu_relax();
}

EXPORT_SYMBOL(synchronize_irq);

#endif
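
#if 0   /* Example sketch (hypothetical driver code), kept out of the build */
/*
 * A minimal sketch of why a caller might want synchronize_irq(): stop new
 * invocations of the handler, wait for any handler still running on another
 * CPU, and only then free data the handler touches.  struct foo_dev and its
 * fields are assumed here for illustration only.
 */
struct foo_dev {
        unsigned int irq;
        void *rx_ring;
};

static void foo_quiesce(struct foo_dev *dev)
{
        disable_irq_nosync(dev->irq);   /* no new handler invocations */
        synchronize_irq(dev->irq);      /* wait for handlers already in flight */
        kfree(dev->rx_ring);            /* now nothing can still be using it */
        dev->rx_ring = NULL;
}
#endif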

/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and Enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
        irq_desc_t *desc = irq_desc + irq;
        unsigned long flags;

        if (irq >= NR_IRQS)
                return;

        spin_lock_irqsave(&desc->lock, flags);
        if (!desc->depth++) {
                desc->status |= IRQ_DISABLED;
                desc->handler->disable(irq);
        }
        spin_unlock_irqrestore(&desc->lock, flags);
}

EXPORT_SYMBOL(disable_irq_nosync);

/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
        irq_desc_t *desc = irq_desc + irq;

        if (irq >= NR_IRQS)
                return;

        disable_irq_nosync(irq);
        if (desc->action)
                synchronize_irq(irq);
}

EXPORT_SYMBOL(disable_irq);
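
#if 0   /* Example sketch (hypothetical driver code), kept out of the build */
/*
 * A minimal sketch of the deadlock warning above: because disable_irq()
 * waits for running handlers, it must be called *before* taking any lock
 * that the handler itself acquires.  struct bar_dev is assumed here purely
 * for illustration.
 */
struct bar_dev {
        unsigned int irq;
        spinlock_t lock;        /* also taken by the interrupt handler */
};

static void bar_reprogram(struct bar_dev *dev)
{
        disable_irq(dev->irq);          /* handler can no longer be running */
        spin_lock(&dev->lock);          /* safe: no handler left to contend */
        /* ... rewrite device registers ... */
        spin_unlock(&dev->lock);
        enable_irq(dev->irq);
}
#endif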

/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context.
 */
void enable_irq(unsigned int irq)
{
        irq_desc_t *desc = irq_desc + irq;
        unsigned long flags;

        if (irq >= NR_IRQS)
                return;

        spin_lock_irqsave(&desc->lock, flags);
        switch (desc->depth) {
        case 0:
                WARN_ON(1);
                break;
        case 1: {
                unsigned int status = desc->status & ~IRQ_DISABLED;

                desc->status = status;
                if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
                        desc->status = status | IRQ_REPLAY;
                        hw_resend_irq(desc->handler,irq);
                }
                desc->handler->enable(irq);
                /* fall-through */
        }
        default:
                desc->depth--;
        }
        spin_unlock_irqrestore(&desc->lock, flags);
}

EXPORT_SYMBOL(enable_irq);
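
#if 0   /* Example sketch, kept out of the build */
/*
 * Illustration of the nesting described above: the line is only unmasked
 * again once every disable_irq()/disable_irq_nosync() has been balanced by
 * an enable_irq().
 */
static void nesting_example(unsigned int irq)
{
        disable_irq(irq);       /* depth 0 -> 1, line masked */
        disable_irq(irq);       /* depth 1 -> 2, still masked */
        enable_irq(irq);        /* depth 2 -> 1, still masked */
        enable_irq(irq);        /* depth 1 -> 0, line unmasked */
}
#endif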

/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
        struct irqaction *action;

        if (irq >= NR_IRQS)
                return 0;

        action = irq_desc[irq].action;
        if (action)
                if (irqflags & action->flags & SA_SHIRQ)
                        action = NULL;

        return !action;
}
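
#if 0   /* Example sketch (hypothetical probing code), kept out of the build */
/*
 * A sketch of how probing code might use can_request_irq(): with irqflags
 * of 0 it reports an IRQ as usable only if no handler is installed at all,
 * while passing SA_SHIRQ would also accept lines whose current handlers
 * agreed to share.
 */
static int find_unclaimed_legacy_irq(void)
{
        unsigned int irq;

        for (irq = 3; irq < 16; irq++)
                if (can_request_irq(irq, 0))
                        return irq;     /* nothing has claimed this line */
        return -1;
}
#endif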

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
int setup_irq(unsigned int irq, struct irqaction * new)
{
        struct irq_desc *desc = irq_desc + irq;
        struct irqaction *old, **p;
        unsigned long flags;
        int shared = 0;

        if (irq >= NR_IRQS)
                return -EINVAL;

        if (desc->handler == &no_irq_type)
                return -ENOSYS;
        /*
         * Some drivers like serial.c use request_irq() heavily,
         * so we have to be careful not to interfere with a
         * running system.
         */
        if (new->flags & SA_SAMPLE_RANDOM) {
                /*
                 * This function might sleep, so we want to call it first,
                 * outside of the atomic block.
                 * Yes, this might clear the entropy pool if the wrong
                 * driver is loaded without actually installing a new
                 * handler, but is that really a problem? Only the
                 * sysadmin is able to do this.
                 */
                rand_initialize_irq(irq);
        }

        /*
         * The following block of code has to be executed atomically
         */
        spin_lock_irqsave(&desc->lock,flags);
        p = &desc->action;
        if ((old = *p) != NULL) {
                /* Can't share interrupts unless both agree to */
                if (!(old->flags & new->flags & SA_SHIRQ))
                        goto mismatch;

#if defined(ARCH_HAS_IRQ_PER_CPU) && defined(SA_PERCPU_IRQ)
                /* All handlers must agree on per-cpuness */
                if ((old->flags & IRQ_PER_CPU) != (new->flags & IRQ_PER_CPU))
                        goto mismatch;
#endif

                /* add new interrupt at end of irq queue */
                do {
                        p = &old->next;
                        old = *p;
                } while (old);
                shared = 1;
        }

        *p = new;
#if defined(ARCH_HAS_IRQ_PER_CPU) && defined(SA_PERCPU_IRQ)
        if (new->flags & SA_PERCPU_IRQ)
                desc->status |= IRQ_PER_CPU;
#endif
        if (!shared) {
                desc->depth = 0;
                desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT |
                                  IRQ_WAITING | IRQ_INPROGRESS);
                if (desc->handler->startup)
                        desc->handler->startup(irq);
                else
                        desc->handler->enable(irq);
        }
        spin_unlock_irqrestore(&desc->lock,flags);

        new->irq = irq;
        register_irq_proc(irq);
        new->dir = NULL;
        register_handler_proc(irq, new);

        return 0;

mismatch:
        spin_unlock_irqrestore(&desc->lock, flags);
        if (!(new->flags & SA_PROBEIRQ)) {
                printk(KERN_ERR "%s: irq handler mismatch\n", __FUNCTION__);
                dump_stack();
        }
        return -EBUSY;
}
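
#if 0   /* Example sketch (hypothetical architecture code), kept out of the build */
/*
 * setup_irq() is typically used by architecture code that must install a
 * handler with a static struct irqaction, e.g. a timer interrupt wired up
 * before the normal driver infrastructure is available.  The names below
 * are assumed for illustration only.
 */
static irqreturn_t sample_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        /* ... acknowledge the tick, do the timekeeping ... */
        return IRQ_HANDLED;
}

static struct irqaction sample_timer_action = {
        .handler = sample_timer_interrupt,
        .flags   = SA_INTERRUPT,
        .name    = "timer",
};

void __init sample_time_init(void)
{
        setup_irq(0, &sample_timer_action);
}
#endif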

/**
 * free_irq - free an interrupt
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler. The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function. The function
 * does not return until any executing interrupts for this IRQ
 * have completed.
 *
 * This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
        struct irq_desc *desc;
        struct irqaction **p;
        unsigned long flags;

        WARN_ON(in_interrupt());
        if (irq >= NR_IRQS)
                return;

        desc = irq_desc + irq;
        spin_lock_irqsave(&desc->lock,flags);
        p = &desc->action;
        for (;;) {
                struct irqaction * action = *p;

                if (action) {
                        struct irqaction **pp = p;

                        p = &action->next;
                        if (action->dev_id != dev_id)
                                continue;

                        /* Found it - now remove it from the list of entries */
                        *pp = action->next;

                        /* Currently used only by UML, might disappear one day. */
#ifdef CONFIG_IRQ_RELEASE_METHOD
                        if (desc->handler->release)
                                desc->handler->release(irq, dev_id);
#endif

                        if (!desc->action) {
                                desc->status |= IRQ_DISABLED;
                                if (desc->handler->shutdown)
                                        desc->handler->shutdown(irq);
                                else
                                        desc->handler->disable(irq);
                        }
                        spin_unlock_irqrestore(&desc->lock,flags);
                        unregister_handler_proc(irq, action);

                        /* Make sure it's not being used on another CPU */
                        synchronize_irq(irq);
                        kfree(action);
                        return;
                }
                printk(KERN_ERR "Trying to free already-free IRQ %d\n", irq);
                spin_unlock_irqrestore(&desc->lock,flags);
                return;
        }
}

EXPORT_SYMBOL(free_irq);

/**
 * request_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs
 * @irqflags: Interrupt type flags
 * @devname: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. From the point this
 * call is made your handler function may be invoked. Since
 * your handler function must clear any interrupt the board
 * raises, you must take care both to initialise your hardware
 * and to set up the interrupt handler in the right order.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If your interrupt is shared you must pass a non NULL dev_id
 * as this is required when freeing the interrupt.
 *
 * Flags:
 *
 *      SA_SHIRQ                Interrupt is shared
 *      SA_INTERRUPT            Disable local interrupts while processing
 *      SA_SAMPLE_RANDOM        The interrupt can be used for entropy
 *
 */
int request_irq(unsigned int irq,
                irqreturn_t (*handler)(int, void *, struct pt_regs *),
                unsigned long irqflags, const char * devname, void *dev_id)
{
        struct irqaction * action;
        int retval;

        /*
         * Sanity-check: shared interrupts must pass in a real dev-ID,
         * otherwise we'll have trouble later trying to figure out
         * which interrupt is which (messes up the interrupt freeing
         * logic etc).
         */
        if ((irqflags & SA_SHIRQ) && !dev_id)
                return -EINVAL;
        if (irq >= NR_IRQS)
                return -EINVAL;
        if (!handler)
                return -EINVAL;

        action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
        if (!action)
                return -ENOMEM;

        action->handler = handler;
        action->flags = irqflags;
        cpus_clear(action->mask);
        action->name = devname;
        action->next = NULL;
        action->dev_id = dev_id;

        select_smp_affinity(irq);

        retval = setup_irq(irq, action);
        if (retval)
                kfree(action);

        return retval;
}

EXPORT_SYMBOL(request_irq);

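#if 0   /* Example sketch (hypothetical driver code), kept out of the build */
/*
 * A minimal sketch of the driver-facing API above: a shared handler using
 * the device structure as dev_id, requested once the hardware is ready and
 * freed again on teardown.  struct sample_dev and its fields are assumed
 * for illustration only.
 */
struct sample_dev {
        unsigned int irq;
        void __iomem *regs;
};

static irqreturn_t sample_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        struct sample_dev *dev = dev_id;

        /* ... check dev->regs; return IRQ_NONE if this device did not fire ... */
        return IRQ_HANDLED;
}

static int sample_start(struct sample_dev *dev)
{
        /* A non-NULL dev_id is mandatory with SA_SHIRQ and later identifies
         * this handler to free_irq(). */
        return request_irq(dev->irq, sample_interrupt, SA_SHIRQ,
                           "sample", dev);
}

static void sample_stop(struct sample_dev *dev)
{
        /* Quiesce the card first (required on shared lines), then remove the
         * handler; free_irq() waits for any running instance to finish. */
        free_irq(dev->irq, dev);
}
#endif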