Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/kernel/irq/handle.c | |
3 | * | |
a34db9b2 IM |
4 | * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar |
5 | * Copyright (C) 2005-2006, Thomas Gleixner, Russell King | |
1da177e4 LT |
6 | * |
7 | * This file contains the core interrupt handling code. | |
a34db9b2 IM |
8 | * |
9 | * Detailed information is available in Documentation/DocBook/genericirq | |
10 | * | |
1da177e4 LT |
11 | */ |
12 | ||
13 | #include <linux/irq.h> | |
14 | #include <linux/module.h> | |
15 | #include <linux/random.h> | |
16 | #include <linux/interrupt.h> | |
17 | #include <linux/kernel_stat.h> | |
0b8f1efa YL |
18 | #include <linux/rculist.h> |
19 | #include <linux/hash.h> | |
1da177e4 LT |
20 | |
21 | #include "internals.h" | |
22 | ||
0b8f1efa YL |
23 | /* |
24 | * lockdep: we want to handle all irq_desc locks as a single lock-class: | |
25 | */ | |
48a1b10a | 26 | struct lock_class_key irq_desc_lock_class; |
0b8f1efa | 27 | |
6a6de9ef TG |
28 | /** |
29 | * handle_bad_irq - handle spurious and unhandled irqs | |
43a1dd50 HK |
30 | * @irq: the interrupt number |
31 | * @desc: description of the interrupt | |
43a1dd50 HK |
32 | * |
33 | * Handles spurious and unhandled IRQs. It also prints a debug message. |
6a6de9ef | 34 | */ |
d6c88a50 | 35 | void handle_bad_irq(unsigned int irq, struct irq_desc *desc) |
6a6de9ef | 36 | { |
43f77759 | 37 | print_irq_desc(irq, desc); |
d6c88a50 | 38 | kstat_incr_irqs_this_cpu(irq, desc); |
6a6de9ef TG |
39 | ack_bad_irq(irq); |
40 | } | |
41 | ||
1da177e4 LT |
42 | /* |
43 | * Linux has a controller-independent interrupt architecture. | |
44 | * Every controller has a 'controller-template' that is used
45 | * by the main code to do the right thing. Each driver-visible | |
06fcb0c6 | 46 | * interrupt source is transparently wired to the appropriate |
1da177e4 LT |
47 | * controller. Thus drivers need not be aware of the |
48 | * interrupt-controller. | |
49 | * | |
50 | * The code is designed to be easily extended with new/different | |
51 | * interrupt controllers, without having to do assembly magic or | |
52 | * having to touch the generic code. | |
53 | * | |
54 | * Controller mappings for all interrupt sources: | |
55 | */ | |
85c0f909 | 56 | int nr_irqs = NR_IRQS; |
fa42d10d | 57 | EXPORT_SYMBOL_GPL(nr_irqs); |
d60458b2 | 58 | |
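The 'controller-template' the comment above refers to is `struct irq_chip`: each hardware controller supplies ack/mask/unmask callbacks, and every interrupt source is wired to its chip plus a generic flow handler. As a minimal, hedged sketch only (the controller, its register offsets, and all `foo_*` names are hypothetical, not part of this file):

```c
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>

/* Hypothetical register block of an imaginary memory-mapped controller. */
#define FOO_INTC_MASK_SET	0x00
#define FOO_INTC_MASK_CLR	0x04
#define FOO_INTC_ACK		0x08

static void __iomem *foo_intc_base;	/* assumed to be ioremapped by platform code */

static void foo_intc_mask(unsigned int irq)
{
	writel(1U << irq, foo_intc_base + FOO_INTC_MASK_SET);	/* assumes irq < 32 */
}

static void foo_intc_unmask(unsigned int irq)
{
	writel(1U << irq, foo_intc_base + FOO_INTC_MASK_CLR);
}

static void foo_intc_ack(unsigned int irq)
{
	writel(1U << irq, foo_intc_base + FOO_INTC_ACK);
}

/* The per-controller "template": name plus the hardware callbacks. */
static struct irq_chip foo_intc_chip = {
	.name	= "FOO-INTC",
	.ack	= foo_intc_ack,
	.mask	= foo_intc_mask,
	.unmask	= foo_intc_unmask,
};

static void __init foo_intc_init_irqs(unsigned int first, unsigned int count)
{
	unsigned int i;

	/* Wire each source to the chip and a generic flow handler. */
	for (i = first; i < first + count; i++)
		set_irq_chip_and_handler(i, &foo_intc_chip, handle_level_irq);
}
```

With that wiring in place, drivers only ever see interrupt numbers; they never touch the controller directly, which is exactly the point the comment makes.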
0b8f1efa YL |
59 | #ifdef CONFIG_SPARSE_IRQ |
60 | static struct irq_desc irq_desc_init = { | |
61 | .irq = -1, | |
62 | .status = IRQ_DISABLED, | |
63 | .chip = &no_irq_chip, | |
64 | .handle_irq = handle_bad_irq, | |
65 | .depth = 1, | |
66 | .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock), | |
67 | #ifdef CONFIG_SMP | |
68 | .affinity = CPU_MASK_ALL | |
69 | #endif | |
70 | }; | |
71 | ||
48a1b10a | 72 | void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr) |
0b8f1efa YL |
73 | { |
74 | unsigned long bytes; | |
75 | char *ptr; | |
76 | int node; | |
77 | ||
78 | /* Compute how many bytes we need per irq and allocate them */ | |
79 | bytes = nr * sizeof(unsigned int); | |
80 | ||
81 | node = cpu_to_node(cpu); | |
82 | ptr = kzalloc_node(bytes, GFP_ATOMIC, node); | |
83 | printk(KERN_DEBUG " alloc kstat_irqs on cpu %d node %d\n", cpu, node); | |
84 | ||
85 | if (ptr) | |
86 | desc->kstat_irqs = (unsigned int *)ptr; | |
87 | } | |
88 | ||
0b8f1efa YL |
89 | static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu) |
90 | { | |
91 | memcpy(desc, &irq_desc_init, sizeof(struct irq_desc)); | |
793f7b12 IM |
92 | |
93 | spin_lock_init(&desc->lock); | |
0b8f1efa YL |
94 | desc->irq = irq; |
95 | #ifdef CONFIG_SMP | |
96 | desc->cpu = cpu; | |
97 | #endif | |
98 | lockdep_set_class(&desc->lock, &irq_desc_lock_class); | |
99 | init_kstat_irqs(desc, cpu, nr_cpu_ids); | |
100 | if (!desc->kstat_irqs) { | |
101 | printk(KERN_ERR "can not alloc kstat_irqs\n"); | |
102 | BUG_ON(1); | |
103 | } | |
104 | arch_init_chip_data(desc, cpu); | |
105 | } | |
106 | ||
107 | /* | |
108 | * Protect the sparse_irqs: | |
109 | */ | |
48a1b10a | 110 | DEFINE_SPINLOCK(sparse_irq_lock); |
0b8f1efa YL |
111 | |
112 | struct irq_desc *irq_desc_ptrs[NR_IRQS] __read_mostly; | |
113 | ||
99d093d1 YL |
114 | static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = { |
115 | [0 ... NR_IRQS_LEGACY-1] = { | |
0b8f1efa YL |
116 | .irq = -1, |
117 | .status = IRQ_DISABLED, | |
118 | .chip = &no_irq_chip, | |
119 | .handle_irq = handle_bad_irq, | |
120 | .depth = 1, | |
121 | .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock), | |
122 | #ifdef CONFIG_SMP | |
123 | .affinity = CPU_MASK_ALL | |
124 | #endif | |
125 | } | |
126 | }; | |
127 | ||
128 | /* FIXME: use bootmem alloc ...*/ | |
99d093d1 | 129 | static unsigned int kstat_irqs_legacy[NR_IRQS_LEGACY][NR_CPUS]; |
0b8f1efa | 130 | |
13a0c3c2 | 131 | int __init early_irq_init(void) |
0b8f1efa YL |
132 | { |
133 | struct irq_desc *desc; | |
134 | int legacy_count; | |
135 | int i; | |
136 | ||
137 | desc = irq_desc_legacy; | |
138 | legacy_count = ARRAY_SIZE(irq_desc_legacy); | |
139 | ||
140 | for (i = 0; i < legacy_count; i++) { | |
141 | desc[i].irq = i; | |
142 | desc[i].kstat_irqs = kstat_irqs_legacy[i]; | |
fa6beb37 | 143 | lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); |
0b8f1efa YL |
144 | |
145 | irq_desc_ptrs[i] = desc + i; | |
146 | } | |
147 | ||
148 | for (i = legacy_count; i < NR_IRQS; i++) | |
149 | irq_desc_ptrs[i] = NULL; | |
150 | ||
13a0c3c2 | 151 | return arch_early_irq_init(); |
0b8f1efa YL |
152 | } |
153 | ||
154 | struct irq_desc *irq_to_desc(unsigned int irq) | |
155 | { | |
156 | return (irq < NR_IRQS) ? irq_desc_ptrs[irq] : NULL; | |
157 | } | |
158 | ||
159 | struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu) | |
160 | { | |
161 | struct irq_desc *desc; | |
162 | unsigned long flags; | |
163 | int node; | |
164 | ||
165 | if (irq >= NR_IRQS) { | |
166 | printk(KERN_WARNING "irq >= NR_IRQS in irq_to_desc_alloc: %d %d\n", | |
167 | irq, NR_IRQS); | |
168 | WARN_ON(1); | |
169 | return NULL; | |
170 | } | |
171 | ||
172 | desc = irq_desc_ptrs[irq]; | |
173 | if (desc) | |
174 | return desc; | |
175 | ||
176 | spin_lock_irqsave(&sparse_irq_lock, flags); | |
177 | ||
178 | /* We have to check it again to avoid races with another CPU */ |
179 | desc = irq_desc_ptrs[irq]; | |
180 | if (desc) | |
181 | goto out_unlock; | |
182 | ||
183 | node = cpu_to_node(cpu); | |
184 | desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node); | |
185 | printk(KERN_DEBUG " alloc irq_desc for %d on cpu %d node %d\n", | |
186 | irq, cpu, node); | |
187 | if (!desc) { | |
188 | printk(KERN_ERR "can not alloc irq_desc\n"); | |
189 | BUG_ON(1); | |
190 | } | |
191 | init_one_irq_desc(irq, desc, cpu); | |
192 | ||
193 | irq_desc_ptrs[irq] = desc; | |
194 | ||
195 | out_unlock: | |
196 | spin_unlock_irqrestore(&sparse_irq_lock, flags); | |
197 | ||
198 | return desc; | |
199 | } | |
200 | ||
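The intended split between the two lookups above: irq_to_desc() is a pure lookup that may return NULL under CONFIG_SPARSE_IRQ, while irq_to_desc_alloc_cpu() is used where the descriptor may legitimately not exist yet. A brief hedged sketch of that calling pattern (the helper and its name are hypothetical):

```c
#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/kernel.h>

/* Hypothetical arch helper: make sure a descriptor exists for 'irq'
 * before the hardware for it is programmed. */
static int foo_arch_setup_irq(unsigned int irq, int cpu)
{
	struct irq_desc *desc;

	/* Allocates under CONFIG_SPARSE_IRQ, or simply returns the
	 * statically sized entry otherwise. */
	desc = irq_to_desc_alloc_cpu(irq, cpu);
	if (!desc)
		return -ENOMEM;

	/* From here on, plain lookups are expected to succeed. */
	desc = irq_to_desc(irq);
	if (WARN_ON(!desc))
		return -EINVAL;

	return 0;
}
```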
f9af0e70 | 201 | #else /* !CONFIG_SPARSE_IRQ */ |
0b8f1efa | 202 | |
e729aa16 | 203 | struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { |
1da177e4 | 204 | [0 ... NR_IRQS-1] = { |
4f167fb4 | 205 | .status = IRQ_DISABLED, |
f1c2662c | 206 | .chip = &no_irq_chip, |
7a55713a | 207 | .handle_irq = handle_bad_irq, |
94d39e1f | 208 | .depth = 1, |
aac3f2b6 | 209 | .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock), |
a53da52f IM |
210 | #ifdef CONFIG_SMP |
211 | .affinity = CPU_MASK_ALL | |
212 | #endif | |
1da177e4 LT |
213 | } |
214 | }; | |
08678b08 | 215 | |
12026ea1 YL |
216 | int __init early_irq_init(void) |
217 | { | |
218 | struct irq_desc *desc; | |
219 | int count; | |
220 | int i; | |
221 | ||
222 | desc = irq_desc; | |
223 | count = ARRAY_SIZE(irq_desc); | |
224 | ||
225 | for (i = 0; i < count; i++) | |
226 | desc[i].irq = i; | |
227 | ||
228 | return arch_early_irq_init(); | |
229 | } | |
230 | ||
f9af0e70 KM |
231 | struct irq_desc *irq_to_desc(unsigned int irq) |
232 | { | |
233 | return (irq < NR_IRQS) ? irq_desc + irq : NULL; | |
234 | } | |
235 | ||
236 | struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu) | |
237 | { | |
238 | return irq_to_desc(irq); | |
239 | } | |
240 | #endif /* !CONFIG_SPARSE_IRQ */ | |
0b8f1efa | 241 | |
1da177e4 | 242 | /* |
77a5afec IM |
243 | * What should we do if we get a hw irq event on an illegal vector? |
244 | * Each architecture has to answer this itself. |
1da177e4 | 245 | */ |
77a5afec | 246 | static void ack_bad(unsigned int irq) |
1da177e4 | 247 | { |
d3c60047 | 248 | struct irq_desc *desc = irq_to_desc(irq); |
08678b08 | 249 | |
08678b08 | 250 | print_irq_desc(irq, desc); |
1da177e4 LT |
251 | ack_bad_irq(irq); |
252 | } | |
253 | ||
77a5afec IM |
254 | /* |
255 | * NOP functions | |
256 | */ | |
257 | static void noop(unsigned int irq) | |
258 | { | |
259 | } | |
260 | ||
261 | static unsigned int noop_ret(unsigned int irq) | |
262 | { | |
263 | return 0; | |
264 | } | |
265 | ||
266 | /* | |
267 | * Generic no controller implementation | |
268 | */ | |
f1c2662c IM |
269 | struct irq_chip no_irq_chip = { |
270 | .name = "none", | |
77a5afec IM |
271 | .startup = noop_ret, |
272 | .shutdown = noop, | |
273 | .enable = noop, | |
274 | .disable = noop, | |
275 | .ack = ack_bad, | |
276 | .end = noop, | |
1da177e4 LT |
277 | }; |
278 | ||
f8b5473f TG |
279 | /* |
280 | * Generic dummy implementation which can be used for | |
281 | * really dumb interrupt sources |
282 | */ | |
283 | struct irq_chip dummy_irq_chip = { | |
284 | .name = "dummy", | |
285 | .startup = noop_ret, | |
286 | .shutdown = noop, | |
287 | .enable = noop, | |
288 | .disable = noop, | |
289 | .ack = noop, | |
290 | .mask = noop, | |
291 | .unmask = noop, | |
292 | .end = noop, | |
293 | }; | |
294 | ||
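One common pattern for dummy_irq_chip is attaching it to interrupt sources that need no hardware ack or mask of their own, for instance the demultiplexed sub-interrupts behind a GPIO expander whose parent line does the real masking. A hedged sketch (the expander and its IRQ range are made up):

```c
#include <linux/init.h>
#include <linux/irq.h>

/* Hypothetical demuxed sub-interrupts of a GPIO expander: the parent
 * line handles the real hardware, so the children can use the dummy chip. */
static void __init foo_expander_init_irqs(unsigned int first, unsigned int count)
{
	unsigned int i;

	for (i = first; i < first + count; i++)
		set_irq_chip_and_handler(i, &dummy_irq_chip, handle_simple_irq);
}
```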
1da177e4 LT |
295 | /* |
296 | * Special, empty irq handler: | |
297 | */ | |
7d12e780 | 298 | irqreturn_t no_action(int cpl, void *dev_id) |
1da177e4 LT |
299 | { |
300 | return IRQ_NONE; | |
301 | } | |
302 | ||
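no_action is useful when a line must be claimed and enabled but nothing should run when it fires, a cascade input into a secondary controller being the classic case. A hedged sketch (the board helper is hypothetical):

```c
#include <linux/init.h>
#include <linux/interrupt.h>

/* Hypothetical board code: claim the cascade line of a secondary
 * controller so it stays enabled; the demultiplexing happens elsewhere. */
static int __init foo_claim_cascade(unsigned int cascade_irq)
{
	return request_irq(cascade_irq, no_action, 0, "cascade", NULL);
}
```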
8d28bc75 IM |
303 | /** |
304 | * handle_IRQ_event - irq action chain handler | |
305 | * @irq: the interrupt number | |
8d28bc75 IM |
306 | * @action: the interrupt action chain for this irq |
307 | * | |
308 | * Handles the action chain of an irq event | |
1da177e4 | 309 | */ |
7d12e780 | 310 | irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action) |
1da177e4 | 311 | { |
908dcecd JB |
312 | irqreturn_t ret, retval = IRQ_NONE; |
313 | unsigned int status = 0; | |
1da177e4 | 314 | |
3cca53b0 | 315 | if (!(action->flags & IRQF_DISABLED)) |
366c7f55 | 316 | local_irq_enable_in_hardirq(); |
1da177e4 LT |
317 | |
318 | do { | |
7d12e780 | 319 | ret = action->handler(irq, action->dev_id); |
1da177e4 LT |
320 | if (ret == IRQ_HANDLED) |
321 | status |= action->flags; | |
322 | retval |= ret; | |
323 | action = action->next; | |
324 | } while (action); | |
325 | ||
3cca53b0 | 326 | if (status & IRQF_SAMPLE_RANDOM) |
1da177e4 LT |
327 | add_interrupt_randomness(irq); |
328 | local_irq_disable(); | |
329 | ||
330 | return retval; | |
331 | } | |
332 | ||
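The action chain walked above is built by request_irq(): every handler registered with IRQF_SHARED on the same line becomes one element, and each is called in turn on every interrupt. A hedged illustration with a made-up device (the register layout and all foo_* names are hypothetical); a well-behaved shared handler returns IRQ_NONE when the interrupt was not its own:

```c
#include <linux/interrupt.h>
#include <linux/io.h>

struct foo_dev {
	void __iomem *regs;		/* hypothetical status register base */
	unsigned long irq_count;
};

/* Only claim interrupts this instance actually raised; IRQ_NONE lets
 * handle_IRQ_event() move on to the next action in the chain and keeps
 * the spurious-interrupt accounting meaningful. */
static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	struct foo_dev *dev = dev_id;

	if (!(readl(dev->regs) & 0x1))	/* hypothetical "IRQ pending" bit */
		return IRQ_NONE;

	dev->irq_count++;
	return IRQ_HANDLED;
}

/* Two instances sharing one line: request_irq() appends each to the
 * same action chain, so both handlers are visited on every interrupt. */
static int foo_register_pair(unsigned int irq, struct foo_dev *a, struct foo_dev *b)
{
	int ret;

	ret = request_irq(irq, foo_interrupt, IRQF_SHARED, "foo-a", a);
	if (ret)
		return ret;

	ret = request_irq(irq, foo_interrupt, IRQF_SHARED, "foo-b", b);
	if (ret)
		free_irq(irq, a);
	return ret;
}
```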
af8c65b5 | 333 | #ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ |
8d28bc75 IM |
334 | /** |
335 | * __do_IRQ - original all-in-one high-level IRQ handler |
336 | * @irq: the interrupt number | |
8d28bc75 IM |
337 | * |
338 | * __do_IRQ handles all normal device IRQs (the special |
1da177e4 LT |
339 | * SMP cross-CPU interrupts have their own specific |
340 | * handlers). | |
8d28bc75 IM |
341 | * |
342 | * This is the original x86 implementation which is used for every | |
343 | * interrupt type. | |
1da177e4 | 344 | */ |
7ad5b3a5 | 345 | unsigned int __do_IRQ(unsigned int irq) |
1da177e4 | 346 | { |
08678b08 | 347 | struct irq_desc *desc = irq_to_desc(irq); |
06fcb0c6 | 348 | struct irqaction *action; |
1da177e4 LT |
349 | unsigned int status; |
350 | ||
d6c88a50 TG |
351 | kstat_incr_irqs_this_cpu(irq, desc); |
352 | ||
f26fdd59 | 353 | if (CHECK_IRQ_PER_CPU(desc->status)) { |
1da177e4 LT |
354 | irqreturn_t action_ret; |
355 | ||
356 | /* | |
357 | * No locking required for CPU-local interrupts: | |
358 | */ | |
48a1b10a | 359 | if (desc->chip->ack) { |
d1bef4ed | 360 | desc->chip->ack(irq); |
48a1b10a YL |
361 | /* get new one */ |
362 | desc = irq_remap_to_desc(irq, desc); | |
363 | } | |
c642b839 RA |
364 | if (likely(!(desc->status & IRQ_DISABLED))) { |
365 | action_ret = handle_IRQ_event(irq, desc->action); | |
366 | if (!noirqdebug) | |
367 | note_interrupt(irq, desc, action_ret); | |
368 | } | |
d1bef4ed | 369 | desc->chip->end(irq); |
1da177e4 LT |
370 | return 1; |
371 | } | |
372 | ||
373 | spin_lock(&desc->lock); | |
48a1b10a | 374 | if (desc->chip->ack) { |
d1bef4ed | 375 | desc->chip->ack(irq); |
48a1b10a YL |
376 | desc = irq_remap_to_desc(irq, desc); |
377 | } | |
1da177e4 LT |
378 | /* |
379 | * REPLAY is when Linux resends an IRQ that was dropped earlier | |
380 | * WAITING is used by probe to mark irqs that are being tested | |
381 | */ | |
382 | status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING); | |
383 | status |= IRQ_PENDING; /* we _want_ to handle it */ | |
384 | ||
385 | /* | |
386 | * If the IRQ is disabled for whatever reason, we cannot | |
387 | * use the action we have. | |
388 | */ | |
389 | action = NULL; | |
390 | if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) { | |
391 | action = desc->action; | |
392 | status &= ~IRQ_PENDING; /* we commit to handling */ | |
393 | status |= IRQ_INPROGRESS; /* we are handling it */ | |
394 | } | |
395 | desc->status = status; | |
396 | ||
397 | /* | |
398 | * If there is no IRQ handler or it was disabled, exit early. | |
399 | * Since we set PENDING, if another processor is handling | |
400 | * a different instance of this same irq, the other processor | |
401 | * will take care of it. | |
402 | */ | |
403 | if (unlikely(!action)) | |
404 | goto out; | |
405 | ||
406 | /* | |
407 | * Edge triggered interrupts need to remember | |
408 | * pending events. | |
409 | * This applies to any hw interrupts that allow a second | |
410 | * instance of the same irq to arrive while we are in do_IRQ | |
411 | * or in the handler. But the code here only handles the _second_ | |
412 | * instance of the irq, not the third or fourth. So it is mostly | |
413 | * useful for irq hardware that does not mask cleanly in an | |
414 | * SMP environment. | |
415 | */ | |
416 | for (;;) { | |
417 | irqreturn_t action_ret; | |
418 | ||
419 | spin_unlock(&desc->lock); | |
420 | ||
7d12e780 | 421 | action_ret = handle_IRQ_event(irq, action); |
1da177e4 | 422 | if (!noirqdebug) |
7d12e780 | 423 | note_interrupt(irq, desc, action_ret); |
b42172fc LT |
424 | |
425 | spin_lock(&desc->lock); | |
1da177e4 LT |
426 | if (likely(!(desc->status & IRQ_PENDING))) |
427 | break; | |
428 | desc->status &= ~IRQ_PENDING; | |
429 | } | |
430 | desc->status &= ~IRQ_INPROGRESS; | |
431 | ||
432 | out: | |
433 | /* | |
434 | * The ->end() handler has to deal with interrupts which got | |
435 | * disabled while the handler was running. | |
436 | */ | |
d1bef4ed | 437 | desc->chip->end(irq); |
1da177e4 LT |
438 | spin_unlock(&desc->lock); |
439 | ||
440 | return 1; | |
441 | } | |
af8c65b5 | 442 | #endif |
1da177e4 | 443 | |
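On architectures that have not yet converted to the generic flow handlers (i.e. that do not set CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ), the low-level entry code decodes the vector and hands the interrupt number to __do_IRQ(). A hedged sketch of such an entry path; the function name, vector decoding, and vector base are invented for illustration:

```c
#include <linux/hardirq.h>
#include <linux/irq.h>
#include <linux/linkage.h>
#include <asm/irq_regs.h>

/* Hypothetical low-level entry: called from the assembly stub with the
 * trapped register frame. */
asmlinkage void foo_do_IRQ(unsigned int vector, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned int irq = vector - 0x20;	/* hypothetical vector base */

	irq_enter();
	__do_IRQ(irq);		/* ack, run the action chain, end */
	irq_exit();

	set_irq_regs(old_regs);
}
```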
243c7621 IM |
444 | void early_init_irq_lock_class(void) |
445 | { | |
10e58084 | 446 | struct irq_desc *desc; |
243c7621 IM |
447 | int i; |
448 | ||
0b8f1efa | 449 | for_each_irq_desc(i, desc) { |
10e58084 | 450 | lockdep_set_class(&desc->lock, &irq_desc_lock_class); |
0b8f1efa | 451 | } |
0b8f1efa | 452 | } |
0b8f1efa YL |
453 | |
454 | #ifdef CONFIG_SPARSE_IRQ | |
455 | unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) | |
456 | { | |
457 | struct irq_desc *desc = irq_to_desc(irq); | |
26ddd8d5 | 458 | return desc ? desc->kstat_irqs[cpu] : 0; |
243c7621 | 459 | } |
243c7621 | 460 | #endif |
0b8f1efa YL |
461 | EXPORT_SYMBOL(kstat_irqs_cpu); |
462 |
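kstat_irqs_cpu() is the accessor that /proc/interrupts-style reporting builds on: it returns the per-CPU count kept in the descriptor for one interrupt. A hedged sketch of summing it across online CPUs (the helper name is made up):

```c
#include <linux/cpumask.h>
#include <linux/kernel_stat.h>

/* Hypothetical helper: total number of times 'irq' has fired, summed
 * over all online CPUs, using the per-CPU counters in the irq_desc. */
static unsigned int foo_total_irqs(unsigned int irq)
{
	unsigned int cpu, sum = 0;

	for_each_online_cpu(cpu)
		sum += kstat_irqs_cpu(irq, cpu);

	return sum;
}
```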