Commit | Line | Data |
---|---|---|
dd87eb3a TG |
1 | /* |
2 | * linux/kernel/irq/chip.c | |
3 | * | |
4 | * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar | |
5 | * Copyright (C) 2005-2006, Thomas Gleixner, Russell King | |
6 | * | |
7 | * This file contains the core interrupt handling code, for irq-chip | |
8 | * based architectures. | |
9 | * | |
10 | * Detailed information is available in Documentation/DocBook/genericirq | |
11 | */ | |
12 | ||
13 | #include <linux/irq.h> | |
7fe3730d | 14 | #include <linux/msi.h> |
dd87eb3a TG |
15 | #include <linux/module.h> |
16 | #include <linux/interrupt.h> | |
17 | #include <linux/kernel_stat.h> | |
18 | ||
f069686e SR |
19 | #include <trace/events/irq.h> |
20 | ||
dd87eb3a TG |
21 | #include "internals.h" |
22 | ||
23 | /** | |
a0cd9ca2 | 24 | * irq_set_chip - set the irq chip for an irq |
dd87eb3a TG |
25 | * @irq: irq number |
26 | * @chip: pointer to irq chip description structure | |
27 | */ | |
a0cd9ca2 | 28 | int irq_set_chip(unsigned int irq, struct irq_chip *chip) |
dd87eb3a | 29 | { |
dd87eb3a | 30 | unsigned long flags; |
31d9d9b6 | 31 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); |
dd87eb3a | 32 | |
02725e74 | 33 | if (!desc) |
dd87eb3a | 34 | return -EINVAL; |
dd87eb3a TG |
35 | |
36 | if (!chip) | |
37 | chip = &no_irq_chip; | |
38 | ||
6b8ff312 | 39 | desc->irq_data.chip = chip; |
02725e74 | 40 | irq_put_desc_unlock(desc, flags); |
d72274e5 DD |
41 | /* |
42 | * For !CONFIG_SPARSE_IRQ make the irq show up in | |
43 | * allocated_irqs. For the CONFIG_SPARSE_IRQ case, it is | |
44 | * already marked, and this call is harmless. | |
45 | */ | |
46 | irq_reserve_irq(irq); | |
dd87eb3a TG |
47 | return 0; |
48 | } | |
a0cd9ca2 | 49 | EXPORT_SYMBOL(irq_set_chip); |
dd87eb3a TG |
50 | |
51 | /** | |
a0cd9ca2 | 52 | * irq_set_type - set the irq trigger type for an irq |
dd87eb3a | 53 | * @irq: irq number |
0c5d1eb7 | 54 | * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h |
dd87eb3a | 55 | */ |
a0cd9ca2 | 56 | int irq_set_irq_type(unsigned int irq, unsigned int type) |
dd87eb3a | 57 | { |
dd87eb3a | 58 | unsigned long flags; |
31d9d9b6 | 59 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); |
02725e74 | 60 | int ret = 0; |
dd87eb3a | 61 | |
02725e74 TG |
62 | if (!desc) |
63 | return -EINVAL; | |
dd87eb3a | 64 | |
f2b662da | 65 | type &= IRQ_TYPE_SENSE_MASK; |
a09b659c | 66 | ret = __irq_set_trigger(desc, irq, type); |
02725e74 | 67 | irq_put_desc_busunlock(desc, flags); |
dd87eb3a TG |
68 | return ret; |
69 | } | |
a0cd9ca2 | 70 | EXPORT_SYMBOL(irq_set_irq_type); |
dd87eb3a TG |
71 | |
72 | /** | |
a0cd9ca2 | 73 | * irq_set_handler_data - set irq handler data for an irq |
dd87eb3a TG |
74 | * @irq: Interrupt number |
75 | * @data: Pointer to interrupt specific data | |
76 | * | |
77 | * Set the hardware irq controller data for an irq | |
78 | */ | |
a0cd9ca2 | 79 | int irq_set_handler_data(unsigned int irq, void *data) |
dd87eb3a | 80 | { |
dd87eb3a | 81 | unsigned long flags; |
31d9d9b6 | 82 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); |
dd87eb3a | 83 | |
02725e74 | 84 | if (!desc) |
dd87eb3a | 85 | return -EINVAL; |
6b8ff312 | 86 | desc->irq_data.handler_data = data; |
02725e74 | 87 | irq_put_desc_unlock(desc, flags); |
dd87eb3a TG |
88 | return 0; |
89 | } | |
a0cd9ca2 | 90 | EXPORT_SYMBOL(irq_set_handler_data); |
dd87eb3a | 91 | |
5b912c10 | 92 | /** |
51906e77 AG |
93 | * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset |
94 | * @irq_base: Interrupt number base | |
95 | * @irq_offset: Interrupt number offset | |
96 | * @entry: Pointer to MSI descriptor data | |
5b912c10 | 97 | * |
51906e77 | 98 | * Set the MSI descriptor entry for an irq at offset |
5b912c10 | 99 | */ |
51906e77 AG |
100 | int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset, |
101 | struct msi_desc *entry) | |
5b912c10 | 102 | { |
5b912c10 | 103 | unsigned long flags; |
51906e77 | 104 | struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL); |
5b912c10 | 105 | |
02725e74 | 106 | if (!desc) |
5b912c10 | 107 | return -EINVAL; |
6b8ff312 | 108 | desc->irq_data.msi_desc = entry; |
51906e77 AG |
109 | if (entry && !irq_offset) |
110 | entry->irq = irq_base; | |
02725e74 | 111 | irq_put_desc_unlock(desc, flags); |
5b912c10 EB |
112 | return 0; |
113 | } | |
114 | ||
51906e77 AG |
/**
 *	irq_set_msi_desc - set MSI descriptor data for an irq
 *	@irq:	Interrupt number
 *	@entry:	Pointer to MSI descriptor data
 *
 *	Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	/* A plain irq is a multi-vector block of size one at offset zero. */
	return irq_set_msi_desc_off(irq, 0, entry);
}
126 | ||
dd87eb3a | 127 | /** |
a0cd9ca2 | 128 | * irq_set_chip_data - set irq chip data for an irq |
dd87eb3a TG |
129 | * @irq: Interrupt number |
130 | * @data: Pointer to chip specific data | |
131 | * | |
132 | * Set the hardware irq chip data for an irq | |
133 | */ | |
a0cd9ca2 | 134 | int irq_set_chip_data(unsigned int irq, void *data) |
dd87eb3a | 135 | { |
dd87eb3a | 136 | unsigned long flags; |
31d9d9b6 | 137 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); |
dd87eb3a | 138 | |
02725e74 | 139 | if (!desc) |
dd87eb3a | 140 | return -EINVAL; |
6b8ff312 | 141 | desc->irq_data.chip_data = data; |
02725e74 | 142 | irq_put_desc_unlock(desc, flags); |
dd87eb3a TG |
143 | return 0; |
144 | } | |
a0cd9ca2 | 145 | EXPORT_SYMBOL(irq_set_chip_data); |
dd87eb3a | 146 | |
f303a6dd TG |
147 | struct irq_data *irq_get_irq_data(unsigned int irq) |
148 | { | |
149 | struct irq_desc *desc = irq_to_desc(irq); | |
150 | ||
151 | return desc ? &desc->irq_data : NULL; | |
152 | } | |
153 | EXPORT_SYMBOL_GPL(irq_get_irq_data); | |
154 | ||
c1594b77 TG |
155 | static void irq_state_clr_disabled(struct irq_desc *desc) |
156 | { | |
801a0e9a | 157 | irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED); |
c1594b77 TG |
158 | } |
159 | ||
160 | static void irq_state_set_disabled(struct irq_desc *desc) | |
161 | { | |
801a0e9a | 162 | irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED); |
c1594b77 TG |
163 | } |
164 | ||
6e40262e TG |
165 | static void irq_state_clr_masked(struct irq_desc *desc) |
166 | { | |
32f4125e | 167 | irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED); |
6e40262e TG |
168 | } |
169 | ||
170 | static void irq_state_set_masked(struct irq_desc *desc) | |
171 | { | |
32f4125e | 172 | irqd_set(&desc->irq_data, IRQD_IRQ_MASKED); |
6e40262e TG |
173 | } |
174 | ||
b4bc724e | 175 | int irq_startup(struct irq_desc *desc, bool resend) |
46999238 | 176 | { |
b4bc724e TG |
177 | int ret = 0; |
178 | ||
c1594b77 | 179 | irq_state_clr_disabled(desc); |
46999238 TG |
180 | desc->depth = 0; |
181 | ||
3aae994f | 182 | if (desc->irq_data.chip->irq_startup) { |
b4bc724e | 183 | ret = desc->irq_data.chip->irq_startup(&desc->irq_data); |
6e40262e | 184 | irq_state_clr_masked(desc); |
b4bc724e TG |
185 | } else { |
186 | irq_enable(desc); | |
3aae994f | 187 | } |
b4bc724e TG |
188 | if (resend) |
189 | check_irq_resend(desc, desc->irq_data.irq); | |
190 | return ret; | |
46999238 TG |
191 | } |
192 | ||
193 | void irq_shutdown(struct irq_desc *desc) | |
194 | { | |
c1594b77 | 195 | irq_state_set_disabled(desc); |
46999238 | 196 | desc->depth = 1; |
50f7c032 TG |
197 | if (desc->irq_data.chip->irq_shutdown) |
198 | desc->irq_data.chip->irq_shutdown(&desc->irq_data); | |
ed585a65 | 199 | else if (desc->irq_data.chip->irq_disable) |
50f7c032 TG |
200 | desc->irq_data.chip->irq_disable(&desc->irq_data); |
201 | else | |
202 | desc->irq_data.chip->irq_mask(&desc->irq_data); | |
6e40262e | 203 | irq_state_set_masked(desc); |
46999238 TG |
204 | } |
205 | ||
87923470 TG |
206 | void irq_enable(struct irq_desc *desc) |
207 | { | |
c1594b77 | 208 | irq_state_clr_disabled(desc); |
50f7c032 TG |
209 | if (desc->irq_data.chip->irq_enable) |
210 | desc->irq_data.chip->irq_enable(&desc->irq_data); | |
211 | else | |
212 | desc->irq_data.chip->irq_unmask(&desc->irq_data); | |
6e40262e | 213 | irq_state_clr_masked(desc); |
dd87eb3a TG |
214 | } |
215 | ||
d671a605 AF |
216 | /** |
217 | * irq_disable - Mark interupt disabled | |
218 | * @desc: irq descriptor which should be disabled | |
219 | * | |
220 | * If the chip does not implement the irq_disable callback, we | |
221 | * use a lazy disable approach. That means we mark the interrupt | |
222 | * disabled, but leave the hardware unmasked. That's an | |
223 | * optimization because we avoid the hardware access for the | |
224 | * common case where no interrupt happens after we marked it | |
225 | * disabled. If an interrupt happens, then the interrupt flow | |
226 | * handler masks the line at the hardware level and marks it | |
227 | * pending. | |
228 | */ | |
50f7c032 | 229 | void irq_disable(struct irq_desc *desc) |
89d694b9 | 230 | { |
c1594b77 | 231 | irq_state_set_disabled(desc); |
50f7c032 TG |
232 | if (desc->irq_data.chip->irq_disable) { |
233 | desc->irq_data.chip->irq_disable(&desc->irq_data); | |
a61d8258 | 234 | irq_state_set_masked(desc); |
50f7c032 | 235 | } |
89d694b9 TG |
236 | } |
237 | ||
31d9d9b6 MZ |
238 | void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu) |
239 | { | |
240 | if (desc->irq_data.chip->irq_enable) | |
241 | desc->irq_data.chip->irq_enable(&desc->irq_data); | |
242 | else | |
243 | desc->irq_data.chip->irq_unmask(&desc->irq_data); | |
244 | cpumask_set_cpu(cpu, desc->percpu_enabled); | |
245 | } | |
246 | ||
247 | void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu) | |
248 | { | |
249 | if (desc->irq_data.chip->irq_disable) | |
250 | desc->irq_data.chip->irq_disable(&desc->irq_data); | |
251 | else | |
252 | desc->irq_data.chip->irq_mask(&desc->irq_data); | |
253 | cpumask_clear_cpu(cpu, desc->percpu_enabled); | |
254 | } | |
255 | ||
9205e31d | 256 | static inline void mask_ack_irq(struct irq_desc *desc) |
dd87eb3a | 257 | { |
9205e31d TG |
258 | if (desc->irq_data.chip->irq_mask_ack) |
259 | desc->irq_data.chip->irq_mask_ack(&desc->irq_data); | |
dd87eb3a | 260 | else { |
e2c0f8ff | 261 | desc->irq_data.chip->irq_mask(&desc->irq_data); |
22a49163 TG |
262 | if (desc->irq_data.chip->irq_ack) |
263 | desc->irq_data.chip->irq_ack(&desc->irq_data); | |
dd87eb3a | 264 | } |
6e40262e | 265 | irq_state_set_masked(desc); |
0b1adaa0 TG |
266 | } |
267 | ||
d4d5e089 | 268 | void mask_irq(struct irq_desc *desc) |
0b1adaa0 | 269 | { |
e2c0f8ff TG |
270 | if (desc->irq_data.chip->irq_mask) { |
271 | desc->irq_data.chip->irq_mask(&desc->irq_data); | |
6e40262e | 272 | irq_state_set_masked(desc); |
0b1adaa0 TG |
273 | } |
274 | } | |
275 | ||
d4d5e089 | 276 | void unmask_irq(struct irq_desc *desc) |
0b1adaa0 | 277 | { |
0eda58b7 TG |
278 | if (desc->irq_data.chip->irq_unmask) { |
279 | desc->irq_data.chip->irq_unmask(&desc->irq_data); | |
6e40262e | 280 | irq_state_clr_masked(desc); |
0b1adaa0 | 281 | } |
dd87eb3a TG |
282 | } |
283 | ||
399b5da2 TG |
284 | /* |
285 | * handle_nested_irq - Handle a nested irq from a irq thread | |
286 | * @irq: the interrupt number | |
287 | * | |
288 | * Handle interrupts which are nested into a threaded interrupt | |
289 | * handler. The handler function is called inside the calling | |
290 | * threads context. | |
291 | */ | |
292 | void handle_nested_irq(unsigned int irq) | |
293 | { | |
294 | struct irq_desc *desc = irq_to_desc(irq); | |
295 | struct irqaction *action; | |
296 | irqreturn_t action_ret; | |
297 | ||
298 | might_sleep(); | |
299 | ||
239007b8 | 300 | raw_spin_lock_irq(&desc->lock); |
399b5da2 | 301 | |
293a7a0a | 302 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
399b5da2 TG |
303 | kstat_incr_irqs_this_cpu(irq, desc); |
304 | ||
305 | action = desc->action; | |
23812b9d NJ |
306 | if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) { |
307 | desc->istate |= IRQS_PENDING; | |
399b5da2 | 308 | goto out_unlock; |
23812b9d | 309 | } |
399b5da2 | 310 | |
32f4125e | 311 | irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS); |
239007b8 | 312 | raw_spin_unlock_irq(&desc->lock); |
399b5da2 TG |
313 | |
314 | action_ret = action->thread_fn(action->irq, action->dev_id); | |
315 | if (!noirqdebug) | |
316 | note_interrupt(irq, desc, action_ret); | |
317 | ||
239007b8 | 318 | raw_spin_lock_irq(&desc->lock); |
32f4125e | 319 | irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); |
399b5da2 TG |
320 | |
321 | out_unlock: | |
239007b8 | 322 | raw_spin_unlock_irq(&desc->lock); |
399b5da2 TG |
323 | } |
324 | EXPORT_SYMBOL_GPL(handle_nested_irq); | |
325 | ||
fe200ae4 TG |
326 | static bool irq_check_poll(struct irq_desc *desc) |
327 | { | |
6954b75b | 328 | if (!(desc->istate & IRQS_POLL_INPROGRESS)) |
fe200ae4 TG |
329 | return false; |
330 | return irq_wait_for_poll(desc); | |
331 | } | |
332 | ||
dd87eb3a TG |
333 | /** |
334 | * handle_simple_irq - Simple and software-decoded IRQs. | |
335 | * @irq: the interrupt number | |
336 | * @desc: the interrupt description structure for this irq | |
dd87eb3a TG |
337 | * |
338 | * Simple interrupts are either sent from a demultiplexing interrupt | |
339 | * handler or come from hardware, where no interrupt hardware control | |
340 | * is necessary. | |
341 | * | |
342 | * Note: The caller is expected to handle the ack, clear, mask and | |
343 | * unmask issues if necessary. | |
344 | */ | |
7ad5b3a5 | 345 | void |
7d12e780 | 346 | handle_simple_irq(unsigned int irq, struct irq_desc *desc) |
dd87eb3a | 347 | { |
239007b8 | 348 | raw_spin_lock(&desc->lock); |
dd87eb3a | 349 | |
32f4125e | 350 | if (unlikely(irqd_irq_inprogress(&desc->irq_data))) |
fe200ae4 TG |
351 | if (!irq_check_poll(desc)) |
352 | goto out_unlock; | |
353 | ||
163ef309 | 354 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
d6c88a50 | 355 | kstat_incr_irqs_this_cpu(irq, desc); |
dd87eb3a | 356 | |
23812b9d NJ |
357 | if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { |
358 | desc->istate |= IRQS_PENDING; | |
dd87eb3a | 359 | goto out_unlock; |
23812b9d | 360 | } |
dd87eb3a | 361 | |
107781e7 | 362 | handle_irq_event(desc); |
dd87eb3a | 363 | |
dd87eb3a | 364 | out_unlock: |
239007b8 | 365 | raw_spin_unlock(&desc->lock); |
dd87eb3a | 366 | } |
edf76f83 | 367 | EXPORT_SYMBOL_GPL(handle_simple_irq); |
dd87eb3a | 368 | |
ac563761 TG |
369 | /* |
370 | * Called unconditionally from handle_level_irq() and only for oneshot | |
371 | * interrupts from handle_fasteoi_irq() | |
372 | */ | |
373 | static void cond_unmask_irq(struct irq_desc *desc) | |
374 | { | |
375 | /* | |
376 | * We need to unmask in the following cases: | |
377 | * - Standard level irq (IRQF_ONESHOT is not set) | |
378 | * - Oneshot irq which did not wake the thread (caused by a | |
379 | * spurious interrupt or a primary handler handling it | |
380 | * completely). | |
381 | */ | |
382 | if (!irqd_irq_disabled(&desc->irq_data) && | |
383 | irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) | |
384 | unmask_irq(desc); | |
385 | } | |
386 | ||
dd87eb3a TG |
387 | /** |
388 | * handle_level_irq - Level type irq handler | |
389 | * @irq: the interrupt number | |
390 | * @desc: the interrupt description structure for this irq | |
dd87eb3a TG |
391 | * |
392 | * Level type interrupts are active as long as the hardware line has | |
393 | * the active level. This may require to mask the interrupt and unmask | |
394 | * it after the associated handler has acknowledged the device, so the | |
395 | * interrupt line is back to inactive. | |
396 | */ | |
7ad5b3a5 | 397 | void |
7d12e780 | 398 | handle_level_irq(unsigned int irq, struct irq_desc *desc) |
dd87eb3a | 399 | { |
239007b8 | 400 | raw_spin_lock(&desc->lock); |
9205e31d | 401 | mask_ack_irq(desc); |
dd87eb3a | 402 | |
32f4125e | 403 | if (unlikely(irqd_irq_inprogress(&desc->irq_data))) |
fe200ae4 TG |
404 | if (!irq_check_poll(desc)) |
405 | goto out_unlock; | |
406 | ||
163ef309 | 407 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
d6c88a50 | 408 | kstat_incr_irqs_this_cpu(irq, desc); |
dd87eb3a TG |
409 | |
410 | /* | |
411 | * If its disabled or no action available | |
412 | * keep it masked and get out of here | |
413 | */ | |
d4dc0f90 TG |
414 | if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { |
415 | desc->istate |= IRQS_PENDING; | |
86998aa6 | 416 | goto out_unlock; |
d4dc0f90 | 417 | } |
dd87eb3a | 418 | |
1529866c | 419 | handle_irq_event(desc); |
b25c340c | 420 | |
ac563761 TG |
421 | cond_unmask_irq(desc); |
422 | ||
86998aa6 | 423 | out_unlock: |
239007b8 | 424 | raw_spin_unlock(&desc->lock); |
dd87eb3a | 425 | } |
14819ea1 | 426 | EXPORT_SYMBOL_GPL(handle_level_irq); |
dd87eb3a | 427 | |
78129576 TG |
#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
/* Invoke the optional preflow handler before running the irq event. */
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif
437 | ||
dd87eb3a | 438 | /** |
47c2a3aa | 439 | * handle_fasteoi_irq - irq handler for transparent controllers |
dd87eb3a TG |
440 | * @irq: the interrupt number |
441 | * @desc: the interrupt description structure for this irq | |
dd87eb3a | 442 | * |
47c2a3aa | 443 | * Only a single callback will be issued to the chip: an ->eoi() |
dd87eb3a TG |
444 | * call when the interrupt has been serviced. This enables support |
445 | * for modern forms of interrupt handlers, which handle the flow | |
446 | * details in hardware, transparently. | |
447 | */ | |
7ad5b3a5 | 448 | void |
7d12e780 | 449 | handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) |
dd87eb3a | 450 | { |
239007b8 | 451 | raw_spin_lock(&desc->lock); |
dd87eb3a | 452 | |
32f4125e | 453 | if (unlikely(irqd_irq_inprogress(&desc->irq_data))) |
fe200ae4 TG |
454 | if (!irq_check_poll(desc)) |
455 | goto out; | |
dd87eb3a | 456 | |
163ef309 | 457 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
d6c88a50 | 458 | kstat_incr_irqs_this_cpu(irq, desc); |
dd87eb3a TG |
459 | |
460 | /* | |
461 | * If its disabled or no action available | |
76d21601 | 462 | * then mask it and get out of here: |
dd87eb3a | 463 | */ |
32f4125e | 464 | if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { |
2a0d6fb3 | 465 | desc->istate |= IRQS_PENDING; |
e2c0f8ff | 466 | mask_irq(desc); |
dd87eb3a | 467 | goto out; |
98bb244b | 468 | } |
c69e3758 TG |
469 | |
470 | if (desc->istate & IRQS_ONESHOT) | |
471 | mask_irq(desc); | |
472 | ||
78129576 | 473 | preflow_handler(desc); |
a7ae4de5 | 474 | handle_irq_event(desc); |
77694b40 | 475 | |
ac563761 TG |
476 | if (desc->istate & IRQS_ONESHOT) |
477 | cond_unmask_irq(desc); | |
478 | ||
77694b40 | 479 | out_eoi: |
0c5c1557 | 480 | desc->irq_data.chip->irq_eoi(&desc->irq_data); |
77694b40 | 481 | out_unlock: |
239007b8 | 482 | raw_spin_unlock(&desc->lock); |
77694b40 TG |
483 | return; |
484 | out: | |
485 | if (!(desc->irq_data.chip->flags & IRQCHIP_EOI_IF_HANDLED)) | |
486 | goto out_eoi; | |
487 | goto out_unlock; | |
dd87eb3a TG |
488 | } |
489 | ||
490 | /** | |
491 | * handle_edge_irq - edge type IRQ handler | |
492 | * @irq: the interrupt number | |
493 | * @desc: the interrupt description structure for this irq | |
dd87eb3a TG |
494 | * |
495 | * Interrupt occures on the falling and/or rising edge of a hardware | |
25985edc | 496 | * signal. The occurrence is latched into the irq controller hardware |
dd87eb3a TG |
497 | * and must be acked in order to be reenabled. After the ack another |
498 | * interrupt can happen on the same source even before the first one | |
dfff0615 | 499 | * is handled by the associated event handler. If this happens it |
dd87eb3a TG |
500 | * might be necessary to disable (mask) the interrupt depending on the |
501 | * controller hardware. This requires to reenable the interrupt inside | |
502 | * of the loop which handles the interrupts which have arrived while | |
503 | * the handler was running. If all pending interrupts are handled, the | |
504 | * loop is left. | |
505 | */ | |
7ad5b3a5 | 506 | void |
7d12e780 | 507 | handle_edge_irq(unsigned int irq, struct irq_desc *desc) |
dd87eb3a | 508 | { |
239007b8 | 509 | raw_spin_lock(&desc->lock); |
dd87eb3a | 510 | |
163ef309 | 511 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
dd87eb3a TG |
512 | /* |
513 | * If we're currently running this IRQ, or its disabled, | |
514 | * we shouldn't process the IRQ. Mark it pending, handle | |
515 | * the necessary masking and go out | |
516 | */ | |
32f4125e TG |
517 | if (unlikely(irqd_irq_disabled(&desc->irq_data) || |
518 | irqd_irq_inprogress(&desc->irq_data) || !desc->action)) { | |
fe200ae4 | 519 | if (!irq_check_poll(desc)) { |
2a0d6fb3 | 520 | desc->istate |= IRQS_PENDING; |
fe200ae4 TG |
521 | mask_ack_irq(desc); |
522 | goto out_unlock; | |
523 | } | |
dd87eb3a | 524 | } |
d6c88a50 | 525 | kstat_incr_irqs_this_cpu(irq, desc); |
dd87eb3a TG |
526 | |
527 | /* Start handling the irq */ | |
22a49163 | 528 | desc->irq_data.chip->irq_ack(&desc->irq_data); |
dd87eb3a | 529 | |
dd87eb3a | 530 | do { |
a60a5dc2 | 531 | if (unlikely(!desc->action)) { |
e2c0f8ff | 532 | mask_irq(desc); |
dd87eb3a TG |
533 | goto out_unlock; |
534 | } | |
535 | ||
536 | /* | |
537 | * When another irq arrived while we were handling | |
538 | * one, we could have masked the irq. | |
539 | * Renable it, if it was not disabled in meantime. | |
540 | */ | |
2a0d6fb3 | 541 | if (unlikely(desc->istate & IRQS_PENDING)) { |
32f4125e TG |
542 | if (!irqd_irq_disabled(&desc->irq_data) && |
543 | irqd_irq_masked(&desc->irq_data)) | |
c1594b77 | 544 | unmask_irq(desc); |
dd87eb3a TG |
545 | } |
546 | ||
a60a5dc2 | 547 | handle_irq_event(desc); |
dd87eb3a | 548 | |
2a0d6fb3 | 549 | } while ((desc->istate & IRQS_PENDING) && |
32f4125e | 550 | !irqd_irq_disabled(&desc->irq_data)); |
dd87eb3a | 551 | |
dd87eb3a | 552 | out_unlock: |
239007b8 | 553 | raw_spin_unlock(&desc->lock); |
dd87eb3a | 554 | } |
3911ff30 | 555 | EXPORT_SYMBOL(handle_edge_irq); |
dd87eb3a | 556 | |
0521c8fb TG |
#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 *	handle_edge_eoi_irq - edge eoi type IRQ handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 * Similar as the above handle_edge_irq, but using eoi and w/o the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or its disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			goto out_eoi;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif
601 | ||
dd87eb3a | 602 | /** |
24b26d42 | 603 | * handle_percpu_irq - Per CPU local irq handler |
dd87eb3a TG |
604 | * @irq: the interrupt number |
605 | * @desc: the interrupt description structure for this irq | |
dd87eb3a TG |
606 | * |
607 | * Per CPU interrupts on SMP machines without locking requirements | |
608 | */ | |
7ad5b3a5 | 609 | void |
7d12e780 | 610 | handle_percpu_irq(unsigned int irq, struct irq_desc *desc) |
dd87eb3a | 611 | { |
35e857cb | 612 | struct irq_chip *chip = irq_desc_get_chip(desc); |
dd87eb3a | 613 | |
d6c88a50 | 614 | kstat_incr_irqs_this_cpu(irq, desc); |
dd87eb3a | 615 | |
849f061c TG |
616 | if (chip->irq_ack) |
617 | chip->irq_ack(&desc->irq_data); | |
dd87eb3a | 618 | |
849f061c | 619 | handle_irq_event_percpu(desc, desc->action); |
dd87eb3a | 620 | |
849f061c TG |
621 | if (chip->irq_eoi) |
622 | chip->irq_eoi(&desc->irq_data); | |
dd87eb3a TG |
623 | } |
624 | ||
31d9d9b6 MZ |
625 | /** |
626 | * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids | |
627 | * @irq: the interrupt number | |
628 | * @desc: the interrupt description structure for this irq | |
629 | * | |
630 | * Per CPU interrupts on SMP machines without locking requirements. Same as | |
631 | * handle_percpu_irq() above but with the following extras: | |
632 | * | |
633 | * action->percpu_dev_id is a pointer to percpu variables which | |
634 | * contain the real device id for the cpu on which this handler is | |
635 | * called | |
636 | */ | |
637 | void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc) | |
638 | { | |
639 | struct irq_chip *chip = irq_desc_get_chip(desc); | |
640 | struct irqaction *action = desc->action; | |
641 | void *dev_id = __this_cpu_ptr(action->percpu_dev_id); | |
642 | irqreturn_t res; | |
643 | ||
644 | kstat_incr_irqs_this_cpu(irq, desc); | |
645 | ||
646 | if (chip->irq_ack) | |
647 | chip->irq_ack(&desc->irq_data); | |
648 | ||
649 | trace_irq_handler_entry(irq, action); | |
650 | res = action->handler(irq, dev_id); | |
651 | trace_irq_handler_exit(irq, action, res); | |
652 | ||
653 | if (chip->irq_eoi) | |
654 | chip->irq_eoi(&desc->irq_data); | |
655 | } | |
656 | ||
dd87eb3a | 657 | void |
3836ca08 | 658 | __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, |
a460e745 | 659 | const char *name) |
dd87eb3a | 660 | { |
dd87eb3a | 661 | unsigned long flags; |
31d9d9b6 | 662 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0); |
dd87eb3a | 663 | |
02725e74 | 664 | if (!desc) |
dd87eb3a | 665 | return; |
dd87eb3a | 666 | |
091738a2 | 667 | if (!handle) { |
dd87eb3a | 668 | handle = handle_bad_irq; |
091738a2 TG |
669 | } else { |
670 | if (WARN_ON(desc->irq_data.chip == &no_irq_chip)) | |
02725e74 | 671 | goto out; |
f8b5473f | 672 | } |
dd87eb3a | 673 | |
dd87eb3a TG |
674 | /* Uninstall? */ |
675 | if (handle == handle_bad_irq) { | |
6b8ff312 | 676 | if (desc->irq_data.chip != &no_irq_chip) |
9205e31d | 677 | mask_ack_irq(desc); |
801a0e9a | 678 | irq_state_set_disabled(desc); |
dd87eb3a TG |
679 | desc->depth = 1; |
680 | } | |
681 | desc->handle_irq = handle; | |
a460e745 | 682 | desc->name = name; |
dd87eb3a TG |
683 | |
684 | if (handle != handle_bad_irq && is_chained) { | |
1ccb4e61 TG |
685 | irq_settings_set_noprobe(desc); |
686 | irq_settings_set_norequest(desc); | |
7f1b1244 | 687 | irq_settings_set_nothread(desc); |
b4bc724e | 688 | irq_startup(desc, true); |
dd87eb3a | 689 | } |
02725e74 TG |
690 | out: |
691 | irq_put_desc_busunlock(desc, flags); | |
dd87eb3a | 692 | } |
3836ca08 | 693 | EXPORT_SYMBOL_GPL(__irq_set_handler); |
dd87eb3a TG |
694 | |
695 | void | |
3836ca08 | 696 | irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, |
a460e745 | 697 | irq_flow_handler_t handle, const char *name) |
dd87eb3a | 698 | { |
35e857cb | 699 | irq_set_chip(irq, chip); |
3836ca08 | 700 | __irq_set_handler(irq, handle, 0, name); |
dd87eb3a | 701 | } |
b3ae66f2 | 702 | EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name); |
46f4f8f6 | 703 | |
44247184 | 704 | void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set) |
46f4f8f6 | 705 | { |
46f4f8f6 | 706 | unsigned long flags; |
31d9d9b6 | 707 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); |
46f4f8f6 | 708 | |
44247184 | 709 | if (!desc) |
46f4f8f6 | 710 | return; |
a005677b TG |
711 | irq_settings_clr_and_set(desc, clr, set); |
712 | ||
876dbd4c | 713 | irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU | |
e1ef8241 | 714 | IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT); |
a005677b TG |
715 | if (irq_settings_has_no_balance_set(desc)) |
716 | irqd_set(&desc->irq_data, IRQD_NO_BALANCING); | |
717 | if (irq_settings_is_per_cpu(desc)) | |
718 | irqd_set(&desc->irq_data, IRQD_PER_CPU); | |
e1ef8241 TG |
719 | if (irq_settings_can_move_pcntxt(desc)) |
720 | irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT); | |
0ef5ca1e TG |
721 | if (irq_settings_is_level(desc)) |
722 | irqd_set(&desc->irq_data, IRQD_LEVEL); | |
a005677b | 723 | |
876dbd4c TG |
724 | irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc)); |
725 | ||
02725e74 | 726 | irq_put_desc_unlock(desc, flags); |
46f4f8f6 | 727 | } |
edf76f83 | 728 | EXPORT_SYMBOL_GPL(irq_modify_status); |
0fdb4b25 DD |
729 | |
730 | /** | |
731 | * irq_cpu_online - Invoke all irq_cpu_online functions. | |
732 | * | |
733 | * Iterate through all irqs and invoke the chip.irq_cpu_online() | |
734 | * for each. | |
735 | */ | |
736 | void irq_cpu_online(void) | |
737 | { | |
738 | struct irq_desc *desc; | |
739 | struct irq_chip *chip; | |
740 | unsigned long flags; | |
741 | unsigned int irq; | |
742 | ||
743 | for_each_active_irq(irq) { | |
744 | desc = irq_to_desc(irq); | |
745 | if (!desc) | |
746 | continue; | |
747 | ||
748 | raw_spin_lock_irqsave(&desc->lock, flags); | |
749 | ||
750 | chip = irq_data_get_irq_chip(&desc->irq_data); | |
b3d42232 TG |
751 | if (chip && chip->irq_cpu_online && |
752 | (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) || | |
32f4125e | 753 | !irqd_irq_disabled(&desc->irq_data))) |
0fdb4b25 DD |
754 | chip->irq_cpu_online(&desc->irq_data); |
755 | ||
756 | raw_spin_unlock_irqrestore(&desc->lock, flags); | |
757 | } | |
758 | } | |
759 | ||
760 | /** | |
761 | * irq_cpu_offline - Invoke all irq_cpu_offline functions. | |
762 | * | |
763 | * Iterate through all irqs and invoke the chip.irq_cpu_offline() | |
764 | * for each. | |
765 | */ | |
766 | void irq_cpu_offline(void) | |
767 | { | |
768 | struct irq_desc *desc; | |
769 | struct irq_chip *chip; | |
770 | unsigned long flags; | |
771 | unsigned int irq; | |
772 | ||
773 | for_each_active_irq(irq) { | |
774 | desc = irq_to_desc(irq); | |
775 | if (!desc) | |
776 | continue; | |
777 | ||
778 | raw_spin_lock_irqsave(&desc->lock, flags); | |
779 | ||
780 | chip = irq_data_get_irq_chip(&desc->irq_data); | |
b3d42232 TG |
781 | if (chip && chip->irq_cpu_offline && |
782 | (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) || | |
32f4125e | 783 | !irqd_irq_disabled(&desc->irq_data))) |
0fdb4b25 DD |
784 | chip->irq_cpu_offline(&desc->irq_data); |
785 | ||
786 | raw_spin_unlock_irqrestore(&desc->lock, flags); | |
787 | } | |
788 | } |