Merge remote-tracking branches 'spi/topic/s3c64xx', 'spi/topic/ti-qspi' and 'spi...
[deliverable/linux.git] / kernel / irq / chip.c
CommitLineData
dd87eb3a
TG
1/*
2 * linux/kernel/irq/chip.c
3 *
4 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
5 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
6 *
7 * This file contains the core interrupt handling code, for irq-chip
8 * based architectures.
9 *
10 * Detailed information is available in Documentation/DocBook/genericirq
11 */
12
13#include <linux/irq.h>
7fe3730d 14#include <linux/msi.h>
dd87eb3a
TG
15#include <linux/module.h>
16#include <linux/interrupt.h>
17#include <linux/kernel_stat.h>
f8264e34 18#include <linux/irqdomain.h>
dd87eb3a 19
f069686e
SR
20#include <trace/events/irq.h>
21
dd87eb3a
TG
22#include "internals.h"
23
24/**
a0cd9ca2 25 * irq_set_chip - set the irq chip for an irq
dd87eb3a
TG
26 * @irq: irq number
27 * @chip: pointer to irq chip description structure
28 */
a0cd9ca2 29int irq_set_chip(unsigned int irq, struct irq_chip *chip)
dd87eb3a 30{
dd87eb3a 31 unsigned long flags;
31d9d9b6 32 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
dd87eb3a 33
02725e74 34 if (!desc)
dd87eb3a 35 return -EINVAL;
dd87eb3a
TG
36
37 if (!chip)
38 chip = &no_irq_chip;
39
6b8ff312 40 desc->irq_data.chip = chip;
02725e74 41 irq_put_desc_unlock(desc, flags);
d72274e5
DD
42 /*
43 * For !CONFIG_SPARSE_IRQ make the irq show up in
f63b6a05 44 * allocated_irqs.
d72274e5 45 */
f63b6a05 46 irq_mark_irq(irq);
dd87eb3a
TG
47 return 0;
48}
a0cd9ca2 49EXPORT_SYMBOL(irq_set_chip);
dd87eb3a
TG
50
51/**
a0cd9ca2 52 * irq_set_type - set the irq trigger type for an irq
dd87eb3a 53 * @irq: irq number
0c5d1eb7 54 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
dd87eb3a 55 */
a0cd9ca2 56int irq_set_irq_type(unsigned int irq, unsigned int type)
dd87eb3a 57{
dd87eb3a 58 unsigned long flags;
31d9d9b6 59 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
02725e74 60 int ret = 0;
dd87eb3a 61
02725e74
TG
62 if (!desc)
63 return -EINVAL;
dd87eb3a 64
f2b662da 65 type &= IRQ_TYPE_SENSE_MASK;
a1ff541a 66 ret = __irq_set_trigger(desc, type);
02725e74 67 irq_put_desc_busunlock(desc, flags);
dd87eb3a
TG
68 return ret;
69}
a0cd9ca2 70EXPORT_SYMBOL(irq_set_irq_type);
dd87eb3a
TG
71
72/**
a0cd9ca2 73 * irq_set_handler_data - set irq handler data for an irq
dd87eb3a
TG
74 * @irq: Interrupt number
75 * @data: Pointer to interrupt specific data
76 *
77 * Set the hardware irq controller data for an irq
78 */
a0cd9ca2 79int irq_set_handler_data(unsigned int irq, void *data)
dd87eb3a 80{
dd87eb3a 81 unsigned long flags;
31d9d9b6 82 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
dd87eb3a 83
02725e74 84 if (!desc)
dd87eb3a 85 return -EINVAL;
af7080e0 86 desc->irq_common_data.handler_data = data;
02725e74 87 irq_put_desc_unlock(desc, flags);
dd87eb3a
TG
88 return 0;
89}
a0cd9ca2 90EXPORT_SYMBOL(irq_set_handler_data);
dd87eb3a 91
5b912c10 92/**
51906e77
AG
93 * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
94 * @irq_base: Interrupt number base
95 * @irq_offset: Interrupt number offset
96 * @entry: Pointer to MSI descriptor data
5b912c10 97 *
51906e77 98 * Set the MSI descriptor entry for an irq at offset
5b912c10 99 */
51906e77
AG
100int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
101 struct msi_desc *entry)
5b912c10 102{
5b912c10 103 unsigned long flags;
51906e77 104 struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
5b912c10 105
02725e74 106 if (!desc)
5b912c10 107 return -EINVAL;
b237721c 108 desc->irq_common_data.msi_desc = entry;
51906e77
AG
109 if (entry && !irq_offset)
110 entry->irq = irq_base;
02725e74 111 irq_put_desc_unlock(desc, flags);
5b912c10
EB
112 return 0;
113}
114
51906e77
AG
/**
 * irq_set_msi_desc - set MSI descriptor data for an irq
 * @irq:	Interrupt number
 * @entry:	Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	/* Single-irq convenience wrapper: offset zero. */
	return irq_set_msi_desc_off(irq, 0, entry);
}
126
dd87eb3a 127/**
a0cd9ca2 128 * irq_set_chip_data - set irq chip data for an irq
dd87eb3a
TG
129 * @irq: Interrupt number
130 * @data: Pointer to chip specific data
131 *
132 * Set the hardware irq chip data for an irq
133 */
a0cd9ca2 134int irq_set_chip_data(unsigned int irq, void *data)
dd87eb3a 135{
dd87eb3a 136 unsigned long flags;
31d9d9b6 137 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
dd87eb3a 138
02725e74 139 if (!desc)
dd87eb3a 140 return -EINVAL;
6b8ff312 141 desc->irq_data.chip_data = data;
02725e74 142 irq_put_desc_unlock(desc, flags);
dd87eb3a
TG
143 return 0;
144}
a0cd9ca2 145EXPORT_SYMBOL(irq_set_chip_data);
dd87eb3a 146
f303a6dd
TG
147struct irq_data *irq_get_irq_data(unsigned int irq)
148{
149 struct irq_desc *desc = irq_to_desc(irq);
150
151 return desc ? &desc->irq_data : NULL;
152}
153EXPORT_SYMBOL_GPL(irq_get_irq_data);
154
c1594b77
TG
155static void irq_state_clr_disabled(struct irq_desc *desc)
156{
801a0e9a 157 irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
c1594b77
TG
158}
159
160static void irq_state_set_disabled(struct irq_desc *desc)
161{
801a0e9a 162 irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
c1594b77
TG
163}
164
6e40262e
TG
165static void irq_state_clr_masked(struct irq_desc *desc)
166{
32f4125e 167 irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
6e40262e
TG
168}
169
170static void irq_state_set_masked(struct irq_desc *desc)
171{
32f4125e 172 irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
6e40262e
TG
173}
174
b4bc724e 175int irq_startup(struct irq_desc *desc, bool resend)
46999238 176{
b4bc724e
TG
177 int ret = 0;
178
c1594b77 179 irq_state_clr_disabled(desc);
46999238
TG
180 desc->depth = 0;
181
f8264e34 182 irq_domain_activate_irq(&desc->irq_data);
3aae994f 183 if (desc->irq_data.chip->irq_startup) {
b4bc724e 184 ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
6e40262e 185 irq_state_clr_masked(desc);
b4bc724e
TG
186 } else {
187 irq_enable(desc);
3aae994f 188 }
b4bc724e 189 if (resend)
0798abeb 190 check_irq_resend(desc);
b4bc724e 191 return ret;
46999238
TG
192}
193
194void irq_shutdown(struct irq_desc *desc)
195{
c1594b77 196 irq_state_set_disabled(desc);
46999238 197 desc->depth = 1;
50f7c032
TG
198 if (desc->irq_data.chip->irq_shutdown)
199 desc->irq_data.chip->irq_shutdown(&desc->irq_data);
ed585a65 200 else if (desc->irq_data.chip->irq_disable)
50f7c032
TG
201 desc->irq_data.chip->irq_disable(&desc->irq_data);
202 else
203 desc->irq_data.chip->irq_mask(&desc->irq_data);
f8264e34 204 irq_domain_deactivate_irq(&desc->irq_data);
6e40262e 205 irq_state_set_masked(desc);
46999238
TG
206}
207
87923470
TG
208void irq_enable(struct irq_desc *desc)
209{
c1594b77 210 irq_state_clr_disabled(desc);
50f7c032
TG
211 if (desc->irq_data.chip->irq_enable)
212 desc->irq_data.chip->irq_enable(&desc->irq_data);
213 else
214 desc->irq_data.chip->irq_unmask(&desc->irq_data);
6e40262e 215 irq_state_clr_masked(desc);
dd87eb3a
TG
216}
217
d671a605 218/**
f788e7bf 219 * irq_disable - Mark interrupt disabled
d671a605
AF
220 * @desc: irq descriptor which should be disabled
221 *
222 * If the chip does not implement the irq_disable callback, we
223 * use a lazy disable approach. That means we mark the interrupt
224 * disabled, but leave the hardware unmasked. That's an
225 * optimization because we avoid the hardware access for the
226 * common case where no interrupt happens after we marked it
227 * disabled. If an interrupt happens, then the interrupt flow
228 * handler masks the line at the hardware level and marks it
229 * pending.
230 */
50f7c032 231void irq_disable(struct irq_desc *desc)
89d694b9 232{
c1594b77 233 irq_state_set_disabled(desc);
50f7c032
TG
234 if (desc->irq_data.chip->irq_disable) {
235 desc->irq_data.chip->irq_disable(&desc->irq_data);
a61d8258 236 irq_state_set_masked(desc);
50f7c032 237 }
89d694b9
TG
238}
239
31d9d9b6
MZ
240void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
241{
242 if (desc->irq_data.chip->irq_enable)
243 desc->irq_data.chip->irq_enable(&desc->irq_data);
244 else
245 desc->irq_data.chip->irq_unmask(&desc->irq_data);
246 cpumask_set_cpu(cpu, desc->percpu_enabled);
247}
248
249void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
250{
251 if (desc->irq_data.chip->irq_disable)
252 desc->irq_data.chip->irq_disable(&desc->irq_data);
253 else
254 desc->irq_data.chip->irq_mask(&desc->irq_data);
255 cpumask_clear_cpu(cpu, desc->percpu_enabled);
256}
257
9205e31d 258static inline void mask_ack_irq(struct irq_desc *desc)
dd87eb3a 259{
9205e31d
TG
260 if (desc->irq_data.chip->irq_mask_ack)
261 desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
dd87eb3a 262 else {
e2c0f8ff 263 desc->irq_data.chip->irq_mask(&desc->irq_data);
22a49163
TG
264 if (desc->irq_data.chip->irq_ack)
265 desc->irq_data.chip->irq_ack(&desc->irq_data);
dd87eb3a 266 }
6e40262e 267 irq_state_set_masked(desc);
0b1adaa0
TG
268}
269
d4d5e089 270void mask_irq(struct irq_desc *desc)
0b1adaa0 271{
e2c0f8ff
TG
272 if (desc->irq_data.chip->irq_mask) {
273 desc->irq_data.chip->irq_mask(&desc->irq_data);
6e40262e 274 irq_state_set_masked(desc);
0b1adaa0
TG
275 }
276}
277
d4d5e089 278void unmask_irq(struct irq_desc *desc)
0b1adaa0 279{
0eda58b7
TG
280 if (desc->irq_data.chip->irq_unmask) {
281 desc->irq_data.chip->irq_unmask(&desc->irq_data);
6e40262e 282 irq_state_clr_masked(desc);
0b1adaa0 283 }
dd87eb3a
TG
284}
285
328a4978
TG
286void unmask_threaded_irq(struct irq_desc *desc)
287{
288 struct irq_chip *chip = desc->irq_data.chip;
289
290 if (chip->flags & IRQCHIP_EOI_THREADED)
291 chip->irq_eoi(&desc->irq_data);
292
293 if (chip->irq_unmask) {
294 chip->irq_unmask(&desc->irq_data);
295 irq_state_clr_masked(desc);
296 }
297}
298
399b5da2
TG
299/*
300 * handle_nested_irq - Handle a nested irq from a irq thread
301 * @irq: the interrupt number
302 *
303 * Handle interrupts which are nested into a threaded interrupt
304 * handler. The handler function is called inside the calling
305 * threads context.
306 */
307void handle_nested_irq(unsigned int irq)
308{
309 struct irq_desc *desc = irq_to_desc(irq);
310 struct irqaction *action;
311 irqreturn_t action_ret;
312
313 might_sleep();
314
239007b8 315 raw_spin_lock_irq(&desc->lock);
399b5da2 316
293a7a0a 317 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
b51bf95c 318 kstat_incr_irqs_this_cpu(desc);
399b5da2
TG
319
320 action = desc->action;
23812b9d
NJ
321 if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
322 desc->istate |= IRQS_PENDING;
399b5da2 323 goto out_unlock;
23812b9d 324 }
399b5da2 325
32f4125e 326 irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
239007b8 327 raw_spin_unlock_irq(&desc->lock);
399b5da2
TG
328
329 action_ret = action->thread_fn(action->irq, action->dev_id);
330 if (!noirqdebug)
0dcdbc97 331 note_interrupt(desc, action_ret);
399b5da2 332
239007b8 333 raw_spin_lock_irq(&desc->lock);
32f4125e 334 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
399b5da2
TG
335
336out_unlock:
239007b8 337 raw_spin_unlock_irq(&desc->lock);
399b5da2
TG
338}
339EXPORT_SYMBOL_GPL(handle_nested_irq);
340
fe200ae4
TG
341static bool irq_check_poll(struct irq_desc *desc)
342{
6954b75b 343 if (!(desc->istate & IRQS_POLL_INPROGRESS))
fe200ae4
TG
344 return false;
345 return irq_wait_for_poll(desc);
346}
347
c7bd3ec0
TG
348static bool irq_may_run(struct irq_desc *desc)
349{
9ce7a258
TG
350 unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;
351
352 /*
353 * If the interrupt is not in progress and is not an armed
354 * wakeup interrupt, proceed.
355 */
356 if (!irqd_has_set(&desc->irq_data, mask))
c7bd3ec0 357 return true;
9ce7a258
TG
358
359 /*
360 * If the interrupt is an armed wakeup source, mark it pending
361 * and suspended, disable it and notify the pm core about the
362 * event.
363 */
364 if (irq_pm_check_wakeup(desc))
365 return false;
366
367 /*
368 * Handle a potential concurrent poll on a different core.
369 */
c7bd3ec0
TG
370 return irq_check_poll(desc);
371}
372
dd87eb3a
TG
373/**
374 * handle_simple_irq - Simple and software-decoded IRQs.
dd87eb3a 375 * @desc: the interrupt description structure for this irq
dd87eb3a
TG
376 *
377 * Simple interrupts are either sent from a demultiplexing interrupt
378 * handler or come from hardware, where no interrupt hardware control
379 * is necessary.
380 *
381 * Note: The caller is expected to handle the ack, clear, mask and
382 * unmask issues if necessary.
383 */
bd0b9ac4 384void handle_simple_irq(struct irq_desc *desc)
dd87eb3a 385{
239007b8 386 raw_spin_lock(&desc->lock);
dd87eb3a 387
c7bd3ec0
TG
388 if (!irq_may_run(desc))
389 goto out_unlock;
fe200ae4 390
163ef309 391 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
b51bf95c 392 kstat_incr_irqs_this_cpu(desc);
dd87eb3a 393
23812b9d
NJ
394 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
395 desc->istate |= IRQS_PENDING;
dd87eb3a 396 goto out_unlock;
23812b9d 397 }
dd87eb3a 398
107781e7 399 handle_irq_event(desc);
dd87eb3a 400
dd87eb3a 401out_unlock:
239007b8 402 raw_spin_unlock(&desc->lock);
dd87eb3a 403}
edf76f83 404EXPORT_SYMBOL_GPL(handle_simple_irq);
dd87eb3a 405
ac563761
TG
406/*
407 * Called unconditionally from handle_level_irq() and only for oneshot
408 * interrupts from handle_fasteoi_irq()
409 */
410static void cond_unmask_irq(struct irq_desc *desc)
411{
412 /*
413 * We need to unmask in the following cases:
414 * - Standard level irq (IRQF_ONESHOT is not set)
415 * - Oneshot irq which did not wake the thread (caused by a
416 * spurious interrupt or a primary handler handling it
417 * completely).
418 */
419 if (!irqd_irq_disabled(&desc->irq_data) &&
420 irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
421 unmask_irq(desc);
422}
423
dd87eb3a
TG
424/**
425 * handle_level_irq - Level type irq handler
dd87eb3a 426 * @desc: the interrupt description structure for this irq
dd87eb3a
TG
427 *
428 * Level type interrupts are active as long as the hardware line has
429 * the active level. This may require to mask the interrupt and unmask
430 * it after the associated handler has acknowledged the device, so the
431 * interrupt line is back to inactive.
432 */
bd0b9ac4 433void handle_level_irq(struct irq_desc *desc)
dd87eb3a 434{
239007b8 435 raw_spin_lock(&desc->lock);
9205e31d 436 mask_ack_irq(desc);
dd87eb3a 437
c7bd3ec0
TG
438 if (!irq_may_run(desc))
439 goto out_unlock;
fe200ae4 440
163ef309 441 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
b51bf95c 442 kstat_incr_irqs_this_cpu(desc);
dd87eb3a
TG
443
444 /*
445 * If its disabled or no action available
446 * keep it masked and get out of here
447 */
d4dc0f90
TG
448 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
449 desc->istate |= IRQS_PENDING;
86998aa6 450 goto out_unlock;
d4dc0f90 451 }
dd87eb3a 452
1529866c 453 handle_irq_event(desc);
b25c340c 454
ac563761
TG
455 cond_unmask_irq(desc);
456
86998aa6 457out_unlock:
239007b8 458 raw_spin_unlock(&desc->lock);
dd87eb3a 459}
14819ea1 460EXPORT_SYMBOL_GPL(handle_level_irq);
dd87eb3a 461
78129576
TG
#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
/* Invoke the optional preflow handler before running the fasteoi flow. */
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif
471
328a4978
TG
472static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
473{
474 if (!(desc->istate & IRQS_ONESHOT)) {
475 chip->irq_eoi(&desc->irq_data);
476 return;
477 }
478 /*
479 * We need to unmask in the following cases:
480 * - Oneshot irq which did not wake the thread (caused by a
481 * spurious interrupt or a primary handler handling it
482 * completely).
483 */
484 if (!irqd_irq_disabled(&desc->irq_data) &&
485 irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
486 chip->irq_eoi(&desc->irq_data);
487 unmask_irq(desc);
488 } else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
489 chip->irq_eoi(&desc->irq_data);
490 }
491}
492
dd87eb3a 493/**
47c2a3aa 494 * handle_fasteoi_irq - irq handler for transparent controllers
dd87eb3a 495 * @desc: the interrupt description structure for this irq
dd87eb3a 496 *
47c2a3aa 497 * Only a single callback will be issued to the chip: an ->eoi()
dd87eb3a
TG
498 * call when the interrupt has been serviced. This enables support
499 * for modern forms of interrupt handlers, which handle the flow
500 * details in hardware, transparently.
501 */
bd0b9ac4 502void handle_fasteoi_irq(struct irq_desc *desc)
dd87eb3a 503{
328a4978
TG
504 struct irq_chip *chip = desc->irq_data.chip;
505
239007b8 506 raw_spin_lock(&desc->lock);
dd87eb3a 507
c7bd3ec0
TG
508 if (!irq_may_run(desc))
509 goto out;
dd87eb3a 510
163ef309 511 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
b51bf95c 512 kstat_incr_irqs_this_cpu(desc);
dd87eb3a
TG
513
514 /*
515 * If its disabled or no action available
76d21601 516 * then mask it and get out of here:
dd87eb3a 517 */
32f4125e 518 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
2a0d6fb3 519 desc->istate |= IRQS_PENDING;
e2c0f8ff 520 mask_irq(desc);
dd87eb3a 521 goto out;
98bb244b 522 }
c69e3758
TG
523
524 if (desc->istate & IRQS_ONESHOT)
525 mask_irq(desc);
526
78129576 527 preflow_handler(desc);
a7ae4de5 528 handle_irq_event(desc);
77694b40 529
328a4978 530 cond_unmask_eoi_irq(desc, chip);
ac563761 531
239007b8 532 raw_spin_unlock(&desc->lock);
77694b40
TG
533 return;
534out:
328a4978
TG
535 if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
536 chip->irq_eoi(&desc->irq_data);
537 raw_spin_unlock(&desc->lock);
dd87eb3a 538}
7cad45ee 539EXPORT_SYMBOL_GPL(handle_fasteoi_irq);
dd87eb3a
TG
540
541/**
542 * handle_edge_irq - edge type IRQ handler
dd87eb3a 543 * @desc: the interrupt description structure for this irq
dd87eb3a
TG
544 *
545 * Interrupt occures on the falling and/or rising edge of a hardware
25985edc 546 * signal. The occurrence is latched into the irq controller hardware
dd87eb3a
TG
547 * and must be acked in order to be reenabled. After the ack another
548 * interrupt can happen on the same source even before the first one
dfff0615 549 * is handled by the associated event handler. If this happens it
dd87eb3a
TG
550 * might be necessary to disable (mask) the interrupt depending on the
551 * controller hardware. This requires to reenable the interrupt inside
552 * of the loop which handles the interrupts which have arrived while
553 * the handler was running. If all pending interrupts are handled, the
554 * loop is left.
555 */
bd0b9ac4 556void handle_edge_irq(struct irq_desc *desc)
dd87eb3a 557{
239007b8 558 raw_spin_lock(&desc->lock);
dd87eb3a 559
163ef309 560 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
c3d7acd0 561
c7bd3ec0
TG
562 if (!irq_may_run(desc)) {
563 desc->istate |= IRQS_PENDING;
564 mask_ack_irq(desc);
565 goto out_unlock;
dd87eb3a 566 }
c3d7acd0 567
dd87eb3a 568 /*
c3d7acd0
TG
569 * If its disabled or no action available then mask it and get
570 * out of here.
dd87eb3a 571 */
c3d7acd0
TG
572 if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
573 desc->istate |= IRQS_PENDING;
574 mask_ack_irq(desc);
575 goto out_unlock;
dd87eb3a 576 }
c3d7acd0 577
b51bf95c 578 kstat_incr_irqs_this_cpu(desc);
dd87eb3a
TG
579
580 /* Start handling the irq */
22a49163 581 desc->irq_data.chip->irq_ack(&desc->irq_data);
dd87eb3a 582
dd87eb3a 583 do {
a60a5dc2 584 if (unlikely(!desc->action)) {
e2c0f8ff 585 mask_irq(desc);
dd87eb3a
TG
586 goto out_unlock;
587 }
588
589 /*
590 * When another irq arrived while we were handling
591 * one, we could have masked the irq.
592 * Renable it, if it was not disabled in meantime.
593 */
2a0d6fb3 594 if (unlikely(desc->istate & IRQS_PENDING)) {
32f4125e
TG
595 if (!irqd_irq_disabled(&desc->irq_data) &&
596 irqd_irq_masked(&desc->irq_data))
c1594b77 597 unmask_irq(desc);
dd87eb3a
TG
598 }
599
a60a5dc2 600 handle_irq_event(desc);
dd87eb3a 601
2a0d6fb3 602 } while ((desc->istate & IRQS_PENDING) &&
32f4125e 603 !irqd_irq_disabled(&desc->irq_data));
dd87eb3a 604
dd87eb3a 605out_unlock:
239007b8 606 raw_spin_unlock(&desc->lock);
dd87eb3a 607}
3911ff30 608EXPORT_SYMBOL(handle_edge_irq);
dd87eb3a 609
0521c8fb
TG
#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 * handle_edge_eoi_irq - edge eoi type IRQ handler
 * @desc:	the interrupt description structure for this irq
 *
 * Similar as the above handle_edge_irq, but using eoi and w/o the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	/*
	 * If its disabled or no action available then mask it and get
	 * out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	kstat_incr_irqs_this_cpu(desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif
656
dd87eb3a 657/**
24b26d42 658 * handle_percpu_irq - Per CPU local irq handler
dd87eb3a 659 * @desc: the interrupt description structure for this irq
dd87eb3a
TG
660 *
661 * Per CPU interrupts on SMP machines without locking requirements
662 */
bd0b9ac4 663void handle_percpu_irq(struct irq_desc *desc)
dd87eb3a 664{
35e857cb 665 struct irq_chip *chip = irq_desc_get_chip(desc);
dd87eb3a 666
b51bf95c 667 kstat_incr_irqs_this_cpu(desc);
dd87eb3a 668
849f061c
TG
669 if (chip->irq_ack)
670 chip->irq_ack(&desc->irq_data);
dd87eb3a 671
849f061c 672 handle_irq_event_percpu(desc, desc->action);
dd87eb3a 673
849f061c
TG
674 if (chip->irq_eoi)
675 chip->irq_eoi(&desc->irq_data);
dd87eb3a
TG
676}
677
31d9d9b6
MZ
678/**
679 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
31d9d9b6
MZ
680 * @desc: the interrupt description structure for this irq
681 *
682 * Per CPU interrupts on SMP machines without locking requirements. Same as
683 * handle_percpu_irq() above but with the following extras:
684 *
685 * action->percpu_dev_id is a pointer to percpu variables which
686 * contain the real device id for the cpu on which this handler is
687 * called
688 */
bd0b9ac4 689void handle_percpu_devid_irq(struct irq_desc *desc)
31d9d9b6
MZ
690{
691 struct irq_chip *chip = irq_desc_get_chip(desc);
692 struct irqaction *action = desc->action;
532d0d06 693 void *dev_id = raw_cpu_ptr(action->percpu_dev_id);
bd0b9ac4 694 unsigned int irq = irq_desc_get_irq(desc);
31d9d9b6
MZ
695 irqreturn_t res;
696
b51bf95c 697 kstat_incr_irqs_this_cpu(desc);
31d9d9b6
MZ
698
699 if (chip->irq_ack)
700 chip->irq_ack(&desc->irq_data);
701
702 trace_irq_handler_entry(irq, action);
703 res = action->handler(irq, dev_id);
704 trace_irq_handler_exit(irq, action, res);
705
706 if (chip->irq_eoi)
707 chip->irq_eoi(&desc->irq_data);
708}
709
dd87eb3a 710void
3b0f95be
RK
711__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
712 int is_chained, const char *name)
dd87eb3a 713{
091738a2 714 if (!handle) {
dd87eb3a 715 handle = handle_bad_irq;
091738a2 716 } else {
f86eff22
MZ
717 struct irq_data *irq_data = &desc->irq_data;
718#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
719 /*
720 * With hierarchical domains we might run into a
721 * situation where the outermost chip is not yet set
722 * up, but the inner chips are there. Instead of
723 * bailing we install the handler, but obviously we
724 * cannot enable/startup the interrupt at this point.
725 */
726 while (irq_data) {
727 if (irq_data->chip != &no_irq_chip)
728 break;
729 /*
730 * Bail out if the outer chip is not set up
731 * and the interrrupt supposed to be started
732 * right away.
733 */
734 if (WARN_ON(is_chained))
3b0f95be 735 return;
f86eff22
MZ
736 /* Try the parent */
737 irq_data = irq_data->parent_data;
738 }
739#endif
740 if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
3b0f95be 741 return;
f8b5473f 742 }
dd87eb3a 743
dd87eb3a
TG
744 /* Uninstall? */
745 if (handle == handle_bad_irq) {
6b8ff312 746 if (desc->irq_data.chip != &no_irq_chip)
9205e31d 747 mask_ack_irq(desc);
801a0e9a 748 irq_state_set_disabled(desc);
dd87eb3a
TG
749 desc->depth = 1;
750 }
751 desc->handle_irq = handle;
a460e745 752 desc->name = name;
dd87eb3a
TG
753
754 if (handle != handle_bad_irq && is_chained) {
1ccb4e61
TG
755 irq_settings_set_noprobe(desc);
756 irq_settings_set_norequest(desc);
7f1b1244 757 irq_settings_set_nothread(desc);
b4bc724e 758 irq_startup(desc, true);
dd87eb3a 759 }
3b0f95be
RK
760}
761
762void
763__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
764 const char *name)
765{
766 unsigned long flags;
767 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
768
769 if (!desc)
770 return;
771
772 __irq_do_set_handler(desc, handle, is_chained, name);
02725e74 773 irq_put_desc_busunlock(desc, flags);
dd87eb3a 774}
3836ca08 775EXPORT_SYMBOL_GPL(__irq_set_handler);
dd87eb3a 776
3b0f95be
RK
777void
778irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
779 void *data)
780{
781 unsigned long flags;
782 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
783
784 if (!desc)
785 return;
786
787 __irq_do_set_handler(desc, handle, 1, NULL);
af7080e0 788 desc->irq_common_data.handler_data = data;
3b0f95be
RK
789
790 irq_put_desc_busunlock(desc, flags);
791}
792EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data);
793
dd87eb3a 794void
3836ca08 795irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
a460e745 796 irq_flow_handler_t handle, const char *name)
dd87eb3a 797{
35e857cb 798 irq_set_chip(irq, chip);
3836ca08 799 __irq_set_handler(irq, handle, 0, name);
dd87eb3a 800}
b3ae66f2 801EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);
46f4f8f6 802
44247184 803void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
46f4f8f6 804{
46f4f8f6 805 unsigned long flags;
31d9d9b6 806 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
46f4f8f6 807
44247184 808 if (!desc)
46f4f8f6 809 return;
a005677b
TG
810 irq_settings_clr_and_set(desc, clr, set);
811
876dbd4c 812 irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
e1ef8241 813 IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
a005677b
TG
814 if (irq_settings_has_no_balance_set(desc))
815 irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
816 if (irq_settings_is_per_cpu(desc))
817 irqd_set(&desc->irq_data, IRQD_PER_CPU);
e1ef8241
TG
818 if (irq_settings_can_move_pcntxt(desc))
819 irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
0ef5ca1e
TG
820 if (irq_settings_is_level(desc))
821 irqd_set(&desc->irq_data, IRQD_LEVEL);
a005677b 822
876dbd4c
TG
823 irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));
824
02725e74 825 irq_put_desc_unlock(desc, flags);
46f4f8f6 826}
edf76f83 827EXPORT_SYMBOL_GPL(irq_modify_status);
0fdb4b25
DD
828
829/**
830 * irq_cpu_online - Invoke all irq_cpu_online functions.
831 *
832 * Iterate through all irqs and invoke the chip.irq_cpu_online()
833 * for each.
834 */
835void irq_cpu_online(void)
836{
837 struct irq_desc *desc;
838 struct irq_chip *chip;
839 unsigned long flags;
840 unsigned int irq;
841
842 for_each_active_irq(irq) {
843 desc = irq_to_desc(irq);
844 if (!desc)
845 continue;
846
847 raw_spin_lock_irqsave(&desc->lock, flags);
848
849 chip = irq_data_get_irq_chip(&desc->irq_data);
b3d42232
TG
850 if (chip && chip->irq_cpu_online &&
851 (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
32f4125e 852 !irqd_irq_disabled(&desc->irq_data)))
0fdb4b25
DD
853 chip->irq_cpu_online(&desc->irq_data);
854
855 raw_spin_unlock_irqrestore(&desc->lock, flags);
856 }
857}
858
859/**
860 * irq_cpu_offline - Invoke all irq_cpu_offline functions.
861 *
862 * Iterate through all irqs and invoke the chip.irq_cpu_offline()
863 * for each.
864 */
865void irq_cpu_offline(void)
866{
867 struct irq_desc *desc;
868 struct irq_chip *chip;
869 unsigned long flags;
870 unsigned int irq;
871
872 for_each_active_irq(irq) {
873 desc = irq_to_desc(irq);
874 if (!desc)
875 continue;
876
877 raw_spin_lock_irqsave(&desc->lock, flags);
878
879 chip = irq_data_get_irq_chip(&desc->irq_data);
b3d42232
TG
880 if (chip && chip->irq_cpu_offline &&
881 (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
32f4125e 882 !irqd_irq_disabled(&desc->irq_data)))
0fdb4b25
DD
883 chip->irq_cpu_offline(&desc->irq_data);
884
885 raw_spin_unlock_irqrestore(&desc->lock, flags);
886 }
887}
85f08c17
JL
888
889#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
3cfeffc2
SA
890/**
891 * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if
892 * NULL)
893 * @data: Pointer to interrupt specific data
894 */
895void irq_chip_enable_parent(struct irq_data *data)
896{
897 data = data->parent_data;
898 if (data->chip->irq_enable)
899 data->chip->irq_enable(data);
900 else
901 data->chip->irq_unmask(data);
902}
903
904/**
905 * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if
906 * NULL)
907 * @data: Pointer to interrupt specific data
908 */
909void irq_chip_disable_parent(struct irq_data *data)
910{
911 data = data->parent_data;
912 if (data->chip->irq_disable)
913 data->chip->irq_disable(data);
914 else
915 data->chip->irq_mask(data);
916}
917
85f08c17
JL
918/**
919 * irq_chip_ack_parent - Acknowledge the parent interrupt
920 * @data: Pointer to interrupt specific data
921 */
922void irq_chip_ack_parent(struct irq_data *data)
923{
924 data = data->parent_data;
925 data->chip->irq_ack(data);
926}
927
56e8abab
YC
928/**
929 * irq_chip_mask_parent - Mask the parent interrupt
930 * @data: Pointer to interrupt specific data
931 */
932void irq_chip_mask_parent(struct irq_data *data)
933{
934 data = data->parent_data;
935 data->chip->irq_mask(data);
936}
937
938/**
939 * irq_chip_unmask_parent - Unmask the parent interrupt
940 * @data: Pointer to interrupt specific data
941 */
942void irq_chip_unmask_parent(struct irq_data *data)
943{
944 data = data->parent_data;
945 data->chip->irq_unmask(data);
946}
947
948/**
949 * irq_chip_eoi_parent - Invoke EOI on the parent interrupt
950 * @data: Pointer to interrupt specific data
951 */
952void irq_chip_eoi_parent(struct irq_data *data)
953{
954 data = data->parent_data;
955 data->chip->irq_eoi(data);
956}
957
958/**
959 * irq_chip_set_affinity_parent - Set affinity on the parent interrupt
960 * @data: Pointer to interrupt specific data
961 * @dest: The affinity mask to set
962 * @force: Flag to enforce setting (disable online checks)
963 *
964 * Conditinal, as the underlying parent chip might not implement it.
965 */
966int irq_chip_set_affinity_parent(struct irq_data *data,
967 const struct cpumask *dest, bool force)
968{
969 data = data->parent_data;
970 if (data->chip->irq_set_affinity)
971 return data->chip->irq_set_affinity(data, dest, force);
b7560de1
GS
972
973 return -ENOSYS;
974}
975
976/**
977 * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
978 * @data: Pointer to interrupt specific data
979 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
980 *
981 * Conditional, as the underlying parent chip might not implement it.
982 */
983int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
984{
985 data = data->parent_data;
986
987 if (data->chip->irq_set_type)
988 return data->chip->irq_set_type(data, type);
56e8abab
YC
989
990 return -ENOSYS;
991}
992
85f08c17
JL
993/**
994 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
995 * @data: Pointer to interrupt specific data
996 *
997 * Iterate through the domain hierarchy of the interrupt and check
998 * whether a hw retrigger function exists. If yes, invoke it.
999 */
1000int irq_chip_retrigger_hierarchy(struct irq_data *data)
1001{
1002 for (data = data->parent_data; data; data = data->parent_data)
1003 if (data->chip && data->chip->irq_retrigger)
1004 return data->chip->irq_retrigger(data);
1005
6d4affea 1006 return 0;
85f08c17 1007}
08b55e2a 1008
0a4377de
JL
1009/**
1010 * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt
1011 * @data: Pointer to interrupt specific data
8505a81b 1012 * @vcpu_info: The vcpu affinity information
0a4377de
JL
1013 */
1014int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
1015{
1016 data = data->parent_data;
1017 if (data->chip->irq_set_vcpu_affinity)
1018 return data->chip->irq_set_vcpu_affinity(data, vcpu_info);
1019
1020 return -ENOSYS;
1021}
1022
08b55e2a
MZ
1023/**
1024 * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt
1025 * @data: Pointer to interrupt specific data
1026 * @on: Whether to set or reset the wake-up capability of this irq
1027 *
1028 * Conditional, as the underlying parent chip might not implement it.
1029 */
1030int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
1031{
1032 data = data->parent_data;
1033 if (data->chip->irq_set_wake)
1034 return data->chip->irq_set_wake(data, on);
1035
1036 return -ENOSYS;
1037}
85f08c17 1038#endif
515085ef
JL
1039
1040/**
1041 * irq_chip_compose_msi_msg - Componse msi message for a irq chip
1042 * @data: Pointer to interrupt specific data
1043 * @msg: Pointer to the MSI message
1044 *
1045 * For hierarchical domains we find the first chip in the hierarchy
1046 * which implements the irq_compose_msi_msg callback. For non
1047 * hierarchical we use the top level chip.
1048 */
1049int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
1050{
1051 struct irq_data *pos = NULL;
1052
1053#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
1054 for (; data; data = data->parent_data)
1055#endif
1056 if (data->chip && data->chip->irq_compose_msi_msg)
1057 pos = data;
1058 if (!pos)
1059 return -ENOSYS;
1060
1061 pos->chip->irq_compose_msi_msg(pos, msg);
1062
1063 return 0;
1064}
This page took 0.612929 seconds and 5 git commands to generate.