Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/arch/arm/kernel/irq.c | |
3 | * | |
4 | * Copyright (C) 1992 Linus Torvalds | |
5 | * Modifications for ARM processor Copyright (C) 1995-2000 Russell King. | |
6 | * | |
8749af68 RK |
7 | * Support for Dynamic Tick Timer Copyright (C) 2004-2005 Nokia Corporation. |
8 | * Dynamic Tick Timer written by Tony Lindgren <tony@atomide.com> and | |
9 | * Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>. | |
10 | * | |
1da177e4 LT |
11 | * This program is free software; you can redistribute it and/or modify |
12 | * it under the terms of the GNU General Public License version 2 as | |
13 | * published by the Free Software Foundation. | |
14 | * | |
15 | * This file contains the code used by various IRQ handling routines: | |
16 | * asking for different IRQ's should be done through these routines | |
17 | * instead of just grabbing them. Thus setups with different IRQ numbers | |
18 | * shouldn't result in any weird surprises, and installing new handlers | |
19 | * should be easier. | |
20 | * | |
21 | * IRQ's are in fact implemented a bit like signal handlers for the kernel. | |
22 | * Naturally it's not a 1:1 relation, but there are similarities. | |
23 | */ | |
1da177e4 | 24 | #include <linux/kernel_stat.h> |
1da177e4 LT |
25 | #include <linux/signal.h> |
26 | #include <linux/ioport.h> | |
27 | #include <linux/interrupt.h> | |
4a2581a0 | 28 | #include <linux/irq.h> |
ebafed7a | 29 | #include <linux/irqchip.h> |
1da177e4 LT |
30 | #include <linux/random.h> |
31 | #include <linux/smp.h> | |
32 | #include <linux/init.h> | |
33 | #include <linux/seq_file.h> | |
34 | #include <linux/errno.h> | |
35 | #include <linux/list.h> | |
36 | #include <linux/kallsyms.h> | |
37 | #include <linux/proc_fs.h> | |
05c76982 | 38 | #include <linux/export.h> |
1da177e4 | 39 | |
5a567d78 | 40 | #include <asm/exception.h> |
8ff1443c | 41 | #include <asm/mach/arch.h> |
897d8527 | 42 | #include <asm/mach/irq.h> |
8749af68 | 43 | #include <asm/mach/time.h> |
1da177e4 | 44 | |
4a2581a0 | 45 | unsigned long irq_err_count; |
1da177e4 | 46 | |
/*
 * Append the ARM-specific rows (FIQ owners, IPI counts, error count)
 * to the /proc/interrupts seq output.  @prec is the label column
 * width.  Always returns 0.
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
#ifdef CONFIG_FIQ
	/* FIQs are tracked separately from the normal IRQ descriptors. */
	show_fiq_list(p, prec);
#endif
#ifdef CONFIG_SMP
	/* Per-CPU inter-processor interrupt counters. */
	show_ipi_list(p, prec);
#endif
	/* irq_err_count (defined above in this file) counts bad IRQs. */
	seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
	return 0;
}
58 | ||
1da177e4 | 59 | /* |
a4841e39 RKAL |
60 | * handle_IRQ handles all hardware IRQ's. Decoded IRQs should |
61 | * not come via this function. Instead, they should provide their | |
62 | * own 'handler'. Used by platform code implementing C-based 1st | |
63 | * level decoding. | |
1da177e4 | 64 | */ |
a4841e39 | 65 | void handle_IRQ(unsigned int irq, struct pt_regs *regs) |
1da177e4 | 66 | { |
e6300155 | 67 | struct pt_regs *old_regs = set_irq_regs(regs); |
d8aa0251 DB |
68 | |
69 | irq_enter(); | |
1da177e4 LT |
70 | |
71 | /* | |
72 | * Some hardware gives randomly wrong interrupts. Rather | |
73 | * than crashing, do something sensible. | |
74 | */ | |
354e6f72 | 75 | if (unlikely(irq >= nr_irqs)) { |
7aa5514e AK |
76 | if (printk_ratelimit()) |
77 | printk(KERN_WARNING "Bad IRQ%u\n", irq); | |
78 | ack_bad_irq(irq); | |
79 | } else { | |
d8aa0251 | 80 | generic_handle_irq(irq); |
7aa5514e | 81 | } |
1da177e4 | 82 | |
1da177e4 | 83 | irq_exit(); |
e6300155 | 84 | set_irq_regs(old_regs); |
1da177e4 LT |
85 | } |
86 | ||
/*
 * asm_do_IRQ is the interface to be used from assembly code.
 * It simply forwards the already-decoded IRQ number to handle_IRQ().
 */
asmlinkage void __exception_irq_entry
asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	handle_IRQ(irq, regs);
}
95 | ||
1da177e4 LT |
96 | void set_irq_flags(unsigned int irq, unsigned int iflags) |
97 | { | |
1b7a2d90 | 98 | unsigned long clr = 0, set = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; |
1da177e4 | 99 | |
354e6f72 | 100 | if (irq >= nr_irqs) { |
1da177e4 LT |
101 | printk(KERN_ERR "Trying to set irq flags for IRQ%d\n", irq); |
102 | return; | |
103 | } | |
104 | ||
4a2581a0 | 105 | if (iflags & IRQF_VALID) |
1b7a2d90 | 106 | clr |= IRQ_NOREQUEST; |
4a2581a0 | 107 | if (iflags & IRQF_PROBE) |
1b7a2d90 | 108 | clr |= IRQ_NOPROBE; |
4a2581a0 | 109 | if (!(iflags & IRQF_NOAUTOEN)) |
1b7a2d90 TG |
110 | clr |= IRQ_NOAUTOEN; |
111 | /* Order is clear bits in "clr" then set bits in "set" */ | |
112 | irq_modify_status(irq, clr, set & ~clr); | |
1da177e4 | 113 | } |
05c76982 | 114 | EXPORT_SYMBOL_GPL(set_irq_flags); |
1da177e4 LT |
115 | |
116 | void __init init_IRQ(void) | |
117 | { | |
ebafed7a MR |
118 | if (IS_ENABLED(CONFIG_OF) && !machine_desc->init_irq) |
119 | irqchip_init(); | |
120 | else | |
121 | machine_desc->init_irq(); | |
1da177e4 LT |
122 | } |
123 | ||
73171d15 TP |
#ifdef CONFIG_MULTI_IRQ_HANDLER
/*
 * Register the platform's top-level IRQ dispatch routine.  Only the
 * first registration takes effect; subsequent calls are silently
 * ignored.
 */
void __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
{
	if (!handle_arch_irq)
		handle_arch_irq = handle_irq;
}
#endif
133 | ||
#ifdef CONFIG_SPARSE_IRQ
/*
 * Report the number of IRQs to the genirq core, preferring the
 * machine descriptor's count over the static NR_IRQS fallback.
 */
int __init arch_probe_nr_irqs(void)
{
	if (machine_desc->nr_irqs)
		nr_irqs = machine_desc->nr_irqs;
	else
		nr_irqs = NR_IRQS;
	return nr_irqs;
}
#endif
141 | ||
a054a811 | 142 | #ifdef CONFIG_HOTPLUG_CPU |
f7ede370 | 143 | |
/*
 * Move one interrupt away from the CPU that is going offline.
 *
 * Returns true if the IRQ's affinity had to be broken, i.e. its mask
 * no longer contained any online CPU and it was forced onto
 * cpu_online_mask.  Called from migrate_irqs() with desc->lock held
 * and local interrupts disabled.
 */
static bool migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity = d->affinity;
	struct irq_chip *c;
	bool ret = false;

	/*
	 * If this is a per-CPU interrupt, or the affinity does not
	 * include this CPU, then we have nothing to do.
	 */
	if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
		return false;

	/* No online CPU left in the requested affinity: force it. */
	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
		affinity = cpu_online_mask;
		ret = true;
	}

	c = irq_data_get_irq_chip(d);
	if (!c->irq_set_affinity)
		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
	else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
		/* Record the forced mask so the stored affinity stays true. */
		cpumask_copy(d->affinity, affinity);

	return ret;
}
171 | ||
a054a811 | 172 | /* |
78359cb8 RK |
173 | * The current CPU has been marked offline. Migrate IRQs off this CPU. |
174 | * If the affinity settings do not allow other CPUs, force them onto any | |
a054a811 | 175 | * available CPU. |
78359cb8 RK |
176 | * |
177 | * Note: we must iterate over all IRQs, whether they have an attached | |
178 | * action structure or not, as we need to get chained interrupts too. | |
a054a811 RK |
179 | */ |
180 | void migrate_irqs(void) | |
181 | { | |
78359cb8 | 182 | unsigned int i; |
354e6f72 | 183 | struct irq_desc *desc; |
61791244 RK |
184 | unsigned long flags; |
185 | ||
186 | local_irq_save(flags); | |
a054a811 | 187 | |
354e6f72 | 188 | for_each_irq_desc(i, desc) { |
342d00ae | 189 | bool affinity_broken; |
78359cb8 | 190 | |
61791244 | 191 | raw_spin_lock(&desc->lock); |
78359cb8 | 192 | affinity_broken = migrate_one_irq(desc); |
61791244 RK |
193 | raw_spin_unlock(&desc->lock); |
194 | ||
195 | if (affinity_broken && printk_ratelimit()) | |
78359cb8 RK |
196 | pr_warning("IRQ%u no longer affine to CPU%u\n", i, |
197 | smp_processor_id()); | |
a054a811 | 198 | } |
61791244 RK |
199 | |
200 | local_irq_restore(flags); | |
a054a811 RK |
201 | } |
202 | #endif /* CONFIG_HOTPLUG_CPU */ |