/*
 *  linux/arch/arm/kernel/irq.c
 *
 *  Copyright (C) 1992 Linus Torvalds
 *  Modifications for ARM processor Copyright (C) 1995-2000 Russell King.
 *
 *  Support for Dynamic Tick Timer Copyright (C) 2004-2005 Nokia Corporation.
 *  Dynamic Tick Timer written by Tony Lindgren <tony@atomide.com> and
 *  Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 *
 *  This file contains the code used by various IRQ handling routines:
 *  asking for different IRQ's should be done through these routines
 *  instead of just grabbing them. Thus setups with different IRQ numbers
 *  shouldn't result in any weird surprises, and installing new handlers
 *  should be easier.
 *
 *  IRQ's are in fact implemented a bit like signal handlers for the kernel.
 *  Naturally it's not a 1:1 relation, but there are similarities.
 */
1da177e4 LT |
24 | #include <linux/kernel_stat.h> |
25 | #include <linux/module.h> | |
26 | #include <linux/signal.h> | |
27 | #include <linux/ioport.h> | |
28 | #include <linux/interrupt.h> | |
4a2581a0 | 29 | #include <linux/irq.h> |
1da177e4 LT |
30 | #include <linux/ptrace.h> |
31 | #include <linux/slab.h> | |
32 | #include <linux/random.h> | |
33 | #include <linux/smp.h> | |
34 | #include <linux/init.h> | |
35 | #include <linux/seq_file.h> | |
36 | #include <linux/errno.h> | |
37 | #include <linux/list.h> | |
38 | #include <linux/kallsyms.h> | |
39 | #include <linux/proc_fs.h> | |
40 | ||
1da177e4 | 41 | #include <asm/system.h> |
8749af68 | 42 | #include <asm/mach/time.h> |
1da177e4 | 43 | |
1da177e4 LT |
44 | /* |
45 | * No architecture-specific irq_finish function defined in arm/arch/irqs.h. | |
46 | */ | |
47 | #ifndef irq_finish | |
48 | #define irq_finish(irq) do { } while (0) | |
49 | #endif | |
50 | ||
/*
 * Architecture-specific interrupt-controller setup hook, called once from
 * init_IRQ().  NOTE(review): presumably assigned by platform init code
 * before init_IRQ() runs (it is NULL here and __initdata) -- verify with
 * the machine setup path.
 */
void (*init_arch_irq)(void) __initdata = NULL;

/* Count of spurious/erroneous interrupts, shown in /proc/interrupts ("Err:"). */
unsigned long irq_err_count;
1da177e4 LT |
53 | |
54 | int show_interrupts(struct seq_file *p, void *v) | |
55 | { | |
56 | int i = *(loff_t *) v, cpu; | |
57 | struct irqaction * action; | |
58 | unsigned long flags; | |
59 | ||
60 | if (i == 0) { | |
61 | char cpuname[12]; | |
62 | ||
63 | seq_printf(p, " "); | |
64 | for_each_present_cpu(cpu) { | |
65 | sprintf(cpuname, "CPU%d", cpu); | |
66 | seq_printf(p, " %10s", cpuname); | |
67 | } | |
68 | seq_putc(p, '\n'); | |
69 | } | |
70 | ||
71 | if (i < NR_IRQS) { | |
4a2581a0 TG |
72 | spin_lock_irqsave(&irq_desc[i].lock, flags); |
73 | action = irq_desc[i].action; | |
1da177e4 LT |
74 | if (!action) |
75 | goto unlock; | |
76 | ||
77 | seq_printf(p, "%3d: ", i); | |
78 | for_each_present_cpu(cpu) | |
79 | seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[i]); | |
38c677cb | 80 | seq_printf(p, " %10s", irq_desc[i].chip->name ? : "-"); |
1da177e4 LT |
81 | seq_printf(p, " %s", action->name); |
82 | for (action = action->next; action; action = action->next) | |
83 | seq_printf(p, ", %s", action->name); | |
84 | ||
85 | seq_putc(p, '\n'); | |
86 | unlock: | |
4a2581a0 | 87 | spin_unlock_irqrestore(&irq_desc[i].lock, flags); |
1da177e4 LT |
88 | } else if (i == NR_IRQS) { |
89 | #ifdef CONFIG_ARCH_ACORN | |
90 | show_fiq_list(p, v); | |
91 | #endif | |
92 | #ifdef CONFIG_SMP | |
93 | show_ipi_list(p); | |
37ee16ae | 94 | show_local_irqs(p); |
1da177e4 LT |
95 | #endif |
96 | seq_printf(p, "Err: %10lu\n", irq_err_count); | |
97 | } | |
98 | return 0; | |
99 | } | |
100 | ||
4a2581a0 TG |
101 | /* Handle bad interrupts */ |
102 | static struct irq_desc bad_irq_desc = { | |
103 | .handle_irq = handle_bad_irq, | |
104 | .lock = SPIN_LOCK_UNLOCKED | |
105 | }; | |
1da177e4 LT |
106 | |
/*
 * do_IRQ handles all hardware IRQ's.  Decoded IRQs should not
 * come via this function.  Instead, they should provide their
 * own 'handler'.
 *
 * Entry point from the low-level assembly IRQ vector.  The ordering here
 * is load-bearing: save/restore the irq regs pointer around the whole
 * sequence, and bracket the handler with irq_enter()/irq_exit() so the
 * preempt/softirq accounting stays balanced.
 */
asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	struct irq_desc *desc = irq_desc + irq;

	/*
	 * Some hardware gives randomly wrong interrupts.  Rather
	 * than crashing, do something sensible: route out-of-range
	 * IRQ numbers to the bad_irq descriptor, whose handler just
	 * counts and reports them.
	 */
	if (irq >= NR_IRQS)
		desc = &bad_irq_desc;

	irq_enter();

	desc_handle_irq(irq, desc);

	/* AT91 specific workaround (no-op unless the platform defines it). */
	irq_finish(irq);

	irq_exit();
	set_irq_regs(old_regs);
}
134 | ||
1da177e4 LT |
135 | void set_irq_flags(unsigned int irq, unsigned int iflags) |
136 | { | |
10dd5ce2 | 137 | struct irq_desc *desc; |
1da177e4 LT |
138 | unsigned long flags; |
139 | ||
140 | if (irq >= NR_IRQS) { | |
141 | printk(KERN_ERR "Trying to set irq flags for IRQ%d\n", irq); | |
142 | return; | |
143 | } | |
144 | ||
145 | desc = irq_desc + irq; | |
4a2581a0 TG |
146 | spin_lock_irqsave(&desc->lock, flags); |
147 | desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; | |
148 | if (iflags & IRQF_VALID) | |
149 | desc->status &= ~IRQ_NOREQUEST; | |
150 | if (iflags & IRQF_PROBE) | |
151 | desc->status &= ~IRQ_NOPROBE; | |
152 | if (!(iflags & IRQF_NOAUTOEN)) | |
153 | desc->status &= ~IRQ_NOAUTOEN; | |
154 | spin_unlock_irqrestore(&desc->lock, flags); | |
1da177e4 LT |
155 | } |
156 | ||
157 | void __init init_IRQ(void) | |
158 | { | |
1da177e4 LT |
159 | int irq; |
160 | ||
4a2581a0 TG |
161 | for (irq = 0; irq < NR_IRQS; irq++) |
162 | irq_desc[irq].status |= IRQ_NOREQUEST | IRQ_DELAYED_DISABLE | | |
163 | IRQ_NOPROBE; | |
164 | ||
1da177e4 LT |
165 | #ifdef CONFIG_SMP |
166 | bad_irq_desc.affinity = CPU_MASK_ALL; | |
167 | bad_irq_desc.cpu = smp_processor_id(); | |
168 | #endif | |
1da177e4 | 169 | init_arch_irq(); |
1da177e4 LT |
170 | } |
171 | ||
a054a811 | 172 | #ifdef CONFIG_HOTPLUG_CPU |
f7ede370 | 173 | |
/*
 * Re-target @irq at @cpu via the controller's set_affinity hook, under
 * the descriptor lock.
 *
 * NOTE(review): desc->chip->set_affinity is called unconditionally --
 * assumes every routed IRQ's chip provides it on SMP platforms; verify.
 */
static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
{
	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->cpu, cpu);

	spin_lock_irq(&desc->lock);
	desc->chip->set_affinity(irq, cpumask_of_cpu(cpu));
	spin_unlock_irq(&desc->lock);
}
182 | ||
/*
 * The CPU has been marked offline.  Migrate IRQs off this CPU.  If
 * the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 */
void migrate_irqs(void)
{
	unsigned int i, cpu = smp_processor_id();

	for (i = 0; i < NR_IRQS; i++) {
		struct irq_desc *desc = irq_desc + i;

		/* Only IRQs currently homed on the dying CPU need to move. */
		if (desc->cpu == cpu) {
			unsigned int newcpu = any_online_cpu(desc->affinity);

			/*
			 * No online CPU left in the allowed mask: warn
			 * (rate-limited) and widen the affinity to all
			 * CPUs before picking again.
			 */
			if (newcpu == NR_CPUS) {
				if (printk_ratelimit())
					printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
					       i, cpu);

				cpus_setall(desc->affinity);
				newcpu = any_online_cpu(desc->affinity);
			}

			route_irq(desc, i, newcpu);
		}
	}
}
#endif /* CONFIG_HOTPLUG_CPU */
211 | #endif /* CONFIG_HOTPLUG_CPU */ |