/*
 * Code to handle x86 style IRQs plus some generic interrupt stuff.
 *
 * Copyright (C) 1992 Linus Torvalds
 * Copyright (C) 1994, 1995, 1996, 1997, 1998 Ralf Baechle
 * Copyright (C) 1999 SuSE GmbH (Philipp Rumpf, prumpf@tux.org)
 * Copyright (C) 1999-2000 Grant Grundler
 * Copyright (c) 2005 Matthew Wilcox
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/bitops.h>
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/io.h>

#include <asm/smp.h>

#undef PARISC_IRQ_CR16_COUNTS

extern irqreturn_t timer_interrupt(int, void *, struct pt_regs *);
extern irqreturn_t ipi_interrupt(int, void *, struct pt_regs *);

#define EIEM_MASK(irq)	(1UL << (CPU_IRQ_MAX - (irq)))

/* Bits in EIEM correlate with cpu_irq_action[].
** Numbered *Big Endian*! (ie bit 0 is MSB)
*/
static volatile unsigned long cpu_eiem = 0;
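
/*
 * For example (as implied by the MSb-first walk in do_cpu_irq_mask()
 * below): EIEM_MASK(TIMER_IRQ) is the MSB of the EIEM/EIRR word, and
 * EIEM_MASK(CPU_IRQ_MAX) is 1UL, the LSB.  That is, lower IRQ numbers
 * occupy higher-order bits.
 */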

static void cpu_disable_irq(unsigned int irq)
{
	unsigned long eirr_bit = EIEM_MASK(irq);

	cpu_eiem &= ~eirr_bit;
	/* Do nothing on the other CPUs.  If they get this interrupt,
	 * the "& cpu_eiem" in do_cpu_irq_mask() ensures they won't
	 * handle it, and the set_eiem() at the bottom will ensure it
	 * then gets disabled */
}

static void cpu_enable_irq(unsigned int irq)
{
	unsigned long eirr_bit = EIEM_MASK(irq);

	cpu_eiem |= eirr_bit;

	/* FIXME: while our interrupts aren't nested, we cannot reset
	 * the eiem mask if we're already in an interrupt.  Once we
	 * implement nested interrupts, this can go away
	 */
	if (!in_interrupt())
		set_eiem(cpu_eiem);

	/* This is just a simple NOP IPI.  But what it does is cause
	 * all the other CPUs to do a set_eiem(cpu_eiem) at the end
	 * of the interrupt handler */
	smp_send_all_nop();
}

static unsigned int cpu_startup_irq(unsigned int irq)
{
	cpu_enable_irq(irq);
	return 0;
}

void no_ack_irq(unsigned int irq) { }
void no_end_irq(unsigned int irq) { }

#ifdef CONFIG_SMP
int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
{
	int cpu_dest;

	/* timer and ipi have to always be received on all CPUs */
	if (irq == TIMER_IRQ || irq == IPI_IRQ) {
		/* Bad linux design decision.  The mask has already
		 * been set; we must reset it */
		irq_affinity[irq] = CPU_MASK_ALL;
		return -EINVAL;
	}

	/* whatever mask they set, we just allow one CPU */
	cpu_dest = first_cpu(*dest);
	*dest = cpumask_of_cpu(cpu_dest);

	return 0;
}

static void cpu_set_affinity_irq(unsigned int irq, cpumask_t dest)
{
	if (cpu_check_affinity(irq, &dest))
		return;

	irq_affinity[irq] = dest;
}
#endif

static struct hw_interrupt_type cpu_interrupt_type = {
	.typename	= "CPU",
	.startup	= cpu_startup_irq,
	.shutdown	= cpu_disable_irq,
	.enable		= cpu_enable_irq,
	.disable	= cpu_disable_irq,
	.ack		= no_ack_irq,
	.end		= no_end_irq,
#ifdef CONFIG_SMP
	.set_affinity	= cpu_set_affinity_irq,
#endif
};

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	unsigned long flags;

	if (i == 0) {
		seq_puts(p, "    ");
		for_each_online_cpu(j)
			seq_printf(p, "       CPU%d", j);

#ifdef PARISC_IRQ_CR16_COUNTS
		seq_printf(p, " [min/avg/max] (CPU cycle counts)");
#endif
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		struct irqaction *action;

		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifdef CONFIG_SMP
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#else
		seq_printf(p, "%10u ", kstat_irqs(i));
#endif

		seq_printf(p, " %14s", irq_desc[i].handler->typename);
#ifndef PARISC_IRQ_CR16_COUNTS
		seq_printf(p, " %s", action->name);

		while ((action = action->next))
			seq_printf(p, ", %s", action->name);
#else
		for ( ; action; action = action->next) {
			unsigned int k, avg, min, max;

			min = max = action->cr16_hist[0];

			for (avg = k = 0; k < PARISC_CR16_HIST_SIZE; k++) {
				int hist = action->cr16_hist[k];

				if (hist) {
					avg += hist;
				} else
					break;

				if (hist > max)
					max = hist;
				if (hist < min)
					min = hist;
			}

			/* avoid dividing by zero on an empty histogram */
			if (k)
				avg /= k;
			seq_printf(p, " %s[%d/%d/%d]", action->name,
				   min, avg, max);
		}
#endif

		seq_putc(p, '\n');
 skip:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	}

	return 0;
}


/*
** The following form a "set": Virtual IRQ, Transaction Address, Trans Data.
** Respectively, these map to IRQ region+EIRR, Processor HPA, EIRR bit.
**
** To use txn_XXX() interfaces, get a Virtual IRQ first.
** Then use that to get the Transaction address and data.
*/
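
/*
 * Illustrative driver-side sequence (a sketch, not code from this file;
 * the "8" below just matches the iosapic width documented at
 * txn_alloc_irq()):
 *
 *	int virt_irq = txn_alloc_irq(8);		1) Virtual IRQ
 *	unsigned long addr = txn_alloc_addr(virt_irq);	2) Transaction Address
 *	unsigned int data = txn_alloc_data(virt_irq);	3) Trans Data
 *
 * The I/O subsystem is then programmed to write "data" to "addr" in
 * order to raise the interrupt on the target CPU (compare the
 * gsc_writel() rethrow in do_cpu_irq_mask() below).
 */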

int cpu_claim_irq(unsigned int irq, struct hw_interrupt_type *type, void *data)
{
	if (irq_desc[irq].action)
		return -EBUSY;
	if (irq_desc[irq].handler != &cpu_interrupt_type)
		return -EBUSY;

	if (type) {
		irq_desc[irq].handler = type;
		irq_desc[irq].handler_data = data;
		cpu_interrupt_type.enable(irq);
	}
	return 0;
}

int txn_claim_irq(int irq)
{
	return cpu_claim_irq(irq, NULL, NULL) ? -1 : irq;
}

/*
 * The bits_wide parameter accommodates the limitations of the HW/SW which
 * use these bits:
 * Legacy PA I/O (GSC/NIO):  5 bits (architected EIM register)
 * V-class (EPIC):           6 bits
 * N/L/A-class (iosapic):    8 bits
 * PCI 2.2 MSI:             16 bits
 * Some PCI devices:        32 bits (Symbios SCSI/ATM/HyperFabric)
 *
 * On the service provider side:
 * o PA 1.1 (and PA 2.0 narrow mode)  5 bits (width of EIR register)
 * o PA 2.0 wide mode                 6 bits (per processor)
 * o IA64                             8 bits (256 total)
 *
 * So a Legacy PA I/O device on a PA 2.0 box can't use all the bits supported
 * by the processor...and the N/L-class I/O subsystem supports more bits than
 * PA 2.0 has.  The first case is the problem.
 */
int txn_alloc_irq(unsigned int bits_wide)
{
	int irq;

	/* never return irq 0 because that's the interval timer */
	for (irq = CPU_IRQ_BASE + 1; irq <= CPU_IRQ_MAX; irq++) {
		if (cpu_claim_irq(irq, NULL, NULL) < 0)
			continue;
		if ((irq - CPU_IRQ_BASE) >= (1 << bits_wide))
			continue;
		return irq;
	}

	/* unlikely, but be prepared */
	return -1;
}
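
/*
 * Worked example (hypothetical caller): legacy GSC hardware with its
 * 5-bit EIM register would pass bits_wide == 5, so only EIRR offsets
 * below (1 << 5) == 32, i.e. irq < CPU_IRQ_BASE + 32, can be returned
 * to it, even if the processor supports more external interrupt bits.
 */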

unsigned long txn_affinity_addr(unsigned int irq, int cpu)
{
#ifdef CONFIG_SMP	/* irq_affinity[] only exists on SMP builds */
	irq_affinity[irq] = cpumask_of_cpu(cpu);
#endif

	return cpu_data[cpu].txn_addr;
}

unsigned long txn_alloc_addr(unsigned int virt_irq)
{
	static int next_cpu = -1;

	next_cpu++; /* assign to "next" CPU we want this bugger on */

	/* validate entry */
	while ((next_cpu < NR_CPUS) && (!cpu_data[next_cpu].txn_addr ||
					!cpu_online(next_cpu)))
		next_cpu++;

	if (next_cpu >= NR_CPUS)
		next_cpu = 0;	/* nothing else, assign monarch */

	return txn_affinity_addr(virt_irq, next_cpu);
}


unsigned int txn_alloc_data(unsigned int virt_irq)
{
	return virt_irq - CPU_IRQ_BASE;
}

/* ONLY called from entry.S:intr_extint() */
void do_cpu_irq_mask(struct pt_regs *regs)
{
	unsigned long eirr_val;

	irq_enter();

	/*
	 * Don't allow TIMER or IPI nested interrupts.
	 * Allowing any single interrupt to nest can lead to that CPU
	 * handling interrupts with all enabled interrupts unmasked.
	 */
	set_eiem(0UL);

	/* 1) only process IRQs that are enabled/unmasked (cpu_eiem)
	 * 2) We loop here on EIRR contents in order to avoid
	 *    nested interrupts or having to take another interrupt
	 *    when we could have just handled it right away.
	 */
	for (;;) {
		unsigned long bit = (1UL << (BITS_PER_LONG - 1));
		unsigned int irq;

		eirr_val = mfctl(23) & cpu_eiem;
		if (!eirr_val)
			break;

		mtctl(eirr_val, 23); /* reset bits we are going to process */

		/* Work our way from MSb to LSb...same order we alloc EIRs */
		for (irq = TIMER_IRQ; eirr_val && bit; bit >>= 1, irq++) {
#ifdef CONFIG_SMP	/* irq_affinity[] only exists on SMP builds */
			cpumask_t dest = irq_affinity[irq];
#endif
			if (!(bit & eirr_val))
				continue;

			/* clear bit in mask - can exit loop sooner */
			eirr_val &= ~bit;

#ifdef CONFIG_SMP
			/* FIXME: because generic set affinity mucks
			 * with the affinity before sending it to us
			 * we can get the situation where the affinity is
			 * wrong for our CPU type interrupts */
			if (irq != TIMER_IRQ && irq != IPI_IRQ &&
			    !cpu_isset(smp_processor_id(), dest)) {
				int cpu = first_cpu(dest);

				printk(KERN_DEBUG "rethrowing irq %d from %d to %d\n",
					irq, smp_processor_id(), cpu);
				gsc_writel(irq + CPU_IRQ_BASE,
					cpu_data[cpu].hpa);
				continue;
			}
#endif
			__do_IRQ(irq, regs);
		}
	}

	set_eiem(cpu_eiem);	/* restore original mask */
	irq_exit();
}


static struct irqaction timer_action = {
	.handler = timer_interrupt,
	.name = "timer",
	.flags = SA_INTERRUPT,
};

#ifdef CONFIG_SMP
static struct irqaction ipi_action = {
	.handler = ipi_interrupt,
	.name = "IPI",
	.flags = SA_INTERRUPT,
};
#endif

static void claim_cpu_irqs(void)
{
	int i;

	for (i = CPU_IRQ_BASE; i <= CPU_IRQ_MAX; i++) {
		irq_desc[i].handler = &cpu_interrupt_type;
	}

	irq_desc[TIMER_IRQ].action = &timer_action;
	irq_desc[TIMER_IRQ].status |= IRQ_PER_CPU;
#ifdef CONFIG_SMP
	irq_desc[IPI_IRQ].action = &ipi_action;
	irq_desc[IPI_IRQ].status |= IRQ_PER_CPU;
#endif
}

void __init init_IRQ(void)
{
	local_irq_disable();	/* PARANOID - should already be disabled */
	mtctl(~0UL, 23);	/* EIRR : clear all pending external intr */
	claim_cpu_irqs();
#ifdef CONFIG_SMP
	if (!cpu_eiem)
		cpu_eiem = EIEM_MASK(IPI_IRQ) | EIEM_MASK(TIMER_IRQ);
#else
	cpu_eiem = EIEM_MASK(TIMER_IRQ);
#endif
	set_eiem(cpu_eiem);	/* EIEM : enable all external intr */
}

void hw_resend_irq(struct hw_interrupt_type *type, unsigned int irq)
{
	/* XXX: Needs to be written.  We managed without it so far, but
	 * we really ought to write it.
	 */
}

void ack_bad_irq(unsigned int irq)
{
	printk(KERN_ERR "unexpected IRQ %d\n", irq);
}