MIPS: Octeon: Move MSI code out of octeon-irq.c.
arch/mips/cavium-octeon/octeon-irq.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2008 Cavium Networks
 */
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/smp.h>

#include <asm/octeon/octeon.h>

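/*
 * These locks serialize the read-modify-write sequences performed on
 * the per-core CIU interrupt enable registers below.
 */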
static DEFINE_RAW_SPINLOCK(octeon_irq_ciu0_lock);
static DEFINE_RAW_SPINLOCK(octeon_irq_ciu1_lock);

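/*
 * Map a Linux CPU number to the hardware core id used to index the
 * per-core CIU registers.
 */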
static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
	return cpu_logical_map(cpu);
#else
	return cvmx_get_core_num();
#endif
}

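/*
 * The eight MIPS core interrupt lines (SW0/SW1 plus IP2-IP7) are
 * masked and unmasked through the IM bits of the CP0 Status register.
 */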
static void octeon_irq_core_ack(unsigned int irq)
{
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << bit);
	/* The two user interrupts must be cleared manually. */
	if (bit < 2)
		clear_c0_cause(0x100 << bit);
}

static void octeon_irq_core_eoi(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * If an IRQ is being processed while we are disabling it the
	 * handler will attempt to unmask the interrupt after it has
	 * been disabled.
	 */
	if (desc->status & IRQ_DISABLED)
		return;
	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	set_c0_status(0x100 << bit);
}

static void octeon_irq_core_enable(unsigned int irq)
{
	unsigned long flags;
	unsigned int bit = irq - OCTEON_IRQ_SW0;

	/*
	 * We need to disable interrupts to make sure our updates are
	 * atomic.
	 */
	local_irq_save(flags);
	set_c0_status(0x100 << bit);
	local_irq_restore(flags);
}

static void octeon_irq_core_disable_local(unsigned int irq)
{
	unsigned long flags;
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * We need to disable interrupts to make sure our updates are
	 * atomic.
	 */
	local_irq_save(flags);
	clear_c0_status(0x100 << bit);
	local_irq_restore(flags);
}

static void octeon_irq_core_disable(unsigned int irq)
{
#ifdef CONFIG_SMP
	on_each_cpu((void (*)(void *)) octeon_irq_core_disable_local,
		    (void *) (long) irq, 1);
#else
	octeon_irq_core_disable_local(irq);
#endif
}

static struct irq_chip octeon_irq_chip_core = {
	.name = "Core",
	.enable = octeon_irq_core_enable,
	.disable = octeon_irq_core_disable,
	.ack = octeon_irq_core_ack,
	.eoi = octeon_irq_core_eoi,
};
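/*
 * CIU sum0 sources are signalled to the core on IP2 and masked through
 * CVMX_CIU_INTX_EN0(coreid * 2); sum1 sources arrive on IP3 and are
 * masked through CVMX_CIU_INTX_EN1(coreid * 2 + 1).
 */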
static void octeon_irq_ciu0_ack(unsigned int irq)
{
	/*
	 * In order to avoid any locking accessing the CIU, we
	 * acknowledge CIU interrupts by disabling all of them. This
	 * way we can use a per core register and avoid any out of
	 * core locking requirements. This has the side effect that
	 * CIU interrupts can't be processed recursively.
	 *
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << 2);
}

static void octeon_irq_ciu0_eoi(unsigned int irq)
{
	/*
	 * Enable all CIU interrupts again. We don't need to disable
	 * IRQs to make these atomic since they are already disabled
	 * earlier in the low level interrupt code.
	 */
	set_c0_status(0x100 << 2);
}

static void octeon_irq_ciu0_enable(unsigned int irq)
{
	int coreid = cvmx_get_core_num();
	unsigned long flags;
	uint64_t en0;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
	en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	en0 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
}

static void octeon_irq_ciu0_disable(unsigned int irq)
{
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */
	unsigned long flags;
	uint64_t en0;
	int cpu;
	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
		en0 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
}

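/*
 * On the newer chips the EN0/EN1 registers also have W1S ("write one
 * to set") and W1C ("write one to clear") aliases. A single write
 * changes only the selected bit, so no read-modify-write sequence or
 * lock is needed.
 */
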
/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_enable_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}

/*
 * Disable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_ack_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
}

/*
 * CIU timer type interrupts must be acknowledged by writing a '1' bit
 * to their sum0 bit.
 */
static void octeon_irq_ciu0_timer_ack(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2;
	uint64_t mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
	cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
}

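/*
 * The combined handlers below first clear the SUM0 bit and then apply
 * the chip-appropriate ack: masking all CIU interrupts on the older
 * chips (v1), or clearing the single enable bit via W1C on the newer
 * ones (v2).
 */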
static void octeon_irq_ciu0_timer_ack_v1(unsigned int irq)
{
	octeon_irq_ciu0_timer_ack(irq);
	octeon_irq_ciu0_ack(irq);
}

static void octeon_irq_ciu0_timer_ack_v2(unsigned int irq)
{
	octeon_irq_ciu0_timer_ack(irq);
	octeon_irq_ciu0_ack_v2(irq);
}

/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_eoi_v2(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	int index = cvmx_get_core_num() * 2;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	if ((desc->status & IRQ_DISABLED) == 0)
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_disable_all_v2(unsigned int irq)
{
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
	int index;
	int cpu;
	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2;
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
	}
}

#ifdef CONFIG_SMP
static int octeon_irq_ciu0_set_affinity(unsigned int irq,
					const struct cpumask *dest)
{
	int cpu;
	unsigned long flags;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		uint64_t en0 =
			cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
		if (cpumask_test_cpu(cpu, dest))
			en0 |= 1ull << bit;
		else
			en0 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);

	return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu0_set_affinity_v2(unsigned int irq,
					   const struct cpumask *dest)
{
	int cpu;
	int index;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2;
		if (cpumask_test_cpu(cpu, dest))
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
	}
	return 0;
}
#endif

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu0_v2 = {
	.name = "CIU0",
	.enable = octeon_irq_ciu0_enable_v2,
	.disable = octeon_irq_ciu0_disable_all_v2,
	.ack = octeon_irq_ciu0_ack_v2,
	.eoi = octeon_irq_ciu0_eoi_v2,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu0_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu0 = {
	.name = "CIU0",
	.enable = octeon_irq_ciu0_enable,
	.disable = octeon_irq_ciu0_disable,
	.ack = octeon_irq_ciu0_ack,
	.eoi = octeon_irq_ciu0_eoi,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu0_set_affinity,
#endif
};

static struct irq_chip octeon_irq_chip_ciu0_timer_v2 = {
	.name = "CIU0-T",
	.enable = octeon_irq_ciu0_enable_v2,
	.disable = octeon_irq_ciu0_disable_all_v2,
	.ack = octeon_irq_ciu0_timer_ack_v2,
	.eoi = octeon_irq_ciu0_eoi_v2,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu0_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu0_timer = {
	.name = "CIU0-T",
	.enable = octeon_irq_ciu0_enable,
	.disable = octeon_irq_ciu0_disable,
	.ack = octeon_irq_ciu0_timer_ack_v1,
	.eoi = octeon_irq_ciu0_eoi,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu0_set_affinity,
#endif
};

static void octeon_irq_ciu1_ack(unsigned int irq)
{
	/*
	 * In order to avoid any locking accessing the CIU, we
	 * acknowledge CIU interrupts by disabling all of them. This
	 * way we can use a per core register and avoid any out of
	 * core locking requirements. This has the side effect that
	 * CIU interrupts can't be processed recursively. We don't
	 * need to disable IRQs to make these atomic since they are
	 * already disabled earlier in the low level interrupt code.
	 */
	clear_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_eoi(unsigned int irq)
{
	/*
	 * Enable all CIU interrupts again. We don't need to disable
	 * IRQs to make these atomic since they are already disabled
	 * earlier in the low level interrupt code.
	 */
	set_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_enable(unsigned int irq)
{
	int coreid = cvmx_get_core_num();
	unsigned long flags;
	uint64_t en1;
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */

	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
	en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	en1 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
}

static void octeon_irq_ciu1_disable(unsigned int irq)
{
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
	unsigned long flags;
	uint64_t en1;
	int cpu;
	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
		en1 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
}

/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_enable_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2 + 1;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
}

/*
 * Disable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_ack_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2 + 1;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
}

/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_eoi_v2(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	int index = cvmx_get_core_num() * 2 + 1;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

	if ((desc->status & IRQ_DISABLED) == 0)
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_disable_all_v2(unsigned int irq)
{
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
	int index;
	int cpu;
	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2 + 1;
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
	}
}

#ifdef CONFIG_SMP
static int octeon_irq_ciu1_set_affinity(unsigned int irq,
					const struct cpumask *dest)
{
	int cpu;
	unsigned long flags;
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */

	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		uint64_t en1 =
			cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
		if (cpumask_test_cpu(cpu, dest))
			en1 |= 1ull << bit;
		else
			en1 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);

	return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu1_set_affinity_v2(unsigned int irq,
					   const struct cpumask *dest)
{
	int cpu;
	int index;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2 + 1;
		if (cpumask_test_cpu(cpu, dest))
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
	}
	return 0;
}
#endif

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu1_v2 = {
	.name = "CIU1",
	.enable = octeon_irq_ciu1_enable_v2,
	.disable = octeon_irq_ciu1_disable_all_v2,
	.ack = octeon_irq_ciu1_ack_v2,
	.eoi = octeon_irq_ciu1_eoi_v2,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu1_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu1 = {
	.name = "CIU1",
	.enable = octeon_irq_ciu1_enable,
	.disable = octeon_irq_ciu1_disable,
	.ack = octeon_irq_ciu1_ack,
	.eoi = octeon_irq_ciu1_eoi,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu1_set_affinity,
#endif
};

void __init arch_init_irq(void)
{
	int irq;
	struct irq_chip *chip0;
	struct irq_chip *chip0_timer;
	struct irq_chip *chip1;

#ifdef CONFIG_SMP
	/* Set the default affinity to the boot cpu. */
	cpumask_clear(irq_default_affinity);
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif

	if (NR_IRQS < OCTEON_IRQ_LAST)
		pr_err("octeon_irq_init: NR_IRQS is set too low\n");

	if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X)) {
		chip0 = &octeon_irq_chip_ciu0_v2;
		chip0_timer = &octeon_irq_chip_ciu0_timer_v2;
		chip1 = &octeon_irq_chip_ciu1_v2;
	} else {
		chip0 = &octeon_irq_chip_ciu0;
		chip0_timer = &octeon_irq_chip_ciu0_timer;
		chip1 = &octeon_irq_chip_ciu1;
	}

	/* 0 - 15 reserved for i8259 master and slave controller. */

	/* 17 - 23 MIPS internal */
	for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_core,
					 handle_percpu_irq);
	}

	/* 24 - 87 CIU_INT_SUM0 */
	for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
		switch (irq) {
		case OCTEON_IRQ_GMX_DRP0:
		case OCTEON_IRQ_GMX_DRP1:
		case OCTEON_IRQ_IPD_DRP:
		case OCTEON_IRQ_KEY_ZERO:
		case OCTEON_IRQ_TIMER0:
		case OCTEON_IRQ_TIMER1:
		case OCTEON_IRQ_TIMER2:
		case OCTEON_IRQ_TIMER3:
			set_irq_chip_and_handler(irq, chip0_timer,
						 handle_percpu_irq);
			break;
		default:
			set_irq_chip_and_handler(irq, chip0,
						 handle_percpu_irq);
			break;
		}
	}

	/* 88 - 151 CIU_INT_SUM1 */
	for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_RESERVED151; irq++) {
		set_irq_chip_and_handler(irq, chip1, handle_percpu_irq);
	}

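	/* Unmask IP2 and IP3 in CP0 Status; these carry the CIU lines. */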
	set_c0_status(0x300 << 2);
}

asmlinkage void plat_irq_dispatch(void)
{
	const unsigned long core_id = cvmx_get_core_num();
	const uint64_t ciu_sum0_address = CVMX_CIU_INTX_SUM0(core_id * 2);
	const uint64_t ciu_en0_address = CVMX_CIU_INTX_EN0(core_id * 2);
	const uint64_t ciu_sum1_address = CVMX_CIU_INT_SUM1;
	const uint64_t ciu_en1_address = CVMX_CIU_INTX_EN1(core_id * 2 + 1);
	unsigned long cop0_cause;
	unsigned long cop0_status;
	uint64_t ciu_en;
	uint64_t ciu_sum;

	while (1) {
		cop0_cause = read_c0_cause();
		cop0_status = read_c0_status();
		cop0_cause &= cop0_status;
		cop0_cause &= ST0_IM;

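		/*
		 * Only sources that are both pending (SUM) and enabled
		 * (EN) are dispatched; fls64() picks the highest
		 * numbered pending source first.
		 */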
		if (unlikely(cop0_cause & STATUSF_IP2)) {
			ciu_sum = cvmx_read_csr(ciu_sum0_address);
			ciu_en = cvmx_read_csr(ciu_en0_address);
			ciu_sum &= ciu_en;
			if (likely(ciu_sum))
				do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1);
			else
				spurious_interrupt();
		} else if (unlikely(cop0_cause & STATUSF_IP3)) {
			ciu_sum = cvmx_read_csr(ciu_sum1_address);
			ciu_en = cvmx_read_csr(ciu_en1_address);
			ciu_sum &= ciu_en;
			if (likely(ciu_sum))
				do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1);
			else
				spurious_interrupt();
		} else if (likely(cop0_cause)) {
			do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
		} else {
			break;
		}
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static int is_irq_enabled_on_cpu(unsigned int irq, unsigned int cpu)
{
	unsigned int isset;
	int coreid = octeon_coreid_for_cpu(cpu);
	int bit = (irq < OCTEON_IRQ_WDOG0) ?
		irq - OCTEON_IRQ_WORKQ0 : irq - OCTEON_IRQ_WDOG0;
	if (irq < 64) {
		isset = (cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)) &
			(1ull << bit)) >> bit;
	} else {
		isset = (cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)) &
			(1ull << bit)) >> bit;
	}
	return isset;
}

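/*
 * Used during CPU hotplug: mask the per-core lines locally and
 * repoint any CIU irq that was enabled on this core at the set of
 * online CPUs, so it migrates off the departing CPU.
 */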
void fixup_irqs(void)
{
	int irq;

	for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++)
		octeon_irq_core_disable_local(irq);

	for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_GPIO15; irq++) {
		if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
			/* ciu irq migrates to next cpu */
			octeon_irq_chip_ciu0.disable(irq);
			octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
		}
	}

#if 0
	for (irq = OCTEON_IRQ_MBOX0; irq <= OCTEON_IRQ_MBOX1; irq++)
		octeon_irq_mailbox_mask(irq);
#endif
	for (irq = OCTEON_IRQ_UART0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
		if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
			/* ciu irq migrates to next cpu */
			octeon_irq_chip_ciu0.disable(irq);
			octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
		}
	}

	for (irq = OCTEON_IRQ_UART2; irq <= OCTEON_IRQ_RESERVED135; irq++) {
		if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
			/* ciu irq migrates to next cpu */
			octeon_irq_chip_ciu1.disable(irq);
			octeon_irq_ciu1_set_affinity(irq, &cpu_online_map);
		}
	}
}

#endif /* CONFIG_HOTPLUG_CPU */