/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2008 Cavium Networks
 */
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/hardirq.h>

#include <asm/octeon/octeon.h>
DEFINE_RWLOCK(octeon_irq_ciu0_rwlock);
DEFINE_RWLOCK(octeon_irq_ciu1_rwlock);
DEFINE_SPINLOCK(octeon_irq_msi_lock);
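
/*
 * The eight "core" interrupts (OCTEON_IRQ_SW0 and up) map directly onto
 * the CP0 Status/Cause IM/IP bits 8-15, so they are masked and acked
 * with set/clear_c0_status(0x100 << bit). The CIU interrupts further
 * down are instead routed through the Central Interrupt Unit, which
 * feeds each core via lines IP2 (CIU0) and IP3 (CIU1).
 */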
static void octeon_irq_core_ack(unsigned int irq)
{
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << bit);
	/* The two user interrupts must be cleared manually. */
	if (bit < 2)
		clear_c0_cause(0x100 << bit);
}
static void octeon_irq_core_eoi(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * If an IRQ is being processed while we are disabling it the
	 * handler will attempt to unmask the interrupt after it has
	 * been disabled.
	 */
	if (desc->status & IRQ_DISABLED)
		return;

	/* There is a race here. We should fix it. */

	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	set_c0_status(0x100 << bit);
}
static void octeon_irq_core_enable(unsigned int irq)
{
	unsigned long flags;
	unsigned int bit = irq - OCTEON_IRQ_SW0;

	/*
	 * We need to disable interrupts to make sure our updates are
	 * atomic.
	 */
	local_irq_save(flags);
	set_c0_status(0x100 << bit);
	local_irq_restore(flags);
}
static void octeon_irq_core_disable_local(unsigned int irq)
{
	unsigned long flags;
	unsigned int bit = irq - OCTEON_IRQ_SW0;

	/*
	 * We need to disable interrupts to make sure our updates are
	 * atomic.
	 */
	local_irq_save(flags);
	clear_c0_status(0x100 << bit);
	local_irq_restore(flags);
}
static void octeon_irq_core_disable(unsigned int irq)
{
#ifdef CONFIG_SMP
	on_each_cpu((void (*)(void *)) octeon_irq_core_disable_local,
		    (void *) (long) irq, 1);
#else
	octeon_irq_core_disable_local(irq);
#endif
}
static struct irq_chip octeon_irq_chip_core = {
	.name = "Core",
	.enable = octeon_irq_core_enable,
	.disable = octeon_irq_core_disable,
	.ack = octeon_irq_core_ack,
	.eoi = octeon_irq_core_eoi,
};
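
/*
 * CIU0 handling. Each core has its own enable register,
 * CVMX_CIU_INTX_EN0(coreid * 2); the CIU provides two interrupt lines
 * per core (IP2 and IP3), hence the factor of two in the index. Bits
 * 0-63 of EN0 correspond to OCTEON_IRQ_WORKQ0 through
 * OCTEON_IRQ_BOOTDMA.
 */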
static void octeon_irq_ciu0_ack(unsigned int irq)
{
	/*
	 * In order to avoid any locking accessing the CIU, we
	 * acknowledge CIU interrupts by disabling all of them. This
	 * way we can use a per core register and avoid any out of
	 * core locking requirements. This has the side effect that
	 * CIU interrupts can't be processed recursively.
	 *
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << 2);
}
static void octeon_irq_ciu0_eoi(unsigned int irq)
{
	/*
	 * Enable all CIU interrupts again. We don't need to disable
	 * IRQs to make these atomic since they are already disabled
	 * earlier in the low level interrupt code.
	 */
	set_c0_status(0x100 << 2);
}
static void octeon_irq_ciu0_enable(unsigned int irq)
{
	int coreid = cvmx_get_core_num();
	unsigned long flags;
	uint64_t en0;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

	/*
	 * A read lock is used here to make sure only one core is ever
	 * updating the CIU enable bits at a time. During an enable
	 * the cores don't interfere with each other. During a disable
	 * the write lock stops any enables that might cause a
	 * problem.
	 */
	read_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
	en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	en0 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	/* Read back to make sure the write has been committed. */
	cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	read_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
}
static void octeon_irq_ciu0_disable(unsigned int irq)
{
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */
	unsigned long flags;
	uint64_t en0;
#ifdef CONFIG_SMP
	int cpu;

	write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
	for_each_online_cpu(cpu) {
		int coreid = cpu_logical_map(cpu);
		en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
		en0 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
	write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
#else
	int coreid = cvmx_get_core_num();

	local_irq_save(flags);
	en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	en0 &= ~(1ull << bit);
	cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	local_irq_restore(flags);
#endif
}
#ifdef CONFIG_SMP
static int octeon_irq_ciu0_set_affinity(unsigned int irq,
					const struct cpumask *dest)
{
	int cpu;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

	write_lock(&octeon_irq_ciu0_rwlock);
	for_each_online_cpu(cpu) {
		int coreid = cpu_logical_map(cpu);
		uint64_t en0 =
			cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
		if (cpumask_test_cpu(cpu, dest))
			en0 |= 1ull << bit;
		else
			en0 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
	write_unlock(&octeon_irq_ciu0_rwlock);

	return 0;
}
#endif
static struct irq_chip octeon_irq_chip_ciu0 = {
	.name = "CIU0",
	.enable = octeon_irq_ciu0_enable,
	.disable = octeon_irq_ciu0_disable,
	.ack = octeon_irq_ciu0_ack,
	.eoi = octeon_irq_ciu0_eoi,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu0_set_affinity,
#endif
};
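
/*
 * CIU1 mirrors CIU0 but is wired to line IP3, and each core's enable
 * register is CVMX_CIU_INTX_EN1(coreid * 2 + 1). Bits 0-63 of EN1
 * start at OCTEON_IRQ_WDOG0.
 */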
static void octeon_irq_ciu1_ack(unsigned int irq)
{
	/*
	 * In order to avoid any locking accessing the CIU, we
	 * acknowledge CIU interrupts by disabling all of them. This
	 * way we can use a per core register and avoid any out of
	 * core locking requirements. This has the side effect that
	 * CIU interrupts can't be processed recursively. We don't
	 * need to disable IRQs to make these atomic since they are
	 * already disabled earlier in the low level interrupt code.
	 */
	clear_c0_status(0x100 << 3);
}
static void octeon_irq_ciu1_eoi(unsigned int irq)
{
	/*
	 * Enable all CIU interrupts again. We don't need to disable
	 * IRQs to make these atomic since they are already disabled
	 * earlier in the low level interrupt code.
	 */
	set_c0_status(0x100 << 3);
}
static void octeon_irq_ciu1_enable(unsigned int irq)
{
	int coreid = cvmx_get_core_num();
	unsigned long flags;
	uint64_t en1;
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */

	/*
	 * A read lock is used here to make sure only one core is ever
	 * updating the CIU enable bits at a time. During an enable
	 * the cores don't interfere with each other. During a disable
	 * the write lock stops any enables that might cause a
	 * problem.
	 */
	read_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
	en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	en1 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	/* Read back to make sure the write has been committed. */
	cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	read_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
}
static void octeon_irq_ciu1_disable(unsigned int irq)
{
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
	unsigned long flags;
	uint64_t en1;
#ifdef CONFIG_SMP
	int cpu;

	write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
	for_each_online_cpu(cpu) {
		int coreid = cpu_logical_map(cpu);
		en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
		en1 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
	write_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
#else
	int coreid = cvmx_get_core_num();

	local_irq_save(flags);
	en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	en1 &= ~(1ull << bit);
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	local_irq_restore(flags);
#endif
}
#ifdef CONFIG_SMP
static int octeon_irq_ciu1_set_affinity(unsigned int irq,
					const struct cpumask *dest)
{
	int cpu;
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */

	write_lock(&octeon_irq_ciu1_rwlock);
	for_each_online_cpu(cpu) {
		int coreid = cpu_logical_map(cpu);
		uint64_t en1 =
			cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
		if (cpumask_test_cpu(cpu, dest))
			en1 |= 1ull << bit;
		else
			en1 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
	write_unlock(&octeon_irq_ciu1_rwlock);

	return 0;
}
#endif
static struct irq_chip octeon_irq_chip_ciu1 = {
	.name = "CIU1",
	.enable = octeon_irq_ciu1_enable,
	.disable = octeon_irq_ciu1_disable,
	.ack = octeon_irq_ciu1_ack,
	.eoi = octeon_irq_ciu1_eoi,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu1_set_affinity,
#endif
};
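
/*
 * MSI handling. Octeon chips with PCI cannot mask individual MSIs and
 * only need the receive register written to ack them. Chips with PCIe
 * keep all 64 supported MSI enable bits in the single
 * CVMX_PEXP_NPEI_MSI_ENB0 register, serialized by octeon_irq_msi_lock.
 */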
#ifdef CONFIG_PCI_MSI
static void octeon_irq_msi_ack(unsigned int irq)
{
	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
		/* These chips have PCI */
		cvmx_write_csr(CVMX_NPI_NPI_MSI_RCV,
			       1ull << (irq - OCTEON_IRQ_MSI_BIT0));
	} else {
		/*
		 * These chips have PCIe. Thankfully the ACK doesn't
		 * need any locking.
		 */
		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_RCV0,
			       1ull << (irq - OCTEON_IRQ_MSI_BIT0));
	}
}
static void octeon_irq_msi_eoi(unsigned int irq)
{
	/* Nothing needed */
}
static void octeon_irq_msi_enable(unsigned int irq)
{
	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
		/*
		 * Octeon PCI doesn't have the ability to mask/unmask
		 * MSI interrupts individually. Instead of
		 * masking/unmasking them in groups of 16, we simply
		 * assume MSI devices are well behaved. MSI
		 * interrupts are always enabled and the ACK is assumed
		 * to be enough.
		 */
	} else {
		/*
		 * These chips have PCIe. Note that we only support
		 * the first 64 MSI interrupts. Unfortunately all the
		 * MSI enables are in the same register. We use
		 * MSI0's lock to control access to them all.
		 */
		uint64_t en;
		unsigned long flags;

		spin_lock_irqsave(&octeon_irq_msi_lock, flags);
		en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		en |= 1ull << (irq - OCTEON_IRQ_MSI_BIT0);
		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
		/* Read back to make sure the write has been committed. */
		cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
	}
}
static void octeon_irq_msi_disable(unsigned int irq)
{
	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
		/* See comment in enable */
	} else {
		/*
		 * These chips have PCIe. Note that we only support
		 * the first 64 MSI interrupts. Unfortunately all the
		 * MSI enables are in the same register. We use
		 * MSI0's lock to control access to them all.
		 */
		uint64_t en;
		unsigned long flags;

		spin_lock_irqsave(&octeon_irq_msi_lock, flags);
		en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		en &= ~(1ull << (irq - OCTEON_IRQ_MSI_BIT0));
		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
		cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
	}
}
static struct irq_chip octeon_irq_chip_msi = {
	.name = "MSI",
	.enable = octeon_irq_msi_enable,
	.disable = octeon_irq_msi_disable,
	.ack = octeon_irq_msi_ack,
	.eoi = octeon_irq_msi_eoi,
};
#endif
void __init arch_init_irq(void)
{
	int irq;

#ifdef CONFIG_SMP
	/* Set the default affinity to the boot cpu. */
	cpumask_clear(irq_default_affinity);
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif

	if (NR_IRQS < OCTEON_IRQ_LAST)
		pr_err("octeon_irq_init: NR_IRQS is set too low\n");
	/* 0 - 15 reserved for i8259 master and slave controller. */

	/* 17 - 23 Mips internal */
	for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_core,
					 handle_percpu_irq);
	}
	/* 24 - 87 CIU_INT_SUM0 */
	for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_ciu0,
					 handle_percpu_irq);
	}
	/* 88 - 151 CIU_INT_SUM1 */
	for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_RESERVED151; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_ciu1,
					 handle_percpu_irq);
	}
#ifdef CONFIG_PCI_MSI
	/* 152 - 215 PCI/PCIe MSI interrupts */
	for (irq = OCTEON_IRQ_MSI_BIT0; irq <= OCTEON_IRQ_MSI_BIT63; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_msi,
					 handle_percpu_irq);
	}
#endif
	/* Unmask the CIU lines, IP2 and IP3, in the Status register. */
	set_c0_status(0x300 << 2);
}
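
/*
 * Low level dispatch: CAUSE is masked with STATUS and ST0_IM, then IP2
 * (CIU0) and IP3 (CIU1) are serviced by scanning the CIU summary
 * registers with fls64(); any other pending line is forwarded as a
 * plain MIPS CPU interrupt. The loop runs until nothing is pending.
 */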
asmlinkage void plat_irq_dispatch(void)
{
	const unsigned long core_id = cvmx_get_core_num();
	const uint64_t ciu_sum0_address = CVMX_CIU_INTX_SUM0(core_id * 2);
	const uint64_t ciu_en0_address = CVMX_CIU_INTX_EN0(core_id * 2);
	const uint64_t ciu_sum1_address = CVMX_CIU_INT_SUM1;
	const uint64_t ciu_en1_address = CVMX_CIU_INTX_EN1(core_id * 2 + 1);
	unsigned long cop0_cause;
	unsigned long cop0_status;
	uint64_t ciu_en;
	uint64_t ciu_sum;

	while (1) {
		cop0_cause = read_c0_cause();
		cop0_status = read_c0_status();
		cop0_cause &= cop0_status;
		cop0_cause &= ST0_IM;

		if (unlikely(cop0_cause & STATUSF_IP2)) {
			ciu_sum = cvmx_read_csr(ciu_sum0_address);
			ciu_en = cvmx_read_csr(ciu_en0_address);
			ciu_sum &= ciu_en;
			/* fls64() is 1-based, so CIU bit n maps to WORKQ0 + n. */
			if (likely(ciu_sum))
				do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1);
			else
				spurious_interrupt();
		} else if (unlikely(cop0_cause & STATUSF_IP3)) {
			ciu_sum = cvmx_read_csr(ciu_sum1_address);
			ciu_en = cvmx_read_csr(ciu_en1_address);
			ciu_sum &= ciu_en;
			if (likely(ciu_sum))
				do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1);
			else
				spurious_interrupt();
		} else if (likely(cop0_cause)) {
			/* IP bits sit at 8-15 in CAUSE, hence -9 with 1-based fls(). */
			do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
		} else {
			break;
		}
	}
}