Commit | Line | Data |
---|---|---|
35832e26 | 1 | /* |
92592c9c | 2 | * Copyright 2010 PMC-Sierra, Inc, derived from irq_cpu.c |
35832e26 | 3 | * |
92592c9c | 4 | * This file define the irq handler for MSP CIC subsystem interrupts. |
35832e26 MSJ |
5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | |
7 | * under the terms of the GNU General Public License as published by the | |
8 | * Free Software Foundation; either version 2 of the License, or (at your | |
9 | * option) any later version. | |
10 | */ | |
11 | ||
12 | #include <linux/init.h> | |
13 | #include <linux/interrupt.h> | |
14 | #include <linux/kernel.h> | |
15 | #include <linux/bitops.h> | |
ca4d3e67 | 16 | #include <linux/irq.h> |
35832e26 | 17 | |
92592c9c | 18 | #include <asm/mipsregs.h> |
35832e26 MSJ |
19 | #include <asm/system.h> |
20 | ||
21 | #include <msp_cic_int.h> | |
22 | #include <msp_regs.h> | |
23 | ||
/*
 * External API -- the PER (peripheral) interrupt sub-system cascades
 * off the CIC; its init/dispatch entry points live in another file.
 */
extern void msp_per_irq_init(void);
extern void msp_per_irq_dispatch(void);


/*
 * Convenience Macro. Should be somewhere generic.
 *
 * Returns the VPE number the calling TC is bound to, read from the
 * CP0 TCBind register (MIPS MT).
 */
#define get_current_vpe()	\
	((read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE)
#ifdef CONFIG_SMP

/*
 * Disable local interrupts and halt the other TCs of this VPE (dmt)
 * so the per-VPE mask register read-modify-write below is atomic.
 */
#define LOCK_VPE(flags, mtflags) \
do {				\
	local_irq_save(flags);	\
	mtflags = dmt();	\
} while (0)

#define UNLOCK_VPE(flags, mtflags) \
do {				\
	emt(mtflags);		\
	local_irq_restore(flags); \
} while (0)

/*
 * Stronger variant: halt every VPE on the core (dvpe) -- needed when
 * touching the mask registers of VPEs other than our own.
 */
#define LOCK_CORE(flags, mtflags) \
do {				\
	local_irq_save(flags);	\
	mtflags = dvpe();	\
} while (0)

#define UNLOCK_CORE(flags, mtflags) \
do {				\
	evpe(mtflags);		\
	local_irq_restore(flags); \
} while (0)

#else

/*
 * UP build: the lock macros compile away.  Only the VPE variants are
 * stubbed here; the CORE variants are used solely under
 * CONFIG_MIPS_MT_SMP, which implies CONFIG_SMP.
 */
#define LOCK_VPE(flags, mtflags)
#define UNLOCK_VPE(flags, mtflags)
#endif
68 | ||
/* ensure writes to cic are completed */
static inline void cic_wmb(void)
{
	const volatile void __iomem *cic_mem = CIC_VPE0_MSK_REG;
	volatile u32 dummy_read;

	/*
	 * Barrier, then read back from the CIC register space to force
	 * any posted writes out to the device before we return.
	 */
	wmb();
	dummy_read = __raw_readl(cic_mem);
	/* consume the value so the compiler cannot discard the read */
	dummy_read++;
}
79 | ||
/*
 * Enable a CIC interrupt in this VPE's mask register.
 * irq is a global IRQ number; the CIC bit is (irq - MSP_CIC_INTBASE).
 */
static inline void unmask_cic_irq(unsigned int irq)
{
	/* base of the per-VPE mask registers; indexed by VPE below */
	volatile u32 *cic_msk_reg = CIC_VPE0_MSK_REG;
	int vpe;
#ifdef CONFIG_SMP
	unsigned int mtflags;
	unsigned long flags;

	/*
	 * Make sure we have IRQ affinity.  It may have changed while
	 * we were processing the IRQ.  If this CPU is no longer in the
	 * affinity mask, leave the IRQ masked here.
	 */
	if (!cpumask_test_cpu(smp_processor_id(), irq_desc[irq].affinity))
		return;
#endif

	vpe = get_current_vpe();
	/* RMW of the mask register must not race other TCs on this VPE */
	LOCK_VPE(flags, mtflags);
	cic_msk_reg[vpe] |= (1 << (irq - MSP_CIC_INTBASE));
	UNLOCK_VPE(flags, mtflags);
	/* flush the posted write before returning */
	cic_wmb();
}
102 | ||
92592c9c | 103 | static inline void mask_cic_irq(unsigned int irq) |
35832e26 | 104 | { |
92592c9c A |
105 | volatile u32 *cic_msk_reg = CIC_VPE0_MSK_REG; |
106 | int vpe = get_current_vpe(); | |
107 | #ifdef CONFIG_SMP | |
108 | unsigned long flags, mtflags; | |
109 | #endif | |
110 | LOCK_VPE(flags, mtflags); | |
111 | cic_msk_reg[vpe] &= ~(1 << (irq - MSP_CIC_INTBASE)); | |
112 | UNLOCK_VPE(flags, mtflags); | |
113 | cic_wmb(); | |
114 | } | |
/*
 * Acknowledge a CIC interrupt: mask it, then clear its latched status
 * bit (write-one-to-clear on CIC_STS_REG).
 */
static inline void msp_cic_irq_ack(unsigned int irq)
{
	mask_cic_irq(irq);
	/*
	 * Only really necessary for 18, 16-14 and sometimes 3:0
	 * (since these can be edge sensitive) but it doesn't
	 * hurt for the others
	 */
	*CIC_STS_REG = (1 << (irq - MSP_CIC_INTBASE));
	/* tell the SMTC interrupt-replay machinery the IRQ is acked */
	smtc_im_ack_irq(irq);
}
126 | ||
127 | static void msp_cic_irq_end(unsigned int irq) | |
128 | { | |
129 | if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) | |
130 | unmask_cic_irq(irq); | |
131 | } | |
132 | ||
/*Note: Limiting to VSMP . Not tested in SMTC */

#ifdef CONFIG_MIPS_MT_SMP
/*
 * Steer a CIC interrupt to the VPEs named in cpumask by setting the
 * corresponding bit in each VPE's mask register and clearing it in
 * the others.  Runs with the whole core halted (dvpe) since it writes
 * other VPEs' registers.  Always succeeds (returns 0).
 */
static inline int msp_cic_irq_set_affinity(unsigned int irq,
					const struct cpumask *cpumask)
{
	int cpu;
	unsigned long iflags;
	unsigned int vpeflags;
	unsigned long irq_bit = (1 << (irq - MSP_CIC_INTBASE));
	volatile u32 *vpe_masks = (volatile u32 *)CIC_VPE0_MSK_REG;

	/* timer balancing should be disabled in kernel code */
	BUG_ON(irq == MSP_INT_VPE0_TIMER || irq == MSP_INT_VPE1_TIMER);

	LOCK_CORE(iflags, vpeflags);

	/* enable if any of each VPE's TCs require this IRQ */
	for_each_online_cpu(cpu)
		vpe_masks[cpu] = cpumask_test_cpu(cpu, cpumask)
				? (vpe_masks[cpu] | irq_bit)
				: (vpe_masks[cpu] & ~irq_bit);

	UNLOCK_CORE(iflags, vpeflags);
	return 0;
}
#endif
35832e26 MSJ |
163 | |
/*
 * irq_chip wiring the CIC helpers into the generic IRQ layer
 * (pre-irq_data kernel API: hooks take a bare irq number).
 */
static struct irq_chip msp_cic_irq_controller = {
	.name = "MSP_CIC",
	.mask = mask_cic_irq,
	.mask_ack = msp_cic_irq_ack,	/* mask + clear latched status */
	.unmask = unmask_cic_irq,
	.ack = msp_cic_irq_ack,
	.end = msp_cic_irq_end,		/* re-enable after handling */
#ifdef CONFIG_MIPS_MT_SMP
	.set_affinity = msp_cic_irq_set_affinity,
#endif
};
175 | ||
35832e26 MSJ |
/*
 * One-time CIC setup: quiesce the hardware (mask everything, clear any
 * latched status), configure the external-input polarity, then register
 * all 32 CIC interrupt lines with the generic IRQ layer.
 */
void __init msp_cic_irq_init(void)
{
	int i;
	/* Mask/clear interrupts. */
	*CIC_VPE0_MSK_REG = 0x00000000;
	*CIC_VPE1_MSK_REG = 0x00000000;
	*CIC_STS_REG = 0xFFFFFFFF;	/* write-one-to-clear all pending */
	/*
	 * The MSP7120 RG and EVBD boards use IRQ[6:4] for PCI.
	 * These inputs map to EXT_INT_POL[6:4] inside the CIC.
	 * They are to be active low, level sensitive.
	 */
	*CIC_EXT_CFG_REG &= 0xFFFF8F8F;

	/* initialize all the IRQ descriptors */
	for (i = MSP_CIC_INTBASE ; i < MSP_CIC_INTBASE + 32 ; i++) {
		set_irq_chip_and_handler(i, &msp_cic_irq_controller,
					handle_level_irq);
#ifdef CONFIG_MIPS_MT_SMTC
		/* Mask of CIC interrupt */
		irq_hwmask[i] = C_IRQ4;
#endif
	}

	/* Initialize the PER interrupt sub-system */
	msp_per_irq_init();
}
203 | ||
92592c9c | 204 | /* CIC masked by CIC vector processing before dispatch called */ |
35832e26 MSJ |
205 | void msp_cic_irq_dispatch(void) |
206 | { | |
92592c9c A |
207 | volatile u32 *cic_msk_reg = (volatile u32 *)CIC_VPE0_MSK_REG; |
208 | u32 cic_mask; | |
209 | u32 pending; | |
210 | int cic_status = *CIC_STS_REG; | |
211 | cic_mask = cic_msk_reg[get_current_vpe()]; | |
212 | pending = cic_status & cic_mask; | |
213 | if (pending & (1 << (MSP_INT_VPE0_TIMER - MSP_CIC_INTBASE))) { | |
35832e26 | 214 | do_IRQ(MSP_INT_VPE0_TIMER); |
92592c9c A |
215 | } else if (pending & (1 << (MSP_INT_VPE1_TIMER - MSP_CIC_INTBASE))) { |
216 | do_IRQ(MSP_INT_VPE1_TIMER); | |
217 | } else if (pending & (1 << (MSP_INT_PER - MSP_CIC_INTBASE))) { | |
218 | msp_per_irq_dispatch(); | |
219 | } else if (pending) { | |
220 | do_IRQ(ffs(pending) + MSP_CIC_INTBASE - 1); | |
221 | } else{ | |
222 | spurious_interrupt(); | |
223 | /* Re-enable the CIC cascaded interrupt. */ | |
224 | irq_desc[MSP_INT_CIC].chip->end(MSP_INT_CIC); | |
225 | } | |
35832e26 | 226 | } |