MIPS: BCM63xx: Move bcm63xx_init_irq down
arch/mips/bcm63xx/irq.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 * Copyright (C) 2008 Nicolas Schichan <nschichan@freebox.fr>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
#include <bcm63xx_cpu.h>
#include <bcm63xx_regs.h>
#include <bcm63xx_io.h>
#include <bcm63xx_irq.h>

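/*
 * Per-SoC layout of the PERF interrupt controller, filled in by
 * bcm63xx_init_irq() below: register addresses, number of external
 * IRQs, whether they are cascaded through the internal controller,
 * and the accessors matching the register width.
 */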
static u32 irq_stat_addr, irq_mask_addr;
static void (*dispatch_internal)(void);
static int is_ext_irq_cascaded;
static unsigned int ext_irq_count;
static unsigned int ext_irq_start, ext_irq_end;
static unsigned int ext_irq_cfg_reg1, ext_irq_cfg_reg2;
static void (*internal_irq_mask)(unsigned int irq);
static void (*internal_irq_unmask)(unsigned int irq);


static inline u32 get_ext_irq_perf_reg(int irq)
{
	if (irq < 4)
		return ext_irq_cfg_reg1;
	return ext_irq_cfg_reg2;
}

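/*
 * When external IRQs are cascaded through the internal controller,
 * remap their status bits back into the external IRQ number space
 * before dispatching.
 */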
static inline void handle_internal(int intbit)
{
	if (is_ext_irq_cascaded &&
	    intbit >= ext_irq_start && intbit <= ext_irq_end)
		do_IRQ(intbit - ext_irq_start + IRQ_EXTERNAL_BASE);
	else
		do_IRQ(intbit + IRQ_INTERNAL_BASE);
}

/*
 * dispatch internal devices IRQ (uart, enet, watchdog, ...). do not
 * prioritize any interrupt relative to another. the static counter
 * will resume the loop where it ended the last time we left this
 * function.
 */
static void __dispatch_internal_32(void)
{
	u32 pending;
	static int i;

	pending = bcm_readl(irq_stat_addr) & bcm_readl(irq_mask_addr);

	if (!pending)
		return;

	while (1) {
		int to_call = i;

		i = (i + 1) & 0x1f;
		if (pending & (1 << to_call)) {
			handle_internal(to_call);
			break;
		}
	}
}

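/*
 * Same round-robin dispatch for SoCs whose PERF block exposes 64-bit
 * interrupt status/mask registers (the chips probed with irq_bits == 64
 * in bcm63xx_init_irq() below).
 */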
static void __dispatch_internal_64(void)
{
	u64 pending;
	static int i;

	pending = bcm_readq(irq_stat_addr) & bcm_readq(irq_mask_addr);

	if (!pending)
		return;

	while (1) {
		int to_call = i;

		i = (i + 1) & 0x3f;
		if (pending & (1ull << to_call)) {
			handle_internal(to_call);
			break;
		}
	}
}

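/*
 * Main CPU interrupt dispatch: the timer line (IP7) and the software
 * interrupts (IP0/IP1) are handled first, then the internal PERF
 * cascade on IP2, and finally the external IRQ lines on IP3-IP6 when
 * they are not cascaded through the internal controller. Loop until
 * no enabled cause bit remains.
 */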
asmlinkage void plat_irq_dispatch(void)
{
	u32 cause;

	do {
		cause = read_c0_cause() & read_c0_status() & ST0_IM;

		if (!cause)
			break;

		if (cause & CAUSEF_IP7)
			do_IRQ(7);
		if (cause & CAUSEF_IP0)
			do_IRQ(0);
		if (cause & CAUSEF_IP1)
			do_IRQ(1);
		if (cause & CAUSEF_IP2)
			dispatch_internal();
		if (!is_ext_irq_cascaded) {
			if (cause & CAUSEF_IP3)
				do_IRQ(IRQ_EXT_0);
			if (cause & CAUSEF_IP4)
				do_IRQ(IRQ_EXT_1);
			if (cause & CAUSEF_IP5)
				do_IRQ(IRQ_EXT_2);
			if (cause & CAUSEF_IP6)
				do_IRQ(IRQ_EXT_3);
		}
	} while (1);
}

/*
 * internal IRQ operations: only mask/unmask in the PERF irq mask
 * register.
 */
static void __internal_irq_mask_32(unsigned int irq)
{
	u32 mask;

	mask = bcm_readl(irq_mask_addr);
	mask &= ~(1 << irq);
	bcm_writel(mask, irq_mask_addr);
}

static void __internal_irq_mask_64(unsigned int irq)
{
	u64 mask;

	mask = bcm_readq(irq_mask_addr);
	mask &= ~(1ull << irq);
	bcm_writeq(mask, irq_mask_addr);
}

static void __internal_irq_unmask_32(unsigned int irq)
{
	u32 mask;

	mask = bcm_readl(irq_mask_addr);
	mask |= (1 << irq);
	bcm_writel(mask, irq_mask_addr);
}

static void __internal_irq_unmask_64(unsigned int irq)
{
	u64 mask;

	mask = bcm_readq(irq_mask_addr);
	mask |= (1ull << irq);
	bcm_writeq(mask, irq_mask_addr);
}

static void bcm63xx_internal_irq_mask(struct irq_data *d)
{
	internal_irq_mask(d->irq - IRQ_INTERNAL_BASE);
}

static void bcm63xx_internal_irq_unmask(struct irq_data *d)
{
	internal_irq_unmask(d->irq - IRQ_INTERNAL_BASE);
}

/*
 * external IRQ operations: mask/unmask and clear on the PERF external
 * irq control register.
 */
static void bcm63xx_external_irq_mask(struct irq_data *d)
{
	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
	u32 reg, regaddr;

	regaddr = get_ext_irq_perf_reg(irq);
	reg = bcm_perf_readl(regaddr);

	if (BCMCPU_IS_6348())
		reg &= ~EXTIRQ_CFG_MASK_6348(irq % 4);
	else
		reg &= ~EXTIRQ_CFG_MASK(irq % 4);

	bcm_perf_writel(reg, regaddr);
	if (is_ext_irq_cascaded)
		internal_irq_mask(irq + ext_irq_start);
}

static void bcm63xx_external_irq_unmask(struct irq_data *d)
{
	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
	u32 reg, regaddr;

	regaddr = get_ext_irq_perf_reg(irq);
	reg = bcm_perf_readl(regaddr);

	if (BCMCPU_IS_6348())
		reg |= EXTIRQ_CFG_MASK_6348(irq % 4);
	else
		reg |= EXTIRQ_CFG_MASK(irq % 4);

	bcm_perf_writel(reg, regaddr);

	if (is_ext_irq_cascaded)
		internal_irq_unmask(irq + ext_irq_start);
}

static void bcm63xx_external_irq_clear(struct irq_data *d)
{
	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
	u32 reg, regaddr;

	regaddr = get_ext_irq_perf_reg(irq);
	reg = bcm_perf_readl(regaddr);

	if (BCMCPU_IS_6348())
		reg |= EXTIRQ_CFG_CLEAR_6348(irq % 4);
	else
		reg |= EXTIRQ_CFG_CLEAR(irq % 4);

	bcm_perf_writel(reg, regaddr);
}

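/*
 * Translate the generic IRQ trigger type into the levelsense/sense/
 * bothedge bits of the external IRQ config register. Falling edge
 * maps to all three bits cleared, which is why that case sets none
 * of them.
 */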
static int bcm63xx_external_irq_set_type(struct irq_data *d,
					 unsigned int flow_type)
{
	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
	u32 reg, regaddr;
	int levelsense, sense, bothedge;

	flow_type &= IRQ_TYPE_SENSE_MASK;

	if (flow_type == IRQ_TYPE_NONE)
		flow_type = IRQ_TYPE_LEVEL_LOW;

	levelsense = sense = bothedge = 0;
	switch (flow_type) {
	case IRQ_TYPE_EDGE_BOTH:
		bothedge = 1;
		break;

	case IRQ_TYPE_EDGE_RISING:
		sense = 1;
		break;

	case IRQ_TYPE_EDGE_FALLING:
		break;

	case IRQ_TYPE_LEVEL_HIGH:
		levelsense = 1;
		sense = 1;
		break;

	case IRQ_TYPE_LEVEL_LOW:
		levelsense = 1;
		break;

	default:
		printk(KERN_ERR "bogus flow type combination given!\n");
		return -EINVAL;
	}

	regaddr = get_ext_irq_perf_reg(irq);
	reg = bcm_perf_readl(regaddr);
	irq %= 4;

	switch (bcm63xx_get_cpu_id()) {
	case BCM6348_CPU_ID:
		if (levelsense)
			reg |= EXTIRQ_CFG_LEVELSENSE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_LEVELSENSE_6348(irq);
		if (sense)
			reg |= EXTIRQ_CFG_SENSE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_SENSE_6348(irq);
		if (bothedge)
			reg |= EXTIRQ_CFG_BOTHEDGE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_BOTHEDGE_6348(irq);
		break;

	case BCM3368_CPU_ID:
	case BCM6328_CPU_ID:
	case BCM6338_CPU_ID:
	case BCM6345_CPU_ID:
	case BCM6358_CPU_ID:
	case BCM6362_CPU_ID:
	case BCM6368_CPU_ID:
		if (levelsense)
			reg |= EXTIRQ_CFG_LEVELSENSE(irq);
		else
			reg &= ~EXTIRQ_CFG_LEVELSENSE(irq);
		if (sense)
			reg |= EXTIRQ_CFG_SENSE(irq);
		else
			reg &= ~EXTIRQ_CFG_SENSE(irq);
		if (bothedge)
			reg |= EXTIRQ_CFG_BOTHEDGE(irq);
		else
			reg &= ~EXTIRQ_CFG_BOTHEDGE(irq);
		break;
	default:
		BUG();
	}

	bcm_perf_writel(reg, regaddr);

	irqd_set_trigger_type(d, flow_type);
	if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
		__irq_set_handler_locked(d->irq, handle_level_irq);
	else
		__irq_set_handler_locked(d->irq, handle_edge_irq);

	return IRQ_SET_MASK_OK_NOCOPY;
}

static struct irq_chip bcm63xx_internal_irq_chip = {
	.name		= "bcm63xx_ipic",
	.irq_mask	= bcm63xx_internal_irq_mask,
	.irq_unmask	= bcm63xx_internal_irq_unmask,
};

static struct irq_chip bcm63xx_external_irq_chip = {
	.name		= "bcm63xx_epic",
	.irq_ack	= bcm63xx_external_irq_clear,

	.irq_mask	= bcm63xx_external_irq_mask,
	.irq_unmask	= bcm63xx_external_irq_unmask,

	.irq_set_type	= bcm63xx_external_irq_set_type,
};

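/*
 * The CPU lines only cascade into this controller: IP2 always, and
 * IP3-IP6 when the external IRQs are not routed through the internal
 * controller. Claim them with no_action handlers so the cascade lines
 * stay requested.
 */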
static struct irqaction cpu_ip2_cascade_action = {
	.handler	= no_action,
	.name		= "cascade_ip2",
	.flags		= IRQF_NO_THREAD,
};

static struct irqaction cpu_ext_cascade_action = {
	.handler	= no_action,
	.name		= "cascade_extirq",
	.flags		= IRQF_NO_THREAD,
};

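/*
 * Probe the running SoC: pick the PERF status/mask register offsets,
 * the external IRQ count and cascading mode, and select the 32- or
 * 64-bit dispatch and mask/unmask helpers accordingly.
 */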
static void bcm63xx_init_irq(void)
{
	int irq_bits;

	irq_stat_addr = bcm63xx_regset_address(RSET_PERF);
	irq_mask_addr = bcm63xx_regset_address(RSET_PERF);

	switch (bcm63xx_get_cpu_id()) {
	case BCM3368_CPU_ID:
		irq_stat_addr += PERF_IRQSTAT_3368_REG;
		irq_mask_addr += PERF_IRQMASK_3368_REG;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_3368;
		break;
	case BCM6328_CPU_ID:
		irq_stat_addr += PERF_IRQSTAT_6328_REG;
		irq_mask_addr += PERF_IRQMASK_6328_REG;
		irq_bits = 64;
		ext_irq_count = 4;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6328_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6328_EXT_IRQ3 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6328;
		break;
	case BCM6338_CPU_ID:
		irq_stat_addr += PERF_IRQSTAT_6338_REG;
		irq_mask_addr += PERF_IRQMASK_6338_REG;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6338;
		break;
	case BCM6345_CPU_ID:
		irq_stat_addr += PERF_IRQSTAT_6345_REG;
		irq_mask_addr += PERF_IRQMASK_6345_REG;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6345;
		break;
	case BCM6348_CPU_ID:
		irq_stat_addr += PERF_IRQSTAT_6348_REG;
		irq_mask_addr += PERF_IRQMASK_6348_REG;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6348;
		break;
	case BCM6358_CPU_ID:
		irq_stat_addr += PERF_IRQSTAT_6358_REG;
		irq_mask_addr += PERF_IRQMASK_6358_REG;
		irq_bits = 32;
		ext_irq_count = 4;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6358_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6358;
		break;
	case BCM6362_CPU_ID:
		irq_stat_addr += PERF_IRQSTAT_6362_REG;
		irq_mask_addr += PERF_IRQMASK_6362_REG;
		irq_bits = 64;
		ext_irq_count = 4;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6362;
		break;
	case BCM6368_CPU_ID:
		irq_stat_addr += PERF_IRQSTAT_6368_REG;
		irq_mask_addr += PERF_IRQMASK_6368_REG;
		irq_bits = 64;
		ext_irq_count = 6;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6368_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6368_EXT_IRQ5 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6368;
		ext_irq_cfg_reg2 = PERF_EXTIRQ_CFG_REG2_6368;
		break;
	default:
		BUG();
	}

	if (irq_bits == 32) {
		dispatch_internal = __dispatch_internal_32;
		internal_irq_mask = __internal_irq_mask_32;
		internal_irq_unmask = __internal_irq_unmask_32;
	} else {
		dispatch_internal = __dispatch_internal_64;
		internal_irq_mask = __internal_irq_mask_64;
		internal_irq_unmask = __internal_irq_unmask_64;
	}
}

void __init arch_init_irq(void)
{
	int i;

	bcm63xx_init_irq();
	mips_cpu_irq_init();
	for (i = IRQ_INTERNAL_BASE; i < NR_IRQS; ++i)
		irq_set_chip_and_handler(i, &bcm63xx_internal_irq_chip,
					 handle_level_irq);

	for (i = IRQ_EXTERNAL_BASE; i < IRQ_EXTERNAL_BASE + ext_irq_count; ++i)
		irq_set_chip_and_handler(i, &bcm63xx_external_irq_chip,
					 handle_edge_irq);

	if (!is_ext_irq_cascaded) {
		for (i = 3; i < 3 + ext_irq_count; ++i)
			setup_irq(MIPS_CPU_IRQ_BASE + i, &cpu_ext_cascade_action);
	}

	setup_irq(MIPS_CPU_IRQ_BASE + 2, &cpu_ip2_cascade_action);
}