/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 * Copyright (C) 2008 Nicolas Schichan <nschichan@freebox.fr>
 */
10 #include <linux/kernel.h>
11 #include <linux/init.h>
12 #include <linux/interrupt.h>
13 #include <linux/module.h>
14 #include <linux/irq.h>
15 #include <asm/irq_cpu.h>
16 #include <asm/mipsregs.h>
17 #include <bcm63xx_cpu.h>
18 #include <bcm63xx_regs.h>
19 #include <bcm63xx_io.h>
20 #include <bcm63xx_irq.h>
22 static u32 irq_stat_addr
[2];
23 static u32 irq_mask_addr
[2];
24 static void (*dispatch_internal
)(void);
25 static int is_ext_irq_cascaded
;
26 static unsigned int ext_irq_count
;
27 static unsigned int ext_irq_start
, ext_irq_end
;
28 static unsigned int ext_irq_cfg_reg1
, ext_irq_cfg_reg2
;
29 static void (*internal_irq_mask
)(unsigned int irq
);
30 static void (*internal_irq_unmask
)(unsigned int irq
);
33 static inline u32
get_ext_irq_perf_reg(int irq
)
36 return ext_irq_cfg_reg1
;
37 return ext_irq_cfg_reg2
;
40 static inline void handle_internal(int intbit
)
42 if (is_ext_irq_cascaded
&&
43 intbit
>= ext_irq_start
&& intbit
<= ext_irq_end
)
44 do_IRQ(intbit
- ext_irq_start
+ IRQ_EXTERNAL_BASE
);
46 do_IRQ(intbit
+ IRQ_INTERNAL_BASE
);
/*
 * dispatch internal devices IRQ (uart, enet, watchdog, ...). do not
 * prioritize any interrupt relatively to another. the static counter
 * will resume the loop where it ended the last time we left this
 * function.
 */
56 #define BUILD_IPIC_INTERNAL(width) \
57 void __dispatch_internal_##width(void) \
59 u32 pending[width / 32]; \
60 unsigned int src, tgt; \
61 bool irqs_pending = false; \
62 static unsigned int i; \
64 /* read registers in reverse order */ \
65 for (src = 0, tgt = (width / 32); src < (width / 32); src++) { \
68 val = bcm_readl(irq_stat_addr[0] + src * sizeof(u32)); \
69 val &= bcm_readl(irq_mask_addr[0] + src * sizeof(u32)); \
70 pending[--tgt] = val; \
73 irqs_pending = true; \
80 unsigned int to_call = i; \
82 i = (i + 1) & (width - 1); \
83 if (pending[to_call / 32] & (1 << (to_call & 0x1f))) { \
84 handle_internal(to_call); \
90 static void __internal_irq_mask_##width(unsigned int irq) \
93 unsigned reg = (irq / 32) ^ (width/32 - 1); \
94 unsigned bit = irq & 0x1f; \
96 val = bcm_readl(irq_mask_addr[0] + reg * sizeof(u32)); \
98 bcm_writel(val, irq_mask_addr[0] + reg * sizeof(u32)); \
101 static void __internal_irq_unmask_##width(unsigned int irq) \
104 unsigned reg = (irq / 32) ^ (width/32 - 1); \
105 unsigned bit = irq & 0x1f; \
107 val = bcm_readl(irq_mask_addr[0] + reg * sizeof(u32)); \
109 bcm_writel(val, irq_mask_addr[0] + reg * sizeof(u32)); \
112 BUILD_IPIC_INTERNAL(32);
113 BUILD_IPIC_INTERNAL(64);
115 asmlinkage
void plat_irq_dispatch(void)
120 cause
= read_c0_cause() & read_c0_status() & ST0_IM
;
125 if (cause
& CAUSEF_IP7
)
127 if (cause
& CAUSEF_IP0
)
129 if (cause
& CAUSEF_IP1
)
131 if (cause
& CAUSEF_IP2
)
133 if (!is_ext_irq_cascaded
) {
134 if (cause
& CAUSEF_IP3
)
136 if (cause
& CAUSEF_IP4
)
138 if (cause
& CAUSEF_IP5
)
140 if (cause
& CAUSEF_IP6
)
/*
 * internal IRQs operations: only mask/unmask on PERF irq mask
 * register.
 */
150 static void bcm63xx_internal_irq_mask(struct irq_data
*d
)
152 internal_irq_mask(d
->irq
- IRQ_INTERNAL_BASE
);
155 static void bcm63xx_internal_irq_unmask(struct irq_data
*d
)
157 internal_irq_unmask(d
->irq
- IRQ_INTERNAL_BASE
);
/*
 * external IRQs operations: mask/unmask and clear on PERF external
 * irq control register.
 */
164 static void bcm63xx_external_irq_mask(struct irq_data
*d
)
166 unsigned int irq
= d
->irq
- IRQ_EXTERNAL_BASE
;
169 regaddr
= get_ext_irq_perf_reg(irq
);
170 reg
= bcm_perf_readl(regaddr
);
172 if (BCMCPU_IS_6348())
173 reg
&= ~EXTIRQ_CFG_MASK_6348(irq
% 4);
175 reg
&= ~EXTIRQ_CFG_MASK(irq
% 4);
177 bcm_perf_writel(reg
, regaddr
);
178 if (is_ext_irq_cascaded
)
179 internal_irq_mask(irq
+ ext_irq_start
);
182 static void bcm63xx_external_irq_unmask(struct irq_data
*d
)
184 unsigned int irq
= d
->irq
- IRQ_EXTERNAL_BASE
;
187 regaddr
= get_ext_irq_perf_reg(irq
);
188 reg
= bcm_perf_readl(regaddr
);
190 if (BCMCPU_IS_6348())
191 reg
|= EXTIRQ_CFG_MASK_6348(irq
% 4);
193 reg
|= EXTIRQ_CFG_MASK(irq
% 4);
195 bcm_perf_writel(reg
, regaddr
);
197 if (is_ext_irq_cascaded
)
198 internal_irq_unmask(irq
+ ext_irq_start
);
201 static void bcm63xx_external_irq_clear(struct irq_data
*d
)
203 unsigned int irq
= d
->irq
- IRQ_EXTERNAL_BASE
;
206 regaddr
= get_ext_irq_perf_reg(irq
);
207 reg
= bcm_perf_readl(regaddr
);
209 if (BCMCPU_IS_6348())
210 reg
|= EXTIRQ_CFG_CLEAR_6348(irq
% 4);
212 reg
|= EXTIRQ_CFG_CLEAR(irq
% 4);
214 bcm_perf_writel(reg
, regaddr
);
217 static int bcm63xx_external_irq_set_type(struct irq_data
*d
,
218 unsigned int flow_type
)
220 unsigned int irq
= d
->irq
- IRQ_EXTERNAL_BASE
;
222 int levelsense
, sense
, bothedge
;
224 flow_type
&= IRQ_TYPE_SENSE_MASK
;
226 if (flow_type
== IRQ_TYPE_NONE
)
227 flow_type
= IRQ_TYPE_LEVEL_LOW
;
229 levelsense
= sense
= bothedge
= 0;
231 case IRQ_TYPE_EDGE_BOTH
:
235 case IRQ_TYPE_EDGE_RISING
:
239 case IRQ_TYPE_EDGE_FALLING
:
242 case IRQ_TYPE_LEVEL_HIGH
:
247 case IRQ_TYPE_LEVEL_LOW
:
252 printk(KERN_ERR
"bogus flow type combination given !\n");
256 regaddr
= get_ext_irq_perf_reg(irq
);
257 reg
= bcm_perf_readl(regaddr
);
260 switch (bcm63xx_get_cpu_id()) {
263 reg
|= EXTIRQ_CFG_LEVELSENSE_6348(irq
);
265 reg
&= ~EXTIRQ_CFG_LEVELSENSE_6348(irq
);
267 reg
|= EXTIRQ_CFG_SENSE_6348(irq
);
269 reg
&= ~EXTIRQ_CFG_SENSE_6348(irq
);
271 reg
|= EXTIRQ_CFG_BOTHEDGE_6348(irq
);
273 reg
&= ~EXTIRQ_CFG_BOTHEDGE_6348(irq
);
284 reg
|= EXTIRQ_CFG_LEVELSENSE(irq
);
286 reg
&= ~EXTIRQ_CFG_LEVELSENSE(irq
);
288 reg
|= EXTIRQ_CFG_SENSE(irq
);
290 reg
&= ~EXTIRQ_CFG_SENSE(irq
);
292 reg
|= EXTIRQ_CFG_BOTHEDGE(irq
);
294 reg
&= ~EXTIRQ_CFG_BOTHEDGE(irq
);
300 bcm_perf_writel(reg
, regaddr
);
302 irqd_set_trigger_type(d
, flow_type
);
303 if (flow_type
& (IRQ_TYPE_LEVEL_LOW
| IRQ_TYPE_LEVEL_HIGH
))
304 __irq_set_handler_locked(d
->irq
, handle_level_irq
);
306 __irq_set_handler_locked(d
->irq
, handle_edge_irq
);
308 return IRQ_SET_MASK_OK_NOCOPY
;
311 static struct irq_chip bcm63xx_internal_irq_chip
= {
312 .name
= "bcm63xx_ipic",
313 .irq_mask
= bcm63xx_internal_irq_mask
,
314 .irq_unmask
= bcm63xx_internal_irq_unmask
,
317 static struct irq_chip bcm63xx_external_irq_chip
= {
318 .name
= "bcm63xx_epic",
319 .irq_ack
= bcm63xx_external_irq_clear
,
321 .irq_mask
= bcm63xx_external_irq_mask
,
322 .irq_unmask
= bcm63xx_external_irq_unmask
,
324 .irq_set_type
= bcm63xx_external_irq_set_type
,
327 static struct irqaction cpu_ip2_cascade_action
= {
328 .handler
= no_action
,
329 .name
= "cascade_ip2",
330 .flags
= IRQF_NO_THREAD
,
333 static struct irqaction cpu_ext_cascade_action
= {
334 .handler
= no_action
,
335 .name
= "cascade_extirq",
336 .flags
= IRQF_NO_THREAD
,
339 static void bcm63xx_init_irq(void)
343 irq_stat_addr
[0] = bcm63xx_regset_address(RSET_PERF
);
344 irq_mask_addr
[0] = bcm63xx_regset_address(RSET_PERF
);
346 switch (bcm63xx_get_cpu_id()) {
348 irq_stat_addr
[0] += PERF_IRQSTAT_3368_REG
;
349 irq_mask_addr
[0] += PERF_IRQMASK_3368_REG
;
352 ext_irq_cfg_reg1
= PERF_EXTIRQ_CFG_REG_3368
;
355 irq_stat_addr
[0] += PERF_IRQSTAT_6328_REG(0);
356 irq_mask_addr
[0] += PERF_IRQMASK_6328_REG(0);
359 is_ext_irq_cascaded
= 1;
360 ext_irq_start
= BCM_6328_EXT_IRQ0
- IRQ_INTERNAL_BASE
;
361 ext_irq_end
= BCM_6328_EXT_IRQ3
- IRQ_INTERNAL_BASE
;
362 ext_irq_cfg_reg1
= PERF_EXTIRQ_CFG_REG_6328
;
365 irq_stat_addr
[0] += PERF_IRQSTAT_6338_REG
;
366 irq_mask_addr
[0] += PERF_IRQMASK_6338_REG
;
369 ext_irq_cfg_reg1
= PERF_EXTIRQ_CFG_REG_6338
;
372 irq_stat_addr
[0] += PERF_IRQSTAT_6345_REG
;
373 irq_mask_addr
[0] += PERF_IRQMASK_6345_REG
;
376 ext_irq_cfg_reg1
= PERF_EXTIRQ_CFG_REG_6345
;
379 irq_stat_addr
[0] += PERF_IRQSTAT_6348_REG
;
380 irq_mask_addr
[0] += PERF_IRQMASK_6348_REG
;
383 ext_irq_cfg_reg1
= PERF_EXTIRQ_CFG_REG_6348
;
386 irq_stat_addr
[0] += PERF_IRQSTAT_6358_REG(0);
387 irq_mask_addr
[0] += PERF_IRQMASK_6358_REG(0);
390 is_ext_irq_cascaded
= 1;
391 ext_irq_start
= BCM_6358_EXT_IRQ0
- IRQ_INTERNAL_BASE
;
392 ext_irq_end
= BCM_6358_EXT_IRQ3
- IRQ_INTERNAL_BASE
;
393 ext_irq_cfg_reg1
= PERF_EXTIRQ_CFG_REG_6358
;
396 irq_stat_addr
[0] += PERF_IRQSTAT_6362_REG(0);
397 irq_mask_addr
[0] += PERF_IRQMASK_6362_REG(0);
400 is_ext_irq_cascaded
= 1;
401 ext_irq_start
= BCM_6362_EXT_IRQ0
- IRQ_INTERNAL_BASE
;
402 ext_irq_end
= BCM_6362_EXT_IRQ3
- IRQ_INTERNAL_BASE
;
403 ext_irq_cfg_reg1
= PERF_EXTIRQ_CFG_REG_6362
;
406 irq_stat_addr
[0] += PERF_IRQSTAT_6368_REG(0);
407 irq_mask_addr
[0] += PERF_IRQMASK_6368_REG(0);
410 is_ext_irq_cascaded
= 1;
411 ext_irq_start
= BCM_6368_EXT_IRQ0
- IRQ_INTERNAL_BASE
;
412 ext_irq_end
= BCM_6368_EXT_IRQ5
- IRQ_INTERNAL_BASE
;
413 ext_irq_cfg_reg1
= PERF_EXTIRQ_CFG_REG_6368
;
414 ext_irq_cfg_reg2
= PERF_EXTIRQ_CFG_REG2_6368
;
420 if (irq_bits
== 32) {
421 dispatch_internal
= __dispatch_internal_32
;
422 internal_irq_mask
= __internal_irq_mask_32
;
423 internal_irq_unmask
= __internal_irq_unmask_32
;
425 dispatch_internal
= __dispatch_internal_64
;
426 internal_irq_mask
= __internal_irq_mask_64
;
427 internal_irq_unmask
= __internal_irq_unmask_64
;
431 void __init
arch_init_irq(void)
437 for (i
= IRQ_INTERNAL_BASE
; i
< NR_IRQS
; ++i
)
438 irq_set_chip_and_handler(i
, &bcm63xx_internal_irq_chip
,
441 for (i
= IRQ_EXTERNAL_BASE
; i
< IRQ_EXTERNAL_BASE
+ ext_irq_count
; ++i
)
442 irq_set_chip_and_handler(i
, &bcm63xx_external_irq_chip
,
445 if (!is_ext_irq_cascaded
) {
446 for (i
= 3; i
< 3 + ext_irq_count
; ++i
)
447 setup_irq(MIPS_CPU_IRQ_BASE
+ i
, &cpu_ext_cascade_action
);
450 setup_irq(MIPS_CPU_IRQ_BASE
+ 2, &cpu_ip2_cascade_action
);