/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 * Copyright (C) 2008 Nicolas Schichan <nschichan@freebox.fr>
 */
10 #include <linux/kernel.h>
11 #include <linux/init.h>
12 #include <linux/interrupt.h>
13 #include <linux/module.h>
14 #include <linux/irq.h>
15 #include <asm/irq_cpu.h>
16 #include <asm/mipsregs.h>
17 #include <bcm63xx_cpu.h>
18 #include <bcm63xx_regs.h>
19 #include <bcm63xx_io.h>
20 #include <bcm63xx_irq.h>
22 static u32 irq_stat_addr
, irq_mask_addr
;
23 static void (*dispatch_internal
)(void);
24 static int is_ext_irq_cascaded
;
25 static unsigned int ext_irq_count
;
26 static unsigned int ext_irq_start
, ext_irq_end
;
27 static unsigned int ext_irq_cfg_reg1
, ext_irq_cfg_reg2
;
28 static void (*internal_irq_mask
)(unsigned int irq
);
29 static void (*internal_irq_unmask
)(unsigned int irq
);
32 static inline u32
get_ext_irq_perf_reg(int irq
)
35 return ext_irq_cfg_reg1
;
36 return ext_irq_cfg_reg2
;
39 static inline void handle_internal(int intbit
)
41 if (is_ext_irq_cascaded
&&
42 intbit
>= ext_irq_start
&& intbit
<= ext_irq_end
)
43 do_IRQ(intbit
- ext_irq_start
+ IRQ_EXTERNAL_BASE
);
45 do_IRQ(intbit
+ IRQ_INTERNAL_BASE
);
/*
 * dispatch internal devices IRQ (uart, enet, watchdog, ...). do not
 * prioritize any interrupt relatively to another. the static counter
 * will resume the loop where it ended the last time we left this
 * function.
 */
54 static void __dispatch_internal_32(void)
59 pending
= bcm_readl(irq_stat_addr
) & bcm_readl(irq_mask_addr
);
68 if (pending
& (1 << to_call
)) {
69 handle_internal(to_call
);
75 static void __dispatch_internal_64(void)
80 pending
= bcm_readq(irq_stat_addr
) & bcm_readq(irq_mask_addr
);
89 if (pending
& (1ull << to_call
)) {
90 handle_internal(to_call
);
96 asmlinkage
void plat_irq_dispatch(void)
101 cause
= read_c0_cause() & read_c0_status() & ST0_IM
;
106 if (cause
& CAUSEF_IP7
)
108 if (cause
& CAUSEF_IP0
)
110 if (cause
& CAUSEF_IP1
)
112 if (cause
& CAUSEF_IP2
)
114 if (!is_ext_irq_cascaded
) {
115 if (cause
& CAUSEF_IP3
)
117 if (cause
& CAUSEF_IP4
)
119 if (cause
& CAUSEF_IP5
)
121 if (cause
& CAUSEF_IP6
)
/*
 * internal IRQs operations: only mask/unmask on PERF irq mask
 * register.
 */
131 static void __internal_irq_mask_32(unsigned int irq
)
135 mask
= bcm_readl(irq_mask_addr
);
137 bcm_writel(mask
, irq_mask_addr
);
140 static void __internal_irq_mask_64(unsigned int irq
)
144 mask
= bcm_readq(irq_mask_addr
);
145 mask
&= ~(1ull << irq
);
146 bcm_writeq(mask
, irq_mask_addr
);
149 static void __internal_irq_unmask_32(unsigned int irq
)
153 mask
= bcm_readl(irq_mask_addr
);
155 bcm_writel(mask
, irq_mask_addr
);
158 static void __internal_irq_unmask_64(unsigned int irq
)
162 mask
= bcm_readq(irq_mask_addr
);
163 mask
|= (1ull << irq
);
164 bcm_writeq(mask
, irq_mask_addr
);
167 static void bcm63xx_internal_irq_mask(struct irq_data
*d
)
169 internal_irq_mask(d
->irq
- IRQ_INTERNAL_BASE
);
172 static void bcm63xx_internal_irq_unmask(struct irq_data
*d
)
174 internal_irq_unmask(d
->irq
- IRQ_INTERNAL_BASE
);
/*
 * external IRQs operations: mask/unmask and clear on PERF external
 * irq control register.
 */
181 static void bcm63xx_external_irq_mask(struct irq_data
*d
)
183 unsigned int irq
= d
->irq
- IRQ_EXTERNAL_BASE
;
186 regaddr
= get_ext_irq_perf_reg(irq
);
187 reg
= bcm_perf_readl(regaddr
);
189 if (BCMCPU_IS_6348())
190 reg
&= ~EXTIRQ_CFG_MASK_6348(irq
% 4);
192 reg
&= ~EXTIRQ_CFG_MASK(irq
% 4);
194 bcm_perf_writel(reg
, regaddr
);
195 if (is_ext_irq_cascaded
)
196 internal_irq_mask(irq
+ ext_irq_start
);
199 static void bcm63xx_external_irq_unmask(struct irq_data
*d
)
201 unsigned int irq
= d
->irq
- IRQ_EXTERNAL_BASE
;
204 regaddr
= get_ext_irq_perf_reg(irq
);
205 reg
= bcm_perf_readl(regaddr
);
207 if (BCMCPU_IS_6348())
208 reg
|= EXTIRQ_CFG_MASK_6348(irq
% 4);
210 reg
|= EXTIRQ_CFG_MASK(irq
% 4);
212 bcm_perf_writel(reg
, regaddr
);
214 if (is_ext_irq_cascaded
)
215 internal_irq_unmask(irq
+ ext_irq_start
);
218 static void bcm63xx_external_irq_clear(struct irq_data
*d
)
220 unsigned int irq
= d
->irq
- IRQ_EXTERNAL_BASE
;
223 regaddr
= get_ext_irq_perf_reg(irq
);
224 reg
= bcm_perf_readl(regaddr
);
226 if (BCMCPU_IS_6348())
227 reg
|= EXTIRQ_CFG_CLEAR_6348(irq
% 4);
229 reg
|= EXTIRQ_CFG_CLEAR(irq
% 4);
231 bcm_perf_writel(reg
, regaddr
);
234 static int bcm63xx_external_irq_set_type(struct irq_data
*d
,
235 unsigned int flow_type
)
237 unsigned int irq
= d
->irq
- IRQ_EXTERNAL_BASE
;
239 int levelsense
, sense
, bothedge
;
241 flow_type
&= IRQ_TYPE_SENSE_MASK
;
243 if (flow_type
== IRQ_TYPE_NONE
)
244 flow_type
= IRQ_TYPE_LEVEL_LOW
;
246 levelsense
= sense
= bothedge
= 0;
248 case IRQ_TYPE_EDGE_BOTH
:
252 case IRQ_TYPE_EDGE_RISING
:
256 case IRQ_TYPE_EDGE_FALLING
:
259 case IRQ_TYPE_LEVEL_HIGH
:
264 case IRQ_TYPE_LEVEL_LOW
:
269 printk(KERN_ERR
"bogus flow type combination given !\n");
273 regaddr
= get_ext_irq_perf_reg(irq
);
274 reg
= bcm_perf_readl(regaddr
);
277 switch (bcm63xx_get_cpu_id()) {
280 reg
|= EXTIRQ_CFG_LEVELSENSE_6348(irq
);
282 reg
&= ~EXTIRQ_CFG_LEVELSENSE_6348(irq
);
284 reg
|= EXTIRQ_CFG_SENSE_6348(irq
);
286 reg
&= ~EXTIRQ_CFG_SENSE_6348(irq
);
288 reg
|= EXTIRQ_CFG_BOTHEDGE_6348(irq
);
290 reg
&= ~EXTIRQ_CFG_BOTHEDGE_6348(irq
);
301 reg
|= EXTIRQ_CFG_LEVELSENSE(irq
);
303 reg
&= ~EXTIRQ_CFG_LEVELSENSE(irq
);
305 reg
|= EXTIRQ_CFG_SENSE(irq
);
307 reg
&= ~EXTIRQ_CFG_SENSE(irq
);
309 reg
|= EXTIRQ_CFG_BOTHEDGE(irq
);
311 reg
&= ~EXTIRQ_CFG_BOTHEDGE(irq
);
317 bcm_perf_writel(reg
, regaddr
);
319 irqd_set_trigger_type(d
, flow_type
);
320 if (flow_type
& (IRQ_TYPE_LEVEL_LOW
| IRQ_TYPE_LEVEL_HIGH
))
321 __irq_set_handler_locked(d
->irq
, handle_level_irq
);
323 __irq_set_handler_locked(d
->irq
, handle_edge_irq
);
325 return IRQ_SET_MASK_OK_NOCOPY
;
328 static struct irq_chip bcm63xx_internal_irq_chip
= {
329 .name
= "bcm63xx_ipic",
330 .irq_mask
= bcm63xx_internal_irq_mask
,
331 .irq_unmask
= bcm63xx_internal_irq_unmask
,
334 static struct irq_chip bcm63xx_external_irq_chip
= {
335 .name
= "bcm63xx_epic",
336 .irq_ack
= bcm63xx_external_irq_clear
,
338 .irq_mask
= bcm63xx_external_irq_mask
,
339 .irq_unmask
= bcm63xx_external_irq_unmask
,
341 .irq_set_type
= bcm63xx_external_irq_set_type
,
344 static struct irqaction cpu_ip2_cascade_action
= {
345 .handler
= no_action
,
346 .name
= "cascade_ip2",
347 .flags
= IRQF_NO_THREAD
,
350 static struct irqaction cpu_ext_cascade_action
= {
351 .handler
= no_action
,
352 .name
= "cascade_extirq",
353 .flags
= IRQF_NO_THREAD
,
356 static void bcm63xx_init_irq(void)
360 irq_stat_addr
= bcm63xx_regset_address(RSET_PERF
);
361 irq_mask_addr
= bcm63xx_regset_address(RSET_PERF
);
363 switch (bcm63xx_get_cpu_id()) {
365 irq_stat_addr
+= PERF_IRQSTAT_3368_REG
;
366 irq_mask_addr
+= PERF_IRQMASK_3368_REG
;
369 ext_irq_cfg_reg1
= PERF_EXTIRQ_CFG_REG_3368
;
372 irq_stat_addr
+= PERF_IRQSTAT_6328_REG
;
373 irq_mask_addr
+= PERF_IRQMASK_6328_REG
;
376 is_ext_irq_cascaded
= 1;
377 ext_irq_start
= BCM_6328_EXT_IRQ0
- IRQ_INTERNAL_BASE
;
378 ext_irq_end
= BCM_6328_EXT_IRQ3
- IRQ_INTERNAL_BASE
;
379 ext_irq_cfg_reg1
= PERF_EXTIRQ_CFG_REG_6328
;
382 irq_stat_addr
+= PERF_IRQSTAT_6338_REG
;
383 irq_mask_addr
+= PERF_IRQMASK_6338_REG
;
386 ext_irq_cfg_reg1
= PERF_EXTIRQ_CFG_REG_6338
;
389 irq_stat_addr
+= PERF_IRQSTAT_6345_REG
;
390 irq_mask_addr
+= PERF_IRQMASK_6345_REG
;
393 ext_irq_cfg_reg1
= PERF_EXTIRQ_CFG_REG_6345
;
396 irq_stat_addr
+= PERF_IRQSTAT_6348_REG
;
397 irq_mask_addr
+= PERF_IRQMASK_6348_REG
;
400 ext_irq_cfg_reg1
= PERF_EXTIRQ_CFG_REG_6348
;
403 irq_stat_addr
+= PERF_IRQSTAT_6358_REG
;
404 irq_mask_addr
+= PERF_IRQMASK_6358_REG
;
407 is_ext_irq_cascaded
= 1;
408 ext_irq_start
= BCM_6358_EXT_IRQ0
- IRQ_INTERNAL_BASE
;
409 ext_irq_end
= BCM_6358_EXT_IRQ3
- IRQ_INTERNAL_BASE
;
410 ext_irq_cfg_reg1
= PERF_EXTIRQ_CFG_REG_6358
;
413 irq_stat_addr
+= PERF_IRQSTAT_6362_REG
;
414 irq_mask_addr
+= PERF_IRQMASK_6362_REG
;
417 is_ext_irq_cascaded
= 1;
418 ext_irq_start
= BCM_6362_EXT_IRQ0
- IRQ_INTERNAL_BASE
;
419 ext_irq_end
= BCM_6362_EXT_IRQ3
- IRQ_INTERNAL_BASE
;
420 ext_irq_cfg_reg1
= PERF_EXTIRQ_CFG_REG_6362
;
423 irq_stat_addr
+= PERF_IRQSTAT_6368_REG
;
424 irq_mask_addr
+= PERF_IRQMASK_6368_REG
;
427 is_ext_irq_cascaded
= 1;
428 ext_irq_start
= BCM_6368_EXT_IRQ0
- IRQ_INTERNAL_BASE
;
429 ext_irq_end
= BCM_6368_EXT_IRQ5
- IRQ_INTERNAL_BASE
;
430 ext_irq_cfg_reg1
= PERF_EXTIRQ_CFG_REG_6368
;
431 ext_irq_cfg_reg2
= PERF_EXTIRQ_CFG_REG2_6368
;
437 if (irq_bits
== 32) {
438 dispatch_internal
= __dispatch_internal_32
;
439 internal_irq_mask
= __internal_irq_mask_32
;
440 internal_irq_unmask
= __internal_irq_unmask_32
;
442 dispatch_internal
= __dispatch_internal_64
;
443 internal_irq_mask
= __internal_irq_mask_64
;
444 internal_irq_unmask
= __internal_irq_unmask_64
;
448 void __init
arch_init_irq(void)
454 for (i
= IRQ_INTERNAL_BASE
; i
< NR_IRQS
; ++i
)
455 irq_set_chip_and_handler(i
, &bcm63xx_internal_irq_chip
,
458 for (i
= IRQ_EXTERNAL_BASE
; i
< IRQ_EXTERNAL_BASE
+ ext_irq_count
; ++i
)
459 irq_set_chip_and_handler(i
, &bcm63xx_external_irq_chip
,
462 if (!is_ext_irq_cascaded
) {
463 for (i
= 3; i
< 3 + ext_irq_count
; ++i
)
464 setup_irq(MIPS_CPU_IRQ_BASE
+ i
, &cpu_ext_cascade_action
);
467 setup_irq(MIPS_CPU_IRQ_BASE
+ 2, &cpu_ip2_cascade_action
);