/*
 * Shared interrupt handling code for IPR and INTC2 types of IRQs.
 *
 * Copyright (C) 2007, 2008 Magnus Damm
 * Copyright (C) 2009, 2010 Paul Mundt
 *
 * Based on intc2.c and ipr.c
 *
 * Copyright (C) 1999 Niibe Yutaka & Takeshi Yaegashi
 * Copyright (C) 2000 Kazumoto Kojima
 * Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
 * Copyright (C) 2003 Takashi Kusuda <kusuda-takashi@hitachi-ul.co.jp>
 * Copyright (C) 2005, 2006 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/sh_intc.h>
#include <linux/sysdev.h>
#include <linux/list.h>
#include <linux/topology.h>
#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <asm/sizes.h>
#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \
	((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \
	 ((addr_e) << 16) | ((addr_d) << 24))

#define _INTC_SHIFT(h)	(h & 0x1f)
#define _INTC_WIDTH(h)	((h >> 5) & 0xf)
#define _INTC_FN(h)	((h >> 9) & 0xf)
#define _INTC_MODE(h)	((h >> 13) & 0x7)
#define _INTC_ADDR_E(h)	((h >> 16) & 0xff)
#define _INTC_ADDR_D(h)	((h >> 24) & 0xff)
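
/*
 * Illustrative example (not part of the original code): _INTC_MK() packs the
 * register indices and field geometry into one handle word, and the _INTC_*()
 * accessors above unpack it again. With made-up values:
 *
 *	h = _INTC_MK(8, 0, 2, 2, 1, 5);
 *
 *	_INTC_FN(h)     == 8	(accessor function index)
 *	_INTC_MODE(h)   == 0	(MODE_ENABLE_REG, see below)
 *	_INTC_ADDR_E(h) == 2	(enable register index into d->reg[])
 *	_INTC_ADDR_D(h) == 2	(disable register index into d->reg[])
 *	_INTC_WIDTH(h)  == 1	(1-bit wide field)
 *	_INTC_SHIFT(h)  == 5	(field starts at bit 5)
 */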

struct intc_handle_int {
	unsigned int irq;
	unsigned long handle;
};

struct intc_window {
	phys_addr_t phys;
	void __iomem *virt;
	unsigned long size;
};

struct intc_desc_int {
	struct list_head list;
	struct sys_device sysdev;
	pm_message_t state;
	unsigned long *reg;
#ifdef CONFIG_SMP
	unsigned long *smp;
#endif
	unsigned int nr_reg;
	struct intc_handle_int *prio;
	unsigned int nr_prio;
	struct intc_handle_int *sense;
	unsigned int nr_sense;
	struct intc_window *window;
	unsigned int nr_windows;
	struct irq_chip chip;
};

static LIST_HEAD(intc_list);

/*
 * The intc_irq_map provides a global map of bound IRQ vectors for a
 * given platform. Allocation of IRQs is either static through the CPU
 * vector map, or dynamic in the case of board mux vectors or MSI.
 *
 * As this is a central point for all IRQ controllers on the system,
 * each of the available sources is mapped out here. This, combined with
 * sparseirq, makes it quite trivial to keep the vector map tightly packed
 * when dynamically creating IRQs, as well as tying in to otherwise
 * unused irq_desc positions in the sparse array.
 */
static DECLARE_BITMAP(intc_irq_map, NR_IRQS);
static DEFINE_SPINLOCK(vector_lock);

#ifdef CONFIG_SMP
#define IS_SMP(x)		(x).smp
#define INTC_REG(d, x, c)	((d)->reg[(x)] + (((d)->smp[(x)] & 0xff) * (c)))
#define SMP_NR(d, x)		(((d)->smp[(x)] >> 8) ? ((d)->smp[(x)] >> 8) : 1)
#else
#define IS_SMP(x)		0
#define INTC_REG(d, x, c)	((d)->reg[(x)])
#define SMP_NR(d, x)		1
#endif
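
/*
 * Illustrative sketch with assumed values (not from the original code): on
 * SMP the low byte of d->smp[] holds the per-CPU byte stride and the high
 * byte the number of per-CPU register copies, so for a register repeated
 * every 4 bytes for two CPUs:
 *
 *	d->reg[x] = base;		first (CPU 0) copy
 *	d->smp[x] = (2 << 8) | 4;	2 copies, 4 bytes apart
 *
 *	INTC_REG(d, x, 0) == base	CPU 0 copy
 *	INTC_REG(d, x, 1) == base + 4	CPU 1 copy
 *	SMP_NR(d, x)      == 2
 *
 * On UP builds the macros collapse to the plain register and a count of 1.
 */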

static unsigned int intc_prio_level[NR_IRQS];	/* for now */
static unsigned int default_prio_level = 2;	/* 2 - 16 */
static unsigned long ack_handle[NR_IRQS];
#ifdef CONFIG_INTC_BALANCING
static unsigned long dist_handle[NR_IRQS];
#endif

static inline struct intc_desc_int *get_intc_desc(unsigned int irq)
{
	struct irq_chip *chip = get_irq_chip(irq);

	return container_of(chip, struct intc_desc_int, chip);
}

static unsigned long intc_phys_to_virt(struct intc_desc_int *d,
				       unsigned long address)
{
	struct intc_window *window;
	int k;

	/* scan through physical windows and convert address */
	for (k = 0; k < d->nr_windows; k++) {
		window = d->window + k;

		if (address < window->phys)
			continue;

		if (address >= (window->phys + window->size))
			continue;

		address -= window->phys;
		address += (unsigned long)window->virt;

		return address;
	}

	/* no windows defined, register must be 1:1 mapped virt:phys */
	return address;
}

static unsigned int intc_get_reg(struct intc_desc_int *d, unsigned long address)
{
	unsigned int k;

	address = intc_phys_to_virt(d, address);

	for (k = 0; k < d->nr_reg; k++) {
		if (d->reg[k] == address)
			return k;
	}

	BUG();
	return 0;
}

static inline unsigned int set_field(unsigned int value,
				     unsigned int field_value,
				     unsigned int handle)
{
	unsigned int width = _INTC_WIDTH(handle);
	unsigned int shift = _INTC_SHIFT(handle);

	value &= ~(((1 << width) - 1) << shift);
	value |= field_value << shift;
	return value;
}
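
/*
 * Example (illustrative only): for a handle describing a 4-bit field at
 * shift 8, set_field() clears those four bits and inserts the new value:
 *
 *	set_field(0x0000f530, 0xa, handle) == 0x0000fa30
 *
 * The accessors below use it either to build a register value from scratch
 * (write_*) or to update a freshly read register image (modify_*).
 */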

static void write_8(unsigned long addr, unsigned long h, unsigned long data)
{
	__raw_writeb(set_field(0, data, h), addr);
	(void)__raw_readb(addr);	/* Defeat write posting */
}

static void write_16(unsigned long addr, unsigned long h, unsigned long data)
{
	__raw_writew(set_field(0, data, h), addr);
	(void)__raw_readw(addr);	/* Defeat write posting */
}

static void write_32(unsigned long addr, unsigned long h, unsigned long data)
{
	__raw_writel(set_field(0, data, h), addr);
	(void)__raw_readl(addr);	/* Defeat write posting */
}

static void modify_8(unsigned long addr, unsigned long h, unsigned long data)
{
	unsigned long flags;

	local_irq_save(flags);
	__raw_writeb(set_field(__raw_readb(addr), data, h), addr);
	(void)__raw_readb(addr);	/* Defeat write posting */
	local_irq_restore(flags);
}

static void modify_16(unsigned long addr, unsigned long h, unsigned long data)
{
	unsigned long flags;

	local_irq_save(flags);
	__raw_writew(set_field(__raw_readw(addr), data, h), addr);
	(void)__raw_readw(addr);	/* Defeat write posting */
	local_irq_restore(flags);
}

static void modify_32(unsigned long addr, unsigned long h, unsigned long data)
{
	unsigned long flags;

	local_irq_save(flags);
	__raw_writel(set_field(__raw_readl(addr), data, h), addr);
	(void)__raw_readl(addr);	/* Defeat write posting */
	local_irq_restore(flags);
}

enum {	REG_FN_ERR = 0, REG_FN_WRITE_BASE = 1, REG_FN_MODIFY_BASE = 5 };

static void (*intc_reg_fns[])(unsigned long addr,
			      unsigned long h,
			      unsigned long data) = {
	[REG_FN_WRITE_BASE + 0] = write_8,
	[REG_FN_WRITE_BASE + 1] = write_16,
	[REG_FN_WRITE_BASE + 3] = write_32,
	[REG_FN_MODIFY_BASE + 0] = modify_8,
	[REG_FN_MODIFY_BASE + 1] = modify_16,
	[REG_FN_MODIFY_BASE + 3] = modify_32,
};
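
/*
 * Note on the indexing above (illustrative): the register width selects the
 * accessor within each base, fn = BASE + (width_in_bytes - 1), so 8-bit
 * registers use BASE + 0, 16-bit BASE + 1 and 32-bit BASE + 3; the BASE + 2
 * (24-bit) slot is deliberately left empty. For example, a 32-bit
 * read-modify-write register resolves as:
 *
 *	fn = REG_FN_MODIFY_BASE + (4 - 1);
 *	intc_reg_fns[fn](addr, handle, data);	calls modify_32()
 */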

enum {	MODE_ENABLE_REG = 0, /* Bit(s) set -> interrupt enabled */
	MODE_MASK_REG,       /* Bit(s) set -> interrupt disabled */
	MODE_DUAL_REG,       /* Two registers, set bit to enable / disable */
	MODE_PRIO_REG,       /* Priority value written to enable interrupt */
	MODE_PCLR_REG,       /* Above plus all bits set to disable interrupt */
};

static void intc_mode_field(unsigned long addr,
			    unsigned long handle,
			    void (*fn)(unsigned long,
				       unsigned long,
				       unsigned long),
			    unsigned int irq)
{
	fn(addr, handle, ((1 << _INTC_WIDTH(handle)) - 1));
}

static void intc_mode_zero(unsigned long addr,
			   unsigned long handle,
			   void (*fn)(unsigned long,
				      unsigned long,
				      unsigned long),
			   unsigned int irq)
{
	fn(addr, handle, 0);
}

static void intc_mode_prio(unsigned long addr,
			   unsigned long handle,
			   void (*fn)(unsigned long,
				      unsigned long,
				      unsigned long),
			   unsigned int irq)
{
	fn(addr, handle, intc_prio_level[irq]);
}

static void (*intc_enable_fns[])(unsigned long addr,
				 unsigned long handle,
				 void (*fn)(unsigned long,
					    unsigned long,
					    unsigned long),
				 unsigned int irq) = {
	[MODE_ENABLE_REG] = intc_mode_field,
	[MODE_MASK_REG] = intc_mode_zero,
	[MODE_DUAL_REG] = intc_mode_field,
	[MODE_PRIO_REG] = intc_mode_prio,
	[MODE_PCLR_REG] = intc_mode_prio,
};

static void (*intc_disable_fns[])(unsigned long addr,
				  unsigned long handle,
				  void (*fn)(unsigned long,
					     unsigned long,
					     unsigned long),
				  unsigned int irq) = {
	[MODE_ENABLE_REG] = intc_mode_zero,
	[MODE_MASK_REG] = intc_mode_field,
	[MODE_DUAL_REG] = intc_mode_field,
	[MODE_PRIO_REG] = intc_mode_zero,
	[MODE_PCLR_REG] = intc_mode_field,
};

#ifdef CONFIG_INTC_BALANCING
static inline void intc_balancing_enable(unsigned int irq)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned long handle = dist_handle[irq];
	unsigned long addr;

	if (irq_balancing_disabled(irq) || !handle)
		return;

	addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
	intc_reg_fns[_INTC_FN(handle)](addr, handle, 1);
}

static inline void intc_balancing_disable(unsigned int irq)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned long handle = dist_handle[irq];
	unsigned long addr;

	if (irq_balancing_disabled(irq) || !handle)
		return;

	addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
	intc_reg_fns[_INTC_FN(handle)](addr, handle, 0);
}

static unsigned int intc_dist_data(struct intc_desc *desc,
				   struct intc_desc_int *d,
				   intc_enum enum_id)
{
	struct intc_mask_reg *mr = desc->hw.mask_regs;
	unsigned int i, j, fn, mode;
	unsigned long reg_e, reg_d;

	for (i = 0; mr && enum_id && i < desc->hw.nr_mask_regs; i++) {
		mr = desc->hw.mask_regs + i;

		/*
		 * Skip this entry if there's no auto-distribution
		 * register associated with it.
		 */
		if (!mr->dist_reg)
			continue;

		for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
			if (mr->enum_ids[j] != enum_id)
				continue;

			fn = REG_FN_MODIFY_BASE;
			mode = MODE_ENABLE_REG;
			reg_e = mr->dist_reg;
			reg_d = mr->dist_reg;

			fn += (mr->reg_width >> 3) - 1;
			return _INTC_MK(fn, mode,
					intc_get_reg(d, reg_e),
					intc_get_reg(d, reg_d),
					1,
					(mr->reg_width - 1) - j);
		}
	}

	/*
	 * It's possible we've gotten here with no distribution options
	 * available for the IRQ in question, so we just skip over those.
	 */
	return 0;
}
#else
static inline void intc_balancing_enable(unsigned int irq)
{
}

static inline void intc_balancing_disable(unsigned int irq)
{
}
#endif

static inline void _intc_enable(unsigned int irq, unsigned long handle)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned long addr;
	unsigned int cpu;

	for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
#ifdef CONFIG_SMP
		if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity))
			continue;
#endif
		addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
		intc_enable_fns[_INTC_MODE(handle)](addr, handle,
				intc_reg_fns[_INTC_FN(handle)], irq);
	}

	intc_balancing_enable(irq);
}

static void intc_enable(unsigned int irq)
{
	_intc_enable(irq, (unsigned long)get_irq_chip_data(irq));
}

static void intc_disable(unsigned int irq)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned long handle = (unsigned long)get_irq_chip_data(irq);
	unsigned long addr;
	unsigned int cpu;

	intc_balancing_disable(irq);

	for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
#ifdef CONFIG_SMP
		if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity))
			continue;
#endif
		addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
		intc_disable_fns[_INTC_MODE(handle)](addr, handle,
				intc_reg_fns[_INTC_FN(handle)], irq);
	}
}

static void (*intc_enable_noprio_fns[])(unsigned long addr,
					unsigned long handle,
					void (*fn)(unsigned long,
						   unsigned long,
						   unsigned long),
					unsigned int irq) = {
	[MODE_ENABLE_REG] = intc_mode_field,
	[MODE_MASK_REG] = intc_mode_zero,
	[MODE_DUAL_REG] = intc_mode_field,
	[MODE_PRIO_REG] = intc_mode_field,
	[MODE_PCLR_REG] = intc_mode_field,
};

static void intc_enable_disable(struct intc_desc_int *d,
				unsigned long handle, int do_enable)
{
	unsigned long addr;
	unsigned int cpu;
	void (*fn)(unsigned long, unsigned long,
		   void (*)(unsigned long, unsigned long, unsigned long),
		   unsigned int);

	if (do_enable) {
		for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
			addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
			fn = intc_enable_noprio_fns[_INTC_MODE(handle)];
			fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
		}
	} else {
		for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
			addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
			fn = intc_disable_fns[_INTC_MODE(handle)];
			fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
		}
	}
}

static int intc_set_wake(unsigned int irq, unsigned int on)
{
	return 0; /* allow wakeup, but setup hardware in intc_suspend() */
}

#ifdef CONFIG_SMP
/*
 * This is called with the irq desc lock held, so we don't require any
 * additional locking here at the intc desc level. The affinity mask is
 * later tested in the enable/disable paths.
 */
static int intc_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	if (!cpumask_intersects(cpumask, cpu_online_mask))
		return -1;

	cpumask_copy(irq_to_desc(irq)->affinity, cpumask);

	return 0;
}
#endif

static void intc_mask_ack(unsigned int irq)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned long handle = ack_handle[irq];
	unsigned long addr;

	intc_disable(irq);

	/* read register and write zero only to the associated bit */
	if (handle) {
		addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
		switch (_INTC_FN(handle)) {
		case REG_FN_MODIFY_BASE + 0:	/* 8bit */
			__raw_readb(addr);
			__raw_writeb(0xff ^ set_field(0, 1, handle), addr);
			break;
		case REG_FN_MODIFY_BASE + 1:	/* 16bit */
			__raw_readw(addr);
			__raw_writew(0xffff ^ set_field(0, 1, handle), addr);
			break;
		case REG_FN_MODIFY_BASE + 3:	/* 32bit */
			__raw_readl(addr);
			__raw_writel(0xffffffff ^ set_field(0, 1, handle), addr);
			break;
		default:
			BUG();
			break;
		}
	}
}

static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp,
					     unsigned int nr_hp,
					     unsigned int irq)
{
	int i;

	/*
	 * this doesn't scale well, but...
	 *
	 * this function should only be used for certain uncommon
	 * operations such as intc_set_priority() and intc_set_sense()
	 * and in those rare cases performance doesn't matter that much.
	 * keeping the memory footprint low is more important.
	 *
	 * one rather simple way to speed this up and still keep the
	 * memory footprint down is to make sure the array is sorted
	 * and then perform a bisect to lookup the irq (see the sketch
	 * following this function).
	 */
	for (i = 0; i < nr_hp; i++) {
		if ((hp + i)->irq != irq)
			continue;

		return hp + i;
	}

	return NULL;
}
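
/*
 * Illustrative sketch only (not wired up anywhere): if the handle arrays
 * were kept sorted by hp->irq at registration time, the linear scan above
 * could be replaced with a bisect along these lines:
 *
 *	static struct intc_handle_int *
 *	intc_find_irq_sorted(struct intc_handle_int *hp,
 *			     unsigned int nr_hp, unsigned int irq)
 *	{
 *		unsigned int lo = 0, hi = nr_hp;
 *
 *		while (lo < hi) {
 *			unsigned int mid = lo + (hi - lo) / 2;
 *
 *			if (hp[mid].irq == irq)
 *				return hp + mid;
 *			if (hp[mid].irq < irq)
 *				lo = mid + 1;
 *			else
 *				hi = mid;
 *		}
 *		return NULL;
 *	}
 */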

int intc_set_priority(unsigned int irq, unsigned int prio)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	struct intc_handle_int *ihp;

	if (!intc_prio_level[irq] || prio <= 1)
		return -EINVAL;

	ihp = intc_find_irq(d->prio, d->nr_prio, irq);
	if (ihp) {
		if (prio >= (1 << _INTC_WIDTH(ihp->handle)))
			return -EINVAL;

		intc_prio_level[irq] = prio;

		/*
		 * only set secondary masking method directly
		 * primary masking method is using intc_prio_level[irq]
		 * priority level will be set during next enable()
		 */
		if (_INTC_FN(ihp->handle) != REG_FN_ERR)
			_intc_enable(irq, ihp->handle);
	}

	return 0;
}

#define VALID(x) ((x) | 0x80)

static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = {
	[IRQ_TYPE_EDGE_FALLING] = VALID(0),
	[IRQ_TYPE_EDGE_RISING] = VALID(1),
	[IRQ_TYPE_LEVEL_LOW] = VALID(2),
	/* SH7706, SH7707 and SH7709 do not support high level triggered */
#if !defined(CONFIG_CPU_SUBTYPE_SH7706) && \
    !defined(CONFIG_CPU_SUBTYPE_SH7707) && \
    !defined(CONFIG_CPU_SUBTYPE_SH7709)
	[IRQ_TYPE_LEVEL_HIGH] = VALID(3),
#endif
};
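
/*
 * Example (illustrative): VALID() merely tags supported sense values with
 * bit 7 so that an unset table slot reads back as 0:
 *
 *	intc_irq_sense_table[IRQ_TYPE_LEVEL_LOW] == VALID(2) == 0x82
 *	intc_irq_sense_table[IRQ_TYPE_NONE]      == 0	(unsupported)
 *
 * intc_set_sense() below uses the zero entry to reject trigger types the
 * hardware cannot express.
 */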

static int intc_set_sense(unsigned int irq, unsigned int type)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned char value = intc_irq_sense_table[type & IRQ_TYPE_SENSE_MASK];
	struct intc_handle_int *ihp;
	unsigned long addr;

	if (!value)
		return -EINVAL;

	ihp = intc_find_irq(d->sense, d->nr_sense, irq);
	if (ihp) {
		addr = INTC_REG(d, _INTC_ADDR_E(ihp->handle), 0);
		intc_reg_fns[_INTC_FN(ihp->handle)](addr, ihp->handle, value);
	}

	return 0;
}

static intc_enum __init intc_grp_id(struct intc_desc *desc,
				    intc_enum enum_id)
{
	struct intc_group *g = desc->hw.groups;
	unsigned int i, j;

	for (i = 0; g && enum_id && i < desc->hw.nr_groups; i++) {
		g = desc->hw.groups + i;

		for (j = 0; g->enum_ids[j]; j++) {
			if (g->enum_ids[j] != enum_id)
				continue;

			return g->enum_id;
		}
	}

	return 0;
}

static unsigned int __init _intc_mask_data(struct intc_desc *desc,
					   struct intc_desc_int *d,
					   intc_enum enum_id,
					   unsigned int *reg_idx,
					   unsigned int *fld_idx)
{
	struct intc_mask_reg *mr = desc->hw.mask_regs;
	unsigned int fn, mode;
	unsigned long reg_e, reg_d;

	while (mr && enum_id && *reg_idx < desc->hw.nr_mask_regs) {
		mr = desc->hw.mask_regs + *reg_idx;

		for (; *fld_idx < ARRAY_SIZE(mr->enum_ids); (*fld_idx)++) {
			if (mr->enum_ids[*fld_idx] != enum_id)
				continue;

			if (mr->set_reg && mr->clr_reg) {
				fn = REG_FN_WRITE_BASE;
				mode = MODE_DUAL_REG;
				reg_e = mr->clr_reg;
				reg_d = mr->set_reg;
			} else {
				fn = REG_FN_MODIFY_BASE;
				if (mr->set_reg) {
					mode = MODE_ENABLE_REG;
					reg_e = mr->set_reg;
					reg_d = mr->set_reg;
				} else {
					mode = MODE_MASK_REG;
					reg_e = mr->clr_reg;
					reg_d = mr->clr_reg;
				}
			}

			fn += (mr->reg_width >> 3) - 1;
			return _INTC_MK(fn, mode,
					intc_get_reg(d, reg_e),
					intc_get_reg(d, reg_d),
					1,
					(mr->reg_width - 1) - *fld_idx);
		}

		*fld_idx = 0;
		(*reg_idx)++;
	}

	return 0;
}

static unsigned int __init intc_mask_data(struct intc_desc *desc,
					  struct intc_desc_int *d,
					  intc_enum enum_id, int do_grps)
{
	unsigned int i = 0;
	unsigned int j = 0;
	unsigned int ret;

	ret = _intc_mask_data(desc, d, enum_id, &i, &j);
	if (ret)
		return ret;

	if (do_grps)
		return intc_mask_data(desc, d, intc_grp_id(desc, enum_id), 0);

	return 0;
}

static unsigned int __init _intc_prio_data(struct intc_desc *desc,
					   struct intc_desc_int *d,
					   intc_enum enum_id,
					   unsigned int *reg_idx,
					   unsigned int *fld_idx)
{
	struct intc_prio_reg *pr = desc->hw.prio_regs;
	unsigned int fn, n, mode, bit;
	unsigned long reg_e, reg_d;

	while (pr && enum_id && *reg_idx < desc->hw.nr_prio_regs) {
		pr = desc->hw.prio_regs + *reg_idx;

		for (; *fld_idx < ARRAY_SIZE(pr->enum_ids); (*fld_idx)++) {
			if (pr->enum_ids[*fld_idx] != enum_id)
				continue;

			if (pr->set_reg && pr->clr_reg) {
				fn = REG_FN_WRITE_BASE;
				mode = MODE_PCLR_REG;
				reg_e = pr->set_reg;
				reg_d = pr->clr_reg;
			} else {
				fn = REG_FN_MODIFY_BASE;
				mode = MODE_PRIO_REG;
				reg_e = pr->set_reg;
				reg_d = pr->set_reg;
			}

			fn += (pr->reg_width >> 3) - 1;
			n = *fld_idx + 1;

			BUG_ON(n * pr->field_width > pr->reg_width);

			bit = pr->reg_width - (n * pr->field_width);

			return _INTC_MK(fn, mode,
					intc_get_reg(d, reg_e),
					intc_get_reg(d, reg_d),
					pr->field_width, bit);
		}

		*fld_idx = 0;
		(*reg_idx)++;
	}

	return 0;
}

static unsigned int __init intc_prio_data(struct intc_desc *desc,
					  struct intc_desc_int *d,
					  intc_enum enum_id, int do_grps)
{
	unsigned int i = 0;
	unsigned int j = 0;
	unsigned int ret;

	ret = _intc_prio_data(desc, d, enum_id, &i, &j);
	if (ret)
		return ret;

	if (do_grps)
		return intc_prio_data(desc, d, intc_grp_id(desc, enum_id), 0);

	return 0;
}

static void __init intc_enable_disable_enum(struct intc_desc *desc,
					    struct intc_desc_int *d,
					    intc_enum enum_id, int enable)
{
	unsigned int i, j, data;

	/* go through and enable/disable all mask bits */
	i = j = 0;
	do {
		data = _intc_mask_data(desc, d, enum_id, &i, &j);
		if (data)
			intc_enable_disable(d, data, enable);
		j++;
	} while (data);

	/* go through and enable/disable all priority fields */
	i = j = 0;
	do {
		data = _intc_prio_data(desc, d, enum_id, &i, &j);
		if (data)
			intc_enable_disable(d, data, enable);
		j++;
	} while (data);
}

static unsigned int __init intc_ack_data(struct intc_desc *desc,
					 struct intc_desc_int *d,
					 intc_enum enum_id)
{
	struct intc_mask_reg *mr = desc->hw.ack_regs;
	unsigned int i, j, fn, mode;
	unsigned long reg_e, reg_d;

	for (i = 0; mr && enum_id && i < desc->hw.nr_ack_regs; i++) {
		mr = desc->hw.ack_regs + i;

		for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
			if (mr->enum_ids[j] != enum_id)
				continue;

			fn = REG_FN_MODIFY_BASE;
			mode = MODE_ENABLE_REG;
			reg_e = mr->set_reg;
			reg_d = mr->set_reg;

			fn += (mr->reg_width >> 3) - 1;
			return _INTC_MK(fn, mode,
					intc_get_reg(d, reg_e),
					intc_get_reg(d, reg_d),
					1,
					(mr->reg_width - 1) - j);
		}
	}

	return 0;
}

static unsigned int __init intc_sense_data(struct intc_desc *desc,
					   struct intc_desc_int *d,
					   intc_enum enum_id)
{
	struct intc_sense_reg *sr = desc->hw.sense_regs;
	unsigned int i, j, fn, bit;

	for (i = 0; sr && enum_id && i < desc->hw.nr_sense_regs; i++) {
		sr = desc->hw.sense_regs + i;

		for (j = 0; j < ARRAY_SIZE(sr->enum_ids); j++) {
			if (sr->enum_ids[j] != enum_id)
				continue;

			fn = REG_FN_MODIFY_BASE;
			fn += (sr->reg_width >> 3) - 1;

			BUG_ON((j + 1) * sr->field_width > sr->reg_width);

			bit = sr->reg_width - ((j + 1) * sr->field_width);

			return _INTC_MK(fn, 0, intc_get_reg(d, sr->reg),
					0, sr->field_width, bit);
		}
	}

	return 0;
}

static void __init intc_register_irq(struct intc_desc *desc,
				     struct intc_desc_int *d,
				     intc_enum enum_id,
				     unsigned int irq)
{
	struct intc_handle_int *hp;
	unsigned int data[2], primary;

	/*
	 * Register the IRQ position with the global IRQ map
	 */
	set_bit(irq, intc_irq_map);

	/*
	 * Prefer single interrupt source bitmap over other combinations:
	 *
	 * 1. bitmap, single interrupt source
	 * 2. priority, single interrupt source
	 * 3. bitmap, multiple interrupt sources (groups)
	 * 4. priority, multiple interrupt sources (groups)
	 */
	data[0] = intc_mask_data(desc, d, enum_id, 0);
	data[1] = intc_prio_data(desc, d, enum_id, 0);

	primary = 0;
	if (!data[0] && data[1])
		primary = 1;

	if (!data[0] && !data[1])
		pr_warning("intc: missing unique irq mask for "
			   "irq %d (vect 0x%04x)\n", irq, irq2evt(irq));

	data[0] = data[0] ? data[0] : intc_mask_data(desc, d, enum_id, 1);
	data[1] = data[1] ? data[1] : intc_prio_data(desc, d, enum_id, 1);

	if (!data[primary])
		primary ^= 1;

	BUG_ON(!data[primary]); /* must have primary masking method */

	disable_irq_nosync(irq);
	set_irq_chip_and_handler_name(irq, &d->chip,
				      handle_level_irq, "level");
	set_irq_chip_data(irq, (void *)data[primary]);

	/*
	 * set priority level
	 *  - this needs to be at least 2 for 5-bit priorities on 7780
	 */
	intc_prio_level[irq] = default_prio_level;

	/* enable secondary masking method if present */
	if (data[!primary])
		_intc_enable(irq, data[!primary]);

	/* add irq to d->prio list if priority is available */
	if (data[1]) {
		hp = d->prio + d->nr_prio;
		hp->irq = irq;
		hp->handle = data[1];

		if (primary) {
			/*
			 * only secondary priority should access registers, so
			 * set _INTC_FN(h) = REG_FN_ERR for intc_set_priority()
			 */
			hp->handle &= ~_INTC_MK(0x0f, 0, 0, 0, 0, 0);
			hp->handle |= _INTC_MK(REG_FN_ERR, 0, 0, 0, 0, 0);
		}

		d->nr_prio++;
	}

	/* add irq to d->sense list if sense is available */
	data[0] = intc_sense_data(desc, d, enum_id);
	if (data[0]) {
		(d->sense + d->nr_sense)->irq = irq;
		(d->sense + d->nr_sense)->handle = data[0];
		d->nr_sense++;
	}

	/* irq should be disabled by default */
	d->chip.mask(irq);

	if (desc->hw.ack_regs)
		ack_handle[irq] = intc_ack_data(desc, d, enum_id);

#ifdef CONFIG_INTC_BALANCING
	if (desc->hw.mask_regs)
		dist_handle[irq] = intc_dist_data(desc, d, enum_id);
#endif

#ifdef CONFIG_ARM
	set_irq_flags(irq, IRQF_VALID); /* Enable IRQ on ARM systems */
#endif
}

static unsigned int __init save_reg(struct intc_desc_int *d,
				    unsigned int cnt,
				    unsigned long value,
				    unsigned int smp)
{
	if (value) {
		value = intc_phys_to_virt(d, value);
		d->reg[cnt] = value;
#ifdef CONFIG_SMP
		d->smp[cnt] = smp;
#endif
		return 1;
	}
	return 0;
}

static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc)
{
	generic_handle_irq((unsigned int)get_irq_data(irq));
}

int __init register_intc_controller(struct intc_desc *desc)
{
	unsigned int i, k, smp;
	struct intc_hw_desc *hw = &desc->hw;
	struct intc_desc_int *d;
	struct resource *res;

	pr_info("intc: Registered controller '%s' with %u IRQs\n",
		desc->name, hw->nr_vectors);

	d = kzalloc(sizeof(*d), GFP_NOWAIT);
	if (!d)
		goto err0;

	INIT_LIST_HEAD(&d->list);
	list_add(&d->list, &intc_list);

	if (desc->num_resources) {
		d->nr_windows = desc->num_resources;
		d->window = kzalloc(d->nr_windows * sizeof(*d->window),
				    GFP_NOWAIT);
		if (!d->window)
			goto err1;

		for (k = 0; k < d->nr_windows; k++) {
			res = desc->resource + k;
			WARN_ON(resource_type(res) != IORESOURCE_MEM);
			d->window[k].phys = res->start;
			d->window[k].size = resource_size(res);
			d->window[k].virt = ioremap_nocache(res->start,
							    resource_size(res));
			if (!d->window[k].virt)
				goto err2;
		}
	}

	d->nr_reg = hw->mask_regs ? hw->nr_mask_regs * 2 : 0;
#ifdef CONFIG_INTC_BALANCING
	if (d->nr_reg)
		d->nr_reg += hw->nr_mask_regs;
#endif
	d->nr_reg += hw->prio_regs ? hw->nr_prio_regs * 2 : 0;
	d->nr_reg += hw->sense_regs ? hw->nr_sense_regs : 0;
	d->nr_reg += hw->ack_regs ? hw->nr_ack_regs : 0;

	d->reg = kzalloc(d->nr_reg * sizeof(*d->reg), GFP_NOWAIT);
	if (!d->reg)
		goto err2;

#ifdef CONFIG_SMP
	d->smp = kzalloc(d->nr_reg * sizeof(*d->smp), GFP_NOWAIT);
	if (!d->smp)
		goto err3;
#endif
	k = 0;

	if (hw->mask_regs) {
		for (i = 0; i < hw->nr_mask_regs; i++) {
			smp = IS_SMP(hw->mask_regs[i]);
			k += save_reg(d, k, hw->mask_regs[i].set_reg, smp);
			k += save_reg(d, k, hw->mask_regs[i].clr_reg, smp);
#ifdef CONFIG_INTC_BALANCING
			k += save_reg(d, k, hw->mask_regs[i].dist_reg, 0);
#endif
		}
	}

	if (hw->prio_regs) {
		d->prio = kzalloc(hw->nr_vectors * sizeof(*d->prio),
				  GFP_NOWAIT);
		if (!d->prio)
			goto err4;

		for (i = 0; i < hw->nr_prio_regs; i++) {
			smp = IS_SMP(hw->prio_regs[i]);
			k += save_reg(d, k, hw->prio_regs[i].set_reg, smp);
			k += save_reg(d, k, hw->prio_regs[i].clr_reg, smp);
		}
	}

	if (hw->sense_regs) {
		d->sense = kzalloc(hw->nr_vectors * sizeof(*d->sense),
				   GFP_NOWAIT);
		if (!d->sense)
			goto err5;

		for (i = 0; i < hw->nr_sense_regs; i++)
			k += save_reg(d, k, hw->sense_regs[i].reg, 0);
	}

	d->chip.name = desc->name;
	d->chip.mask = intc_disable;
	d->chip.unmask = intc_enable;
	d->chip.mask_ack = intc_disable;
	d->chip.enable = intc_enable;
	d->chip.disable = intc_disable;
	d->chip.shutdown = intc_disable;
	d->chip.set_type = intc_set_sense;
	d->chip.set_wake = intc_set_wake;
#ifdef CONFIG_SMP
	d->chip.set_affinity = intc_set_affinity;
#endif

	if (hw->ack_regs) {
		for (i = 0; i < hw->nr_ack_regs; i++)
			k += save_reg(d, k, hw->ack_regs[i].set_reg, 0);

		d->chip.mask_ack = intc_mask_ack;
	}

	/* disable bits matching force_disable before registering irqs */
	if (desc->force_disable)
		intc_enable_disable_enum(desc, d, desc->force_disable, 0);

	/* disable bits matching force_enable before registering irqs */
	if (desc->force_enable)
		intc_enable_disable_enum(desc, d, desc->force_enable, 0);

	BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */

	/* register the vectors one by one */
	for (i = 0; i < hw->nr_vectors; i++) {
		struct intc_vect *vect = hw->vectors + i;
		unsigned int irq = evt2irq(vect->vect);
		struct irq_desc *irq_desc;

		if (!vect->enum_id)
			continue;

		irq_desc = irq_to_desc_alloc_node(irq, numa_node_id());
		if (unlikely(!irq_desc)) {
			pr_err("can't get irq_desc for %d\n", irq);
			continue;
		}

		intc_register_irq(desc, d, vect->enum_id, irq);

		for (k = i + 1; k < hw->nr_vectors; k++) {
			struct intc_vect *vect2 = hw->vectors + k;
			unsigned int irq2 = evt2irq(vect2->vect);

			if (vect->enum_id != vect2->enum_id)
				continue;

			/*
			 * In the case of multi-evt handling and sparse
			 * IRQ support, each vector still needs to have
			 * its own backing irq_desc.
			 */
			irq_desc = irq_to_desc_alloc_node(irq2, numa_node_id());
			if (unlikely(!irq_desc)) {
				pr_err("can't get irq_desc for %d\n", irq2);
				continue;
			}

			vect2->enum_id = 0;

			/* redirect this interrupt to the first one */
			set_irq_chip(irq2, &dummy_irq_chip);
			set_irq_chained_handler(irq2, intc_redirect_irq);
			set_irq_data(irq2, (void *)irq);
		}
	}

	/* enable bits matching force_enable after registering irqs */
	if (desc->force_enable)
		intc_enable_disable_enum(desc, d, desc->force_enable, 1);

	return 0;
err5:
	kfree(d->prio);
err4:
#ifdef CONFIG_SMP
	kfree(d->smp);
err3:
#endif
	kfree(d->reg);
err2:
	for (k = 0; k < d->nr_windows; k++)
		if (d->window[k].virt)
			iounmap(d->window[k].virt);

	kfree(d->window);
err1:
	list_del(&d->list);
	kfree(d);
err0:
	pr_err("unable to allocate INTC memory\n");

	return -ENOMEM;
}

#ifdef CONFIG_INTC_USERIMASK
static void __iomem *uimask;

int register_intc_userimask(unsigned long addr)
{
	if (unlikely(uimask))
		return -EBUSY;

	uimask = ioremap_nocache(addr, SZ_4K);
	if (unlikely(!uimask))
		return -ENOMEM;

	pr_info("intc: userimask support registered for levels 0 -> %d\n",
		default_prio_level - 1);

	return 0;
}

static ssize_t
show_intc_userimask(struct sysdev_class *cls,
		    struct sysdev_class_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", (__raw_readl(uimask) >> 4) & 0xf);
}

static ssize_t
store_intc_userimask(struct sysdev_class *cls,
		     struct sysdev_class_attribute *attr,
		     const char *buf, size_t count)
{
	unsigned long level;

	level = simple_strtoul(buf, NULL, 10);

	/*
	 * Minimal acceptable IRQ levels are in the 2 - 16 range, but
	 * these are chomped so as to not interfere with normal IRQs.
	 *
	 * Level 1 is a special case on some CPUs in that it's not
	 * directly settable, but given that USERIMASK cuts off below a
	 * certain level, we don't care about this limitation here.
	 * Level 0 on the other hand equates to user masking disabled.
	 *
	 * We use default_prio_level as a cut off so that only special
	 * case opt-in IRQs can be mangled.
	 */
	if (level >= default_prio_level)
		return -EINVAL;

	__raw_writel(0xa5 << 24 | level << 4, uimask);

	return count;
}

static SYSDEV_CLASS_ATTR(userimask, S_IRUSR | S_IWUSR,
			 show_intc_userimask, store_intc_userimask);
#endif

static ssize_t
show_intc_name(struct sys_device *dev, struct sysdev_attribute *attr, char *buf)
{
	struct intc_desc_int *d;

	d = container_of(dev, struct intc_desc_int, sysdev);

	return sprintf(buf, "%s\n", d->chip.name);
}

static SYSDEV_ATTR(name, S_IRUGO, show_intc_name, NULL);

static int intc_suspend(struct sys_device *dev, pm_message_t state)
{
	struct intc_desc_int *d;
	struct irq_desc *desc;
	int irq;

	/* get intc controller associated with this sysdev */
	d = container_of(dev, struct intc_desc_int, sysdev);

	switch (state.event) {
	case PM_EVENT_ON:
		if (d->state.event != PM_EVENT_FREEZE)
			break;

		for_each_irq_desc(irq, desc) {
			if (desc->handle_irq == intc_redirect_irq)
				continue;
			if (desc->chip != &d->chip)
				continue;
			if (desc->status & IRQ_DISABLED)
				intc_disable(irq);
			else
				intc_enable(irq);
		}
		break;
	case PM_EVENT_FREEZE:
		/* nothing has to be done */
		break;
	case PM_EVENT_SUSPEND:
		/* enable wakeup irqs belonging to this intc controller */
		for_each_irq_desc(irq, desc) {
			if ((desc->status & IRQ_WAKEUP) && (desc->chip == &d->chip))
				intc_enable(irq);
		}
		break;
	}

	d->state = state;

	return 0;
}

static int intc_resume(struct sys_device *dev)
{
	return intc_suspend(dev, PMSG_ON);
}

static struct sysdev_class intc_sysdev_class = {
	.name		= "intc",
	.suspend	= intc_suspend,
	.resume		= intc_resume,
};

/* register this intc as sysdev to allow suspend/resume */
static int __init register_intc_sysdevs(void)
{
	struct intc_desc_int *d;
	int error;
	int id = 0;

	error = sysdev_class_register(&intc_sysdev_class);
#ifdef CONFIG_INTC_USERIMASK
	if (!error && uimask)
		error = sysdev_class_create_file(&intc_sysdev_class,
						 &attr_userimask);
#endif
	if (!error) {
		list_for_each_entry(d, &intc_list, list) {
			d->sysdev.id = id;
			d->sysdev.cls = &intc_sysdev_class;
			error = sysdev_register(&d->sysdev);
			if (error == 0)
				error = sysdev_create_file(&d->sysdev,
							   &attr_name);
			if (error)
				break;

			id++;
		}
	}

	if (error)
		pr_err("intc: sysdev registration error\n");

	return error;
}
device_initcall(register_intc_sysdevs);

/*
 * Dynamic IRQ allocation and deallocation
 */
unsigned int create_irq_nr(unsigned int irq_want, int node)
{
	unsigned int irq = 0, new;
	unsigned long flags;
	struct irq_desc *desc;

	spin_lock_irqsave(&vector_lock, flags);

	/*
	 * First try the wanted IRQ
	 */
	if (test_and_set_bit(irq_want, intc_irq_map) == 0) {
		new = irq_want;
	} else {
		/* .. then fall back to scanning. */
		new = find_first_zero_bit(intc_irq_map, nr_irqs);
		if (unlikely(new == nr_irqs))
			goto out_unlock;

		__set_bit(new, intc_irq_map);
	}

	desc = irq_to_desc_alloc_node(new, node);
	if (unlikely(!desc)) {
		pr_err("can't get irq_desc for %d\n", new);
		goto out_unlock;
	}

	desc = move_irq_desc(desc, node);
	irq = new;

out_unlock:
	spin_unlock_irqrestore(&vector_lock, flags);

	if (irq > 0) {
		dynamic_irq_init(irq);
#ifdef CONFIG_ARM
		set_irq_flags(irq, IRQF_VALID); /* Enable IRQ on ARM systems */
#endif
	}

	return irq;
}
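
/*
 * Usage sketch (illustrative, hypothetical names, error handling trimmed):
 * a board mux driver that wants a dynamically backed IRQ could do:
 *
 *	unsigned int irq = create_irq_nr(evt2irq(0x3e0), numa_node_id());
 *	if (!irq)
 *		return -ENOSPC;
 *
 *	set_irq_chip_and_handler(irq, &my_demux_chip, handle_simple_irq);
 *	...
 *	destroy_irq(irq);	release the vector again when done
 *
 * The 0x3e0 vector and my_demux_chip are made-up for the example.
 */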

int create_irq(void)
{
	int nid = cpu_to_node(smp_processor_id());
	int irq;

	irq = create_irq_nr(NR_IRQS_LEGACY, nid);
	if (irq == 0)
		irq = -1;

	return irq;
}

void destroy_irq(unsigned int irq)
{
	unsigned long flags;

	dynamic_irq_cleanup(irq);

	spin_lock_irqsave(&vector_lock, flags);
	__clear_bit(irq, intc_irq_map);
	spin_unlock_irqrestore(&vector_lock, flags);
}

int reserve_irq_vector(unsigned int irq)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&vector_lock, flags);
	if (test_and_set_bit(irq, intc_irq_map))
		ret = -EBUSY;
	spin_unlock_irqrestore(&vector_lock, flags);

	return ret;
}

void reserve_irq_legacy(void)
{
	unsigned long flags;
	int i, j;

	spin_lock_irqsave(&vector_lock, flags);
	j = find_first_bit(intc_irq_map, nr_irqs);
	for (i = 0; i < j; i++)
		__set_bit(i, intc_irq_map);
	spin_unlock_irqrestore(&vector_lock, flags);
}