sh: intc: Handle early lookups of subgroup IRQs.
drivers/sh/intc.c

/*
 * Shared interrupt handling code for IPR and INTC2 types of IRQs.
 *
 * Copyright (C) 2007, 2008 Magnus Damm
 * Copyright (C) 2009, 2010 Paul Mundt
 *
 * Based on intc2.c and ipr.c
 *
 * Copyright (C) 1999 Niibe Yutaka & Takeshi Yaegashi
 * Copyright (C) 2000 Kazumoto Kojima
 * Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
 * Copyright (C) 2003 Takashi Kusuda <kusuda-takashi@hitachi-ul.co.jp>
 * Copyright (C) 2005, 2006 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/sh_intc.h>
#include <linux/sysdev.h>
#include <linux/list.h>
#include <linux/topology.h>
#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <asm/sizes.h>

#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \
        ((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \
         ((addr_e) << 16) | ((addr_d) << 24))

#define _INTC_SHIFT(h)  (h & 0x1f)
#define _INTC_WIDTH(h)  ((h >> 5) & 0xf)
#define _INTC_FN(h)     ((h >> 9) & 0xf)
#define _INTC_MODE(h)   ((h >> 13) & 0x7)
#define _INTC_ADDR_E(h) ((h >> 16) & 0xff)
#define _INTC_ADDR_D(h) ((h >> 24) & 0xff)
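
/*
 * A handle packed by _INTC_MK() has the following layout (derived from
 * the accessor macros above):
 *
 *   31......24 23......16 15..13 12....9 8.....5 4.....0
 *   [ addr_d ] [ addr_e ] [mode] [ fn  ] [width] [shift]
 *
 * addr_e/addr_d are 8-bit indices into d->reg[] for the enable and
 * disable registers, which is why register_intc_controller() below
 * BUG()s once more than 256 registers have been saved.
 */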

struct intc_handle_int {
        unsigned int irq;
        unsigned long handle;
};

struct intc_window {
        phys_addr_t phys;
        void __iomem *virt;
        unsigned long size;
};

struct intc_map_entry {
        intc_enum enum_id;
        struct intc_desc_int *desc;
};

struct intc_subgroup_entry {
        unsigned int pirq;
        intc_enum enum_id;
        unsigned long handle;
};

struct intc_desc_int {
        struct list_head list;
        struct sys_device sysdev;
        struct radix_tree_root tree;
        pm_message_t state;
        spinlock_t lock;
        unsigned int index;
        unsigned long *reg;
#ifdef CONFIG_SMP
        unsigned long *smp;
#endif
        unsigned int nr_reg;
        struct intc_handle_int *prio;
        unsigned int nr_prio;
        struct intc_handle_int *sense;
        unsigned int nr_sense;
        struct intc_window *window;
        unsigned int nr_windows;
        struct irq_chip chip;
};

static LIST_HEAD(intc_list);
static unsigned int nr_intc_controllers;

/*
 * The intc_irq_map provides a global map of bound IRQ vectors for a
 * given platform. Allocation of IRQs is either static through the CPU
 * vector map, or dynamic in the case of board mux vectors or MSI.
 *
 * As this is a central point for all IRQ controllers on the system,
 * each of the available sources is mapped out here. This combined with
 * sparseirq makes it quite trivial to keep the vector map tightly packed
 * when dynamically creating IRQs, as well as tying in to otherwise
 * unused irq_desc positions in the sparse array.
 */
static DECLARE_BITMAP(intc_irq_map, NR_IRQS);
static struct intc_map_entry intc_irq_xlate[NR_IRQS];
static DEFINE_SPINLOCK(vector_lock);
static DEFINE_SPINLOCK(xlate_lock);

#ifdef CONFIG_SMP
#define IS_SMP(x)               x.smp
#define INTC_REG(d, x, c)       (d->reg[(x)] + ((d->smp[(x)] & 0xff) * c))
#define SMP_NR(d, x)            ((d->smp[(x)] >> 8) ? (d->smp[(x)] >> 8) : 1)
#else
#define IS_SMP(x)               0
#define INTC_REG(d, x, c)       (d->reg[(x)])
#define SMP_NR(d, x)            1
#endif

static unsigned int intc_prio_level[NR_IRQS];   /* for now */
static unsigned int default_prio_level = 2;     /* 2 - 16 */
static unsigned long ack_handle[NR_IRQS];
#ifdef CONFIG_INTC_BALANCING
static unsigned long dist_handle[NR_IRQS];
#endif

struct intc_virq_list {
        unsigned int irq;
        struct intc_virq_list *next;
};

#define for_each_virq(entry, head) \
        for (entry = head; entry; entry = entry->next)

static inline struct intc_desc_int *get_intc_desc(unsigned int irq)
{
        struct irq_chip *chip = get_irq_chip(irq);

        return container_of(chip, struct intc_desc_int, chip);
}

static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc)
{
        generic_handle_irq((unsigned int)get_irq_data(irq));
}

static inline void activate_irq(int irq)
{
#ifdef CONFIG_ARM
        /*
         * ARM requires an extra step to clear IRQ_NOREQUEST, which it
         * sets on behalf of every irq_chip. Also sets IRQ_NOPROBE.
         */
        set_irq_flags(irq, IRQF_VALID);
#else
        /* same effect on other architectures */
        set_irq_noprobe(irq);
#endif
}

static unsigned long intc_phys_to_virt(struct intc_desc_int *d,
                                       unsigned long address)
{
        struct intc_window *window;
        int k;

        /* scan through physical windows and convert address */
        for (k = 0; k < d->nr_windows; k++) {
                window = d->window + k;

                if (address < window->phys)
                        continue;

                if (address >= (window->phys + window->size))
                        continue;

                address -= window->phys;
                address += (unsigned long)window->virt;

                return address;
        }

        /* no windows defined, register must be 1:1 mapped virt:phys */
        return address;
}

static unsigned int intc_get_reg(struct intc_desc_int *d, unsigned long address)
{
        unsigned int k;

        address = intc_phys_to_virt(d, address);

        for (k = 0; k < d->nr_reg; k++) {
                if (d->reg[k] == address)
                        return k;
        }

        BUG();
        return 0;
}

static inline unsigned int set_field(unsigned int value,
                                     unsigned int field_value,
                                     unsigned int handle)
{
        unsigned int width = _INTC_WIDTH(handle);
        unsigned int shift = _INTC_SHIFT(handle);

        value &= ~(((1 << width) - 1) << shift);
        value |= field_value << shift;
        return value;
}

static inline unsigned long get_field(unsigned int value, unsigned int handle)
{
        unsigned int width = _INTC_WIDTH(handle);
        unsigned int shift = _INTC_SHIFT(handle);
        unsigned int mask = ((1 << width) - 1) << shift;

        return (value & mask) >> shift;
}
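
/*
 * Worked example (values chosen purely for illustration): with a handle
 * encoding width = 4 and shift = 8, set_field(0, 0x5, handle) clears
 * bits 11:8 and returns 0x500, and get_field(0x500, handle) yields 0x5.
 */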

static unsigned long test_8(unsigned long addr, unsigned long h,
                            unsigned long ignore)
{
        return get_field(__raw_readb(addr), h);
}

static unsigned long test_16(unsigned long addr, unsigned long h,
                             unsigned long ignore)
{
        return get_field(__raw_readw(addr), h);
}

static unsigned long test_32(unsigned long addr, unsigned long h,
                             unsigned long ignore)
{
        return get_field(__raw_readl(addr), h);
}

static unsigned long write_8(unsigned long addr, unsigned long h,
                             unsigned long data)
{
        __raw_writeb(set_field(0, data, h), addr);
        (void)__raw_readb(addr);        /* Defeat write posting */
        return 0;
}

static unsigned long write_16(unsigned long addr, unsigned long h,
                              unsigned long data)
{
        __raw_writew(set_field(0, data, h), addr);
        (void)__raw_readw(addr);        /* Defeat write posting */
        return 0;
}

static unsigned long write_32(unsigned long addr, unsigned long h,
                              unsigned long data)
{
        __raw_writel(set_field(0, data, h), addr);
        (void)__raw_readl(addr);        /* Defeat write posting */
        return 0;
}

static unsigned long modify_8(unsigned long addr, unsigned long h,
                              unsigned long data)
{
        unsigned long flags;

        local_irq_save(flags);
        __raw_writeb(set_field(__raw_readb(addr), data, h), addr);
        (void)__raw_readb(addr);        /* Defeat write posting */
        local_irq_restore(flags);
        return 0;
}

static unsigned long modify_16(unsigned long addr, unsigned long h,
                               unsigned long data)
{
        unsigned long flags;

        local_irq_save(flags);
        __raw_writew(set_field(__raw_readw(addr), data, h), addr);
        (void)__raw_readw(addr);        /* Defeat write posting */
        local_irq_restore(flags);
        return 0;
}

static unsigned long modify_32(unsigned long addr, unsigned long h,
                               unsigned long data)
{
        unsigned long flags;

        local_irq_save(flags);
        __raw_writel(set_field(__raw_readl(addr), data, h), addr);
        (void)__raw_readl(addr);        /* Defeat write posting */
        local_irq_restore(flags);
        return 0;
}

enum {
        REG_FN_ERR = 0,
        REG_FN_TEST_BASE = 1,
        REG_FN_WRITE_BASE = 5,
        REG_FN_MODIFY_BASE = 9
};

static unsigned long (*intc_reg_fns[])(unsigned long addr,
                                       unsigned long h,
                                       unsigned long data) = {
        [REG_FN_TEST_BASE + 0] = test_8,
        [REG_FN_TEST_BASE + 1] = test_16,
        [REG_FN_TEST_BASE + 3] = test_32,
        [REG_FN_WRITE_BASE + 0] = write_8,
        [REG_FN_WRITE_BASE + 1] = write_16,
        [REG_FN_WRITE_BASE + 3] = write_32,
        [REG_FN_MODIFY_BASE + 0] = modify_8,
        [REG_FN_MODIFY_BASE + 1] = modify_16,
        [REG_FN_MODIFY_BASE + 3] = modify_32,
};
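
/*
 * Slots in intc_reg_fns[] are indexed as base + (reg_width >> 3) - 1,
 * so an 8-bit register selects base + 0, a 16-bit register base + 1,
 * and a 32-bit register base + 3; the base + 2 (24-bit) slots are
 * intentionally left unpopulated.
 */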

enum {  MODE_ENABLE_REG = 0, /* Bit(s) set -> interrupt enabled */
        MODE_MASK_REG,       /* Bit(s) set -> interrupt disabled */
        MODE_DUAL_REG,       /* Two registers, set bit to enable / disable */
        MODE_PRIO_REG,       /* Priority value written to enable interrupt */
        MODE_PCLR_REG,       /* Above plus all bits set to disable interrupt */
};

static unsigned long intc_mode_field(unsigned long addr,
                                     unsigned long handle,
                                     unsigned long (*fn)(unsigned long,
                                                         unsigned long,
                                                         unsigned long),
                                     unsigned int irq)
{
        return fn(addr, handle, ((1 << _INTC_WIDTH(handle)) - 1));
}

static unsigned long intc_mode_zero(unsigned long addr,
                                    unsigned long handle,
                                    unsigned long (*fn)(unsigned long,
                                                        unsigned long,
                                                        unsigned long),
                                    unsigned int irq)
{
        return fn(addr, handle, 0);
}

static unsigned long intc_mode_prio(unsigned long addr,
                                    unsigned long handle,
                                    unsigned long (*fn)(unsigned long,
                                                        unsigned long,
                                                        unsigned long),
                                    unsigned int irq)
{
        return fn(addr, handle, intc_prio_level[irq]);
}

static unsigned long (*intc_enable_fns[])(unsigned long addr,
                                          unsigned long handle,
                                          unsigned long (*fn)(unsigned long,
                                                              unsigned long,
                                                              unsigned long),
                                          unsigned int irq) = {
        [MODE_ENABLE_REG] = intc_mode_field,
        [MODE_MASK_REG] = intc_mode_zero,
        [MODE_DUAL_REG] = intc_mode_field,
        [MODE_PRIO_REG] = intc_mode_prio,
        [MODE_PCLR_REG] = intc_mode_prio,
};

static unsigned long (*intc_disable_fns[])(unsigned long addr,
                                           unsigned long handle,
                                           unsigned long (*fn)(unsigned long,
                                                               unsigned long,
                                                               unsigned long),
                                           unsigned int irq) = {
        [MODE_ENABLE_REG] = intc_mode_zero,
        [MODE_MASK_REG] = intc_mode_field,
        [MODE_DUAL_REG] = intc_mode_field,
        [MODE_PRIO_REG] = intc_mode_zero,
        [MODE_PCLR_REG] = intc_mode_field,
};

#ifdef CONFIG_INTC_BALANCING
static inline void intc_balancing_enable(unsigned int irq)
{
        struct intc_desc_int *d = get_intc_desc(irq);
        unsigned long handle = dist_handle[irq];
        unsigned long addr;

        if (irq_balancing_disabled(irq) || !handle)
                return;

        addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
        intc_reg_fns[_INTC_FN(handle)](addr, handle, 1);
}

static inline void intc_balancing_disable(unsigned int irq)
{
        struct intc_desc_int *d = get_intc_desc(irq);
        unsigned long handle = dist_handle[irq];
        unsigned long addr;

        if (irq_balancing_disabled(irq) || !handle)
                return;

        addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
        intc_reg_fns[_INTC_FN(handle)](addr, handle, 0);
}

static unsigned int intc_dist_data(struct intc_desc *desc,
                                   struct intc_desc_int *d,
                                   intc_enum enum_id)
{
        struct intc_mask_reg *mr = desc->hw.mask_regs;
        unsigned int i, j, fn, mode;
        unsigned long reg_e, reg_d;

        for (i = 0; mr && enum_id && i < desc->hw.nr_mask_regs; i++) {
                mr = desc->hw.mask_regs + i;

                /*
                 * Skip this entry if there's no auto-distribution
                 * register associated with it.
                 */
                if (!mr->dist_reg)
                        continue;

                for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
                        if (mr->enum_ids[j] != enum_id)
                                continue;

                        fn = REG_FN_MODIFY_BASE;
                        mode = MODE_ENABLE_REG;
                        reg_e = mr->dist_reg;
                        reg_d = mr->dist_reg;

                        fn += (mr->reg_width >> 3) - 1;
                        return _INTC_MK(fn, mode,
                                        intc_get_reg(d, reg_e),
                                        intc_get_reg(d, reg_d),
                                        1,
                                        (mr->reg_width - 1) - j);
                }
        }

        /*
         * It's possible we've gotten here with no distribution options
         * available for the IRQ in question, so we just skip over those.
         */
        return 0;
}
#else
static inline void intc_balancing_enable(unsigned int irq)
{
}

static inline void intc_balancing_disable(unsigned int irq)
{
}
#endif

static inline void _intc_enable(unsigned int irq, unsigned long handle)
{
        struct intc_desc_int *d = get_intc_desc(irq);
        unsigned long addr;
        unsigned int cpu;

        for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
#ifdef CONFIG_SMP
                if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity))
                        continue;
#endif
                addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
                intc_enable_fns[_INTC_MODE(handle)](addr, handle,
                                intc_reg_fns[_INTC_FN(handle)], irq);
        }

        intc_balancing_enable(irq);
}

static void intc_enable(unsigned int irq)
{
        _intc_enable(irq, (unsigned long)get_irq_chip_data(irq));
}

static void intc_disable(unsigned int irq)
{
        struct intc_desc_int *d = get_intc_desc(irq);
        unsigned long handle = (unsigned long)get_irq_chip_data(irq);
        unsigned long addr;
        unsigned int cpu;

        intc_balancing_disable(irq);

        for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
#ifdef CONFIG_SMP
                if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity))
                        continue;
#endif
                addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
                intc_disable_fns[_INTC_MODE(handle)](addr, handle,
                                intc_reg_fns[_INTC_FN(handle)], irq);
        }
}

static unsigned long
(*intc_enable_noprio_fns[])(unsigned long addr,
                            unsigned long handle,
                            unsigned long (*fn)(unsigned long,
                                                unsigned long,
                                                unsigned long),
                            unsigned int irq) = {
        [MODE_ENABLE_REG] = intc_mode_field,
        [MODE_MASK_REG] = intc_mode_zero,
        [MODE_DUAL_REG] = intc_mode_field,
        [MODE_PRIO_REG] = intc_mode_field,
        [MODE_PCLR_REG] = intc_mode_field,
};

static void intc_enable_disable(struct intc_desc_int *d,
                                unsigned long handle, int do_enable)
{
        unsigned long addr;
        unsigned int cpu;
        unsigned long (*fn)(unsigned long, unsigned long,
                unsigned long (*)(unsigned long, unsigned long,
                                  unsigned long),
                unsigned int);

        if (do_enable) {
                for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
                        addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
                        fn = intc_enable_noprio_fns[_INTC_MODE(handle)];
                        fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
                }
        } else {
                for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
                        addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
                        fn = intc_disable_fns[_INTC_MODE(handle)];
                        fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
                }
        }
}

static int intc_set_wake(unsigned int irq, unsigned int on)
{
        return 0; /* allow wakeup, but setup hardware in intc_suspend() */
}

#ifdef CONFIG_SMP
/*
 * This is called with the irq desc lock held, so we don't require any
 * additional locking here at the intc desc level. The affinity mask is
 * later tested in the enable/disable paths.
 */
static int intc_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
        if (!cpumask_intersects(cpumask, cpu_online_mask))
                return -1;

        cpumask_copy(irq_to_desc(irq)->affinity, cpumask);

        return 0;
}
#endif

static void intc_mask_ack(unsigned int irq)
{
        struct intc_desc_int *d = get_intc_desc(irq);
        unsigned long handle = ack_handle[irq];
        unsigned long addr;

        intc_disable(irq);

        /* read register and write zero only to the associated bit */
        if (handle) {
                addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
                switch (_INTC_FN(handle)) {
                case REG_FN_MODIFY_BASE + 0:    /* 8bit */
                        __raw_readb(addr);
                        __raw_writeb(0xff ^ set_field(0, 1, handle), addr);
                        break;
                case REG_FN_MODIFY_BASE + 1:    /* 16bit */
                        __raw_readw(addr);
                        __raw_writew(0xffff ^ set_field(0, 1, handle), addr);
                        break;
                case REG_FN_MODIFY_BASE + 3:    /* 32bit */
                        __raw_readl(addr);
                        __raw_writel(0xffffffff ^ set_field(0, 1, handle), addr);
                        break;
                default:
                        BUG();
                        break;
                }
        }
}

static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp,
                                             unsigned int nr_hp,
                                             unsigned int irq)
{
        int i;

        /*
         * this doesn't scale well, but...
         *
         * this function should only be used for certain uncommon
         * operations such as intc_set_priority() and intc_set_sense()
         * and in those rare cases performance doesn't matter that much.
         * keeping the memory footprint low is more important.
         *
         * one rather simple way to speed this up and still keep the
         * memory footprint down is to make sure the array is sorted
         * and then perform a bisect to lookup the irq.
         */
        for (i = 0; i < nr_hp; i++) {
                if ((hp + i)->irq != irq)
                        continue;

                return hp + i;
        }

        return NULL;
}

int intc_set_priority(unsigned int irq, unsigned int prio)
{
        struct intc_desc_int *d = get_intc_desc(irq);
        struct intc_handle_int *ihp;

        if (!intc_prio_level[irq] || prio <= 1)
                return -EINVAL;

        ihp = intc_find_irq(d->prio, d->nr_prio, irq);
        if (ihp) {
                if (prio >= (1 << _INTC_WIDTH(ihp->handle)))
                        return -EINVAL;

                intc_prio_level[irq] = prio;

                /*
                 * only set secondary masking method directly
                 * primary masking method is using intc_prio_level[irq]
                 * priority level will be set during next enable()
                 */
                if (_INTC_FN(ihp->handle) != REG_FN_ERR)
                        _intc_enable(irq, ihp->handle);
        }
        return 0;
}

#define VALID(x) (x | 0x80)

static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = {
        [IRQ_TYPE_EDGE_FALLING] = VALID(0),
        [IRQ_TYPE_EDGE_RISING] = VALID(1),
        [IRQ_TYPE_LEVEL_LOW] = VALID(2),
        /* SH7706, SH7707 and SH7709 do not support high level triggered */
#if !defined(CONFIG_CPU_SUBTYPE_SH7706) && \
    !defined(CONFIG_CPU_SUBTYPE_SH7707) && \
    !defined(CONFIG_CPU_SUBTYPE_SH7709)
        [IRQ_TYPE_LEVEL_HIGH] = VALID(3),
#endif
};

static int intc_set_sense(unsigned int irq, unsigned int type)
{
        struct intc_desc_int *d = get_intc_desc(irq);
        unsigned char value = intc_irq_sense_table[type & IRQ_TYPE_SENSE_MASK];
        struct intc_handle_int *ihp;
        unsigned long addr;

        if (!value)
                return -EINVAL;

        ihp = intc_find_irq(d->sense, d->nr_sense, irq);
        if (ihp) {
                addr = INTC_REG(d, _INTC_ADDR_E(ihp->handle), 0);
                intc_reg_fns[_INTC_FN(ihp->handle)](addr, ihp->handle, value);
        }
        return 0;
}

static intc_enum __init intc_grp_id(struct intc_desc *desc,
                                    intc_enum enum_id)
{
        struct intc_group *g = desc->hw.groups;
        unsigned int i, j;

        for (i = 0; g && enum_id && i < desc->hw.nr_groups; i++) {
                g = desc->hw.groups + i;

                for (j = 0; g->enum_ids[j]; j++) {
                        if (g->enum_ids[j] != enum_id)
                                continue;

                        return g->enum_id;
                }
        }

        return 0;
}

static unsigned int __init _intc_mask_data(struct intc_desc *desc,
                                           struct intc_desc_int *d,
                                           intc_enum enum_id,
                                           unsigned int *reg_idx,
                                           unsigned int *fld_idx)
{
        struct intc_mask_reg *mr = desc->hw.mask_regs;
        unsigned int fn, mode;
        unsigned long reg_e, reg_d;

        while (mr && enum_id && *reg_idx < desc->hw.nr_mask_regs) {
                mr = desc->hw.mask_regs + *reg_idx;

                for (; *fld_idx < ARRAY_SIZE(mr->enum_ids); (*fld_idx)++) {
                        if (mr->enum_ids[*fld_idx] != enum_id)
                                continue;

                        if (mr->set_reg && mr->clr_reg) {
                                fn = REG_FN_WRITE_BASE;
                                mode = MODE_DUAL_REG;
                                reg_e = mr->clr_reg;
                                reg_d = mr->set_reg;
                        } else {
                                fn = REG_FN_MODIFY_BASE;
                                if (mr->set_reg) {
                                        mode = MODE_ENABLE_REG;
                                        reg_e = mr->set_reg;
                                        reg_d = mr->set_reg;
                                } else {
                                        mode = MODE_MASK_REG;
                                        reg_e = mr->clr_reg;
                                        reg_d = mr->clr_reg;
                                }
                        }

                        fn += (mr->reg_width >> 3) - 1;
                        return _INTC_MK(fn, mode,
                                        intc_get_reg(d, reg_e),
                                        intc_get_reg(d, reg_d),
                                        1,
                                        (mr->reg_width - 1) - *fld_idx);
                }

                *fld_idx = 0;
                (*reg_idx)++;
        }

        return 0;
}

static unsigned int __init intc_mask_data(struct intc_desc *desc,
                                          struct intc_desc_int *d,
                                          intc_enum enum_id, int do_grps)
{
        unsigned int i = 0;
        unsigned int j = 0;
        unsigned int ret;

        ret = _intc_mask_data(desc, d, enum_id, &i, &j);
        if (ret)
                return ret;

        if (do_grps)
                return intc_mask_data(desc, d, intc_grp_id(desc, enum_id), 0);

        return 0;
}

static unsigned int __init _intc_prio_data(struct intc_desc *desc,
                                           struct intc_desc_int *d,
                                           intc_enum enum_id,
                                           unsigned int *reg_idx,
                                           unsigned int *fld_idx)
{
        struct intc_prio_reg *pr = desc->hw.prio_regs;
        unsigned int fn, n, mode, bit;
        unsigned long reg_e, reg_d;

        while (pr && enum_id && *reg_idx < desc->hw.nr_prio_regs) {
                pr = desc->hw.prio_regs + *reg_idx;

                for (; *fld_idx < ARRAY_SIZE(pr->enum_ids); (*fld_idx)++) {
                        if (pr->enum_ids[*fld_idx] != enum_id)
                                continue;

                        if (pr->set_reg && pr->clr_reg) {
                                fn = REG_FN_WRITE_BASE;
                                mode = MODE_PCLR_REG;
                                reg_e = pr->set_reg;
                                reg_d = pr->clr_reg;
                        } else {
                                fn = REG_FN_MODIFY_BASE;
                                mode = MODE_PRIO_REG;
                                if (!pr->set_reg)
                                        BUG();
                                reg_e = pr->set_reg;
                                reg_d = pr->set_reg;
                        }

                        fn += (pr->reg_width >> 3) - 1;
                        n = *fld_idx + 1;

                        BUG_ON(n * pr->field_width > pr->reg_width);

                        bit = pr->reg_width - (n * pr->field_width);

                        return _INTC_MK(fn, mode,
                                        intc_get_reg(d, reg_e),
                                        intc_get_reg(d, reg_d),
                                        pr->field_width, bit);
                }

                *fld_idx = 0;
                (*reg_idx)++;
        }

        return 0;
}

static unsigned int __init intc_prio_data(struct intc_desc *desc,
                                          struct intc_desc_int *d,
                                          intc_enum enum_id, int do_grps)
{
        unsigned int i = 0;
        unsigned int j = 0;
        unsigned int ret;

        ret = _intc_prio_data(desc, d, enum_id, &i, &j);
        if (ret)
                return ret;

        if (do_grps)
                return intc_prio_data(desc, d, intc_grp_id(desc, enum_id), 0);

        return 0;
}

static void __init intc_enable_disable_enum(struct intc_desc *desc,
                                            struct intc_desc_int *d,
                                            intc_enum enum_id, int enable)
{
        unsigned int i, j, data;

        /* go through and enable/disable all mask bits */
        i = j = 0;
        do {
                data = _intc_mask_data(desc, d, enum_id, &i, &j);
                if (data)
                        intc_enable_disable(d, data, enable);
                j++;
        } while (data);

        /* go through and enable/disable all priority fields */
        i = j = 0;
        do {
                data = _intc_prio_data(desc, d, enum_id, &i, &j);
                if (data)
                        intc_enable_disable(d, data, enable);

                j++;
        } while (data);
}

static unsigned int __init intc_ack_data(struct intc_desc *desc,
                                         struct intc_desc_int *d,
                                         intc_enum enum_id)
{
        struct intc_mask_reg *mr = desc->hw.ack_regs;
        unsigned int i, j, fn, mode;
        unsigned long reg_e, reg_d;

        for (i = 0; mr && enum_id && i < desc->hw.nr_ack_regs; i++) {
                mr = desc->hw.ack_regs + i;

                for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
                        if (mr->enum_ids[j] != enum_id)
                                continue;

                        fn = REG_FN_MODIFY_BASE;
                        mode = MODE_ENABLE_REG;
                        reg_e = mr->set_reg;
                        reg_d = mr->set_reg;

                        fn += (mr->reg_width >> 3) - 1;
                        return _INTC_MK(fn, mode,
                                        intc_get_reg(d, reg_e),
                                        intc_get_reg(d, reg_d),
                                        1,
                                        (mr->reg_width - 1) - j);
                }
        }

        return 0;
}

static unsigned int __init intc_sense_data(struct intc_desc *desc,
                                           struct intc_desc_int *d,
                                           intc_enum enum_id)
{
        struct intc_sense_reg *sr = desc->hw.sense_regs;
        unsigned int i, j, fn, bit;

        for (i = 0; sr && enum_id && i < desc->hw.nr_sense_regs; i++) {
                sr = desc->hw.sense_regs + i;

                for (j = 0; j < ARRAY_SIZE(sr->enum_ids); j++) {
                        if (sr->enum_ids[j] != enum_id)
                                continue;

                        fn = REG_FN_MODIFY_BASE;
                        fn += (sr->reg_width >> 3) - 1;

                        BUG_ON((j + 1) * sr->field_width > sr->reg_width);

                        bit = sr->reg_width - ((j + 1) * sr->field_width);

                        return _INTC_MK(fn, 0, intc_get_reg(d, sr->reg),
                                        0, sr->field_width, bit);
                }
        }

        return 0;
}

#define INTC_TAG_VIRQ_NEEDS_ALLOC       0

int intc_irq_lookup(const char *chipname, intc_enum enum_id)
{
        struct intc_map_entry *ptr;
        struct intc_desc_int *d;
        int irq = -1;

        list_for_each_entry(d, &intc_list, list) {
                int tagged;

                if (strcmp(d->chip.name, chipname) != 0)
                        continue;

                /*
                 * Catch early lookups for subgroup VIRQs that have not
                 * yet been allocated an IRQ. This already includes a
                 * fast-path out if the tree is untagged, so there is no
                 * need to explicitly test the root tree.
                 */
                tagged = radix_tree_tag_get(&d->tree, enum_id,
                                            INTC_TAG_VIRQ_NEEDS_ALLOC);
                if (unlikely(tagged))
                        break;

                ptr = radix_tree_lookup(&d->tree, enum_id);
                if (ptr) {
                        irq = ptr - intc_irq_xlate;
                        break;
                }
        }

        return irq;
}
EXPORT_SYMBOL_GPL(intc_irq_lookup);
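
/*
 * Usage sketch (controller name and enum value are hypothetical):
 *
 *      int irq = intc_irq_lookup("sh7372-intca", 0x0a00);
 *
 * A negative result means the enum is either unmapped or refers to a
 * subgroup VIRQ that has not been allocated yet; callers can retry
 * once intc_finalize() has run.
 */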

static int add_virq_to_pirq(unsigned int irq, unsigned int virq)
{
        struct intc_virq_list **last, *entry;
        struct irq_desc *desc = irq_to_desc(irq);

        /* scan for duplicates */
        last = (struct intc_virq_list **)&desc->handler_data;
        for_each_virq(entry, desc->handler_data) {
                if (entry->irq == virq)
                        return 0;
                last = &entry->next;
        }

        entry = kzalloc(sizeof(struct intc_virq_list), GFP_ATOMIC);
        if (!entry) {
                pr_err("can't allocate VIRQ mapping for %d\n", virq);
                return -ENOMEM;
        }

        entry->irq = virq;

        *last = entry;

        return 0;
}

static void intc_virq_handler(unsigned int irq, struct irq_desc *desc)
{
        struct intc_virq_list *entry, *vlist = get_irq_data(irq);
        struct intc_desc_int *d = get_intc_desc(irq);

        desc->chip->mask_ack(irq);

        for_each_virq(entry, vlist) {
                unsigned long addr, handle;

                handle = (unsigned long)get_irq_data(entry->irq);
                addr = INTC_REG(d, _INTC_ADDR_E(handle), 0);

                if (intc_reg_fns[_INTC_FN(handle)](addr, handle, 0))
                        generic_handle_irq(entry->irq);
        }

        desc->chip->unmask(irq);
}
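
/*
 * Demux flow above: the parent IRQ is masked and acked, each VIRQ's
 * TEST handle (packed by intc_subgroup_data() below) is polled to see
 * which subgroup sources are pending, those are passed on through
 * generic_handle_irq(), and the parent is finally unmasked again.
 */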

static unsigned long __init intc_subgroup_data(struct intc_subgroup *subgroup,
                                               struct intc_desc_int *d,
                                               unsigned int index)
{
        unsigned int fn = REG_FN_TEST_BASE + (subgroup->reg_width >> 3) - 1;

        return _INTC_MK(fn, MODE_ENABLE_REG, intc_get_reg(d, subgroup->reg),
                        0, 1, (subgroup->reg_width - 1) - index);
}

static void __init intc_subgroup_init_one(struct intc_desc *desc,
                                          struct intc_desc_int *d,
                                          struct intc_subgroup *subgroup)
{
        struct intc_map_entry *mapped;
        unsigned int pirq;
        unsigned long flags;
        int i;

        mapped = radix_tree_lookup(&d->tree, subgroup->parent_id);
        if (!mapped) {
                WARN_ON(1);
                return;
        }

        pirq = mapped - intc_irq_xlate;

        spin_lock_irqsave(&d->lock, flags);

        for (i = 0; i < ARRAY_SIZE(subgroup->enum_ids); i++) {
                struct intc_subgroup_entry *entry;
                int err;

                if (!subgroup->enum_ids[i])
                        continue;

                entry = kmalloc(sizeof(*entry), GFP_NOWAIT);
                if (!entry)
                        break;

                entry->pirq = pirq;
                entry->enum_id = subgroup->enum_ids[i];
                entry->handle = intc_subgroup_data(subgroup, d, i);

                err = radix_tree_insert(&d->tree, entry->enum_id, entry);
                if (unlikely(err < 0))
                        break;

                radix_tree_tag_set(&d->tree, entry->enum_id,
                                   INTC_TAG_VIRQ_NEEDS_ALLOC);
        }

        spin_unlock_irqrestore(&d->lock, flags);
}

static void __init intc_subgroup_init(struct intc_desc *desc,
                                      struct intc_desc_int *d)
{
        int i;

        if (!desc->hw.subgroups)
                return;

        for (i = 0; i < desc->hw.nr_subgroups; i++)
                intc_subgroup_init_one(desc, d, desc->hw.subgroups + i);
}

static void __init intc_subgroup_map(struct intc_desc_int *d)
{
        struct intc_subgroup_entry *entries[32];
        unsigned long flags;
        unsigned int nr_found;
        int i;

        spin_lock_irqsave(&d->lock, flags);

restart:
        nr_found = radix_tree_gang_lookup_tag_slot(&d->tree,
                        (void ***)entries, 0, ARRAY_SIZE(entries),
                        INTC_TAG_VIRQ_NEEDS_ALLOC);

        for (i = 0; i < nr_found; i++) {
                struct intc_subgroup_entry *entry;
                int irq;

                entry = radix_tree_deref_slot((void **)entries[i]);
                if (unlikely(!entry))
                        continue;
                if (unlikely(entry == RADIX_TREE_RETRY))
                        goto restart;

                irq = create_irq();
                if (unlikely(irq < 0)) {
                        pr_err("no more free IRQs, bailing..\n");
                        break;
                }

                pr_info("Setting up a chained VIRQ from %d -> %d\n",
                        irq, entry->pirq);

                spin_lock(&xlate_lock);
                intc_irq_xlate[irq].desc = d;
                intc_irq_xlate[irq].enum_id = entry->enum_id;
                spin_unlock(&xlate_lock);

                set_irq_chip_and_handler_name(irq, get_irq_chip(entry->pirq),
                                              handle_simple_irq, "virq");
                set_irq_chip_data(irq, get_irq_chip_data(entry->pirq));

                set_irq_data(irq, (void *)entry->handle);

                set_irq_chained_handler(entry->pirq, intc_virq_handler);
                add_virq_to_pirq(entry->pirq, irq);

                radix_tree_tag_clear(&d->tree, entry->enum_id,
                                     INTC_TAG_VIRQ_NEEDS_ALLOC);
                radix_tree_replace_slot((void **)entries[i],
                                        &intc_irq_xlate[irq]);
        }

        spin_unlock_irqrestore(&d->lock, flags);
}

void __init intc_finalize(void)
{
        struct intc_desc_int *d;

        list_for_each_entry(d, &intc_list, list)
                if (radix_tree_tagged(&d->tree, INTC_TAG_VIRQ_NEEDS_ALLOC))
                        intc_subgroup_map(d);
}
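
/*
 * Subgroup lifecycle: intc_subgroup_init_one() inserts each subgroup
 * enum into the radix tree tagged with INTC_TAG_VIRQ_NEEDS_ALLOC,
 * intc_irq_lookup() refuses to resolve entries while that tag is set
 * (the early lookup case), and intc_finalize() later allocates the
 * VIRQs and clears the tags via intc_subgroup_map().
 */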

static void __init intc_register_irq(struct intc_desc *desc,
                                     struct intc_desc_int *d,
                                     intc_enum enum_id,
                                     unsigned int irq)
{
        struct intc_handle_int *hp;
        unsigned int data[2], primary;
        unsigned long flags;

        /*
         * Register the IRQ position with the global IRQ map, then insert
         * it into the radix tree.
         */
        set_bit(irq, intc_irq_map);

        spin_lock_irqsave(&xlate_lock, flags);
        radix_tree_insert(&d->tree, enum_id, &intc_irq_xlate[irq]);
        spin_unlock_irqrestore(&xlate_lock, flags);

        /*
         * Prefer single interrupt source bitmap over other combinations:
         *
         * 1. bitmap, single interrupt source
         * 2. priority, single interrupt source
         * 3. bitmap, multiple interrupt sources (groups)
         * 4. priority, multiple interrupt sources (groups)
         */
        data[0] = intc_mask_data(desc, d, enum_id, 0);
        data[1] = intc_prio_data(desc, d, enum_id, 0);

        primary = 0;
        if (!data[0] && data[1])
                primary = 1;

        if (!data[0] && !data[1])
                pr_warning("missing unique irq mask for irq %d (vect 0x%04x)\n",
                           irq, irq2evt(irq));

        data[0] = data[0] ? data[0] : intc_mask_data(desc, d, enum_id, 1);
        data[1] = data[1] ? data[1] : intc_prio_data(desc, d, enum_id, 1);

        if (!data[primary])
                primary ^= 1;

        BUG_ON(!data[primary]); /* must have primary masking method */

        disable_irq_nosync(irq);
        set_irq_chip_and_handler_name(irq, &d->chip,
                                      handle_level_irq, "level");
        set_irq_chip_data(irq, (void *)data[primary]);

        /*
         * set priority level
         * - this needs to be at least 2 for 5-bit priorities on 7780
         */
        intc_prio_level[irq] = default_prio_level;

        /* enable secondary masking method if present */
        if (data[!primary])
                _intc_enable(irq, data[!primary]);

        /* add irq to d->prio list if priority is available */
        if (data[1]) {
                hp = d->prio + d->nr_prio;
                hp->irq = irq;
                hp->handle = data[1];

                if (primary) {
                        /*
                         * only secondary priority should access registers, so
                         * set _INTC_FN(h) = REG_FN_ERR for intc_set_priority()
                         */
                        hp->handle &= ~_INTC_MK(0x0f, 0, 0, 0, 0, 0);
                        hp->handle |= _INTC_MK(REG_FN_ERR, 0, 0, 0, 0, 0);
                }
                d->nr_prio++;
        }

        /* add irq to d->sense list if sense is available */
        data[0] = intc_sense_data(desc, d, enum_id);
        if (data[0]) {
                (d->sense + d->nr_sense)->irq = irq;
                (d->sense + d->nr_sense)->handle = data[0];
                d->nr_sense++;
        }

        /* irq should be disabled by default */
        d->chip.mask(irq);

        if (desc->hw.ack_regs)
                ack_handle[irq] = intc_ack_data(desc, d, enum_id);

#ifdef CONFIG_INTC_BALANCING
        if (desc->hw.mask_regs)
                dist_handle[irq] = intc_dist_data(desc, d, enum_id);
#endif

        activate_irq(irq);
}

static unsigned int __init save_reg(struct intc_desc_int *d,
                                    unsigned int cnt,
                                    unsigned long value,
                                    unsigned int smp)
{
        if (value) {
                value = intc_phys_to_virt(d, value);

                d->reg[cnt] = value;
#ifdef CONFIG_SMP
                d->smp[cnt] = smp;
#endif
                return 1;
        }

        return 0;
}

int __init register_intc_controller(struct intc_desc *desc)
{
        unsigned int i, k, smp;
        struct intc_hw_desc *hw = &desc->hw;
        struct intc_desc_int *d;
        struct resource *res;

        pr_info("Registered controller '%s' with %u IRQs\n",
                desc->name, hw->nr_vectors);

        d = kzalloc(sizeof(*d), GFP_NOWAIT);
        if (!d)
                goto err0;

        INIT_LIST_HEAD(&d->list);
        list_add_tail(&d->list, &intc_list);

        spin_lock_init(&d->lock);

        d->index = nr_intc_controllers;

        if (desc->num_resources) {
                d->nr_windows = desc->num_resources;
                d->window = kzalloc(d->nr_windows * sizeof(*d->window),
                                    GFP_NOWAIT);
                if (!d->window)
                        goto err1;

                for (k = 0; k < d->nr_windows; k++) {
                        res = desc->resource + k;
                        WARN_ON(resource_type(res) != IORESOURCE_MEM);
                        d->window[k].phys = res->start;
                        d->window[k].size = resource_size(res);
                        d->window[k].virt = ioremap_nocache(res->start,
                                                            resource_size(res));
                        if (!d->window[k].virt)
                                goto err2;
                }
        }

        d->nr_reg = hw->mask_regs ? hw->nr_mask_regs * 2 : 0;
#ifdef CONFIG_INTC_BALANCING
        if (d->nr_reg)
                d->nr_reg += hw->nr_mask_regs;
#endif
        d->nr_reg += hw->prio_regs ? hw->nr_prio_regs * 2 : 0;
        d->nr_reg += hw->sense_regs ? hw->nr_sense_regs : 0;
        d->nr_reg += hw->ack_regs ? hw->nr_ack_regs : 0;
        d->nr_reg += hw->subgroups ? hw->nr_subgroups : 0;

        d->reg = kzalloc(d->nr_reg * sizeof(*d->reg), GFP_NOWAIT);
        if (!d->reg)
                goto err2;

#ifdef CONFIG_SMP
        d->smp = kzalloc(d->nr_reg * sizeof(*d->smp), GFP_NOWAIT);
        if (!d->smp)
                goto err3;
#endif
        k = 0;

        if (hw->mask_regs) {
                for (i = 0; i < hw->nr_mask_regs; i++) {
                        smp = IS_SMP(hw->mask_regs[i]);
                        k += save_reg(d, k, hw->mask_regs[i].set_reg, smp);
                        k += save_reg(d, k, hw->mask_regs[i].clr_reg, smp);
#ifdef CONFIG_INTC_BALANCING
                        k += save_reg(d, k, hw->mask_regs[i].dist_reg, 0);
#endif
                }
        }

        if (hw->prio_regs) {
                d->prio = kzalloc(hw->nr_vectors * sizeof(*d->prio),
                                  GFP_NOWAIT);
                if (!d->prio)
                        goto err4;

                for (i = 0; i < hw->nr_prio_regs; i++) {
                        smp = IS_SMP(hw->prio_regs[i]);
                        k += save_reg(d, k, hw->prio_regs[i].set_reg, smp);
                        k += save_reg(d, k, hw->prio_regs[i].clr_reg, smp);
                }
        }

        if (hw->sense_regs) {
                d->sense = kzalloc(hw->nr_vectors * sizeof(*d->sense),
                                   GFP_NOWAIT);
                if (!d->sense)
                        goto err5;

                for (i = 0; i < hw->nr_sense_regs; i++)
                        k += save_reg(d, k, hw->sense_regs[i].reg, 0);
        }

        if (hw->subgroups)
                for (i = 0; i < hw->nr_subgroups; i++)
                        if (hw->subgroups[i].reg)
                                k += save_reg(d, k, hw->subgroups[i].reg, 0);

        d->chip.name = desc->name;
        d->chip.mask = intc_disable;
        d->chip.unmask = intc_enable;
        d->chip.mask_ack = intc_disable;
        d->chip.enable = intc_enable;
        d->chip.disable = intc_disable;
        d->chip.shutdown = intc_disable;
        d->chip.set_type = intc_set_sense;
        d->chip.set_wake = intc_set_wake;
#ifdef CONFIG_SMP
        d->chip.set_affinity = intc_set_affinity;
#endif

        if (hw->ack_regs) {
                for (i = 0; i < hw->nr_ack_regs; i++)
                        k += save_reg(d, k, hw->ack_regs[i].set_reg, 0);

                d->chip.mask_ack = intc_mask_ack;
        }

        /* disable bits matching force_disable before registering irqs */
        if (desc->force_disable)
                intc_enable_disable_enum(desc, d, desc->force_disable, 0);

        /* disable bits matching force_enable before registering irqs */
        if (desc->force_enable)
                intc_enable_disable_enum(desc, d, desc->force_enable, 0);

        BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */

        /* register the vectors one by one */
        for (i = 0; i < hw->nr_vectors; i++) {
                struct intc_vect *vect = hw->vectors + i;
                unsigned int irq = evt2irq(vect->vect);
                unsigned long flags;
                struct irq_desc *irq_desc;

                if (!vect->enum_id)
                        continue;

                irq_desc = irq_to_desc_alloc_node(irq, numa_node_id());
                if (unlikely(!irq_desc)) {
                        pr_err("can't get irq_desc for %d\n", irq);
                        continue;
                }

                spin_lock_irqsave(&xlate_lock, flags);
                intc_irq_xlate[irq].enum_id = vect->enum_id;
                intc_irq_xlate[irq].desc = d;
                spin_unlock_irqrestore(&xlate_lock, flags);

                intc_register_irq(desc, d, vect->enum_id, irq);

                for (k = i + 1; k < hw->nr_vectors; k++) {
                        struct intc_vect *vect2 = hw->vectors + k;
                        unsigned int irq2 = evt2irq(vect2->vect);

                        if (vect->enum_id != vect2->enum_id)
                                continue;

                        /*
                         * In the case of multi-evt handling and sparse
                         * IRQ support, each vector still needs to have
                         * its own backing irq_desc.
                         */
                        irq_desc = irq_to_desc_alloc_node(irq2, numa_node_id());
                        if (unlikely(!irq_desc)) {
                                pr_err("can't get irq_desc for %d\n", irq2);
                                continue;
                        }

                        vect2->enum_id = 0;

                        /* redirect this interrupt to the first one */
                        set_irq_chip(irq2, &dummy_irq_chip);
                        set_irq_chained_handler(irq2, intc_redirect_irq);
                        set_irq_data(irq2, (void *)irq);
                }
        }

        intc_subgroup_init(desc, d);

        /* enable bits matching force_enable after registering irqs */
        if (desc->force_enable)
                intc_enable_disable_enum(desc, d, desc->force_enable, 1);

        nr_intc_controllers++;

        return 0;
err5:
        kfree(d->prio);
err4:
#ifdef CONFIG_SMP
        kfree(d->smp);
err3:
#endif
        kfree(d->reg);
err2:
        for (k = 0; k < d->nr_windows; k++)
                if (d->window[k].virt)
                        iounmap(d->window[k].virt);

        kfree(d->window);
err1:
        kfree(d);
err0:
        pr_err("unable to allocate INTC memory\n");

        return -ENOMEM;
}
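
/*
 * Registration sketch: CPU/board code typically builds the tables with
 * the DECLARE_INTC_DESC() helper from <linux/sh_intc.h> and registers
 * them at init time (identifiers here are hypothetical):
 *
 *      static DECLARE_INTC_DESC(my_intc_desc, "my-intc", vectors,
 *                               groups, mask_registers, prio_registers,
 *                               sense_registers);
 *      ...
 *      register_intc_controller(&my_intc_desc);
 */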

#ifdef CONFIG_INTC_USERIMASK
static void __iomem *uimask;

int register_intc_userimask(unsigned long addr)
{
        if (unlikely(uimask))
                return -EBUSY;

        uimask = ioremap_nocache(addr, SZ_4K);
        if (unlikely(!uimask))
                return -ENOMEM;

        pr_info("userimask support registered for levels 0 -> %d\n",
                default_prio_level - 1);

        return 0;
}

static ssize_t
show_intc_userimask(struct sysdev_class *cls,
                    struct sysdev_class_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", (__raw_readl(uimask) >> 4) & 0xf);
}

static ssize_t
store_intc_userimask(struct sysdev_class *cls,
                     struct sysdev_class_attribute *attr,
                     const char *buf, size_t count)
{
        unsigned long level;

        level = simple_strtoul(buf, NULL, 10);

        /*
         * Minimal acceptable IRQ levels are in the 2 - 16 range, but
         * these are chomped so as to not interfere with normal IRQs.
         *
         * Level 1 is a special case on some CPUs in that it's not
         * directly settable, but given that USERIMASK cuts off below a
         * certain level, we don't care about this limitation here.
         * Level 0 on the other hand equates to user masking disabled.
         *
         * We use default_prio_level as a cut off so that only special
         * case opt-in IRQs can be mangled.
         */
        if (level >= default_prio_level)
                return -EINVAL;

        __raw_writel(0xa5 << 24 | level << 4, uimask);

        return count;
}

static SYSDEV_CLASS_ATTR(userimask, S_IRUSR | S_IWUSR,
                         show_intc_userimask, store_intc_userimask);
#endif
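
/*
 * When enabled, the cut-off level can then be driven from userspace,
 * e.g. (path assumed from the "intc" sysdev class registered below):
 *
 *      echo 1 > /sys/devices/system/intc/userimask
 */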

#ifdef CONFIG_INTC_MAPPING_DEBUG
static int intc_irq_xlate_debug(struct seq_file *m, void *priv)
{
        int i;

        seq_printf(m, "%-5s %-7s %-15s\n", "irq", "enum", "chip name");

        for (i = 1; i < nr_irqs; i++) {
                struct intc_desc_int *desc = intc_irq_xlate[i].desc;

                if (!desc)
                        continue;

                seq_printf(m, "%5d ", i);
                seq_printf(m, "0x%05x ", intc_irq_xlate[i].enum_id);
                seq_printf(m, "%-15s\n", desc->chip.name);
        }

        return 0;
}

static int intc_irq_xlate_open(struct inode *inode, struct file *file)
{
        return single_open(file, intc_irq_xlate_debug, inode->i_private);
}

static const struct file_operations intc_irq_xlate_fops = {
        .open = intc_irq_xlate_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int __init intc_irq_xlate_init(void)
{
        /*
         * XXX.. use arch_debugfs_dir here when all of the intc users are
         * converted.
         */
        if (debugfs_create_file("intc_irq_xlate", S_IRUGO, NULL, NULL,
                                &intc_irq_xlate_fops) == NULL)
                return -ENOMEM;

        return 0;
}
fs_initcall(intc_irq_xlate_init);
#endif

static ssize_t
show_intc_name(struct sys_device *dev, struct sysdev_attribute *attr, char *buf)
{
        struct intc_desc_int *d;

        d = container_of(dev, struct intc_desc_int, sysdev);

        return sprintf(buf, "%s\n", d->chip.name);
}

static SYSDEV_ATTR(name, S_IRUGO, show_intc_name, NULL);

static int intc_suspend(struct sys_device *dev, pm_message_t state)
{
        struct intc_desc_int *d;
        struct irq_desc *desc;
        int irq;

        /* get intc controller associated with this sysdev */
        d = container_of(dev, struct intc_desc_int, sysdev);

        switch (state.event) {
        case PM_EVENT_ON:
                if (d->state.event != PM_EVENT_FREEZE)
                        break;
                for_each_irq_desc(irq, desc) {
                        if (desc->handle_irq == intc_redirect_irq)
                                continue;
                        if (desc->chip != &d->chip)
                                continue;
                        if (desc->status & IRQ_DISABLED)
                                intc_disable(irq);
                        else
                                intc_enable(irq);
                }
                break;
        case PM_EVENT_FREEZE:
                /* nothing has to be done */
                break;
        case PM_EVENT_SUSPEND:
                /* enable wakeup irqs belonging to this intc controller */
                for_each_irq_desc(irq, desc) {
                        if ((desc->status & IRQ_WAKEUP) && (desc->chip == &d->chip))
                                intc_enable(irq);
                }
                break;
        }
        d->state = state;

        return 0;
}

static int intc_resume(struct sys_device *dev)
{
        return intc_suspend(dev, PMSG_ON);
}

static struct sysdev_class intc_sysdev_class = {
        .name = "intc",
        .suspend = intc_suspend,
        .resume = intc_resume,
};

/* register this intc as sysdev to allow suspend/resume */
static int __init register_intc_sysdevs(void)
{
        struct intc_desc_int *d;
        int error;

        error = sysdev_class_register(&intc_sysdev_class);
#ifdef CONFIG_INTC_USERIMASK
        if (!error && uimask)
                error = sysdev_class_create_file(&intc_sysdev_class,
                                                 &attr_userimask);
#endif
        if (!error) {
                list_for_each_entry(d, &intc_list, list) {
                        d->sysdev.id = d->index;
                        d->sysdev.cls = &intc_sysdev_class;
                        error = sysdev_register(&d->sysdev);
                        if (error == 0)
                                error = sysdev_create_file(&d->sysdev,
                                                           &attr_name);
                        if (error)
                                break;
                }
        }

        if (error)
                pr_err("sysdev registration error\n");

        return error;
}
device_initcall(register_intc_sysdevs);

/*
 * Dynamic IRQ allocation and deallocation
 */
unsigned int create_irq_nr(unsigned int irq_want, int node)
{
        unsigned int irq = 0, new;
        unsigned long flags;
        struct irq_desc *desc;

        spin_lock_irqsave(&vector_lock, flags);

        /*
         * First try the wanted IRQ
         */
        if (test_and_set_bit(irq_want, intc_irq_map) == 0) {
                new = irq_want;
        } else {
                /* .. then fall back to scanning. */
                new = find_first_zero_bit(intc_irq_map, nr_irqs);
                if (unlikely(new == nr_irqs))
                        goto out_unlock;

                __set_bit(new, intc_irq_map);
        }

        desc = irq_to_desc_alloc_node(new, node);
        if (unlikely(!desc)) {
                pr_err("can't get irq_desc for %d\n", new);
                goto out_unlock;
        }

        desc = move_irq_desc(desc, node);
        irq = new;

out_unlock:
        spin_unlock_irqrestore(&vector_lock, flags);

        if (irq > 0) {
                dynamic_irq_init(irq);
                activate_irq(irq);
        }

        return irq;
}

int create_irq(void)
{
        int nid = cpu_to_node(smp_processor_id());
        int irq;

        irq = create_irq_nr(NR_IRQS_LEGACY, nid);
        if (irq == 0)
                irq = -1;

        return irq;
}

void destroy_irq(unsigned int irq)
{
        unsigned long flags;

        dynamic_irq_cleanup(irq);

        spin_lock_irqsave(&vector_lock, flags);
        __clear_bit(irq, intc_irq_map);
        spin_unlock_irqrestore(&vector_lock, flags);
}

int reserve_irq_vector(unsigned int irq)
{
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&vector_lock, flags);
        if (test_and_set_bit(irq, intc_irq_map))
                ret = -EBUSY;
        spin_unlock_irqrestore(&vector_lock, flags);

        return ret;
}

void reserve_intc_vectors(struct intc_vect *vectors, unsigned int nr_vecs)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&vector_lock, flags);
        for (i = 0; i < nr_vecs; i++)
                __set_bit(evt2irq(vectors[i].vect), intc_irq_map);
        spin_unlock_irqrestore(&vector_lock, flags);
}

void reserve_irq_legacy(void)
{
        unsigned long flags;
        int i, j;

        spin_lock_irqsave(&vector_lock, flags);
        j = find_first_bit(intc_irq_map, nr_irqs);
        for (i = 0; i < j; i++)
                __set_bit(i, intc_irq_map);
        spin_unlock_irqrestore(&vector_lock, flags);
}