ARM: gic: add irq_domain support
[deliverable/linux.git] / arch / arm / common / gic.c
1 /*
2 * linux/arch/arm/common/gic.c
3 *
4 * Copyright (C) 2002 ARM Limited, All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * Interrupt architecture for the GIC:
11 *
12 * o There is one Interrupt Distributor, which receives interrupts
13 * from system devices and sends them to the Interrupt Controllers.
14 *
15 * o There is one CPU Interface per CPU, which sends interrupts sent
16 * by the Distributor, and interrupts generated locally, to the
17 * associated CPU. The base address of the CPU interface is usually
18 * aliased so that the same address points to different chips depending
19 * on the CPU it is accessed from.
20 *
21 * Note that IRQs 0-31 are special - they are local to each CPU.
22 * As such, the enable set/clear, pending set/clear and active bit
23 * registers are banked per-cpu for these sources.
24 */
25 #include <linux/init.h>
26 #include <linux/kernel.h>
27 #include <linux/export.h>
28 #include <linux/list.h>
29 #include <linux/smp.h>
30 #include <linux/cpu_pm.h>
31 #include <linux/cpumask.h>
32 #include <linux/io.h>
33 #include <linux/irqdomain.h>
34 #include <linux/interrupt.h>
35 #include <linux/percpu.h>
36 #include <linux/slab.h>
37
38 #include <asm/irq.h>
39 #include <asm/mach/irq.h>
40 #include <asm/hardware/gic.h>
41
42 static DEFINE_SPINLOCK(irq_controller_lock);
43
44 /* Address of GIC 0 CPU interface */
45 void __iomem *gic_cpu_base_addr __read_mostly;
46
/*
 * Optional platform-specific hooks layered on top of the basic GIC
 * operations (wake-up, retrigger, eoi, ...).  All callbacks default to
 * NULL, i.e. no extension behaviour; platforms fill in what they need.
 */
struct irq_chip gic_arch_extn = {
	.irq_eoi = NULL,
	.irq_mask = NULL,
	.irq_unmask = NULL,
	.irq_retrigger = NULL,
	.irq_set_type = NULL,
	.irq_set_wake = NULL,
};
59
60 #ifndef MAX_GIC_NR
61 #define MAX_GIC_NR 1
62 #endif
63
64 static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly;
65
66 static inline void __iomem *gic_dist_base(struct irq_data *d)
67 {
68 struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
69 return gic_data->dist_base;
70 }
71
72 static inline void __iomem *gic_cpu_base(struct irq_data *d)
73 {
74 struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
75 return gic_data->cpu_base;
76 }
77
78 static inline unsigned int gic_irq(struct irq_data *d)
79 {
80 return d->hwirq;
81 }
82
/*
 * Routines to acknowledge, disable and enable interrupts
 */

/*
 * Mask (disable) an interrupt by writing its bit to the distributor's
 * ENABLE_CLEAR register.  The lock serialises distributor accesses and
 * the optional arch-specific mask hook.
 */
static void gic_mask_irq(struct irq_data *d)
{
	u32 mask = 1 << (gic_irq(d) % 32);

	spin_lock(&irq_controller_lock);
	writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
	if (gic_arch_extn.irq_mask)
		gic_arch_extn.irq_mask(d);
	spin_unlock(&irq_controller_lock);
}
96
/*
 * Unmask (enable) an interrupt.  Note the ordering: the arch extension
 * hook runs before the distributor enable bit is set — the mirror image
 * of gic_mask_irq(), where the hook runs after the disable.
 */
static void gic_unmask_irq(struct irq_data *d)
{
	u32 mask = 1 << (gic_irq(d) % 32);

	spin_lock(&irq_controller_lock);
	if (gic_arch_extn.irq_unmask)
		gic_arch_extn.irq_unmask(d);
	writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
	spin_unlock(&irq_controller_lock);
}
107
/*
 * Signal end-of-interrupt to the CPU interface.  Only the optional arch
 * extension hook is run under the lock; the EOI register write itself
 * is done unlocked.
 */
static void gic_eoi_irq(struct irq_data *d)
{
	if (gic_arch_extn.irq_eoi) {
		spin_lock(&irq_controller_lock);
		gic_arch_extn.irq_eoi(d);
		spin_unlock(&irq_controller_lock);
	}

	writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
}
118
/*
 * Configure the trigger type of an interrupt.  The GIC only supports
 * level-high and edge-rising; SGIs (hwirq < 16) have fixed
 * configuration and cannot be changed.
 */
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	void __iomem *base = gic_dist_base(d);
	unsigned int gicirq = gic_irq(d);
	/* Enable registers hold 32 irqs; config registers hold 16 (2 bits each). */
	u32 enablemask = 1 << (gicirq % 32);
	u32 enableoff = (gicirq / 32) * 4;
	u32 confmask = 0x2 << ((gicirq % 16) * 2);
	u32 confoff = (gicirq / 16) * 4;
	bool enabled = false;
	u32 val;

	/* Interrupt configuration for SGIs can't be changed */
	if (gicirq < 16)
		return -EINVAL;

	if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	spin_lock(&irq_controller_lock);

	if (gic_arch_extn.irq_set_type)
		gic_arch_extn.irq_set_type(d, type);

	val = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
	if (type == IRQ_TYPE_LEVEL_HIGH)
		val &= ~confmask;
	else if (type == IRQ_TYPE_EDGE_RISING)
		val |= confmask;

	/*
	 * As recommended by the spec, disable the interrupt before changing
	 * the configuration
	 */
	if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
		writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
		enabled = true;
	}

	writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);

	/* Re-enable only if it was enabled before we touched the config. */
	if (enabled)
		writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);

	spin_unlock(&irq_controller_lock);

	return 0;
}
166
167 static int gic_retrigger(struct irq_data *d)
168 {
169 if (gic_arch_extn.irq_retrigger)
170 return gic_arch_extn.irq_retrigger(d);
171
172 return -ENXIO;
173 }
174
#ifdef CONFIG_SMP
/*
 * Route a global interrupt to a single online CPU from @mask_val.
 * Each GIC_DIST_TARGET register holds four 8-bit target fields, hence
 * the byte-granular shift/mask arithmetic.  The target bit written is
 * the physical CPU number (via cpu_logical_map).
 */
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
	unsigned int shift = (gic_irq(d) % 4) * 8;
	unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
	u32 val, mask, bit;

	/* No suitable online CPU, or beyond the 8 CPUs a target field can hold. */
	if (cpu >= 8 || cpu >= nr_cpu_ids)
		return -EINVAL;

	mask = 0xff << shift;
	bit = 1 << (cpu_logical_map(cpu) + shift);

	/* Read-modify-write: replace only this irq's 8-bit target field. */
	spin_lock(&irq_controller_lock);
	val = readl_relaxed(reg) & ~mask;
	writel_relaxed(val | bit, reg);
	spin_unlock(&irq_controller_lock);

	return IRQ_SET_MASK_OK;
}
#endif
198
#ifdef CONFIG_PM
/* Wake-up configuration is delegated entirely to the platform extension. */
static int gic_set_wake(struct irq_data *d, unsigned int on)
{
	if (!gic_arch_extn.irq_set_wake)
		return -ENXIO;

	return gic_arch_extn.irq_set_wake(d, on);
}

#else
#define gic_set_wake	NULL
#endif
213
/*
 * Chained handler for a secondary GIC cascaded off a parent interrupt
 * (installed by gic_cascade_irq()).  Acknowledges the child GIC to find
 * the pending interrupt, then forwards it as the corresponding Linux IRQ.
 */
static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
	struct gic_chip_data *chip_data = irq_get_handler_data(irq);
	struct irq_chip *chip = irq_get_chip(irq);
	unsigned int cascade_irq, gic_irq;
	unsigned long status;

	chained_irq_enter(chip, desc);

	spin_lock(&irq_controller_lock);
	status = readl_relaxed(chip_data->cpu_base + GIC_CPU_INTACK);
	spin_unlock(&irq_controller_lock);

	/* 1023 is the spurious interrupt ID: nothing actually pending. */
	gic_irq = (status & 0x3ff);
	if (gic_irq == 1023)
		goto out;

	/* Translate the hardware interrupt number into a Linux IRQ. */
	cascade_irq = irq_domain_to_irq(&chip_data->domain, gic_irq);
	if (unlikely(gic_irq < 32 || gic_irq > 1020 || cascade_irq >= NR_IRQS))
		do_bad_IRQ(cascade_irq, desc);
	else
		generic_handle_irq(cascade_irq);

 out:
	chained_irq_exit(chip, desc);
}
240
/* The GIC irq_chip, shared by per-cpu (SGI/PPI) and global (SPI) irqs. */
static struct irq_chip gic_chip = {
	.name = "GIC",
	.irq_mask = gic_mask_irq,
	.irq_unmask = gic_unmask_irq,
	.irq_eoi = gic_eoi_irq,
	.irq_set_type = gic_set_type,
	.irq_retrigger = gic_retrigger,
#ifdef CONFIG_SMP
	.irq_set_affinity = gic_set_affinity,
#endif
	.irq_set_wake = gic_set_wake,
};
253
254 void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
255 {
256 if (gic_nr >= MAX_GIC_NR)
257 BUG();
258 if (irq_set_handler_data(irq, &gic_data[gic_nr]) != 0)
259 BUG();
260 irq_set_chained_handler(irq, gic_handle_cascade_irq);
261 }
262
/*
 * One-time distributor initialisation, run once at boot.  Disables the
 * distributor, brings all global (SPI) interrupts into a known state
 * (level triggered, routed to this CPU, default priority, disabled),
 * wires up the Linux handlers for every irq in the domain, then
 * re-enables the distributor.  The "i * 4 / N" expressions compute the
 * byte offset of the register covering irq i (N irqs per register).
 */
static void __init gic_dist_init(struct gic_chip_data *gic)
{
	unsigned int i, irq;
	u32 cpumask;
	unsigned int gic_irqs = gic->gic_irqs;
	struct irq_domain *domain = &gic->domain;
	void __iomem *base = gic->dist_base;
	u32 cpu = 0;

#ifdef CONFIG_SMP
	cpu = cpu_logical_map(smp_processor_id());
#endif

	/* Replicate this CPU's target bit into all four byte lanes. */
	cpumask = 1 << cpu;
	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;

	writel_relaxed(0, base + GIC_DIST_CTRL);

	/*
	 * Set all global interrupts to be level triggered, active low.
	 */
	for (i = 32; i < gic_irqs; i += 16)
		writel_relaxed(0, base + GIC_DIST_CONFIG + i * 4 / 16);

	/*
	 * Set all global interrupts to this CPU only.
	 */
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);

	/*
	 * Set priority on all global interrupts.
	 */
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);

	/*
	 * Disable all interrupts. Leave the PPI and SGIs alone
	 * as these enables are banked registers.
	 */
	for (i = 32; i < gic_irqs; i += 32)
		writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);

	/*
	 * Setup the Linux IRQ subsystem.
	 */
	irq_domain_for_each_irq(domain, i, irq) {
		if (i < 32) {
			/* SGIs/PPIs are per-cpu and must not be auto-enabled. */
			irq_set_percpu_devid(irq);
			irq_set_chip_and_handler(irq, &gic_chip,
						 handle_percpu_devid_irq);
			set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
		} else {
			irq_set_chip_and_handler(irq, &gic_chip,
						 handle_fasteoi_irq);
			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
		}
		irq_set_chip_data(irq, gic);
	}

	writel_relaxed(1, base + GIC_DIST_CTRL);
}
326
/*
 * Per-CPU interface initialisation.  Run on each CPU as it comes up,
 * since the PPI/SGI enables, priorities and the CPU interface registers
 * are banked per CPU.
 */
static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
{
	void __iomem *dist_base = gic->dist_base;
	void __iomem *base = gic->cpu_base;
	int i;

	/*
	 * Deal with the banked PPI and SGI interrupts - disable all
	 * PPI interrupts, ensure all SGI interrupts are enabled.
	 */
	writel_relaxed(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
	writel_relaxed(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);

	/*
	 * Set priority on PPI and SGI interrupts
	 */
	for (i = 0; i < 32; i += 4)
		writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);

	/* Accept all priorities above 0xf0, then enable the CPU interface. */
	writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
	writel_relaxed(1, base + GIC_CPU_CTRL);
}
349
350 #ifdef CONFIG_CPU_PM
/*
 * Saves the GIC distributor registers during suspend or idle. Must be called
 * with interrupts disabled but before powering down the GIC. After calling
 * this function, no interrupts will be delivered by the GIC, and another
 * platform-specific wakeup source must be enabled.
 */
static void gic_dist_save(unsigned int gic_nr)
{
	unsigned int gic_irqs;
	void __iomem *dist_base;
	int i;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	gic_irqs = gic_data[gic_nr].gic_irqs;
	dist_base = gic_data[gic_nr].dist_base;

	if (!dist_base)
		return;

	/* 16 config fields per register (2 bits each). */
	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		gic_data[gic_nr].saved_spi_conf[i] =
			readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);

	/* 4 target bytes per register. */
	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		gic_data[gic_nr].saved_spi_target[i] =
			readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);

	/* 32 enable bits per register. */
	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		gic_data[gic_nr].saved_spi_enable[i] =
			readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
}
384
/*
 * Restores the GIC distributor registers during resume or when coming out of
 * idle. Must be called before enabling interrupts. If a level interrupt
 * that occurred while the GIC was suspended is still present, it will be
 * handled normally, but any edge interrupts that occurred will not be seen by
 * the GIC and need to be handled by the platform-specific wakeup source.
 */
static void gic_dist_restore(unsigned int gic_nr)
{
	unsigned int gic_irqs;
	unsigned int i;
	void __iomem *dist_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	gic_irqs = gic_data[gic_nr].gic_irqs;
	dist_base = gic_data[gic_nr].dist_base;

	if (!dist_base)
		return;

	/* Keep the distributor disabled while reprogramming it. */
	writel_relaxed(0, dist_base + GIC_DIST_CTRL);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_conf[i],
			dist_base + GIC_DIST_CONFIG + i * 4);

	/* Priorities are not saved; reprogram the default used at init. */
	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(0xa0a0a0a0,
			dist_base + GIC_DIST_PRI + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
			dist_base + GIC_DIST_TARGET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
			dist_base + GIC_DIST_ENABLE_SET + i * 4);

	writel_relaxed(1, dist_base + GIC_DIST_CTRL);
}
427
/*
 * Save this CPU's banked PPI/SGI state (enable and config registers for
 * irqs 0-31) into the per-cpu areas allocated by gic_pm_init().
 */
static void gic_cpu_save(unsigned int gic_nr)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	dist_base = gic_data[gic_nr].dist_base;
	cpu_base = gic_data[gic_nr].cpu_base;

	if (!dist_base || !cpu_base)
		return;

	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);

	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);

}
453
/*
 * Restore this CPU's banked PPI/SGI state saved by gic_cpu_save(), then
 * reprogram the default PPI/SGI priorities and re-enable the CPU
 * interface (mirroring gic_cpu_init(); priorities are not saved).
 */
static void gic_cpu_restore(unsigned int gic_nr)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	dist_base = gic_data[gic_nr].dist_base;
	cpu_base = gic_data[gic_nr].cpu_base;

	if (!dist_base || !cpu_base)
		return;

	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);

	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
		writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4);

	writel_relaxed(0xf0, cpu_base + GIC_CPU_PRIMASK);
	writel_relaxed(1, cpu_base + GIC_CPU_CTRL);
}
484
485 static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
486 {
487 int i;
488
489 for (i = 0; i < MAX_GIC_NR; i++) {
490 switch (cmd) {
491 case CPU_PM_ENTER:
492 gic_cpu_save(i);
493 break;
494 case CPU_PM_ENTER_FAILED:
495 case CPU_PM_EXIT:
496 gic_cpu_restore(i);
497 break;
498 case CPU_CLUSTER_PM_ENTER:
499 gic_dist_save(i);
500 break;
501 case CPU_CLUSTER_PM_ENTER_FAILED:
502 case CPU_CLUSTER_PM_EXIT:
503 gic_dist_restore(i);
504 break;
505 }
506 }
507
508 return NOTIFY_OK;
509 }
510
/* Hooked into the CPU PM framework by gic_pm_init(). */
static struct notifier_block gic_notifier_block = {
	.notifier_call = gic_notifier,
};

/*
 * Allocate the per-cpu save areas for the banked PPI/SGI enable (1 word)
 * and config (2 words) registers and register the PM notifier.
 */
static void __init gic_pm_init(struct gic_chip_data *gic)
{
	gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
		sizeof(u32));
	BUG_ON(!gic->saved_ppi_enable);

	gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
		sizeof(u32));
	BUG_ON(!gic->saved_ppi_conf);

	cpu_pm_register_notifier(&gic_notifier_block);
}
#else
/* CPU PM disabled: nothing to save or restore. */
static void __init gic_pm_init(struct gic_chip_data *gic)
{
}
#endif
532
/* No domain callbacks needed yet; the domain only holds mapping data. */
const struct irq_domain_ops gic_irq_domain_ops = {
};
535
536 void __init gic_init(unsigned int gic_nr, unsigned int irq_start,
537 void __iomem *dist_base, void __iomem *cpu_base)
538 {
539 struct gic_chip_data *gic;
540 struct irq_domain *domain;
541 int gic_irqs;
542
543 BUG_ON(gic_nr >= MAX_GIC_NR);
544
545 gic = &gic_data[gic_nr];
546 domain = &gic->domain;
547 gic->dist_base = dist_base;
548 gic->cpu_base = cpu_base;
549
550 /*
551 * For primary GICs, skip over SGIs.
552 * For secondary GICs, skip over PPIs, too.
553 */
554 if (gic_nr == 0) {
555 gic_cpu_base_addr = cpu_base;
556 domain->hwirq_base = 16;
557 irq_start = (irq_start & ~31) + 16;
558 } else
559 domain->hwirq_base = 32;
560
561 /*
562 * Find out how many interrupts are supported.
563 * The GIC only supports up to 1020 interrupt sources.
564 */
565 gic_irqs = readl_relaxed(dist_base + GIC_DIST_CTR) & 0x1f;
566 gic_irqs = (gic_irqs + 1) * 32;
567 if (gic_irqs > 1020)
568 gic_irqs = 1020;
569 gic->gic_irqs = gic_irqs;
570
571 domain->nr_irq = gic_irqs - domain->hwirq_base;
572 domain->irq_base = irq_alloc_descs(-1, irq_start, domain->nr_irq,
573 numa_node_id());
574 domain->priv = gic;
575 domain->ops = &gic_irq_domain_ops;
576 irq_domain_add(domain);
577
578 gic_chip.flags |= gic_arch_extn.flags;
579 gic_dist_init(gic);
580 gic_cpu_init(gic);
581 gic_pm_init(gic);
582 }
583
/*
 * Initialise the banked CPU interface state of @gic_nr on the calling
 * (secondary) CPU; the distributor was already set up by gic_init().
 */
void __cpuinit gic_secondary_init(unsigned int gic_nr)
{
	BUG_ON(gic_nr >= MAX_GIC_NR);

	gic_cpu_init(&gic_data[gic_nr]);
}
590
#ifdef CONFIG_SMP
/*
 * Send a software-generated interrupt (SGI) @irq to the CPUs in @mask.
 * The target list written to GIC_DIST_SOFTINT is a physical CPU bitmap.
 */
void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
	int cpu;
	unsigned long map = 0;

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask)
		map |= 1 << cpu_logical_map(cpu);

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	dsb();

	/* this always happens on GIC0 */
	writel_relaxed(map << 16 | irq, gic_data[0].dist_base + GIC_DIST_SOFTINT);
}
#endif
This page took 0.049497 seconds and 5 git commands to generate.