/*
 * Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Interrupt architecture for the GIC:
 *
 * o There is one Interrupt Distributor, which receives interrupts
 *   from system devices and sends them to the Interrupt Controllers.
 *
 * o There is one CPU Interface per CPU, which sends interrupts sent
 *   by the Distributor, and interrupts generated locally, to the
 *   associated CPU. The base address of the CPU interface is usually
 *   aliased so that the same address points to different chips depending
 *   on the CPU it is accessed from.
 *
 * Note that IRQs 0-31 are special - they are local to each CPU.
 * As such, the enable set/clear, pending set/clear and active bit
 * registers are banked per-cpu for these sources.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/cpumask.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/cputype.h>
#include <asm/irq.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>

#include "irq-gic-common.h"
#include "irqchip.h"

union gic_base {
	void __iomem *common_base;
	void __percpu * __iomem *percpu_base;
};

struct gic_chip_data {
	union gic_base dist_base;
	union gic_base cpu_base;
#ifdef CONFIG_CPU_PM
	u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
	u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
	u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
	u32 __percpu *saved_ppi_enable;
	u32 __percpu *saved_ppi_conf;
#endif
	struct irq_domain *domain;
	unsigned int gic_irqs;
#ifdef CONFIG_GIC_NON_BANKED
	void __iomem *(*get_base)(union gic_base *);
#endif
};

static DEFINE_RAW_SPINLOCK(irq_controller_lock);

/*
 * The GIC mapping of CPU interfaces does not necessarily match
 * the logical CPU numbering. Let's use a mapping as returned
 * by the GIC itself.
 */
#define NR_GIC_CPU_IF 8
static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;

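/*
 * Example: if logical CPU 2 is wired to GIC CPU interface 3, then
 * gic_cpu_map[2] is 0x08. Each entry starts out as 0xff and is
 * refined in gic_cpu_init() once the corresponding CPU probes its
 * own interface mask.
 */
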
/*
 * Supported arch specific GIC irq extension.
 * The hooks all default to NULL.
 */
struct irq_chip gic_arch_extn = {
	.irq_eoi	= NULL,
	.irq_mask	= NULL,
	.irq_unmask	= NULL,
	.irq_retrigger	= NULL,
	.irq_set_type	= NULL,
	.irq_set_wake	= NULL,
};

#ifndef MAX_GIC_NR
#define MAX_GIC_NR	1
#endif

static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly;

#ifdef CONFIG_GIC_NON_BANKED
static void __iomem *gic_get_percpu_base(union gic_base *base)
{
	return raw_cpu_read(*base->percpu_base);
}

static void __iomem *gic_get_common_base(union gic_base *base)
{
	return base->common_base;
}

static inline void __iomem *gic_data_dist_base(struct gic_chip_data *data)
{
	return data->get_base(&data->dist_base);
}

static inline void __iomem *gic_data_cpu_base(struct gic_chip_data *data)
{
	return data->get_base(&data->cpu_base);
}

static inline void gic_set_base_accessor(struct gic_chip_data *data,
					 void __iomem *(*f)(union gic_base *))
{
	data->get_base = f;
}
#else
#define gic_data_dist_base(d)	((d)->dist_base.common_base)
#define gic_data_cpu_base(d)	((d)->cpu_base.common_base)
#define gic_set_base_accessor(d, f)
#endif

static inline void __iomem *gic_dist_base(struct irq_data *d)
{
	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
	return gic_data_dist_base(gic_data);
}

static inline void __iomem *gic_cpu_base(struct irq_data *d)
{
	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
	return gic_data_cpu_base(gic_data);
}

static inline unsigned int gic_irq(struct irq_data *d)
{
	return d->hwirq;
}

/*
 * Routines to acknowledge, disable and enable interrupts
 */
static void gic_mask_irq(struct irq_data *d)
{
	u32 mask = 1 << (gic_irq(d) % 32);

	raw_spin_lock(&irq_controller_lock);
	writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
	if (gic_arch_extn.irq_mask)
		gic_arch_extn.irq_mask(d);
	raw_spin_unlock(&irq_controller_lock);
}

static void gic_unmask_irq(struct irq_data *d)
{
	u32 mask = 1 << (gic_irq(d) % 32);

	raw_spin_lock(&irq_controller_lock);
	if (gic_arch_extn.irq_unmask)
		gic_arch_extn.irq_unmask(d);
	writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
	raw_spin_unlock(&irq_controller_lock);
}

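/*
 * Note on the maths above: hwirq N lives in bit (N % 32) of enable
 * word (N / 32). Because the distributor has separate set and clear
 * registers (writing 1 enables or disables, 0 is ignored), masking
 * and unmasking need no read-modify-write of the enable state.
 */
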
static void gic_eoi_irq(struct irq_data *d)
{
	if (gic_arch_extn.irq_eoi) {
		raw_spin_lock(&irq_controller_lock);
		gic_arch_extn.irq_eoi(d);
		raw_spin_unlock(&irq_controller_lock);
	}

	writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
}

static int gic_set_type(struct irq_data *d, unsigned int type)
{
	void __iomem *base = gic_dist_base(d);
	unsigned int gicirq = gic_irq(d);

	/* Interrupt configuration for SGIs can't be changed */
	if (gicirq < 16)
		return -EINVAL;

	if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	raw_spin_lock(&irq_controller_lock);

	if (gic_arch_extn.irq_set_type)
		gic_arch_extn.irq_set_type(d, type);

	gic_configure_irq(gicirq, type, base, NULL);

	raw_spin_unlock(&irq_controller_lock);

	return 0;
}

static int gic_retrigger(struct irq_data *d)
{
	if (gic_arch_extn.irq_retrigger)
		return gic_arch_extn.irq_retrigger(d);

	/* the genirq layer expects 0 if we can't retrigger in hardware */
	return 0;
}

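/*
 * GIC_DIST_TARGET holds one byte-wide CPU interface mask per interrupt,
 * four interrupts per 32-bit register: hence the (hwirq & ~3) word
 * address and the (hwirq % 4) * 8 byte-lane shift used below.
 */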
#ifdef CONFIG_SMP
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
	unsigned int cpu, shift = (gic_irq(d) % 4) * 8;
	u32 val, mask, bit;

	if (!force)
		cpu = cpumask_any_and(mask_val, cpu_online_mask);
	else
		cpu = cpumask_first(mask_val);

	if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
		return -EINVAL;

	raw_spin_lock(&irq_controller_lock);
	mask = 0xff << shift;
	bit = gic_cpu_map[cpu] << shift;
	val = readl_relaxed(reg) & ~mask;
	writel_relaxed(val | bit, reg);
	raw_spin_unlock(&irq_controller_lock);

	return IRQ_SET_MASK_OK;
}
#endif

#ifdef CONFIG_PM
static int gic_set_wake(struct irq_data *d, unsigned int on)
{
	int ret = -ENXIO;

	if (gic_arch_extn.irq_set_wake)
		ret = gic_arch_extn.irq_set_wake(d, on);

	return ret;
}

#else
#define gic_set_wake	NULL
#endif

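/*
 * Reading GIC_CPU_INTACK acknowledges the highest-priority pending
 * interrupt and returns its ID: 0-15 are SGIs (used as IPIs here),
 * 16-31 are PPIs, and IDs above 1020 are reserved/spurious values
 * that terminate the handling loop below.
 */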
static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	u32 irqstat, irqnr;
	struct gic_chip_data *gic = &gic_data[0];
	void __iomem *cpu_base = gic_data_cpu_base(gic);

	do {
		irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
		irqnr = irqstat & GICC_IAR_INT_ID_MASK;

		if (likely(irqnr > 15 && irqnr < 1021)) {
			handle_domain_irq(gic->domain, irqnr, regs);
			continue;
		}
		if (irqnr < 16) {
			writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
#ifdef CONFIG_SMP
			handle_IPI(irqnr, regs);
#endif
			continue;
		}
		break;
	} while (1);
}

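/*
 * Chained handler for a secondary GIC cascaded off a primary GIC
 * interrupt: acknowledge the parent, read the secondary's INTACK to
 * find the real source, and forward it to the mapped Linux interrupt.
 * GICC_INT_SPURIOUS (1023) means nothing was actually pending.
 */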
static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
	struct gic_chip_data *chip_data = irq_get_handler_data(irq);
	struct irq_chip *chip = irq_get_chip(irq);
	unsigned int cascade_irq, gic_irq;
	unsigned long status;

	chained_irq_enter(chip, desc);

	raw_spin_lock(&irq_controller_lock);
	status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK);
	raw_spin_unlock(&irq_controller_lock);

	gic_irq = (status & GICC_IAR_INT_ID_MASK);
	if (gic_irq == GICC_INT_SPURIOUS)
		goto out;

	cascade_irq = irq_find_mapping(chip_data->domain, gic_irq);
	if (unlikely(gic_irq < 32 || gic_irq > 1020))
		handle_bad_irq(cascade_irq, desc);
	else
		generic_handle_irq(cascade_irq);

 out:
	chained_irq_exit(chip, desc);
}

static struct irq_chip gic_chip = {
	.name			= "GIC",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_retrigger		= gic_retrigger,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
	.irq_set_wake		= gic_set_wake,
};

void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
{
	if (gic_nr >= MAX_GIC_NR)
		BUG();
	if (irq_set_handler_data(irq, &gic_data[gic_nr]) != 0)
		BUG();
	irq_set_chained_handler(irq, gic_handle_cascade_irq);
}

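/*
 * The first 32 bytes of GIC_DIST_TARGET (interrupts 0-31) are read-only
 * and return the interface mask of the CPU performing the read, so the
 * first non-zero byte found below is this CPU's own interface mask.
 */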
static u8 gic_get_cpumask(struct gic_chip_data *gic)
{
	void __iomem *base = gic_data_dist_base(gic);
	u32 mask, i;

	for (i = mask = 0; i < 32; i += 4) {
		mask = readl_relaxed(base + GIC_DIST_TARGET + i);
		mask |= mask >> 16;
		mask |= mask >> 8;
		if (mask)
			break;
	}

	if (!mask)
		pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");

	return mask;
}

static void gic_cpu_if_up(void)
{
	void __iomem *cpu_base = gic_data_cpu_base(&gic_data[0]);
	u32 bypass = 0;

	/*
	 * Preserve bypass disable bits to be written back later
	 */
	bypass = readl(cpu_base + GIC_CPU_CTRL);
	bypass &= GICC_DIS_BYPASS_MASK;

	writel_relaxed(bypass | GICC_ENABLE, cpu_base + GIC_CPU_CTRL);
}

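/*
 * The 8-bit mask from gic_get_cpumask() is replicated into all four
 * byte lanes of a word below, so a single register write sets the
 * target for four interrupts at a time.
 */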
static void __init gic_dist_init(struct gic_chip_data *gic)
{
	unsigned int i;
	u32 cpumask;
	unsigned int gic_irqs = gic->gic_irqs;
	void __iomem *base = gic_data_dist_base(gic);

	writel_relaxed(GICD_DISABLE, base + GIC_DIST_CTRL);

	/*
	 * Set all global interrupts to this CPU only.
	 */
	cpumask = gic_get_cpumask(gic);
	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);

	gic_dist_config(base, gic_irqs, NULL);

	writel_relaxed(GICD_ENABLE, base + GIC_DIST_CTRL);
}

static void gic_cpu_init(struct gic_chip_data *gic)
{
	void __iomem *dist_base = gic_data_dist_base(gic);
	void __iomem *base = gic_data_cpu_base(gic);
	unsigned int cpu_mask, cpu = smp_processor_id();
	int i;

	/*
	 * Get what the GIC says our CPU mask is.
	 */
	BUG_ON(cpu >= NR_GIC_CPU_IF);
	cpu_mask = gic_get_cpumask(gic);
	gic_cpu_map[cpu] = cpu_mask;

	/*
	 * Clear our mask from the other map entries in case they're
	 * still undefined.
	 */
	for (i = 0; i < NR_GIC_CPU_IF; i++)
		if (i != cpu)
			gic_cpu_map[i] &= ~cpu_mask;

	gic_cpu_config(dist_base, NULL);

	writel_relaxed(GICC_INT_PRI_THRESHOLD, base + GIC_CPU_PRIMASK);
	gic_cpu_if_up();
}

void gic_cpu_if_down(void)
{
	void __iomem *cpu_base = gic_data_cpu_base(&gic_data[0]);
	u32 val = 0;

	val = readl(cpu_base + GIC_CPU_CTRL);
	val &= ~GICC_ENABLE;
	writel_relaxed(val, cpu_base + GIC_CPU_CTRL);
}

#ifdef CONFIG_CPU_PM
/*
 * Saves the GIC distributor registers during suspend or idle. Must be called
 * with interrupts disabled but before powering down the GIC. After calling
 * this function, no interrupts will be delivered by the GIC, and another
 * platform-specific wakeup source must be enabled.
 */
static void gic_dist_save(unsigned int gic_nr)
{
	unsigned int gic_irqs;
	void __iomem *dist_base;
	int i;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	gic_irqs = gic_data[gic_nr].gic_irqs;
	dist_base = gic_data_dist_base(&gic_data[gic_nr]);

	if (!dist_base)
		return;

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		gic_data[gic_nr].saved_spi_conf[i] =
			readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		gic_data[gic_nr].saved_spi_target[i] =
			readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		gic_data[gic_nr].saved_spi_enable[i] =
			readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
}

/*
 * Restores the GIC distributor registers during resume or when coming out of
 * idle. Must be called before enabling interrupts. If a level interrupt
 * that occurred while the GIC was suspended is still present, it will be
 * handled normally, but any edge interrupts that occurred will not be seen by
 * the GIC and need to be handled by the platform-specific wakeup source.
 */
static void gic_dist_restore(unsigned int gic_nr)
{
	unsigned int gic_irqs;
	unsigned int i;
	void __iomem *dist_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	gic_irqs = gic_data[gic_nr].gic_irqs;
	dist_base = gic_data_dist_base(&gic_data[gic_nr]);

	if (!dist_base)
		return;

	writel_relaxed(GICD_DISABLE, dist_base + GIC_DIST_CTRL);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_conf[i],
			dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(GICD_INT_DEF_PRI_X4,
			dist_base + GIC_DIST_PRI + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
			dist_base + GIC_DIST_TARGET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
			dist_base + GIC_DIST_ENABLE_SET + i * 4);

	writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL);
}

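/*
 * PPIs (interrupts 0-31) are banked per CPU interface, so their enable
 * and config state is saved and restored through per-CPU buffers on
 * the CPU that owns them.
 */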
static void gic_cpu_save(unsigned int gic_nr)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);

	if (!dist_base || !cpu_base)
		return;

	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);

	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
}

static void gic_cpu_restore(unsigned int gic_nr)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);

	if (!dist_base || !cpu_base)
		return;

	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);

	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
		writel_relaxed(GICD_INT_DEF_PRI_X4,
			dist_base + GIC_DIST_PRI + i * 4);

	writel_relaxed(GICC_INT_PRI_THRESHOLD, cpu_base + GIC_CPU_PRIMASK);
	gic_cpu_if_up();
}

static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	int i;

	for (i = 0; i < MAX_GIC_NR; i++) {
#ifdef CONFIG_GIC_NON_BANKED
		/* Skip over unused GICs */
		if (!gic_data[i].get_base)
			continue;
#endif
		switch (cmd) {
		case CPU_PM_ENTER:
			gic_cpu_save(i);
			break;
		case CPU_PM_ENTER_FAILED:
		case CPU_PM_EXIT:
			gic_cpu_restore(i);
			break;
		case CPU_CLUSTER_PM_ENTER:
			gic_dist_save(i);
			break;
		case CPU_CLUSTER_PM_ENTER_FAILED:
		case CPU_CLUSTER_PM_EXIT:
			gic_dist_restore(i);
			break;
		}
	}

	return NOTIFY_OK;
}

static struct notifier_block gic_notifier_block = {
	.notifier_call = gic_notifier,
};

static void __init gic_pm_init(struct gic_chip_data *gic)
{
	gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
		sizeof(u32));
	BUG_ON(!gic->saved_ppi_enable);

	gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
		sizeof(u32));
	BUG_ON(!gic->saved_ppi_conf);

	if (gic == &gic_data[0])
		cpu_pm_register_notifier(&gic_notifier_block);
}
#else
static void __init gic_pm_init(struct gic_chip_data *gic)
{
}
#endif

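/*
 * A write to GIC_DIST_SOFTINT (GICD_SGIR) raises an SGI: bits [23:16]
 * carry the destination CPU interface mask and bits [3:0] the SGI
 * number, hence the "map << 16 | irq" encoding used below.
 */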
#ifdef CONFIG_SMP
static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
	int cpu;
	unsigned long flags, map = 0;

	raw_spin_lock_irqsave(&irq_controller_lock, flags);

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask)
		map |= gic_cpu_map[cpu];

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before they observe us issuing the IPI.
	 */
	dmb(ishst);

	/* this always happens on GIC0 */
	writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);

	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
}
#endif

#ifdef CONFIG_BL_SWITCHER
/*
 * gic_send_sgi - send an SGI directly to given CPU interface number
 *
 * cpu_id: the ID for the destination CPU interface
 * irq: the IPI number to send an SGI for
 */
void gic_send_sgi(unsigned int cpu_id, unsigned int irq)
{
	BUG_ON(cpu_id >= NR_GIC_CPU_IF);
	cpu_id = 1 << cpu_id;
	/* this always happens on GIC0 */
	writel_relaxed((cpu_id << 16) | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
}

/*
 * gic_get_cpu_id - get the CPU interface ID for the specified CPU
 *
 * @cpu: the logical CPU number to get the GIC ID for.
 *
 * Return the CPU interface ID for the given logical CPU number,
 * or -1 if the CPU number is too large or the interface ID is
 * unknown (more than one bit set).
 */
int gic_get_cpu_id(unsigned int cpu)
{
	unsigned int cpu_bit;

	if (cpu >= NR_GIC_CPU_IF)
		return -1;
	cpu_bit = gic_cpu_map[cpu];
	if (cpu_bit & (cpu_bit - 1))
		return -1;
	return __ffs(cpu_bit);
}

/*
 * gic_migrate_target - migrate IRQs to another CPU interface
 *
 * @new_cpu_id: the CPU target ID to migrate IRQs to
 *
 * Migrate all peripheral interrupts with a target matching the current CPU
 * to the interface corresponding to @new_cpu_id. The CPU interface mapping
 * is also updated. Targets to other CPU interfaces are unchanged.
 * This must be called with IRQs locally disabled.
 */
void gic_migrate_target(unsigned int new_cpu_id)
{
	unsigned int cur_cpu_id, gic_irqs, gic_nr = 0;
	void __iomem *dist_base;
	int i, ror_val, cpu = smp_processor_id();
	u32 val, cur_target_mask, active_mask;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	if (!dist_base)
		return;
	gic_irqs = gic_data[gic_nr].gic_irqs;

	cur_cpu_id = __ffs(gic_cpu_map[cpu]);
	cur_target_mask = 0x01010101 << cur_cpu_id;
	ror_val = (cur_cpu_id - new_cpu_id) & 31;

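	/*
	 * Each GIC_DIST_TARGET byte holds a one-hot interface mask, so
	 * cur_target_mask selects the current interface's bit in all four
	 * byte lanes at once, and ror32() by (cur - new) mod 32 moves each
	 * selected bit to the new interface's position within its own byte
	 * lane.
	 */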
	raw_spin_lock(&irq_controller_lock);

	/* Update the target interface for this logical CPU */
	gic_cpu_map[cpu] = 1 << new_cpu_id;

	/*
	 * Find all the peripheral interrupts targeting the current
	 * CPU interface and migrate them to the new CPU interface.
	 * We skip DIST_TARGET 0 to 7 as they are read-only.
	 */
	for (i = 8; i < DIV_ROUND_UP(gic_irqs, 4); i++) {
		val = readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
		active_mask = val & cur_target_mask;
		if (active_mask) {
			val &= ~active_mask;
			val |= ror32(active_mask, ror_val);
			writel_relaxed(val, dist_base + GIC_DIST_TARGET + i*4);
		}
	}

	raw_spin_unlock(&irq_controller_lock);

	/*
	 * Now let's migrate and clear any potential SGIs that might be
	 * pending for us (cur_cpu_id). Since GIC_DIST_SGI_PENDING_SET
	 * is a banked register, we can only forward the SGI using
	 * GIC_DIST_SOFTINT. The original SGI source is lost but Linux
	 * doesn't use that information anyway.
	 *
	 * For the same reason we do not adjust SGI source information
	 * for previously sent SGIs by us to other CPUs either.
	 */
	for (i = 0; i < 16; i += 4) {
		int j;
		val = readl_relaxed(dist_base + GIC_DIST_SGI_PENDING_SET + i);
		if (!val)
			continue;
		writel_relaxed(val, dist_base + GIC_DIST_SGI_PENDING_CLEAR + i);
		for (j = i; j < i + 4; j++) {
			if (val & 0xff)
				writel_relaxed((1 << (new_cpu_id + 16)) | j,
						dist_base + GIC_DIST_SOFTINT);
			val >>= 8;
		}
	}
}

/*
 * gic_get_sgir_physaddr - get the physical address for the SGI register
 *
 * Return the physical address of the SGI register to be used by some
 * early assembly code when the kernel is not yet available.
 */
static unsigned long gic_dist_physaddr;

unsigned long gic_get_sgir_physaddr(void)
{
	if (!gic_dist_physaddr)
		return 0;
	return gic_dist_physaddr + GIC_DIST_SOFTINT;
}

void __init gic_init_physaddr(struct device_node *node)
{
	struct resource res;
	if (of_address_to_resource(node, 0, &res) == 0) {
		gic_dist_physaddr = res.start;
		pr_info("GIC physical location is %#lx\n", gic_dist_physaddr);
	}
}

#else
#define gic_init_physaddr(node)  do { } while (0)
#endif

static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
				irq_hw_number_t hw)
{
	if (hw < 32) {
		irq_set_percpu_devid(irq);
		irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
				    handle_percpu_devid_irq, NULL, NULL);
		set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
	} else {
		irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);

		gic_routable_irq_domain_ops->map(d, irq, hw);
	}
	return 0;
}

static void gic_irq_domain_unmap(struct irq_domain *d, unsigned int irq)
{
	gic_routable_irq_domain_ops->unmap(d, irq);
}

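/*
 * A GIC interrupt specifier in DT has three cells: cell 0 selects SPI
 * (0) or PPI (1), cell 1 is the interrupt number within that space and
 * cell 2 holds the trigger flags. Hence the +16 below to skip over the
 * SGIs, plus another +16 for SPIs to skip over the PPIs.
 */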
static int gic_irq_domain_xlate(struct irq_domain *d,
				struct device_node *controller,
				const u32 *intspec, unsigned int intsize,
				unsigned long *out_hwirq, unsigned int *out_type)
{
	unsigned long ret = 0;

	if (d->of_node != controller)
		return -EINVAL;
	if (intsize < 3)
		return -EINVAL;

	/* Get the interrupt number and add 16 to skip over SGIs */
	*out_hwirq = intspec[1] + 16;

	/* For SPIs, we need to add 16 more to get the GIC irq ID number */
	if (!intspec[0]) {
		ret = gic_routable_irq_domain_ops->xlate(d, controller,
							 intspec,
							 intsize,
							 out_hwirq,
							 out_type);

		if (IS_ERR_VALUE(ret))
			return ret;
	}

	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

	return ret;
}

#ifdef CONFIG_SMP
static int gic_secondary_init(struct notifier_block *nfb, unsigned long action,
			      void *hcpu)
{
	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
		gic_cpu_init(&gic_data[0]);
	return NOTIFY_OK;
}

/*
 * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
 * priority because the GIC needs to be up before the ARM generic timers.
 */
static struct notifier_block gic_cpu_notifier = {
	.notifier_call = gic_secondary_init,
	.priority = 100,
};
#endif

static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	int i, ret;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	struct of_phandle_args *irq_data = arg;

	ret = gic_irq_domain_xlate(domain, irq_data->np, irq_data->args,
				   irq_data->args_count, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++)
		gic_irq_domain_map(domain, virq + i, hwirq + i);

	return 0;
}

static const struct irq_domain_ops gic_irq_domain_hierarchy_ops = {
	.xlate = gic_irq_domain_xlate,
	.alloc = gic_irq_domain_alloc,
	.free = irq_domain_free_irqs_top,
};

static const struct irq_domain_ops gic_irq_domain_ops = {
	.map = gic_irq_domain_map,
	.unmap = gic_irq_domain_unmap,
	.xlate = gic_irq_domain_xlate,
};

/* Default functions for routable irq domain */
static int gic_routable_irq_domain_map(struct irq_domain *d, unsigned int irq,
				       irq_hw_number_t hw)
{
	return 0;
}

static void gic_routable_irq_domain_unmap(struct irq_domain *d,
					  unsigned int irq)
{
}

static int gic_routable_irq_domain_xlate(struct irq_domain *d,
				struct device_node *controller,
				const u32 *intspec, unsigned int intsize,
				unsigned long *out_hwirq,
				unsigned int *out_type)
{
	*out_hwirq += 16;
	return 0;
}

static const struct irq_domain_ops gic_default_routable_irq_domain_ops = {
	.map = gic_routable_irq_domain_map,
	.unmap = gic_routable_irq_domain_unmap,
	.xlate = gic_routable_irq_domain_xlate,
};

const struct irq_domain_ops *gic_routable_irq_domain_ops =
			&gic_default_routable_irq_domain_ops;

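/*
 * Bits [4:0] of GIC_DIST_CTR (GICD_TYPER) encode ITLinesNumber: the
 * GIC implements 32 * (N + 1) interrupt lines, capped at 1020, which
 * is how gic_init_bases() sizes its irq domain below.
 */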
void __init gic_init_bases(unsigned int gic_nr, int irq_start,
			   void __iomem *dist_base, void __iomem *cpu_base,
			   u32 percpu_offset, struct device_node *node)
{
	irq_hw_number_t hwirq_base;
	struct gic_chip_data *gic;
	int gic_irqs, irq_base, i;
	int nr_routable_irqs;

	BUG_ON(gic_nr >= MAX_GIC_NR);

	gic = &gic_data[gic_nr];
#ifdef CONFIG_GIC_NON_BANKED
	if (percpu_offset) { /* Franken-GIC without banked registers... */
		unsigned int cpu;

		gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
		gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
		if (WARN_ON(!gic->dist_base.percpu_base ||
			    !gic->cpu_base.percpu_base)) {
			free_percpu(gic->dist_base.percpu_base);
			free_percpu(gic->cpu_base.percpu_base);
			return;
		}

		for_each_possible_cpu(cpu) {
			u32 mpidr = cpu_logical_map(cpu);
			u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
			unsigned long offset = percpu_offset * core_id;
			*per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
			*per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
		}

		gic_set_base_accessor(gic, gic_get_percpu_base);
	} else
#endif
	{ /* Normal, sane GIC... */
		WARN(percpu_offset,
		     "GIC_NON_BANKED not enabled, ignoring %08x offset!",
		     percpu_offset);
		gic->dist_base.common_base = dist_base;
		gic->cpu_base.common_base = cpu_base;
		gic_set_base_accessor(gic, gic_get_common_base);
	}

	/*
	 * Initialize the CPU interface map to all CPUs.
	 * It will be refined as each CPU probes its ID.
	 */
	for (i = 0; i < NR_GIC_CPU_IF; i++)
		gic_cpu_map[i] = 0xff;

	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources.
	 */
	gic_irqs = readl_relaxed(gic_data_dist_base(gic) + GIC_DIST_CTR) & 0x1f;
	gic_irqs = (gic_irqs + 1) * 32;
	if (gic_irqs > 1020)
		gic_irqs = 1020;
	gic->gic_irqs = gic_irqs;

	if (node) { /* DT case */
		const struct irq_domain_ops *ops = &gic_irq_domain_hierarchy_ops;

		if (!of_property_read_u32(node, "arm,routable-irqs",
					  &nr_routable_irqs)) {
			ops = &gic_irq_domain_ops;
			gic_irqs = nr_routable_irqs;
		}

		gic->domain = irq_domain_add_linear(node, gic_irqs, ops, gic);
	} else { /* Non-DT case */
		/*
		 * For primary GICs, skip over SGIs.
		 * For secondary GICs, skip over PPIs, too.
		 */
		if (gic_nr == 0 && (irq_start & 31) > 0) {
			hwirq_base = 16;
			if (irq_start != -1)
				irq_start = (irq_start & ~31) + 16;
		} else {
			hwirq_base = 32;
		}

		gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */

		irq_base = irq_alloc_descs(irq_start, 16, gic_irqs,
					   numa_node_id());
		if (IS_ERR_VALUE(irq_base)) {
			WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
			     irq_start);
			irq_base = irq_start;
		}

		gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base,
					hwirq_base, &gic_irq_domain_ops, gic);
	}

	if (WARN_ON(!gic->domain))
		return;

	if (gic_nr == 0) {
#ifdef CONFIG_SMP
		set_smp_cross_call(gic_raise_softirq);
		register_cpu_notifier(&gic_cpu_notifier);
#endif
		set_handle_irq(gic_handle_irq);
	}

	gic_chip.flags |= gic_arch_extn.flags;
	gic_dist_init(gic);
	gic_cpu_init(gic);
	gic_pm_init(gic);
}

#ifdef CONFIG_OF
static int gic_cnt __initdata;

static int __init
gic_of_init(struct device_node *node, struct device_node *parent)
{
	void __iomem *cpu_base;
	void __iomem *dist_base;
	u32 percpu_offset;
	int irq;

	if (WARN_ON(!node))
		return -ENODEV;

	dist_base = of_iomap(node, 0);
	WARN(!dist_base, "unable to map gic dist registers\n");

	cpu_base = of_iomap(node, 1);
	WARN(!cpu_base, "unable to map gic cpu registers\n");

	if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
		percpu_offset = 0;

	gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node);
	if (!gic_cnt)
		gic_init_physaddr(node);

	if (parent) {
		irq = irq_of_parse_and_map(node, 0);
		gic_cascade_irq(gic_cnt, irq);
	}

	if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
		gicv2m_of_init(node, gic_data[gic_cnt].domain);

	gic_cnt++;
	return 0;
}

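/*
 * Each IRQCHIP_DECLARE() below registers a DT compatible string so the
 * generic irqchip init code calls gic_of_init() for every supported
 * GIC flavour found in the device tree.
 */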
IRQCHIP_DECLARE(gic_400, "arm,gic-400", gic_of_init);
IRQCHIP_DECLARE(arm11mp_gic, "arm,arm11mp-gic", gic_of_init);
IRQCHIP_DECLARE(arm1176jzf_dc_gic, "arm,arm1176jzf-devchip-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init);
IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);

#endif