Commit | Line | Data |
---|---|---|
f27ecacc RK |
1 | /* |
2 | * linux/arch/arm/common/gic.c | |
3 | * | |
4 | * Copyright (C) 2002 ARM Limited, All Rights Reserved. | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License version 2 as | |
8 | * published by the Free Software Foundation. | |
9 | * | |
10 | * Interrupt architecture for the GIC: | |
11 | * | |
12 | * o There is one Interrupt Distributor, which receives interrupts | |
13 | * from system devices and sends them to the Interrupt Controllers. | |
14 | * | |
15 | * o There is one CPU Interface per CPU, which sends interrupts sent | |
16 | * by the Distributor, and interrupts generated locally, to the | |
b3a1bde4 CM |
17 | * associated CPU. The base address of the CPU interface is usually |
18 | * aliased so that the same address points to different chips depending | |
19 | * on the CPU it is accessed from. | |
f27ecacc RK |
20 | * |
21 | * Note that IRQs 0-31 are special - they are local to each CPU. | |
22 | * As such, the enable set/clear, pending set/clear and active bit | |
23 | * registers are banked per-cpu for these sources. | |
24 | */ | |
25 | #include <linux/init.h> | |
26 | #include <linux/kernel.h> | |
f37a53cc | 27 | #include <linux/err.h> |
7e1efcf5 | 28 | #include <linux/module.h> |
f27ecacc RK |
29 | #include <linux/list.h> |
30 | #include <linux/smp.h> | |
254056f3 | 31 | #include <linux/cpu_pm.h> |
dcb86e8c | 32 | #include <linux/cpumask.h> |
fced80c7 | 33 | #include <linux/io.h> |
b3f7ed03 RH |
34 | #include <linux/of.h> |
35 | #include <linux/of_address.h> | |
36 | #include <linux/of_irq.h> | |
4294f8ba | 37 | #include <linux/irqdomain.h> |
292b293c MZ |
38 | #include <linux/interrupt.h> |
39 | #include <linux/percpu.h> | |
40 | #include <linux/slab.h> | |
f27ecacc RK |
41 | |
42 | #include <asm/irq.h> | |
f27ecacc RK |
43 | #include <asm/mach/irq.h> |
44 | #include <asm/hardware/gic.h> | |
45 | ||
bd31b859 | 46 | static DEFINE_RAW_SPINLOCK(irq_controller_lock); |
f27ecacc | 47 | |
ff2e27ae | 48 | /* Address of GIC 0 CPU interface */ |
bef8f9ee | 49 | void __iomem *gic_cpu_base_addr __read_mostly; |
ff2e27ae | 50 | |
d7ed36a4 SS |
51 | /* |
52 | * Supported arch specific GIC irq extension. | |
53 | * Default make them NULL. | |
54 | */ | |
55 | struct irq_chip gic_arch_extn = { | |
1a01753e | 56 | .irq_eoi = NULL, |
d7ed36a4 SS |
57 | .irq_mask = NULL, |
58 | .irq_unmask = NULL, | |
59 | .irq_retrigger = NULL, | |
60 | .irq_set_type = NULL, | |
61 | .irq_set_wake = NULL, | |
62 | }; | |
63 | ||
b3a1bde4 CM |
64 | #ifndef MAX_GIC_NR |
65 | #define MAX_GIC_NR 1 | |
66 | #endif | |
67 | ||
bef8f9ee | 68 | static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly; |
b3a1bde4 | 69 | |
7d1f4288 | 70 | static inline void __iomem *gic_dist_base(struct irq_data *d) |
b3a1bde4 | 71 | { |
7d1f4288 | 72 | struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d); |
b3a1bde4 CM |
73 | return gic_data->dist_base; |
74 | } | |
75 | ||
7d1f4288 | 76 | static inline void __iomem *gic_cpu_base(struct irq_data *d) |
b3a1bde4 | 77 | { |
7d1f4288 | 78 | struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d); |
b3a1bde4 CM |
79 | return gic_data->cpu_base; |
80 | } | |
81 | ||
7d1f4288 | 82 | static inline unsigned int gic_irq(struct irq_data *d) |
b3a1bde4 | 83 | { |
4294f8ba | 84 | return d->hwirq; |
b3a1bde4 CM |
85 | } |
86 | ||
f27ecacc RK |
87 | /* |
88 | * Routines to acknowledge, disable and enable interrupts | |
f27ecacc | 89 | */ |
/*
 * Mask (disable) an interrupt at the distributor.
 *
 * The ICENABLER (ENABLE_CLEAR) registers hold one bit per interrupt,
 * 32 interrupts per 32-bit word, hence the %32 / /32 arithmetic.
 * The arch extension hook, if present, is called under the same lock
 * so the two mask operations appear atomic with respect to each other.
 */
static void gic_mask_irq(struct irq_data *d)
{
	u32 mask = 1 << (gic_irq(d) % 32);

	raw_spin_lock(&irq_controller_lock);
	writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
	if (gic_arch_extn.irq_mask)
		gic_arch_extn.irq_mask(d);
	raw_spin_unlock(&irq_controller_lock);
}
100 | ||
/*
 * Unmask (enable) an interrupt at the distributor.
 *
 * Mirror image of gic_mask_irq(): the arch extension unmask runs first,
 * then the ISENABLER (ENABLE_SET) bit is written, all under the
 * controller lock.
 */
static void gic_unmask_irq(struct irq_data *d)
{
	u32 mask = 1 << (gic_irq(d) % 32);

	raw_spin_lock(&irq_controller_lock);
	if (gic_arch_extn.irq_unmask)
		gic_arch_extn.irq_unmask(d);
	writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
	raw_spin_unlock(&irq_controller_lock);
}
111 | ||
/*
 * Signal end-of-interrupt to the CPU interface.
 *
 * The arch extension eoi hook (when set) needs the controller lock, but
 * the GIC_CPU_EOI write itself is a banked per-CPU register and is done
 * outside the lock.
 */
static void gic_eoi_irq(struct irq_data *d)
{
	if (gic_arch_extn.irq_eoi) {
		raw_spin_lock(&irq_controller_lock);
		gic_arch_extn.irq_eoi(d);
		raw_spin_unlock(&irq_controller_lock);
	}

	writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
}
122 | ||
/*
 * Configure the trigger type (level-high or edge-rising) of a
 * distributor interrupt.
 *
 * The ICFGR (CONFIG) registers pack 2 bits per interrupt (16 per word);
 * bit 1 of each pair selects edge (1) vs level (0), hence the
 * 0x2 << ((gicirq % 16) * 2) confmask.
 *
 * Returns 0 on success, -EINVAL for SGIs (0-15, whose configuration is
 * fixed) or for unsupported trigger types.
 */
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	void __iomem *base = gic_dist_base(d);
	unsigned int gicirq = gic_irq(d);
	u32 enablemask = 1 << (gicirq % 32);
	u32 enableoff = (gicirq / 32) * 4;
	u32 confmask = 0x2 << ((gicirq % 16) * 2);
	u32 confoff = (gicirq / 16) * 4;
	bool enabled = false;
	u32 val;

	/* Interrupt configuration for SGIs can't be changed */
	if (gicirq < 16)
		return -EINVAL;

	if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	raw_spin_lock(&irq_controller_lock);

	if (gic_arch_extn.irq_set_type)
		gic_arch_extn.irq_set_type(d, type);

	val = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
	if (type == IRQ_TYPE_LEVEL_HIGH)
		val &= ~confmask;
	else if (type == IRQ_TYPE_EDGE_RISING)
		val |= confmask;

	/*
	 * As recommended by the spec, disable the interrupt before changing
	 * the configuration
	 */
	if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
		writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
		enabled = true;
	}

	writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);

	/* Re-enable only if it was enabled on entry. */
	if (enabled)
		writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);

	raw_spin_unlock(&irq_controller_lock);

	return 0;
}
170 | ||
d7ed36a4 SS |
171 | static int gic_retrigger(struct irq_data *d) |
172 | { | |
173 | if (gic_arch_extn.irq_retrigger) | |
174 | return gic_arch_extn.irq_retrigger(d); | |
175 | ||
176 | return -ENXIO; | |
177 | } | |
178 | ||
#ifdef CONFIG_SMP
/*
 * Route an SPI to a single CPU.
 *
 * The ITARGETSR (TARGET) registers hold one byte per interrupt (4 per
 * word); the byte is a bitmask of target CPU interfaces, so only the
 * first online CPU from mask_val is selected.  The logical CPU number
 * is translated to the physical interface bit via cpu_logical_map().
 *
 * Returns IRQ_SET_MASK_OK on success, -EINVAL if no suitable CPU
 * (the GIC supports at most 8 CPU interfaces).
 */
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
	unsigned int shift = (gic_irq(d) % 4) * 8;
	unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
	u32 val, mask, bit;

	if (cpu >= 8 || cpu >= nr_cpu_ids)
		return -EINVAL;

	mask = 0xff << shift;
	bit = 1 << (cpu_logical_map(cpu) + shift);

	/* Read-modify-write the whole byte for this interrupt. */
	raw_spin_lock(&irq_controller_lock);
	val = readl_relaxed(reg) & ~mask;
	writel_relaxed(val | bit, reg);
	raw_spin_unlock(&irq_controller_lock);

	return IRQ_SET_MASK_OK;
}
#endif
f27ecacc | 202 | |
d7ed36a4 SS |
203 | #ifdef CONFIG_PM |
204 | static int gic_set_wake(struct irq_data *d, unsigned int on) | |
205 | { | |
206 | int ret = -ENXIO; | |
207 | ||
208 | if (gic_arch_extn.irq_set_wake) | |
209 | ret = gic_arch_extn.irq_set_wake(d, on); | |
210 | ||
211 | return ret; | |
212 | } | |
213 | ||
214 | #else | |
215 | #define gic_set_wake NULL | |
216 | #endif | |
217 | ||
/*
 * Chained handler for a secondary GIC cascaded into a primary GIC irq.
 *
 * Reads INTACK on the secondary's CPU interface to acknowledge and
 * fetch the pending interrupt, then re-dispatches it through the
 * secondary's irq domain.  1023 is the GIC "spurious" ID and means
 * nothing is pending; IDs below 32 (SGI/PPI) or above 1020 are invalid
 * on a cascade and are reported via do_bad_IRQ().
 */
static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
	struct gic_chip_data *chip_data = irq_get_handler_data(irq);
	struct irq_chip *chip = irq_get_chip(irq);
	unsigned int cascade_irq, gic_irq;
	unsigned long status;

	chained_irq_enter(chip, desc);

	raw_spin_lock(&irq_controller_lock);
	status = readl_relaxed(chip_data->cpu_base + GIC_CPU_INTACK);
	raw_spin_unlock(&irq_controller_lock);

	gic_irq = (status & 0x3ff);
	if (gic_irq == 1023)
		goto out;

	cascade_irq = irq_domain_to_irq(&chip_data->domain, gic_irq);
	if (unlikely(gic_irq < 32 || gic_irq > 1020 || cascade_irq >= NR_IRQS))
		do_bad_IRQ(cascade_irq, desc);
	else
		generic_handle_irq(cascade_irq);

 out:
	chained_irq_exit(chip, desc);
}
244 | ||
/* irq_chip callbacks shared by every GIC interrupt. */
static struct irq_chip gic_chip = {
	.name			= "GIC",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_retrigger		= gic_retrigger,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
	.irq_set_wake		= gic_set_wake,
};
257 | ||
/*
 * Hook secondary GIC @gic_nr up behind @irq of its parent controller.
 * The gic_chip_data pointer is stashed as handler data so that
 * gic_handle_cascade_irq() can find the right secondary at runtime.
 */
void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
{
	if (gic_nr >= MAX_GIC_NR)
		BUG();
	if (irq_set_handler_data(irq, &gic_data[gic_nr]) != 0)
		BUG();
	irq_set_chained_handler(irq, gic_handle_cascade_irq);
}
266 | ||
/*
 * One-time initialisation of the distributor.
 *
 * Disables the distributor, programs a sane default configuration for
 * all global (SPI, >= 32) interrupts — level triggered, targeted at
 * the boot CPU, default priority, all masked — registers the Linux
 * handlers for every irq in the domain, then re-enables the
 * distributor.  SGI/PPI (< 32) registers are banked per CPU and are
 * handled in gic_cpu_init() instead.
 */
static void __init gic_dist_init(struct gic_chip_data *gic)
{
	unsigned int i, irq;
	u32 cpumask;
	unsigned int gic_irqs = gic->gic_irqs;
	struct irq_domain *domain = &gic->domain;
	void __iomem *base = gic->dist_base;
	u32 cpu = 0;

#ifdef CONFIG_SMP
	cpu = cpu_logical_map(smp_processor_id());
#endif

	/* Replicate this CPU's target bit into all four bytes of a word. */
	cpumask = 1 << cpu;
	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;

	writel_relaxed(0, base + GIC_DIST_CTRL);

	/*
	 * Set all global interrupts to be level triggered, active low.
	 */
	for (i = 32; i < gic_irqs; i += 16)
		writel_relaxed(0, base + GIC_DIST_CONFIG + i * 4 / 16);

	/*
	 * Set all global interrupts to this CPU only.
	 */
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);

	/*
	 * Set priority on all global interrupts.
	 */
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);

	/*
	 * Disable all interrupts.  Leave the PPI and SGIs alone
	 * as these enables are banked registers.
	 */
	for (i = 32; i < gic_irqs; i += 32)
		writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);

	/*
	 * Setup the Linux IRQ subsystem.
	 */
	irq_domain_for_each_irq(domain, i, irq) {
		if (i < 32) {
			/* SGI/PPI: per-CPU, not auto-enabled on request. */
			irq_set_percpu_devid(irq);
			irq_set_chip_and_handler(irq, &gic_chip,
						 handle_percpu_devid_irq);
			set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
		} else {
			/* SPI: normal fasteoi flow, probing allowed. */
			irq_set_chip_and_handler(irq, &gic_chip,
						 handle_fasteoi_irq);
			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
		}
		irq_set_chip_data(irq, gic);
	}

	writel_relaxed(1, base + GIC_DIST_CTRL);
}
330 | ||
/*
 * Per-CPU initialisation of the banked SGI/PPI distributor registers
 * and this CPU's interface.  Called on the boot CPU from gic_init()
 * and on each secondary via gic_secondary_init().
 */
static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
{
	void __iomem *dist_base = gic->dist_base;
	void __iomem *base = gic->cpu_base;
	int i;

	/*
	 * Deal with the banked PPI and SGI interrupts - disable all
	 * PPI interrupts, ensure all SGI interrupts are enabled.
	 */
	writel_relaxed(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
	writel_relaxed(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);

	/*
	 * Set priority on PPI and SGI interrupts
	 */
	for (i = 0; i < 32; i += 4)
		writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);

	/* Accept interrupts of any priority below 0xf0, then enable. */
	writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
	writel_relaxed(1, base + GIC_CPU_CTRL);
}
353 | ||
254056f3 CC |
354 | #ifdef CONFIG_CPU_PM |
355 | /* | |
356 | * Saves the GIC distributor registers during suspend or idle. Must be called | |
357 | * with interrupts disabled but before powering down the GIC. After calling | |
358 | * this function, no interrupts will be delivered by the GIC, and another | |
359 | * platform-specific wakeup source must be enabled. | |
360 | */ | |
361 | static void gic_dist_save(unsigned int gic_nr) | |
362 | { | |
363 | unsigned int gic_irqs; | |
364 | void __iomem *dist_base; | |
365 | int i; | |
366 | ||
367 | if (gic_nr >= MAX_GIC_NR) | |
368 | BUG(); | |
369 | ||
370 | gic_irqs = gic_data[gic_nr].gic_irqs; | |
371 | dist_base = gic_data[gic_nr].dist_base; | |
372 | ||
373 | if (!dist_base) | |
374 | return; | |
375 | ||
376 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++) | |
377 | gic_data[gic_nr].saved_spi_conf[i] = | |
378 | readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4); | |
379 | ||
380 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++) | |
381 | gic_data[gic_nr].saved_spi_target[i] = | |
382 | readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4); | |
383 | ||
384 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) | |
385 | gic_data[gic_nr].saved_spi_enable[i] = | |
386 | readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4); | |
387 | } | |
388 | ||
/*
 * Restores the GIC distributor registers during resume or when coming out of
 * idle.  Must be called before enabling interrupts.  If a level interrupt
 * that occured while the GIC was suspended is still present, it will be
 * handled normally, but any edge interrupts that occured will not be seen by
 * the GIC and need to be handled by the platform-specific wakeup source.
 */
static void gic_dist_restore(unsigned int gic_nr)
{
	unsigned int gic_irqs;
	unsigned int i;
	void __iomem *dist_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	gic_irqs = gic_data[gic_nr].gic_irqs;
	dist_base = gic_data[gic_nr].dist_base;

	if (!dist_base)
		return;

	/* Keep the distributor disabled while rewriting its state. */
	writel_relaxed(0, dist_base + GIC_DIST_CTRL);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_conf[i],
			dist_base + GIC_DIST_CONFIG + i * 4);

	/* Priorities are not saved; reprogram the default used at init. */
	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(0xa0a0a0a0,
			dist_base + GIC_DIST_PRI + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
			dist_base + GIC_DIST_TARGET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
			dist_base + GIC_DIST_ENABLE_SET + i * 4);

	writel_relaxed(1, dist_base + GIC_DIST_CTRL);
}
431 | ||
/*
 * Save this CPU's banked SGI/PPI enable and configuration state into
 * the per-cpu buffers allocated by gic_pm_init().  Called from the
 * CPU_PM_ENTER notifier before the CPU's context is lost.
 */
static void gic_cpu_save(unsigned int gic_nr)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	dist_base = gic_data[gic_nr].dist_base;
	cpu_base = gic_data[gic_nr].cpu_base;

	if (!dist_base || !cpu_base)
		return;

	/* First 32 irqs only: one ENABLE word, two CONFIG words. */
	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);

	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);

}
457 | ||
/*
 * Restore this CPU's banked SGI/PPI state saved by gic_cpu_save(),
 * reprogram the default banked priorities, and re-enable the CPU
 * interface.  Called from the CPU_PM_EXIT / CPU_PM_ENTER_FAILED
 * notifier paths.
 */
static void gic_cpu_restore(unsigned int gic_nr)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	dist_base = gic_data[gic_nr].dist_base;
	cpu_base = gic_data[gic_nr].cpu_base;

	if (!dist_base || !cpu_base)
		return;

	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);

	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);

	/* Priorities are not saved; reprogram the default used at init. */
	for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
		writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4);

	writel_relaxed(0xf0, cpu_base + GIC_CPU_PRIMASK);
	writel_relaxed(1, cpu_base + GIC_CPU_CTRL);
}
488 | ||
/*
 * CPU PM notifier: save/restore per-CPU GIC state on CPU low-power
 * entry/exit, and the whole distributor on cluster entry/exit.
 * Applied to every GIC instance.  Always returns NOTIFY_OK.
 */
static int gic_notifier(struct notifier_block *self, unsigned long cmd,	void *v)
{
	int i;

	for (i = 0; i < MAX_GIC_NR; i++) {
		switch (cmd) {
		case CPU_PM_ENTER:
			gic_cpu_save(i);
			break;
		case CPU_PM_ENTER_FAILED:
		case CPU_PM_EXIT:
			gic_cpu_restore(i);
			break;
		case CPU_CLUSTER_PM_ENTER:
			gic_dist_save(i);
			break;
		case CPU_CLUSTER_PM_ENTER_FAILED:
		case CPU_CLUSTER_PM_EXIT:
			gic_dist_restore(i);
			break;
		}
	}

	return NOTIFY_OK;
}
514 | ||
static struct notifier_block gic_notifier_block = {
	.notifier_call = gic_notifier,
};

/*
 * Allocate the per-cpu buffers used to save banked PPI state (one
 * enable word, two config words per CPU) and register the CPU PM
 * notifier.  Allocation failure at boot is fatal.
 */
static void __init gic_pm_init(struct gic_chip_data *gic)
{
	gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
		sizeof(u32));
	BUG_ON(!gic->saved_ppi_enable);

	gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
		sizeof(u32));
	BUG_ON(!gic->saved_ppi_conf);

	cpu_pm_register_notifier(&gic_notifier_block);
}
#else
/* No CPU PM support: nothing to save or restore. */
static void __init gic_pm_init(struct gic_chip_data *gic)
{
}
#endif
536 | ||
#ifdef CONFIG_OF
/*
 * Translate a 3-cell devicetree interrupt specifier into a GIC hwirq
 * number and trigger type.
 *
 * Cell 0 selects SPI (0) vs PPI (1), cell 1 is the interrupt number
 * relative to that space, cell 2 carries the trigger flags.  PPIs
 * start at hwirq 16, SPIs at hwirq 32.
 */
static int gic_irq_domain_dt_translate(struct irq_domain *d,
				       struct device_node *controller,
				       const u32 *intspec, unsigned int intsize,
				       unsigned long *out_hwirq, unsigned int *out_type)
{
	if (d->of_node != controller)
		return -EINVAL;
	if (intsize < 3)
		return -EINVAL;

	/* Get the interrupt number and add 16 to skip over SGIs */
	*out_hwirq = intspec[1] + 16;

	/* For SPIs, we need to add 16 more to get the GIC irq ID number */
	if (!intspec[0])
		*out_hwirq += 16;

	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
	return 0;
}
#endif
559 | ||
/* irq_domain callbacks; DT translation only when CONFIG_OF is set. */
const struct irq_domain_ops gic_irq_domain_ops = {
#ifdef CONFIG_OF
	.dt_translate = gic_irq_domain_dt_translate,
#endif
};
565 | ||
/*
 * Main entry point: initialise GIC instance @gic_nr.
 *
 * @irq_start: first Linux irq number to use, or negative to let
 *             irq_alloc_descs() choose.
 * @dist_base / @cpu_base: ioremapped distributor and CPU interface.
 *
 * Probes the number of supported interrupts from GIC_DIST_CTR, sets up
 * the irq domain (hwirq 16+ for the primary GIC, 32+ for secondaries,
 * since SGIs — and PPIs on secondaries — are not usable as normal
 * Linux irqs), then performs distributor, CPU-interface and PM init.
 */
void __init gic_init(unsigned int gic_nr, int irq_start,
	void __iomem *dist_base, void __iomem *cpu_base)
{
	struct gic_chip_data *gic;
	struct irq_domain *domain;
	int gic_irqs;

	BUG_ON(gic_nr >= MAX_GIC_NR);

	gic = &gic_data[gic_nr];
	domain = &gic->domain;
	gic->dist_base = dist_base;
	gic->cpu_base = cpu_base;

	/*
	 * For primary GICs, skip over SGIs.
	 * For secondary GICs, skip over PPIs, too.
	 */
	if (gic_nr == 0) {
		gic_cpu_base_addr = cpu_base;
		domain->hwirq_base = 16;
		if (irq_start > 0)
			irq_start = (irq_start & ~31) + 16;
	} else
		domain->hwirq_base = 32;

	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources.
	 */
	gic_irqs = readl_relaxed(dist_base + GIC_DIST_CTR) & 0x1f;
	gic_irqs = (gic_irqs + 1) * 32;
	if (gic_irqs > 1020)
		gic_irqs = 1020;
	gic->gic_irqs = gic_irqs;

	domain->nr_irq = gic_irqs - domain->hwirq_base;
	domain->irq_base = irq_alloc_descs(irq_start, 16, domain->nr_irq,
					   numa_node_id());
	if (IS_ERR_VALUE(domain->irq_base)) {
		/* Fall back to the caller-supplied (pre-allocated) base. */
		WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
		     irq_start);
		domain->irq_base = irq_start;
	}
	domain->priv = gic;
	domain->ops = &gic_irq_domain_ops;
	irq_domain_add(domain);

	gic_chip.flags |= gic_arch_extn.flags;
	gic_dist_init(gic);
	gic_cpu_init(gic);
	gic_pm_init(gic);
}
619 | ||
/*
 * Bring up the banked registers and CPU interface of GIC @gic_nr on a
 * secondary CPU; the distributor was already set up by the boot CPU.
 */
void __cpuinit gic_secondary_init(unsigned int gic_nr)
{
	BUG_ON(gic_nr >= MAX_GIC_NR);

	gic_cpu_init(&gic_data[gic_nr]);
}
626 | ||
#ifdef CONFIG_SMP
/*
 * Send software-generated interrupt @irq (an SGI, 0-15) to the CPUs in
 * @mask via the primary GIC's SOFTINT register.  Bits 16-23 of the
 * write carry the physical target CPU mask.
 */
void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
	int cpu;
	unsigned long map = 0;

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask)
		map |= 1 << cpu_logical_map(cpu);

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	dsb();

	/* this always happens on GIC0 */
	writel_relaxed(map << 16 | irq, gic_data[0].dist_base + GIC_DIST_SOFTINT);
}
#endif
b3f7ed03 RH |
647 | |
#ifdef CONFIG_OF
/* Number of GICs probed from the devicetree so far. */
static int gic_cnt __initdata = 0;

/*
 * Devicetree probe: map the distributor (reg 0) and CPU interface
 * (reg 1), initialise the GIC with dynamically allocated irqs, and —
 * for a non-root GIC — cascade it behind its parent's interrupt.
 * Returns 0 on success, -ENODEV if @node is missing.
 */
int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
	void __iomem *cpu_base;
	void __iomem *dist_base;
	int irq;
	struct irq_domain *domain = &gic_data[gic_cnt].domain;

	if (WARN_ON(!node))
		return -ENODEV;

	dist_base = of_iomap(node, 0);
	WARN(!dist_base, "unable to map gic dist registers\n");

	cpu_base = of_iomap(node, 1);
	WARN(!cpu_base, "unable to map gic cpu registers\n");

	domain->of_node = of_node_get(node);

	gic_init(gic_cnt, -1, dist_base, cpu_base);

	if (parent) {
		irq = irq_of_parse_and_map(node, 0);
		gic_cascade_irq(gic_cnt, irq);
	}
	gic_cnt++;
	return 0;
}
#endif