ARM: gic: add irq_domain support
[deliverable/linux.git] / arch / arm / common / gic.c
CommitLineData
f27ecacc
RK
/*
 *  linux/arch/arm/common/gic.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Interrupt architecture for the GIC:
 *
 * o There is one Interrupt Distributor, which receives interrupts
 *   from system devices and sends them to the Interrupt Controllers.
 *
 * o There is one CPU Interface per CPU, which sends interrupts sent
 *   by the Distributor, and interrupts generated locally, to the
 *   associated CPU. The base address of the CPU interface is usually
 *   aliased so that the same address points to different chips depending
 *   on the CPU it is accessed from.
 *
 * Note that IRQs 0-31 are special - they are local to each CPU.
 * As such, the enable set/clear, pending set/clear and active bit
 * registers are banked per-cpu for these sources.
 */
25#include <linux/init.h>
26#include <linux/kernel.h>
4294f8ba 27#include <linux/export.h>
f27ecacc
RK
28#include <linux/list.h>
29#include <linux/smp.h>
254056f3 30#include <linux/cpu_pm.h>
dcb86e8c 31#include <linux/cpumask.h>
fced80c7 32#include <linux/io.h>
4294f8ba 33#include <linux/irqdomain.h>
292b293c
MZ
34#include <linux/interrupt.h>
35#include <linux/percpu.h>
36#include <linux/slab.h>
f27ecacc
RK
37
38#include <asm/irq.h>
f27ecacc
RK
39#include <asm/mach/irq.h>
40#include <asm/hardware/gic.h>
41
c4bfa28a 42static DEFINE_SPINLOCK(irq_controller_lock);
f27ecacc 43
ff2e27ae 44/* Address of GIC 0 CPU interface */
bef8f9ee 45void __iomem *gic_cpu_base_addr __read_mostly;
ff2e27ae 46
d7ed36a4
SS
/*
 * Supported arch specific GIC irq extension.
 * Default make them NULL.
 *
 * Platforms may install callbacks (and flags, see gic_init()) here;
 * each gic_* handler below checks the corresponding pointer before
 * delegating to it.
 */
struct irq_chip gic_arch_extn = {
	.irq_eoi	= NULL,
	.irq_mask	= NULL,
	.irq_unmask	= NULL,
	.irq_retrigger	= NULL,
	.irq_set_type	= NULL,
	.irq_set_wake	= NULL,
};
59
b3a1bde4
CM
60#ifndef MAX_GIC_NR
61#define MAX_GIC_NR 1
62#endif
63
bef8f9ee 64static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly;
b3a1bde4 65
7d1f4288 66static inline void __iomem *gic_dist_base(struct irq_data *d)
b3a1bde4 67{
7d1f4288 68 struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
b3a1bde4
CM
69 return gic_data->dist_base;
70}
71
7d1f4288 72static inline void __iomem *gic_cpu_base(struct irq_data *d)
b3a1bde4 73{
7d1f4288 74 struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
b3a1bde4
CM
75 return gic_data->cpu_base;
76}
77
/* Hardware interrupt number (GIC hwirq space) backing @d. */
static inline unsigned int gic_irq(struct irq_data *d)
{
	return d->hwirq;
}
82
f27ecacc
RK
83/*
84 * Routines to acknowledge, disable and enable interrupts
f27ecacc 85 */
7d1f4288 86static void gic_mask_irq(struct irq_data *d)
f27ecacc 87{
4294f8ba 88 u32 mask = 1 << (gic_irq(d) % 32);
c4bfa28a
TG
89
90 spin_lock(&irq_controller_lock);
6ac77e46 91 writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
d7ed36a4
SS
92 if (gic_arch_extn.irq_mask)
93 gic_arch_extn.irq_mask(d);
c4bfa28a 94 spin_unlock(&irq_controller_lock);
f27ecacc
RK
95}
96
7d1f4288 97static void gic_unmask_irq(struct irq_data *d)
f27ecacc 98{
4294f8ba 99 u32 mask = 1 << (gic_irq(d) % 32);
c4bfa28a
TG
100
101 spin_lock(&irq_controller_lock);
d7ed36a4
SS
102 if (gic_arch_extn.irq_unmask)
103 gic_arch_extn.irq_unmask(d);
6ac77e46 104 writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
c4bfa28a 105 spin_unlock(&irq_controller_lock);
f27ecacc
RK
106}
107
1a01753e
WD
108static void gic_eoi_irq(struct irq_data *d)
109{
110 if (gic_arch_extn.irq_eoi) {
111 spin_lock(&irq_controller_lock);
112 gic_arch_extn.irq_eoi(d);
113 spin_unlock(&irq_controller_lock);
114 }
115
6ac77e46 116 writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
1a01753e
WD
117}
118
/*
 * Configure the trigger type of a distributor interrupt.  Only
 * IRQ_TYPE_LEVEL_HIGH and IRQ_TYPE_EDGE_RISING are accepted, and SGIs
 * (hwirq < 16) have a fixed configuration.
 */
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	void __iomem *base = gic_dist_base(d);
	unsigned int gicirq = gic_irq(d);
	u32 enablemask = 1 << (gicirq % 32);
	u32 enableoff = (gicirq / 32) * 4;
	u32 confmask = 0x2 << ((gicirq % 16) * 2);	/* 2 config bits per irq */
	u32 confoff = (gicirq / 16) * 4;
	bool enabled = false;
	u32 val;

	/* Interrupt configuration for SGIs can't be changed */
	if (gicirq < 16)
		return -EINVAL;

	if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	spin_lock(&irq_controller_lock);

	/* let the platform extension mirror the change first */
	if (gic_arch_extn.irq_set_type)
		gic_arch_extn.irq_set_type(d, type);

	val = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
	if (type == IRQ_TYPE_LEVEL_HIGH)
		val &= ~confmask;
	else if (type == IRQ_TYPE_EDGE_RISING)
		val |= confmask;

	/*
	 * As recommended by the spec, disable the interrupt before changing
	 * the configuration
	 */
	if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
		writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
		enabled = true;
	}

	writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);

	/* re-enable only if we disabled it around the update */
	if (enabled)
		writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);

	spin_unlock(&irq_controller_lock);

	return 0;
}
166
d7ed36a4
SS
167static int gic_retrigger(struct irq_data *d)
168{
169 if (gic_arch_extn.irq_retrigger)
170 return gic_arch_extn.irq_retrigger(d);
171
172 return -ENXIO;
173}
174
#ifdef CONFIG_SMP
/*
 * Route interrupt @d to the first online CPU in @mask_val by rewriting
 * this interrupt's byte in the distributor TARGET register.  The
 * register holds one byte per interrupt, four interrupts per word.
 */
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
	unsigned int shift = (gic_irq(d) % 4) * 8;	/* byte lane within the word */
	unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
	u32 val, mask, bit;

	/* the 8-bit target field can only address CPU interfaces 0-7 */
	if (cpu >= 8 || cpu >= nr_cpu_ids)
		return -EINVAL;

	mask = 0xff << shift;
	/* the hardware wants the physical CPU number, not the logical one */
	bit = 1 << (cpu_logical_map(cpu) + shift);

	spin_lock(&irq_controller_lock);
	val = readl_relaxed(reg) & ~mask;
	writel_relaxed(val | bit, reg);
	spin_unlock(&irq_controller_lock);

	return IRQ_SET_MASK_OK;
}
#endif
f27ecacc 198
d7ed36a4
SS
#ifdef CONFIG_PM
/*
 * Wake-up configuration is delegated entirely to the platform
 * extension; without one, -ENXIO tells the core it is unsupported.
 */
static int gic_set_wake(struct irq_data *d, unsigned int on)
{
	int ret = -ENXIO;

	if (gic_arch_extn.irq_set_wake)
		ret = gic_arch_extn.irq_set_wake(d, on);

	return ret;
}

#else
#define gic_set_wake	NULL
#endif
213
/*
 * Flow handler for a secondary GIC cascaded off one interrupt line of
 * the primary GIC: acknowledge on the secondary (INTACK read), map the
 * hwirq through the secondary's irq domain, and handle it.
 */
static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
	struct gic_chip_data *chip_data = irq_get_handler_data(irq);
	struct irq_chip *chip = irq_get_chip(irq);
	unsigned int cascade_irq, gic_irq;
	unsigned long status;

	/* mask/ack bracketing of the cascade input on the primary chip */
	chained_irq_enter(chip, desc);

	spin_lock(&irq_controller_lock);
	status = readl_relaxed(chip_data->cpu_base + GIC_CPU_INTACK);
	spin_unlock(&irq_controller_lock);

	gic_irq = (status & 0x3ff);
	if (gic_irq == 1023)	/* spurious interrupt ID */
		goto out;

	cascade_irq = irq_domain_to_irq(&chip_data->domain, gic_irq);
	/* SGIs/PPIs and out-of-range IDs cannot arrive via a cascade */
	if (unlikely(gic_irq < 32 || gic_irq > 1020 || cascade_irq >= NR_IRQS))
		do_bad_IRQ(cascade_irq, desc);
	else
		generic_handle_irq(cascade_irq);

 out:
	chained_irq_exit(chip, desc);
}
240
/*
 * The GIC irq_chip.  Uses the fasteoi flow: interrupts are acknowledged
 * by the GIC_CPU_INTACK read and completed via gic_eoi_irq().
 */
static struct irq_chip gic_chip = {
	.name			= "GIC",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_retrigger		= gic_retrigger,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
	.irq_set_wake		= gic_set_wake,
};
253
b3a1bde4
CM
254void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
255{
256 if (gic_nr >= MAX_GIC_NR)
257 BUG();
6845664a 258 if (irq_set_handler_data(irq, &gic_data[gic_nr]) != 0)
b3a1bde4 259 BUG();
6845664a 260 irq_set_chained_handler(irq, gic_handle_cascade_irq);
b3a1bde4
CM
261}
262
/*
 * One-time distributor setup: program defaults for all global (SPI)
 * interrupts - trigger mode, target CPU, priority, disabled - and hook
 * every irq of the domain into the Linux genirq layer.
 */
static void __init gic_dist_init(struct gic_chip_data *gic)
{
	unsigned int i, irq;
	u32 cpumask;
	unsigned int gic_irqs = gic->gic_irqs;
	struct irq_domain *domain = &gic->domain;
	void __iomem *base = gic->dist_base;
	u32 cpu = 0;

#ifdef CONFIG_SMP
	cpu = cpu_logical_map(smp_processor_id());
#endif

	/* replicate the boot CPU's target bit into all four byte lanes */
	cpumask = 1 << cpu;
	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;

	/* disable the distributor while reprogramming it */
	writel_relaxed(0, base + GIC_DIST_CTRL);

	/*
	 * Set all global interrupts to be level triggered, active low.
	 * (one CONFIG register covers 16 interrupts)
	 */
	for (i = 32; i < gic_irqs; i += 16)
		writel_relaxed(0, base + GIC_DIST_CONFIG + i * 4 / 16);

	/*
	 * Set all global interrupts to this CPU only.
	 * (one TARGET register covers 4 interrupts)
	 */
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);

	/*
	 * Set priority on all global interrupts.
	 */
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);

	/*
	 * Disable all interrupts.  Leave the PPI and SGIs alone
	 * as these enables are banked registers.
	 */
	for (i = 32; i < gic_irqs; i += 32)
		writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);

	/*
	 * Setup the Linux IRQ subsystem: PPIs/SGIs (hwirq < 32) are
	 * per-cpu; everything else uses the fasteoi flow.
	 */
	irq_domain_for_each_irq(domain, i, irq) {
		if (i < 32) {
			irq_set_percpu_devid(irq);
			irq_set_chip_and_handler(irq, &gic_chip,
						 handle_percpu_devid_irq);
			set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
		} else {
			irq_set_chip_and_handler(irq, &gic_chip,
						 handle_fasteoi_irq);
			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
		}
		irq_set_chip_data(irq, gic);
	}

	writel_relaxed(1, base + GIC_DIST_CTRL);
}
326
/*
 * Per-CPU interface setup.  Runs on every CPU (secondaries come in via
 * gic_secondary_init()) because the PPI/SGI registers are banked.
 */
static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
{
	void __iomem *dist_base = gic->dist_base;
	void __iomem *base = gic->cpu_base;
	int i;

	/*
	 * Deal with the banked PPI and SGI interrupts - disable all
	 * PPI interrupts, ensure all SGI interrupts are enabled.
	 */
	writel_relaxed(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
	writel_relaxed(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);

	/*
	 * Set priority on PPI and SGI interrupts
	 */
	for (i = 0; i < 32; i += 4)
		writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);

	/* open the priority mask, then enable this CPU interface */
	writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
	writel_relaxed(1, base + GIC_CPU_CTRL);
}
349
254056f3
CC
350#ifdef CONFIG_CPU_PM
/*
 * Saves the GIC distributor registers during suspend or idle.  Must be called
 * with interrupts disabled but before powering down the GIC.  After calling
 * this function, no interrupts will be delivered by the GIC, and another
 * platform-specific wakeup source must be enabled.
 */
static void gic_dist_save(unsigned int gic_nr)
{
	unsigned int gic_irqs;
	void __iomem *dist_base;
	int i;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	gic_irqs = gic_data[gic_nr].gic_irqs;
	dist_base = gic_data[gic_nr].dist_base;

	if (!dist_base)
		return;

	/* 16 irqs per CONFIG word, 4 per TARGET word, 32 per ENABLE word */
	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		gic_data[gic_nr].saved_spi_conf[i] =
			readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		gic_data[gic_nr].saved_spi_target[i] =
			readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		gic_data[gic_nr].saved_spi_enable[i] =
			readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
}
384
/*
 * Restores the GIC distributor registers during resume or when coming out of
 * idle.  Must be called before enabling interrupts.  If a level interrupt
 * that occurred while the GIC was suspended is still present, it will be
 * handled normally, but any edge interrupts that occurred will not be seen by
 * the GIC and need to be handled by the platform-specific wakeup source.
 */
static void gic_dist_restore(unsigned int gic_nr)
{
	unsigned int gic_irqs;
	unsigned int i;
	void __iomem *dist_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	gic_irqs = gic_data[gic_nr].gic_irqs;
	dist_base = gic_data[gic_nr].dist_base;

	if (!dist_base)
		return;

	/* keep the distributor disabled while reprogramming it */
	writel_relaxed(0, dist_base + GIC_DIST_CTRL);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_conf[i],
			dist_base + GIC_DIST_CONFIG + i * 4);

	/* priorities are not saved; rewrite the same default used at init */
	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(0xa0a0a0a0,
			dist_base + GIC_DIST_PRI + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
			dist_base + GIC_DIST_TARGET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
			dist_base + GIC_DIST_ENABLE_SET + i * 4);

	writel_relaxed(1, dist_base + GIC_DIST_CTRL);
}
427
/*
 * Saves this CPU's banked PPI/SGI state (the first 32 interrupts) into
 * this CPU's slot of the per-cpu save areas allocated in gic_pm_init().
 */
static void gic_cpu_save(unsigned int gic_nr)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	dist_base = gic_data[gic_nr].dist_base;
	cpu_base = gic_data[gic_nr].cpu_base;

	if (!dist_base || !cpu_base)
		return;

	/* one enable word covers all 32 banked interrupts */
	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);

	/* two config words cover the 32 banked interrupts */
	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);

}
453
/*
 * Restores this CPU's banked PPI/SGI state and re-enables the CPU
 * interface.  Mirrors gic_cpu_save(); priorities are not saved, so the
 * init-time default is reprogrammed.
 */
static void gic_cpu_restore(unsigned int gic_nr)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	dist_base = gic_data[gic_nr].dist_base;
	cpu_base = gic_data[gic_nr].cpu_base;

	if (!dist_base || !cpu_base)
		return;

	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);

	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);

	/* default priority, as programmed by gic_cpu_init() */
	for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
		writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4);

	writel_relaxed(0xf0, cpu_base + GIC_CPU_PRIMASK);
	writel_relaxed(1, cpu_base + GIC_CPU_CTRL);
}
484
485static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
486{
487 int i;
488
489 for (i = 0; i < MAX_GIC_NR; i++) {
490 switch (cmd) {
491 case CPU_PM_ENTER:
492 gic_cpu_save(i);
493 break;
494 case CPU_PM_ENTER_FAILED:
495 case CPU_PM_EXIT:
496 gic_cpu_restore(i);
497 break;
498 case CPU_CLUSTER_PM_ENTER:
499 gic_dist_save(i);
500 break;
501 case CPU_CLUSTER_PM_ENTER_FAILED:
502 case CPU_CLUSTER_PM_EXIT:
503 gic_dist_restore(i);
504 break;
505 }
506 }
507
508 return NOTIFY_OK;
509}
510
/* Registered with cpu_pm in gic_pm_init(); dispatches to gic_notifier(). */
static struct notifier_block gic_notifier_block = {
	.notifier_call = gic_notifier,
};
514
/*
 * Allocate the per-cpu save areas for the banked PPI/SGI registers and
 * register the CPU PM notifier that drives save/restore.
 */
static void __init gic_pm_init(struct gic_chip_data *gic)
{
	/* one u32 of enable bits covers the 32 banked interrupts */
	gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
		sizeof(u32));
	BUG_ON(!gic->saved_ppi_enable);

	/* two u32 config words cover the 32 banked interrupts */
	gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
		sizeof(u32));
	BUG_ON(!gic->saved_ppi_conf);

	cpu_pm_register_notifier(&gic_notifier_block);
}
527#else
/* CONFIG_CPU_PM disabled: no GIC state to save/restore across low power. */
static void __init gic_pm_init(struct gic_chip_data *gic)
{
}
531#endif
532
4294f8ba
RH
/* No callbacks needed yet; the default irq_domain behaviour suffices. */
const struct irq_domain_ops gic_irq_domain_ops = {
};
535
b580b899
RK
536void __init gic_init(unsigned int gic_nr, unsigned int irq_start,
537 void __iomem *dist_base, void __iomem *cpu_base)
538{
bef8f9ee 539 struct gic_chip_data *gic;
4294f8ba
RH
540 struct irq_domain *domain;
541 int gic_irqs;
bef8f9ee
RK
542
543 BUG_ON(gic_nr >= MAX_GIC_NR);
544
545 gic = &gic_data[gic_nr];
4294f8ba 546 domain = &gic->domain;
bef8f9ee
RK
547 gic->dist_base = dist_base;
548 gic->cpu_base = cpu_base;
bef8f9ee 549
4294f8ba
RH
550 /*
551 * For primary GICs, skip over SGIs.
552 * For secondary GICs, skip over PPIs, too.
553 */
554 if (gic_nr == 0) {
ff2e27ae 555 gic_cpu_base_addr = cpu_base;
4294f8ba
RH
556 domain->hwirq_base = 16;
557 irq_start = (irq_start & ~31) + 16;
558 } else
559 domain->hwirq_base = 32;
560
561 /*
562 * Find out how many interrupts are supported.
563 * The GIC only supports up to 1020 interrupt sources.
564 */
565 gic_irqs = readl_relaxed(dist_base + GIC_DIST_CTR) & 0x1f;
566 gic_irqs = (gic_irqs + 1) * 32;
567 if (gic_irqs > 1020)
568 gic_irqs = 1020;
569 gic->gic_irqs = gic_irqs;
570
571 domain->nr_irq = gic_irqs - domain->hwirq_base;
572 domain->irq_base = irq_alloc_descs(-1, irq_start, domain->nr_irq,
573 numa_node_id());
574 domain->priv = gic;
575 domain->ops = &gic_irq_domain_ops;
576 irq_domain_add(domain);
bef8f9ee 577
9c12845e 578 gic_chip.flags |= gic_arch_extn.flags;
4294f8ba 579 gic_dist_init(gic);
bef8f9ee 580 gic_cpu_init(gic);
254056f3 581 gic_pm_init(gic);
b580b899
RK
582}
583
38489533
RK
584void __cpuinit gic_secondary_init(unsigned int gic_nr)
585{
bef8f9ee
RK
586 BUG_ON(gic_nr >= MAX_GIC_NR);
587
588 gic_cpu_init(&gic_data[gic_nr]);
38489533
RK
589}
590
#ifdef CONFIG_SMP
/* Send software-generated interrupt @irq to the CPUs in @mask. */
void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
	unsigned long map = 0;
	int cpu;

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask)
		map |= 1 << cpu_logical_map(cpu);

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	dsb();

	/* the SOFTINT register only exists on GIC0 */
	writel_relaxed(map << 16 | irq, gic_data[0].dist_base + GIC_DIST_SOFTINT);
}
#endif
This page took 0.600732 seconds and 5 git commands to generate.