/*
 * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/slab.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>

#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>

#include "irq-gic-common.h"
struct redist_region {
	void __iomem		*redist_base;
	phys_addr_t		phys_base;
};
struct gic_chip_data {
	void __iomem		*dist_base;
	struct redist_region	*redist_regions;
	struct rdists		rdists;
	struct irq_domain	*domain;
	u64			redist_stride;
	u32			nr_redist_regions;
	unsigned int		irq_nr;
};
static struct gic_chip_data gic_data __read_mostly;
static struct static_key supports_deactivate = STATIC_KEY_INIT_TRUE;
#define gic_data_rdist()		(this_cpu_ptr(gic_data.rdists.rdist))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
#define gic_data_rdist_sgi_base()	(gic_data_rdist_rd_base() + SZ_64K)
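
/*
 * Per the GICv3 architecture, each redistributor exposes (at least) two
 * contiguous 64K frames: RD_base for control registers and SGI_base for
 * the SGI/PPI configuration registers, hence the fixed SZ_64K offset
 * above.
 */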
/* Our default, arbitrary priority value. Linux only uses one anyway. */
#define DEFAULT_PMR_VALUE	0xf0
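
/*
 * Note on the value above: in the GIC priority scheme a lower number
 * means a higher priority, and only interrupts whose priority is
 * numerically lower than the PMR value are signalled to the CPU, so
 * 0xf0 lets through the single priority level that Linux programs.
 */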
static inline unsigned int gic_irq(struct irq_data *d)
{
	return d->hwirq;
}
static inline int gic_irq_in_rdist(struct irq_data *d)
{
	return gic_irq(d) < 32;
}
static inline void __iomem *gic_dist_base(struct irq_data *d)
{
	if (gic_irq_in_rdist(d))	/* SGI+PPI -> SGI_base for this CPU */
		return gic_data_rdist_sgi_base();

	if (d->hwirq <= 1023)		/* SPI -> dist_base */
		return gic_data.dist_base;

	return NULL;
}
static void gic_do_wait_for_rwp(void __iomem *base)
{
	u32 count = 1000000;	/* 1s! */

	while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
		count--;
		if (!count) {
			pr_err_ratelimited("RWP timeout, gone fishing\n");
			return;
		}
		cpu_relax();
		udelay(1);
	};
}
/* Wait for completion of a distributor change */
static void gic_dist_wait_for_rwp(void)
{
	gic_do_wait_for_rwp(gic_data.dist_base);
}
/* Wait for completion of a redistributor change */
static void gic_redist_wait_for_rwp(void)
{
	gic_do_wait_for_rwp(gic_data_rdist_rd_base());
}
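
/*
 * Note: GICD_CTLR.RWP (and the GICR equivalent) reads as 1 while the
 * effects of earlier register writes, such as enable/disable changes,
 * are still propagating, so polling it to 0 guarantees a change has
 * taken effect before we proceed.
 */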
/* Low level accessors */
static u64 gic_read_iar_common(void)
{
	u64 irqstat;

	asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
	return irqstat;
}
/*
 * Cavium ThunderX erratum 23154
 *
 * The gicv3 of ThunderX requires a modified version for reading the
 * IAR status to ensure data synchronization (access to icc_iar1_el1
 * is not sync'ed before and after).
 */
static u64 gic_read_iar_cavium_thunderx(void)
{
	u64 irqstat;

	asm volatile(
		"nop;nop;nop;nop\n\t"
		"nop;nop;nop;nop\n\t"
		"mrs_s %0, " __stringify(ICC_IAR1_EL1) "\n\t"
		"nop;nop;nop;nop"
		: "=r" (irqstat));
	mb();

	return irqstat;
}
static DEFINE_STATIC_KEY_FALSE(is_cavium_thunderx);

static u64 __maybe_unused gic_read_iar(void)
{
	if (static_branch_unlikely(&is_cavium_thunderx))
		return gic_read_iar_cavium_thunderx();
	else
		return gic_read_iar_common();
}
static void __maybe_unused gic_write_pmr(u64 val)
{
	asm volatile("msr_s " __stringify(ICC_PMR_EL1) ", %0" : : "r" (val));
}

static void __maybe_unused gic_write_ctlr(u64 val)
{
	asm volatile("msr_s " __stringify(ICC_CTLR_EL1) ", %0" : : "r" (val));
	isb();
}

static void __maybe_unused gic_write_grpen1(u64 val)
{
	asm volatile("msr_s " __stringify(ICC_GRPEN1_EL1) ", %0" : : "r" (val));
	isb();
}

static void __maybe_unused gic_write_sgi1r(u64 val)
{
	asm volatile("msr_s " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val));
}
static void gic_enable_sre(void)
{
	u64 val;

	asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
	val |= ICC_SRE_EL1_SRE;
	asm volatile("msr_s " __stringify(ICC_SRE_EL1) ", %0" : : "r" (val));
	isb();

	/*
	 * Need to check that the SRE bit has actually been set. If
	 * not, it means that SRE is disabled at EL2. We're going to
	 * die painfully, and there is nothing we can do about it.
	 *
	 * Kindly inform the luser.
	 */
	asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
	if (!(val & ICC_SRE_EL1_SRE))
		pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
}
static void gic_enable_redist(bool enable)
{
	void __iomem *rbase;
	u32 count = 1000000;	/* 1s! */
	u32 val;

	rbase = gic_data_rdist_rd_base();

	val = readl_relaxed(rbase + GICR_WAKER);
	if (enable)
		/* Wake up this CPU redistributor */
		val &= ~GICR_WAKER_ProcessorSleep;
	else
		val |= GICR_WAKER_ProcessorSleep;
	writel_relaxed(val, rbase + GICR_WAKER);

	if (!enable) {		/* Check that GICR_WAKER is writeable */
		val = readl_relaxed(rbase + GICR_WAKER);
		if (!(val & GICR_WAKER_ProcessorSleep))
			return;	/* No PM support in this redistributor */
	}

	/* Wait until the sleep/wake transition has completed */
	while (--count) {
		val = readl_relaxed(rbase + GICR_WAKER);
		if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
			break;
		cpu_relax();
		udelay(1);
	};
	if (!count)
		pr_err_ratelimited("redistributor failed to %s...\n",
				   enable ? "wakeup" : "sleep");
}
/*
 * Routines to disable, enable, EOI and route interrupts
 */
static int gic_peek_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);
	void __iomem *base;

	if (gic_irq_in_rdist(d))
		base = gic_data_rdist_sgi_base();
	else
		base = gic_data.dist_base;

	return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask);
}
static void gic_poke_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);
	void (*rwp_wait)(void);
	void __iomem *base;

	if (gic_irq_in_rdist(d)) {
		base = gic_data_rdist_sgi_base();
		rwp_wait = gic_redist_wait_for_rwp;
	} else {
		base = gic_data.dist_base;
		rwp_wait = gic_dist_wait_for_rwp;
	}

	writel_relaxed(mask, base + offset + (gic_irq(d) / 32) * 4);
	rwp_wait();
}
static void gic_mask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GICD_ICENABLER);
}
static void gic_eoimode1_mask_irq(struct irq_data *d)
{
	gic_mask_irq(d);
	/*
	 * When masking a forwarded interrupt, make sure it is
	 * deactivated as well.
	 *
	 * This ensures that an interrupt that is getting
	 * disabled/masked will not get "stuck", because there is
	 * no one left to deactivate it (the guest is being terminated).
	 */
	if (irqd_is_forwarded_to_vcpu(d))
		gic_poke_irq(d, GICD_ICACTIVER);
}
static void gic_unmask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GICD_ISENABLER);
}
static int gic_irq_set_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool val)
{
	u32 reg;

	if (d->hwirq >= gic_data.irq_nr)	/* PPI/SPI only */
		return -EINVAL;

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		reg = val ? GICD_ISPENDR : GICD_ICPENDR;
		break;

	case IRQCHIP_STATE_ACTIVE:
		reg = val ? GICD_ISACTIVER : GICD_ICACTIVER;
		break;

	case IRQCHIP_STATE_MASKED:
		reg = val ? GICD_ICENABLER : GICD_ISENABLER;
		break;

	default:
		return -EINVAL;
	}

	gic_poke_irq(d, reg);
	return 0;
}
static int gic_irq_get_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool *val)
{
	if (d->hwirq >= gic_data.irq_nr)	/* PPI/SPI only */
		return -EINVAL;

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		*val = gic_peek_irq(d, GICD_ISPENDR);
		break;

	case IRQCHIP_STATE_ACTIVE:
		*val = gic_peek_irq(d, GICD_ISACTIVER);
		break;

	case IRQCHIP_STATE_MASKED:
		*val = !gic_peek_irq(d, GICD_ISENABLER);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
static void gic_eoi_irq(struct irq_data *d)
{
	gic_write_eoir(gic_irq(d));
}
static void gic_eoimode1_eoi_irq(struct irq_data *d)
{
	/*
	 * No need to deactivate an LPI, or an interrupt that
	 * is getting forwarded to a vcpu.
	 */
	if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
		return;
	gic_write_dir(gic_irq(d));
}
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = gic_irq(d);
	void (*rwp_wait)(void);
	void __iomem *base;

	/* Interrupt configuration for SGIs can't be changed */
	if (irq < 16)
		return -EINVAL;

	/* SPIs have restrictions on the supported types */
	if (irq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
			 type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	if (gic_irq_in_rdist(d)) {
		base = gic_data_rdist_sgi_base();
		rwp_wait = gic_redist_wait_for_rwp;
	} else {
		base = gic_data.dist_base;
		rwp_wait = gic_dist_wait_for_rwp;
	}

	return gic_configure_irq(irq, type, base, rwp_wait);
}
static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
	if (vcpu)
		irqd_set_forwarded_to_vcpu(d);
	else
		irqd_clr_forwarded_to_vcpu(d);
	return 0;
}
static u64 gic_mpidr_to_affinity(u64 mpidr)
{
	u64 aff;

	aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8  |
	       MPIDR_AFFINITY_LEVEL(mpidr, 0));

	return aff;
}
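
/*
 * Worked example for the packing above: an MPIDR with Aff3=0, Aff2=1,
 * Aff1=2, Aff0=3 yields (1 << 16) | (2 << 8) | 3 = 0x010203, matching
 * the Aff3/Aff2/Aff1/Aff0 field layout of GICD_IROUTER.
 */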
static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	u64 irqnr;

	do {
		irqnr = gic_read_iar();

		/* PPIs/SPIs (16..1019) and LPIs (8192 and up) */
		if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) {
			int err;

			if (static_key_true(&supports_deactivate))
				gic_write_eoir(irqnr);

			err = handle_domain_irq(gic_data.domain, irqnr, regs);
			if (err) {
				WARN_ONCE(true, "Unexpected interrupt received!\n");
				if (static_key_true(&supports_deactivate)) {
					if (irqnr < 8192)
						gic_write_dir(irqnr);
				} else {
					gic_write_eoir(irqnr);
				}
			}
			continue;
		}
		/* SGIs (0..15) are used as IPIs */
		if (irqnr < 16) {
			gic_write_eoir(irqnr);
			if (static_key_true(&supports_deactivate))
				gic_write_dir(irqnr);
#ifdef CONFIG_SMP
			handle_IPI(irqnr, regs);
#else
			WARN_ONCE(true, "Unexpected SGI received!\n");
#endif
			continue;
		}
	} while (irqnr != ICC_IAR1_EL1_SPURIOUS);
}
static void __init gic_dist_init(void)
{
	unsigned int i;
	u64 affinity;
	void __iomem *base = gic_data.dist_base;

	/* Disable the distributor */
	writel_relaxed(0, base + GICD_CTLR);
	gic_dist_wait_for_rwp();

	gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp);

	/* Enable distributor with ARE, Group1 */
	writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1,
		       base + GICD_CTLR);

	/*
	 * Set all global interrupts to the boot CPU only. ARE must be
	 * enabled.
	 */
	affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
	for (i = 32; i < gic_data.irq_nr; i++)
		writeq_relaxed(affinity, base + GICD_IROUTER + i * 8);
}
static int gic_populate_rdist(void)
{
	u64 mpidr = cpu_logical_map(smp_processor_id());
	u64 typer;
	u32 aff;
	int i;

	/*
	 * Convert affinity to a 32bit value that can be matched to
	 * GICR_TYPER bits [63:32].
	 */
	aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 0));

	for (i = 0; i < gic_data.nr_redist_regions; i++) {
		void __iomem *ptr = gic_data.redist_regions[i].redist_base;
		u32 reg;

		reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
		if (reg != GIC_PIDR2_ARCH_GICv3 &&
		    reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
			pr_warn("No redistributor present @%p\n", ptr);
			break;
		}

		do {
			typer = readq_relaxed(ptr + GICR_TYPER);
			if ((typer >> 32) == aff) {
				u64 offset = ptr - gic_data.redist_regions[i].redist_base;
				gic_data_rdist_rd_base() = ptr;
				gic_data_rdist()->phys_base =
					gic_data.redist_regions[i].phys_base + offset;
				pr_info("CPU%d: found redistributor %llx region %d:%pa\n",
					smp_processor_id(),
					(unsigned long long)mpidr,
					i, &gic_data_rdist()->phys_base);
				return 0;
			}

			if (gic_data.redist_stride) {
				ptr += gic_data.redist_stride;
			} else {
				ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
				if (typer & GICR_TYPER_VLPIS)
					ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
			}
		} while (!(typer & GICR_TYPER_LAST));
	}

	/* We couldn't even deal with ourselves... */
	WARN(true, "CPU%d: mpidr %llx has no re-distributor!\n",
	     smp_processor_id(), (unsigned long long)mpidr);
	return -ENODEV;
}
static void gic_cpu_sys_reg_init(void)
{
	/* Enable system registers */
	gic_enable_sre();

	/* Set priority mask register */
	gic_write_pmr(DEFAULT_PMR_VALUE);

	if (static_key_true(&supports_deactivate)) {
		/* EOI drops priority only (mode 1) */
		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
	} else {
		/* EOI deactivates interrupt too (mode 0) */
		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
	}

	/* ... and let's hit the road... */
	gic_write_grpen1(1);
}
static int gic_dist_supports_lpis(void)
{
	return !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS);
}
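
/*
 * GICD_TYPER.LPIS advertises support for LPIs, the message-based
 * interrupts with INTIDs of 8192 and above that are delivered through
 * the ITS rather than wired to the distributor.
 */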
static void gic_cpu_init(void)
{
	void __iomem *rbase;

	/* Register ourselves with the rest of the world */
	if (gic_populate_rdist())
		return;

	gic_enable_redist(true);

	rbase = gic_data_rdist_sgi_base();

	gic_cpu_config(rbase, gic_redist_wait_for_rwp);

	/* Give LPIs a spin */
	if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
		its_cpu_init();

	/* initialise system registers */
	gic_cpu_sys_reg_init();
}
#ifdef CONFIG_SMP
static int gic_secondary_init(struct notifier_block *nfb,
			      unsigned long action, void *hcpu)
{
	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
		gic_cpu_init();
	return NOTIFY_OK;
}
/*
 * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
 * priority because the GIC needs to be up before the ARM generic timers.
 */
static struct notifier_block gic_cpu_notifier = {
	.notifier_call = gic_secondary_init,
	.priority = 100,
};
static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
				   u64 cluster_id)
{
	int cpu = *base_cpu;
	u64 mpidr = cpu_logical_map(cpu);
	u16 tlist = 0;

	while (cpu < nr_cpu_ids) {
		/*
		 * If we ever get a cluster of more than 16 CPUs, just
		 * scream and skip that CPU.
		 */
		if (WARN_ON((mpidr & 0xff) >= 16))
			goto out;

		tlist |= 1 << (mpidr & 0xf);

		cpu = cpumask_next(cpu, mask);
		if (cpu >= nr_cpu_ids)
			goto out;

		mpidr = cpu_logical_map(cpu);

		if (cluster_id != (mpidr & ~0xffUL)) {
			cpu--;
			goto out;
		}
	}
out:
	*base_cpu = cpu;
	return tlist;
}
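
/*
 * Worked example: three CPUs in the same cluster with Aff0 = 0, 1 and 3
 * yield tlist = 0b1011, i.e. one bit per CPU in the 16-bit target list.
 */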
#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \
	(MPIDR_AFFINITY_LEVEL(cluster_id, level) \
		<< ICC_SGI1R_AFFINITY_## level ##_SHIFT)

static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
{
	u64 val;

	val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3)	|
	       MPIDR_TO_SGI_AFFINITY(cluster_id, 2)	|
	       irq << ICC_SGI1R_SGI_ID_SHIFT		|
	       MPIDR_TO_SGI_AFFINITY(cluster_id, 1)	|
	       tlist << ICC_SGI1R_TARGET_LIST_SHIFT);

	pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
	gic_write_sgi1r(val);
}
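
/*
 * Worked example of the composition above: SGI 5, sent to the CPUs with
 * Aff0 bits 0 and 1 of the cluster with Aff3=0/Aff2=0/Aff1=1, packs as
 * (5 << 24) | (1 << 16) | 0b11 = 0x05010003 in ICC_SGI1R_EL1.
 */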
static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
	int cpu;

	if (WARN_ON(irq >= 16))
		return;

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	smp_wmb();

	for_each_cpu(cpu, mask) {
		u64 cluster_id = cpu_logical_map(cpu) & ~0xffUL;
		u16 tlist;

		tlist = gic_compute_target_list(&cpu, mask, cluster_id);
		gic_send_sgi(cluster_id, tlist, irq);
	}

	/* Force the above writes to ICC_SGI1R_EL1 to be executed */
	isb();
}
static void gic_smp_init(void)
{
	set_smp_cross_call(gic_raise_softirq);
	register_cpu_notifier(&gic_cpu_notifier);
}
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
	void __iomem *reg;
	int enabled;
	u64 val;

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	if (gic_irq_in_rdist(d))
		return -EINVAL;

	/* If interrupt was enabled, disable it first */
	enabled = gic_peek_irq(d, GICD_ISENABLER);
	if (enabled)
		gic_mask_irq(d);

	reg = gic_dist_base(d) + GICD_IROUTER + (gic_irq(d) * 8);
	val = gic_mpidr_to_affinity(cpu_logical_map(cpu));

	writeq_relaxed(val, reg);

	/*
	 * If the interrupt was enabled, enable it again. Otherwise,
	 * just wait for the distributor to have digested our changes.
	 */
	if (enabled)
		gic_unmask_irq(d);
	else
		gic_dist_wait_for_rwp();

	return IRQ_SET_MASK_OK;
}
#else
#define gic_set_affinity	NULL
#define gic_smp_init()		do { } while(0)
#endif
#ifdef CONFIG_CPU_PM
static int gic_cpu_pm_notifier(struct notifier_block *self,
			       unsigned long cmd, void *v)
{
	if (cmd == CPU_PM_EXIT) {
		gic_enable_redist(true);
		gic_cpu_sys_reg_init();
	} else if (cmd == CPU_PM_ENTER) {
		gic_write_grpen1(0);
		gic_enable_redist(false);
	}
	return NOTIFY_OK;
}

static struct notifier_block gic_cpu_pm_notifier_block = {
	.notifier_call = gic_cpu_pm_notifier,
};

static void gic_cpu_pm_init(void)
{
	cpu_pm_register_notifier(&gic_cpu_pm_notifier_block);
}

#else
static inline void gic_cpu_pm_init(void) { }
#endif /* CONFIG_CPU_PM */
static struct irq_chip gic_chip = {
	.name			= "GICv3",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_set_affinity	= gic_set_affinity,
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.flags			= IRQCHIP_SET_TYPE_MASKED,
};
static struct irq_chip gic_eoimode1_chip = {
	.name			= "GICv3",
	.irq_mask		= gic_eoimode1_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoimode1_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_set_affinity	= gic_set_affinity,
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.irq_set_vcpu_affinity	= gic_irq_set_vcpu_affinity,
	.flags			= IRQCHIP_SET_TYPE_MASKED,
};
#define GIC_ID_NR		(1U << gic_data.rdists.id_bits)
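
/*
 * With GICD_TYPER reporting, say, 16 ID bits, GIC_ID_NR evaluates to
 * 65536, so valid LPI hwirqs occupy the range [8192, GIC_ID_NR).
 */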
static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hw)
{
	struct irq_chip *chip = &gic_chip;

	if (static_key_true(&supports_deactivate))
		chip = &gic_eoimode1_chip;

	/* SGIs are private to the core kernel */
	if (hw < 16)
		return -EPERM;
	/* Nothing here */
	if (hw >= gic_data.irq_nr && hw < 8192)
		return -EPERM;
	/* Off limits */
	if (hw >= GIC_ID_NR)
		return -EPERM;

	/* PPIs */
	if (hw < 32) {
		irq_set_percpu_devid(irq);
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_percpu_devid_irq, NULL, NULL);
		irq_set_status_flags(irq, IRQ_NOAUTOEN);
	}
	/* SPIs */
	if (hw >= 32 && hw < gic_data.irq_nr) {
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		irq_set_probe(irq);
	}
	/* LPIs */
	if (hw >= 8192 && hw < GIC_ID_NR) {
		if (!gic_dist_supports_lpis())
			return -EPERM;
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
	}

	return 0;
}
static int gic_irq_domain_xlate(struct irq_domain *d,
				struct device_node *controller,
				const u32 *intspec, unsigned int intsize,
				unsigned long *out_hwirq, unsigned int *out_type)
{
	if (d->of_node != controller)
		return -EINVAL;
	if (intsize < 3)
		return -EINVAL;

	switch (intspec[0]) {
	case 0:			/* SPI */
		*out_hwirq = intspec[1] + 32;
		break;
	case 1:			/* PPI */
		*out_hwirq = intspec[1] + 16;
		break;
	case GIC_IRQ_TYPE_LPI:	/* LPI */
		*out_hwirq = intspec[1];
		break;
	default:
		return -EINVAL;
	}

	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
	return 0;
}
static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	int i, ret;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	struct of_phandle_args *irq_data = arg;

	ret = gic_irq_domain_xlate(domain, irq_data->np, irq_data->args,
				   irq_data->args_count, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++)
		gic_irq_domain_map(domain, virq + i, hwirq + i);

	return 0;
}
static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
		irq_set_handler(virq + i, NULL);
		irq_domain_reset_irq_data(d);
	}
}
static const struct irq_domain_ops gic_irq_domain_ops = {
	.xlate = gic_irq_domain_xlate,
	.alloc = gic_irq_domain_alloc,
	.free = gic_irq_domain_free,
};
static void gicv3_enable_quirks(void)
{
	if (cpus_have_cap(ARM64_WORKAROUND_CAVIUM_23154))
		static_branch_enable(&is_cavium_thunderx);
}
static int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
	void __iomem *dist_base;
	struct redist_region *rdist_regs;
	u64 redist_stride;
	u32 nr_redist_regions;
	u32 typer;
	u32 reg;
	u32 gic_irqs;
	int err;
	int i;

	dist_base = of_iomap(node, 0);
	if (!dist_base) {
		pr_err("%s: unable to map gic dist registers\n",
		       node->full_name);
		return -ENXIO;
	}

	reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
	if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4) {
		pr_err("%s: no distributor detected, giving up\n",
		       node->full_name);
		err = -ENODEV;
		goto out_unmap_dist;
	}

	if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions))
		nr_redist_regions = 1;

	rdist_regs = kzalloc(sizeof(*rdist_regs) * nr_redist_regions, GFP_KERNEL);
	if (!rdist_regs) {
		err = -ENOMEM;
		goto out_unmap_dist;
	}

	for (i = 0; i < nr_redist_regions; i++) {
		struct resource res;
		int ret;

		ret = of_address_to_resource(node, 1 + i, &res);
		rdist_regs[i].redist_base = of_iomap(node, 1 + i);
		if (ret || !rdist_regs[i].redist_base) {
			pr_err("%s: couldn't map region %d\n",
			       node->full_name, i);
			err = -ENODEV;
			goto out_unmap_rdist;
		}
		rdist_regs[i].phys_base = res.start;
	}

	if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
		redist_stride = 0;

	if (!is_hyp_mode_available())
		static_key_slow_dec(&supports_deactivate);

	if (static_key_true(&supports_deactivate))
		pr_info("GIC: Using split EOI/Deactivate mode\n");

	gic_data.dist_base = dist_base;
	gic_data.redist_regions = rdist_regs;
	gic_data.nr_redist_regions = nr_redist_regions;
	gic_data.redist_stride = redist_stride;

	gicv3_enable_quirks();

	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
	 */
	typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
	gic_data.rdists.id_bits = GICD_TYPER_ID_BITS(typer);
	gic_irqs = GICD_TYPER_IRQS(typer);
	if (gic_irqs > 1020)
		gic_irqs = 1020;
	gic_data.irq_nr = gic_irqs;

	gic_data.domain = irq_domain_add_tree(node, &gic_irq_domain_ops,
					      &gic_data);
	gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));

	if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
		err = -ENOMEM;
		goto out_free;
	}

	set_handle_irq(gic_handle_irq);

	if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
		its_init(node, &gic_data.rdists, gic_data.domain);

	gic_smp_init();
	gic_dist_init();
	gic_cpu_init();
	gic_cpu_pm_init();

	return 0;

out_free:
	if (gic_data.domain)
		irq_domain_remove(gic_data.domain);
	free_percpu(gic_data.rdists.rdist);
out_unmap_rdist:
	for (i = 0; i < nr_redist_regions; i++)
		if (rdist_regs[i].redist_base)
			iounmap(rdist_regs[i].redist_base);
	kfree(rdist_regs);
out_unmap_dist:
	iounmap(dist_base);
	return err;
}
IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);