/*
 * Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Interrupt architecture for the GIC:
 *
 * o There is one Interrupt Distributor, which receives interrupts
 *   from system devices and sends them to the Interrupt Controllers.
 *
 * o There is one CPU Interface per CPU, which sends interrupts sent
 *   by the Distributor, and interrupts generated locally, to the
 *   associated CPU. The base address of the CPU interface is usually
 *   aliased so that the same address points to different chips depending
 *   on the CPU it is accessed from.
 *
 * Note that IRQs 0-31 are special - they are local to each CPU.
 * As such, the enable set/clear, pending set/clear and active bit
 * registers are banked per-cpu for these sources.
 */
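/*
 * As a reminder for the hwirq ranges used throughout this file: IDs 0-15
 * are software-generated interrupts (SGIs), 16-31 are private peripheral
 * interrupts (PPIs), and 32 and up are shared peripheral interrupts (SPIs)
 * routed through the Distributor.
 */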
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/cpumask.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/acpi.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/irqchip/arm-gic-acpi.h>

#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>

#include "irq-gic-common.h"
union gic_base {
	void __iomem *common_base;
	void __percpu * __iomem *percpu_base;
};
struct gic_chip_data {
	union gic_base dist_base;
	union gic_base cpu_base;
#ifdef CONFIG_CPU_PM
	u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
	u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
	u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
	u32 __percpu *saved_ppi_enable;
	u32 __percpu *saved_ppi_conf;
#endif
	struct irq_domain *domain;
	unsigned int gic_irqs;
#ifdef CONFIG_GIC_NON_BANKED
	void __iomem *(*get_base)(union gic_base *);
#endif
};
static DEFINE_RAW_SPINLOCK(irq_controller_lock);
/*
 * The GIC mapping of CPU interfaces does not necessarily match
 * the logical CPU numbering. Let's use a mapping as returned
 * by the GIC itself.
 */
#define NR_GIC_CPU_IF 8
static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
/*
 * Supported arch specific GIC irq extension.
 * Default make them NULL.
 */
struct irq_chip gic_arch_extn = {
	.irq_eoi	= NULL,
	.irq_mask	= NULL,
	.irq_unmask	= NULL,
	.irq_retrigger	= NULL,
	.irq_set_type	= NULL,
	.irq_set_wake	= NULL,
};

#ifndef MAX_GIC_NR
#define MAX_GIC_NR	1
#endif
static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly;
#ifdef CONFIG_GIC_NON_BANKED
static void __iomem *gic_get_percpu_base(union gic_base *base)
{
	return raw_cpu_read(*base->percpu_base);
}

static void __iomem *gic_get_common_base(union gic_base *base)
{
	return base->common_base;
}

static inline void __iomem *gic_data_dist_base(struct gic_chip_data *data)
{
	return data->get_base(&data->dist_base);
}

static inline void __iomem *gic_data_cpu_base(struct gic_chip_data *data)
{
	return data->get_base(&data->cpu_base);
}
static inline void gic_set_base_accessor(struct gic_chip_data *data,
					 void __iomem *(*f)(union gic_base *))
{
	data->get_base = f;
}
#else
#define gic_data_dist_base(d)	((d)->dist_base.common_base)
#define gic_data_cpu_base(d)	((d)->cpu_base.common_base)
#define gic_set_base_accessor(d, f)
#endif
static inline void __iomem *gic_dist_base(struct irq_data *d)
{
	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
	return gic_data_dist_base(gic_data);
}

static inline void __iomem *gic_cpu_base(struct irq_data *d)
{
	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
	return gic_data_cpu_base(gic_data);
}
static inline unsigned int gic_irq(struct irq_data *d)
{
	return d->hwirq;
}

/*
 * Routines to acknowledge, disable and enable interrupts
 */
static void gic_poke_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);
	writel_relaxed(mask, gic_dist_base(d) + offset + (gic_irq(d) / 32) * 4);
}
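/*
 * For example, poking hwirq 45 with GIC_DIST_ENABLE_SET writes bit 13
 * (45 % 32) of the second 32-bit enable register (offset + (45 / 32) * 4,
 * i.e. offset + 4), since each distributor bitmap register covers 32 IRQs.
 */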
static int gic_peek_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);
	return !!(readl_relaxed(gic_dist_base(d) + offset +
				(gic_irq(d) / 32) * 4) & mask);
}
static void gic_mask_irq(struct irq_data *d)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&irq_controller_lock, flags);
	gic_poke_irq(d, GIC_DIST_ENABLE_CLEAR);
	if (gic_arch_extn.irq_mask)
		gic_arch_extn.irq_mask(d);
	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
}
static void gic_unmask_irq(struct irq_data *d)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&irq_controller_lock, flags);
	if (gic_arch_extn.irq_unmask)
		gic_arch_extn.irq_unmask(d);
	gic_poke_irq(d, GIC_DIST_ENABLE_SET);
	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
}
static void gic_eoi_irq(struct irq_data *d)
{
	if (gic_arch_extn.irq_eoi) {
		raw_spin_lock(&irq_controller_lock);
		gic_arch_extn.irq_eoi(d);
		raw_spin_unlock(&irq_controller_lock);
	}

	writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
}
static int gic_irq_set_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool val)
{
	u32 reg;

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		reg = val ? GIC_DIST_PENDING_SET : GIC_DIST_PENDING_CLEAR;
		break;

	case IRQCHIP_STATE_ACTIVE:
		reg = val ? GIC_DIST_ACTIVE_SET : GIC_DIST_ACTIVE_CLEAR;
		break;

	case IRQCHIP_STATE_MASKED:
		reg = val ? GIC_DIST_ENABLE_CLEAR : GIC_DIST_ENABLE_SET;
		break;

	default:
		return -EINVAL;
	}

	gic_poke_irq(d, reg);
	return 0;
}
static int gic_irq_get_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool *val)
{
	switch (which) {
	case IRQCHIP_STATE_PENDING:
		*val = gic_peek_irq(d, GIC_DIST_PENDING_SET);
		break;

	case IRQCHIP_STATE_ACTIVE:
		*val = gic_peek_irq(d, GIC_DIST_ACTIVE_SET);
		break;

	case IRQCHIP_STATE_MASKED:
		*val = !gic_peek_irq(d, GIC_DIST_ENABLE_SET);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	void __iomem *base = gic_dist_base(d);
	unsigned int gicirq = gic_irq(d);
	unsigned long flags;
	int ret;

	/* Interrupt configuration for SGIs can't be changed */
	if (gicirq < 16)
		return -EINVAL;

	/* SPIs have restrictions on the supported types */
	if (gicirq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
			    type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	raw_spin_lock_irqsave(&irq_controller_lock, flags);

	if (gic_arch_extn.irq_set_type)
		gic_arch_extn.irq_set_type(d, type);

	ret = gic_configure_irq(gicirq, type, base, NULL);

	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);

	return ret;
}
static int gic_retrigger(struct irq_data *d)
{
	if (gic_arch_extn.irq_retrigger)
		return gic_arch_extn.irq_retrigger(d);

	/* the genirq layer expects 0 if we can't retrigger in hardware */
	return 0;
}
#ifdef CONFIG_SMP
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
	unsigned int cpu, shift = (gic_irq(d) % 4) * 8;
	u32 val, mask, bit;
	unsigned long flags;

	if (!force)
		cpu = cpumask_any_and(mask_val, cpu_online_mask);
	else
		cpu = cpumask_first(mask_val);

	if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
		return -EINVAL;

	raw_spin_lock_irqsave(&irq_controller_lock, flags);
	mask = 0xff << shift;
	bit = gic_cpu_map[cpu] << shift;
	val = readl_relaxed(reg) & ~mask;
	writel_relaxed(val | bit, reg);
	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);

	return IRQ_SET_MASK_OK;
}
#endif
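/*
 * Each interrupt owns one byte of GIC_DIST_TARGET. For example, hwirq 42
 * lives in byte lane 2 (42 % 4) of the word at GIC_DIST_TARGET + 40
 * (42 & ~3), so its per-CPU target mask sits at shift 16 within that word.
 */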
#ifdef CONFIG_PM
static int gic_set_wake(struct irq_data *d, unsigned int on)
{
	int ret = -ENXIO;

	if (gic_arch_extn.irq_set_wake)
		ret = gic_arch_extn.irq_set_wake(d, on);

	return ret;
}
#else
#define gic_set_wake	NULL
#endif
static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	u32 irqstat, irqnr;
	struct gic_chip_data *gic = &gic_data[0];
	void __iomem *cpu_base = gic_data_cpu_base(gic);

	do {
		irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
		irqnr = irqstat & GICC_IAR_INT_ID_MASK;

		if (likely(irqnr > 15 && irqnr < 1021)) {
			handle_domain_irq(gic->domain, irqnr, regs);
			continue;
		}
		if (irqnr < 16) {
			writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
#ifdef CONFIG_SMP
			handle_IPI(irqnr, regs);
#endif
			continue;
		}
		break;
	} while (1);
}
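/*
 * Note on the ID ranges above: GIC_CPU_INTACK returns 0-15 for SGIs
 * (handled as IPIs here) and 16-1020 for PPIs/SPIs. IDs 1021-1023 are
 * reserved; 1023 is the "no interrupt pending" (spurious) value, which
 * is what normally terminates the acknowledge loop.
 */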
static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
	struct gic_chip_data *chip_data = irq_get_handler_data(irq);
	struct irq_chip *chip = irq_get_chip(irq);
	unsigned int cascade_irq, gic_irq;
	unsigned long status;

	chained_irq_enter(chip, desc);

	raw_spin_lock(&irq_controller_lock);
	status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK);
	raw_spin_unlock(&irq_controller_lock);

	gic_irq = (status & GICC_IAR_INT_ID_MASK);
	if (gic_irq == GICC_INT_SPURIOUS)
		goto out;

	cascade_irq = irq_find_mapping(chip_data->domain, gic_irq);
	if (unlikely(gic_irq < 32 || gic_irq > 1020))
		handle_bad_irq(cascade_irq, desc);
	else
		generic_handle_irq(cascade_irq);

 out:
	chained_irq_exit(chip, desc);
}
static struct irq_chip gic_chip = {
	.name			= "GIC",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_retrigger		= gic_retrigger,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
	.irq_set_wake		= gic_set_wake,
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
};
void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
{
	if (gic_nr >= MAX_GIC_NR)
		BUG();
	if (irq_set_handler_data(irq, &gic_data[gic_nr]) != 0)
		BUG();
	irq_set_chained_handler(irq, gic_handle_cascade_irq);
}
static u8 gic_get_cpumask(struct gic_chip_data *gic)
{
	void __iomem *base = gic_data_dist_base(gic);
	u32 mask, i;

	for (i = mask = 0; i < 32; i += 4) {
		mask = readl_relaxed(base + GIC_DIST_TARGET + i);
		mask |= mask >> 16;
		mask |= mask >> 8;
		if (mask)
			break;
	}

	if (!mask && num_possible_cpus() > 1)
		pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");

	return mask;
}
static void gic_cpu_if_up(void)
{
	void __iomem *cpu_base = gic_data_cpu_base(&gic_data[0]);
	u32 bypass = 0;

	/*
	 * Preserve bypass disable bits to be written back later
	 */
	bypass = readl(cpu_base + GIC_CPU_CTRL);
	bypass &= GICC_DIS_BYPASS_MASK;

	writel_relaxed(bypass | GICC_ENABLE, cpu_base + GIC_CPU_CTRL);
}
static void __init gic_dist_init(struct gic_chip_data *gic)
{
	unsigned int i;
	u32 cpumask;
	unsigned int gic_irqs = gic->gic_irqs;
	void __iomem *base = gic_data_dist_base(gic);

	writel_relaxed(GICD_DISABLE, base + GIC_DIST_CTRL);

	/*
	 * Set all global interrupts to this CPU only.
	 */
	cpumask = gic_get_cpumask(gic);
	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);

	gic_dist_config(base, gic_irqs, NULL);

	writel_relaxed(GICD_ENABLE, base + GIC_DIST_CTRL);
}
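/*
 * The shifts above replicate the one-byte CPU mask across all four byte
 * lanes, e.g. 0x01 becomes 0x01010101, so each word-sized write sets the
 * target for four consecutive SPIs at once.
 */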
static void gic_cpu_init(struct gic_chip_data *gic)
{
	void __iomem *dist_base = gic_data_dist_base(gic);
	void __iomem *base = gic_data_cpu_base(gic);
	unsigned int cpu_mask, cpu = smp_processor_id();
	int i;

	/*
	 * Get what the GIC says our CPU mask is.
	 */
	BUG_ON(cpu >= NR_GIC_CPU_IF);
	cpu_mask = gic_get_cpumask(gic);
	gic_cpu_map[cpu] = cpu_mask;

	/*
	 * Clear our mask from the other map entries in case they're
	 * still undefined.
	 */
	for (i = 0; i < NR_GIC_CPU_IF; i++)
		if (i != cpu)
			gic_cpu_map[i] &= ~cpu_mask;

	gic_cpu_config(dist_base, NULL);

	writel_relaxed(GICC_INT_PRI_THRESHOLD, base + GIC_CPU_PRIMASK);
	gic_cpu_if_up();
}
void gic_cpu_if_down(void)
{
	void __iomem *cpu_base = gic_data_cpu_base(&gic_data[0]);
	u32 val = 0;

	val = readl(cpu_base + GIC_CPU_CTRL);
	val &= ~GICC_ENABLE;
	writel_relaxed(val, cpu_base + GIC_CPU_CTRL);
}
#ifdef CONFIG_CPU_PM
/*
 * Saves the GIC distributor registers during suspend or idle. Must be called
 * with interrupts disabled but before powering down the GIC. After calling
 * this function, no interrupts will be delivered by the GIC, and another
 * platform-specific wakeup source must be enabled.
 */
static void gic_dist_save(unsigned int gic_nr)
{
	unsigned int gic_irqs;
	void __iomem *dist_base;
	int i;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	gic_irqs = gic_data[gic_nr].gic_irqs;
	dist_base = gic_data_dist_base(&gic_data[gic_nr]);

	if (!dist_base)
		return;

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		gic_data[gic_nr].saved_spi_conf[i] =
			readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		gic_data[gic_nr].saved_spi_target[i] =
			readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		gic_data[gic_nr].saved_spi_enable[i] =
			readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
}
/*
 * Restores the GIC distributor registers during resume or when coming out of
 * idle. Must be called before enabling interrupts. If a level interrupt
 * that occurred while the GIC was suspended is still present, it will be
 * handled normally, but any edge interrupts that occurred will not be seen by
 * the GIC and need to be handled by the platform-specific wakeup source.
 */
static void gic_dist_restore(unsigned int gic_nr)
{
	unsigned int gic_irqs;
	unsigned int i;
	void __iomem *dist_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	gic_irqs = gic_data[gic_nr].gic_irqs;
	dist_base = gic_data_dist_base(&gic_data[gic_nr]);

	if (!dist_base)
		return;

	writel_relaxed(GICD_DISABLE, dist_base + GIC_DIST_CTRL);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_conf[i],
			dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(GICD_INT_DEF_PRI_X4,
			dist_base + GIC_DIST_PRI + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
			dist_base + GIC_DIST_TARGET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
			dist_base + GIC_DIST_ENABLE_SET + i * 4);

	writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL);
}
static void gic_cpu_save(unsigned int gic_nr)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);

	if (!dist_base || !cpu_base)
		return;

	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);

	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
}
static void gic_cpu_restore(unsigned int gic_nr)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);

	if (!dist_base || !cpu_base)
		return;

	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);

	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
		writel_relaxed(GICD_INT_DEF_PRI_X4,
			dist_base + GIC_DIST_PRI + i * 4);

	writel_relaxed(GICC_INT_PRI_THRESHOLD, cpu_base + GIC_CPU_PRIMASK);
	gic_cpu_if_up();
}
static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	int i;

	for (i = 0; i < MAX_GIC_NR; i++) {
#ifdef CONFIG_GIC_NON_BANKED
		/* Skip over unused GICs */
		if (!gic_data[i].get_base)
			continue;
#endif
		switch (cmd) {
		case CPU_PM_ENTER:
			gic_cpu_save(i);
			break;
		case CPU_PM_ENTER_FAILED:
		case CPU_PM_EXIT:
			gic_cpu_restore(i);
			break;
		case CPU_CLUSTER_PM_ENTER:
			gic_dist_save(i);
			break;
		case CPU_CLUSTER_PM_ENTER_FAILED:
		case CPU_CLUSTER_PM_EXIT:
			gic_dist_restore(i);
			break;
		}
	}

	return NOTIFY_OK;
}
static struct notifier_block gic_notifier_block = {
	.notifier_call = gic_notifier,
};
static void __init gic_pm_init(struct gic_chip_data *gic)
{
	gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
		sizeof(u32));
	BUG_ON(!gic->saved_ppi_enable);

	gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
		sizeof(u32));
	BUG_ON(!gic->saved_ppi_conf);

	if (gic == &gic_data[0])
		cpu_pm_register_notifier(&gic_notifier_block);
}
#else
static void __init gic_pm_init(struct gic_chip_data *gic)
{
}
#endif
#ifdef CONFIG_SMP
static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
	int cpu;
	unsigned long flags, map = 0;

	raw_spin_lock_irqsave(&irq_controller_lock, flags);

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask)
		map |= gic_cpu_map[cpu];

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before they observe us issuing the IPI.
	 */
	dmb(ishst);

	/* this always happens on GIC0 */
	writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);

	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
}
#endif
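/*
 * GIC_DIST_SOFTINT (GICD_SGIR) encoding used above: the CPU target list
 * lives in bits [23:16] (hence map << 16) and the SGI number in bits
 * [3:0], so e.g. map 0x06 with irq 1 raises SGI 1 on CPU interfaces 1
 * and 2.
 */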
#ifdef CONFIG_BL_SWITCHER
/*
 * gic_send_sgi - send a SGI directly to given CPU interface number
 *
 * cpu_id: the ID for the destination CPU interface
 * irq: the IPI number to send a SGI for
 */
void gic_send_sgi(unsigned int cpu_id, unsigned int irq)
{
	BUG_ON(cpu_id >= NR_GIC_CPU_IF);
	cpu_id = 1 << cpu_id;
	/* this always happens on GIC0 */
	writel_relaxed((cpu_id << 16) | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
}
/*
 * gic_get_cpu_id - get the CPU interface ID for the specified CPU
 *
 * @cpu: the logical CPU number to get the GIC ID for.
 *
 * Return the CPU interface ID for the given logical CPU number,
 * or -1 if the CPU number is too large or the interface ID is
 * unknown (more than one bit set).
 */
int gic_get_cpu_id(unsigned int cpu)
{
	unsigned int cpu_bit;

	if (cpu >= NR_GIC_CPU_IF)
		return -1;
	cpu_bit = gic_cpu_map[cpu];
	if (cpu_bit & (cpu_bit - 1))
		return -1;
	return __ffs(cpu_bit);
}
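/*
 * cpu_bit & (cpu_bit - 1) is the usual power-of-two test: it is non-zero
 * exactly when more than one bit is set, i.e. when the interface mapping
 * is ambiguous and -1 must be returned.
 */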
/*
 * gic_migrate_target - migrate IRQs to another CPU interface
 *
 * @new_cpu_id: the CPU target ID to migrate IRQs to
 *
 * Migrate all peripheral interrupts with a target matching the current CPU
 * to the interface corresponding to @new_cpu_id. The CPU interface mapping
 * is also updated. Targets to other CPU interfaces are unchanged.
 * This must be called with IRQs locally disabled.
 */
void gic_migrate_target(unsigned int new_cpu_id)
{
	unsigned int cur_cpu_id, gic_irqs, gic_nr = 0;
	void __iomem *dist_base;
	int i, ror_val, cpu = smp_processor_id();
	u32 val, cur_target_mask, active_mask;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	if (!dist_base)
		return;

	gic_irqs = gic_data[gic_nr].gic_irqs;

	cur_cpu_id = __ffs(gic_cpu_map[cpu]);
	cur_target_mask = 0x01010101 << cur_cpu_id;
	ror_val = (cur_cpu_id - new_cpu_id) & 31;

	raw_spin_lock(&irq_controller_lock);

	/* Update the target interface for this logical CPU */
	gic_cpu_map[cpu] = 1 << new_cpu_id;

	/*
	 * Find all the peripheral interrupts targeting the current
	 * CPU interface and migrate them to the new CPU interface.
	 * We skip DIST_TARGET 0 to 7 as they are read-only.
	 */
	for (i = 8; i < DIV_ROUND_UP(gic_irqs, 4); i++) {
		val = readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
		active_mask = val & cur_target_mask;
		if (active_mask) {
			val &= ~active_mask;
			val |= ror32(active_mask, ror_val);
			writel_relaxed(val, dist_base + GIC_DIST_TARGET + i*4);
		}
	}

	raw_spin_unlock(&irq_controller_lock);

	/*
	 * Now let's migrate and clear any potential SGIs that might be
	 * pending for us (cur_cpu_id). Since GIC_DIST_SGI_PENDING_SET
	 * is a banked register, we can only forward the SGI using
	 * GIC_DIST_SOFTINT. The original SGI source is lost but Linux
	 * doesn't use that information anyway.
	 *
	 * For the same reason we do not adjust SGI source information
	 * for previously sent SGIs by us to other CPUs either.
	 */
	for (i = 0; i < 16; i += 4) {
		int j;
		val = readl_relaxed(dist_base + GIC_DIST_SGI_PENDING_SET + i);
		if (!val)
			continue;
		writel_relaxed(val, dist_base + GIC_DIST_SGI_PENDING_CLEAR + i);
		for (j = i; j < i + 4; j++) {
			if (val & 0xff)
				writel_relaxed((1 << (new_cpu_id + 16)) | j,
						dist_base + GIC_DIST_SOFTINT);
			val >>= 8;
		}
	}
}
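/*
 * The rotation width above is the interface distance in bits, modulo 32.
 * For example, migrating from interface 0 to interface 1 gives
 * ror_val = (0 - 1) & 31 = 31, and ror32(0x01010101, 31) == 0x02020202:
 * every matching target byte moves over by one CPU interface in a single
 * word-sized operation.
 */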
/*
 * gic_get_sgir_physaddr - get the physical address for the SGI register
 *
 * Return the physical address of the SGI register to be used
 * by some early assembly code when the kernel is not yet available.
 */
static unsigned long gic_dist_physaddr;

unsigned long gic_get_sgir_physaddr(void)
{
	if (!gic_dist_physaddr)
		return 0;
	return gic_dist_physaddr + GIC_DIST_SOFTINT;
}
void __init gic_init_physaddr(struct device_node *node)
{
	struct resource res;
	if (of_address_to_resource(node, 0, &res) == 0) {
		gic_dist_physaddr = res.start;
		pr_info("GIC physical location is %#lx\n", gic_dist_physaddr);
	}
}

#else
#define gic_init_physaddr(node)  do { } while (0)
#endif
static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
				irq_hw_number_t hw)
{
	if (hw < 32) {
		irq_set_percpu_devid(irq);
		irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
				    handle_percpu_devid_irq, NULL, NULL);
		set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
	} else {
		irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
	}

	return 0;
}
static void gic_irq_domain_unmap(struct irq_domain *d, unsigned int irq)
{
}
static int gic_irq_domain_xlate(struct irq_domain *d,
				struct device_node *controller,
				const u32 *intspec, unsigned int intsize,
				unsigned long *out_hwirq, unsigned int *out_type)
{
	unsigned long ret = 0;

	if (d->of_node != controller)
		return -EINVAL;
	if (intsize < 3)
		return -EINVAL;

	/* Get the interrupt number and add 16 to skip over SGIs */
	*out_hwirq = intspec[1] + 16;

	/* For SPIs, we need to add 16 more to get the GIC irq ID number */
	if (!intspec[0])
		*out_hwirq += 16;

	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

	return ret;
}
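/*
 * Example with the standard three-cell GIC binding: <0 29 4> (an SPI)
 * maps to hwirq 29 + 16 + 16 = 61, i.e. SPI 29 at hwirq 32 + 29, while
 * <1 13 4> (a PPI) maps to hwirq 13 + 16 = 29.
 */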
#ifdef CONFIG_SMP
static int gic_secondary_init(struct notifier_block *nfb, unsigned long action,
			      void *hcpu)
{
	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
		gic_cpu_init(&gic_data[0]);
	return NOTIFY_OK;
}

/*
 * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
 * priority because the GIC needs to be up before the ARM generic timers.
 */
static struct notifier_block gic_cpu_notifier = {
	.notifier_call = gic_secondary_init,
	.priority = 100,
};
#endif
static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	int i, ret;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	struct of_phandle_args *irq_data = arg;

	ret = gic_irq_domain_xlate(domain, irq_data->np, irq_data->args,
				   irq_data->args_count, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++)
		gic_irq_domain_map(domain, virq + i, hwirq + i);

	return 0;
}
static const struct irq_domain_ops gic_irq_domain_hierarchy_ops = {
	.xlate = gic_irq_domain_xlate,
	.alloc = gic_irq_domain_alloc,
	.free = irq_domain_free_irqs_top,
};

static const struct irq_domain_ops gic_irq_domain_ops = {
	.map = gic_irq_domain_map,
	.unmap = gic_irq_domain_unmap,
	.xlate = gic_irq_domain_xlate,
};
void gic_set_irqchip_flags(unsigned long flags)
{
	gic_chip.flags |= flags;
}
void __init gic_init_bases(unsigned int gic_nr, int irq_start,
			   void __iomem *dist_base, void __iomem *cpu_base,
			   u32 percpu_offset, struct device_node *node)
{
	irq_hw_number_t hwirq_base;
	struct gic_chip_data *gic;
	int gic_irqs, irq_base, i;

	BUG_ON(gic_nr >= MAX_GIC_NR);

	gic = &gic_data[gic_nr];
#ifdef CONFIG_GIC_NON_BANKED
	if (percpu_offset) { /* Franken-GIC without banked registers... */
		unsigned int cpu;

		gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
		gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
		if (WARN_ON(!gic->dist_base.percpu_base ||
			    !gic->cpu_base.percpu_base)) {
			free_percpu(gic->dist_base.percpu_base);
			free_percpu(gic->cpu_base.percpu_base);
			return;
		}

		for_each_possible_cpu(cpu) {
			u32 mpidr = cpu_logical_map(cpu);
			u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
			unsigned long offset = percpu_offset * core_id;
			*per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
			*per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
		}

		gic_set_base_accessor(gic, gic_get_percpu_base);
	} else
#endif
	{			/* Normal, sane GIC... */
		WARN(percpu_offset,
		     "GIC_NON_BANKED not enabled, ignoring %08x offset!",
		     percpu_offset);
		gic->dist_base.common_base = dist_base;
		gic->cpu_base.common_base = cpu_base;
		gic_set_base_accessor(gic, gic_get_common_base);
	}

	/*
	 * Initialize the CPU interface map to all CPUs.
	 * It will be refined as each CPU probes its ID.
	 */
	for (i = 0; i < NR_GIC_CPU_IF; i++)
		gic_cpu_map[i] = 0xff;

	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources.
	 */
	gic_irqs = readl_relaxed(gic_data_dist_base(gic) + GIC_DIST_CTR) & 0x1f;
	gic_irqs = (gic_irqs + 1) * 32;
	if (gic_irqs > 1020)
		gic_irqs = 1020;
	gic->gic_irqs = gic_irqs;

	if (node) {		/* DT case */
		gic->domain = irq_domain_add_linear(node, gic_irqs,
						    &gic_irq_domain_hierarchy_ops,
						    gic);
	} else {		/* Non-DT case */
		/*
		 * For primary GICs, skip over SGIs.
		 * For secondary GICs, skip over PPIs, too.
		 */
		if (gic_nr == 0 && (irq_start & 31) > 0) {
			hwirq_base = 16;
			if (irq_start != -1)
				irq_start = (irq_start & ~31) + 16;
		} else {
			hwirq_base = 32;
		}

		gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */

		irq_base = irq_alloc_descs(irq_start, 16, gic_irqs,
					   numa_node_id());
		if (IS_ERR_VALUE(irq_base)) {
			WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
			     irq_start);
			irq_base = irq_start;
		}

		gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base,
					hwirq_base, &gic_irq_domain_ops, gic);
	}

	if (WARN_ON(!gic->domain))
		return;

	if (gic_nr == 0) {
#ifdef CONFIG_SMP
		set_smp_cross_call(gic_raise_softirq);
		register_cpu_notifier(&gic_cpu_notifier);
#endif
		set_handle_irq(gic_handle_irq);
	}

	gic_chip.flags |= gic_arch_extn.flags;
	gic_dist_init(gic);
	gic_cpu_init(gic);
	gic_pm_init(gic);
}
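/*
 * For the non-banked case above, the DT "cpu-offset" property provides the
 * per-core stride; e.g. cpu-offset = <0x8000> makes the core with MPIDR
 * Aff0 == 1 use dist_base + 0x8000 and cpu_base + 0x8000.
 */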
#ifdef CONFIG_OF
static int gic_cnt __initdata;

static int __init
gic_of_init(struct device_node *node, struct device_node *parent)
{
	void __iomem *cpu_base;
	void __iomem *dist_base;
	u32 percpu_offset;
	int irq;

	if (WARN_ON(!node))
		return -ENODEV;

	dist_base = of_iomap(node, 0);
	WARN(!dist_base, "unable to map gic dist registers\n");

	cpu_base = of_iomap(node, 1);
	WARN(!cpu_base, "unable to map gic cpu registers\n");

	if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
		percpu_offset = 0;

	gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node);
	if (!gic_cnt)
		gic_init_physaddr(node);

	if (parent) {
		irq = irq_of_parse_and_map(node, 0);
		gic_cascade_irq(gic_cnt, irq);
	}

	if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
		gicv2m_of_init(node, gic_data[gic_cnt].domain);

	gic_cnt++;
	return 0;
}
IRQCHIP_DECLARE(gic_400, "arm,gic-400", gic_of_init);
IRQCHIP_DECLARE(arm11mp_gic, "arm,arm11mp-gic", gic_of_init);
IRQCHIP_DECLARE(arm1176jzf_dc_gic, "arm,arm1176jzf-devchip-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init);
IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
#endif
#ifdef CONFIG_ACPI
static phys_addr_t dist_phy_base, cpu_phy_base __initdata;
static int __init
gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header,
			const unsigned long end)
{
	struct acpi_madt_generic_interrupt *processor;
	phys_addr_t gic_cpu_base;
	static int cpu_base_assigned;

	processor = (struct acpi_madt_generic_interrupt *)header;

	if (BAD_MADT_ENTRY(processor, end))
		return -EINVAL;

	/*
	 * There is no support for non-banked GICv1/2 registers in the ACPI
	 * spec. All CPU interface addresses have to be the same.
	 */
	gic_cpu_base = processor->base_address;
	if (cpu_base_assigned && gic_cpu_base != cpu_phy_base)
		return -EINVAL;

	cpu_phy_base = gic_cpu_base;
	cpu_base_assigned = 1;
	return 0;
}
static int __init
gic_acpi_parse_madt_distributor(struct acpi_subtable_header *header,
				const unsigned long end)
{
	struct acpi_madt_generic_distributor *dist;

	dist = (struct acpi_madt_generic_distributor *)header;

	if (BAD_MADT_ENTRY(dist, end))
		return -EINVAL;

	dist_phy_base = dist->base_address;
	return 0;
}
int __init
gic_v2_acpi_init(struct acpi_table_header *table)
{
	void __iomem *cpu_base, *dist_base;
	int count;

	/* Collect CPU base addresses */
	count = acpi_parse_entries(ACPI_SIG_MADT,
				   sizeof(struct acpi_table_madt),
				   gic_acpi_parse_madt_cpu, table,
				   ACPI_MADT_TYPE_GENERIC_INTERRUPT, 0);
	if (count <= 0) {
		pr_err("No valid GICC entries exist\n");
		return -EINVAL;
	}

	/*
	 * Find the distributor base address. We expect one distributor entry
	 * since the ACPI 5.1 spec supports neither multi-GIC instances nor
	 * GIC cascades.
	 */
	count = acpi_parse_entries(ACPI_SIG_MADT,
				   sizeof(struct acpi_table_madt),
				   gic_acpi_parse_madt_distributor, table,
				   ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR, 0);
	if (count <= 0) {
		pr_err("No valid GICD entries exist\n");
		return -EINVAL;
	} else if (count > 1) {
		pr_err("More than one GICD entry detected\n");
		return -EINVAL;
	}

	cpu_base = ioremap(cpu_phy_base, ACPI_GIC_CPU_IF_MEM_SIZE);
	if (!cpu_base) {
		pr_err("Unable to map GICC registers\n");
		return -ENOMEM;
	}

	dist_base = ioremap(dist_phy_base, ACPI_GICV2_DIST_MEM_SIZE);
	if (!dist_base) {
		pr_err("Unable to map GICD registers\n");
		iounmap(cpu_base);
		return -ENOMEM;
	}

	/*
	 * Initialize zero GIC instance (no multi-GIC support). Also, set GIC
	 * as default IRQ domain to allow for GSI registration and GSI to IRQ
	 * number translation (see acpi_register_gsi() and acpi_gsi_to_irq()).
	 */
	gic_init_bases(0, -1, dist_base, cpu_base, 0, NULL);
	irq_set_default_host(gic_data[0].domain);

	acpi_irq_model = ACPI_IRQ_MODEL_GIC;
	return 0;
}
#endif