/*
 * Copyright (C) 2013 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include <linux/irqchip/arm-gic-v3.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
/* These are for GICv2 emulation only */
#define GICH_LR_VIRTUALID (0x3ffUL << 0)
#define GICH_LR_PHYSID_CPUID_SHIFT (10)
#define GICH_LR_PHYSID_CPUID (7UL << GICH_LR_PHYSID_CPUID_SHIFT)
/* GICv3 virtual INTID field of an ICH_LR register: low 32 bits. */
#define ICH_LR_VIRTUALID_MASK (BIT_ULL(32) - 1)

/*
 * LRs are stored in reverse order in memory. make sure we index them
 * correctly.
 */
#define LR_INDEX(lr) (VGIC_V3_MAX_LRS - 1 - lr)

/* Cached ICH_VTR_EL2 value, read from EL2 once at probe time. */
static u32 ich_vtr_el2;
47 static struct vgic_lr
vgic_v3_get_lr(const struct kvm_vcpu
*vcpu
, int lr
)
49 struct vgic_lr lr_desc
;
50 u64 val
= vcpu
->arch
.vgic_cpu
.vgic_v3
.vgic_lr
[LR_INDEX(lr
)];
52 if (vcpu
->kvm
->arch
.vgic
.vgic_model
== KVM_DEV_TYPE_ARM_VGIC_V3
)
53 lr_desc
.irq
= val
& ICH_LR_VIRTUALID_MASK
;
55 lr_desc
.irq
= val
& GICH_LR_VIRTUALID
;
58 if (lr_desc
.irq
<= 15 &&
59 vcpu
->kvm
->arch
.vgic
.vgic_model
== KVM_DEV_TYPE_ARM_VGIC_V2
)
60 lr_desc
.source
= (val
>> GICH_LR_PHYSID_CPUID_SHIFT
) & 0x7;
64 if (val
& ICH_LR_PENDING_BIT
)
65 lr_desc
.state
|= LR_STATE_PENDING
;
66 if (val
& ICH_LR_ACTIVE_BIT
)
67 lr_desc
.state
|= LR_STATE_ACTIVE
;
69 lr_desc
.state
|= LR_EOI_INT
;
74 static void vgic_v3_set_lr(struct kvm_vcpu
*vcpu
, int lr
,
75 struct vgic_lr lr_desc
)
82 * Currently all guest IRQs are Group1, as Group0 would result
83 * in a FIQ in the guest, which it wouldn't expect.
84 * Eventually we want to make this configurable, so we may revisit
87 if (vcpu
->kvm
->arch
.vgic
.vgic_model
== KVM_DEV_TYPE_ARM_VGIC_V3
)
88 lr_val
|= ICH_LR_GROUP
;
90 lr_val
|= (u32
)lr_desc
.source
<< GICH_LR_PHYSID_CPUID_SHIFT
;
92 if (lr_desc
.state
& LR_STATE_PENDING
)
93 lr_val
|= ICH_LR_PENDING_BIT
;
94 if (lr_desc
.state
& LR_STATE_ACTIVE
)
95 lr_val
|= ICH_LR_ACTIVE_BIT
;
96 if (lr_desc
.state
& LR_EOI_INT
)
99 vcpu
->arch
.vgic_cpu
.vgic_v3
.vgic_lr
[LR_INDEX(lr
)] = lr_val
;
102 static void vgic_v3_sync_lr_elrsr(struct kvm_vcpu
*vcpu
, int lr
,
103 struct vgic_lr lr_desc
)
105 if (!(lr_desc
.state
& LR_STATE_MASK
))
106 vcpu
->arch
.vgic_cpu
.vgic_v3
.vgic_elrsr
|= (1U << lr
);
108 vcpu
->arch
.vgic_cpu
.vgic_v3
.vgic_elrsr
&= ~(1U << lr
);
111 static u64
vgic_v3_get_elrsr(const struct kvm_vcpu
*vcpu
)
113 return vcpu
->arch
.vgic_cpu
.vgic_v3
.vgic_elrsr
;
116 static u64
vgic_v3_get_eisr(const struct kvm_vcpu
*vcpu
)
118 return vcpu
->arch
.vgic_cpu
.vgic_v3
.vgic_eisr
;
121 static void vgic_v3_clear_eisr(struct kvm_vcpu
*vcpu
)
123 vcpu
->arch
.vgic_cpu
.vgic_v3
.vgic_eisr
= 0;
126 static u32
vgic_v3_get_interrupt_status(const struct kvm_vcpu
*vcpu
)
128 u32 misr
= vcpu
->arch
.vgic_cpu
.vgic_v3
.vgic_misr
;
131 if (misr
& ICH_MISR_EOI
)
132 ret
|= INT_STATUS_EOI
;
133 if (misr
& ICH_MISR_U
)
134 ret
|= INT_STATUS_UNDERFLOW
;
139 static void vgic_v3_get_vmcr(struct kvm_vcpu
*vcpu
, struct vgic_vmcr
*vmcrp
)
141 u32 vmcr
= vcpu
->arch
.vgic_cpu
.vgic_v3
.vgic_vmcr
;
143 vmcrp
->ctlr
= (vmcr
& ICH_VMCR_CTLR_MASK
) >> ICH_VMCR_CTLR_SHIFT
;
144 vmcrp
->abpr
= (vmcr
& ICH_VMCR_BPR1_MASK
) >> ICH_VMCR_BPR1_SHIFT
;
145 vmcrp
->bpr
= (vmcr
& ICH_VMCR_BPR0_MASK
) >> ICH_VMCR_BPR0_SHIFT
;
146 vmcrp
->pmr
= (vmcr
& ICH_VMCR_PMR_MASK
) >> ICH_VMCR_PMR_SHIFT
;
149 static void vgic_v3_enable_underflow(struct kvm_vcpu
*vcpu
)
151 vcpu
->arch
.vgic_cpu
.vgic_v3
.vgic_hcr
|= ICH_HCR_UIE
;
154 static void vgic_v3_disable_underflow(struct kvm_vcpu
*vcpu
)
156 vcpu
->arch
.vgic_cpu
.vgic_v3
.vgic_hcr
&= ~ICH_HCR_UIE
;
159 static void vgic_v3_set_vmcr(struct kvm_vcpu
*vcpu
, struct vgic_vmcr
*vmcrp
)
163 vmcr
= (vmcrp
->ctlr
<< ICH_VMCR_CTLR_SHIFT
) & ICH_VMCR_CTLR_MASK
;
164 vmcr
|= (vmcrp
->abpr
<< ICH_VMCR_BPR1_SHIFT
) & ICH_VMCR_BPR1_MASK
;
165 vmcr
|= (vmcrp
->bpr
<< ICH_VMCR_BPR0_SHIFT
) & ICH_VMCR_BPR0_MASK
;
166 vmcr
|= (vmcrp
->pmr
<< ICH_VMCR_PMR_SHIFT
) & ICH_VMCR_PMR_MASK
;
168 vcpu
->arch
.vgic_cpu
.vgic_v3
.vgic_vmcr
= vmcr
;
171 static void vgic_v3_enable(struct kvm_vcpu
*vcpu
)
173 struct vgic_v3_cpu_if
*vgic_v3
= &vcpu
->arch
.vgic_cpu
.vgic_v3
;
176 * By forcing VMCR to zero, the GIC will restore the binary
177 * points to their reset values. Anything else resets to zero
180 vgic_v3
->vgic_vmcr
= 0;
183 * If we are emulating a GICv3, we do it in an non-GICv2-compatible
184 * way, so we force SRE to 1 to demonstrate this to the guest.
185 * This goes with the spec allowing the value to be RAO/WI.
187 if (vcpu
->kvm
->arch
.vgic
.vgic_model
== KVM_DEV_TYPE_ARM_VGIC_V3
)
188 vgic_v3
->vgic_sre
= ICC_SRE_EL1_SRE
;
190 vgic_v3
->vgic_sre
= 0;
192 /* Get the show on the road... */
193 vgic_v3
->vgic_hcr
= ICH_HCR_EN
;
196 static const struct vgic_ops vgic_v3_ops
= {
197 .get_lr
= vgic_v3_get_lr
,
198 .set_lr
= vgic_v3_set_lr
,
199 .sync_lr_elrsr
= vgic_v3_sync_lr_elrsr
,
200 .get_elrsr
= vgic_v3_get_elrsr
,
201 .get_eisr
= vgic_v3_get_eisr
,
202 .clear_eisr
= vgic_v3_clear_eisr
,
203 .get_interrupt_status
= vgic_v3_get_interrupt_status
,
204 .enable_underflow
= vgic_v3_enable_underflow
,
205 .disable_underflow
= vgic_v3_disable_underflow
,
206 .get_vmcr
= vgic_v3_get_vmcr
,
207 .set_vmcr
= vgic_v3_set_vmcr
,
208 .enable
= vgic_v3_enable
,
/* Hardware parameters discovered by vgic_v3_probe() and published via *params. */
static struct vgic_params vgic_v3_params;
214 * vgic_v3_probe - probe for a GICv3 compatible interrupt controller in DT
215 * @node: pointer to the DT node
216 * @ops: address of a pointer to the GICv3 operations
217 * @params: address of a pointer to HW-specific parameters
219 * Returns 0 if a GICv3 has been found, with the low level operations
220 * in *ops and the HW parameters in *params. Returns an error code
223 int vgic_v3_probe(struct device_node
*vgic_node
,
224 const struct vgic_ops
**ops
,
225 const struct vgic_params
**params
)
229 struct resource vcpu_res
;
230 struct vgic_params
*vgic
= &vgic_v3_params
;
232 vgic
->maint_irq
= irq_of_parse_and_map(vgic_node
, 0);
233 if (!vgic
->maint_irq
) {
234 kvm_err("error getting vgic maintenance irq from DT\n");
239 ich_vtr_el2
= kvm_call_hyp(__vgic_v3_get_ich_vtr_el2
);
242 * The ListRegs field is 5 bits, but there is a architectural
243 * maximum of 16 list registers. Just ignore bit 4...
245 vgic
->nr_lr
= (ich_vtr_el2
& 0xf) + 1;
246 vgic
->can_emulate_gicv2
= false;
248 if (of_property_read_u32(vgic_node
, "#redistributor-regions", &gicv_idx
))
251 gicv_idx
+= 3; /* Also skip GICD, GICC, GICH */
252 if (of_address_to_resource(vgic_node
, gicv_idx
, &vcpu_res
)) {
253 kvm_info("GICv3: no GICV resource entry\n");
255 } else if (!PAGE_ALIGNED(vcpu_res
.start
)) {
256 pr_warn("GICV physical address 0x%llx not page aligned\n",
257 (unsigned long long)vcpu_res
.start
);
259 } else if (!PAGE_ALIGNED(resource_size(&vcpu_res
))) {
260 pr_warn("GICV size 0x%llx not a multiple of page size 0x%lx\n",
261 (unsigned long long)resource_size(&vcpu_res
),
265 vgic
->vcpu_base
= vcpu_res
.start
;
266 vgic
->can_emulate_gicv2
= true;
267 kvm_register_device_ops(&kvm_arm_vgic_v2_ops
,
268 KVM_DEV_TYPE_ARM_VGIC_V2
);
270 if (vgic
->vcpu_base
== 0)
271 kvm_info("disabling GICv2 emulation\n");
272 kvm_register_device_ops(&kvm_arm_vgic_v3_ops
, KVM_DEV_TYPE_ARM_VGIC_V3
);
274 vgic
->vctrl_base
= NULL
;
275 vgic
->type
= VGIC_V3
;
276 vgic
->max_gic_vcpus
= KVM_MAX_VCPUS
;
278 kvm_info("%s@%llx IRQ%d\n", vgic_node
->name
,
279 vcpu_res
.start
, vgic
->maint_irq
);
285 of_node_put(vgic_node
);