/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/compiler.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/kvm_host.h>

#include <asm/kvm_hyp.h>
24 static void __hyp_text
save_maint_int_state(struct kvm_vcpu
*vcpu
,
27 struct vgic_v2_cpu_if
*cpu_if
= &vcpu
->arch
.vgic_cpu
.vgic_v2
;
28 int nr_lr
= (kern_hyp_va(&kvm_vgic_global_state
))->nr_lr
;
33 expect_mi
= !!(cpu_if
->vgic_hcr
& GICH_HCR_UIE
);
35 for (i
= 0; i
< nr_lr
; i
++) {
36 if (!(vcpu
->arch
.vgic_cpu
.live_lrs
& (1UL << i
)))
39 expect_mi
|= (!(cpu_if
->vgic_lr
[i
] & GICH_LR_HW
) &&
40 (cpu_if
->vgic_lr
[i
] & GICH_LR_EOI
));
44 cpu_if
->vgic_misr
= readl_relaxed(base
+ GICH_MISR
);
46 if (cpu_if
->vgic_misr
& GICH_MISR_EOI
) {
47 eisr0
= readl_relaxed(base
+ GICH_EISR0
);
48 if (unlikely(nr_lr
> 32))
49 eisr1
= readl_relaxed(base
+ GICH_EISR1
);
56 cpu_if
->vgic_misr
= 0;
60 #ifdef CONFIG_CPU_BIG_ENDIAN
61 cpu_if
->vgic_eisr
= ((u64
)eisr0
<< 32) | eisr1
;
63 cpu_if
->vgic_eisr
= ((u64
)eisr1
<< 32) | eisr0
;
67 static void __hyp_text
save_elrsr(struct kvm_vcpu
*vcpu
, void __iomem
*base
)
69 struct vgic_v2_cpu_if
*cpu_if
= &vcpu
->arch
.vgic_cpu
.vgic_v2
;
70 int nr_lr
= (kern_hyp_va(&kvm_vgic_global_state
))->nr_lr
;
73 elrsr0
= readl_relaxed(base
+ GICH_ELRSR0
);
74 if (unlikely(nr_lr
> 32))
75 elrsr1
= readl_relaxed(base
+ GICH_ELRSR1
);
79 #ifdef CONFIG_CPU_BIG_ENDIAN
80 cpu_if
->vgic_elrsr
= ((u64
)elrsr0
<< 32) | elrsr1
;
82 cpu_if
->vgic_elrsr
= ((u64
)elrsr1
<< 32) | elrsr0
;
86 static void __hyp_text
save_lrs(struct kvm_vcpu
*vcpu
, void __iomem
*base
)
88 struct vgic_v2_cpu_if
*cpu_if
= &vcpu
->arch
.vgic_cpu
.vgic_v2
;
89 int nr_lr
= (kern_hyp_va(&kvm_vgic_global_state
))->nr_lr
;
92 for (i
= 0; i
< nr_lr
; i
++) {
93 if (!(vcpu
->arch
.vgic_cpu
.live_lrs
& (1UL << i
)))
96 if (cpu_if
->vgic_elrsr
& (1UL << i
))
97 cpu_if
->vgic_lr
[i
] &= ~GICH_LR_STATE
;
99 cpu_if
->vgic_lr
[i
] = readl_relaxed(base
+ GICH_LR0
+ (i
* 4));
101 writel_relaxed(0, base
+ GICH_LR0
+ (i
* 4));
105 /* vcpu is already in the HYP VA space */
106 void __hyp_text
__vgic_v2_save_state(struct kvm_vcpu
*vcpu
)
108 struct kvm
*kvm
= kern_hyp_va(vcpu
->kvm
);
109 struct vgic_v2_cpu_if
*cpu_if
= &vcpu
->arch
.vgic_cpu
.vgic_v2
;
110 struct vgic_dist
*vgic
= &kvm
->arch
.vgic
;
111 void __iomem
*base
= kern_hyp_va(vgic
->vctrl_base
);
116 cpu_if
->vgic_vmcr
= readl_relaxed(base
+ GICH_VMCR
);
118 if (vcpu
->arch
.vgic_cpu
.live_lrs
) {
119 cpu_if
->vgic_apr
= readl_relaxed(base
+ GICH_APR
);
121 save_maint_int_state(vcpu
, base
);
122 save_elrsr(vcpu
, base
);
123 save_lrs(vcpu
, base
);
125 writel_relaxed(0, base
+ GICH_HCR
);
127 vcpu
->arch
.vgic_cpu
.live_lrs
= 0;
129 cpu_if
->vgic_eisr
= 0;
130 cpu_if
->vgic_elrsr
= ~0UL;
131 cpu_if
->vgic_misr
= 0;
132 cpu_if
->vgic_apr
= 0;
136 /* vcpu is already in the HYP VA space */
137 void __hyp_text
__vgic_v2_restore_state(struct kvm_vcpu
*vcpu
)
139 struct kvm
*kvm
= kern_hyp_va(vcpu
->kvm
);
140 struct vgic_v2_cpu_if
*cpu_if
= &vcpu
->arch
.vgic_cpu
.vgic_v2
;
141 struct vgic_dist
*vgic
= &kvm
->arch
.vgic
;
142 void __iomem
*base
= kern_hyp_va(vgic
->vctrl_base
);
143 int nr_lr
= (kern_hyp_va(&kvm_vgic_global_state
))->nr_lr
;
151 for (i
= 0; i
< nr_lr
; i
++)
152 if (cpu_if
->vgic_lr
[i
] & GICH_LR_STATE
)
153 live_lrs
|= 1UL << i
;
156 writel_relaxed(cpu_if
->vgic_hcr
, base
+ GICH_HCR
);
157 writel_relaxed(cpu_if
->vgic_apr
, base
+ GICH_APR
);
158 for (i
= 0; i
< nr_lr
; i
++) {
159 if (!(live_lrs
& (1UL << i
)))
162 writel_relaxed(cpu_if
->vgic_lr
[i
],
163 base
+ GICH_LR0
+ (i
* 4));
167 writel_relaxed(cpu_if
->vgic_vmcr
, base
+ GICH_VMCR
);
168 vcpu
->arch
.vgic_cpu
.live_lrs
= live_lrs
;