Commit | Line | Data |
---|---|---|
c4b1afd0 MZ |
1 | /* |
2 | * Copyright (C) 2012,2013 - ARM Ltd | |
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | |
4 | * | |
5 | * Derived from arch/arm/kvm/handle_exit.c: | |
6 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | |
7 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | |
8 | * | |
9 | * This program is free software; you can redistribute it and/or modify | |
10 | * it under the terms of the GNU General Public License version 2 as | |
11 | * published by the Free Software Foundation. | |
12 | * | |
13 | * This program is distributed in the hope that it will be useful, | |
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
16 | * GNU General Public License for more details. | |
17 | * | |
18 | * You should have received a copy of the GNU General Public License | |
19 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | |
20 | */ | |
21 | ||
22 | #include <linux/kvm.h> | |
23 | #include <linux/kvm_host.h> | |
c6d01a94 MR |
24 | |
25 | #include <asm/esr.h> | |
9d8415d6 | 26 | #include <asm/kvm_asm.h> |
c4b1afd0 | 27 | #include <asm/kvm_coproc.h> |
c6d01a94 | 28 | #include <asm/kvm_emulate.h> |
c4b1afd0 | 29 | #include <asm/kvm_mmu.h> |
dcd2e40c | 30 | #include <asm/kvm_psci.h> |
c4b1afd0 | 31 | |
0d97f884 WH |
32 | #define CREATE_TRACE_POINTS |
33 | #include "trace.h" | |
34 | ||
c4b1afd0 MZ |
35 | typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *); |
36 | ||
37 | static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) | |
38 | { | |
e8e7fcc5 AP |
39 | int ret; |
40 | ||
f6be563a | 41 | trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0), |
0d97f884 | 42 | kvm_vcpu_hvc_get_imm(vcpu)); |
b19e6892 | 43 | vcpu->stat.hvc_exit_stat++; |
0d97f884 | 44 | |
e8e7fcc5 AP |
45 | ret = kvm_psci_call(vcpu); |
46 | if (ret < 0) { | |
47 | kvm_inject_undefined(vcpu); | |
dcd2e40c | 48 | return 1; |
e8e7fcc5 | 49 | } |
dcd2e40c | 50 | |
e8e7fcc5 | 51 | return ret; |
c4b1afd0 MZ |
52 | } |
53 | ||
/*
 * handle_smc - handle a guest-issued SMC instruction.
 *
 * SMC calls from the guest are not forwarded anywhere here: inject an
 * UNDEF into the guest and return 1 to resume it.
 */
static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}
59 | ||
60 | /** | |
d241aac7 MZ |
61 | * kvm_handle_wfx - handle a wait-for-interrupts or wait-for-event |
62 | * instruction executed by a guest | |
63 | * | |
c4b1afd0 MZ |
64 | * @vcpu: the vcpu pointer |
65 | * | |
d241aac7 MZ |
66 | * WFE: Yield the CPU and come back to this vcpu when the scheduler |
67 | * decides to. | |
68 | * WFI: Simply call kvm_vcpu_block(), which will halt execution of | |
c4b1afd0 MZ |
69 | * world-switches and schedule other host processes until there is an |
70 | * incoming IRQ or FIQ to the VM. | |
71 | */ | |
d241aac7 | 72 | static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run) |
c4b1afd0 | 73 | { |
1c6007d5 | 74 | if (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WFx_ISS_WFE) { |
0d97f884 | 75 | trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true); |
b19e6892 | 76 | vcpu->stat.wfe_exit_stat++; |
d241aac7 | 77 | kvm_vcpu_on_spin(vcpu); |
0d97f884 WH |
78 | } else { |
79 | trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false); | |
b19e6892 | 80 | vcpu->stat.wfi_exit_stat++; |
d241aac7 | 81 | kvm_vcpu_block(vcpu); |
0d97f884 | 82 | } |
d241aac7 | 83 | |
05e0127f CD |
84 | kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); |
85 | ||
c4b1afd0 MZ |
86 | return 1; |
87 | } | |
88 | ||
4bd611ca AB |
89 | /** |
90 | * kvm_handle_guest_debug - handle a debug exception instruction | |
91 | * | |
92 | * @vcpu: the vcpu pointer | |
93 | * @run: access to the kvm_run structure for results | |
94 | * | |
95 | * We route all debug exceptions through the same handler. If both the | |
96 | * guest and host are using the same debug facilities it will be up to | |
97 | * userspace to re-inject the correct exception for guest delivery. | |
98 | * | |
99 | * @return: 0 (while setting run->exit_reason), -1 for error | |
100 | */ | |
101 | static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run) | |
102 | { | |
103 | u32 hsr = kvm_vcpu_get_hsr(vcpu); | |
104 | int ret = 0; | |
105 | ||
106 | run->exit_reason = KVM_EXIT_DEBUG; | |
107 | run->debug.arch.hsr = hsr; | |
108 | ||
561454e2 | 109 | switch (ESR_ELx_EC(hsr)) { |
834bf887 AB |
110 | case ESR_ELx_EC_WATCHPT_LOW: |
111 | run->debug.arch.far = vcpu->arch.fault.far_el2; | |
112 | /* fall through */ | |
337b99bf | 113 | case ESR_ELx_EC_SOFTSTP_LOW: |
834bf887 | 114 | case ESR_ELx_EC_BREAKPT_LOW: |
4bd611ca AB |
115 | case ESR_ELx_EC_BKPT32: |
116 | case ESR_ELx_EC_BRK64: | |
117 | break; | |
118 | default: | |
119 | kvm_err("%s: un-handled case hsr: %#08x\n", | |
120 | __func__, (unsigned int) hsr); | |
121 | ret = -1; | |
122 | break; | |
123 | } | |
124 | ||
125 | return ret; | |
126 | } | |
127 | ||
c4b1afd0 | 128 | static exit_handle_fn arm_exit_handlers[] = { |
c6d01a94 MR |
129 | [ESR_ELx_EC_WFx] = kvm_handle_wfx, |
130 | [ESR_ELx_EC_CP15_32] = kvm_handle_cp15_32, | |
131 | [ESR_ELx_EC_CP15_64] = kvm_handle_cp15_64, | |
132 | [ESR_ELx_EC_CP14_MR] = kvm_handle_cp14_32, | |
133 | [ESR_ELx_EC_CP14_LS] = kvm_handle_cp14_load_store, | |
134 | [ESR_ELx_EC_CP14_64] = kvm_handle_cp14_64, | |
135 | [ESR_ELx_EC_HVC32] = handle_hvc, | |
136 | [ESR_ELx_EC_SMC32] = handle_smc, | |
137 | [ESR_ELx_EC_HVC64] = handle_hvc, | |
138 | [ESR_ELx_EC_SMC64] = handle_smc, | |
139 | [ESR_ELx_EC_SYS64] = kvm_handle_sys_reg, | |
140 | [ESR_ELx_EC_IABT_LOW] = kvm_handle_guest_abort, | |
141 | [ESR_ELx_EC_DABT_LOW] = kvm_handle_guest_abort, | |
337b99bf | 142 | [ESR_ELx_EC_SOFTSTP_LOW]= kvm_handle_guest_debug, |
834bf887 AB |
143 | [ESR_ELx_EC_WATCHPT_LOW]= kvm_handle_guest_debug, |
144 | [ESR_ELx_EC_BREAKPT_LOW]= kvm_handle_guest_debug, | |
4bd611ca AB |
145 | [ESR_ELx_EC_BKPT32] = kvm_handle_guest_debug, |
146 | [ESR_ELx_EC_BRK64] = kvm_handle_guest_debug, | |
c4b1afd0 MZ |
147 | }; |
148 | ||
149 | static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu) | |
150 | { | |
056bb5f5 | 151 | u32 hsr = kvm_vcpu_get_hsr(vcpu); |
561454e2 | 152 | u8 hsr_ec = ESR_ELx_EC(hsr); |
c4b1afd0 MZ |
153 | |
154 | if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) || | |
155 | !arm_exit_handlers[hsr_ec]) { | |
056bb5f5 MR |
156 | kvm_err("Unknown exception class: hsr: %#08x -- %s\n", |
157 | hsr, esr_get_class_string(hsr)); | |
c4b1afd0 MZ |
158 | BUG(); |
159 | } | |
160 | ||
161 | return arm_exit_handlers[hsr_ec]; | |
162 | } | |
163 | ||
164 | /* | |
165 | * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on | |
166 | * proper exit to userspace. | |
167 | */ | |
168 | int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, | |
169 | int exception_index) | |
170 | { | |
171 | exit_handle_fn exit_handler; | |
172 | ||
173 | switch (exception_index) { | |
174 | case ARM_EXCEPTION_IRQ: | |
175 | return 1; | |
176 | case ARM_EXCEPTION_TRAP: | |
177 | /* | |
178 | * See ARM ARM B1.14.1: "Hyp traps on instructions | |
179 | * that fail their condition code check" | |
180 | */ | |
181 | if (!kvm_condition_valid(vcpu)) { | |
182 | kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); | |
183 | return 1; | |
184 | } | |
185 | ||
186 | exit_handler = kvm_get_exit_handler(vcpu); | |
187 | ||
188 | return exit_handler(vcpu, run); | |
c94b0cf2 JM |
189 | case ARM_EXCEPTION_HYP_GONE: |
190 | /* | |
191 | * EL2 has been reset to the hyp-stub. This happens when a guest | |
192 | * is pre-empted by kvm_reboot()'s shutdown call. | |
193 | */ | |
194 | run->exit_reason = KVM_EXIT_FAIL_ENTRY; | |
195 | return 0; | |
c4b1afd0 MZ |
196 | default: |
197 | kvm_pr_unimpl("Unsupported exception type: %d", | |
198 | exception_index); | |
199 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
200 | return 0; | |
201 | } | |
202 | } |