Commit | Line | Data |
---|---|---|
f5c236dd | 1 | /* |
d116e812 DCZ |
2 | * This file is subject to the terms and conditions of the GNU General Public |
3 | * License. See the file "COPYING" in the main directory of this archive | |
4 | * for more details. | |
5 | * | |
6 | * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel | |
7 | * | |
8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | |
9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | |
10 | */ | |
f5c236dd SL |
11 | |
12 | #include <linux/errno.h> | |
13 | #include <linux/err.h> | |
14 | #include <linux/module.h> | |
15 | #include <linux/vmalloc.h> | |
16 | ||
17 | #include <linux/kvm_host.h> | |
18 | ||
d7d5b05f DCZ |
19 | #include "opcode.h" |
20 | #include "interrupt.h" | |
f5c236dd SL |
21 | |
22 | static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva) | |
23 | { | |
24 | gpa_t gpa; | |
25 | uint32_t kseg = KSEGX(gva); | |
26 | ||
27 | if ((kseg == CKSEG0) || (kseg == CKSEG1)) | |
28 | gpa = CPHYSADDR(gva); | |
29 | else { | |
6ad78a5c | 30 | kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva); |
f5c236dd SL |
31 | kvm_mips_dump_host_tlbs(); |
32 | gpa = KVM_INVALID_ADDR; | |
33 | } | |
34 | ||
f5c236dd | 35 | kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa); |
f5c236dd SL |
36 | |
37 | return gpa; | |
38 | } | |
39 | ||
f5c236dd SL |
40 | static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu) |
41 | { | |
42 | struct kvm_run *run = vcpu->run; | |
43 | uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; | |
44 | unsigned long cause = vcpu->arch.host_cp0_cause; | |
45 | enum emulation_result er = EMULATE_DONE; | |
46 | int ret = RESUME_GUEST; | |
47 | ||
d116e812 | 48 | if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) |
f5c236dd | 49 | er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu); |
d116e812 | 50 | else |
f5c236dd SL |
51 | er = kvm_mips_emulate_inst(cause, opc, run, vcpu); |
52 | ||
53 | switch (er) { | |
54 | case EMULATE_DONE: | |
55 | ret = RESUME_GUEST; | |
56 | break; | |
57 | ||
58 | case EMULATE_FAIL: | |
59 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
60 | ret = RESUME_HOST; | |
61 | break; | |
62 | ||
63 | case EMULATE_WAIT: | |
64 | run->exit_reason = KVM_EXIT_INTR; | |
65 | ret = RESUME_HOST; | |
66 | break; | |
67 | ||
68 | default: | |
69 | BUG(); | |
70 | } | |
71 | return ret; | |
72 | } | |
73 | ||
74 | static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu) | |
75 | { | |
76 | struct kvm_run *run = vcpu->run; | |
77 | uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; | |
78 | unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; | |
79 | unsigned long cause = vcpu->arch.host_cp0_cause; | |
80 | enum emulation_result er = EMULATE_DONE; | |
81 | int ret = RESUME_GUEST; | |
82 | ||
83 | if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 | |
84 | || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { | |
d116e812 DCZ |
85 | kvm_debug("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n", |
86 | cause, opc, badvaddr); | |
f5c236dd SL |
87 | er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu); |
88 | ||
89 | if (er == EMULATE_DONE) | |
90 | ret = RESUME_GUEST; | |
91 | else { | |
92 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
93 | ret = RESUME_HOST; | |
94 | } | |
95 | } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) { | |
d116e812 DCZ |
96 | /* |
97 | * XXXKYMA: The guest kernel does not expect to get this fault | |
98 | * when we are not using HIGHMEM. Need to address this in a | |
99 | * HIGHMEM kernel | |
f5c236dd | 100 | */ |
6ad78a5c DCZ |
101 | kvm_err("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n", |
102 | cause, opc, badvaddr); | |
f5c236dd SL |
103 | kvm_mips_dump_host_tlbs(); |
104 | kvm_arch_vcpu_dump_regs(vcpu); | |
105 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
106 | ret = RESUME_HOST; | |
107 | } else { | |
6ad78a5c DCZ |
108 | kvm_err("Illegal TLB Mod fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n", |
109 | cause, opc, badvaddr); | |
f5c236dd SL |
110 | kvm_mips_dump_host_tlbs(); |
111 | kvm_arch_vcpu_dump_regs(vcpu); | |
112 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
113 | ret = RESUME_HOST; | |
114 | } | |
115 | return ret; | |
116 | } | |
117 | ||
118 | static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu) | |
119 | { | |
120 | struct kvm_run *run = vcpu->run; | |
121 | uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; | |
122 | unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; | |
123 | unsigned long cause = vcpu->arch.host_cp0_cause; | |
124 | enum emulation_result er = EMULATE_DONE; | |
125 | int ret = RESUME_GUEST; | |
126 | ||
127 | if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) | |
128 | && KVM_GUEST_KERNEL_MODE(vcpu)) { | |
129 | if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) { | |
130 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
131 | ret = RESUME_HOST; | |
132 | } | |
133 | } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 | |
134 | || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { | |
d116e812 DCZ |
135 | kvm_debug("USER ADDR TLB LD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n", |
136 | cause, opc, badvaddr); | |
f5c236dd SL |
137 | er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu); |
138 | if (er == EMULATE_DONE) | |
139 | ret = RESUME_GUEST; | |
140 | else { | |
141 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
142 | ret = RESUME_HOST; | |
143 | } | |
144 | } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) { | |
d116e812 DCZ |
145 | /* |
146 | * All KSEG0 faults are handled by KVM, as the guest kernel does | |
147 | * not expect to ever get them | |
f5c236dd SL |
148 | */ |
149 | if (kvm_mips_handle_kseg0_tlb_fault | |
150 | (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) { | |
151 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
152 | ret = RESUME_HOST; | |
153 | } | |
154 | } else { | |
d116e812 DCZ |
155 | kvm_err("Illegal TLB LD fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n", |
156 | cause, opc, badvaddr); | |
f5c236dd SL |
157 | kvm_mips_dump_host_tlbs(); |
158 | kvm_arch_vcpu_dump_regs(vcpu); | |
159 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
160 | ret = RESUME_HOST; | |
161 | } | |
162 | return ret; | |
163 | } | |
164 | ||
165 | static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu) | |
166 | { | |
167 | struct kvm_run *run = vcpu->run; | |
168 | uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; | |
169 | unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; | |
170 | unsigned long cause = vcpu->arch.host_cp0_cause; | |
171 | enum emulation_result er = EMULATE_DONE; | |
172 | int ret = RESUME_GUEST; | |
173 | ||
174 | if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) | |
175 | && KVM_GUEST_KERNEL_MODE(vcpu)) { | |
176 | if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) { | |
177 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
178 | ret = RESUME_HOST; | |
179 | } | |
180 | } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 | |
181 | || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { | |
f5c236dd SL |
182 | kvm_debug("USER ADDR TLB ST fault: PC: %#lx, BadVaddr: %#lx\n", |
183 | vcpu->arch.pc, badvaddr); | |
f5c236dd | 184 | |
d116e812 DCZ |
185 | /* |
186 | * User Address (UA) fault, this could happen if | |
187 | * (1) TLB entry not present/valid in both Guest and shadow host | |
188 | * TLBs, in this case we pass on the fault to the guest | |
189 | * kernel and let it handle it. | |
190 | * (2) TLB entry is present in the Guest TLB but not in the | |
191 | * shadow, in this case we inject the TLB from the Guest TLB | |
192 | * into the shadow host TLB | |
f5c236dd SL |
193 | */ |
194 | ||
195 | er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu); | |
196 | if (er == EMULATE_DONE) | |
197 | ret = RESUME_GUEST; | |
198 | else { | |
199 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
200 | ret = RESUME_HOST; | |
201 | } | |
202 | } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) { | |
203 | if (kvm_mips_handle_kseg0_tlb_fault | |
204 | (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) { | |
205 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
206 | ret = RESUME_HOST; | |
207 | } | |
208 | } else { | |
6ad78a5c DCZ |
209 | kvm_err("Illegal TLB ST fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n", |
210 | cause, opc, badvaddr); | |
f5c236dd SL |
211 | kvm_mips_dump_host_tlbs(); |
212 | kvm_arch_vcpu_dump_regs(vcpu); | |
213 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
214 | ret = RESUME_HOST; | |
215 | } | |
216 | return ret; | |
217 | } | |
218 | ||
219 | static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu) | |
220 | { | |
221 | struct kvm_run *run = vcpu->run; | |
222 | uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; | |
223 | unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; | |
224 | unsigned long cause = vcpu->arch.host_cp0_cause; | |
225 | enum emulation_result er = EMULATE_DONE; | |
226 | int ret = RESUME_GUEST; | |
227 | ||
228 | if (KVM_GUEST_KERNEL_MODE(vcpu) | |
229 | && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) { | |
f5c236dd | 230 | kvm_debug("Emulate Store to MMIO space\n"); |
f5c236dd SL |
231 | er = kvm_mips_emulate_inst(cause, opc, run, vcpu); |
232 | if (er == EMULATE_FAIL) { | |
6ad78a5c | 233 | kvm_err("Emulate Store to MMIO space failed\n"); |
f5c236dd SL |
234 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
235 | ret = RESUME_HOST; | |
236 | } else { | |
237 | run->exit_reason = KVM_EXIT_MMIO; | |
238 | ret = RESUME_HOST; | |
239 | } | |
240 | } else { | |
6ad78a5c DCZ |
241 | kvm_err("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n", |
242 | cause, opc, badvaddr); | |
f5c236dd SL |
243 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
244 | ret = RESUME_HOST; | |
245 | } | |
246 | return ret; | |
247 | } | |
248 | ||
249 | static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu) | |
250 | { | |
251 | struct kvm_run *run = vcpu->run; | |
252 | uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; | |
253 | unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; | |
254 | unsigned long cause = vcpu->arch.host_cp0_cause; | |
255 | enum emulation_result er = EMULATE_DONE; | |
256 | int ret = RESUME_GUEST; | |
257 | ||
258 | if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) { | |
f5c236dd | 259 | kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr); |
f5c236dd SL |
260 | er = kvm_mips_emulate_inst(cause, opc, run, vcpu); |
261 | if (er == EMULATE_FAIL) { | |
6ad78a5c | 262 | kvm_err("Emulate Load from MMIO space failed\n"); |
f5c236dd SL |
263 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
264 | ret = RESUME_HOST; | |
265 | } else { | |
266 | run->exit_reason = KVM_EXIT_MMIO; | |
267 | ret = RESUME_HOST; | |
268 | } | |
269 | } else { | |
6ad78a5c DCZ |
270 | kvm_err("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n", |
271 | cause, opc, badvaddr); | |
f5c236dd SL |
272 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
273 | ret = RESUME_HOST; | |
274 | er = EMULATE_FAIL; | |
275 | } | |
276 | return ret; | |
277 | } | |
278 | ||
279 | static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu) | |
280 | { | |
281 | struct kvm_run *run = vcpu->run; | |
282 | uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; | |
283 | unsigned long cause = vcpu->arch.host_cp0_cause; | |
284 | enum emulation_result er = EMULATE_DONE; | |
285 | int ret = RESUME_GUEST; | |
286 | ||
287 | er = kvm_mips_emulate_syscall(cause, opc, run, vcpu); | |
288 | if (er == EMULATE_DONE) | |
289 | ret = RESUME_GUEST; | |
290 | else { | |
291 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
292 | ret = RESUME_HOST; | |
293 | } | |
294 | return ret; | |
295 | } | |
296 | ||
297 | static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu) | |
298 | { | |
299 | struct kvm_run *run = vcpu->run; | |
300 | uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; | |
301 | unsigned long cause = vcpu->arch.host_cp0_cause; | |
302 | enum emulation_result er = EMULATE_DONE; | |
303 | int ret = RESUME_GUEST; | |
304 | ||
305 | er = kvm_mips_handle_ri(cause, opc, run, vcpu); | |
306 | if (er == EMULATE_DONE) | |
307 | ret = RESUME_GUEST; | |
308 | else { | |
309 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
310 | ret = RESUME_HOST; | |
311 | } | |
312 | return ret; | |
313 | } | |
314 | ||
315 | static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu) | |
316 | { | |
317 | struct kvm_run *run = vcpu->run; | |
318 | uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; | |
319 | unsigned long cause = vcpu->arch.host_cp0_cause; | |
320 | enum emulation_result er = EMULATE_DONE; | |
321 | int ret = RESUME_GUEST; | |
322 | ||
323 | er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu); | |
324 | if (er == EMULATE_DONE) | |
325 | ret = RESUME_GUEST; | |
326 | else { | |
327 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
328 | ret = RESUME_HOST; | |
329 | } | |
330 | return ret; | |
331 | } | |
332 | ||
0a560427 JH |
333 | static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu) |
334 | { | |
335 | struct kvm_run *run = vcpu->run; | |
336 | uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc; | |
337 | unsigned long cause = vcpu->arch.host_cp0_cause; | |
338 | enum emulation_result er = EMULATE_DONE; | |
339 | int ret = RESUME_GUEST; | |
340 | ||
341 | er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu); | |
342 | if (er == EMULATE_DONE) { | |
343 | ret = RESUME_GUEST; | |
344 | } else { | |
345 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
346 | ret = RESUME_HOST; | |
347 | } | |
348 | return ret; | |
349 | } | |
350 | ||
98119ad5 JH |
351 | static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu) |
352 | { | |
353 | struct kvm_run *run = vcpu->run; | |
354 | uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; | |
355 | unsigned long cause = vcpu->arch.host_cp0_cause; | |
356 | enum emulation_result er = EMULATE_DONE; | |
357 | int ret = RESUME_GUEST; | |
358 | ||
359 | /* No MSA supported in guest, guest reserved instruction exception */ | |
360 | er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu); | |
361 | ||
362 | switch (er) { | |
363 | case EMULATE_DONE: | |
364 | ret = RESUME_GUEST; | |
365 | break; | |
366 | ||
367 | case EMULATE_FAIL: | |
368 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
369 | ret = RESUME_HOST; | |
370 | break; | |
371 | ||
372 | default: | |
373 | BUG(); | |
374 | } | |
375 | return ret; | |
376 | } | |
377 | ||
/* No per-VM state needed by the trap & emulate backend. */
static int kvm_trap_emul_vm_init(struct kvm *kvm)
{
	return 0;
}
382 | ||
/* No per-VCPU init needed beyond the common setup. */
static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}
387 | ||
388 | static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu) | |
389 | { | |
390 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
391 | uint32_t config1; | |
392 | int vcpu_id = vcpu->vcpu_id; | |
393 | ||
d116e812 DCZ |
394 | /* |
395 | * Arch specific stuff, set up config registers properly so that the | |
396 | * guest will come up as expected, for now we simulate a MIPS 24kc | |
f5c236dd SL |
397 | */ |
398 | kvm_write_c0_guest_prid(cop0, 0x00019300); | |
2211ee81 JH |
399 | /* Have config1, Cacheable, noncoherent, write-back, write allocate */ |
400 | kvm_write_c0_guest_config(cop0, MIPS_CONF_M | (0x3 << CP0C0_K0) | | |
401 | (0x1 << CP0C0_AR) | | |
f5c236dd SL |
402 | (MMU_TYPE_R4000 << CP0C0_MT)); |
403 | ||
404 | /* Read the cache characteristics from the host Config1 Register */ | |
405 | config1 = (read_c0_config1() & ~0x7f); | |
406 | ||
407 | /* Set up MMU size */ | |
408 | config1 &= ~(0x3f << 25); | |
409 | config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25); | |
410 | ||
411 | /* We unset some bits that we aren't emulating */ | |
412 | config1 &= | |
413 | ~((1 << CP0C1_C2) | (1 << CP0C1_MD) | (1 << CP0C1_PC) | | |
414 | (1 << CP0C1_WR) | (1 << CP0C1_CA)); | |
415 | kvm_write_c0_guest_config1(cop0, config1); | |
416 | ||
2211ee81 JH |
417 | /* Have config3, no tertiary/secondary caches implemented */ |
418 | kvm_write_c0_guest_config2(cop0, MIPS_CONF_M); | |
419 | /* MIPS_CONF_M | (read_c0_config2() & 0xfff) */ | |
420 | ||
c771607a JH |
421 | /* Have config4, UserLocal */ |
422 | kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI); | |
423 | ||
424 | /* Have config5 */ | |
425 | kvm_write_c0_guest_config4(cop0, MIPS_CONF_M); | |
426 | ||
427 | /* No config6 */ | |
428 | kvm_write_c0_guest_config5(cop0, 0); | |
f5c236dd SL |
429 | |
430 | /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */ | |
431 | kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10)); | |
432 | ||
d116e812 DCZ |
433 | /* |
434 | * Setup IntCtl defaults, compatibilty mode for timer interrupts (HW5) | |
435 | */ | |
f5c236dd SL |
436 | kvm_write_c0_guest_intctl(cop0, 0xFC000000); |
437 | ||
438 | /* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */ | |
439 | kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 | (vcpu_id & 0xFF)); | |
440 | ||
441 | return 0; | |
442 | } | |
443 | ||
f8be02da JH |
444 | static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu, |
445 | const struct kvm_one_reg *reg, | |
446 | s64 *v) | |
447 | { | |
448 | switch (reg->id) { | |
449 | case KVM_REG_MIPS_CP0_COUNT: | |
e30492bb | 450 | *v = kvm_mips_read_count(vcpu); |
f8be02da | 451 | break; |
f8239342 JH |
452 | case KVM_REG_MIPS_COUNT_CTL: |
453 | *v = vcpu->arch.count_ctl; | |
454 | break; | |
455 | case KVM_REG_MIPS_COUNT_RESUME: | |
456 | *v = ktime_to_ns(vcpu->arch.count_resume); | |
457 | break; | |
f74a8e22 JH |
458 | case KVM_REG_MIPS_COUNT_HZ: |
459 | *v = vcpu->arch.count_hz; | |
460 | break; | |
f8be02da JH |
461 | default: |
462 | return -EINVAL; | |
463 | } | |
464 | return 0; | |
465 | } | |
466 | ||
467 | static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu, | |
468 | const struct kvm_one_reg *reg, | |
469 | s64 v) | |
470 | { | |
471 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
f8239342 | 472 | int ret = 0; |
c771607a | 473 | unsigned int cur, change; |
f8be02da JH |
474 | |
475 | switch (reg->id) { | |
476 | case KVM_REG_MIPS_CP0_COUNT: | |
e30492bb | 477 | kvm_mips_write_count(vcpu, v); |
f8be02da JH |
478 | break; |
479 | case KVM_REG_MIPS_CP0_COMPARE: | |
e30492bb JH |
480 | kvm_mips_write_compare(vcpu, v); |
481 | break; | |
482 | case KVM_REG_MIPS_CP0_CAUSE: | |
483 | /* | |
484 | * If the timer is stopped or started (DC bit) it must look | |
485 | * atomic with changes to the interrupt pending bits (TI, IRQ5). | |
486 | * A timer interrupt should not happen in between. | |
487 | */ | |
488 | if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) { | |
489 | if (v & CAUSEF_DC) { | |
490 | /* disable timer first */ | |
491 | kvm_mips_count_disable_cause(vcpu); | |
492 | kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v); | |
493 | } else { | |
494 | /* enable timer last */ | |
495 | kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v); | |
496 | kvm_mips_count_enable_cause(vcpu); | |
497 | } | |
498 | } else { | |
499 | kvm_write_c0_guest_cause(cop0, v); | |
500 | } | |
f8be02da | 501 | break; |
c771607a JH |
502 | case KVM_REG_MIPS_CP0_CONFIG: |
503 | /* read-only for now */ | |
504 | break; | |
505 | case KVM_REG_MIPS_CP0_CONFIG1: | |
506 | cur = kvm_read_c0_guest_config1(cop0); | |
507 | change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu); | |
508 | if (change) { | |
509 | v = cur ^ change; | |
510 | kvm_write_c0_guest_config1(cop0, v); | |
511 | } | |
512 | break; | |
513 | case KVM_REG_MIPS_CP0_CONFIG2: | |
514 | /* read-only for now */ | |
515 | break; | |
516 | case KVM_REG_MIPS_CP0_CONFIG3: | |
517 | cur = kvm_read_c0_guest_config3(cop0); | |
518 | change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu); | |
519 | if (change) { | |
520 | v = cur ^ change; | |
521 | kvm_write_c0_guest_config3(cop0, v); | |
522 | } | |
523 | break; | |
524 | case KVM_REG_MIPS_CP0_CONFIG4: | |
525 | cur = kvm_read_c0_guest_config4(cop0); | |
526 | change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu); | |
527 | if (change) { | |
528 | v = cur ^ change; | |
529 | kvm_write_c0_guest_config4(cop0, v); | |
530 | } | |
531 | break; | |
532 | case KVM_REG_MIPS_CP0_CONFIG5: | |
533 | cur = kvm_read_c0_guest_config5(cop0); | |
534 | change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu); | |
535 | if (change) { | |
536 | v = cur ^ change; | |
537 | kvm_write_c0_guest_config5(cop0, v); | |
538 | } | |
539 | break; | |
f8239342 JH |
540 | case KVM_REG_MIPS_COUNT_CTL: |
541 | ret = kvm_mips_set_count_ctl(vcpu, v); | |
542 | break; | |
543 | case KVM_REG_MIPS_COUNT_RESUME: | |
544 | ret = kvm_mips_set_count_resume(vcpu, v); | |
545 | break; | |
f74a8e22 JH |
546 | case KVM_REG_MIPS_COUNT_HZ: |
547 | ret = kvm_mips_set_count_hz(vcpu, v); | |
548 | break; | |
f8be02da JH |
549 | default: |
550 | return -EINVAL; | |
551 | } | |
f8239342 | 552 | return ret; |
f8be02da JH |
553 | } |
554 | ||
/*
 * kvm_trap_emul_vcpu_get_regs() - Prepare VCPU state for register reads.
 * @vcpu:	virtual CPU.
 *
 * Flush FPU state back to the VCPU structure so it is current.
 */
static int kvm_trap_emul_vcpu_get_regs(struct kvm_vcpu *vcpu)
{
	kvm_lose_fpu(vcpu);

	return 0;
}
561 | ||
/* Nothing to do after userspace writes registers. */
static int kvm_trap_emul_vcpu_set_regs(struct kvm_vcpu *vcpu)
{
	return 0;
}
566 | ||
f5c236dd SL |
567 | static struct kvm_mips_callbacks kvm_trap_emul_callbacks = { |
568 | /* exit handlers */ | |
569 | .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable, | |
570 | .handle_tlb_mod = kvm_trap_emul_handle_tlb_mod, | |
571 | .handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss, | |
572 | .handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss, | |
573 | .handle_addr_err_st = kvm_trap_emul_handle_addr_err_st, | |
574 | .handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld, | |
575 | .handle_syscall = kvm_trap_emul_handle_syscall, | |
576 | .handle_res_inst = kvm_trap_emul_handle_res_inst, | |
577 | .handle_break = kvm_trap_emul_handle_break, | |
0a560427 | 578 | .handle_trap = kvm_trap_emul_handle_trap, |
98119ad5 | 579 | .handle_msa_disabled = kvm_trap_emul_handle_msa_disabled, |
f5c236dd SL |
580 | |
581 | .vm_init = kvm_trap_emul_vm_init, | |
582 | .vcpu_init = kvm_trap_emul_vcpu_init, | |
583 | .vcpu_setup = kvm_trap_emul_vcpu_setup, | |
584 | .gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb, | |
585 | .queue_timer_int = kvm_mips_queue_timer_int_cb, | |
586 | .dequeue_timer_int = kvm_mips_dequeue_timer_int_cb, | |
587 | .queue_io_int = kvm_mips_queue_io_int_cb, | |
588 | .dequeue_io_int = kvm_mips_dequeue_io_int_cb, | |
589 | .irq_deliver = kvm_mips_irq_deliver_cb, | |
590 | .irq_clear = kvm_mips_irq_clear_cb, | |
f8be02da JH |
591 | .get_one_reg = kvm_trap_emul_get_one_reg, |
592 | .set_one_reg = kvm_trap_emul_set_one_reg, | |
b86ecb37 JH |
593 | .vcpu_get_regs = kvm_trap_emul_vcpu_get_regs, |
594 | .vcpu_set_regs = kvm_trap_emul_vcpu_set_regs, | |
f5c236dd SL |
595 | }; |
596 | ||
597 | int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks) | |
598 | { | |
599 | *install_callbacks = &kvm_trap_emul_callbacks; | |
600 | return 0; | |
601 | } |