MIPS: KVM: Set CP0_Status.KX on MIPS64
arch/mips/kvm/entry.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Generation of main entry point for the guest, exception handling.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 *
 * Copyright (C) 2016 Imagination Technologies Ltd.
 */

#include <linux/kvm_host.h>
#include <asm/msa.h>
#include <asm/setup.h>
#include <asm/uasm.h>

/* Register names */
#define ZERO		0
#define AT		1
#define V0		2
#define V1		3
#define A0		4
#define A1		5

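/*
 * t0-t3 differ between ABIs: o32 places them in $8-$11, while n32/n64 use
 * $8-$11 as the extra argument registers a4-a7 and place t0-t3 in $12-$15,
 * hence the conditional definitions below.
 */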
#if _MIPS_SIM == _MIPS_SIM_ABI32
#define T0		8
#define T1		9
#define T2		10
#define T3		11
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */

#if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32
#define T0		12
#define T1		13
#define T2		14
#define T3		15
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */

#define S0		16
#define S1		17
#define T9		25
#define K0		26
#define K1		27
#define GP		28
#define SP		29
#define RA		31

/* Some CP0 registers */
#define C0_HWRENA	7, 0
#define C0_BADVADDR	8, 0
#define C0_ENTRYHI	10, 0
#define C0_STATUS	12, 0
#define C0_CAUSE	13, 0
#define C0_EPC		14, 0
#define C0_EBASE	15, 1
#define C0_CONFIG5	16, 5
#define C0_DDATA_LO	28, 3
#define C0_ERROREPC	30, 0

#define CALLFRAME_SIZ	32

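/*
 * The guest-mode Status value is built from immediates below, so on 64-bit
 * kernels KX must be included in it; otherwise kernel-mode 64-bit addressing
 * would be disabled while in guest context and when taking exceptions from
 * the guest.
 */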
#ifdef CONFIG_64BIT
#define ST0_KX_IF_64	ST0_KX
#else
#define ST0_KX_IF_64	0
#endif

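/*
 * Each scratch register is a { register, select } CP0 pair, as expected by
 * UASM_i_MFC0()/UASM_i_MTC0(); the CP0 macros above expand to both values.
 * kvm_mips_entry_setup() may replace these defaults with KScratch registers.
 */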
static unsigned int scratch_vcpu[2] = { C0_DDATA_LO };
static unsigned int scratch_tmp[2] = { C0_ERROREPC };

enum label_id {
	label_fpu_1 = 1,
	label_msa_1,
	label_return_to_host,
	label_kernel_asid,
	label_exit_common,
};

UASM_L_LA(_fpu_1)
UASM_L_LA(_msa_1)
UASM_L_LA(_return_to_host)
UASM_L_LA(_kernel_asid)
UASM_L_LA(_exit_common)

static void *kvm_mips_build_enter_guest(void *addr);
static void *kvm_mips_build_ret_from_exit(void *addr);
static void *kvm_mips_build_ret_to_guest(void *addr);
static void *kvm_mips_build_ret_to_host(void *addr);

/**
 * kvm_mips_entry_setup() - Perform global setup for entry code.
 *
 * Perform global setup for entry code, such as choosing a scratch register.
 *
 * Returns:	0 on success.
 *		-errno on failure.
 */
int kvm_mips_entry_setup(void)
{
	/*
	 * We prefer to use KScratchN registers, when they are available, over
	 * the defaults above, which may not work on all cores.
	 */
	unsigned int kscratch_mask = cpu_data[0].kscratch_mask & 0xfc;

	/* Pick a scratch register for storing the VCPU pointer */
	if (kscratch_mask) {
		scratch_vcpu[0] = 31;
		scratch_vcpu[1] = ffs(kscratch_mask) - 1;
		kscratch_mask &= ~BIT(scratch_vcpu[1]);
	}

	/* Pick a scratch register to use as a temp for saving state */
	if (kscratch_mask) {
		scratch_tmp[0] = 31;
		scratch_tmp[1] = ffs(kscratch_mask) - 1;
		kscratch_mask &= ~BIT(scratch_tmp[1]);
	}

	return 0;
}

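/*
 * For example, on a core whose kscratch_mask reports KScratch selects 2 and
 * 3 (kscratch_mask & 0xfc == 0x0c), the above yields
 * scratch_vcpu = { 31, 2 } and scratch_tmp = { 31, 3 }, i.e. CP0 Register 31
 * selects 2 and 3.
 */
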
static void kvm_mips_build_save_scratch(u32 **p, unsigned int tmp,
					unsigned int frame)
{
	/* Save the VCPU scratch register value in cp0_epc of the stack frame */
	UASM_i_MFC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);
	UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);

	/* Save the temp scratch register value in cp0_cause of stack frame */
	if (scratch_tmp[0] == 31) {
		UASM_i_MFC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
		UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
	}
}

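/*
 * Only the VCPU scratch register is unconditionally preserved here; the temp
 * scratch register is saved and restored only when a KScratch register was
 * chosen for it (scratch_tmp[0] == 31), so the C0_ERROREPC fallback is not
 * preserved across guest entry.
 */
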
static void kvm_mips_build_restore_scratch(u32 **p, unsigned int tmp,
					   unsigned int frame)
{
	/*
	 * Restore host scratch register values saved by
	 * kvm_mips_build_save_scratch().
	 */
	UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);
	UASM_i_MTC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);

	if (scratch_tmp[0] == 31) {
		UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
		UASM_i_MTC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
	}
}

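/*
 * A usage sketch for the builders below (illustrative only: the buffer, its
 * size and the missing cache maintenance are hypothetical, not this file's
 * API). The emitted code is called through a function pointer matching the
 * prototype documented on kvm_mips_build_vcpu_run():
 *
 *	u32 entry[512];
 *	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
 *
 *	kvm_mips_entry_setup();
 *	kvm_mips_build_vcpu_run(entry);
 *	vcpu_run = (void *)entry;
 *	ret = vcpu_run(vcpu->run, vcpu);
 */
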
/**
 * kvm_mips_build_vcpu_run() - Assemble function to start running a guest VCPU.
 * @addr:	Address to start writing code.
 *
 * Assemble the start of the vcpu_run function to run a guest VCPU. The function
 * conforms to the following prototype:
 *
 * int vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
 *
 * The exit from the guest and the return to the caller are handled by the code
 * generated by kvm_mips_build_ret_to_host().
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_vcpu_run(void *addr)
{
	u32 *p = addr;
	unsigned int i;

	/*
	 * A0: run
	 * A1: vcpu
	 */

	/* k0/k1 not being used in host kernel context */
	UASM_i_ADDIU(&p, K1, SP, -(int)sizeof(struct pt_regs));
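
	/*
	 * Save the callee-saved context: i covers 16-23 and 28-31, i.e.
	 * s0-s7, gp, sp, s8/fp and ra, skipping t8, t9, k0 and k1.
	 */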
	for (i = 16; i < 32; ++i) {
		if (i == 24)
			i = 28;
		UASM_i_SW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
	}

	/* Save host status */
	uasm_i_mfc0(&p, V0, C0_STATUS);
	UASM_i_SW(&p, V0, offsetof(struct pt_regs, cp0_status), K1);

	/* Save scratch registers, will be used to store pointer to vcpu etc */
	kvm_mips_build_save_scratch(&p, V1, K1);

	/* VCPU scratch register has pointer to vcpu */
	UASM_i_MTC0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);

	/* Offset into vcpu->arch */
	UASM_i_ADDIU(&p, K1, A1, offsetof(struct kvm_vcpu, arch));

	/*
	 * Save the host stack to VCPU, used for exception processing
	 * when we exit from the Guest
	 */
	UASM_i_SW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);

	/* Save the kernel gp as well */
	UASM_i_SW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);

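	/*
	 * BEV is set in Status while EBASE is switched to the guest's, so
	 * that any exception taken in this window goes to the fixed
	 * bootstrap vectors rather than through a half-switched EBASE.
	 */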
	/*
	 * Setup status register for running the guest in UM, interrupts
	 * are disabled
	 */
	UASM_i_LA(&p, K0, ST0_EXL | KSU_USER | ST0_BEV | ST0_KX_IF_64);
	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);

	/* load up the new EBASE */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
	uasm_i_mtc0(&p, K0, C0_EBASE);

	/*
	 * Now that the new EBASE has been loaded, unset BEV, set
	 * interrupt mask as it was but make sure that timer interrupts
	 * are enabled
	 */
	uasm_i_addiu(&p, K0, ZERO, ST0_EXL | KSU_USER | ST0_IE | ST0_KX_IF_64);
	uasm_i_andi(&p, V0, V0, ST0_IM);
	uasm_i_or(&p, K0, K0, V0);
	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);

	p = kvm_mips_build_enter_guest(p);

	return p;
}

/**
 * kvm_mips_build_enter_guest() - Assemble code to resume guest execution.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to resume guest execution. This code is common between the
 * initial entry into the guest from the host, and returning from the exit
 * handler back to the guest.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_enter_guest(void *addr)
{
	u32 *p = addr;
	unsigned int i;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Set Guest EPC */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1);
	UASM_i_MTC0(&p, T0, C0_EPC);

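	/*
	 * Guest kernel mode is indicated by Status.KSU != user or by ERL/EXL
	 * being set, so after the andi/xori below T0 is zero exactly when the
	 * guest was in user mode. The ADDIU in the bnez delay slot preloads
	 * T1 with the kernel ASID array; on fall-through (user mode) it is
	 * overwritten with the user ASID array.
	 */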
	/* Set the ASID for the Guest Kernel */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, cop0), K1);
	UASM_i_LW(&p, T0, offsetof(struct mips_coproc, reg[MIPS_CP0_STATUS][0]),
		  T0);
	uasm_i_andi(&p, T0, T0, KSU_USER | ST0_ERL | ST0_EXL);
	uasm_i_xori(&p, T0, T0, KSU_USER);
	uasm_il_bnez(&p, &r, T0, label_kernel_asid);
	UASM_i_ADDIU(&p, T1, K1,
		     offsetof(struct kvm_vcpu_arch, guest_kernel_asid));
	/* else user */
	UASM_i_ADDIU(&p, T1, K1,
		     offsetof(struct kvm_vcpu_arch, guest_user_asid));
	uasm_l_kernel_asid(&l, p);

	/* t1: contains the base of the ASID array, need to get the cpu id */
	/* smp_processor_id */
	uasm_i_lw(&p, T2, offsetof(struct thread_info, cpu), GP);
	/* x4: scale the cpu id to index the 32-bit ASID array */
	uasm_i_sll(&p, T2, T2, 2);
	UASM_i_ADDU(&p, T3, T1, T2);
	uasm_i_lw(&p, K0, 0, T3);
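	/*
	 * With CONFIG_MIPS_ASID_BITS_VARIABLE the number of valid ASID bits
	 * can differ per CPU, so the mask is loaded from
	 * cpu_data[cpu].asid_mask: T2 already holds cpu * 4, and multiplying
	 * by sizeof(struct cpuinfo_mips) / 4 turns that into the byte offset
	 * of cpu_data[cpu]. Otherwise the constant MIPS_ENTRYHI_ASID mask
	 * suffices.
	 */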
#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
	/* x sizeof(struct cpuinfo_mips)/4 */
	uasm_i_addiu(&p, T3, ZERO, sizeof(struct cpuinfo_mips)/4);
	uasm_i_mul(&p, T2, T2, T3);

	UASM_i_LA_mostly(&p, AT, (long)&cpu_data[0].asid_mask);
	UASM_i_ADDU(&p, AT, AT, T2);
	UASM_i_LW(&p, T2, uasm_rel_lo((long)&cpu_data[0].asid_mask), AT);
	uasm_i_and(&p, K0, K0, T2);
#else
	uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID);
#endif
	uasm_i_mtc0(&p, K0, C0_ENTRYHI);
	uasm_i_ehb(&p);

	/* Disable RDHWR access */
	uasm_i_mtc0(&p, ZERO, C0_HWRENA);

	/* load the guest context from VCPU and return */
	for (i = 1; i < 32; ++i) {
		/* Guest k0/k1 loaded later */
		if (i == K0 || i == K1)
			continue;
		UASM_i_LW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
	}

#ifndef CONFIG_CPU_MIPSR6
	/* Restore hi/lo (the hi/lo pair was removed in MIPS R6) */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, hi), K1);
	uasm_i_mthi(&p, K0);

	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, lo), K1);
	uasm_i_mtlo(&p, K0);
#endif

	/* Restore the guest's k0/k1 registers (k1 last, it is the base pointer) */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);
	UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);

	/* Jump to guest */
	uasm_i_eret(&p);

	uasm_resolve_relocs(relocs, labels);

	return p;
}

/**
 * kvm_mips_build_exception() - Assemble first level guest exception handler.
 * @addr:	Address to start writing code.
 * @handler:	Address of common handler (within range of @addr).
 *
 * Assemble exception vector code for guest execution. The generated vector will
 * branch to the common exception handler generated by kvm_mips_build_exit().
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_exception(void *addr, void *handler)
{
	u32 *p = addr;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Save guest k1 into scratch register */
	UASM_i_MTC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);

	/* Get the VCPU pointer from the VCPU scratch register */
	UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
	UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));

	/* Save guest k0 into VCPU structure */
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);

	/* Branch to the common handler */
	uasm_il_b(&p, &r, label_exit_common);
	uasm_i_nop(&p);

	uasm_l_exit_common(&l, handler);
	uasm_resolve_relocs(relocs, labels);

	return p;
}

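/*
 * Note the split in the vector above: with only one spare scratch register,
 * guest k1 is parked in scratch_tmp so that k1 is free to hold the
 * kvm_vcpu_arch pointer, while guest k0 is saved directly into the VCPU
 * structure. kvm_mips_build_exit() later moves k1 from scratch_tmp into the
 * VCPU structure as well.
 */
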
/**
 * kvm_mips_build_exit() - Assemble common guest exit handler.
 * @addr:	Address to start writing code.
 *
 * Assemble the generic guest exit handling code. This is called by the
 * exception vectors (generated by kvm_mips_build_exception()), and calls
 * kvm_mips_handle_exit(), then either resumes the guest or returns to the host
 * depending on the return value.
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_exit(void *addr)
{
	u32 *p = addr;
	unsigned int i;
	struct uasm_label labels[3];
	struct uasm_reloc relocs[3];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/*
	 * Generic Guest exception handler. We end up here when the guest
	 * does something that causes a trap to kernel mode.
	 *
	 * Both k0/k1 registers will have already been saved (k0 into the vcpu
	 * structure, and k1 into the scratch_tmp register).
	 *
	 * The k1 register will already contain the kvm_vcpu_arch pointer.
	 */

	/* Start saving Guest context to VCPU */
	for (i = 0; i < 32; ++i) {
		/* Guest k0/k1 saved later */
		if (i == K0 || i == K1)
			continue;
		UASM_i_SW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
	}

#ifndef CONFIG_CPU_MIPSR6
	/* We need to save hi/lo and restore them on the way out */
	uasm_i_mfhi(&p, T0);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, hi), K1);

	uasm_i_mflo(&p, T0);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, lo), K1);
#endif

	/* Finally save guest k1 to VCPU */
	uasm_i_ehb(&p);
	UASM_i_MFC0(&p, T0, scratch_tmp[0], scratch_tmp[1]);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);

	/* Now that context has been saved, we can use other registers */

	/* Restore vcpu */
	UASM_i_MFC0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);
	uasm_i_move(&p, S1, A1);

	/* Restore run (vcpu->run) */
	UASM_i_LW(&p, A0, offsetof(struct kvm_vcpu, run), A1);
	/* Save pointer to run in s0, will be saved by the compiler */
	uasm_i_move(&p, S0, A0);

	/*
	 * Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
	 * the exception
	 */
	UASM_i_MFC0(&p, K0, C0_EPC);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, pc), K1);

	UASM_i_MFC0(&p, K0, C0_BADVADDR);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_badvaddr),
		  K1);

	uasm_i_mfc0(&p, K0, C0_CAUSE);
	uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_cause), K1);

	/* Now restore the host state just enough to run the handlers */

	/* Switch EBASE to the one used by Linux */
	/* load up the host EBASE */
	uasm_i_mfc0(&p, V0, C0_STATUS);

	uasm_i_lui(&p, AT, ST0_BEV >> 16);
	uasm_i_or(&p, K0, V0, AT);

	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);

	UASM_i_LA_mostly(&p, K0, (long)&ebase);
	UASM_i_LW(&p, K0, uasm_rel_lo((long)&ebase), K0);
	uasm_i_mtc0(&p, K0, C0_EBASE);

	if (raw_cpu_has_fpu) {
		/*
		 * If FPU is enabled, save FCR31 and clear it so that later
		 * ctc1's don't trigger FPE for pending exceptions.
		 */
		uasm_i_lui(&p, AT, ST0_CU1 >> 16);
		uasm_i_and(&p, V1, V0, AT);
		uasm_il_beqz(&p, &r, V1, label_fpu_1);
		uasm_i_nop(&p);
		uasm_i_cfc1(&p, T0, 31);
		uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.fcr31),
			  K1);
		uasm_i_ctc1(&p, ZERO, 31);
		uasm_l_fpu_1(&l, p);
	}

	if (cpu_has_msa) {
		/*
		 * If MSA is enabled, save MSACSR and clear it so that later
		 * instructions don't trigger MSAFPE for pending exceptions.
		 */
		uasm_i_mfc0(&p, T0, C0_CONFIG5);
		uasm_i_ext(&p, T0, T0, 27, 1); /* MIPS_CONF5_MSAEN */
		uasm_il_beqz(&p, &r, T0, label_msa_1);
		uasm_i_nop(&p);
		uasm_i_cfcmsa(&p, T0, MSA_CSR);
		uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.msacsr),
			  K1);
		uasm_i_ctcmsa(&p, MSA_CSR, ZERO);
		uasm_l_msa_1(&l, p);
	}

	/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
	uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));
	uasm_i_and(&p, V0, V0, AT);
	uasm_i_lui(&p, AT, ST0_CU0 >> 16);
	uasm_i_or(&p, V0, V0, AT);
	uasm_i_mtc0(&p, V0, C0_STATUS);
	uasm_i_ehb(&p);

	/* Load up host GP */
	UASM_i_LW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);

	/* Need a stack before we can jump to "C" */
	UASM_i_LW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);

	/* Saved host state */
	UASM_i_ADDIU(&p, SP, SP, -(int)sizeof(struct pt_regs));

	/*
	 * XXXKYMA do we need to load the host ASID, maybe not because the
	 * kernel entries are marked GLOBAL, need to verify
	 */

	/* Restore host scratch registers, as we'll have clobbered them */
	kvm_mips_build_restore_scratch(&p, K0, SP);

	/* Restore RDHWR access */
	UASM_i_LA_mostly(&p, K0, (long)&hwrena);
	uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
	uasm_i_mtc0(&p, K0, C0_HWRENA);

	/* Jump to handler */
	/*
	 * XXXKYMA: not sure if this is safe, how large is the stack??
	 * Now jump to the kvm_mips_handle_exit() to see if we can deal
	 * with this in the kernel
	 */
	UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit);
	uasm_i_jalr(&p, RA, T9);
	UASM_i_ADDIU(&p, SP, SP, -CALLFRAME_SIZ);

	uasm_resolve_relocs(relocs, labels);

	p = kvm_mips_build_ret_from_exit(p);

	return p;
}

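/*
 * Note on the call sequence above: the ADDIU that allocates the
 * CALLFRAME_SIZ argument save area sits in the jalr delay slot, so it
 * executes before kvm_mips_handle_exit() is entered, as the MIPS calling
 * convention requires.
 */
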
/**
 * kvm_mips_build_ret_from_exit() - Assemble guest exit return handler.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to handle the return from kvm_mips_handle_exit(), either
 * resuming the guest or returning to the host depending on the return value.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_ret_from_exit(void *addr)
{
	u32 *p = addr;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Return from handler; make sure interrupts are disabled */
	uasm_i_di(&p, ZERO);
	uasm_i_ehb(&p);

	/*
	 * XXXKYMA: k0/k1 could have been blown away if we processed
	 * an exception while we were handling the exception from the
	 * guest, reload k1
	 */

	uasm_i_move(&p, K1, S1);
	UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));

	/*
	 * Check return value, should tell us if we are returning to the
	 * host (handle I/O etc.) or resuming the guest
	 */
	uasm_i_andi(&p, T0, V0, RESUME_HOST);
	uasm_il_bnez(&p, &r, T0, label_return_to_host);
	uasm_i_nop(&p);

	p = kvm_mips_build_ret_to_guest(p);

	uasm_l_return_to_host(&l, p);
	p = kvm_mips_build_ret_to_host(p);

	uasm_resolve_relocs(relocs, labels);

	return p;
}

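/*
 * The return value tested above is assumed to follow the usual MIPS KVM
 * convention: RESUME_* flags in the low bits, with any error code packed in
 * the higher bits, which is why kvm_mips_build_ret_to_host() recovers the
 * error code with an arithmetic shift right by 2.
 */
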
/**
 * kvm_mips_build_ret_to_guest() - Assemble code to return to the guest.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to handle return from the guest exit handler
 * (kvm_mips_handle_exit()) back to the guest.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_ret_to_guest(void *addr)
{
	u32 *p = addr;

	/* Put the saved pointer to vcpu (s1) back into the scratch register */
	UASM_i_MTC0(&p, S1, scratch_vcpu[0], scratch_vcpu[1]);

	/* Load up the Guest EBASE to minimize the window where BEV is set */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);

	/* Switch EBASE back to the one used by KVM */
	uasm_i_mfc0(&p, V1, C0_STATUS);
	uasm_i_lui(&p, AT, ST0_BEV >> 16);
	uasm_i_or(&p, K0, V1, AT);
	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);
	uasm_i_mtc0(&p, T0, C0_EBASE);

	/* Setup status register for running guest in UM */
	uasm_i_ori(&p, V1, V1, ST0_EXL | KSU_USER | ST0_IE);
	UASM_i_LA(&p, AT, ~(ST0_CU0 | ST0_MX));
	uasm_i_and(&p, V1, V1, AT);
	uasm_i_mtc0(&p, V1, C0_STATUS);
	uasm_i_ehb(&p);

	p = kvm_mips_build_enter_guest(p);

	return p;
}

/**
 * kvm_mips_build_ret_to_host() - Assemble code to return to the host.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to handle return from the guest exit handler
 * (kvm_mips_handle_exit()) back to the host, i.e. to the caller of the vcpu_run
 * function generated by kvm_mips_build_vcpu_run().
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_ret_to_host(void *addr)
{
	u32 *p = addr;
	unsigned int i;

	/* EBASE is already pointing to Linux */
	UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, host_stack), K1);
	UASM_i_ADDIU(&p, K1, K1, -(int)sizeof(struct pt_regs));

	/*
	 * r2/v0 is the return code, shift it down by 2 (arithmetic)
	 * to recover the err code
	 */
	uasm_i_sra(&p, K0, V0, 2);
	uasm_i_move(&p, V0, K0);

	/* Load context saved on the host stack */
	for (i = 16; i < 31; ++i) {
		if (i == 24)
			i = 28;
		UASM_i_LW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
	}

	/* Restore RDHWR access */
	UASM_i_LA_mostly(&p, K0, (long)&hwrena);
	uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
	uasm_i_mtc0(&p, K0, C0_HWRENA);

	/* Restore RA, which is the address we will return to */
	UASM_i_LW(&p, RA, offsetof(struct pt_regs, regs[RA]), K1);
	uasm_i_jr(&p, RA);
	uasm_i_nop(&p);

	return p;
}