MIPS: KVM: Dynamically choose scratch registers
arch/mips/kvm/entry.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Generation of main entry point for the guest, exception handling.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 *
 * Copyright (C) 2016 Imagination Technologies Ltd.
 */

#include <linux/kvm_host.h>
#include <asm/msa.h>
#include <asm/setup.h>
#include <asm/uasm.h>

/* Register names */
#define ZERO 0
#define AT 1
#define V0 2
#define V1 3
#define A0 4
#define A1 5

#if _MIPS_SIM == _MIPS_SIM_ABI32
#define T0 8
#define T1 9
#define T2 10
#define T3 11
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */

#if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32
#define T0 12
#define T1 13
#define T2 14
#define T3 15
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */

#define S0 16
#define S1 17
#define T9 25
#define K0 26
#define K1 27
#define GP 28
#define SP 29
#define RA 31

/* Some CP0 registers */
#define C0_HWRENA 7, 0
#define C0_BADVADDR 8, 0
#define C0_ENTRYHI 10, 0
#define C0_STATUS 12, 0
#define C0_CAUSE 13, 0
#define C0_EPC 14, 0
#define C0_EBASE 15, 1
#define C0_CONFIG5 16, 5
#define C0_DDATA_LO 28, 3
#define C0_ERROREPC 30, 0

#define CALLFRAME_SIZ 32

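/*
 * Scratch CP0 registers, given as { register number, select } pairs in the
 * same form as the C0_* macros above. The DDATA_LO/ErrorEPC defaults are
 * overridden with KScratch registers by kvm_mips_entry_setup() when the CPU
 * provides them.
 */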
static unsigned int scratch_vcpu[2] = { C0_DDATA_LO };
static unsigned int scratch_tmp[2] = { C0_ERROREPC };

enum label_id {
        label_fpu_1 = 1,
        label_msa_1,
        label_return_to_host,
        label_kernel_asid,
};

UASM_L_LA(_fpu_1)
UASM_L_LA(_msa_1)
UASM_L_LA(_return_to_host)
UASM_L_LA(_kernel_asid)

static void *kvm_mips_build_enter_guest(void *addr);
static void *kvm_mips_build_ret_from_exit(void *addr);
static void *kvm_mips_build_ret_to_guest(void *addr);
static void *kvm_mips_build_ret_to_host(void *addr);

/**
 * kvm_mips_entry_setup() - Perform global setup for entry code.
 *
 * Perform global setup for entry code, such as choosing a scratch register.
 *
 * Returns: 0 on success.
 *          -errno on failure.
 */
int kvm_mips_entry_setup(void)
{
        /*
         * We prefer to use KScratchN registers if they are available over the
         * defaults above, which may not work on all cores.
         */
        unsigned int kscratch_mask = cpu_data[0].kscratch_mask & 0xfc;
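        /*
         * kscratch_mask has one bit per usable select of CP0 register 31;
         * masking with 0xfc keeps only selects 2..7 (the architecturally
         * defined KScratch registers), which is why scratch_vcpu[0] and
         * scratch_tmp[0] are set to 31 below.
         */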

        /* Pick a scratch register for storing VCPU */
        if (kscratch_mask) {
                scratch_vcpu[0] = 31;
                scratch_vcpu[1] = ffs(kscratch_mask) - 1;
                kscratch_mask &= ~BIT(scratch_vcpu[1]);
        }

        /* Pick a scratch register to use as a temp for saving state */
        if (kscratch_mask) {
                scratch_tmp[0] = 31;
                scratch_tmp[1] = ffs(kscratch_mask) - 1;
                kscratch_mask &= ~BIT(scratch_tmp[1]);
        }

        return 0;
}

static void kvm_mips_build_save_scratch(u32 **p, unsigned int tmp,
                                        unsigned int frame)
{
        /* Save the VCPU scratch register value in cp0_epc of the stack frame */
        uasm_i_mfc0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);
        UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);

        /* Save the temp scratch register value in cp0_cause of stack frame */
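        /*
         * Only needed when entry setup picked a KScratch register (CP0
         * register 31) for the temp, since the host may use KScratch
         * registers itself; the ErrorEPC fallback is assumed not to hold
         * live host state.
         */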
        if (scratch_tmp[0] == 31) {
                uasm_i_mfc0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
                UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
        }
}

static void kvm_mips_build_restore_scratch(u32 **p, unsigned int tmp,
                                           unsigned int frame)
{
        /*
         * Restore host scratch register values saved by
         * kvm_mips_build_save_scratch().
         */
        UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);
        uasm_i_mtc0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);

        if (scratch_tmp[0] == 31) {
                UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
                uasm_i_mtc0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
        }
}

/**
 * kvm_mips_build_vcpu_run() - Assemble function to start running a guest VCPU.
 * @addr: Address to start writing code.
 *
 * Assemble the start of the vcpu_run function to run a guest VCPU. The function
 * conforms to the following prototype:
 *
 * int vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
 *
 * The exit from the guest and return to the caller is handled by the code
 * generated by kvm_mips_build_ret_to_host().
 *
 * Returns: Next address after end of written function.
 */
void *kvm_mips_build_vcpu_run(void *addr)
{
        u32 *p = addr;
        unsigned int i;

        /*
         * A0: run
         * A1: vcpu
         */

        /* k0/k1 not being used in host kernel context */
        uasm_i_addiu(&p, K1, SP, -(int)sizeof(struct pt_regs));
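        /*
         * Save the callee-saved registers (s0-s7, gp, sp, s8/fp, ra) into the
         * pt_regs frame just carved out below the stack pointer; the loop
         * skips 24-27 (t8, t9, k0, k1), which need not be preserved here.
         */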
        for (i = 16; i < 32; ++i) {
                if (i == 24)
                        i = 28;
                UASM_i_SW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
        }

        /* Save hi/lo */
        uasm_i_mflo(&p, V0);
        UASM_i_SW(&p, V0, offsetof(struct pt_regs, lo), K1);
        uasm_i_mfhi(&p, V1);
        UASM_i_SW(&p, V1, offsetof(struct pt_regs, hi), K1);

        /* Save host status */
        uasm_i_mfc0(&p, V0, C0_STATUS);
        UASM_i_SW(&p, V0, offsetof(struct pt_regs, cp0_status), K1);

        /* Save scratch registers, will be used to store pointer to vcpu etc */
        kvm_mips_build_save_scratch(&p, V1, K1);

        /* VCPU scratch register has pointer to vcpu */
        uasm_i_mtc0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);

        /* Offset into vcpu->arch */
        uasm_i_addiu(&p, K1, A1, offsetof(struct kvm_vcpu, arch));

        /*
         * Save the host stack to VCPU, used for exception processing
         * when we exit from the Guest
         */
        UASM_i_SW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);

        /* Save the kernel gp as well */
        UASM_i_SW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);

        /*
         * Setup status register for running the guest in UM, interrupts
         * are disabled
         */
        UASM_i_LA(&p, K0, ST0_EXL | KSU_USER | ST0_BEV);
        uasm_i_mtc0(&p, K0, C0_STATUS);
        uasm_i_ehb(&p);

        /* load up the new EBASE */
        UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
        uasm_i_mtc0(&p, K0, C0_EBASE);

        /*
         * Now that the new EBASE has been loaded, unset BEV, set
         * interrupt mask as it was but make sure that timer interrupts
         * are enabled
         */
        uasm_i_addiu(&p, K0, ZERO, ST0_EXL | KSU_USER | ST0_IE);
        uasm_i_andi(&p, V0, V0, ST0_IM);
        uasm_i_or(&p, K0, K0, V0);
        uasm_i_mtc0(&p, K0, C0_STATUS);
        uasm_i_ehb(&p);

        p = kvm_mips_build_enter_guest(p);

        return p;
}

/**
 * kvm_mips_build_enter_guest() - Assemble code to resume guest execution.
 * @addr: Address to start writing code.
 *
 * Assemble the code to resume guest execution. This code is common between the
 * initial entry into the guest from the host, and returning from the exit
 * handler back to the guest.
 *
 * Returns: Next address after end of written function.
 */
static void *kvm_mips_build_enter_guest(void *addr)
{
        u32 *p = addr;
        unsigned int i;
        struct uasm_label labels[2];
        struct uasm_reloc relocs[2];
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;

        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));

        /* Set Guest EPC */
        UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1);
        uasm_i_mtc0(&p, T0, C0_EPC);

        /* Set the ASID for the Guest Kernel */
        UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, cop0), K1);
        UASM_i_LW(&p, T0, offsetof(struct mips_coproc, reg[MIPS_CP0_STATUS][0]),
                  T0);
        uasm_i_andi(&p, T0, T0, KSU_USER | ST0_ERL | ST0_EXL);
        uasm_i_xori(&p, T0, T0, KSU_USER);
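        /*
         * T0 is now zero only if the guest Status had KSU = user with ERL and
         * EXL clear (guest user mode). The bnez delay slot preloads the
         * guest_kernel_asid offset into T1; the user-mode fall-through then
         * overwrites it before label_kernel_asid.
         */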
        uasm_il_bnez(&p, &r, T0, label_kernel_asid);
        uasm_i_addiu(&p, T1, K1,
                     offsetof(struct kvm_vcpu_arch, guest_kernel_asid));
        /* else user */
        uasm_i_addiu(&p, T1, K1,
                     offsetof(struct kvm_vcpu_arch, guest_user_asid));
        uasm_l_kernel_asid(&l, p);

        /* t1: contains the base of the ASID array, need to get the cpu id */
        /* smp_processor_id */
        UASM_i_LW(&p, T2, offsetof(struct thread_info, cpu), GP);
        /* x4 */
        uasm_i_sll(&p, T2, T2, 2);
        UASM_i_ADDU(&p, T3, T1, T2);
        UASM_i_LW(&p, K0, 0, T3);
#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
        /* x sizeof(struct cpuinfo_mips)/4 */
        uasm_i_addiu(&p, T3, ZERO, sizeof(struct cpuinfo_mips)/4);
        uasm_i_mul(&p, T2, T2, T3);

        UASM_i_LA_mostly(&p, AT, (long)&cpu_data[0].asid_mask);
        UASM_i_ADDU(&p, AT, AT, T2);
        UASM_i_LW(&p, T2, uasm_rel_lo((long)&cpu_data[0].asid_mask), AT);
        uasm_i_and(&p, K0, K0, T2);
#else
        uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID);
#endif
        uasm_i_mtc0(&p, K0, C0_ENTRYHI);
        uasm_i_ehb(&p);

        /* Disable RDHWR access */
        uasm_i_mtc0(&p, ZERO, C0_HWRENA);

        /* load the guest context from VCPU and return */
        for (i = 1; i < 32; ++i) {
                /* Guest k0/k1 loaded later */
                if (i == K0 || i == K1)
                        continue;
                UASM_i_LW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
        }

        /* Restore hi/lo */
        UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, hi), K1);
        uasm_i_mthi(&p, K0);

        UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, lo), K1);
        uasm_i_mtlo(&p, K0);

        /* Restore the guest's k0/k1 registers */
        UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);
        UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);

        /* Jump to guest */
        uasm_i_eret(&p);

        uasm_resolve_relocs(relocs, labels);

        return p;
}

/**
 * kvm_mips_build_exception() - Assemble first level guest exception handler.
 * @addr: Address to start writing code.
 *
 * Assemble exception vector code for guest execution. The generated vector will
 * jump to the common exception handler generated by kvm_mips_build_exit().
 *
 * Returns: Next address after end of written function.
 */
void *kvm_mips_build_exception(void *addr)
{
        u32 *p = addr;

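        /*
         * k0 is stashed in the temp scratch CP0 register so it can be used to
         * compute the EBASE-relative spill address for k1; the common exit
         * handler built by kvm_mips_build_exit() reads both back from there.
         */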
        /* Save guest k0 */
        uasm_i_mtc0(&p, K0, scratch_tmp[0], scratch_tmp[1]);
        uasm_i_ehb(&p);

        /* Get EBASE */
        uasm_i_mfc0(&p, K0, C0_EBASE);
        /* Get rid of CPUNum */
        uasm_i_srl(&p, K0, K0, 10);
        uasm_i_sll(&p, K0, K0, 10);
        /* Save k1 @ offset 0x3000 */
        UASM_i_SW(&p, K1, 0x3000, K0);

        /* Exception handler is installed @ offset 0x2000 */
        uasm_i_addiu(&p, K0, K0, 0x2000);
        /* Jump to the function */
        uasm_i_jr(&p, K0);
        uasm_i_nop(&p);

        return p;
}

/**
 * kvm_mips_build_exit() - Assemble common guest exit handler.
 * @addr: Address to start writing code.
 *
 * Assemble the generic guest exit handling code. This is called by the
 * exception vectors (generated by kvm_mips_build_exception()), and calls
 * kvm_mips_handle_exit(), then either resumes the guest or returns to the host
 * depending on the return value.
 *
 * Returns: Next address after end of written function.
 */
void *kvm_mips_build_exit(void *addr)
{
        u32 *p = addr;
        unsigned int i;
        struct uasm_label labels[3];
        struct uasm_reloc relocs[3];
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;

        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));

        /*
         * Generic Guest exception handler. We end up here when the guest
         * does something that causes a trap to kernel mode.
         */

        /* Get the VCPU pointer from the scratch register */
        uasm_i_mfc0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
        uasm_i_addiu(&p, K1, K1, offsetof(struct kvm_vcpu, arch));

        /* Start saving Guest context to VCPU */
        for (i = 0; i < 32; ++i) {
                /* Guest k0/k1 saved later */
                if (i == K0 || i == K1)
                        continue;
                UASM_i_SW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
        }

        /* We need to save hi/lo and restore them on the way out */
        uasm_i_mfhi(&p, T0);
        UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, hi), K1);

        uasm_i_mflo(&p, T0);
        UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, lo), K1);

        /* Finally save guest k0/k1 to VCPU */
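        /*
         * Guest k0 was stashed in the temp scratch register and guest k1 was
         * spilled at EBASE + 0x3000 by the vector generated in
         * kvm_mips_build_exception().
         */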
        uasm_i_mfc0(&p, T0, scratch_tmp[0], scratch_tmp[1]);
        UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);

        /* Get GUEST k1 and save it in VCPU */
        uasm_i_addiu(&p, T1, ZERO, ~0x2ff);
        uasm_i_mfc0(&p, T0, C0_EBASE);
        uasm_i_and(&p, T0, T0, T1);
        UASM_i_LW(&p, T0, 0x3000, T0);
        UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);

        /* Now that context has been saved, we can use other registers */

        /* Restore vcpu */
        uasm_i_mfc0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);
        uasm_i_move(&p, S1, A1);

        /* Restore run (vcpu->run) */
        UASM_i_LW(&p, A0, offsetof(struct kvm_vcpu, run), A1);
        /* Save pointer to run in s0, will be saved by the compiler */
        uasm_i_move(&p, S0, A0);

        /*
         * Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
         * the exception
         */
        uasm_i_mfc0(&p, K0, C0_EPC);
        UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, pc), K1);

        uasm_i_mfc0(&p, K0, C0_BADVADDR);
        UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_badvaddr),
                  K1);

        uasm_i_mfc0(&p, K0, C0_CAUSE);
        uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_cause), K1);

        /* Now restore the host state just enough to run the handlers */

        /* Switch EBASE to the one used by Linux */
        /* load up the host EBASE */
        uasm_i_mfc0(&p, V0, C0_STATUS);

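        /*
         * Set BEV first so that any exception taken while EBASE is being
         * switched uses the fixed bootstrap vectors rather than a
         * half-updated exception base.
         */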
        uasm_i_lui(&p, AT, ST0_BEV >> 16);
        uasm_i_or(&p, K0, V0, AT);

        uasm_i_mtc0(&p, K0, C0_STATUS);
        uasm_i_ehb(&p);

        UASM_i_LA_mostly(&p, K0, (long)&ebase);
        UASM_i_LW(&p, K0, uasm_rel_lo((long)&ebase), K0);
        uasm_i_mtc0(&p, K0, C0_EBASE);

        if (raw_cpu_has_fpu) {
                /*
                 * If FPU is enabled, save FCR31 and clear it so that later
                 * ctc1's don't trigger FPE for pending exceptions.
                 */
                uasm_i_lui(&p, AT, ST0_CU1 >> 16);
                uasm_i_and(&p, V1, V0, AT);
                uasm_il_beqz(&p, &r, V1, label_fpu_1);
                uasm_i_nop(&p);
                uasm_i_cfc1(&p, T0, 31);
                uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.fcr31),
                          K1);
                uasm_i_ctc1(&p, ZERO, 31);
                uasm_l_fpu_1(&l, p);
        }

        if (cpu_has_msa) {
                /*
                 * If MSA is enabled, save MSACSR and clear it so that later
                 * instructions don't trigger MSAFPE for pending exceptions.
                 */
                uasm_i_mfc0(&p, T0, C0_CONFIG5);
                uasm_i_ext(&p, T0, T0, 27, 1); /* MIPS_CONF5_MSAEN */
                uasm_il_beqz(&p, &r, T0, label_msa_1);
                uasm_i_nop(&p);
                uasm_i_cfcmsa(&p, T0, MSA_CSR);
                uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.msacsr),
                          K1);
                uasm_i_ctcmsa(&p, MSA_CSR, ZERO);
                uasm_l_msa_1(&l, p);
        }

        /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
        uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));
        uasm_i_and(&p, V0, V0, AT);
        uasm_i_lui(&p, AT, ST0_CU0 >> 16);
        uasm_i_or(&p, V0, V0, AT);
        uasm_i_mtc0(&p, V0, C0_STATUS);
        uasm_i_ehb(&p);

        /* Load up host GP */
        UASM_i_LW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);

        /* Need a stack before we can jump to "C" */
        UASM_i_LW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);

        /* Saved host state */
        uasm_i_addiu(&p, SP, SP, -(int)sizeof(struct pt_regs));

        /*
         * XXXKYMA do we need to load the host ASID, maybe not because the
         * kernel entries are marked GLOBAL, need to verify
         */

        /* Restore host scratch registers, as we'll have clobbered them */
        kvm_mips_build_restore_scratch(&p, K0, SP);

        /* Restore RDHWR access */
        UASM_i_LA_mostly(&p, K0, (long)&hwrena);
        uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
        uasm_i_mtc0(&p, K0, C0_HWRENA);

        /* Jump to handler */
        /*
         * XXXKYMA: not sure if this is safe, how large is the stack??
         * Now jump to the kvm_mips_handle_exit() to see if we can deal
         * with this in the kernel
         */
        UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit);
        uasm_i_jalr(&p, RA, T9);
        uasm_i_addiu(&p, SP, SP, -CALLFRAME_SIZ);

        uasm_resolve_relocs(relocs, labels);

        p = kvm_mips_build_ret_from_exit(p);

        return p;
}

/**
 * kvm_mips_build_ret_from_exit() - Assemble guest exit return handler.
 * @addr: Address to start writing code.
 *
 * Assemble the code to handle the return from kvm_mips_handle_exit(), either
 * resuming the guest or returning to the host depending on the return value.
 *
 * Returns: Next address after end of written function.
 */
static void *kvm_mips_build_ret_from_exit(void *addr)
{
        u32 *p = addr;
        struct uasm_label labels[2];
        struct uasm_reloc relocs[2];
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;

        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));

        /* Return from handler; make sure interrupts are disabled */
        uasm_i_di(&p, ZERO);
        uasm_i_ehb(&p);

        /*
         * XXXKYMA: k0/k1 could have been blown away if we processed
         * an exception while we were handling the exception from the
         * guest, reload k1
         */

        uasm_i_move(&p, K1, S1);
        uasm_i_addiu(&p, K1, K1, offsetof(struct kvm_vcpu, arch));

        /*
         * Check return value, should tell us if we are returning to the
         * host (handle I/O etc) or resuming the guest
         */
        uasm_i_andi(&p, T0, V0, RESUME_HOST);
        uasm_il_bnez(&p, &r, T0, label_return_to_host);
        uasm_i_nop(&p);

        p = kvm_mips_build_ret_to_guest(p);

        uasm_l_return_to_host(&l, p);
        p = kvm_mips_build_ret_to_host(p);

        uasm_resolve_relocs(relocs, labels);

        return p;
}

/**
 * kvm_mips_build_ret_to_guest() - Assemble code to return to the guest.
 * @addr: Address to start writing code.
 *
 * Assemble the code to handle return from the guest exit handler
 * (kvm_mips_handle_exit()) back to the guest.
 *
 * Returns: Next address after end of written function.
 */
static void *kvm_mips_build_ret_to_guest(void *addr)
{
        u32 *p = addr;

        /* Put the saved pointer to vcpu (s1) back into the scratch register */
        uasm_i_mtc0(&p, S1, scratch_vcpu[0], scratch_vcpu[1]);

        /* Load up the Guest EBASE to minimize the window where BEV is set */
        UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);

        /* Switch EBASE back to the one used by KVM */
        uasm_i_mfc0(&p, V1, C0_STATUS);
        uasm_i_lui(&p, AT, ST0_BEV >> 16);
        uasm_i_or(&p, K0, V1, AT);
        uasm_i_mtc0(&p, K0, C0_STATUS);
        uasm_i_ehb(&p);
        uasm_i_mtc0(&p, T0, C0_EBASE);

        /* Setup status register for running guest in UM */
        uasm_i_ori(&p, V1, V1, ST0_EXL | KSU_USER | ST0_IE);
        UASM_i_LA(&p, AT, ~(ST0_CU0 | ST0_MX));
        uasm_i_and(&p, V1, V1, AT);
        uasm_i_mtc0(&p, V1, C0_STATUS);
        uasm_i_ehb(&p);

        p = kvm_mips_build_enter_guest(p);

        return p;
}

/**
 * kvm_mips_build_ret_to_host() - Assemble code to return to the host.
 * @addr: Address to start writing code.
 *
 * Assemble the code to handle return from the guest exit handler
 * (kvm_mips_handle_exit()) back to the host, i.e. to the caller of the vcpu_run
 * function generated by kvm_mips_build_vcpu_run().
 *
 * Returns: Next address after end of written function.
 */
static void *kvm_mips_build_ret_to_host(void *addr)
{
        u32 *p = addr;
        unsigned int i;

        /* EBASE is already pointing to Linux */
        UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, host_stack), K1);
        uasm_i_addiu(&p, K1, K1, -(int)sizeof(struct pt_regs));

        /*
         * r2/v0 is the return code, shift it down by 2 (arithmetic)
         * to recover the err code
         */
        uasm_i_sra(&p, K0, V0, 2);
        uasm_i_move(&p, V0, K0);

        /* Load context saved on the host stack */
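        /*
         * This reloads the callee-saved registers, gp, sp and s8/fp that
         * vcpu_run saved on entry; ra is restored separately just before the
         * final jr.
         */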
        for (i = 16; i < 31; ++i) {
                if (i == 24)
                        i = 28;
                UASM_i_LW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
        }

        UASM_i_LW(&p, K0, offsetof(struct pt_regs, hi), K1);
        uasm_i_mthi(&p, K0);

        UASM_i_LW(&p, K0, offsetof(struct pt_regs, lo), K1);
        uasm_i_mtlo(&p, K0);

        /* Restore RDHWR access */
        UASM_i_LA_mostly(&p, K0, (long)&hwrena);
        uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
        uasm_i_mtc0(&p, K0, C0_HWRENA);

        /* Restore RA, which is the address we will return to */
        UASM_i_LW(&p, RA, offsetof(struct pt_regs, regs[RA]), K1);
        uasm_i_jr(&p, RA);
        uasm_i_nop(&p);

        return p;
}