/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Generation of main entry point for the guest, exception handling.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 *
 * Copyright (C) 2016 Imagination Technologies Ltd.
 */

#include <linux/kvm_host.h>
#include <asm/msa.h>
#include <asm/setup.h>
#include <asm/uasm.h>

/* Register names */
#define ZERO		0
#define AT		1
#define V0		2
#define V1		3
#define A0		4
#define A1		5

#if _MIPS_SIM == _MIPS_SIM_ABI32
#define T0		8
#define T1		9
#define T2		10
#define T3		11
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */

#if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32
#define T0		12
#define T1		13
#define T2		14
#define T3		15
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */

#define S0		16
#define S1		17
#define T9		25
#define K0		26
#define K1		27
#define GP		28
#define SP		29
#define RA		31

/* Some CP0 registers */
#define C0_HWRENA	7, 0
#define C0_BADVADDR	8, 0
#define C0_ENTRYHI	10, 0
#define C0_STATUS	12, 0
#define C0_CAUSE	13, 0
#define C0_EPC		14, 0
#define C0_EBASE	15, 1
#define C0_CONFIG5	16, 5
#define C0_DDATA_LO	28, 3
#define C0_ERROREPC	30, 0

#define CALLFRAME_SIZ	32

#ifdef CONFIG_64BIT
#define ST0_KX_IF_64	ST0_KX
#else
#define ST0_KX_IF_64	0
#endif

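/*
 * Scratch register assignments, as {CP0 register, select} pairs. The
 * defaults below (DDataLo, ErrorEPC) may not work on all cores, so
 * kvm_mips_entry_setup() replaces them with KScratch registers where
 * the CPU provides them.
 */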
static unsigned int scratch_vcpu[2] = { C0_DDATA_LO };
static unsigned int scratch_tmp[2] = { C0_ERROREPC };

enum label_id {
	label_fpu_1 = 1,
	label_msa_1,
	label_return_to_host,
	label_kernel_asid,
	label_exit_common,
};

UASM_L_LA(_fpu_1)
UASM_L_LA(_msa_1)
UASM_L_LA(_return_to_host)
UASM_L_LA(_kernel_asid)
UASM_L_LA(_exit_common)

static void *kvm_mips_build_enter_guest(void *addr);
static void *kvm_mips_build_ret_from_exit(void *addr);
static void *kvm_mips_build_ret_to_guest(void *addr);
static void *kvm_mips_build_ret_to_host(void *addr);

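/*
 * Rough sketch of how the builders below fit together; the actual
 * placement is chosen by the caller that allocates the guest exception
 * base, so the names here are illustrative only:
 *
 *	end = kvm_mips_build_exit(handler);		common exit handler
 *	kvm_mips_build_exception(vector, handler);	tiny stub at each
 *							exception vector
 *	kvm_mips_build_vcpu_run(entry);			host -> guest entry
 */
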
/**
 * kvm_mips_entry_setup() - Perform global setup for entry code.
 *
 * Perform global setup for entry code, such as choosing a scratch register.
 *
 * Returns:	0 on success.
 *		-errno on failure.
 */
int kvm_mips_entry_setup(void)
{
	/*
	 * We prefer to use KScratchN registers if they are available over the
	 * defaults above, which may not work on all cores. Only KScratch
	 * selects 2 and up are considered (hence the 0xfc mask).
	 */
	unsigned int kscratch_mask = cpu_data[0].kscratch_mask & 0xfc;

	/* Pick a scratch register for storing the VCPU pointer */
	if (kscratch_mask) {
		scratch_vcpu[0] = 31;
		scratch_vcpu[1] = ffs(kscratch_mask) - 1;
		kscratch_mask &= ~BIT(scratch_vcpu[1]);
	}

	/* Pick a scratch register to use as a temp for saving state */
	if (kscratch_mask) {
		scratch_tmp[0] = 31;
		scratch_tmp[1] = ffs(kscratch_mask) - 1;
		kscratch_mask &= ~BIT(scratch_tmp[1]);
	}

	return 0;
}

static void kvm_mips_build_save_scratch(u32 **p, unsigned int tmp,
					unsigned int frame)
{
	/* Save the VCPU scratch register value in cp0_epc of the stack frame */
	UASM_i_MFC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);
	UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);

	/*
	 * Save the temp scratch register value in cp0_cause of the stack
	 * frame, but only if it is a KScratch register, since those may hold
	 * live host state (the ErrorEPC fallback does not).
	 */
	if (scratch_tmp[0] == 31) {
		UASM_i_MFC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
		UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
	}
}

static void kvm_mips_build_restore_scratch(u32 **p, unsigned int tmp,
					   unsigned int frame)
{
	/*
	 * Restore host scratch register values saved by
	 * kvm_mips_build_save_scratch().
	 */
	UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);
	UASM_i_MTC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);

	if (scratch_tmp[0] == 31) {
		UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
		UASM_i_MTC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
	}
}

/**
 * build_set_exc_base() - Assemble code to write exception base address.
 * @p:		Code buffer pointer.
 * @reg:	Source register (generated code may set WG bit in @reg).
 *
 * Assemble code to modify the exception base address in the EBase register,
 * using the appropriately sized access and setting the WG bit if necessary.
 */
static inline void build_set_exc_base(u32 **p, unsigned int reg)
{
	if (cpu_has_ebase_wg) {
		/* Set WG so that all the bits get written */
		uasm_i_ori(p, reg, reg, MIPS_EBASE_WG);
		UASM_i_MTC0(p, reg, C0_EBASE);
	} else {
		uasm_i_mtc0(p, reg, C0_EBASE);
	}
}

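/*
 * For reference, with EBase.WG support on a 64-bit kernel the code emitted
 * above is roughly:
 *
 *	ori	reg, reg, MIPS_EBASE_WG
 *	dmtc0	reg, $15, 1		# EBase
 *
 * and a plain 32-bit "mtc0 reg, $15, 1" otherwise.
 */
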
/**
 * kvm_mips_build_vcpu_run() - Assemble function to start running a guest VCPU.
 * @addr:	Address to start writing code.
 *
 * Assemble the start of the vcpu_run function to run a guest VCPU. The
 * function conforms to the following prototype:
 *
 *	int vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
 *
 * The exit from the guest and return to the caller is handled by the code
 * generated by kvm_mips_build_ret_to_host().
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_vcpu_run(void *addr)
{
	u32 *p = addr;
	unsigned int i;

	/*
	 * A0: run
	 * A1: vcpu
	 */

	/* k0/k1 not being used in host kernel context */
	UASM_i_ADDIU(&p, K1, SP, -(int)sizeof(struct pt_regs));
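	/*
	 * Save the static (callee-saved) registers: s0-s7, gp, sp, s8/fp and
	 * ra. The loop below skips 24-27 (t8, t9, k0, k1).
	 */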
	for (i = 16; i < 32; ++i) {
		if (i == 24)
			i = 28;
		UASM_i_SW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
	}

	/* Save host status */
	uasm_i_mfc0(&p, V0, C0_STATUS);
	UASM_i_SW(&p, V0, offsetof(struct pt_regs, cp0_status), K1);

	/* Save scratch registers; they will be used to store the vcpu pointer etc. */
	kvm_mips_build_save_scratch(&p, V1, K1);

	/* The VCPU scratch register holds the vcpu pointer */
	UASM_i_MTC0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);

	/* Offset into vcpu->arch */
	UASM_i_ADDIU(&p, K1, A1, offsetof(struct kvm_vcpu, arch));

	/*
	 * Save the host stack pointer in the VCPU; it is used for exception
	 * processing when we exit from the guest.
	 */
	UASM_i_SW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);

	/* Save the kernel gp as well */
	UASM_i_SW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);

	/*
	 * Set up the status register for running the guest in UM; interrupts
	 * are disabled.
	 */
	UASM_i_LA(&p, K0, ST0_EXL | KSU_USER | ST0_BEV | ST0_KX_IF_64);
	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);
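	/*
	 * Note: EXL keeps us in kernel mode despite KSU_USER above; the eret
	 * emitted by kvm_mips_build_enter_guest() is what drops into the
	 * guest.
	 */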

	/* Load up the new EBASE */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
	build_set_exc_base(&p, K0);

	/*
	 * Now that the new EBASE has been loaded, unset BEV, keep the
	 * interrupt mask as it was, and make sure that timer interrupts are
	 * enabled.
	 */
	uasm_i_addiu(&p, K0, ZERO, ST0_EXL | KSU_USER | ST0_IE | ST0_KX_IF_64);
	uasm_i_andi(&p, V0, V0, ST0_IM);
	uasm_i_or(&p, K0, K0, V0);
	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);

	p = kvm_mips_build_enter_guest(p);

	return p;
}

/**
 * kvm_mips_build_enter_guest() - Assemble code to resume guest execution.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to resume guest execution. This code is common between the
 * initial entry into the guest from the host, and returning from the exit
 * handler back to the guest.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_enter_guest(void *addr)
{
	u32 *p = addr;
	unsigned int i;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Set Guest EPC */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1);
	UASM_i_MTC0(&p, T0, C0_EPC);

	/* Choose the ASID for the guest kernel or guest user context */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, cop0), K1);
	UASM_i_LW(&p, T0, offsetof(struct mips_coproc, reg[MIPS_CP0_STATUS][0]),
		  T0);
	uasm_i_andi(&p, T0, T0, KSU_USER | ST0_ERL | ST0_EXL);
	uasm_i_xori(&p, T0, T0, KSU_USER);
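	/*
	 * T0 is now zero iff the guest was in user mode. The kernel-ASID
	 * pointer is computed in the bnez's delay slot below, so it is set
	 * either way; in the user case the branch falls through and the next
	 * instruction overwrites T1 with the user-ASID pointer.
	 */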
	uasm_il_bnez(&p, &r, T0, label_kernel_asid);
	 UASM_i_ADDIU(&p, T1, K1,
		      offsetof(struct kvm_vcpu_arch, guest_kernel_asid));
	/* else user */
	 UASM_i_ADDIU(&p, T1, K1,
		      offsetof(struct kvm_vcpu_arch, guest_user_asid));
	uasm_l_kernel_asid(&l, p);

	/* T1 contains the base of the ASID array; get the CPU id (smp_processor_id) */
	uasm_i_lw(&p, T2, offsetof(struct thread_info, cpu), GP);
	/* x4: scale the CPU id to a 32-bit array index */
	uasm_i_sll(&p, T2, T2, 2);
	UASM_i_ADDU(&p, T3, T1, T2);
	uasm_i_lw(&p, K0, 0, T3);
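	/*
	 * With CONFIG_MIPS_ASID_BITS_VARIABLE the ASID mask can differ per
	 * CPU, so it must be loaded from this CPU's cpu_data[] entry instead
	 * of using the fixed MIPS_ENTRYHI_ASID mask.
	 */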
#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
	/*
	 * T2 holds cpu * 4 from above; multiply by
	 * sizeof(struct cpuinfo_mips)/4 to get cpu * sizeof(struct cpuinfo_mips).
	 */
	uasm_i_addiu(&p, T3, ZERO, sizeof(struct cpuinfo_mips)/4);
	uasm_i_mul(&p, T2, T2, T3);

	UASM_i_LA_mostly(&p, AT, (long)&cpu_data[0].asid_mask);
	UASM_i_ADDU(&p, AT, AT, T2);
	UASM_i_LW(&p, T2, uasm_rel_lo((long)&cpu_data[0].asid_mask), AT);
	uasm_i_and(&p, K0, K0, T2);
#else
	uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID);
#endif
	uasm_i_mtc0(&p, K0, C0_ENTRYHI);
	uasm_i_ehb(&p);

	/* Disable RDHWR access so that guest rdhwr instructions trap for emulation */
	uasm_i_mtc0(&p, ZERO, C0_HWRENA);

	/* Load the guest context from the VCPU and return */
	for (i = 1; i < 32; ++i) {
		/* Guest k0/k1 loaded later */
		if (i == K0 || i == K1)
			continue;
		UASM_i_LW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
	}

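/* MIPS r6 dropped the hi/lo registers from the base ISA */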
#ifndef CONFIG_CPU_MIPSR6
	/* Restore hi/lo */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, hi), K1);
	uasm_i_mthi(&p, K0);

	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, lo), K1);
	uasm_i_mtlo(&p, K0);
#endif

	/* Restore the guest's k0/k1 registers */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);
	UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);

	/* Jump to guest */
	uasm_i_eret(&p);

	uasm_resolve_relocs(relocs, labels);

	return p;
}

/**
 * kvm_mips_build_exception() - Assemble first level guest exception handler.
 * @addr:	Address to start writing code.
 * @handler:	Address of common handler (within range of @addr).
 *
 * Assemble exception vector code for guest execution. The generated vector
 * will branch to the common exception handler generated by
 * kvm_mips_build_exit().
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_exception(void *addr, void *handler)
{
	u32 *p = addr;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Save guest k1 into scratch register */
	UASM_i_MTC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);

	/* Get the VCPU pointer from the VCPU scratch register */
	UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
	UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));

	/* Save guest k0 into VCPU structure */
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);

	/* Branch to the common handler */
	uasm_il_b(&p, &r, label_exit_common);
	 uasm_i_nop(&p);
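	/*
	 * The stub above is all that sits at each exception vector: it is
	 * kept tiny and uses a PC-relative branch, so a single copy of the
	 * common exit handler can serve every vector.
	 */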

	uasm_l_exit_common(&l, handler);
	uasm_resolve_relocs(relocs, labels);

	return p;
}

/**
 * kvm_mips_build_exit() - Assemble common guest exit handler.
 * @addr:	Address to start writing code.
 *
 * Assemble the generic guest exit handling code. This is called by the
 * exception vectors (generated by kvm_mips_build_exception()), and calls
 * kvm_mips_handle_exit(), then either resumes the guest or returns to the host
 * depending on the return value.
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_exit(void *addr)
{
	u32 *p = addr;
	unsigned int i;
	struct uasm_label labels[3];
	struct uasm_reloc relocs[3];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/*
	 * Generic guest exception handler. We end up here when the guest
	 * does something that causes a trap to kernel mode.
	 *
	 * Both k0/k1 registers will have already been saved (k0 into the vcpu
	 * structure, and k1 into the scratch_tmp register).
	 *
	 * The k1 register will already contain the kvm_vcpu_arch pointer.
	 */

	/* Start saving guest context to the VCPU */
	for (i = 0; i < 32; ++i) {
		/* Guest k0/k1 saved later */
		if (i == K0 || i == K1)
			continue;
		UASM_i_SW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
	}

#ifndef CONFIG_CPU_MIPSR6
	/* We need to save hi/lo and restore them on the way out */
	uasm_i_mfhi(&p, T0);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, hi), K1);

	uasm_i_mflo(&p, T0);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, lo), K1);
#endif

	/* Finally save guest k1 to VCPU */
	uasm_i_ehb(&p);
	UASM_i_MFC0(&p, T0, scratch_tmp[0], scratch_tmp[1]);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);

	/* Now that context has been saved, we can use other registers */

	/* Restore vcpu */
	UASM_i_MFC0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);
	/* Also keep vcpu in callee-saved s1, which survives the call below */
	uasm_i_move(&p, S1, A1);

	/* Restore run (vcpu->run) */
	UASM_i_LW(&p, A0, offsetof(struct kvm_vcpu, run), A1);
	/* Keep the run pointer in callee-saved s0, preserved across the call */
	uasm_i_move(&p, S0, A0);

	/*
	 * Save host-level EPC, BadVAddr and Cause to the VCPU; they are
	 * needed to process the exception.
	 */
	UASM_i_MFC0(&p, K0, C0_EPC);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, pc), K1);

	UASM_i_MFC0(&p, K0, C0_BADVADDR);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_badvaddr),
		  K1);

	uasm_i_mfc0(&p, K0, C0_CAUSE);
	uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_cause), K1);

	/* Now restore the host state just enough to run the handlers */

	/* Switch EBASE to the one used by Linux */
	/* load up the host EBASE */
	uasm_i_mfc0(&p, V0, C0_STATUS);

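	/*
	 * Setting BEV forces exceptions to the fixed bootstrap vectors while
	 * EBASE is being switched from the guest's to the host's.
	 */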
	uasm_i_lui(&p, AT, ST0_BEV >> 16);
	uasm_i_or(&p, K0, V0, AT);

	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);

	UASM_i_LA_mostly(&p, K0, (long)&ebase);
	UASM_i_LW(&p, K0, uasm_rel_lo((long)&ebase), K0);
	build_set_exc_base(&p, K0);

	if (raw_cpu_has_fpu) {
		/*
		 * If FPU is enabled, save FCR31 and clear it so that later
		 * ctc1's don't trigger FPE for pending exceptions.
		 */
		uasm_i_lui(&p, AT, ST0_CU1 >> 16);
		uasm_i_and(&p, V1, V0, AT);
		uasm_il_beqz(&p, &r, V1, label_fpu_1);
		 uasm_i_nop(&p);
		uasm_i_cfc1(&p, T0, 31);
		uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.fcr31),
			  K1);
		uasm_i_ctc1(&p, ZERO, 31);
		uasm_l_fpu_1(&l, p);
	}

	if (cpu_has_msa) {
		/*
		 * If MSA is enabled, save MSACSR and clear it so that later
		 * instructions don't trigger MSAFPE for pending exceptions.
		 */
		uasm_i_mfc0(&p, T0, C0_CONFIG5);
		uasm_i_ext(&p, T0, T0, 27, 1); /* MIPS_CONF5_MSAEN */
		uasm_il_beqz(&p, &r, T0, label_msa_1);
		 uasm_i_nop(&p);
		uasm_i_cfcmsa(&p, T0, MSA_CSR);
		uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.msacsr),
			  K1);
		uasm_i_ctcmsa(&p, MSA_CSR, ZERO);
		uasm_l_msa_1(&l, p);
	}

	/*
	 * Now that the new EBASE has been loaded, unset BEV and KSU_USER,
	 * clear EXL and IE, and set CU0 so we can access coprocessor 0
	 * registers.
	 */
	uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));
	uasm_i_and(&p, V0, V0, AT);
	uasm_i_lui(&p, AT, ST0_CU0 >> 16);
	uasm_i_or(&p, V0, V0, AT);
	uasm_i_mtc0(&p, V0, C0_STATUS);
	uasm_i_ehb(&p);

	/* Load up host GP */
	UASM_i_LW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);

	/* Need a stack before we can jump to "C" */
	UASM_i_LW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);

	/* Saved host state */
	UASM_i_ADDIU(&p, SP, SP, -(int)sizeof(struct pt_regs));

	/*
	 * XXXKYMA do we need to load the host ASID, maybe not because the
	 * kernel entries are marked GLOBAL, need to verify
	 */

	/* Restore host scratch registers, as we'll have clobbered them */
	kvm_mips_build_restore_scratch(&p, K0, SP);

	/* Restore RDHWR access */
	UASM_i_LA_mostly(&p, K0, (long)&hwrena);
	uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
	uasm_i_mtc0(&p, K0, C0_HWRENA);

	/* Jump to handler */
	/*
	 * XXXKYMA: not sure if this is safe, how large is the stack??
	 * Now jump to the kvm_mips_handle_exit() to see if we can deal
	 * with this in the kernel
	 */
	UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit);
	uasm_i_jalr(&p, RA, T9);
	 UASM_i_ADDIU(&p, SP, SP, -CALLFRAME_SIZ);
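	/* The call frame is carved out in the delay slot of the jalr above */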

	uasm_resolve_relocs(relocs, labels);

	p = kvm_mips_build_ret_from_exit(p);

	return p;
}

/**
 * kvm_mips_build_ret_from_exit() - Assemble guest exit return handler.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to handle the return from kvm_mips_handle_exit(), either
 * resuming the guest or returning to the host depending on the return value.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_ret_from_exit(void *addr)
{
	u32 *p = addr;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Return from handler; make sure interrupts are disabled */
	uasm_i_di(&p, ZERO);
	uasm_i_ehb(&p);

	/*
	 * XXXKYMA: k0/k1 could have been blown away if we processed
	 * an exception while we were handling the exception from the
	 * guest, reload k1
	 */

	uasm_i_move(&p, K1, S1);
	UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));

	/*
	 * Check return value; it tells us whether we are returning to the
	 * host (to handle I/O etc.) or resuming the guest.
	 */
	uasm_i_andi(&p, T0, V0, RESUME_HOST);
	uasm_il_bnez(&p, &r, T0, label_return_to_host);
	 uasm_i_nop(&p);

	p = kvm_mips_build_ret_to_guest(p);

	uasm_l_return_to_host(&l, p);
	p = kvm_mips_build_ret_to_host(p);

	uasm_resolve_relocs(relocs, labels);

	return p;
}

/**
 * kvm_mips_build_ret_to_guest() - Assemble code to return to the guest.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to handle return from the guest exit handler
 * (kvm_mips_handle_exit()) back to the guest.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_ret_to_guest(void *addr)
{
	u32 *p = addr;

	/* Put the saved pointer to vcpu (s1) back into the scratch register */
	UASM_i_MTC0(&p, S1, scratch_vcpu[0], scratch_vcpu[1]);

	/* Load up the guest EBASE to minimize the window where BEV is set */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);

	/* Switch EBASE back to the one used by KVM */
	uasm_i_mfc0(&p, V1, C0_STATUS);
	uasm_i_lui(&p, AT, ST0_BEV >> 16);
	uasm_i_or(&p, K0, V1, AT);
	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);
	build_set_exc_base(&p, T0);

	/* Setup status register for running guest in UM */
	uasm_i_ori(&p, V1, V1, ST0_EXL | KSU_USER | ST0_IE);
	UASM_i_LA(&p, AT, ~(ST0_CU0 | ST0_MX));
	uasm_i_and(&p, V1, V1, AT);
	uasm_i_mtc0(&p, V1, C0_STATUS);
	uasm_i_ehb(&p);

	p = kvm_mips_build_enter_guest(p);

	return p;
}

/**
 * kvm_mips_build_ret_to_host() - Assemble code to return to the host.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to handle return from the guest exit handler
 * (kvm_mips_handle_exit()) back to the host, i.e. to the caller of the
 * vcpu_run function generated by kvm_mips_build_vcpu_run().
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_ret_to_host(void *addr)
{
	u32 *p = addr;
	unsigned int i;

	/* EBASE is already pointing to Linux */
	UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, host_stack), K1);
	UASM_i_ADDIU(&p, K1, K1, -(int)sizeof(struct pt_regs));

	/*
	 * r2/v0 is the return code: the low two bits carry the RESUME_* flags,
	 * so shift it down by 2 (arithmetic) to recover the err code.
	 */
	uasm_i_sra(&p, K0, V0, 2);
	uasm_i_move(&p, V0, K0);

	/* Load context saved on the host stack (ra is restored separately below) */
	for (i = 16; i < 31; ++i) {
		if (i == 24)
			i = 28;
		UASM_i_LW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
	}

	/* Restore RDHWR access */
	UASM_i_LA_mostly(&p, K0, (long)&hwrena);
	uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
	uasm_i_mtc0(&p, K0, C0_HWRENA);

	/* Restore RA, which is the address we will return to */
	UASM_i_LW(&p, RA, offsetof(struct pt_regs, regs[RA]), K1);
	uasm_i_jr(&p, RA);
	 uasm_i_nop(&p);

	return p;
}