2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * Main entry point for the guest, exception handling.
8 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9 * Authors: Sanjay Lal <sanjayl@kymasys.com>
13 #include <asm/asmmacro.h>
14 #include <asm/regdef.h>
15 #include <asm/mipsregs.h>
16 #include <asm/stackframe.h>
17 #include <asm/asm-offsets.h>
20 #define MIPSX(name) mips32_ ## name
21 #define CALLFRAME_SIZ 32
25 * exception vector entrypoint
27 #define VECTOR(x, regmask) \
31 #define VECTOR_END(x) \
34 /* Overload, Danger Will Robinson!! */
35 #define PT_HOST_USERLOCAL PT_EPC
37 #define CP0_DDATA_LO $28,3
40 #define RESUME_FLAG_HOST (1<<1) /* Resume host? */
42 #define RESUME_GUEST 0
43 #define RESUME_HOST RESUME_FLAG_HOST
46 * __kvm_mips_vcpu_run: entry point to the guest
/*
 * Guest entry point (trap-and-emulate KVM):
 *   int __kvm_mips_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
 * a0 = run, a1 = vcpu per the MIPS C calling convention.
 *
 * Saves the host's callee-saved state in a pt_regs-sized frame below sp,
 * stashes host sp/gp in vcpu->arch, switches Status/EBASE/ASID over to
 * the guest, restores the guest GPR file from the VCPU and enters the
 * guest.  k0/k1 are free for scratch use here: the host kernel does not
 * keep live values in them across this call.
 *
 * NOTE(review): the gaps in the embedded line numbers show that several
 * original instructions are not visible in this chunk (e.g. the mfc0
 * that fills v0 before it is stored, the mtc0/eret sequences).  Comments
 * below describe only the instructions actually shown.
 */
52 FEXPORT(__kvm_mips_vcpu_run)
53 /* k0/k1 not being used in host kernel context */
/* k1 = scratch pointer to a pt_regs save area just below the host stack */
54 INT_ADDIU k1, sp, -PT_SIZE
/*
 * Save only the callee-saved GPRs (s0-s7, gp, sp, fp/s8, ra); everything
 * else is caller-saved and the C caller does not expect it preserved.
 */
55 LONG_S $16, PT_R16(k1)
56 LONG_S $17, PT_R17(k1)
57 LONG_S $18, PT_R18(k1)
58 LONG_S $19, PT_R19(k1)
59 LONG_S $20, PT_R20(k1)
60 LONG_S $21, PT_R21(k1)
61 LONG_S $22, PT_R22(k1)
62 LONG_S $23, PT_R23(k1)
64 LONG_S $28, PT_R28(k1)
65 LONG_S $29, PT_R29(k1)
66 LONG_S $30, PT_R30(k1)
67 LONG_S $31, PT_R31(k1)
75 /* Save host status */
/* NOTE(review): v0 presumably holds CP0_Status here, loaded by an
 * instruction not visible in this chunk — confirm against full source. */
77 LONG_S v0, PT_STATUS(k1)
79 /* Save DDATA_LO, will be used to store pointer to vcpu */
/* PT_HOST_USERLOCAL overloads the PT_EPC slot (see #define above). */
81 LONG_S v1, PT_HOST_USERLOCAL(k1)
83 /* DDATA_LO has pointer to vcpu */
86 /* Offset into vcpu->arch */
/* From here on, k1 = &vcpu->arch, the base for all VCPU_* offsets. */
87 INT_ADDIU k1, a1, VCPU_HOST_ARCH
90 * Save the host stack to VCPU, used for exception processing
91 * when we exit from the Guest
93 LONG_S sp, VCPU_HOST_STACK(k1)
95 /* Save the kernel gp as well */
96 LONG_S gp, VCPU_HOST_GP(k1)
99 * Setup status register for running the guest in UM, interrupts
/* EXL set + KSU_USER: eret will drop to user mode; BEV kept while the
 * guest EBASE is not yet installed so vectors stay at the boot address. */
102 li k0, (ST0_EXL | KSU_USER | ST0_BEV)
106 /* load up the new EBASE */
107 LONG_L k0, VCPU_GUEST_EBASE(k1)
111 * Now that the new EBASE has been loaded, unset BEV, set
112 * interrupt mask as it was but make sure that timer interrupts
115 li k0, (ST0_EXL | KSU_USER | ST0_IE)
/* t0 = guest resume PC; presumably written to CP0_EPC by code not shown */
122 LONG_L t0, VCPU_PC(k1)
125 FEXPORT(__kvm_mips_load_asid)
126 /* Set the ASID for the Guest Kernel */
/* Inspect the *guest* CP0 Status to decide guest kernel vs user mode. */
127 PTR_L t0, VCPU_COP0(k1)
128 LONG_L t0, COP0_STATUS(t0)
129 andi t0, KSU_USER | ST0_ERL | ST0_EXL
/* NOTE(review): the branch condition relies on an intervening xori not
 * visible here; label 1: is also outside this view. */
131 bnez t0, 1f /* If kernel */
132 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
133 INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */
135 /* t1: contains the base of the ASID array, need to get the cpu id */
136 LONG_L t2, TI_CPU($28) /* smp_processor_id */
137 INT_SLL t2, t2, 2 /* x4 */
140 #ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
/* Per-CPU ASID mask lives in cpu_data[cpu]; index is scaled by
 * sizeof(struct cpuinfo_mips) (t2 was already x4, hence /4 here). */
141 li t3, CPUINFO_SIZE/4
142 mul t2, t2, t3 /* x sizeof(struct cpuinfo_mips)/4 */
143 LONG_L t2, (cpu_data + CPUINFO_ASID_MASK)(t2)
/* Keep only the EntryHi ASID field before programming CP0_EntryHi. */
146 andi k0, k0, MIPS_ENTRYHI_ASID
151 /* Disable RDHWR access */
/* Guest must not read hardware registers directly; it will trap instead. */
152 mtc0 zero, CP0_HWRENA
155 /* Now load up the Guest Context from VCPU */
156 LONG_L $1, VCPU_R1(k1)
157 LONG_L $2, VCPU_R2(k1)
158 LONG_L $3, VCPU_R3(k1)
160 LONG_L $4, VCPU_R4(k1)
161 LONG_L $5, VCPU_R5(k1)
162 LONG_L $6, VCPU_R6(k1)
163 LONG_L $7, VCPU_R7(k1)
165 LONG_L $8, VCPU_R8(k1)
166 LONG_L $9, VCPU_R9(k1)
167 LONG_L $10, VCPU_R10(k1)
168 LONG_L $11, VCPU_R11(k1)
169 LONG_L $12, VCPU_R12(k1)
170 LONG_L $13, VCPU_R13(k1)
171 LONG_L $14, VCPU_R14(k1)
172 LONG_L $15, VCPU_R15(k1)
173 LONG_L $16, VCPU_R16(k1)
174 LONG_L $17, VCPU_R17(k1)
175 LONG_L $18, VCPU_R18(k1)
176 LONG_L $19, VCPU_R19(k1)
177 LONG_L $20, VCPU_R20(k1)
178 LONG_L $21, VCPU_R21(k1)
179 LONG_L $22, VCPU_R22(k1)
180 LONG_L $23, VCPU_R23(k1)
181 LONG_L $24, VCPU_R24(k1)
182 LONG_L $25, VCPU_R25(k1)
184 /* k0/k1 loaded up later */
186 LONG_L $28, VCPU_R28(k1)
187 LONG_L $29, VCPU_R29(k1)
188 LONG_L $30, VCPU_R30(k1)
189 LONG_L $31, VCPU_R31(k1)
/* Guest LO/HI staged through k0; the mtlo/mthi are not visible here. */
192 LONG_L k0, VCPU_LO(k1)
195 LONG_L k0, VCPU_HI(k1)
198 FEXPORT(__kvm_mips_load_k0k1)
199 /* Restore the guest's k0/k1 registers */
/* After this point no VCPU field is reachable — k1 now holds guest $27.
 * The eret into the guest must follow with no further memory accesses. */
200 LONG_L k0, VCPU_R26(k1)
201 LONG_L k1, VCPU_R27(k1)
205 EXPORT(__kvm_mips_vcpu_run_end)
/*
 * Tiny exception vector stub copied to the guest EBASE vectors.  It may
 * only use k0/k1 (guest GPRs are still live), so guest k0 is parked in
 * ERROREPC and guest k1 in a scratch word at EBASE + 0x3000, then control
 * jumps to the real handler installed at EBASE + 0x2000.
 */
207 VECTOR(MIPSX(exception), unknown)
208 /* Find out what mode we came from and jump to the proper handler. */
209 mtc0 k0, CP0_ERROREPC #01: Save guest k0
212 mfc0 k0, CP0_EBASE #02: Get EBASE
213 INT_SRL k0, k0, 10 #03: Get rid of CPUNum
214 INT_SLL k0, k0, 10 #04
215 LONG_S k1, 0x3000(k0) #05: Save k1 @ offset 0x3000
216 INT_ADDIU k0, k0, 0x2000 #06: Exception handler is
217 # installed @ offset 0x2000
218 j k0 #07: jump to the function
219 nop #08: branch delay slot
220 VECTOR_END(MIPSX(exceptionEnd))
221 .end MIPSX(exception)
224 * Generic Guest exception handler. We end up here when the guest
225 * does something that causes a trap to kernel mode.
/*
 * Common guest exception handler, reached from the vector stub above.
 * On entry: guest k0 is in CP0_ERROREPC, guest k1 is at EBASE + 0x3000,
 * and CP0_DDATA_LO holds the vcpu pointer saved at guest entry.
 *
 * It saves the full guest context into vcpu->arch, restores enough host
 * state (EBASE, gp, sp) to call kvm_mips_handle_exit() in C, then either
 * re-enters the guest or returns to the host depending on RESUME_HOST.
 *
 * NOTE(review): gaps in the embedded line numbers mean several original
 * instructions (mfhi/mflo, mfc0s feeding k0/t0, erets, labels) are not
 * visible in this chunk; comments describe only what is shown.
 */
227 NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
228 /* Get the VCPU pointer from DDATA_LO */
229 mfc0 k1, CP0_DDATA_LO
230 INT_ADDIU k1, k1, VCPU_HOST_ARCH
232 /* Start saving Guest context to VCPU */
233 LONG_S $0, VCPU_R0(k1)
234 LONG_S $1, VCPU_R1(k1)
235 LONG_S $2, VCPU_R2(k1)
236 LONG_S $3, VCPU_R3(k1)
237 LONG_S $4, VCPU_R4(k1)
238 LONG_S $5, VCPU_R5(k1)
239 LONG_S $6, VCPU_R6(k1)
240 LONG_S $7, VCPU_R7(k1)
241 LONG_S $8, VCPU_R8(k1)
242 LONG_S $9, VCPU_R9(k1)
243 LONG_S $10, VCPU_R10(k1)
244 LONG_S $11, VCPU_R11(k1)
245 LONG_S $12, VCPU_R12(k1)
246 LONG_S $13, VCPU_R13(k1)
247 LONG_S $14, VCPU_R14(k1)
248 LONG_S $15, VCPU_R15(k1)
249 LONG_S $16, VCPU_R16(k1)
250 LONG_S $17, VCPU_R17(k1)
251 LONG_S $18, VCPU_R18(k1)
252 LONG_S $19, VCPU_R19(k1)
253 LONG_S $20, VCPU_R20(k1)
254 LONG_S $21, VCPU_R21(k1)
255 LONG_S $22, VCPU_R22(k1)
256 LONG_S $23, VCPU_R23(k1)
257 LONG_S $24, VCPU_R24(k1)
258 LONG_S $25, VCPU_R25(k1)
260 /* Guest k0/k1 saved later */
262 LONG_S $28, VCPU_R28(k1)
263 LONG_S $29, VCPU_R29(k1)
264 LONG_S $30, VCPU_R30(k1)
265 LONG_S $31, VCPU_R31(k1)
269 /* We need to save hi/lo and restore them on the way out */
/* NOTE(review): t0 presumably filled by mfhi/mflo not visible here. */
271 LONG_S t0, VCPU_HI(k1)
274 LONG_S t0, VCPU_LO(k1)
276 /* Finally save guest k0/k1 to VCPU */
/* Guest k0 was parked in ERROREPC by the vector stub. */
277 mfc0 t0, CP0_ERROREPC
278 LONG_S t0, VCPU_R26(k1)
280 /* Get GUEST k1 and save it in VCPU */
/* Reads back the scratch word at EBASE + 0x3000 written by the stub;
 * t0 is presumably pointed at EBASE by instructions not shown here. */
284 LONG_L t0, 0x3000(t0)
285 LONG_S t0, VCPU_R27(k1)
287 /* Now that context has been saved, we can use other registers */
290 mfc0 a1, CP0_DDATA_LO
293 /* Restore run (vcpu->run) */
/* a0/a1 now form the (run, vcpu) argument pair for the C exit handler. */
294 LONG_L a0, VCPU_RUN(a1)
295 /* Save pointer to run in s0, will be saved by the compiler */
299 * Save Host level EPC, BadVaddr and Cause to VCPU, useful to
300 * process the exception
303 LONG_S k0, VCPU_PC(k1)
305 mfc0 k0, CP0_BADVADDR
306 LONG_S k0, VCPU_HOST_CP0_BADVADDR(k1)
309 LONG_S k0, VCPU_HOST_CP0_CAUSE(k1)
312 LONG_S k0, VCPU_HOST_ENTRYHI(k1)
314 /* Now restore the host state just enough to run the handlers */
316 /* Switch EBASE to the one used by Linux */
317 /* load up the host EBASE */
325 LONG_L k0, VCPU_HOST_EBASE(k1)
329 * If FPU is enabled, save FCR31 and clear it so that later ctc1's don't
330 * trigger FPE for pending exceptions.
338 sw t0, VCPU_FCR31(k1)
343 #ifdef CONFIG_CPU_HAS_MSA
345 * If MSA is enabled, save MSACSR and clear it so that later
346 * instructions don't trigger MSAFPE for pending exceptions.
/* Probe Config3.MSAP, then Config5.MSAEn, before touching MSA state. */
349 ext t0, t0, 28, 1 /* MIPS_CONF3_MSAP */
353 ext t0, t0, 27, 1 /* MIPS_CONF5_MSAEN */
357 sw t0, VCPU_MSA_CSR(k1)
358 _ctcmsa MSA_CSR, zero
362 /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
363 and v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
368 /* Load up host GP */
369 LONG_L gp, VCPU_HOST_GP(k1)
371 /* Need a stack before we can jump to "C" */
372 LONG_L sp, VCPU_HOST_STACK(k1)
374 /* Saved host state */
/* Point sp at the pt_regs frame built by __kvm_mips_vcpu_run. */
375 INT_ADDIU sp, sp, -PT_SIZE
378 * XXXKYMA do we need to load the host ASID, maybe not because the
379 * kernel entries are marked GLOBAL, need to verify
382 /* Restore host DDATA_LO */
383 LONG_L k0, PT_HOST_USERLOCAL(sp)
384 mtc0 k0, CP0_DDATA_LO
386 /* Restore RDHWR access */
/* 0x2000000F re-enables the HWRENA registers the host kernel allows. */
387 PTR_LI k0, 0x2000000F
390 /* Jump to handler */
391 FEXPORT(__kvm_mips_jump_to_handler)
393 * XXXKYMA: not sure if this is safe, how large is the stack??
394 * Now jump to the kvm_mips_handle_exit() to see if we can deal
395 * with this in the kernel
/* t9 must hold the callee's address per the MIPS PIC calling convention. */
397 PTR_LA t9, kvm_mips_handle_exit
399 INT_ADDIU sp, sp, -CALLFRAME_SIZ /* BD Slot */
401 /* Return from handler Make sure interrupts are disabled */
406 * XXXKYMA: k0/k1 could have been blown away if we processed
407 * an exception while we were handling the exception from the
/* Re-derive k1 = &vcpu->arch; k0/k1 cannot be trusted across the call. */
412 INT_ADDIU k1, k1, VCPU_HOST_ARCH
415 * Check return value, should tell us if we are returning to the
416 * host (handle I/O etc) or resuming the guest
/* v0 is kvm_mips_handle_exit()'s return; RESUME_HOST bit selects path. */
418 andi t0, v0, RESUME_HOST
419 bnez t0, __kvm_mips_return_to_host
422 __kvm_mips_return_to_guest:
423 /* Put the saved pointer to vcpu (s1) back into the DDATA_LO Register */
424 mtc0 s1, CP0_DDATA_LO
426 /* Load up the Guest EBASE to minimize the window where BEV is set */
427 LONG_L t0, VCPU_GUEST_EBASE(k1)
429 /* Switch EBASE back to the one used by KVM */
436 /* Setup status register for running guest in UM */
437 or v1, v1, (ST0_EXL | KSU_USER | ST0_IE)
/* Strip CU0/MX so the guest cannot touch CP0 or MSA/DSP directly. */
438 and v1, v1, ~(ST0_CU0 | ST0_MX)
443 LONG_L t0, VCPU_PC(k1)
446 /* Set the ASID for the Guest Kernel */
/* Mirrors the ASID-selection sequence in __kvm_mips_vcpu_run. */
447 PTR_L t0, VCPU_COP0(k1)
448 LONG_L t0, COP0_STATUS(t0)
449 andi t0, KSU_USER | ST0_ERL | ST0_EXL
451 bnez t0, 1f /* If kernel */
452 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
453 INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */
455 /* t1: contains the base of the ASID array, need to get the cpu id */
456 LONG_L t2, TI_CPU($28) /* smp_processor_id */
457 INT_SLL t2, t2, 2 /* x4 */
460 #ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
461 li t3, CPUINFO_SIZE/4
462 mul t2, t2, t3 /* x sizeof(struct cpuinfo_mips)/4 */
463 LONG_L t2, (cpu_data + CPUINFO_ASID_MASK)(t2)
466 andi k0, k0, MIPS_ENTRYHI_ASID
471 /* Disable RDHWR access */
472 mtc0 zero, CP0_HWRENA
475 /* load the guest context from VCPU and return */
476 LONG_L $0, VCPU_R0(k1)
477 LONG_L $1, VCPU_R1(k1)
478 LONG_L $2, VCPU_R2(k1)
479 LONG_L $3, VCPU_R3(k1)
480 LONG_L $4, VCPU_R4(k1)
481 LONG_L $5, VCPU_R5(k1)
482 LONG_L $6, VCPU_R6(k1)
483 LONG_L $7, VCPU_R7(k1)
484 LONG_L $8, VCPU_R8(k1)
485 LONG_L $9, VCPU_R9(k1)
486 LONG_L $10, VCPU_R10(k1)
487 LONG_L $11, VCPU_R11(k1)
488 LONG_L $12, VCPU_R12(k1)
489 LONG_L $13, VCPU_R13(k1)
490 LONG_L $14, VCPU_R14(k1)
491 LONG_L $15, VCPU_R15(k1)
492 LONG_L $16, VCPU_R16(k1)
493 LONG_L $17, VCPU_R17(k1)
494 LONG_L $18, VCPU_R18(k1)
495 LONG_L $19, VCPU_R19(k1)
496 LONG_L $20, VCPU_R20(k1)
497 LONG_L $21, VCPU_R21(k1)
498 LONG_L $22, VCPU_R22(k1)
499 LONG_L $23, VCPU_R23(k1)
500 LONG_L $24, VCPU_R24(k1)
501 LONG_L $25, VCPU_R25(k1)
503 /* k0/k1 loaded later */
504 LONG_L $28, VCPU_R28(k1)
505 LONG_L $29, VCPU_R29(k1)
506 LONG_L $30, VCPU_R30(k1)
507 LONG_L $31, VCPU_R31(k1)
509 FEXPORT(__kvm_mips_skip_guest_restore)
/* HI/LO staged through k0 (mthi/mtlo not visible in this chunk). */
510 LONG_L k0, VCPU_HI(k1)
513 LONG_L k0, VCPU_LO(k1)
/* Last loads before re-entering the guest: k1 dies here. */
516 LONG_L k0, VCPU_R26(k1)
517 LONG_L k1, VCPU_R27(k1)
522 __kvm_mips_return_to_host:
523 /* EBASE is already pointing to Linux */
/* Recover the pt_regs frame address from the saved host stack pointer. */
524 LONG_L k1, VCPU_HOST_STACK(k1)
525 INT_ADDIU k1,k1, -PT_SIZE
527 /* Restore host DDATA_LO */
528 LONG_L k0, PT_HOST_USERLOCAL(k1)
529 mtc0 k0, CP0_DDATA_LO
532 * r2/v0 is the return code, shift it down by 2 (arithmetic)
533 * to recover the err code
538 /* Load context saved on the host stack */
/* Restore the callee-saved GPRs stored by __kvm_mips_vcpu_run. */
539 LONG_L $16, PT_R16(k1)
540 LONG_L $17, PT_R17(k1)
541 LONG_L $18, PT_R18(k1)
542 LONG_L $19, PT_R19(k1)
543 LONG_L $20, PT_R20(k1)
544 LONG_L $21, PT_R21(k1)
545 LONG_L $22, PT_R22(k1)
546 LONG_L $23, PT_R23(k1)
548 LONG_L $28, PT_R28(k1)
549 LONG_L $29, PT_R29(k1)
550 LONG_L $30, PT_R30(k1)
558 /* Restore RDHWR access */
559 PTR_LI k0, 0x2000000F
562 /* Restore RA, which is the address we will return to */
/* Returns to __kvm_mips_vcpu_run's caller (jr ra not visible here). */
563 LONG_L ra, PT_R31(k1)
567 VECTOR_END(MIPSX(GuestExceptionEnd))
568 .end MIPSX(GuestException)
##### The exception handlers.
#
# Dispatch table with one word per CP0 Cause.ExcCode value (0-31).
# Every entry points at the common MIPSX(GuestException) handler; the
# actual exception decoding happens in C in kvm_mips_handle_exit().
574 .word _C_LABEL(MIPSX(GuestException)) # 0
575 .word _C_LABEL(MIPSX(GuestException)) # 1
576 .word _C_LABEL(MIPSX(GuestException)) # 2
577 .word _C_LABEL(MIPSX(GuestException)) # 3
578 .word _C_LABEL(MIPSX(GuestException)) # 4
579 .word _C_LABEL(MIPSX(GuestException)) # 5
580 .word _C_LABEL(MIPSX(GuestException)) # 6
581 .word _C_LABEL(MIPSX(GuestException)) # 7
582 .word _C_LABEL(MIPSX(GuestException)) # 8
583 .word _C_LABEL(MIPSX(GuestException)) # 9
584 .word _C_LABEL(MIPSX(GuestException)) # 10
585 .word _C_LABEL(MIPSX(GuestException)) # 11
586 .word _C_LABEL(MIPSX(GuestException)) # 12
587 .word _C_LABEL(MIPSX(GuestException)) # 13
588 .word _C_LABEL(MIPSX(GuestException)) # 14
589 .word _C_LABEL(MIPSX(GuestException)) # 15
590 .word _C_LABEL(MIPSX(GuestException)) # 16
591 .word _C_LABEL(MIPSX(GuestException)) # 17
592 .word _C_LABEL(MIPSX(GuestException)) # 18
593 .word _C_LABEL(MIPSX(GuestException)) # 19
594 .word _C_LABEL(MIPSX(GuestException)) # 20
595 .word _C_LABEL(MIPSX(GuestException)) # 21
596 .word _C_LABEL(MIPSX(GuestException)) # 22
597 .word _C_LABEL(MIPSX(GuestException)) # 23
598 .word _C_LABEL(MIPSX(GuestException)) # 24
599 .word _C_LABEL(MIPSX(GuestException)) # 25
600 .word _C_LABEL(MIPSX(GuestException)) # 26
601 .word _C_LABEL(MIPSX(GuestException)) # 27
602 .word _C_LABEL(MIPSX(GuestException)) # 28
603 .word _C_LABEL(MIPSX(GuestException)) # 29
604 .word _C_LABEL(MIPSX(GuestException)) # 30
605 .word _C_LABEL(MIPSX(GuestException)) # 31