/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>

#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>

#define CPU_GP_REG_OFFSET(x)	(CPU_GP_REGS + x)
#define CPU_XREG_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
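/*
 * CPU_XREG_OFFSET(n) is the byte offset of guest register x<n> within
 * struct kvm_cpu_context: the general-purpose registers live in a
 * user_pt_regs embedded in the context, one 64-bit slot per register,
 * hence the 8*x scaling.
 */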

	.text
	.pushsection	.hyp.text, "ax"

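/*
 * x19-x29 and lr are the AAPCS64 callee-saved registers: they are the
 * only GP registers a compiled caller of __guest_enter expects to
 * survive the call, so they are the only host registers that need to
 * be saved and restored across a guest run.
 */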
.macro save_callee_saved_regs ctxt
	stp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	stp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	stp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	stp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	stp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	stp	x29, lr, [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro restore_callee_saved_regs ctxt
	ldp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	ldp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	ldp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	ldp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	ldp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	ldp	x29, lr, [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

/*
 * u64 __guest_enter(struct kvm_vcpu *vcpu,
 *		     struct kvm_cpu_context *host_ctxt);
 */
ENTRY(__guest_enter)
	// x0: vcpu
	// x1: host/guest context
	// x2-x18: clobbered by macros

	// Store the host regs
	save_callee_saved_regs x1

	// Preserve vcpu & host_ctxt for use at exit time
	stp	x0, x1, [sp, #-16]!

	add	x1, x0, #VCPU_CONTEXT

	// Prepare x0-x1 for later restore by pushing them onto the stack
	ldp	x2, x3, [x1, #CPU_XREG_OFFSET(0)]
	stp	x2, x3, [sp, #-16]!
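	// The guest's x0/x1 have to go through scratch registers: x0/x1
	// themselves still hold the vcpu and context pointers, so the
	// values are staged on the stack and popped last, just before eret.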

	// x2-x18
	ldp	x2, x3, [x1, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5, [x1, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7, [x1, #CPU_XREG_OFFSET(6)]
	ldp	x8, x9, [x1, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x1, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x1, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x1, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x1, #CPU_XREG_OFFSET(16)]
	ldr	x18, [x1, #CPU_XREG_OFFSET(18)]

	// x19-x29, lr
	restore_callee_saved_regs x1

	// Last bits of the 64bit state
	ldp	x0, x1, [sp], #16

	// Do not touch any register after this!
	eret
ENDPROC(__guest_enter)

ENTRY(__guest_exit)
	// x0: vcpu
	// x1: return code
	// x2-x3: free
	// x4-x29,lr: vcpu regs
	// vcpu x0-x3 on the stack
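	// __guest_exit is never called directly: the hyp exception
	// vectors push the guest's x0-x3 and branch here, which is what
	// frees x2-x3 for use as scratch registers (see hyp-entry.S).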

	add	x2, x0, #VCPU_CONTEXT

	stp	x4, x5, [x2, #CPU_XREG_OFFSET(4)]
	stp	x6, x7, [x2, #CPU_XREG_OFFSET(6)]
	stp	x8, x9, [x2, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x2, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x2, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x2, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x2, #CPU_XREG_OFFSET(16)]
	str	x18, [x2, #CPU_XREG_OFFSET(18)]

	ldp	x6, x7, [sp], #16	// x2, x3
	ldp	x4, x5, [sp], #16	// x0, x1

	stp	x4, x5, [x2, #CPU_XREG_OFFSET(0)]
	stp	x6, x7, [x2, #CPU_XREG_OFFSET(2)]

	save_callee_saved_regs x2

	// Restore vcpu & host_ctxt from the stack
	// (preserving return code in x1)
	ldp	x0, x2, [sp], #16
	// Now restore the host regs
	restore_callee_saved_regs x2

	mov	x0, x1
	ret
ENDPROC(__guest_exit)

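/*
 * Lazy FP/SIMD switch: the guest's first FP/SIMD access after entry
 * traps to EL2 and lands here. Save the host's FP/SIMD state, load
 * the guest's, and turn the trap off so that later guest accesses run
 * untrapped until the next world switch.
 */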
ENTRY(__fpsimd_guest_restore)
	stp	x4, lr, [sp, #-16]!

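	// Turn off FP/SIMD trapping. Without VHE this is done via
	// CPTR_EL2.TFP; with VHE (ARM64_HAS_VIRT_HOST_EXTN) the kernel
	// runs at EL2 and CPACR_EL1.FPEN controls the trap instead.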
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	x2, cptr_el2
	bic	x2, x2, #CPTR_EL2_TFP
	msr	cptr_el2, x2
alternative_else
	mrs	x2, cpacr_el1
	orr	x2, x2, #CPACR_EL1_FPEN
	msr	cpacr_el1, x2
alternative_endif
	isb

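	// tpidr_el2 holds the current vcpu pointer; the world-switch
	// code stashes it there before entering the guest.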
	mrs	x3, tpidr_el2

	ldr	x0, [x3, #VCPU_HOST_CONTEXT]
	kern_hyp_va x0
	add	x0, x0, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
	bl	__fpsimd_save_state

	add	x2, x3, #VCPU_CONTEXT
	add	x0, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
	bl	__fpsimd_restore_state

	// Skip restoring fpexc32 for AArch64 guests
	mrs	x1, hcr_el2
	tbnz	x1, #HCR_RW_SHIFT, 1f
	ldr	x4, [x3, #VCPU_FPEXC32_EL2]
	msr	fpexc32_el2, x4
1:
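	// Unwind: x4/lr were pushed at the top of this function; x0-x3
	// were pushed by the exception vector before branching here.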
	ldp	x4, lr, [sp], #16
	ldp	x2, x3, [sp], #16
	ldp	x0, x1, [sp], #16

	eret
ENDPROC(__fpsimd_guest_restore)