/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/asm-offsets.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#define CPU_GP_REG_OFFSET(x)	(CPU_GP_REGS + x)
#define CPU_XREG_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
#define CPU_SPSR_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_SPSR + 8*x)
#define CPU_SYSREG_OFFSET(x)	(CPU_SYSREGS + 8*x)
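
/*
 * For example, CPU_XREG_OFFSET(19) expands to
 * CPU_GP_REGS + CPU_USER_PT_REGS + 8*19, i.e. the byte offset of x19
 * within the CPU context. The CPU_* layout constants are generated
 * from the C structures by asm-offsets.c.
 */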
33 | ||
34 | .text | |
35 | .pushsection .hyp.text, "ax" | |
36 | .align PAGE_SHIFT | |
37 | ||
55c7401d MZ |
38 | .macro save_common_regs |
39 | // x2: base address for cpu context | |
40 | // x3: tmp register | |
41 | ||
42 | add x3, x2, #CPU_XREG_OFFSET(19) | |
43 | stp x19, x20, [x3] | |
44 | stp x21, x22, [x3, #16] | |
45 | stp x23, x24, [x3, #32] | |
46 | stp x25, x26, [x3, #48] | |
47 | stp x27, x28, [x3, #64] | |
48 | stp x29, lr, [x3, #80] | |
49 | ||
50 | mrs x19, sp_el0 | |
51 | mrs x20, elr_el2 // EL1 PC | |
52 | mrs x21, spsr_el2 // EL1 pstate | |
53 | ||
54 | stp x19, x20, [x3, #96] | |
55 | str x21, [x3, #112] | |
56 | ||
57 | mrs x22, sp_el1 | |
58 | mrs x23, elr_el1 | |
59 | mrs x24, spsr_el1 | |
60 | ||
61 | str x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)] | |
62 | str x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)] | |
63 | str x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)] | |
64 | .endm | |
65 | ||
66 | .macro restore_common_regs | |
67 | // x2: base address for cpu context | |
68 | // x3: tmp register | |
69 | ||
70 | ldr x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)] | |
71 | ldr x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)] | |
72 | ldr x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)] | |
73 | ||
74 | msr sp_el1, x22 | |
75 | msr elr_el1, x23 | |
76 | msr spsr_el1, x24 | |
77 | ||
78 | add x3, x2, #CPU_XREG_OFFSET(31) // SP_EL0 | |
79 | ldp x19, x20, [x3] | |
80 | ldr x21, [x3, #16] | |
81 | ||
82 | msr sp_el0, x19 | |
83 | msr elr_el2, x20 // EL1 PC | |
84 | msr spsr_el2, x21 // EL1 pstate | |
85 | ||
86 | add x3, x2, #CPU_XREG_OFFSET(19) | |
87 | ldp x19, x20, [x3] | |
88 | ldp x21, x22, [x3, #16] | |
89 | ldp x23, x24, [x3, #32] | |
90 | ldp x25, x26, [x3, #48] | |
91 | ldp x27, x28, [x3, #64] | |
92 | ldp x29, lr, [x3, #80] | |
93 | .endm | |
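
/*
 * Note: CPU_XREG_OFFSET(31) works because in struct user_pt_regs the
 * regs[] array is immediately followed by the sp, pc and pstate fields,
 * so offsets #0, #8 and #16 from that point hold SP_EL0, the EL1 PC
 * (from ELR_EL2) and the EL1 pstate (from SPSR_EL2) respectively.
 */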
94 | ||
95 | .macro save_host_regs | |
96 | save_common_regs | |
97 | .endm | |
98 | ||
99 | .macro restore_host_regs | |
100 | restore_common_regs | |
101 | .endm | |
102 | ||
103 | .macro save_fpsimd | |
104 | // x2: cpu context address | |
105 | // x3, x4: tmp regs | |
106 | add x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS) | |
107 | fpsimd_save x3, 4 | |
108 | .endm | |
109 | ||
110 | .macro restore_fpsimd | |
111 | // x2: cpu context address | |
112 | // x3, x4: tmp regs | |
113 | add x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS) | |
114 | fpsimd_restore x3, 4 | |
115 | .endm | |
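
/*
 * fpsimd_save/fpsimd_restore come from asm/fpsimdmacros.h: they copy
 * the whole q0-q31 register file plus fpsr/fpcr to/from the address
 * held in x3, using x4 as scratch.
 */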
116 | ||
117 | .macro save_guest_regs | |
118 | // x0 is the vcpu address | |
119 | // x1 is the return code, do not corrupt! | |
120 | // x2 is the cpu context | |
121 | // x3 is a tmp register | |
122 | // Guest's x0-x3 are on the stack | |
123 | ||
124 | // Compute base to save registers | |
125 | add x3, x2, #CPU_XREG_OFFSET(4) | |
126 | stp x4, x5, [x3] | |
127 | stp x6, x7, [x3, #16] | |
128 | stp x8, x9, [x3, #32] | |
129 | stp x10, x11, [x3, #48] | |
130 | stp x12, x13, [x3, #64] | |
131 | stp x14, x15, [x3, #80] | |
132 | stp x16, x17, [x3, #96] | |
133 | str x18, [x3, #112] | |
134 | ||
135 | pop x6, x7 // x2, x3 | |
136 | pop x4, x5 // x0, x1 | |
137 | ||
138 | add x3, x2, #CPU_XREG_OFFSET(0) | |
139 | stp x4, x5, [x3] | |
140 | stp x6, x7, [x3, #16] | |
141 | ||
142 | save_common_regs | |
143 | .endm | |
144 | ||
145 | .macro restore_guest_regs | |
146 | // x0 is the vcpu address. | |
147 | // x2 is the cpu context | |
148 | // x3 is a tmp register | |
149 | ||
150 | // Prepare x0-x3 for later restore | |
151 | add x3, x2, #CPU_XREG_OFFSET(0) | |
152 | ldp x4, x5, [x3] | |
153 | ldp x6, x7, [x3, #16] | |
154 | push x4, x5 // Push x0-x3 on the stack | |
155 | push x6, x7 | |
156 | ||
157 | // x4-x18 | |
158 | ldp x4, x5, [x3, #32] | |
159 | ldp x6, x7, [x3, #48] | |
160 | ldp x8, x9, [x3, #64] | |
161 | ldp x10, x11, [x3, #80] | |
162 | ldp x12, x13, [x3, #96] | |
163 | ldp x14, x15, [x3, #112] | |
164 | ldp x16, x17, [x3, #128] | |
165 | ldr x18, [x3, #144] | |
166 | ||
167 | // x19-x29, lr, sp*, elr*, spsr* | |
168 | restore_common_regs | |
169 | ||
170 | // Last bits of the 64bit state | |
171 | pop x2, x3 | |
172 | pop x0, x1 | |
173 | ||
174 | // Do not touch any register after this! | |
175 | .endm | |
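
/*
 * The pushes/pops above pair up with the el1_sync entry code: the
 * guest's x0/x1 and x2/x3 are pushed on exception entry, so
 * save_guest_regs pops them in reverse order (x2/x3 first), and
 * restore_guest_regs re-creates the same two-slot layout for the final
 * pops performed just before the eret back into the guest.
 */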
176 | ||
177 | /* | |
178 | * Macros to perform system register save/restore. | |
179 | * | |
180 | * Ordering here is absolutely critical, and must be kept consistent | |
181 | * in {save,restore}_sysregs, {save,restore}_guest_32bit_state, | |
182 | * and in kvm_asm.h. | |
183 | * | |
184 | * In other words, don't touch any of these unless you know what | |
185 | * you are doing. | |
186 | */ | |
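// For instance, MPIDR_EL1 must stay the first entry of the sysreg
// enumeration in kvm_asm.h: CPU_SYSREG_OFFSET(MPIDR_EL1) anchors the
// whole block below, and each mrs/str pair lands in the enum slot of
// the same rank.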
.macro save_sysregs
	// x2: base address for cpu context
	// x3: tmp register

	add	x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)

	mrs	x4, vmpidr_el2
	mrs	x5, csselr_el1
	mrs	x6, sctlr_el1
	mrs	x7, actlr_el1
	mrs	x8, cpacr_el1
	mrs	x9, ttbr0_el1
	mrs	x10, ttbr1_el1
	mrs	x11, tcr_el1
	mrs	x12, esr_el1
	mrs	x13, afsr0_el1
	mrs	x14, afsr1_el1
	mrs	x15, far_el1
	mrs	x16, mair_el1
	mrs	x17, vbar_el1
	mrs	x18, contextidr_el1
	mrs	x19, tpidr_el0
	mrs	x20, tpidrro_el0
	mrs	x21, tpidr_el1
	mrs	x22, amair_el1
	mrs	x23, cntkctl_el1
	mrs	x24, par_el1

	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]
	stp	x8, x9, [x3, #32]
	stp	x10, x11, [x3, #48]
	stp	x12, x13, [x3, #64]
	stp	x14, x15, [x3, #80]
	stp	x16, x17, [x3, #96]
	stp	x18, x19, [x3, #112]
	stp	x20, x21, [x3, #128]
	stp	x22, x23, [x3, #144]
	str	x24, [x3, #160]
.endm

.macro restore_sysregs
	// x2: base address for cpu context
	// x3: tmp register

	add	x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)

	ldp	x4, x5, [x3]
	ldp	x6, x7, [x3, #16]
	ldp	x8, x9, [x3, #32]
	ldp	x10, x11, [x3, #48]
	ldp	x12, x13, [x3, #64]
	ldp	x14, x15, [x3, #80]
	ldp	x16, x17, [x3, #96]
	ldp	x18, x19, [x3, #112]
	ldp	x20, x21, [x3, #128]
	ldp	x22, x23, [x3, #144]
	ldr	x24, [x3, #160]

	msr	vmpidr_el2, x4
	msr	csselr_el1, x5
	msr	sctlr_el1, x6
	msr	actlr_el1, x7
	msr	cpacr_el1, x8
	msr	ttbr0_el1, x9
	msr	ttbr1_el1, x10
	msr	tcr_el1, x11
	msr	esr_el1, x12
	msr	afsr0_el1, x13
	msr	afsr1_el1, x14
	msr	far_el1, x15
	msr	mair_el1, x16
	msr	vbar_el1, x17
	msr	contextidr_el1, x18
	msr	tpidr_el0, x19
	msr	tpidrro_el0, x20
	msr	tpidr_el1, x21
	msr	amair_el1, x22
	msr	cntkctl_el1, x23
	msr	par_el1, x24
.endm
268 | ||
b4afad06 MZ |
269 | .macro skip_32bit_state tmp, target |
270 | // Skip 32bit state if not needed | |
271 | mrs \tmp, hcr_el2 | |
272 | tbnz \tmp, #HCR_RW_SHIFT, \target | |
273 | .endm | |
274 | ||
275 | .macro skip_tee_state tmp, target | |
276 | // Skip ThumbEE state if not needed | |
277 | mrs \tmp, id_pfr0_el1 | |
278 | tbz \tmp, #12, \target | |
279 | .endm | |
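// Bit 12 is the bottom bit of the ID_PFR0.State3 field (bits [15:12]),
// which is non-zero when the CPU implements ThumbEE.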
280 | ||
281 | .macro save_guest_32bit_state | |
282 | skip_32bit_state x3, 1f | |
283 | ||
284 | add x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT) | |
285 | mrs x4, spsr_abt | |
286 | mrs x5, spsr_und | |
287 | mrs x6, spsr_irq | |
288 | mrs x7, spsr_fiq | |
289 | stp x4, x5, [x3] | |
290 | stp x6, x7, [x3, #16] | |
291 | ||
292 | add x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2) | |
293 | mrs x4, dacr32_el2 | |
294 | mrs x5, ifsr32_el2 | |
295 | mrs x6, fpexc32_el2 | |
296 | mrs x7, dbgvcr32_el2 | |
297 | stp x4, x5, [x3] | |
298 | stp x6, x7, [x3, #16] | |
299 | ||
300 | skip_tee_state x8, 1f | |
301 | ||
302 | add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1) | |
303 | mrs x4, teecr32_el1 | |
304 | mrs x5, teehbr32_el1 | |
305 | stp x4, x5, [x3] | |
306 | 1: | |
307 | .endm | |
308 | ||
309 | .macro restore_guest_32bit_state | |
310 | skip_32bit_state x3, 1f | |
311 | ||
312 | add x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT) | |
313 | ldp x4, x5, [x3] | |
314 | ldp x6, x7, [x3, #16] | |
315 | msr spsr_abt, x4 | |
316 | msr spsr_und, x5 | |
317 | msr spsr_irq, x6 | |
318 | msr spsr_fiq, x7 | |
319 | ||
320 | add x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2) | |
321 | ldp x4, x5, [x3] | |
322 | ldp x6, x7, [x3, #16] | |
323 | msr dacr32_el2, x4 | |
324 | msr ifsr32_el2, x5 | |
325 | msr fpexc32_el2, x6 | |
326 | msr dbgvcr32_el2, x7 | |
327 | ||
328 | skip_tee_state x8, 1f | |
329 | ||
330 | add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1) | |
331 | ldp x4, x5, [x3] | |
332 | msr teecr32_el1, x4 | |
333 | msr teehbr32_el1, x5 | |
334 | 1: | |
335 | .endm | |
336 | ||
55c7401d MZ |
337 | .macro activate_traps |
338 | ldr x2, [x0, #VCPU_IRQ_LINES] | |
339 | ldr x1, [x0, #VCPU_HCR_EL2] | |
340 | orr x2, x2, x1 | |
341 | msr hcr_el2, x2 | |
342 | ||
343 | ldr x2, =(CPTR_EL2_TTA) | |
344 | msr cptr_el2, x2 | |
345 | ||
346 | ldr x2, =(1 << 15) // Trap CP15 Cr=15 | |
347 | msr hstr_el2, x2 | |
348 | ||
349 | mrs x2, mdcr_el2 | |
350 | and x2, x2, #MDCR_EL2_HPMN_MASK | |
351 | orr x2, x2, #(MDCR_EL2_TPM | MDCR_EL2_TPMCR) | |
352 | msr mdcr_el2, x2 | |
353 | .endm | |
354 | ||
355 | .macro deactivate_traps | |
356 | mov x2, #HCR_RW | |
357 | msr hcr_el2, x2 | |
358 | msr cptr_el2, xzr | |
359 | msr hstr_el2, xzr | |
360 | ||
361 | mrs x2, mdcr_el2 | |
362 | and x2, x2, #MDCR_EL2_HPMN_MASK | |
363 | msr mdcr_el2, x2 | |
364 | .endm | |
365 | ||
366 | .macro activate_vm | |
367 | ldr x1, [x0, #VCPU_KVM] | |
368 | kern_hyp_va x1 | |
369 | ldr x2, [x1, #KVM_VTTBR] | |
370 | msr vttbr_el2, x2 | |
371 | .endm | |
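
// VTTBR_EL2 carries both the stage-2 page table base and the VMID, so
// a single write to it is enough to switch the guest's stage-2
// translation context, and clearing it (deactivate_vm) turns it off.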
372 | ||
373 | .macro deactivate_vm | |
374 | msr vttbr_el2, xzr | |
375 | .endm | |
376 | ||
1f17f3b6 | 377 | /* |
1a9b1305 | 378 | * Call into the vgic backend for state saving |
1f17f3b6 MZ |
379 | */ |
380 | .macro save_vgic_state | |
1a9b1305 MZ |
381 | adr x24, __vgic_sr_vectors |
382 | ldr x24, [x24, VGIC_SAVE_FN] | |
383 | kern_hyp_va x24 | |
384 | blr x24 | |
1f17f3b6 MZ |
385 | .endm |
386 | ||
387 | /* | |
1a9b1305 | 388 | * Call into the vgic backend for state restoring |
1f17f3b6 MZ |
389 | */ |
390 | .macro restore_vgic_state | |
1a9b1305 MZ |
391 | adr x24, __vgic_sr_vectors |
392 | ldr x24, [x24, #VGIC_RESTORE_FN] | |
393 | kern_hyp_va x24 | |
394 | blr x24 | |
1f17f3b6 MZ |
395 | .endm |
396 | ||
003300de MZ |
397 | .macro save_timer_state |
398 | // x0: vcpu pointer | |
399 | ldr x2, [x0, #VCPU_KVM] | |
400 | kern_hyp_va x2 | |
401 | ldr w3, [x2, #KVM_TIMER_ENABLED] | |
402 | cbz w3, 1f | |
403 | ||
404 | mrs x3, cntv_ctl_el0 | |
405 | and x3, x3, #3 | |
406 | str w3, [x0, #VCPU_TIMER_CNTV_CTL] | |
407 | bic x3, x3, #1 // Clear Enable | |
408 | msr cntv_ctl_el0, x3 | |
409 | ||
410 | isb | |
411 | ||
412 | mrs x3, cntv_cval_el0 | |
413 | str x3, [x0, #VCPU_TIMER_CNTV_CVAL] | |
414 | ||
415 | 1: | |
416 | // Allow physical timer/counter access for the host | |
417 | mrs x2, cnthctl_el2 | |
418 | orr x2, x2, #3 | |
419 | msr cnthctl_el2, x2 | |
420 | ||
421 | // Clear cntvoff for the host | |
422 | msr cntvoff_el2, xzr | |
423 | .endm | |
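
// CNTHCTL_EL2 bits 0 (EL1PCTEN) and 1 (EL1PCEN) gate EL1/EL0 access to
// the physical counter and physical timer: both are granted to the host
// above, while restore_timer_state below grants the guest counter
// access only.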
424 | ||
425 | .macro restore_timer_state | |
426 | // x0: vcpu pointer | |
427 | // Disallow physical timer access for the guest | |
428 | // Physical counter access is allowed | |
429 | mrs x2, cnthctl_el2 | |
430 | orr x2, x2, #1 | |
431 | bic x2, x2, #2 | |
432 | msr cnthctl_el2, x2 | |
433 | ||
434 | ldr x2, [x0, #VCPU_KVM] | |
435 | kern_hyp_va x2 | |
436 | ldr w3, [x2, #KVM_TIMER_ENABLED] | |
437 | cbz w3, 1f | |
438 | ||
439 | ldr x3, [x2, #KVM_TIMER_CNTVOFF] | |
440 | msr cntvoff_el2, x3 | |
441 | ldr x2, [x0, #VCPU_TIMER_CNTV_CVAL] | |
442 | msr cntv_cval_el0, x2 | |
443 | isb | |
444 | ||
445 | ldr w2, [x0, #VCPU_TIMER_CNTV_CTL] | |
446 | and x2, x2, #3 | |
447 | msr cntv_ctl_el0, x2 | |
448 | 1: | |
449 | .endm | |
450 | ||
55c7401d MZ |
451 | __save_sysregs: |
452 | save_sysregs | |
453 | ret | |
454 | ||
455 | __restore_sysregs: | |
456 | restore_sysregs | |
457 | ret | |
458 | ||
459 | __save_fpsimd: | |
460 | save_fpsimd | |
461 | ret | |
462 | ||
463 | __restore_fpsimd: | |
464 | restore_fpsimd | |
465 | ret | |
466 | ||
467 | /* | |
468 | * u64 __kvm_vcpu_run(struct kvm_vcpu *vcpu); | |
469 | * | |
470 | * This is the world switch. The first half of the function | |
471 | * deals with entering the guest, and anything from __kvm_vcpu_return | |
472 | * to the end of the function deals with reentering the host. | |
473 | * On the enter path, only x0 (vcpu pointer) must be preserved until | |
474 | * the last moment. On the exit path, x0 (vcpu pointer) and x1 (exception | |
475 | * code) must both be preserved until the epilogue. | |
476 | * In both cases, x2 points to the CPU context we're saving/restoring from/to. | |
477 | */ | |
ENTRY(__kvm_vcpu_run)
	kern_hyp_va	x0
	msr	tpidr_el2, x0	// Save the vcpu register

	// Host context
	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	save_host_regs
	bl	__save_fpsimd
	bl	__save_sysregs

	activate_traps
	activate_vm

	restore_vgic_state
	restore_timer_state

	// Guest context
	add	x2, x0, #VCPU_CONTEXT

	bl	__restore_sysregs
	bl	__restore_fpsimd
	restore_guest_32bit_state
	restore_guest_regs

	// That's it, no more messing around.
	eret

__kvm_vcpu_return:
	// Assume x0 is the vcpu pointer, x1 the return code
	// Guest's x0-x3 are on the stack

	// Guest context
	add	x2, x0, #VCPU_CONTEXT

	save_guest_regs
	bl	__save_fpsimd
	bl	__save_sysregs
	save_guest_32bit_state

	save_timer_state
	save_vgic_state

	deactivate_traps
	deactivate_vm

	// Host context
	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	bl	__restore_sysregs
	bl	__restore_fpsimd
	restore_host_regs

	mov	x0, x1
	ret
END(__kvm_vcpu_run)
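
// Host-side usage sketch: the generic KVM code enters this function
// through the HVC trampoline, e.g.
//	ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
// with the returned exception code telling the host why the guest exited.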
536 | ||
537 | // void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); | |
538 | ENTRY(__kvm_tlb_flush_vmid_ipa) | |
f142e5ee MZ |
539 | dsb ishst |
540 | ||
55c7401d MZ |
541 | kern_hyp_va x0 |
542 | ldr x2, [x0, #KVM_VTTBR] | |
543 | msr vttbr_el2, x2 | |
544 | isb | |
545 | ||
546 | /* | |
547 | * We could do so much better if we had the VA as well. | |
548 | * Instead, we invalidate Stage-2 for this IPA, and the | |
549 | * whole of Stage-1. Weep... | |
550 | */ | |
551 | tlbi ipas2e1is, x1 | |
ee9e101c WD |
552 | /* |
553 | * We have to ensure completion of the invalidation at Stage-2, | |
554 | * since a table walk on another CPU could refill a TLB with a | |
555 | * complete (S1 + S2) walk based on the old Stage-2 mapping if | |
556 | * the Stage-1 invalidation happened first. | |
557 | */ | |
558 | dsb ish | |
55c7401d | 559 | tlbi vmalle1is |
ee9e101c | 560 | dsb ish |
55c7401d MZ |
561 | isb |
562 | ||
563 | msr vttbr_el2, xzr | |
564 | ret | |
565 | ENDPROC(__kvm_tlb_flush_vmid_ipa) | |
566 | ||
567 | ENTRY(__kvm_flush_vm_context) | |
f142e5ee | 568 | dsb ishst |
55c7401d MZ |
569 | tlbi alle1is |
570 | ic ialluis | |
ee9e101c | 571 | dsb ish |
55c7401d MZ |
572 | ret |
573 | ENDPROC(__kvm_flush_vm_context) | |
574 | ||
1a9b1305 MZ |
575 | // struct vgic_sr_vectors __vgi_sr_vectors; |
576 | .align 3 | |
577 | ENTRY(__vgic_sr_vectors) | |
578 | .skip VGIC_SR_VECTOR_SZ | |
579 | ENDPROC(__vgic_sr_vectors) | |
580 | ||
55c7401d MZ |
581 | __kvm_hyp_panic: |
582 | // Guess the context by looking at VTTBR: | |
583 | // If zero, then we're already a host. | |
584 | // Otherwise restore a minimal host context before panicing. | |
585 | mrs x0, vttbr_el2 | |
586 | cbz x0, 1f | |
587 | ||
588 | mrs x0, tpidr_el2 | |
589 | ||
590 | deactivate_traps | |
591 | deactivate_vm | |
592 | ||
593 | ldr x2, [x0, #VCPU_HOST_CONTEXT] | |
594 | kern_hyp_va x2 | |
595 | ||
596 | bl __restore_sysregs | |
597 | ||
598 | 1: adr x0, __hyp_panic_str | |
599 | adr x1, 2f | |
600 | ldp x2, x3, [x1] | |
601 | sub x0, x0, x2 | |
602 | add x0, x0, x3 | |
603 | mrs x1, spsr_el2 | |
604 | mrs x2, elr_el2 | |
605 | mrs x3, esr_el2 | |
606 | mrs x4, far_el2 | |
607 | mrs x5, hpfar_el2 | |
608 | mrs x6, par_el1 | |
609 | mrs x7, tpidr_el2 | |
610 | ||
611 | mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\ | |
612 | PSR_MODE_EL1h) | |
613 | msr spsr_el2, lr | |
614 | ldr lr, =panic | |
615 | msr elr_el2, lr | |
616 | eret | |
617 | ||
618 | .align 3 | |
619 | 2: .quad HYP_PAGE_OFFSET | |
620 | .quad PAGE_OFFSET | |
621 | ENDPROC(__kvm_hyp_panic) | |
622 | ||
623 | __hyp_panic_str: | |
624 | .ascii "HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0" | |
625 | ||
626 | .align 2 | |
627 | ||
b20c9f29 MZ |
628 | /* |
629 | * u64 kvm_call_hyp(void *hypfn, ...); | |
630 | * | |
631 | * This is not really a variadic function in the classic C-way and care must | |
632 | * be taken when calling this to ensure parameters are passed in registers | |
633 | * only, since the stack will change between the caller and the callee. | |
634 | * | |
635 | * Call the function with the first argument containing a pointer to the | |
636 | * function you wish to call in Hyp mode, and subsequent arguments will be | |
637 | * passed as x0, x1, and x2 (a maximum of 3 arguments in addition to the | |
638 | * function pointer can be passed). The function being called must be mapped | |
639 | * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are | |
640 | * passed in r0 and r1. | |
641 | * | |
642 | * A function pointer with a value of 0 has a special meaning, and is | |
643 | * used to implement __hyp_get_vectors in the same way as in | |
644 | * arch/arm64/kernel/hyp_stub.S. | |
645 | */ | |
55c7401d MZ |
646 | ENTRY(kvm_call_hyp) |
647 | hvc #0 | |
648 | ret | |
649 | ENDPROC(kvm_call_hyp) | |
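
// Typical host-side calls, e.g. from the shared arch/arm/kvm code:
//	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
//	kvm_call_hyp(__kvm_flush_vm_context);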
650 | ||
651 | .macro invalid_vector label, target | |
652 | .align 2 | |
653 | \label: | |
654 | b \target | |
655 | ENDPROC(\label) | |
656 | .endm | |
657 | ||
658 | /* None of these should ever happen */ | |
659 | invalid_vector el2t_sync_invalid, __kvm_hyp_panic | |
660 | invalid_vector el2t_irq_invalid, __kvm_hyp_panic | |
661 | invalid_vector el2t_fiq_invalid, __kvm_hyp_panic | |
662 | invalid_vector el2t_error_invalid, __kvm_hyp_panic | |
663 | invalid_vector el2h_sync_invalid, __kvm_hyp_panic | |
664 | invalid_vector el2h_irq_invalid, __kvm_hyp_panic | |
665 | invalid_vector el2h_fiq_invalid, __kvm_hyp_panic | |
666 | invalid_vector el2h_error_invalid, __kvm_hyp_panic | |
667 | invalid_vector el1_sync_invalid, __kvm_hyp_panic | |
668 | invalid_vector el1_irq_invalid, __kvm_hyp_panic | |
669 | invalid_vector el1_fiq_invalid, __kvm_hyp_panic | |
670 | invalid_vector el1_error_invalid, __kvm_hyp_panic | |
671 | ||
672 | el1_sync: // Guest trapped into EL2 | |
673 | push x0, x1 | |
674 | push x2, x3 | |
675 | ||
676 | mrs x1, esr_el2 | |
677 | lsr x2, x1, #ESR_EL2_EC_SHIFT | |
678 | ||
679 | cmp x2, #ESR_EL2_EC_HVC64 | |
680 | b.ne el1_trap | |
681 | ||
682 | mrs x3, vttbr_el2 // If vttbr is valid, the 64bit guest | |
683 | cbnz x3, el1_trap // called HVC | |
684 | ||
685 | /* Here, we're pretty sure the host called HVC. */ | |
686 | pop x2, x3 | |
687 | pop x0, x1 | |
688 | ||
b20c9f29 MZ |
689 | /* Check for __hyp_get_vectors */ |
690 | cbnz x0, 1f | |
691 | mrs x0, vbar_el2 | |
692 | b 2f | |
693 | ||
694 | 1: push lr, xzr | |

	/*
	 * Compute the function address in EL2, and shuffle the parameters.
	 */
	kern_hyp_va	x0
	mov	lr, x0
	mov	x0, x1
	mov	x1, x2
	mov	x2, x3
	blr	lr

	pop	lr, xzr
2:	eret

el1_trap:
	/*
	 * x1: ESR
	 * x2: ESR_EC
	 */
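	// Fall through to the abort handling below only for data or
	// instruction aborts: when EC == DABT the ccmp condition fails and
	// the immediate #4 forces Z set (so b.ne is not taken); otherwise
	// EC is compared against IABT.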
	cmp	x2, #ESR_EL2_EC_DABT
	mov	x0, #ESR_EL2_EC_IABT
	ccmp	x2, x0, #4, ne
	b.ne	1f		// Not an abort we care about

	/* This is an abort. Check for permission fault */
	and	x2, x1, #ESR_EL2_FSC_TYPE
	cmp	x2, #FSC_PERM
	b.ne	1f		// Not a permission fault

	/*
	 * Check for Stage-1 page table walk, which is guaranteed
	 * to give a valid HPFAR_EL2.
	 */
	tbnz	x1, #7, 1f	// S1PTW is set

	/* Preserve PAR_EL1 */
	mrs	x3, par_el1
	push	x3, xzr

	/*
	 * Permission fault, HPFAR_EL2 is invalid.
	 * Resolve the IPA the hard way using the guest VA.
	 * Stage-1 translation already validated the memory access rights.
	 * As such, we can use the EL1 translation regime, and don't have
	 * to distinguish between EL0 and EL1 access.
	 */
	mrs	x2, far_el2
	at	s1e1r, x2
	isb

	/* Read result */
	mrs	x3, par_el1
	pop	x0, xzr			// Restore PAR_EL1 from the stack
	msr	par_el1, x0
	tbnz	x3, #0, 3f		// Bail out if we failed the translation
	ubfx	x3, x3, #12, #36	// Extract IPA
	lsl	x3, x3, #4		// and present it like HPFAR
	b	2f

1:	mrs	x3, hpfar_el2
	mrs	x2, far_el2

2:	mrs	x0, tpidr_el2
	str	x1, [x0, #VCPU_ESR_EL2]
	str	x2, [x0, #VCPU_FAR_EL2]
	str	x3, [x0, #VCPU_HPFAR_EL2]

	mov	x1, #ARM_EXCEPTION_TRAP
	b	__kvm_vcpu_return

	/*
	 * Translation failed. Just return to the guest and
	 * let it fault again. Another CPU is probably playing
	 * behind our back.
	 */
3:	pop	x2, x3
	pop	x0, x1

	eret

el1_irq:
	push	x0, x1
	push	x2, x3
	mrs	x0, tpidr_el2
	mov	x1, #ARM_EXCEPTION_IRQ
	b	__kvm_vcpu_return

	.ltorg

	.align	11

ENTRY(__kvm_hyp_vector)
	ventry	el2t_sync_invalid	// Synchronous EL2t
	ventry	el2t_irq_invalid	// IRQ EL2t
	ventry	el2t_fiq_invalid	// FIQ EL2t
	ventry	el2t_error_invalid	// Error EL2t

	ventry	el2h_sync_invalid	// Synchronous EL2h
	ventry	el2h_irq_invalid	// IRQ EL2h
	ventry	el2h_fiq_invalid	// FIQ EL2h
	ventry	el2h_error_invalid	// Error EL2h

	ventry	el1_sync		// Synchronous 64-bit EL1
	ventry	el1_irq			// IRQ 64-bit EL1
	ventry	el1_fiq_invalid		// FIQ 64-bit EL1
	ventry	el1_error_invalid	// Error 64-bit EL1

	ventry	el1_sync		// Synchronous 32-bit EL1
	ventry	el1_irq			// IRQ 32-bit EL1
	ventry	el1_fiq_invalid		// FIQ 32-bit EL1
	ventry	el1_error_invalid	// Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)

	.popsection