arch/arm64/kvm/hyp.S
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/asm-offsets.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#define CPU_GP_REG_OFFSET(x)	(CPU_GP_REGS + x)
#define CPU_XREG_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
#define CPU_SPSR_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_SPSR + 8*x)
#define CPU_SYSREG_OFFSET(x)	(CPU_SYSREGS + 8*x)
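
/*
 * A hedged illustration (not from the original source): assuming the usual
 * struct kvm_cpu_context layout from asm/kvm_host.h, with CPU_GP_REGS,
 * CPU_USER_PT_REGS, CPU_SPSR and CPU_SYSREGS generated by asm-offsets.c,
 * these macros compute 8-byte-slot offsets roughly like:
 *
 *	CPU_XREG_OFFSET(19)          ~= offsetof(struct kvm_cpu_context,
 *	                                         gp_regs.regs.regs[19])
 *	CPU_SYSREG_OFFSET(MPIDR_EL1) ~= offsetof(struct kvm_cpu_context,
 *	                                         sys_regs[MPIDR_EL1])
 *
 * i.e. every saved register lives at a fixed base plus 8 * index.
 */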
	.text
	.pushsection	.hyp.text, "ax"
	.align	PAGE_SHIFT

.macro save_common_regs
	// x2: base address for cpu context
	// x3: tmp register

	add	x3, x2, #CPU_XREG_OFFSET(19)
	stp	x19, x20, [x3]
	stp	x21, x22, [x3, #16]
	stp	x23, x24, [x3, #32]
	stp	x25, x26, [x3, #48]
	stp	x27, x28, [x3, #64]
	stp	x29, lr, [x3, #80]

	mrs	x19, sp_el0
	mrs	x20, elr_el2		// EL1 PC
	mrs	x21, spsr_el2		// EL1 pstate

	stp	x19, x20, [x3, #96]
	str	x21, [x3, #112]

	mrs	x22, sp_el1
	mrs	x23, elr_el1
	mrs	x24, spsr_el1

	str	x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
	str	x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
	str	x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]
.endm

.macro restore_common_regs
	// x2: base address for cpu context
	// x3: tmp register

	ldr	x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
	ldr	x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
	ldr	x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]

	msr	sp_el1, x22
	msr	elr_el1, x23
	msr	spsr_el1, x24

	add	x3, x2, #CPU_XREG_OFFSET(31)	// SP_EL0
	ldp	x19, x20, [x3]
	ldr	x21, [x3, #16]

	msr	sp_el0, x19
	msr	elr_el2, x20		// EL1 PC
	msr	spsr_el2, x21		// EL1 pstate

	add	x3, x2, #CPU_XREG_OFFSET(19)
	ldp	x19, x20, [x3]
	ldp	x21, x22, [x3, #16]
	ldp	x23, x24, [x3, #32]
	ldp	x25, x26, [x3, #48]
	ldp	x27, x28, [x3, #64]
	ldp	x29, lr, [x3, #80]
.endm

.macro save_host_regs
	save_common_regs
.endm

.macro restore_host_regs
	restore_common_regs
.endm

.macro save_fpsimd
	// x2: cpu context address
	// x3, x4: tmp regs
	add	x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
	fpsimd_save x3, 4
.endm

.macro restore_fpsimd
	// x2: cpu context address
	// x3, x4: tmp regs
	add	x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
	fpsimd_restore x3, 4
.endm

.macro save_guest_regs
	// x0 is the vcpu address
	// x1 is the return code, do not corrupt!
	// x2 is the cpu context
	// x3 is a tmp register
	// Guest's x0-x3 are on the stack

	// Compute base to save registers
	add	x3, x2, #CPU_XREG_OFFSET(4)
	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]
	stp	x8, x9, [x3, #32]
	stp	x10, x11, [x3, #48]
	stp	x12, x13, [x3, #64]
	stp	x14, x15, [x3, #80]
	stp	x16, x17, [x3, #96]
	str	x18, [x3, #112]

	pop	x6, x7			// x2, x3
	pop	x4, x5			// x0, x1

	add	x3, x2, #CPU_XREG_OFFSET(0)
	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]

	save_common_regs
.endm

.macro restore_guest_regs
	// x0 is the vcpu address.
	// x2 is the cpu context
	// x3 is a tmp register

	// Prepare x0-x3 for later restore
	add	x3, x2, #CPU_XREG_OFFSET(0)
	ldp	x4, x5, [x3]
	ldp	x6, x7, [x3, #16]
	push	x4, x5		// Push x0-x3 on the stack
	push	x6, x7

	// x4-x18
	ldp	x4, x5, [x3, #32]
	ldp	x6, x7, [x3, #48]
	ldp	x8, x9, [x3, #64]
	ldp	x10, x11, [x3, #80]
	ldp	x12, x13, [x3, #96]
	ldp	x14, x15, [x3, #112]
	ldp	x16, x17, [x3, #128]
	ldr	x18, [x3, #144]

	// x19-x29, lr, sp*, elr*, spsr*
	restore_common_regs

	// Last bits of the 64bit state
	pop	x2, x3
	pop	x0, x1

	// Do not touch any register after this!
.endm

/*
 * Macros to perform system register save/restore.
 *
 * Ordering here is absolutely critical, and must be kept consistent
 * in {save,restore}_sysregs, {save,restore}_guest_32bit_state,
 * and in kvm_asm.h.
 *
 * In other words, don't touch any of these unless you know what
 * you are doing.
 */
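/*
 * A hedged sketch (not from the original source) of what the ordering
 * constraint means: the mrs/stp and ldp/msr blocks below behave roughly
 * like
 *
 *	for (i = MPIDR_EL1; i <= PAR_EL1; i++)
 *		ctxt->sys_regs[i] = <value of the i-th system register>;
 *
 * so the instruction sequence must match the order of the sysreg index
 * #defines in asm/kvm_asm.h exactly. The loop above is conceptual
 * pseudocode only, not a real kernel helper.
 */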
.macro save_sysregs
	// x2: base address for cpu context
	// x3: tmp register

	add	x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)

	mrs	x4, vmpidr_el2
	mrs	x5, csselr_el1
	mrs	x6, sctlr_el1
	mrs	x7, actlr_el1
	mrs	x8, cpacr_el1
	mrs	x9, ttbr0_el1
	mrs	x10, ttbr1_el1
	mrs	x11, tcr_el1
	mrs	x12, esr_el1
	mrs	x13, afsr0_el1
	mrs	x14, afsr1_el1
	mrs	x15, far_el1
	mrs	x16, mair_el1
	mrs	x17, vbar_el1
	mrs	x18, contextidr_el1
	mrs	x19, tpidr_el0
	mrs	x20, tpidrro_el0
	mrs	x21, tpidr_el1
	mrs	x22, amair_el1
	mrs	x23, cntkctl_el1
	mrs	x24, par_el1

	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]
	stp	x8, x9, [x3, #32]
	stp	x10, x11, [x3, #48]
	stp	x12, x13, [x3, #64]
	stp	x14, x15, [x3, #80]
	stp	x16, x17, [x3, #96]
	stp	x18, x19, [x3, #112]
	stp	x20, x21, [x3, #128]
	stp	x22, x23, [x3, #144]
	str	x24, [x3, #160]
.endm

.macro restore_sysregs
	// x2: base address for cpu context
	// x3: tmp register

	add	x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)

	ldp	x4, x5, [x3]
	ldp	x6, x7, [x3, #16]
	ldp	x8, x9, [x3, #32]
	ldp	x10, x11, [x3, #48]
	ldp	x12, x13, [x3, #64]
	ldp	x14, x15, [x3, #80]
	ldp	x16, x17, [x3, #96]
	ldp	x18, x19, [x3, #112]
	ldp	x20, x21, [x3, #128]
	ldp	x22, x23, [x3, #144]
	ldr	x24, [x3, #160]

	msr	vmpidr_el2, x4
	msr	csselr_el1, x5
	msr	sctlr_el1, x6
	msr	actlr_el1, x7
	msr	cpacr_el1, x8
	msr	ttbr0_el1, x9
	msr	ttbr1_el1, x10
	msr	tcr_el1, x11
	msr	esr_el1, x12
	msr	afsr0_el1, x13
	msr	afsr1_el1, x14
	msr	far_el1, x15
	msr	mair_el1, x16
	msr	vbar_el1, x17
	msr	contextidr_el1, x18
	msr	tpidr_el0, x19
	msr	tpidrro_el0, x20
	msr	tpidr_el1, x21
	msr	amair_el1, x22
	msr	cntkctl_el1, x23
	msr	par_el1, x24
.endm

.macro skip_32bit_state tmp, target
	// Skip 32bit state if not needed
	mrs	\tmp, hcr_el2
	tbnz	\tmp, #HCR_RW_SHIFT, \target
.endm

.macro skip_tee_state tmp, target
	// Skip ThumbEE state if not needed
	mrs	\tmp, id_pfr0_el1
	tbz	\tmp, #12, \target
.endm
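
/*
 * Background for the two tests above (a hedged note, not from the original
 * source): HCR_EL2.RW set means the guest's EL1 runs in AArch64, so there
 * is no AArch32 state to save; ID_PFR0_EL1 bits [15:12] report ThumbEE
 * support, so a clear bit 12 means that state doesn't exist.
 */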

.macro save_guest_32bit_state
	skip_32bit_state x3, 1f

	add	x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
	mrs	x4, spsr_abt
	mrs	x5, spsr_und
	mrs	x6, spsr_irq
	mrs	x7, spsr_fiq
	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]

	add	x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
	mrs	x4, dacr32_el2
	mrs	x5, ifsr32_el2
	mrs	x6, fpexc32_el2
	mrs	x7, dbgvcr32_el2
	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]

	skip_tee_state x8, 1f

	add	x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
	mrs	x4, teecr32_el1
	mrs	x5, teehbr32_el1
	stp	x4, x5, [x3]
1:
.endm

.macro restore_guest_32bit_state
	skip_32bit_state x3, 1f

	add	x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
	ldp	x4, x5, [x3]
	ldp	x6, x7, [x3, #16]
	msr	spsr_abt, x4
	msr	spsr_und, x5
	msr	spsr_irq, x6
	msr	spsr_fiq, x7

	add	x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
	ldp	x4, x5, [x3]
	ldp	x6, x7, [x3, #16]
	msr	dacr32_el2, x4
	msr	ifsr32_el2, x5
	msr	fpexc32_el2, x6
	msr	dbgvcr32_el2, x7

	skip_tee_state x8, 1f

	add	x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
	ldp	x4, x5, [x3]
	msr	teecr32_el1, x4
	msr	teehbr32_el1, x5
1:
.endm

.macro activate_traps
	ldr	x2, [x0, #VCPU_HCR_EL2]
	msr	hcr_el2, x2
	ldr	x2, =(CPTR_EL2_TTA)
	msr	cptr_el2, x2

	ldr	x2, =(1 << 15)	// Trap CP15 Cr=15
	msr	hstr_el2, x2

	mrs	x2, mdcr_el2
	and	x2, x2, #MDCR_EL2_HPMN_MASK
	orr	x2, x2, #(MDCR_EL2_TPM | MDCR_EL2_TPMCR)
	msr	mdcr_el2, x2
.endm

.macro deactivate_traps
	mov	x2, #HCR_RW
	msr	hcr_el2, x2
	msr	cptr_el2, xzr
	msr	hstr_el2, xzr

	mrs	x2, mdcr_el2
	and	x2, x2, #MDCR_EL2_HPMN_MASK
	msr	mdcr_el2, x2
.endm

.macro activate_vm
	ldr	x1, [x0, #VCPU_KVM]
	kern_hyp_va	x1
	ldr	x2, [x1, #KVM_VTTBR]
	msr	vttbr_el2, x2
.endm

.macro deactivate_vm
	msr	vttbr_el2, xzr
.endm

/*
 * Call into the vgic backend for state saving
 */
.macro save_vgic_state
	adr	x24, __vgic_sr_vectors
	ldr	x24, [x24, VGIC_SAVE_FN]
	kern_hyp_va	x24
	blr	x24
	mrs	x24, hcr_el2
	mov	x25, #HCR_INT_OVERRIDE
	neg	x25, x25
	and	x24, x24, x25
	msr	hcr_el2, x24
.endm

/*
 * Call into the vgic backend for state restoring
 */
.macro restore_vgic_state
	mrs	x24, hcr_el2
	ldr	x25, [x0, #VCPU_IRQ_LINES]
	orr	x24, x24, #HCR_INT_OVERRIDE
	orr	x24, x24, x25
	msr	hcr_el2, x24
	adr	x24, __vgic_sr_vectors
	ldr	x24, [x24, #VGIC_RESTORE_FN]
	kern_hyp_va	x24
	blr	x24
.endm

.macro save_timer_state
	// x0: vcpu pointer
	ldr	x2, [x0, #VCPU_KVM]
	kern_hyp_va x2
	ldr	w3, [x2, #KVM_TIMER_ENABLED]
	cbz	w3, 1f

	mrs	x3, cntv_ctl_el0
	and	x3, x3, #3
	str	w3, [x0, #VCPU_TIMER_CNTV_CTL]
	bic	x3, x3, #1		// Clear Enable
	msr	cntv_ctl_el0, x3

	isb

	mrs	x3, cntv_cval_el0
	str	x3, [x0, #VCPU_TIMER_CNTV_CVAL]

1:
	// Allow physical timer/counter access for the host
	mrs	x2, cnthctl_el2
	orr	x2, x2, #3
	msr	cnthctl_el2, x2

	// Clear cntvoff for the host
	msr	cntvoff_el2, xzr
.endm

.macro restore_timer_state
	// x0: vcpu pointer
	// Disallow physical timer access for the guest
	// Physical counter access is allowed
	mrs	x2, cnthctl_el2
	orr	x2, x2, #1
	bic	x2, x2, #2
	msr	cnthctl_el2, x2

	ldr	x2, [x0, #VCPU_KVM]
	kern_hyp_va x2
	ldr	w3, [x2, #KVM_TIMER_ENABLED]
	cbz	w3, 1f

	ldr	x3, [x2, #KVM_TIMER_CNTVOFF]
	msr	cntvoff_el2, x3
	ldr	x2, [x0, #VCPU_TIMER_CNTV_CVAL]
	msr	cntv_cval_el0, x2
	isb

	ldr	w2, [x0, #VCPU_TIMER_CNTV_CTL]
	and	x2, x2, #3
	msr	cntv_ctl_el0, x2
1:
.endm

__save_sysregs:
	save_sysregs
	ret

__restore_sysregs:
	restore_sysregs
	ret

__save_fpsimd:
	save_fpsimd
	ret

__restore_fpsimd:
	restore_fpsimd
	ret

/*
 * u64 __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 *
 * This is the world switch. The first half of the function
 * deals with entering the guest, and anything from __kvm_vcpu_return
 * to the end of the function deals with reentering the host.
 * On the enter path, only x0 (vcpu pointer) must be preserved until
 * the last moment. On the exit path, x0 (vcpu pointer) and x1 (exception
 * code) must both be preserved until the epilogue.
 * In both cases, x2 points to the CPU context we're saving/restoring from/to.
 */
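/*
 * A hedged sketch of the caller side (not part of this file): the host run
 * loop is assumed to enter here via kvm_call_hyp() and dispatch on the
 * returned exception code, roughly:
 *
 *	u64 exit_reason = kvm_call_hyp(__kvm_vcpu_run, vcpu);
 *	switch (exit_reason) {
 *	case ARM_EXCEPTION_IRQ:		// host interrupt, just resume
 *	case ARM_EXCEPTION_TRAP:	// guest trap, handled back in C
 *		...
 *	}
 *
 * The exact host-side function names are assumptions; only the exception
 * codes come from this file.
 */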
ENTRY(__kvm_vcpu_run)
	kern_hyp_va	x0
	msr	tpidr_el2, x0	// Save the vcpu register

	// Host context
	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	save_host_regs
	bl __save_fpsimd
	bl __save_sysregs

	activate_traps
	activate_vm

	restore_vgic_state
	restore_timer_state

	// Guest context
	add	x2, x0, #VCPU_CONTEXT

	bl __restore_sysregs
	bl __restore_fpsimd
	restore_guest_32bit_state
	restore_guest_regs

	// That's it, no more messing around.
	eret

__kvm_vcpu_return:
	// Assume x0 is the vcpu pointer, x1 the return code
	// Guest's x0-x3 are on the stack

	// Guest context
	add	x2, x0, #VCPU_CONTEXT

	save_guest_regs
	bl __save_fpsimd
	bl __save_sysregs
	save_guest_32bit_state

	save_timer_state
	save_vgic_state

	deactivate_traps
	deactivate_vm

	// Host context
	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	bl __restore_sysregs
	bl __restore_fpsimd
	restore_host_regs

	mov	x0, x1
	ret
END(__kvm_vcpu_run)

// void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
ENTRY(__kvm_tlb_flush_vmid_ipa)
	dsb	ishst

	kern_hyp_va	x0
	ldr	x2, [x0, #KVM_VTTBR]
	msr	vttbr_el2, x2
	isb

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
	tlbi	ipas2e1is, x1
	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
	dsb	ish
	tlbi	vmalle1is
	dsb	ish
	isb

	msr	vttbr_el2, xzr
	ret
ENDPROC(__kvm_tlb_flush_vmid_ipa)

ENTRY(__kvm_flush_vm_context)
	dsb	ishst
	tlbi	alle1is
	ic	ialluis
	dsb	ish
	ret
ENDPROC(__kvm_flush_vm_context)

	// struct vgic_sr_vectors __vgic_sr_vectors;
	.align 3
ENTRY(__vgic_sr_vectors)
	.skip	VGIC_SR_VECTOR_SZ
ENDPROC(__vgic_sr_vectors)

__kvm_hyp_panic:
	// Guess the context by looking at VTTBR:
	// If zero, then we're already a host.
	// Otherwise restore a minimal host context before panicking.
	mrs	x0, vttbr_el2
	cbz	x0, 1f

	mrs	x0, tpidr_el2

	deactivate_traps
	deactivate_vm

	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	bl __restore_sysregs

1:	adr	x0, __hyp_panic_str
	adr	x1, 2f
	ldp	x2, x3, [x1]
	sub	x0, x0, x2
	add	x0, x0, x3
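	/*
	 * A hedged note (not in the original source): the three lines above
	 * convert the HYP-mode address of __hyp_panic_str into the kernel
	 * address expected by panic(), effectively
	 * x0 = x0 - HYP_PAGE_OFFSET + PAGE_OFFSET, using the two literals
	 * stored at 2: below.
	 */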
	mrs	x1, spsr_el2
	mrs	x2, elr_el2
	mrs	x3, esr_el2
	mrs	x4, far_el2
	mrs	x5, hpfar_el2
	mrs	x6, par_el1
	mrs	x7, tpidr_el2

	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =panic
	msr	elr_el2, lr
	eret

	.align	3
2:	.quad	HYP_PAGE_OFFSET
	.quad	PAGE_OFFSET
ENDPROC(__kvm_hyp_panic)

__hyp_panic_str:
	.ascii	"HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0"

	.align	2

/*
 * u64 kvm_call_hyp(void *hypfn, ...);
 *
 * This is not really a variadic function in the classic C-way and care must
 * be taken when calling this to ensure parameters are passed in registers
 * only, since the stack will change between the caller and the callee.
 *
 * Call the function with the first argument containing a pointer to the
 * function you wish to call in Hyp mode, and subsequent arguments will be
 * passed as x0, x1, and x2 (a maximum of 3 arguments in addition to the
 * function pointer can be passed). The function being called must be mapped
 * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are
 * passed in x0 and x1.
 *
 * A function pointer with a value of 0 has a special meaning, and is
 * used to implement __hyp_get_vectors in the same way as in
 * arch/arm64/kernel/hyp_stub.S.
 */
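/*
 * A hedged usage sketch (caller side, not part of this file), based only on
 * the prototypes that appear elsewhere in this file:
 *
 *	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
 *	kvm_call_hyp(__kvm_flush_vm_context);
 *	vectors = kvm_call_hyp((void *)0);	// returns VBAR_EL2, as
 *						// __hyp_get_vectors would
 *
 * Arguments end up in x0-x2 once the function pointer has been shuffled
 * away by the el1_sync handler below.
 */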
ENTRY(kvm_call_hyp)
	hvc	#0
	ret
ENDPROC(kvm_call_hyp)

.macro invalid_vector	label, target
	.align	2
\label:
	b \target
ENDPROC(\label)
.endm

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid, __kvm_hyp_panic
	invalid_vector	el2t_irq_invalid, __kvm_hyp_panic
	invalid_vector	el2t_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el2t_error_invalid, __kvm_hyp_panic
	invalid_vector	el2h_sync_invalid, __kvm_hyp_panic
	invalid_vector	el2h_irq_invalid, __kvm_hyp_panic
	invalid_vector	el2h_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el2h_error_invalid, __kvm_hyp_panic
	invalid_vector	el1_sync_invalid, __kvm_hyp_panic
	invalid_vector	el1_irq_invalid, __kvm_hyp_panic
	invalid_vector	el1_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el1_error_invalid, __kvm_hyp_panic

el1_sync:				// Guest trapped into EL2
	push	x0, x1
	push	x2, x3

	mrs	x1, esr_el2
	lsr	x2, x1, #ESR_EL2_EC_SHIFT

	cmp	x2, #ESR_EL2_EC_HVC64
	b.ne	el1_trap

	mrs	x3, vttbr_el2		// If vttbr is valid, the 64bit guest
	cbnz	x3, el1_trap		// called HVC

	/* Here, we're pretty sure the host called HVC. */
	pop	x2, x3
	pop	x0, x1

	/* Check for __hyp_get_vectors */
	cbnz	x0, 1f
	mrs	x0, vbar_el2
	b	2f

1:	push	lr, xzr

	/*
	 * Compute the function address in EL2, and shuffle the parameters.
	 */
	kern_hyp_va	x0
	mov	lr, x0
	mov	x0, x1
	mov	x1, x2
	mov	x2, x3
	blr	lr

	pop	lr, xzr
2:	eret

el1_trap:
	/*
	 * x1: ESR
	 * x2: ESR_EC
	 */
	cmp	x2, #ESR_EL2_EC_DABT
	mov	x0, #ESR_EL2_EC_IABT
	ccmp	x2, x0, #4, ne
	b.ne	1f		// Not an abort we care about

	/* This is an abort. Check for permission fault */
	and	x2, x1, #ESR_EL2_FSC_TYPE
	cmp	x2, #FSC_PERM
	b.ne	1f		// Not a permission fault

	/*
	 * Check for Stage-1 page table walk, which is guaranteed
	 * to give a valid HPFAR_EL2.
	 */
	tbnz	x1, #7, 1f	// S1PTW is set

	/* Preserve PAR_EL1 */
	mrs	x3, par_el1
	push	x3, xzr

	/*
	 * Permission fault, HPFAR_EL2 is invalid.
	 * Resolve the IPA the hard way using the guest VA.
	 * Stage-1 translation already validated the memory access rights.
	 * As such, we can use the EL1 translation regime, and don't have
	 * to distinguish between EL0 and EL1 access.
	 */
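	/*
	 * A hedged C-level sketch of the AT-based lookup below (illustrative
	 * only, not a helper that exists in the kernel):
	 *
	 *	at_s1e1r(far_el2);		// walk the guest Stage-1 tables
	 *	par = read(PAR_EL1);
	 *	if (!(par & 1))			// translation succeeded
	 *		hpfar = ((par >> 12) & ((1UL << 36) - 1)) << 4;
	 *
	 * which matches the ubfx/lsl sequence that presents the IPA in
	 * HPFAR_EL2 format.
	 */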
	mrs	x2, far_el2
	at	s1e1r, x2
	isb

	/* Read result */
	mrs	x3, par_el1
	pop	x0, xzr			// Restore PAR_EL1 from the stack
	msr	par_el1, x0
	tbnz	x3, #0, 3f		// Bail out if we failed the translation
	ubfx	x3, x3, #12, #36	// Extract IPA
	lsl	x3, x3, #4		// and present it like HPFAR
	b	2f

1:	mrs	x3, hpfar_el2
	mrs	x2, far_el2

2:	mrs	x0, tpidr_el2
	str	x1, [x0, #VCPU_ESR_EL2]
	str	x2, [x0, #VCPU_FAR_EL2]
	str	x3, [x0, #VCPU_HPFAR_EL2]

	mov	x1, #ARM_EXCEPTION_TRAP
	b	__kvm_vcpu_return

	/*
	 * Translation failed. Just return to the guest and
	 * let it fault again. Another CPU is probably playing
	 * behind our back.
	 */
3:	pop	x2, x3
	pop	x0, x1

	eret

el1_irq:
	push	x0, x1
	push	x2, x3
	mrs	x0, tpidr_el2
	mov	x1, #ARM_EXCEPTION_IRQ
	b	__kvm_vcpu_return

	.ltorg

	.align 11

ENTRY(__kvm_hyp_vector)
	ventry	el2t_sync_invalid	// Synchronous EL2t
	ventry	el2t_irq_invalid	// IRQ EL2t
	ventry	el2t_fiq_invalid	// FIQ EL2t
	ventry	el2t_error_invalid	// Error EL2t

	ventry	el2h_sync_invalid	// Synchronous EL2h
	ventry	el2h_irq_invalid	// IRQ EL2h
	ventry	el2h_fiq_invalid	// FIQ EL2h
	ventry	el2h_error_invalid	// Error EL2h

	ventry	el1_sync		// Synchronous 64-bit EL1
	ventry	el1_irq			// IRQ 64-bit EL1
	ventry	el1_fiq_invalid		// FIQ 64-bit EL1
	ventry	el1_error_invalid	// Error 64-bit EL1

	ventry	el1_sync		// Synchronous 32-bit EL1
	ventry	el1_irq			// IRQ 32-bit EL1
	ventry	el1_fiq_invalid		// FIQ 32-bit EL1
	ventry	el1_error_invalid	// Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)

	.popsection