arm64: KVM: VHE: Patch out kern_hyp_va
arch/arm64/kvm/hyp/hyp.h
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_HYP_H__
#define __ARM64_KVM_HYP_H__

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/kvm_mmu.h>
#include <asm/sysreg.h>

#define __hyp_text __section(.hyp.text) notrace

static inline unsigned long __kern_hyp_va(unsigned long v)
{
	asm volatile(ALTERNATIVE("and %0, %0, %1",
				 "nop",
				 ARM64_HAS_VIRT_HOST_EXTN)
		     : "+r" (v) : "i" (HYP_PAGE_OFFSET_MASK));
	return v;
}

#define kern_hyp_va(v) (typeof(v))(__kern_hyp_va((unsigned long)(v)))

static inline unsigned long __hyp_kern_va(unsigned long v)
{
	u64 offset = PAGE_OFFSET - HYP_PAGE_OFFSET;
	asm volatile(ALTERNATIVE("add %0, %0, %1",
				 "nop",
				 ARM64_HAS_VIRT_HOST_EXTN)
		     : "+r" (v) : "r" (offset));
	return v;
}

#define hyp_kern_va(v) (typeof(v))(__hyp_kern_va((unsigned long)(v)))
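
/*
 * Usage sketch (illustrative; not part of the original header, and the
 * variable names are assumptions): a kernel VA must be translated
 * before it can be used at EL2. Without VHE, the "and" above masks a
 * kernel address down into the HYP VA range; with VHE the kernel
 * itself runs at EL2, so the alternative patches the instruction to a
 * nop and the address is used unchanged:
 *
 *	struct kvm_vcpu *hyp_vcpu = kern_hyp_va(vcpu);
 */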

/**
 * hyp_alternate_select - Generates patchable code sequences that are
 * used to switch between two implementations of a function, depending
 * on the availability of a feature.
 *
 * @fname: a symbol name that will be defined as a function returning a
 * function pointer whose type will match @orig and @alt
 * @orig: A pointer to the default function, as returned by @fname when
 * @cond doesn't hold
 * @alt: A pointer to the alternate function, as returned by @fname
 * when @cond holds
 * @cond: a CPU feature (as described in asm/cpufeature.h)
 */
#define hyp_alternate_select(fname, orig, alt, cond)			\
typeof(orig) * __hyp_text fname(void)					\
{									\
	typeof(alt) *val = orig;					\
	asm volatile(ALTERNATIVE("nop		\n",			\
				 "mov	%0, %1	\n",			\
				 cond)					\
		     : "+r" (val) : "r" (alt));				\
	return val;							\
}
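
/*
 * Usage sketch (illustrative; patterned on the callers in
 * arch/arm64/kvm/hyp/switch.c, so the exact names here are
 * assumptions): define a per-feature dispatcher once, then call
 * through the pointer it returns. The "mov" is patched in at boot
 * when the CPU feature is present, so no runtime test is needed:
 *
 *	static hyp_alternate_select(__vgic_call_save_state,
 *				    __vgic_v2_save_state,
 *				    __vgic_v3_save_state,
 *				    ARM64_HAS_SYSREG_GIC_CPUIF);
 *
 *	static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
 *	{
 *		__vgic_call_save_state()(vcpu);
 *	}
 */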

void __vgic_v2_save_state(struct kvm_vcpu *vcpu);
void __vgic_v2_restore_state(struct kvm_vcpu *vcpu);

void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);

void __timer_save_state(struct kvm_vcpu *vcpu);
void __timer_restore_state(struct kvm_vcpu *vcpu);

void __sysreg_save_state(struct kvm_cpu_context *ctxt);
void __sysreg_restore_state(struct kvm_cpu_context *ctxt);
void __sysreg32_save_state(struct kvm_vcpu *vcpu);
void __sysreg32_restore_state(struct kvm_vcpu *vcpu);

void __debug_save_state(struct kvm_vcpu *vcpu,
			struct kvm_guest_debug_arch *dbg,
			struct kvm_cpu_context *ctxt);
void __debug_restore_state(struct kvm_vcpu *vcpu,
			   struct kvm_guest_debug_arch *dbg,
			   struct kvm_cpu_context *ctxt);
void __debug_cond_save_host_state(struct kvm_vcpu *vcpu);
void __debug_cond_restore_host_state(struct kvm_vcpu *vcpu);

void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
static inline bool __fpsimd_enabled(void)
{
	return !(read_sysreg(cptr_el2) & CPTR_EL2_TFP);
}

u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt);
void __noreturn __hyp_do_panic(unsigned long, ...);

#endif /* __ARM64_KVM_HYP_H__ */