Commit | Line | Data |
---|---|---|
4f8d6632 MZ |
1 | /* |
2 | * Copyright (C) 2012,2013 - ARM Ltd | |
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | |
4 | * | |
5 | * Derived from arch/arm/include/asm/kvm_host.h: | |
6 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | |
7 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | |
8 | * | |
9 | * This program is free software; you can redistribute it and/or modify | |
10 | * it under the terms of the GNU General Public License version 2 as | |
11 | * published by the Free Software Foundation. | |
12 | * | |
13 | * This program is distributed in the hope that it will be useful, | |
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
16 | * GNU General Public License for more details. | |
17 | * | |
18 | * You should have received a copy of the GNU General Public License | |
19 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | |
20 | */ | |
21 | ||
22 | #ifndef __ARM64_KVM_HOST_H__ | |
23 | #define __ARM64_KVM_HOST_H__ | |
24 | ||
25 | #include <asm/kvm.h> | |
26 | #include <asm/kvm_asm.h> | |
27 | #include <asm/kvm_mmio.h> | |
28 | ||
29 | #define KVM_MAX_VCPUS 4 | |
30 | #define KVM_USER_MEM_SLOTS 32 | |
31 | #define KVM_PRIVATE_MEM_SLOTS 4 | |
32 | #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 | |
33 | ||
34 | #include <kvm/arm_vgic.h> | |
35 | #include <kvm/arm_arch_timer.h> | |
36 | ||
dcd2e40c | 37 | #define KVM_VCPU_MAX_FEATURES 1 |
4f8d6632 MZ |
38 | |
/*
 * We don't currently support large pages: expose a single page size
 * and make hugepage regions effectively unreachable (2GB per "hpage").
 */
#define KVM_HPAGE_GFN_SHIFT(x)	0
#define KVM_NR_PAGE_SIZES	1
#define KVM_PAGES_PER_HPAGE(x)	(1UL << 31)
43 | ||
struct kvm_vcpu;

/* Identify the KVM target type of the host CPU. */
int kvm_target_cpu(void);
/* Put a vcpu back into its architected reset state. */
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
/* arm64 backend for the KVM_CHECK_EXTENSION device ioctl. */
int kvm_arch_dev_ioctl_check_extension(long ext);
48 | ||
49 | struct kvm_arch { | |
50 | /* The VMID generation used for the virt. memory system */ | |
51 | u64 vmid_gen; | |
52 | u32 vmid; | |
53 | ||
54 | /* 1-level 2nd stage table and lock */ | |
55 | spinlock_t pgd_lock; | |
56 | pgd_t *pgd; | |
57 | ||
58 | /* VTTBR value associated with above pgd and vmid */ | |
59 | u64 vttbr; | |
60 | ||
61 | /* Interrupt controller */ | |
62 | struct vgic_dist vgic; | |
63 | ||
64 | /* Timer */ | |
65 | struct arch_timer_kvm timer; | |
66 | }; | |
67 | ||
/* Number of objects preallocated per MMU memory cache. */
#define KVM_NR_MEM_OBJS 40

/*
 * We don't want allocation failures within the mmu code, so we
 * preallocate enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;			/* count of valid entries in objects[] */
	void *objects[KVM_NR_MEM_OBJS];
};
78 | ||
79 | struct kvm_vcpu_fault_info { | |
80 | u32 esr_el2; /* Hyp Syndrom Register */ | |
81 | u64 far_el2; /* Hyp Fault Address Register */ | |
82 | u64 hpfar_el2; /* Hyp IPA Fault Address Register */ | |
83 | }; | |
84 | ||
85 | struct kvm_cpu_context { | |
86 | struct kvm_regs gp_regs; | |
87 | u64 sys_regs[NR_SYS_REGS]; | |
88 | }; | |
89 | ||
90 | typedef struct kvm_cpu_context kvm_cpu_context_t; | |
91 | ||
92 | struct kvm_vcpu_arch { | |
93 | struct kvm_cpu_context ctxt; | |
94 | ||
95 | /* HYP configuration */ | |
96 | u64 hcr_el2; | |
97 | ||
98 | /* Exception Information */ | |
99 | struct kvm_vcpu_fault_info fault; | |
100 | ||
101 | /* Pointer to host CPU context */ | |
102 | kvm_cpu_context_t *host_cpu_context; | |
103 | ||
104 | /* VGIC state */ | |
105 | struct vgic_cpu vgic_cpu; | |
106 | struct arch_timer_cpu timer_cpu; | |
107 | ||
108 | /* | |
109 | * Anything that is not used directly from assembly code goes | |
110 | * here. | |
111 | */ | |
112 | /* dcache set/way operation pending */ | |
113 | int last_pcpu; | |
114 | cpumask_t require_dcache_flush; | |
115 | ||
116 | /* Don't run the guest */ | |
117 | bool pause; | |
118 | ||
119 | /* IO related fields */ | |
120 | struct kvm_decode mmio_decode; | |
121 | ||
122 | /* Interrupt related fields */ | |
123 | u64 irq_lines; /* IRQ and FIQ levels */ | |
124 | ||
125 | /* Cache some mmu pages needed inside spinlock regions */ | |
126 | struct kvm_mmu_memory_cache mmu_page_cache; | |
127 | ||
128 | /* Target CPU and feature flags */ | |
129 | u32 target; | |
130 | DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES); | |
131 | ||
132 | /* Detect first run of a vcpu */ | |
133 | bool has_run_once; | |
134 | }; | |
135 | ||
/* Accessors for the register context embedded in kvm_vcpu_arch. */
#define vcpu_gp_regs(v)		(&(v)->arch.ctxt.gp_regs)
#define vcpu_sys_reg(v,r)	((v)->arch.ctxt.sys_regs[(r)])
/* NOTE(review): kvm_cpu_context as shown in this chunk declares no cp15
 * member — verify against the full header; a line may have been lost. */
#define vcpu_cp15(v,r)		((v)->arch.ctxt.cp15[(r)])
139 | ||
140 | struct kvm_vm_stat { | |
141 | u32 remote_tlb_flush; | |
142 | }; | |
143 | ||
144 | struct kvm_vcpu_stat { | |
145 | u32 halt_wakeup; | |
146 | }; | |
147 | ||
148 | struct kvm_vcpu_init; | |
149 | int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, | |
150 | const struct kvm_vcpu_init *init); | |
151 | unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); | |
152 | int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices); | |
153 | struct kvm_one_reg; | |
154 | int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); | |
155 | int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); | |
156 | ||
157 | #define KVM_ARCH_WANT_MMU_NOTIFIER | |
158 | struct kvm; | |
159 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); | |
160 | int kvm_unmap_hva_range(struct kvm *kvm, | |
161 | unsigned long start, unsigned long end); | |
162 | void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); | |
163 | ||
/*
 * We do not have shadow page tables, so page aging has nothing to
 * track — both hooks unconditionally report "not accessed".
 */
static inline int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
	(void)kvm;
	(void)hva;
	return 0;
}

static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	(void)kvm;
	(void)hva;
	return 0;
}
174 | ||
175 | struct kvm_vcpu *kvm_arm_get_running_vcpu(void); | |
176 | struct kvm_vcpu __percpu **kvm_get_running_vcpus(void); | |
177 | ||
178 | u64 kvm_call_hyp(void *hypfn, ...); | |
179 | ||
180 | int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, | |
181 | int exception_index); | |
182 | ||
183 | int kvm_perf_init(void); | |
184 | int kvm_perf_teardown(void); | |
185 | ||
092bd143 MZ |
186 | static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr, |
187 | phys_addr_t pgd_ptr, | |
188 | unsigned long hyp_stack_ptr, | |
189 | unsigned long vector_ptr) | |
190 | { | |
191 | /* | |
192 | * Call initialization code, and switch to the full blown | |
193 | * HYP code. | |
194 | */ | |
195 | kvm_call_hyp((void *)boot_pgd_ptr, pgd_ptr, | |
196 | hyp_stack_ptr, vector_ptr); | |
197 | } | |
198 | ||
4f8d6632 | 199 | #endif /* __ARM64_KVM_HOST_H__ */ |