/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#ifndef __ARM_KVM_MMU_H__
#define __ARM_KVM_MMU_H__

#include <asm/memory.h>
#include <asm/page.h>

/*
 * We directly use the kernel VA for the HYP, as we can directly share
 * the mapping (HTTBR "covers" TTBR1).
 */
#define HYP_PAGE_OFFSET_MASK	UL(~0)
#define HYP_PAGE_OFFSET		PAGE_OFFSET
#define KERN_TO_HYP(kva)	(kva)
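
/*
 * Illustrative only: because the HYP mapping aliases the kernel mapping
 * one-to-one, the conversion is the identity. For a hypothetical kernel
 * pointer kern_ptr:
 *
 *	unsigned long hyp_va = KERN_TO_HYP((unsigned long)kern_ptr);
 *
 * leaves hyp_va equal to kern_ptr.
 */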

/*
 * Our virtual mapping for the boot-time MMU-enable code. Must be
 * shared across all the page-tables. Conveniently, we use the vectors
 * page, where no kernel data will ever be shared with HYP.
 */
#define TRAMPOLINE_VA		UL(CONFIG_VECTORS_BASE)

/*
 * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table
 * translation levels.
 */
#define KVM_MMU_CACHE_MIN_PAGES	2
#ifndef __ASSEMBLY__

#include <linux/highmem.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/stage2_pgtable.h>

int create_hyp_mappings(void *from, void *to);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr);
void free_boot_hyp_pgd(void);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_mmu_get_boot_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
phys_addr_t kvm_get_idmap_start(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);
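
/*
 * Sketch of the expected init-time call order (illustrative; the real
 * callers live in the core KVM/ARM code, and the section marker names
 * are assumptions, not definitions from this header):
 *
 *	err = kvm_mmu_init();
 *	if (err)
 *		goto out_err;
 *	err = create_hyp_mappings(__hyp_text_start, __hyp_text_end);
 */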

static inline void kvm_set_pmd(pmd_t *pmd, pmd_t new_pmd)
{
	*pmd = new_pmd;
	flush_pmd_entry(pmd);
}

static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
{
	*pte = new_pte;
	/*
	 * flush_pmd_entry just takes a void pointer and cleans the necessary
	 * cache entries, so we can reuse the function for ptes.
	 */
	flush_pmd_entry(pte);
}

static inline void kvm_clean_pgd(pgd_t *pgd)
{
	clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
}

static inline void kvm_clean_pmd(pmd_t *pmd)
{
	clean_dcache_area(pmd, PTRS_PER_PMD * sizeof(pmd_t));
}

static inline void kvm_clean_pmd_entry(pmd_t *pmd)
{
	clean_pmd_entry(pmd);
}

static inline void kvm_clean_pte(pte_t *pte)
{
	clean_pte_table(pte);
}

static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= L_PTE_S2_RDWR;
	return pte;
}

static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= L_PMD_S2_RDWR;
	return pmd;
}
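
/*
 * Sketch (illustrative; pfn_pte() and PAGE_S2 come from the generic and
 * ARM pgtable headers): building a writable stage-2 PTE before handing
 * it to the stage-2 fault code:
 *
 *	pte_t new_pte = pfn_pte(pfn, PAGE_S2);
 *	if (writable)
 *		new_pte = kvm_s2pte_mkwrite(new_pte);
 */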

static inline void kvm_set_s2pte_readonly(pte_t *pte)
{
	pte_val(*pte) = (pte_val(*pte) & ~L_PTE_S2_RDWR) | L_PTE_S2_RDONLY;
}

static inline bool kvm_s2pte_readonly(pte_t *pte)
{
	return (pte_val(*pte) & L_PTE_S2_RDWR) == L_PTE_S2_RDONLY;
}

static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
{
	pmd_val(*pmd) = (pmd_val(*pmd) & ~L_PMD_S2_RDWR) | L_PMD_S2_RDONLY;
}

static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
{
	return (pmd_val(*pmd) & L_PMD_S2_RDWR) == L_PMD_S2_RDONLY;
}
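
/*
 * Sketch of a stage-2 write-protect walk, as used for dirty logging
 * (illustrative; the loop bounds are assumptions):
 *
 *	for (pte = first; pte <= last; pte++)
 *		if (!pte_none(*pte) && !kvm_s2pte_readonly(pte))
 *			kvm_set_s2pte_readonly(pte);
 */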

/*
 * A page-table page starts out with a reference count of 1; the table
 * manipulation code takes one extra reference for each entry installed
 * and drops it when the entry is cleared, so a count of 1 means the
 * table no longer contains any entries.
 */
static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);
	return page_count(ptr_page) == 1;
}

#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
#define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
#define kvm_pud_table_empty(kvm, pudp) false

#define hyp_pte_table_empty(ptep) kvm_page_empty(ptep)
#define hyp_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
#define hyp_pud_table_empty(pudp) false

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__cpuc_flush_dcache_area((a), (l))

static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	/* SCTLR.M (bit 0, MMU) and SCTLR.C (bit 2, D-cache) must both be set */
	return (vcpu_cp15(vcpu, c1_SCTLR) & 0b101) == 0b101;
}

static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
					       kvm_pfn_t pfn,
					       unsigned long size,
					       bool ipa_uncached)
{
	/*
	 * If we are going to insert an instruction page and the icache is
	 * either VIPT or PIPT, there is a potential problem where the host
	 * (or another VM) may have used the same page as this guest, and we
	 * read incorrect data from the icache.  If we're using a PIPT cache,
	 * we can invalidate just that page, but if we are using a VIPT cache
	 * we need to invalidate the entire icache - damn shame - as written
	 * in the ARM ARM (DDI 0406C.b - Page B3-1393).
	 *
	 * VIVT caches are tagged using both the ASID and the VMID and don't
	 * need any kind of flushing (DDI 0406C.b - Page B3-1392).
	 *
	 * We need to do this through a kernel mapping (using the
	 * user-space mapping has proved to be the wrong
	 * solution).  For that, we need to kmap one page at a time,
	 * and iterate over the range.
	 */

	bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached;

	VM_BUG_ON(size & ~PAGE_MASK);

	if (!need_flush && !icache_is_pipt())
		goto vipt_cache;

	while (size) {
		void *va = kmap_atomic_pfn(pfn);

		if (need_flush)
			kvm_flush_dcache_to_poc(va, PAGE_SIZE);

		if (icache_is_pipt())
			__cpuc_coherent_user_range((unsigned long)va,
						   (unsigned long)va + PAGE_SIZE);

		size -= PAGE_SIZE;
		pfn++;

		kunmap_atomic(va);
	}

vipt_cache:
	if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) {
		/* any kind of VIPT cache */
		__flush_icache_all();
	}
}
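
/*
 * Hypothetical caller sketch: the stage-2 fault path would make a newly
 * mapped guest page coherent before installing its PTE, e.g.:
 *
 *	__coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE, fault_ipa_uncached);
 *
 * where fault_ipa_uncached is an assumed flag from the fault handler,
 * not something defined in this header.
 */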

static inline void __kvm_flush_dcache_pte(pte_t pte)
{
	void *va = kmap_atomic(pte_page(pte));

	kvm_flush_dcache_to_poc(va, PAGE_SIZE);

	kunmap_atomic(va);
}

static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
	unsigned long size = PMD_SIZE;
	kvm_pfn_t pfn = pmd_pfn(pmd);

	while (size) {
		void *va = kmap_atomic_pfn(pfn);

		kvm_flush_dcache_to_poc(va, PAGE_SIZE);

		pfn++;
		size -= PAGE_SIZE;

		kunmap_atomic(va);
	}
}

static inline void __kvm_flush_dcache_pud(pud_t pud)
{
	/* No PUD-level block mappings on 32-bit ARM stage-2: nothing to flush */
}

#define kvm_virt_to_phys(x)		virt_to_idmap((unsigned long)(x))

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline bool __kvm_cpu_uses_extended_idmap(void)
{
	return false;
}

static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
				       pgd_t *hyp_pgd,
				       pgd_t *merged_hyp_pgd,
				       unsigned long hyp_idmap_start) { }

static inline unsigned int kvm_get_vmid_bits(void)
{
	/* ARMv7's VTTBR provides an 8-bit VMID field */
	return 8;
}

#endif	/* !__ASSEMBLY__ */

#endif	/* __ARM_KVM_MMU_H__ */