arch/arm/include/asm/kvm_mmu.h
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#ifndef __ARM_KVM_MMU_H__
#define __ARM_KVM_MMU_H__

#include <asm/memory.h>
#include <asm/page.h>

/*
 * We directly use the kernel VA for the HYP, as we can directly share
 * the mapping (HTTBR "covers" TTBR1).
 */
#define HYP_PAGE_OFFSET_MASK	UL(~0)
#define HYP_PAGE_OFFSET		PAGE_OFFSET
#define KERN_TO_HYP(kva)	(kva)

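/*
 * Consequence (illustrative note, not part of the original header):
 * converting a kernel VA for use in HYP mode is a no-op here, unlike
 * on arm64 where a mask/offset is applied:
 *
 *	unsigned long hyp_va = KERN_TO_HYP((unsigned long)kaddr);
 *	// hyp_va == (unsigned long)kaddr on 32-bit ARM
 */
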
/*
 * Our virtual mapping for the boot-time MMU-enable code. Must be
 * shared across all the page-tables. Conveniently, we use the vectors
 * page, where no kernel data will ever be shared with HYP.
 */
#define TRAMPOLINE_VA		UL(CONFIG_VECTORS_BASE)

/*
 * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation
 * levels.
 */
#define KVM_MMU_CACHE_MIN_PAGES	2

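/*
 * Why two pages (illustrative reasoning, not in the original header):
 * the stage-2 pgd is preallocated, so servicing a worst-case stage-2
 * fault can require at most one new pmd table plus one new pte table.
 * The MMU memory cache is therefore topped up with at least this many
 * pages before the stage-2 tables are walked and populated.
 */
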
#ifndef __ASSEMBLY__

#include <linux/highmem.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>

int create_hyp_mappings(void *from, void *to);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_boot_hyp_pgd(void);
void free_hyp_pgds(void);

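/*
 * Typical use (a sketch; the symbol names below are assumptions made
 * for the sake of the example, not taken from this header): KVM init
 * maps the HYP text into the HYP page tables before enabling the
 * hypervisor:
 *
 *	err = create_hyp_mappings(hyp_text_start, hyp_text_end);
 *	if (err)
 *		return err;
 */
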
void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_mmu_get_boot_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

static inline void kvm_set_pmd(pmd_t *pmd, pmd_t new_pmd)
{
	*pmd = new_pmd;
	flush_pmd_entry(pmd);
}

static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
{
	*pte = new_pte;
	/*
	 * flush_pmd_entry just takes a void pointer and cleans the necessary
	 * cache entries, so we can reuse the function for ptes.
	 */
	flush_pmd_entry(pte);
}

static inline void kvm_clean_pgd(pgd_t *pgd)
{
	clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
}

static inline void kvm_clean_pmd(pmd_t *pmd)
{
	clean_dcache_area(pmd, PTRS_PER_PMD * sizeof(pmd_t));
}

static inline void kvm_clean_pmd_entry(pmd_t *pmd)
{
	clean_pmd_entry(pmd);
}

static inline void kvm_clean_pte(pte_t *pte)
{
	clean_pte_table(pte);
}

static inline void kvm_set_s2pte_writable(pte_t *pte)
{
	pte_val(*pte) |= L_PTE_S2_RDWR;
}

static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
{
	pmd_val(*pmd) |= L_PMD_S2_RDWR;
}

static inline void kvm_set_s2pte_readonly(pte_t *pte)
{
	pte_val(*pte) = (pte_val(*pte) & ~L_PTE_S2_RDWR) | L_PTE_S2_RDONLY;
}

static inline bool kvm_s2pte_readonly(pte_t *pte)
{
	return (pte_val(*pte) & L_PTE_S2_RDWR) == L_PTE_S2_RDONLY;
}

static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
{
	pmd_val(*pmd) = (pmd_val(*pmd) & ~L_PMD_S2_RDWR) | L_PMD_S2_RDONLY;
}

static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
{
	return (pmd_val(*pmd) & L_PMD_S2_RDWR) == L_PMD_S2_RDONLY;
}

/* Open coded p*d_addr_end that can deal with 64bit addresses */
#define kvm_pgd_addr_end(addr, end)					\
({	u64 __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;		\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);		\
})

#define kvm_pud_addr_end(addr, end)	(end)

#define kvm_pmd_addr_end(addr, end)					\
({	u64 __boundary = ((addr) + PMD_SIZE) & PMD_MASK;		\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);		\
})

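/*
 * Worked example (illustrative, not in the original header): the generic
 * p*d_addr_end() helpers compute in unsigned long, which is 32 bits on
 * ARM, while stage-2 IPAs can be 40 bits wide with LPAE. Using a u64
 * boundary keeps the arithmetic exact. Assuming LPAE's 1GiB PGDIR_SIZE:
 *
 *	addr       = 0x1_3fe0_0000
 *	__boundary = (addr + PGDIR_SIZE) & PGDIR_MASK = 0x1_4000_0000
 *
 * whereas 32-bit arithmetic would have truncated addr and returned
 * 0x4000_0000.
 */
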
#define kvm_pgd_index(addr)		pgd_index(addr)

static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);
	return page_count(ptr_page) == 1;
}

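/*
 * Note (an assumption drawn from the stage-2 table management
 * convention, not stated in this header): table pages carry one page
 * reference per live entry on top of the initial allocation reference,
 * so a page_count() of exactly 1 means the table no longer contains
 * any entries and may be freed.
 */
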
#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
#define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
#define kvm_pud_table_empty(kvm, pudp) (0)

#define KVM_PREALLOC_LEVEL	0

static inline void *kvm_get_hwpgd(struct kvm *kvm)
{
	return kvm->arch.pgd;
}

static inline unsigned int kvm_get_hwpgd_size(void)
{
	return PTRS_PER_S2_PGD * sizeof(pgd_t);
}

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__cpuc_flush_dcache_area((a), (l))

static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	/* SCTLR.M (bit 0, MMU enable) and SCTLR.C (bit 2, D-cache enable) */
	return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101;
}

static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
					       kvm_pfn_t pfn,
					       unsigned long size,
					       bool ipa_uncached)
{
	/*
	 * If we are going to insert an instruction page and the icache is
	 * either VIPT or PIPT, there is a potential problem where the host
	 * (or another VM) may have used the same page as this guest, and we
	 * read incorrect data from the icache.  If we're using a PIPT cache,
	 * we can invalidate just that page, but if we are using a VIPT cache
	 * we need to invalidate the entire icache - damn shame - as written
	 * in the ARM ARM (DDI 0406C.b - Page B3-1393).
	 *
	 * VIVT caches are tagged using both the ASID and the VMID and don't
	 * need any kind of flushing (DDI 0406C.b - Page B3-1392).
	 *
	 * We need to do this through a kernel mapping (using the
	 * user-space mapping has proved to be the wrong
	 * solution). For that, we need to kmap one page at a time,
	 * and iterate over the range.
	 */

	bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached;

	VM_BUG_ON(size & ~PAGE_MASK);

	if (!need_flush && !icache_is_pipt())
		goto vipt_cache;

	while (size) {
		void *va = kmap_atomic_pfn(pfn);

		if (need_flush)
			kvm_flush_dcache_to_poc(va, PAGE_SIZE);

		if (icache_is_pipt())
			__cpuc_coherent_user_range((unsigned long)va,
						   (unsigned long)va + PAGE_SIZE);

		size -= PAGE_SIZE;
		pfn++;

		kunmap_atomic(va);
	}

vipt_cache:
	if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) {
		/* any kind of VIPT cache */
		__flush_icache_all();
	}
}

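/*
 * Summary of the cases handled above (an illustrative recap, not part
 * of the original header):
 *
 *	PIPT icache:       d-cache flush (if needed) + per-page i-cache
 *	                   invalidation in the loop.
 *	VIPT icache:       d-cache flush (if needed) + full i-cache
 *	                   invalidation, since aliasing makes per-page
 *	                   invalidation by VA unsafe.
 *	VIVT ASID-tagged:  d-cache flush (if needed) only; i-cache lines
 *	                   are tagged with ASID/VMID and need no flush.
 */
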
static inline void __kvm_flush_dcache_pte(pte_t pte)
{
	void *va = kmap_atomic(pte_page(pte));

	kvm_flush_dcache_to_poc(va, PAGE_SIZE);

	kunmap_atomic(va);
}

static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
	unsigned long size = PMD_SIZE;
	kvm_pfn_t pfn = pmd_pfn(pmd);

	while (size) {
		void *va = kmap_atomic_pfn(pfn);

		kvm_flush_dcache_to_poc(va, PAGE_SIZE);

		pfn++;
		size -= PAGE_SIZE;

		kunmap_atomic(va);
	}
}

static inline void __kvm_flush_dcache_pud(pud_t pud)
{
	/* Nothing to do: the pud level is folded on 32-bit ARM stage-2 tables */
}

#define kvm_virt_to_phys(x)		virt_to_idmap((unsigned long)(x))

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

/*
 * The extended-idmap machinery is only needed on arm64; these stubs
 * keep the shared init code happy on 32-bit ARM.
 */
static inline bool __kvm_cpu_uses_extended_idmap(void)
{
	return false;
}

static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
				       pgd_t *hyp_pgd,
				       pgd_t *merged_hyp_pgd,
				       unsigned long hyp_idmap_start) { }

static inline unsigned int kvm_get_vmid_bits(void)
{
	/* VTTBR.VMID is a fixed 8-bit field on ARMv7 */
	return 8;
}

#endif	/* !__ASSEMBLY__ */

#endif /* __ARM_KVM_MMU_H__ */