Commit | Line | Data |
749cf76c CD |
1 | /* |
2 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | |
3 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify | |
6 | * it under the terms of the GNU General Public License, version 2, as | |
7 | * published by the Free Software Foundation. | |
8 | * | |
9 | * This program is distributed in the hope that it will be useful, | |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
12 | * GNU General Public License for more details. | |
13 | * | |
14 | * You should have received a copy of the GNU General Public License | |
15 | * along with this program; if not, write to the Free Software | |
16 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | |
17 | */ | |
342cd0ab CD |
18 | |
19 | #include <linux/mman.h> | |
20 | #include <linux/kvm_host.h> | |
21 | #include <linux/io.h> | |
ad361f09 | 22 | #include <linux/hugetlb.h> |
45e96ea6 | 23 | #include <trace/events/kvm.h> |
342cd0ab | 24 | #include <asm/pgalloc.h> |
94f8e641 | 25 | #include <asm/cacheflush.h> |
342cd0ab CD |
26 | #include <asm/kvm_arm.h> |
27 | #include <asm/kvm_mmu.h> | |
45e96ea6 | 28 | #include <asm/kvm_mmio.h> |
d5d8184d | 29 | #include <asm/kvm_asm.h> |
94f8e641 | 30 | #include <asm/kvm_emulate.h> |
d5d8184d CD |
31 | |
32 | #include "trace.h" | |
342cd0ab CD |
33 | |
34 | extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[]; | |
35 | ||
5a677ce0 | 36 | static pgd_t *boot_hyp_pgd; |
2fb41059 | 37 | static pgd_t *hyp_pgd; |
342cd0ab CD |
38 | static DEFINE_MUTEX(kvm_hyp_pgd_mutex); |
39 | ||
5a677ce0 MZ |
40 | static void *init_bounce_page; |
41 | static unsigned long hyp_idmap_start; | |
42 | static unsigned long hyp_idmap_end; | |
43 | static phys_addr_t hyp_idmap_vector; | |
44 | ||
5d4e08c4 MS |
45 | #define pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t)) |
46 | ||
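/* True for both hugetlbfs (pmd_huge) and transparent huge page (pmd_trans_huge) PMDs. */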
9b5fdb97 | 47 | #define kvm_pmd_huge(_x) (pmd_huge(_x) || pmd_trans_huge(_x)) |
ad361f09 | 48 | |
48762767 | 49 | static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) |
d5d8184d | 50 | { |
d4cb9df5 MZ |
51 | /* |
52 | * This function also gets called when dealing with HYP page | |
53 | * tables. As HYP doesn't have an associated struct kvm (and | |
54 | * the HYP page tables are fairly static), we don't do | |
55 | * anything there. | |
56 | */ | |
57 | if (kvm) | |
58 | kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa); | |
d5d8184d CD |
59 | } |
60 | ||
d5d8184d CD |
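/*
 * Pre-fill the per-VCPU cache with page-table pages so that the fault
 * path can install mappings without having to allocate memory while
 * holding mmu_lock.
 */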
61 | static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, |
62 | int min, int max) | |
63 | { | |
64 | void *page; | |
65 | ||
66 | BUG_ON(max > KVM_NR_MEM_OBJS); | |
67 | if (cache->nobjs >= min) | |
68 | return 0; | |
69 | while (cache->nobjs < max) { | |
70 | page = (void *)__get_free_page(PGALLOC_GFP); | |
71 | if (!page) | |
72 | return -ENOMEM; | |
73 | cache->objects[cache->nobjs++] = page; | |
74 | } | |
75 | return 0; | |
76 | } | |
77 | ||
78 | static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc) | |
79 | { | |
80 | while (mc->nobjs) | |
81 | free_page((unsigned long)mc->objects[--mc->nobjs]); | |
82 | } | |
83 | ||
84 | static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc) | |
85 | { | |
86 | void *p; | |
87 | ||
88 | BUG_ON(!mc || !mc->nobjs); | |
89 | p = mc->objects[--mc->nobjs]; | |
90 | return p; | |
91 | } | |
92 | ||
979acd5e MZ |
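/*
 * A page-table page holds no entries once its refcount drops back to the
 * single reference taken when it was allocated.
 */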
93 | static bool page_empty(void *ptr) |
94 | { | |
95 | struct page *ptr_page = virt_to_page(ptr); | |
96 | return page_count(ptr_page) == 1; | |
97 | } | |
98 | ||
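/*
 * The clear_*_entry() helpers below tear down one entry at the given level:
 * clear it, flush the TLB for that IPA, free any next-level table it pointed
 * to, and drop the reference held on the containing table page.
 */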
d4cb9df5 | 99 | static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr) |
342cd0ab | 100 | { |
ad361f09 CD |
101 | if (pud_huge(*pud)) { |
102 | pud_clear(pud); | |
103 | kvm_tlb_flush_vmid_ipa(kvm, addr); | |
104 | } else { | |
105 | pmd_t *pmd_table = pmd_offset(pud, 0); | |
106 | pud_clear(pud); | |
107 | kvm_tlb_flush_vmid_ipa(kvm, addr); | |
108 | pmd_free(NULL, pmd_table); | |
109 | } | |
4f728276 MZ |
110 | put_page(virt_to_page(pud)); |
111 | } | |
342cd0ab | 112 | |
d4cb9df5 | 113 | static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr) |
4f728276 | 114 | { |
ad361f09 CD |
115 | if (kvm_pmd_huge(*pmd)) { |
116 | pmd_clear(pmd); | |
117 | kvm_tlb_flush_vmid_ipa(kvm, addr); | |
118 | } else { | |
119 | pte_t *pte_table = pte_offset_kernel(pmd, 0); | |
120 | pmd_clear(pmd); | |
121 | kvm_tlb_flush_vmid_ipa(kvm, addr); | |
122 | pte_free_kernel(NULL, pte_table); | |
123 | } | |
4f728276 MZ |
124 | put_page(virt_to_page(pmd)); |
125 | } | |
126 | ||
d4cb9df5 | 127 | static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr) |
4f728276 MZ |
128 | { |
129 | if (pte_present(*pte)) { | |
130 | kvm_set_pte(pte, __pte(0)); | |
131 | put_page(virt_to_page(pte)); | |
d4cb9df5 | 132 | kvm_tlb_flush_vmid_ipa(kvm, addr); |
342cd0ab CD |
133 | } |
134 | } | |
135 | ||
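/*
 * Walk the page tables under @pgdp and unmap the range [start, start + size),
 * freeing table pages once they become empty. Used for both Hyp page tables
 * (kvm == NULL) and stage-2 page tables.
 */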
d4cb9df5 MZ |
136 | static void unmap_range(struct kvm *kvm, pgd_t *pgdp, |
137 | unsigned long long start, u64 size) | |
000d3996 MZ |
138 | { |
139 | pgd_t *pgd; | |
140 | pud_t *pud; | |
141 | pmd_t *pmd; | |
4f728276 MZ |
142 | pte_t *pte; |
143 | unsigned long long addr = start, end = start + size; | |
d3840b26 | 144 | u64 next; |
000d3996 | 145 | |
4f728276 MZ |
146 | while (addr < end) { |
147 | pgd = pgdp + pgd_index(addr); | |
148 | pud = pud_offset(pgd, addr); | |
56041bf9 | 149 | pte = NULL; |
4f728276 | 150 | if (pud_none(*pud)) { |
a3c8bd31 | 151 | addr = kvm_pud_addr_end(addr, end); |
4f728276 MZ |
152 | continue; |
153 | } | |
000d3996 | 154 | |
ad361f09 CD |
155 | if (pud_huge(*pud)) { |
156 | /* | |
157 | * If we are dealing with a huge pud, just clear it and | |
158 | * move on. | |
159 | */ | |
160 | clear_pud_entry(kvm, pud, addr); | |
a3c8bd31 | 161 | addr = kvm_pud_addr_end(addr, end); |
ad361f09 CD |
162 | continue; |
163 | } | |
164 | ||
4f728276 MZ |
165 | pmd = pmd_offset(pud, addr); |
166 | if (pmd_none(*pmd)) { | |
a3c8bd31 | 167 | addr = kvm_pmd_addr_end(addr, end); |
4f728276 MZ |
168 | continue; |
169 | } | |
000d3996 | 170 | |
ad361f09 CD |
171 | if (!kvm_pmd_huge(*pmd)) { |
172 | pte = pte_offset_kernel(pmd, addr); | |
173 | clear_pte_entry(kvm, pte, addr); | |
174 | next = addr + PAGE_SIZE; | |
175 | } | |
4f728276 | 176 | |
ad361f09 CD |
177 | /* |
178 | * If the pmd entry is to be cleared, walk back up the ladder | |
179 | */ | |
56041bf9 | 180 | if (kvm_pmd_huge(*pmd) || (pte && page_empty(pte))) { |
d4cb9df5 | 181 | clear_pmd_entry(kvm, pmd, addr); |
a3c8bd31 | 182 | next = kvm_pmd_addr_end(addr, end); |
979acd5e | 183 | if (page_empty(pmd) && !page_empty(pud)) { |
d4cb9df5 | 184 | clear_pud_entry(kvm, pud, addr); |
a3c8bd31 | 185 | next = kvm_pud_addr_end(addr, end); |
4f728276 MZ |
186 | } |
187 | } | |
188 | ||
d3840b26 | 189 | addr = next; |
4f728276 | 190 | } |
000d3996 MZ |
191 | } |
192 | ||
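/*
 * The stage2_flush_*() walkers below clean each page mapped at stage 2 to
 * the point of coherency, using the kernel alias obtained via gfn_to_hva().
 */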
9d218a1f MZ |
193 | static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd, |
194 | phys_addr_t addr, phys_addr_t end) | |
195 | { | |
196 | pte_t *pte; | |
197 | ||
198 | pte = pte_offset_kernel(pmd, addr); | |
199 | do { | |
200 | if (!pte_none(*pte)) { | |
201 | hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT); | |
202 | kvm_flush_dcache_to_poc((void*)hva, PAGE_SIZE); | |
203 | } | |
204 | } while (pte++, addr += PAGE_SIZE, addr != end); | |
205 | } | |
206 | ||
207 | static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud, | |
208 | phys_addr_t addr, phys_addr_t end) | |
209 | { | |
210 | pmd_t *pmd; | |
211 | phys_addr_t next; | |
212 | ||
213 | pmd = pmd_offset(pud, addr); | |
214 | do { | |
215 | next = kvm_pmd_addr_end(addr, end); | |
216 | if (!pmd_none(*pmd)) { | |
217 | if (kvm_pmd_huge(*pmd)) { | |
218 | hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT); | |
219 | kvm_flush_dcache_to_poc((void*)hva, PMD_SIZE); | |
220 | } else { | |
221 | stage2_flush_ptes(kvm, pmd, addr, next); | |
222 | } | |
223 | } | |
224 | } while (pmd++, addr = next, addr != end); | |
225 | } | |
226 | ||
227 | static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd, | |
228 | phys_addr_t addr, phys_addr_t end) | |
229 | { | |
230 | pud_t *pud; | |
231 | phys_addr_t next; | |
232 | ||
233 | pud = pud_offset(pgd, addr); | |
234 | do { | |
235 | next = kvm_pud_addr_end(addr, end); | |
236 | if (!pud_none(*pud)) { | |
237 | if (pud_huge(*pud)) { | |
238 | hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT); | |
239 | kvm_flush_dcache_to_poc((void*)hva, PUD_SIZE); | |
240 | } else { | |
241 | stage2_flush_pmds(kvm, pud, addr, next); | |
242 | } | |
243 | } | |
244 | } while (pud++, addr = next, addr != end); | |
245 | } | |
246 | ||
247 | static void stage2_flush_memslot(struct kvm *kvm, | |
248 | struct kvm_memory_slot *memslot) | |
249 | { | |
250 | phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT; | |
251 | phys_addr_t end = addr + PAGE_SIZE * memslot->npages; | |
252 | phys_addr_t next; | |
253 | pgd_t *pgd; | |
254 | ||
255 | pgd = kvm->arch.pgd + pgd_index(addr); | |
256 | do { | |
257 | next = kvm_pgd_addr_end(addr, end); | |
258 | stage2_flush_puds(kvm, pgd, addr, next); | |
259 | } while (pgd++, addr = next, addr != end); | |
260 | } | |
261 | ||
262 | /** | |
263 | * stage2_flush_vm - Invalidate cache for pages mapped in stage 2 | |
264 | * @kvm: The struct kvm pointer | |
265 | * | |
266 | * Go through the stage 2 page tables and invalidate any cache lines | |
267 | * backing memory already mapped to the VM. | |
268 | */ | |
269 | void stage2_flush_vm(struct kvm *kvm) | |
270 | { | |
271 | struct kvm_memslots *slots; | |
272 | struct kvm_memory_slot *memslot; | |
273 | int idx; | |
274 | ||
275 | idx = srcu_read_lock(&kvm->srcu); | |
276 | spin_lock(&kvm->mmu_lock); | |
277 | ||
278 | slots = kvm_memslots(kvm); | |
279 | kvm_for_each_memslot(memslot, slots) | |
280 | stage2_flush_memslot(kvm, memslot); | |
281 | ||
282 | spin_unlock(&kvm->mmu_lock); | |
283 | srcu_read_unlock(&kvm->srcu, idx); | |
284 | } | |
285 | ||
d157f4a5 MZ |
286 | /** |
287 | * free_boot_hyp_pgd - free HYP boot page tables | |
288 | * | |
289 | * Free the HYP boot page tables. The bounce page is also freed. | |
290 | */ | |
291 | void free_boot_hyp_pgd(void) | |
292 | { | |
293 | mutex_lock(&kvm_hyp_pgd_mutex); | |
294 | ||
295 | if (boot_hyp_pgd) { | |
d4cb9df5 MZ |
296 | unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE); |
297 | unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); | |
5d4e08c4 | 298 | free_pages((unsigned long)boot_hyp_pgd, pgd_order); |
d157f4a5 MZ |
299 | boot_hyp_pgd = NULL; |
300 | } | |
301 | ||
302 | if (hyp_pgd) | |
d4cb9df5 | 303 | unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); |
d157f4a5 | 304 | |
5d4e08c4 | 305 | free_page((unsigned long)init_bounce_page); |
d157f4a5 MZ |
306 | init_bounce_page = NULL; |
307 | ||
308 | mutex_unlock(&kvm_hyp_pgd_mutex); | |
309 | } | |
310 | ||
342cd0ab | 311 | /** |
4f728276 | 312 | * free_hyp_pgds - free Hyp-mode page tables |
342cd0ab | 313 | * |
5a677ce0 MZ |
314 | * Assumes hyp_pgd is a page table used strictly in Hyp-mode and |
315 | * therefore contains either mappings in the kernel memory area (above | |
316 | * PAGE_OFFSET), or device mappings in the vmalloc range (from | |
317 | * VMALLOC_START to VMALLOC_END). | |
318 | * | |
319 | * boot_hyp_pgd should only map two pages for the init code. | |
342cd0ab | 320 | */ |
4f728276 | 321 | void free_hyp_pgds(void) |
342cd0ab | 322 | { |
342cd0ab CD |
323 | unsigned long addr; |
324 | ||
d157f4a5 | 325 | free_boot_hyp_pgd(); |
4f728276 | 326 | |
d157f4a5 | 327 | mutex_lock(&kvm_hyp_pgd_mutex); |
5a677ce0 | 328 | |
4f728276 MZ |
329 | if (hyp_pgd) { |
330 | for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE) | |
d4cb9df5 | 331 | unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE); |
4f728276 | 332 | for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE) |
d4cb9df5 MZ |
333 | unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE); |
334 | ||
5d4e08c4 | 335 | free_pages((unsigned long)hyp_pgd, pgd_order); |
d157f4a5 | 336 | hyp_pgd = NULL; |
4f728276 MZ |
337 | } |
338 | ||
342cd0ab CD |
339 | mutex_unlock(&kvm_hyp_pgd_mutex); |
340 | } | |
341 | ||
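/*
 * Fill in Hyp PTEs for [start, end) with consecutive pfns and the given
 * protection, taking a reference on the PTE page for each new entry and
 * cleaning the updated entries to the point of coherency.
 */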
342 | static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start, | |
6060df84 MZ |
343 | unsigned long end, unsigned long pfn, |
344 | pgprot_t prot) | |
342cd0ab CD |
345 | { |
346 | pte_t *pte; | |
347 | unsigned long addr; | |
342cd0ab | 348 | |
3562c76d MZ |
349 | addr = start; |
350 | do { | |
6060df84 MZ |
351 | pte = pte_offset_kernel(pmd, addr); |
352 | kvm_set_pte(pte, pfn_pte(pfn, prot)); | |
4f728276 | 353 | get_page(virt_to_page(pte)); |
5a677ce0 | 354 | kvm_flush_dcache_to_poc(pte, sizeof(*pte)); |
6060df84 | 355 | pfn++; |
3562c76d | 356 | } while (addr += PAGE_SIZE, addr != end); |
342cd0ab CD |
357 | } |
358 | ||
359 | static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start, | |
6060df84 MZ |
360 | unsigned long end, unsigned long pfn, |
361 | pgprot_t prot) | |
342cd0ab CD |
362 | { |
363 | pmd_t *pmd; | |
364 | pte_t *pte; | |
365 | unsigned long addr, next; | |
366 | ||
3562c76d MZ |
367 | addr = start; |
368 | do { | |
6060df84 | 369 | pmd = pmd_offset(pud, addr); |
342cd0ab CD |
370 | |
371 | BUG_ON(pmd_sect(*pmd)); | |
372 | ||
373 | if (pmd_none(*pmd)) { | |
6060df84 | 374 | pte = pte_alloc_one_kernel(NULL, addr); |
342cd0ab CD |
375 | if (!pte) { |
376 | kvm_err("Cannot allocate Hyp pte\n"); | |
377 | return -ENOMEM; | |
378 | } | |
379 | pmd_populate_kernel(NULL, pmd, pte); | |
4f728276 | 380 | get_page(virt_to_page(pmd)); |
5a677ce0 | 381 | kvm_flush_dcache_to_poc(pmd, sizeof(*pmd)); |
342cd0ab CD |
382 | } |
383 | ||
384 | next = pmd_addr_end(addr, end); | |
385 | ||
6060df84 MZ |
386 | create_hyp_pte_mappings(pmd, addr, next, pfn, prot); |
387 | pfn += (next - addr) >> PAGE_SHIFT; | |
3562c76d | 388 | } while (addr = next, addr != end); |
342cd0ab CD |
389 | |
390 | return 0; | |
391 | } | |
392 | ||
6060df84 MZ |
393 | static int __create_hyp_mappings(pgd_t *pgdp, |
394 | unsigned long start, unsigned long end, | |
395 | unsigned long pfn, pgprot_t prot) | |
342cd0ab | 396 | { |
342cd0ab CD |
397 | pgd_t *pgd; |
398 | pud_t *pud; | |
399 | pmd_t *pmd; | |
400 | unsigned long addr, next; | |
401 | int err = 0; | |
402 | ||
342cd0ab | 403 | mutex_lock(&kvm_hyp_pgd_mutex); |
3562c76d MZ |
404 | addr = start & PAGE_MASK; |
405 | end = PAGE_ALIGN(end); | |
406 | do { | |
6060df84 MZ |
407 | pgd = pgdp + pgd_index(addr); |
408 | pud = pud_offset(pgd, addr); | |
342cd0ab CD |
409 | |
410 | if (pud_none_or_clear_bad(pud)) { | |
6060df84 | 411 | pmd = pmd_alloc_one(NULL, addr); |
342cd0ab CD |
412 | if (!pmd) { |
413 | kvm_err("Cannot allocate Hyp pmd\n"); | |
414 | err = -ENOMEM; | |
415 | goto out; | |
416 | } | |
417 | pud_populate(NULL, pud, pmd); | |
4f728276 | 418 | get_page(virt_to_page(pud)); |
5a677ce0 | 419 | kvm_flush_dcache_to_poc(pud, sizeof(*pud)); |
342cd0ab CD |
420 | } |
421 | ||
422 | next = pgd_addr_end(addr, end); | |
6060df84 | 423 | err = create_hyp_pmd_mappings(pud, addr, next, pfn, prot); |
342cd0ab CD |
424 | if (err) |
425 | goto out; | |
6060df84 | 426 | pfn += (next - addr) >> PAGE_SHIFT; |
3562c76d | 427 | } while (addr = next, addr != end); |
342cd0ab CD |
428 | out: |
429 | mutex_unlock(&kvm_hyp_pgd_mutex); | |
430 | return err; | |
431 | } | |
432 | ||
40c2729b CD |
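/* Translate a kernel virtual address (lowmem or vmalloc) to a physical address. */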
433 | static phys_addr_t kvm_kaddr_to_phys(void *kaddr) |
434 | { | |
435 | if (!is_vmalloc_addr(kaddr)) { | |
436 | BUG_ON(!virt_addr_valid(kaddr)); | |
437 | return __pa(kaddr); | |
438 | } else { | |
439 | return page_to_phys(vmalloc_to_page(kaddr)) + | |
440 | offset_in_page(kaddr); | |
441 | } | |
442 | } | |
443 | ||
342cd0ab | 444 | /** |
06e8c3b0 | 445 | * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode |
342cd0ab CD |
446 | * @from: The virtual kernel start address of the range |
447 | * @to: The virtual kernel end address of the range (exclusive) | |
448 | * | |
06e8c3b0 MZ |
449 | * The Hyp-mode mapping uses the same virtual address as the kernel | |
450 | * (modulo HYP_PAGE_OFFSET) and points to the same underlying | |
451 | * physical pages. | |
342cd0ab CD |
452 | */ |
453 | int create_hyp_mappings(void *from, void *to) | |
454 | { | |
40c2729b CD |
455 | phys_addr_t phys_addr; |
456 | unsigned long virt_addr; | |
6060df84 MZ |
457 | unsigned long start = KERN_TO_HYP((unsigned long)from); |
458 | unsigned long end = KERN_TO_HYP((unsigned long)to); | |
459 | ||
40c2729b CD |
460 | start = start & PAGE_MASK; |
461 | end = PAGE_ALIGN(end); | |
6060df84 | 462 | |
40c2729b CD |
463 | for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) { |
464 | int err; | |
6060df84 | 465 | |
40c2729b CD |
466 | phys_addr = kvm_kaddr_to_phys(from + virt_addr - start); |
467 | err = __create_hyp_mappings(hyp_pgd, virt_addr, | |
468 | virt_addr + PAGE_SIZE, | |
469 | __phys_to_pfn(phys_addr), | |
470 | PAGE_HYP); | |
471 | if (err) | |
472 | return err; | |
473 | } | |
474 | ||
475 | return 0; | |
342cd0ab CD |
476 | } |
477 | ||
478 | /** | |
06e8c3b0 MZ |
479 | * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode |
480 | * @from: The kernel start VA of the range | |
481 | * @to: The kernel end VA of the range (exclusive) | |
6060df84 | 482 | * @phys_addr: The physical start address which gets mapped |
06e8c3b0 MZ |
483 | * |
484 | * The resulting HYP VA is the same as the kernel VA, modulo | |
485 | * HYP_PAGE_OFFSET. | |
342cd0ab | 486 | */ |
6060df84 | 487 | int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr) |
342cd0ab | 488 | { |
6060df84 MZ |
489 | unsigned long start = KERN_TO_HYP((unsigned long)from); |
490 | unsigned long end = KERN_TO_HYP((unsigned long)to); | |
491 | ||
492 | /* Check for a valid kernel IO mapping */ | |
493 | if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1)) | |
494 | return -EINVAL; | |
495 | ||
496 | return __create_hyp_mappings(hyp_pgd, start, end, | |
497 | __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE); | |
342cd0ab CD |
498 | } |
499 | ||
d5d8184d CD |
500 | /** |
501 | * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation. | |
502 | * @kvm: The KVM struct pointer for the VM. | |
503 | * | |
504 | * Allocates only the 1st level table, of the size defined by S2_PGD_ORDER | |
505 | * (which can support either full 40-bit input addresses or be limited to | |
506 | * 32-bit input addresses). Clears the allocated pages. | |
507 | * | |
508 | * Note we don't need locking here as this is only called when the VM is | |
509 | * created, which can only be done once. | |
510 | */ | |
511 | int kvm_alloc_stage2_pgd(struct kvm *kvm) | |
512 | { | |
513 | pgd_t *pgd; | |
514 | ||
515 | if (kvm->arch.pgd != NULL) { | |
516 | kvm_err("kvm_arch already initialized?\n"); | |
517 | return -EINVAL; | |
518 | } | |
519 | ||
520 | pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, S2_PGD_ORDER); | |
521 | if (!pgd) | |
522 | return -ENOMEM; | |
523 | ||
d5d8184d | 524 | memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t)); |
c62ee2b2 | 525 | kvm_clean_pgd(pgd); |
d5d8184d CD |
526 | kvm->arch.pgd = pgd; |
527 | ||
528 | return 0; | |
529 | } | |
530 | ||
d5d8184d CD |
531 | /** |
532 | * unmap_stage2_range -- Clear stage2 page table entries to unmap a range | |
533 | * @kvm: The VM pointer | |
534 | * @start: The intermediate physical base address of the range to unmap | |
535 | * @size: The size of the area to unmap | |
536 | * | |
537 | * Clear a range of stage-2 mappings, lowering the various ref-counts. Must | |
538 | * be called while holding mmu_lock (unless for freeing the stage2 pgd before | |
539 | * destroying the VM), otherwise another faulting VCPU may come in and mess | |
540 | * with things behind our backs. | |
541 | */ | |
542 | static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) | |
543 | { | |
d4cb9df5 | 544 | unmap_range(kvm, kvm->arch.pgd, start, size); |
d5d8184d CD |
545 | } |
546 | ||
547 | /** | |
548 | * kvm_free_stage2_pgd - free all stage-2 tables | |
549 | * @kvm: The KVM struct pointer for the VM. | |
550 | * | |
551 | * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all | |
552 | * underlying level-2 and level-3 tables before freeing the actual level-1 table | |
553 | * and setting the struct pointer to NULL. | |
554 | * | |
555 | * Note we don't need locking here as this is only called when the VM is | |
556 | * destroyed, which can only be done once. | |
557 | */ | |
558 | void kvm_free_stage2_pgd(struct kvm *kvm) | |
559 | { | |
560 | if (kvm->arch.pgd == NULL) | |
561 | return; | |
562 | ||
563 | unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE); | |
564 | free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER); | |
565 | kvm->arch.pgd = NULL; | |
566 | } | |
567 | ||
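/*
 * Walk the stage-2 tables down to the PMD level for @addr, allocating a
 * missing PMD table from @cache when needed. Returns NULL if the entry is
 * missing and no cache was provided.
 */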
ad361f09 CD |
568 | static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, |
569 | phys_addr_t addr) | |
d5d8184d CD |
570 | { |
571 | pgd_t *pgd; | |
572 | pud_t *pud; | |
573 | pmd_t *pmd; | |
d5d8184d | 574 | |
d5d8184d CD |
575 | pgd = kvm->arch.pgd + pgd_index(addr); |
576 | pud = pud_offset(pgd, addr); | |
577 | if (pud_none(*pud)) { | |
578 | if (!cache) | |
ad361f09 | 579 | return NULL; |
d5d8184d CD |
580 | pmd = mmu_memory_cache_alloc(cache); |
581 | pud_populate(NULL, pud, pmd); | |
d5d8184d | 582 | get_page(virt_to_page(pud)); |
c62ee2b2 MZ |
583 | } |
584 | ||
ad361f09 CD |
585 | return pmd_offset(pud, addr); |
586 | } | |
587 | ||
588 | static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache | |
589 | *cache, phys_addr_t addr, const pmd_t *new_pmd) | |
590 | { | |
591 | pmd_t *pmd, old_pmd; | |
592 | ||
593 | pmd = stage2_get_pmd(kvm, cache, addr); | |
594 | VM_BUG_ON(!pmd); | |
d5d8184d | 595 | |
ad361f09 CD |
596 | /* |
597 | * Mapping in huge pages should only happen through a fault. If a | |
598 | * page is merged into a transparent huge page, the individual | |
599 | * subpages of that huge page should be unmapped through MMU | |
600 | * notifiers before we get here. | |
601 | * | |
602 | * Merging of CompoundPages is not supported; they should be split | |
603 | * first, unmapped, merged, and mapped back in on-demand. | |
604 | */ | |
605 | VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd)); | |
606 | ||
607 | old_pmd = *pmd; | |
608 | kvm_set_pmd(pmd, *new_pmd); | |
609 | if (pmd_present(old_pmd)) | |
610 | kvm_tlb_flush_vmid_ipa(kvm, addr); | |
611 | else | |
612 | get_page(virt_to_page(pmd)); | |
613 | return 0; | |
614 | } | |
615 | ||
616 | static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, | |
617 | phys_addr_t addr, const pte_t *new_pte, bool iomap) | |
618 | { | |
619 | pmd_t *pmd; | |
620 | pte_t *pte, old_pte; | |
621 | ||
622 | /* Create stage-2 page table mapping - Level 1 */ | |
623 | pmd = stage2_get_pmd(kvm, cache, addr); | |
624 | if (!pmd) { | |
625 | /* | |
626 | * Ignore calls from kvm_set_spte_hva for unallocated | |
627 | * address ranges. | |
628 | */ | |
629 | return 0; | |
630 | } | |
631 | ||
632 | /* Create stage-2 page mappings - Level 2 */ | |
d5d8184d CD |
633 | if (pmd_none(*pmd)) { |
634 | if (!cache) | |
635 | return 0; /* ignore calls from kvm_set_spte_hva */ | |
636 | pte = mmu_memory_cache_alloc(cache); | |
c62ee2b2 | 637 | kvm_clean_pte(pte); |
d5d8184d | 638 | pmd_populate_kernel(NULL, pmd, pte); |
d5d8184d | 639 | get_page(virt_to_page(pmd)); |
c62ee2b2 MZ |
640 | } |
641 | ||
642 | pte = pte_offset_kernel(pmd, addr); | |
d5d8184d CD |
643 | |
644 | if (iomap && pte_present(*pte)) | |
645 | return -EFAULT; | |
646 | ||
647 | /* Create 2nd stage page table mapping - Level 3 */ | |
648 | old_pte = *pte; | |
649 | kvm_set_pte(pte, *new_pte); | |
650 | if (pte_present(old_pte)) | |
48762767 | 651 | kvm_tlb_flush_vmid_ipa(kvm, addr); |
d5d8184d CD |
652 | else |
653 | get_page(virt_to_page(pte)); | |
654 | ||
655 | return 0; | |
656 | } | |
657 | ||
658 | /** | |
659 | * kvm_phys_addr_ioremap - map a device range to guest IPA | |
660 | * | |
661 | * @kvm: The KVM pointer | |
662 | * @guest_ipa: The IPA at which to insert the mapping | |
663 | * @pa: The physical address of the device | |
664 | * @size: The size of the mapping | |
665 | */ | |
666 | int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, | |
667 | phys_addr_t pa, unsigned long size) | |
668 | { | |
669 | phys_addr_t addr, end; | |
670 | int ret = 0; | |
671 | unsigned long pfn; | |
672 | struct kvm_mmu_memory_cache cache = { 0, }; | |
673 | ||
674 | end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK; | |
675 | pfn = __phys_to_pfn(pa); | |
676 | ||
677 | for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) { | |
c62ee2b2 | 678 | pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE); |
d5d8184d CD |
679 | |
680 | ret = mmu_topup_memory_cache(&cache, 2, 2); | |
681 | if (ret) | |
682 | goto out; | |
683 | spin_lock(&kvm->mmu_lock); | |
684 | ret = stage2_set_pte(kvm, &cache, addr, &pte, true); | |
685 | spin_unlock(&kvm->mmu_lock); | |
686 | if (ret) | |
687 | goto out; | |
688 | ||
689 | pfn++; | |
690 | } | |
691 | ||
692 | out: | |
693 | mmu_free_memory_cache(&cache); | |
694 | return ret; | |
695 | } | |
696 | ||
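/*
 * If the faulting page turns out to be part of a transparent huge page,
 * adjust the IPA and pfn so that the whole huge page can be mapped with a
 * single stage-2 PMD, moving the reference from the tail page to the head.
 */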
9b5fdb97 CD |
697 | static bool transparent_hugepage_adjust(pfn_t *pfnp, phys_addr_t *ipap) |
698 | { | |
699 | pfn_t pfn = *pfnp; | |
700 | gfn_t gfn = *ipap >> PAGE_SHIFT; | |
701 | ||
702 | if (PageTransCompound(pfn_to_page(pfn))) { | |
703 | unsigned long mask; | |
704 | /* | |
705 | * The address we faulted on is backed by a transparent huge | |
706 | * page. However, because we map the compound huge page and | |
707 | * not the individual tail page, we need to transfer the | |
708 | * refcount to the head page. We have to be careful that the | |
709 | * THP doesn't start to split while we are adjusting the | |
710 | * refcounts. | |
711 | * | |
712 | * We are sure this doesn't happen, because mmu_notifier_retry | |
713 | * was successful and we are holding the mmu_lock, so if this | |
714 | * THP is trying to split, it will be blocked in the mmu | |
715 | * notifier before touching any of the pages, specifically | |
716 | * before being able to call __split_huge_page_refcount(). | |
717 | * | |
718 | * We can therefore safely transfer the refcount from PG_tail | |
719 | * to PG_head and switch the pfn from a tail page to the head | |
720 | * page accordingly. | |
721 | */ | |
722 | mask = PTRS_PER_PMD - 1; | |
723 | VM_BUG_ON((gfn & mask) != (pfn & mask)); | |
724 | if (pfn & mask) { | |
725 | *ipap &= PMD_MASK; | |
726 | kvm_release_pfn_clean(pfn); | |
727 | pfn &= ~mask; | |
728 | kvm_get_pfn(pfn); | |
729 | *pfnp = pfn; | |
730 | } | |
731 | ||
732 | return true; | |
733 | } | |
734 | ||
735 | return false; | |
736 | } | |
737 | ||
94f8e641 | 738 | static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, |
ad361f09 | 739 | struct kvm_memory_slot *memslot, |
94f8e641 CD |
740 | unsigned long fault_status) |
741 | { | |
94f8e641 | 742 | int ret; |
9b5fdb97 | 743 | bool write_fault, writable, hugetlb = false, force_pte = false; |
94f8e641 | 744 | unsigned long mmu_seq; |
ad361f09 CD |
745 | gfn_t gfn = fault_ipa >> PAGE_SHIFT; |
746 | unsigned long hva = gfn_to_hva(vcpu->kvm, gfn); | |
747 | struct kvm *kvm = vcpu->kvm; | |
94f8e641 | 748 | struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; |
ad361f09 CD |
749 | struct vm_area_struct *vma; |
750 | pfn_t pfn; | |
94f8e641 | 751 | |
7393b599 | 752 | write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu)); |
94f8e641 CD |
753 | if (fault_status == FSC_PERM && !write_fault) { |
754 | kvm_err("Unexpected L2 read permission error\n"); | |
755 | return -EFAULT; | |
756 | } | |
757 | ||
ad361f09 CD |
758 | /* Let's check if we will get back a huge page backed by hugetlbfs */ |
759 | down_read(¤t->mm->mmap_sem); | |
760 | vma = find_vma_intersection(current->mm, hva, hva + 1); | |
761 | if (is_vm_hugetlb_page(vma)) { | |
762 | hugetlb = true; | |
763 | gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT; | |
9b5fdb97 CD |
764 | } else { |
765 | /* | |
136d737f MZ |
766 | * Pages belonging to memslots that don't have the same |
767 | * alignment for userspace and IPA cannot be mapped using | |
768 | * block descriptors even if the pages belong to a THP for | |
769 | * the process, because the stage-2 block descriptor will | |
770 | * cover more than a single THP and we lose atomicity for | |
771 | * unmapping, updates, and splits of the THP or other pages | |
772 | * in the stage-2 block range. | |
9b5fdb97 | 773 | */ |
136d737f MZ |
774 | if ((memslot->userspace_addr & ~PMD_MASK) != |
775 | ((memslot->base_gfn << PAGE_SHIFT) & ~PMD_MASK)) | |
9b5fdb97 | 776 | force_pte = true; |
ad361f09 CD |
777 | } |
778 | up_read(¤t->mm->mmap_sem); | |
779 | ||
94f8e641 CD |
780 | /* We need minimum second+third level pages */ |
781 | ret = mmu_topup_memory_cache(memcache, 2, KVM_NR_MEM_OBJS); | |
782 | if (ret) | |
783 | return ret; | |
784 | ||
785 | mmu_seq = vcpu->kvm->mmu_notifier_seq; | |
786 | /* | |
787 | * Ensure the read of mmu_notifier_seq happens before we call | |
788 | * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk | |
789 | * the page we just got a reference to gets unmapped before we have a | |
790 | * chance to grab the mmu_lock, which ensure that if the page gets | |
791 | * unmapped afterwards, the call to kvm_unmap_hva will take it away | |
792 | * from us again properly. This smp_rmb() interacts with the smp_wmb() | |
793 | * in kvm_mmu_notifier_invalidate_<page|range_end>. | |
794 | */ | |
795 | smp_rmb(); | |
796 | ||
ad361f09 | 797 | pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable); |
94f8e641 CD |
798 | if (is_error_pfn(pfn)) |
799 | return -EFAULT; | |
800 | ||
ad361f09 CD |
801 | spin_lock(&kvm->mmu_lock); |
802 | if (mmu_notifier_retry(kvm, mmu_seq)) | |
94f8e641 | 803 | goto out_unlock; |
9b5fdb97 CD |
804 | if (!hugetlb && !force_pte) |
805 | hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa); | |
ad361f09 CD |
806 | |
807 | if (hugetlb) { | |
808 | pmd_t new_pmd = pfn_pmd(pfn, PAGE_S2); | |
809 | new_pmd = pmd_mkhuge(new_pmd); | |
810 | if (writable) { | |
811 | kvm_set_s2pmd_writable(&new_pmd); | |
812 | kvm_set_pfn_dirty(pfn); | |
813 | } | |
2d58b733 | 814 | coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE); |
ad361f09 CD |
815 | ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd); |
816 | } else { | |
817 | pte_t new_pte = pfn_pte(pfn, PAGE_S2); | |
818 | if (writable) { | |
819 | kvm_set_s2pte_writable(&new_pte); | |
820 | kvm_set_pfn_dirty(pfn); | |
821 | } | |
2d58b733 | 822 | coherent_cache_guest_page(vcpu, hva, PAGE_SIZE); |
ad361f09 | 823 | ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, false); |
94f8e641 | 824 | } |
ad361f09 | 825 | |
94f8e641 CD |
826 | |
827 | out_unlock: | |
ad361f09 | 828 | spin_unlock(&kvm->mmu_lock); |
94f8e641 | 829 | kvm_release_pfn_clean(pfn); |
ad361f09 | 830 | return ret; |
94f8e641 CD |
831 | } |
832 | ||
833 | /** | |
834 | * kvm_handle_guest_abort - handles all 2nd stage aborts | |
835 | * @vcpu: the VCPU pointer | |
836 | * @run: the kvm_run structure | |
837 | * | |
838 | * Any abort that gets to the host is almost guaranteed to be caused by a | |
839 | * missing second stage translation table entry, which can mean either that the | |
840 | * guest simply needs more memory and we must allocate an appropriate page, or | |
841 | * that the guest tried to access I/O memory, which is emulated by user space. | |
842 | * The distinction is based on the IPA causing the fault and whether this | |
843 | * memory region has been registered as standard RAM by user space. | |
844 | */ | |
342cd0ab CD |
845 | int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) |
846 | { | |
94f8e641 CD |
847 | unsigned long fault_status; |
848 | phys_addr_t fault_ipa; | |
849 | struct kvm_memory_slot *memslot; | |
850 | bool is_iabt; | |
851 | gfn_t gfn; | |
852 | int ret, idx; | |
853 | ||
52d1dba9 | 854 | is_iabt = kvm_vcpu_trap_is_iabt(vcpu); |
7393b599 | 855 | fault_ipa = kvm_vcpu_get_fault_ipa(vcpu); |
94f8e641 | 856 | |
7393b599 MZ |
857 | trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu), |
858 | kvm_vcpu_get_hfar(vcpu), fault_ipa); | |
94f8e641 CD |
859 | |
860 | /* Check the stage-2 fault is trans. fault or write fault */ | |
1cc287dd | 861 | fault_status = kvm_vcpu_trap_get_fault(vcpu); |
94f8e641 | 862 | if (fault_status != FSC_FAULT && fault_status != FSC_PERM) { |
52d1dba9 MZ |
863 | kvm_err("Unsupported fault status: EC=%#x DFCS=%#lx\n", |
864 | kvm_vcpu_trap_get_class(vcpu), fault_status); | |
94f8e641 CD |
865 | return -EFAULT; |
866 | } | |
867 | ||
868 | idx = srcu_read_lock(&vcpu->kvm->srcu); | |
869 | ||
870 | gfn = fault_ipa >> PAGE_SHIFT; | |
871 | if (!kvm_is_visible_gfn(vcpu->kvm, gfn)) { | |
872 | if (is_iabt) { | |
873 | /* Prefetch Abort on I/O address */ | |
7393b599 | 874 | kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu)); |
94f8e641 CD |
875 | ret = 1; |
876 | goto out_unlock; | |
877 | } | |
878 | ||
879 | if (fault_status != FSC_FAULT) { | |
880 | kvm_err("Unsupported fault status on io memory: %#lx\n", | |
881 | fault_status); | |
882 | ret = -EFAULT; | |
883 | goto out_unlock; | |
884 | } | |
885 | ||
cfe3950c MZ |
886 | /* |
887 | * The IPA is reported as [MAX:12], so we need to | |
888 | * complement it with the bottom 12 bits from the | |
889 | * faulting VA. This is always 12 bits, irrespective | |
890 | * of the page size. | |
891 | */ | |
892 | fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1); | |
45e96ea6 | 893 | ret = io_mem_abort(vcpu, run, fault_ipa); |
94f8e641 CD |
894 | goto out_unlock; |
895 | } | |
896 | ||
897 | memslot = gfn_to_memslot(vcpu->kvm, gfn); | |
94f8e641 | 898 | |
ad361f09 | 899 | ret = user_mem_abort(vcpu, fault_ipa, memslot, fault_status); |
94f8e641 CD |
900 | if (ret == 0) |
901 | ret = 1; | |
902 | out_unlock: | |
903 | srcu_read_unlock(&vcpu->kvm->srcu, idx); | |
904 | return ret; | |
342cd0ab CD |
905 | } |
906 | ||
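/*
 * Iterate over every memslot that intersects the HVA range [start, end) and
 * invoke @handler on each guest physical page covered by the intersection.
 */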
d5d8184d CD |
907 | static void handle_hva_to_gpa(struct kvm *kvm, |
908 | unsigned long start, | |
909 | unsigned long end, | |
910 | void (*handler)(struct kvm *kvm, | |
911 | gpa_t gpa, void *data), | |
912 | void *data) | |
913 | { | |
914 | struct kvm_memslots *slots; | |
915 | struct kvm_memory_slot *memslot; | |
916 | ||
917 | slots = kvm_memslots(kvm); | |
918 | ||
919 | /* we only care about the pages that the guest sees */ | |
920 | kvm_for_each_memslot(memslot, slots) { | |
921 | unsigned long hva_start, hva_end; | |
922 | gfn_t gfn, gfn_end; | |
923 | ||
924 | hva_start = max(start, memslot->userspace_addr); | |
925 | hva_end = min(end, memslot->userspace_addr + | |
926 | (memslot->npages << PAGE_SHIFT)); | |
927 | if (hva_start >= hva_end) | |
928 | continue; | |
929 | ||
930 | /* | |
931 | * {gfn(page) | page intersects with [hva_start, hva_end)} = | |
932 | * {gfn_start, gfn_start+1, ..., gfn_end-1}. | |
933 | */ | |
934 | gfn = hva_to_gfn_memslot(hva_start, memslot); | |
935 | gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot); | |
936 | ||
937 | for (; gfn < gfn_end; ++gfn) { | |
938 | gpa_t gpa = gfn << PAGE_SHIFT; | |
939 | handler(kvm, gpa, data); | |
940 | } | |
941 | } | |
942 | } | |
943 | ||
944 | static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data) | |
945 | { | |
946 | unmap_stage2_range(kvm, gpa, PAGE_SIZE); | |
d5d8184d CD |
947 | } |
948 | ||
949 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) | |
950 | { | |
951 | unsigned long end = hva + PAGE_SIZE; | |
952 | ||
953 | if (!kvm->arch.pgd) | |
954 | return 0; | |
955 | ||
956 | trace_kvm_unmap_hva(hva); | |
957 | handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL); | |
958 | return 0; | |
959 | } | |
960 | ||
961 | int kvm_unmap_hva_range(struct kvm *kvm, | |
962 | unsigned long start, unsigned long end) | |
963 | { | |
964 | if (!kvm->arch.pgd) | |
965 | return 0; | |
966 | ||
967 | trace_kvm_unmap_hva_range(start, end); | |
968 | handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL); | |
969 | return 0; | |
970 | } | |
971 | ||
972 | static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data) | |
973 | { | |
974 | pte_t *pte = (pte_t *)data; | |
975 | ||
976 | stage2_set_pte(kvm, NULL, gpa, pte, false); | |
977 | } | |
978 | ||
979 | ||
980 | void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) | |
981 | { | |
982 | unsigned long end = hva + PAGE_SIZE; | |
983 | pte_t stage2_pte; | |
984 | ||
985 | if (!kvm->arch.pgd) | |
986 | return; | |
987 | ||
988 | trace_kvm_set_spte_hva(hva); | |
989 | stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2); | |
990 | handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte); | |
991 | } | |
992 | ||
993 | void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu) | |
994 | { | |
995 | mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); | |
996 | } | |
997 | ||
342cd0ab CD |
998 | phys_addr_t kvm_mmu_get_httbr(void) |
999 | { | |
342cd0ab CD |
1000 | return virt_to_phys(hyp_pgd); |
1001 | } | |
1002 | ||
5a677ce0 MZ |
1003 | phys_addr_t kvm_mmu_get_boot_httbr(void) |
1004 | { | |
1005 | return virt_to_phys(boot_hyp_pgd); | |
1006 | } | |
1007 | ||
1008 | phys_addr_t kvm_get_idmap_vector(void) | |
1009 | { | |
1010 | return hyp_idmap_vector; | |
1011 | } | |
1012 | ||
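/*
 * Set up the page tables needed to install Hyp mode: the identity mapping of
 * the Hyp init code (via a bounce page if it crosses a page boundary) and the
 * trampoline mapping in both the boot and runtime Hyp page tables.
 */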
342cd0ab CD |
1013 | int kvm_mmu_init(void) |
1014 | { | |
2fb41059 MZ |
1015 | int err; |
1016 | ||
4fda342c SS |
1017 | hyp_idmap_start = kvm_virt_to_phys(__hyp_idmap_text_start); |
1018 | hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end); | |
1019 | hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init); | |
5a677ce0 MZ |
1020 | |
1021 | if ((hyp_idmap_start ^ hyp_idmap_end) & PAGE_MASK) { | |
1022 | /* | |
1023 | * Our init code is crossing a page boundary. Allocate | |
1024 | * a bounce page, copy the code over and use that. | |
1025 | */ | |
1026 | size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start; | |
1027 | phys_addr_t phys_base; | |
1028 | ||
5d4e08c4 | 1029 | init_bounce_page = (void *)__get_free_page(GFP_KERNEL); |
5a677ce0 MZ |
1030 | if (!init_bounce_page) { |
1031 | kvm_err("Couldn't allocate HYP init bounce page\n"); | |
1032 | err = -ENOMEM; | |
1033 | goto out; | |
1034 | } | |
1035 | ||
1036 | memcpy(init_bounce_page, __hyp_idmap_text_start, len); | |
1037 | /* | |
1038 | * Warning: the code we just copied to the bounce page | |
1039 | * must be flushed to the point of coherency. | |
1040 | * Otherwise, the data may be sitting in L2, and HYP | |
1041 | * mode won't be able to observe it as it runs with | |
1042 | * caches off at that point. | |
1043 | */ | |
1044 | kvm_flush_dcache_to_poc(init_bounce_page, len); | |
1045 | ||
4fda342c | 1046 | phys_base = kvm_virt_to_phys(init_bounce_page); |
5a677ce0 MZ |
1047 | hyp_idmap_vector += phys_base - hyp_idmap_start; |
1048 | hyp_idmap_start = phys_base; | |
1049 | hyp_idmap_end = phys_base + len; | |
1050 | ||
1051 | kvm_info("Using HYP init bounce page @%lx\n", | |
1052 | (unsigned long)phys_base); | |
1053 | } | |
1054 | ||
5d4e08c4 MS |
1055 | hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, pgd_order); |
1056 | boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, pgd_order); | |
1057 | ||
5a677ce0 | 1058 | if (!hyp_pgd || !boot_hyp_pgd) { |
d5d8184d | 1059 | kvm_err("Hyp mode PGD not allocated\n"); |
2fb41059 MZ |
1060 | err = -ENOMEM; |
1061 | goto out; | |
1062 | } | |
1063 | ||
1064 | /* Create the idmap in the boot page tables */ | |
1065 | err = __create_hyp_mappings(boot_hyp_pgd, | |
1066 | hyp_idmap_start, hyp_idmap_end, | |
1067 | __phys_to_pfn(hyp_idmap_start), | |
1068 | PAGE_HYP); | |
1069 | ||
1070 | if (err) { | |
1071 | kvm_err("Failed to idmap %lx-%lx\n", | |
1072 | hyp_idmap_start, hyp_idmap_end); | |
1073 | goto out; | |
d5d8184d CD |
1074 | } |
1075 | ||
5a677ce0 MZ |
1076 | /* Map the very same page at the trampoline VA */ |
1077 | err = __create_hyp_mappings(boot_hyp_pgd, | |
1078 | TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE, | |
1079 | __phys_to_pfn(hyp_idmap_start), | |
1080 | PAGE_HYP); | |
1081 | if (err) { | |
1082 | kvm_err("Failed to map trampoline @%lx into boot HYP pgd\n", | |
1083 | TRAMPOLINE_VA); | |
1084 | goto out; | |
1085 | } | |
1086 | ||
1087 | /* Map the same page again into the runtime page tables */ | |
1088 | err = __create_hyp_mappings(hyp_pgd, | |
1089 | TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE, | |
1090 | __phys_to_pfn(hyp_idmap_start), | |
1091 | PAGE_HYP); | |
1092 | if (err) { | |
1093 | kvm_err("Failed to map trampoline @%lx into runtime HYP pgd\n", | |
1094 | TRAMPOLINE_VA); | |
1095 | goto out; | |
1096 | } | |
1097 | ||
d5d8184d | 1098 | return 0; |
2fb41059 | 1099 | out: |
4f728276 | 1100 | free_hyp_pgds(); |
2fb41059 | 1101 | return err; |
342cd0ab | 1102 | } |