arch/arm/kvm/mmu.c
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
18
19 #include <linux/mman.h>
20 #include <linux/kvm_host.h>
21 #include <linux/io.h>
22 #include <trace/events/kvm.h>
23 #include <asm/pgalloc.h>
24 #include <asm/cacheflush.h>
25 #include <asm/kvm_arm.h>
26 #include <asm/kvm_mmu.h>
27 #include <asm/kvm_mmio.h>
28 #include <asm/kvm_asm.h>
29 #include <asm/kvm_emulate.h>
30
31 #include "trace.h"
32
extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];

static pgd_t *boot_hyp_pgd;
static pgd_t *hyp_pgd;
static DEFINE_MUTEX(kvm_hyp_pgd_mutex);

static void *init_bounce_page;
static unsigned long hyp_idmap_start;
static unsigned long hyp_idmap_end;
static phys_addr_t hyp_idmap_vector;

static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
        /*
         * This function also gets called when dealing with HYP page
         * tables. As HYP doesn't have an associated struct kvm (and
         * the HYP page tables are fairly static), we don't do
         * anything there.
         */
        if (kvm)
                kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
}

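/*
 * The mmu_*_memory_cache helpers below implement the common KVM
 * pattern of pre-filling a per-caller cache of page-table pages while
 * it is still legal to sleep, so that mmu_memory_cache_alloc() can
 * later hand out a page without blocking while the mmu_lock spinlock
 * is held. The canonical sequence, as used by kvm_phys_addr_ioremap()
 * further down in this file, is:
 *
 *   mmu_topup_memory_cache(&cache, 2, 2);    // may sleep
 *   spin_lock(&kvm->mmu_lock);
 *   stage2_set_pte(kvm, &cache, ...);        // allocates from cache
 *   spin_unlock(&kvm->mmu_lock);
 *   mmu_free_memory_cache(&cache);           // returns unused pages
 */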
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
                                  int min, int max)
{
        void *page;

        BUG_ON(max > KVM_NR_MEM_OBJS);
        if (cache->nobjs >= min)
                return 0;
        while (cache->nobjs < max) {
                page = (void *)__get_free_page(PGALLOC_GFP);
                if (!page)
                        return -ENOMEM;
                cache->objects[cache->nobjs++] = page;
        }
        return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
        while (mc->nobjs)
                free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
        void *p;

        BUG_ON(!mc || !mc->nobjs);
        p = mc->objects[--mc->nobjs];
        return p;
}

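/*
 * Page-table pages are reference counted: each entry installed in a
 * table takes a reference on the page holding that table (get_page),
 * and each entry cleared drops one (put_page). A table page whose
 * page_count() has fallen back to 1 therefore holds no entries, which
 * is what pte_empty() and pmd_empty() below test for, allowing
 * unmap_range() to free empty tables as it walks back up the levels.
 */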
static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
{
        pmd_t *pmd_table = pmd_offset(pud, 0);
        pud_clear(pud);
        kvm_tlb_flush_vmid_ipa(kvm, addr);
        pmd_free(NULL, pmd_table);
        put_page(virt_to_page(pud));
}

static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
{
        pte_t *pte_table = pte_offset_kernel(pmd, 0);
        pmd_clear(pmd);
        kvm_tlb_flush_vmid_ipa(kvm, addr);
        pte_free_kernel(NULL, pte_table);
        put_page(virt_to_page(pmd));
}

static bool pmd_empty(pmd_t *pmd)
{
        struct page *pmd_page = virt_to_page(pmd);
        return page_count(pmd_page) == 1;
}

static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
{
        if (pte_present(*pte)) {
                kvm_set_pte(pte, __pte(0));
                put_page(virt_to_page(pte));
                kvm_tlb_flush_vmid_ipa(kvm, addr);
        }
}

static bool pte_empty(pte_t *pte)
{
        struct page *pte_page = virt_to_page(pte);
        return page_count(pte_page) == 1;
}

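/*
 * unmap_range - clear page-table entries covering [start, start + size)
 *
 * Works on both stage-2 (guest) and HYP page tables: kvm may be NULL
 * for the HYP case, in which case no TLB invalidation by IPA is done.
 * Whenever clearing a PTE empties the enclosing table, the walk steps
 * back up a level, frees the now-empty table and skips ahead by the
 * larger block size (PMD_SIZE or PUD_SIZE).
 */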
static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
                        unsigned long long start, u64 size)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long long addr = start, end = start + size;
        u64 range;

        while (addr < end) {
                pgd = pgdp + pgd_index(addr);
                pud = pud_offset(pgd, addr);
                if (pud_none(*pud)) {
                        addr += PUD_SIZE;
                        continue;
                }

                pmd = pmd_offset(pud, addr);
                if (pmd_none(*pmd)) {
                        addr += PMD_SIZE;
                        continue;
                }

                pte = pte_offset_kernel(pmd, addr);
                clear_pte_entry(kvm, pte, addr);
                range = PAGE_SIZE;

                /* If we emptied the pte, walk back up the ladder */
                if (pte_empty(pte)) {
                        clear_pmd_entry(kvm, pmd, addr);
                        range = PMD_SIZE;
                        if (pmd_empty(pmd)) {
                                clear_pud_entry(kvm, pud, addr);
                                range = PUD_SIZE;
                        }
                }

                addr += range;
        }
}

/**
 * free_boot_hyp_pgd - free HYP boot page tables
 *
 * Free the HYP boot page tables. The bounce page is also freed.
 */
void free_boot_hyp_pgd(void)
{
        mutex_lock(&kvm_hyp_pgd_mutex);

        if (boot_hyp_pgd) {
                unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
                unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
                kfree(boot_hyp_pgd);
                boot_hyp_pgd = NULL;
        }

        if (hyp_pgd)
                unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);

        kfree(init_bounce_page);
        init_bounce_page = NULL;

        mutex_unlock(&kvm_hyp_pgd_mutex);
}

/**
 * free_hyp_pgds - free Hyp-mode page tables
 *
 * Assumes hyp_pgd is a page table used strictly in Hyp-mode and
 * therefore contains either mappings in the kernel memory area (above
 * PAGE_OFFSET), or device mappings in the vmalloc range (from
 * VMALLOC_START to VMALLOC_END).
 *
 * boot_hyp_pgd should only map two pages for the init code.
 */
void free_hyp_pgds(void)
{
        unsigned long addr;

        free_boot_hyp_pgd();

        mutex_lock(&kvm_hyp_pgd_mutex);

        if (hyp_pgd) {
                for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
                        unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
                for (addr = VMALLOC_START; is_vmalloc_addr((void *)addr); addr += PGDIR_SIZE)
                        unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);

                kfree(hyp_pgd);
                hyp_pgd = NULL;
        }

        mutex_unlock(&kvm_hyp_pgd_mutex);
}

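/*
 * The three helpers below build Hyp-mode mappings top-down:
 * __create_hyp_mappings() walks the PGD/PUD level and allocates PMD
 * tables as needed, create_hyp_pmd_mappings() does the same for PTE
 * tables, and create_hyp_pte_mappings() installs the final leaf PTEs.
 * Every newly written table entry is flushed to the point of coherency
 * (kvm_flush_dcache_to_poc) so that HYP, which runs with caches off
 * during early init, can observe the tables.
 */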
static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
                                    unsigned long end, unsigned long pfn,
                                    pgprot_t prot)
{
        pte_t *pte;
        unsigned long addr;

        addr = start;
        do {
                pte = pte_offset_kernel(pmd, addr);
                kvm_set_pte(pte, pfn_pte(pfn, prot));
                get_page(virt_to_page(pte));
                kvm_flush_dcache_to_poc(pte, sizeof(*pte));
                pfn++;
        } while (addr += PAGE_SIZE, addr != end);
}

static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
                                   unsigned long end, unsigned long pfn,
                                   pgprot_t prot)
{
        pmd_t *pmd;
        pte_t *pte;
        unsigned long addr, next;

        addr = start;
        do {
                pmd = pmd_offset(pud, addr);

                BUG_ON(pmd_sect(*pmd));

                if (pmd_none(*pmd)) {
                        pte = pte_alloc_one_kernel(NULL, addr);
                        if (!pte) {
                                kvm_err("Cannot allocate Hyp pte\n");
                                return -ENOMEM;
                        }
                        pmd_populate_kernel(NULL, pmd, pte);
                        get_page(virt_to_page(pmd));
                        kvm_flush_dcache_to_poc(pmd, sizeof(*pmd));
                }

                next = pmd_addr_end(addr, end);

                create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
                pfn += (next - addr) >> PAGE_SHIFT;
        } while (addr = next, addr != end);

        return 0;
}

static int __create_hyp_mappings(pgd_t *pgdp,
                                 unsigned long start, unsigned long end,
                                 unsigned long pfn, pgprot_t prot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        unsigned long addr, next;
        int err = 0;

        mutex_lock(&kvm_hyp_pgd_mutex);
        addr = start & PAGE_MASK;
        end = PAGE_ALIGN(end);
        do {
                pgd = pgdp + pgd_index(addr);
                pud = pud_offset(pgd, addr);

                if (pud_none_or_clear_bad(pud)) {
                        pmd = pmd_alloc_one(NULL, addr);
                        if (!pmd) {
                                kvm_err("Cannot allocate Hyp pmd\n");
                                err = -ENOMEM;
                                goto out;
                        }
                        pud_populate(NULL, pud, pmd);
                        get_page(virt_to_page(pud));
                        kvm_flush_dcache_to_poc(pud, sizeof(*pud));
                }

                next = pgd_addr_end(addr, end);
                err = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
                if (err)
                        goto out;
                pfn += (next - addr) >> PAGE_SHIFT;
        } while (addr = next, addr != end);
out:
        mutex_unlock(&kvm_hyp_pgd_mutex);
        return err;
}

/**
 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
 * @from:       The virtual kernel start address of the range
 * @to:         The virtual kernel end address of the range (exclusive)
 *
 * The same virtual address as the kernel virtual address is also used
 * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
 * physical pages.
 */
int create_hyp_mappings(void *from, void *to)
{
        unsigned long phys_addr = virt_to_phys(from);
        unsigned long start = KERN_TO_HYP((unsigned long)from);
        unsigned long end = KERN_TO_HYP((unsigned long)to);

        /* Check for a valid kernel memory mapping */
        if (!virt_addr_valid(from) || !virt_addr_valid(to - 1))
                return -EINVAL;

        return __create_hyp_mappings(hyp_pgd, start, end,
                                     __phys_to_pfn(phys_addr), PAGE_HYP);
}

/**
 * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode
 * @from:       The kernel start VA of the range
 * @to:         The kernel end VA of the range (exclusive)
 * @phys_addr:  The physical start address which gets mapped
 *
 * The resulting HYP VA is the same as the kernel VA, modulo
 * HYP_PAGE_OFFSET.
 */
int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
{
        unsigned long start = KERN_TO_HYP((unsigned long)from);
        unsigned long end = KERN_TO_HYP((unsigned long)to);

        /* Check for a valid kernel IO mapping */
        if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1))
                return -EINVAL;

        return __create_hyp_mappings(hyp_pgd, start, end,
                                     __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
}

/**
 * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
 * @kvm:        The KVM struct pointer for the VM.
 *
 * Allocates the 1st level table only of size defined by S2_PGD_ORDER (can
 * support either full 40-bit input addresses or limited to 32-bit input
 * addresses). Clears the allocated pages.
 *
 * Note we don't need locking here as this is only called when the VM is
 * created, which can only be done once.
 */
int kvm_alloc_stage2_pgd(struct kvm *kvm)
{
        pgd_t *pgd;

        if (kvm->arch.pgd != NULL) {
                kvm_err("kvm_arch already initialized?\n");
                return -EINVAL;
        }

        pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, S2_PGD_ORDER);
        if (!pgd)
                return -ENOMEM;

        /* stage-2 pgd must be aligned to its size */
        VM_BUG_ON((unsigned long)pgd & (S2_PGD_SIZE - 1));

        memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t));
        kvm_clean_pgd(pgd);
        kvm->arch.pgd = pgd;

        return 0;
}

/**
 * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
 * @kvm:   The VM pointer
 * @start: The intermediate physical base address of the range to unmap
 * @size:  The size of the area to unmap
 *
 * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
 * destroying the VM), otherwise another faulting VCPU may come in and mess
 * with things behind our backs.
 */
static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
{
        unmap_range(kvm, kvm->arch.pgd, start, size);
}

/**
 * kvm_free_stage2_pgd - free all stage-2 tables
 * @kvm:        The KVM struct pointer for the VM.
 *
 * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
 * underlying level-2 and level-3 tables before freeing the actual level-1
 * table and setting the struct pointer to NULL.
 *
 * Note we don't need locking here as this is only called when the VM is
 * destroyed, which can only be done once.
 */
void kvm_free_stage2_pgd(struct kvm *kvm)
{
        if (kvm->arch.pgd == NULL)
                return;

        unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
        free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
        kvm->arch.pgd = NULL;
}

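/*
 * stage2_set_pte - install a stage-2 PTE for the given IPA, allocating
 * intermediate tables from @cache as needed. A NULL @cache means the
 * caller (kvm_set_spte_hva) must not allocate, so missing intermediate
 * tables are simply skipped. With @iomap set, an already-present PTE
 * is treated as an error, since device ranges are expected to be
 * mapped exactly once.
 */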
static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
                          phys_addr_t addr, const pte_t *new_pte, bool iomap)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte, old_pte;

        /* Create 2nd stage page table mapping - Level 1 */
        pgd = kvm->arch.pgd + pgd_index(addr);
        pud = pud_offset(pgd, addr);
        if (pud_none(*pud)) {
                if (!cache)
                        return 0; /* ignore calls from kvm_set_spte_hva */
                pmd = mmu_memory_cache_alloc(cache);
                pud_populate(NULL, pud, pmd);
                get_page(virt_to_page(pud));
        }

        pmd = pmd_offset(pud, addr);

        /* Create 2nd stage page table mapping - Level 2 */
        if (pmd_none(*pmd)) {
                if (!cache)
                        return 0; /* ignore calls from kvm_set_spte_hva */
                pte = mmu_memory_cache_alloc(cache);
                kvm_clean_pte(pte);
                pmd_populate_kernel(NULL, pmd, pte);
                get_page(virt_to_page(pmd));
        }

        pte = pte_offset_kernel(pmd, addr);

        if (iomap && pte_present(*pte))
                return -EFAULT;

        /* Create 2nd stage page table mapping - Level 3 */
        old_pte = *pte;
        kvm_set_pte(pte, *new_pte);
        if (pte_present(old_pte))
                kvm_tlb_flush_vmid_ipa(kvm, addr);
        else
                get_page(virt_to_page(pte));

        return 0;
}

/**
 * kvm_phys_addr_ioremap - map a device range to guest IPA
 *
 * @kvm:        The KVM pointer
 * @guest_ipa:  The IPA at which to insert the mapping
 * @pa:         The physical address of the device
 * @size:       The size of the mapping
 */
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
                          phys_addr_t pa, unsigned long size)
{
        phys_addr_t addr, end;
        int ret = 0;
        unsigned long pfn;
        struct kvm_mmu_memory_cache cache = { 0, };

        end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
        pfn = __phys_to_pfn(pa);

        for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
                pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
                kvm_set_s2pte_writable(&pte);

                ret = mmu_topup_memory_cache(&cache, 2, 2);
                if (ret)
                        goto out;
                spin_lock(&kvm->mmu_lock);
                ret = stage2_set_pte(kvm, &cache, addr, &pte, true);
                spin_unlock(&kvm->mmu_lock);
                if (ret)
                        goto out;

                pfn++;
        }

out:
        mmu_free_memory_cache(&cache);
        return ret;
}

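/*
 * user_mem_abort - handle a stage-2 fault on a memslot-backed address.
 *
 * Resolves the faulting gfn to a host pfn with gfn_to_pfn_prot() and
 * installs a stage-2 mapping for it. The mmu_notifier_seq snapshot
 * and the mmu_notifier_retry() check below guard against the host
 * page being unmapped between looking up the pfn and taking mmu_lock.
 */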
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                          gfn_t gfn, struct kvm_memory_slot *memslot,
                          unsigned long fault_status)
{
        pte_t new_pte;
        pfn_t pfn;
        int ret;
        bool write_fault, writable;
        unsigned long mmu_seq;
        struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;

        write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
        if (fault_status == FSC_PERM && !write_fault) {
                kvm_err("Unexpected L2 read permission error\n");
                return -EFAULT;
        }

        /* We need at least the second and third level table pages */
        ret = mmu_topup_memory_cache(memcache, 2, KVM_NR_MEM_OBJS);
        if (ret)
                return ret;

        mmu_seq = vcpu->kvm->mmu_notifier_seq;
        /*
         * Ensure the read of mmu_notifier_seq happens before we call
         * gfn_to_pfn_prot (which calls get_user_pages), so that we don't
         * risk the page we just got a reference to getting unmapped before
         * we have a chance to grab the mmu_lock, which ensures that if the
         * page gets unmapped afterwards, the call to kvm_unmap_hva will
         * take it away from us again properly. This smp_rmb() interacts
         * with the smp_wmb() in kvm_mmu_notifier_invalidate_<page|range_end>.
         */
        smp_rmb();

        pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write_fault, &writable);
        if (is_error_pfn(pfn))
                return -EFAULT;

        new_pte = pfn_pte(pfn, PAGE_S2);
        coherent_icache_guest_page(vcpu->kvm, gfn);

        spin_lock(&vcpu->kvm->mmu_lock);
        if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
                goto out_unlock;
        if (writable) {
                kvm_set_s2pte_writable(&new_pte);
                kvm_set_pfn_dirty(pfn);
        }
        stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false);

out_unlock:
        spin_unlock(&vcpu->kvm->mmu_lock);
        kvm_release_pfn_clean(pfn);
        return 0;
}

/**
 * kvm_handle_guest_abort - handles all 2nd stage aborts
 * @vcpu:       the VCPU pointer
 * @run:        the kvm_run structure
 *
 * Any abort that gets to the host is almost guaranteed to be caused by a
 * missing second stage translation table entry, which can mean one of two
 * things: either the guest simply needs more memory and we must allocate
 * an appropriate page, or the guest tried to access I/O memory, which is
 * emulated by user space. The distinction is based on the IPA causing the
 * fault and whether this memory region has been registered as standard
 * RAM by user space.
 */
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        unsigned long fault_status;
        phys_addr_t fault_ipa;
        struct kvm_memory_slot *memslot;
        bool is_iabt;
        gfn_t gfn;
        int ret, idx;

        is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
        fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);

        trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
                              kvm_vcpu_get_hfar(vcpu), fault_ipa);

        /* Check that the stage-2 fault is a translation or permission fault */
        fault_status = kvm_vcpu_trap_get_fault(vcpu);
        if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
                kvm_err("Unsupported fault status: EC=%#x DFCS=%#lx\n",
                        kvm_vcpu_trap_get_class(vcpu), fault_status);
                return -EFAULT;
        }

        idx = srcu_read_lock(&vcpu->kvm->srcu);

        gfn = fault_ipa >> PAGE_SHIFT;
        if (!kvm_is_visible_gfn(vcpu->kvm, gfn)) {
                if (is_iabt) {
                        /* Prefetch Abort on I/O address */
                        kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
                        ret = 1;
                        goto out_unlock;
                }

                if (fault_status != FSC_FAULT) {
                        kvm_err("Unsupported fault status on io memory: %#lx\n",
                                fault_status);
                        ret = -EFAULT;
                        goto out_unlock;
                }

                /*
                 * The IPA is reported as [MAX:12], so we need to
                 * complement it with the bottom 12 bits from the
                 * faulting VA. This is always 12 bits, irrespective
                 * of the page size.
                 */
                fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
                ret = io_mem_abort(vcpu, run, fault_ipa);
                goto out_unlock;
        }

        memslot = gfn_to_memslot(vcpu->kvm, gfn);

        ret = user_mem_abort(vcpu, fault_ipa, gfn, memslot, fault_status);
        if (ret == 0)
                ret = 1;
out_unlock:
        srcu_read_unlock(&vcpu->kvm->srcu, idx);
        return ret;
}

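/*
 * handle_hva_to_gpa - apply a handler to every guest page backed by a
 * host virtual address in [start, end).
 *
 * For each memslot intersecting the HVA range, the overlap is
 * translated to guest frame numbers and @handler is invoked once per
 * page. This is the common dispatch used by the MMU notifier hooks
 * below (kvm_unmap_hva, kvm_unmap_hva_range, kvm_set_spte_hva). A
 * hypothetical handler would be wired up the same way as those:
 *
 *   static void my_handler(struct kvm *kvm, gpa_t gpa, void *data);
 *   ...
 *   handle_hva_to_gpa(kvm, hva, hva + PAGE_SIZE, &my_handler, NULL);
 */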
static void handle_hva_to_gpa(struct kvm *kvm,
                              unsigned long start,
                              unsigned long end,
                              void (*handler)(struct kvm *kvm,
                                              gpa_t gpa, void *data),
                              void *data)
{
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        slots = kvm_memslots(kvm);

        /* we only care about the pages that the guest sees */
        kvm_for_each_memslot(memslot, slots) {
                unsigned long hva_start, hva_end;
                gfn_t gfn, gfn_end;

                hva_start = max(start, memslot->userspace_addr);
                hva_end = min(end, memslot->userspace_addr +
                                   (memslot->npages << PAGE_SHIFT));
                if (hva_start >= hva_end)
                        continue;

                /*
                 * {gfn(page) | page intersects with [hva_start, hva_end)} =
                 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
                 */
                gfn = hva_to_gfn_memslot(hva_start, memslot);
                gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

                for (; gfn < gfn_end; ++gfn) {
                        gpa_t gpa = gfn << PAGE_SHIFT;
                        handler(kvm, gpa, data);
                }
        }
}

static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
        unmap_stage2_range(kvm, gpa, PAGE_SIZE);
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
        unsigned long end = hva + PAGE_SIZE;

        if (!kvm->arch.pgd)
                return 0;

        trace_kvm_unmap_hva(hva);
        handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
        return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm,
                        unsigned long start, unsigned long end)
{
        if (!kvm->arch.pgd)
                return 0;

        trace_kvm_unmap_hva_range(start, end);
        handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
        return 0;
}

static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
        pte_t *pte = (pte_t *)data;

        stage2_set_pte(kvm, NULL, gpa, pte, false);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
        unsigned long end = hva + PAGE_SIZE;
        pte_t stage2_pte;

        if (!kvm->arch.pgd)
                return;

        trace_kvm_set_spte_hva(hva);
        stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
        handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
}

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
        mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}

phys_addr_t kvm_mmu_get_httbr(void)
{
        return virt_to_phys(hyp_pgd);
}

phys_addr_t kvm_mmu_get_boot_httbr(void)
{
        return virt_to_phys(boot_hyp_pgd);
}

phys_addr_t kvm_get_idmap_vector(void)
{
        return hyp_idmap_vector;
}

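/*
 * kvm_mmu_init - set up the Hyp-mode page tables.
 *
 * Builds the boot and runtime Hyp PGDs, identity-maps the Hyp init
 * code (via a bounce page if it straddles a page boundary) and maps
 * the same page at TRAMPOLINE_VA in both sets of tables, so the CPU
 * can switch from the idmap to the runtime mappings during init.
 */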
int kvm_mmu_init(void)
{
        int err;

        hyp_idmap_start = virt_to_phys(__hyp_idmap_text_start);
        hyp_idmap_end = virt_to_phys(__hyp_idmap_text_end);
        hyp_idmap_vector = virt_to_phys(__kvm_hyp_init);

        if ((hyp_idmap_start ^ hyp_idmap_end) & PAGE_MASK) {
                /*
                 * Our init code is crossing a page boundary. Allocate
                 * a bounce page, copy the code over and use that.
                 */
                size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start;
                phys_addr_t phys_base;

                init_bounce_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
                if (!init_bounce_page) {
                        kvm_err("Couldn't allocate HYP init bounce page\n");
                        err = -ENOMEM;
                        goto out;
                }

                memcpy(init_bounce_page, __hyp_idmap_text_start, len);
                /*
                 * Warning: the code we just copied to the bounce page
                 * must be flushed to the point of coherency.
                 * Otherwise, the data may be sitting in L2, and HYP
                 * mode won't be able to observe it as it runs with
                 * caches off at that point.
                 */
                kvm_flush_dcache_to_poc(init_bounce_page, len);

                phys_base = virt_to_phys(init_bounce_page);
                hyp_idmap_vector += phys_base - hyp_idmap_start;
                hyp_idmap_start = phys_base;
                hyp_idmap_end = phys_base + len;

                kvm_info("Using HYP init bounce page @%lx\n",
                         (unsigned long)phys_base);
        }

        hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
        boot_hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
        if (!hyp_pgd || !boot_hyp_pgd) {
                kvm_err("Hyp mode PGD not allocated\n");
                err = -ENOMEM;
                goto out;
        }

        /* Create the idmap in the boot page tables */
        err = __create_hyp_mappings(boot_hyp_pgd,
                                    hyp_idmap_start, hyp_idmap_end,
                                    __phys_to_pfn(hyp_idmap_start),
                                    PAGE_HYP);

        if (err) {
                kvm_err("Failed to idmap %lx-%lx\n",
                        hyp_idmap_start, hyp_idmap_end);
                goto out;
        }

        /* Map the very same page at the trampoline VA */
        err = __create_hyp_mappings(boot_hyp_pgd,
                                    TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
                                    __phys_to_pfn(hyp_idmap_start),
                                    PAGE_HYP);
        if (err) {
                kvm_err("Failed to map trampoline @%lx into boot HYP pgd\n",
                        TRAMPOLINE_VA);
                goto out;
        }

        /* Map the same page again into the runtime page tables */
        err = __create_hyp_mappings(hyp_pgd,
                                    TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
                                    __phys_to_pfn(hyp_idmap_start),
                                    PAGE_HYP);
        if (err) {
                kvm_err("Failed to map trampoline @%lx into runtime HYP pgd\n",
                        TRAMPOLINE_VA);
                goto out;
        }

        return 0;
out:
        free_hyp_pgds();
        return err;
}