/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Yaniv Kamay <yaniv@qumranet.com>
 * Avi Kivity <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */
#if PTTYPE == 64
        #define pt_element_t u64
        #define guest_walker guest_walker64
        #define FNAME(name) paging##64_##name
        #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
        #define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
        #define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
        #define PT_INDEX(addr, level) PT64_INDEX(addr, level)
        #define PT_LEVEL_BITS PT64_LEVEL_BITS
        #ifdef CONFIG_X86_64
        #define PT_MAX_FULL_LEVELS 4
        #define CMPXCHG cmpxchg
        #else
        #define CMPXCHG cmpxchg64
        #define PT_MAX_FULL_LEVELS 2
        #endif
#elif PTTYPE == 32
        #define pt_element_t u32
        #define guest_walker guest_walker32
        #define FNAME(name) paging##32_##name
        #define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
        #define PT_LVL_ADDR_MASK(lvl) PT32_LVL_ADDR_MASK(lvl)
        #define PT_LVL_OFFSET_MASK(lvl) PT32_LVL_OFFSET_MASK(lvl)
        #define PT_INDEX(addr, level) PT32_INDEX(addr, level)
        #define PT_LEVEL_BITS PT32_LEVEL_BITS
        #define PT_MAX_FULL_LEVELS 2
        #define CMPXCHG cmpxchg
#else
        #error Invalid PTTYPE value
#endif
#define gpte_to_gfn_lvl FNAME(gpte_to_gfn_lvl)
#define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PT_PAGE_TABLE_LEVEL)
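/*
 * For illustration only: the including file (mmu.c in the KVM tree) is
 * expected to instantiate this template once per guest pte width, roughly
 * like this (sketch, not part of this file):
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 * which yields the paging64_*() and paging32_*() function families through
 * the FNAME() macro above.
 */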
/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
        int level;
        gfn_t table_gfn[PT_MAX_FULL_LEVELS];
        pt_element_t ptes[PT_MAX_FULL_LEVELS];
        pt_element_t prefetch_ptes[PTE_PREFETCH_NUM];
        gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
        unsigned pt_access;
        unsigned pte_access;
        gfn_t gfn;
        struct x86_exception fault;
};
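/*
 * The per-level arrays are indexed by (level - 1): for each level of the
 * guest walk they record the gfn of the guest page table, the pte value
 * that was read from it, and the gpa of that pte.  FNAME(fetch) and
 * FNAME(gpte_changed) use these cached values to detect a concurrent guest
 * modification, and 'fault' is filled in when a walk fails so it can be
 * injected back into the guest.
 */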
static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
{
        return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
}
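/*
 * Example (PTTYPE == 64, values purely illustrative): for a leaf-level gpte
 * of 0x0000000012345067 the address mask keeps bits 51:12, so the function
 * returns gfn 0x12345; the low flag bits (P/RW/US/A/D here) are dropped.
 * At higher levels the per-level mask also drops the bits that would form
 * the large-page offset.
 */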
static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                               pt_element_t __user *ptep_user, unsigned index,
                               pt_element_t orig_pte, pt_element_t new_pte)
        npages = get_user_pages_fast((unsigned long)ptep_user, 1, 1, &page);
        /* Bail out if the guest pte's user page could not be pinned. */
        if (unlikely(npages != 1))

        table = kmap_atomic(page, KM_USER0);
        ret = CMPXCHG(&table[index], orig_pte, new_pte);
        kunmap_atomic(table, KM_USER0);

        kvm_release_page_dirty(page);

        return (ret != orig_pte);
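/*
 * Return convention, as consumed by the walker below: a negative value means
 * the guest pte's user page could not be pinned (the npages != 1 case), 0
 * means new_pte was installed, and a non-zero positive value means the pte
 * no longer matched orig_pte, i.e. the guest changed it under us and the
 * walker treats its cached copy as stale.
 */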
static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
        access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
        if (vcpu->arch.mmu.nx)
                access &= ~(gpte >> PT64_NX_SHIFT);
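/*
 * ACC_EXEC_MASK is bit 0 and PT64_NX_SHIFT is 63, so when NX is enabled
 * shifting the gpte right by 63 yields 1 exactly when the NX bit is set,
 * and the andnot above clears the execute permission.
 */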
/*
 * Fetch a guest pte for a guest virtual address
 */
static int FNAME(walk_addr_generic)(struct guest_walker *walker,
                                    struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                                    gva_t addr, u32 access)
        pt_element_t __user *uninitialized_var(ptep_user);
        unsigned index, pt_access, uninitialized_var(pte_access);
        const int write_fault = access & PFERR_WRITE_MASK;
        const int user_fault  = access & PFERR_USER_MASK;
        const int fetch_fault = access & PFERR_FETCH_MASK;

        trace_kvm_mmu_pagetable_walk(addr, write_fault, user_fault,
        walker->level = mmu->root_level;
        pte           = mmu->get_cr3(vcpu);

        if (walker->level == PT32E_ROOT_LEVEL) {
                pte = kvm_pdptr_read_mmu(vcpu, mmu, (addr >> 30) & 3);
                trace_kvm_mmu_paging_element(pte, walker->level);
                if (!is_present_gpte(pte))

        ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
               (mmu->get_cr3(vcpu) & CR3_NONPAE_RESERVED_BITS) == 0);

                unsigned long host_addr;

                index = PT_INDEX(addr, walker->level);

                table_gfn = gpte_to_gfn(pte);
                offset    = index * sizeof(pt_element_t);
                pte_gpa   = gfn_to_gpa(table_gfn) + offset;
                walker->table_gfn[walker->level - 1] = table_gfn;
                walker->pte_gpa[walker->level - 1] = pte_gpa;

                real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
                                              PFERR_USER_MASK|PFERR_WRITE_MASK);
                if (unlikely(real_gfn == UNMAPPED_GVA))

                real_gfn = gpa_to_gfn(real_gfn);

                host_addr = gfn_to_hva(vcpu->kvm, real_gfn);
                if (unlikely(kvm_is_error_hva(host_addr)))

                ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
                if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))

                trace_kvm_mmu_paging_element(pte, walker->level);

                if (unlikely(!is_present_gpte(pte)))

                if (unlikely(is_rsvd_bits_set(&vcpu->arch.mmu, pte,
                        errcode |= PFERR_RSVD_MASK | PFERR_PRESENT_MASK;

                if (unlikely(write_fault && !is_writable_pte(pte)
                             && (user_fault || is_write_protection(vcpu))))

                if (unlikely(user_fault && !(pte & PT_USER_MASK)))

                if (unlikely(fetch_fault && (pte & PT64_NX_MASK)))

                if (!eperm && unlikely(!(pte & PT_ACCESSED_MASK))) {
                        trace_kvm_mmu_set_accessed_bit(table_gfn, index,
                        ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index,
                                                  pte, pte|PT_ACCESSED_MASK);
                        if (unlikely(ret < 0))

                        mark_page_dirty(vcpu->kvm, table_gfn);
                        pte |= PT_ACCESSED_MASK;
                pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);

                walker->ptes[walker->level - 1] = pte;

                if ((walker->level == PT_PAGE_TABLE_LEVEL) ||
                    ((walker->level == PT_DIRECTORY_LEVEL) &&
                     (PTTYPE == 64 || is_pse(vcpu))) ||
                    ((walker->level == PT_PDPE_LEVEL) &&
                     mmu->root_level == PT64_ROOT_LEVEL)) {
                        int lvl = walker->level;

                        /* check if the kernel is fetching from a user page */
                        if (unlikely(pte_access & PT_USER_MASK) &&
                            kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
                                if (fetch_fault && !user_fault)

                        gfn = gpte_to_gfn_lvl(pte, lvl);
                        gfn += (addr & PT_LVL_OFFSET_MASK(lvl)) >> PAGE_SHIFT;

                            walker->level == PT_DIRECTORY_LEVEL &&
                                gfn += pse36_gfn_delta(pte);

                        ac = write_fault | fetch_fault | user_fault;

                        real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn),
                        if (real_gpa == UNMAPPED_GVA)

                        walker->gfn = real_gpa >> PAGE_SHIFT;
                pt_access = pte_access;

        if (unlikely(eperm)) {
                errcode |= PFERR_PRESENT_MASK;

        if (write_fault && unlikely(!is_dirty_gpte(pte))) {
                trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
                ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index,
                                          pte, pte|PT_DIRTY_MASK);
                if (unlikely(ret < 0))

                mark_page_dirty(vcpu->kvm, table_gfn);
                pte |= PT_DIRTY_MASK;
                walker->ptes[walker->level - 1] = pte;

        walker->pt_access = pt_access;
        walker->pte_access = pte_access;
        pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
                 __func__, (u64)pte, pte_access, pt_access);
        errcode |= write_fault | user_fault;
        if (fetch_fault && (mmu->nx ||
                            kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)))
                errcode |= PFERR_FETCH_MASK;

        walker->fault.vector = PF_VECTOR;
        walker->fault.error_code_valid = true;
        walker->fault.error_code = errcode;
        walker->fault.address = addr;
        walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;

        trace_kvm_mmu_walker_error(walker->fault.error_code);
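/*
 * Summary of the walk above: starting from CR3 (or the PDPTE for PAE), each
 * iteration records the table gfn and pte gpa for the current level, reads
 * the guest pte through the host mapping, checks the present and reserved
 * bits and the requested access against the pte's W/U/NX bits, and sets the
 * Accessed (and, for writes, Dirty) bit with FNAME(cmpxchg_gpte) so a racing
 * guest update is not lost.  On success walker->gfn holds the translated
 * frame; on failure walker->fault is filled in for injection into the guest.
 */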
static int FNAME(walk_addr)(struct guest_walker *walker,
                            struct kvm_vcpu *vcpu, gva_t addr, u32 access)
        return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.mmu, addr,
static int FNAME(walk_addr_nested)(struct guest_walker *walker,
                                   struct kvm_vcpu *vcpu, gva_t addr,
        return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu,
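/*
 * The two wrappers differ only in which mmu context they walk:
 * FNAME(walk_addr) uses vcpu->arch.mmu for the ordinary guest, while
 * FNAME(walk_addr_nested) uses vcpu->arch.nested_mmu, the context KVM keeps
 * for translating addresses on behalf of a nested guest.
 */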
static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
                                         struct kvm_mmu_page *sp, u64 *spte,
        u64 nonpresent = shadow_trap_nonpresent_pte;

        if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))

        if (!is_present_gpte(gpte)) {
                nonpresent = shadow_notrap_nonpresent_pte;

        if (!(gpte & PT_ACCESSED_MASK))

        drop_spte(vcpu->kvm, spte, nonpresent);
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                              u64 *spte, const void *pte)
        gpte = *(const pt_element_t *)pte;
        if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))

        pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
        pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
        pfn = gfn_to_pfn_atomic(vcpu->kvm, gpte_to_gfn(gpte));
        if (is_error_pfn(pfn)) {
                kvm_release_pfn_clean(pfn);

        /*
         * we call mmu_set_spte() with host_writable = true because the pfn
         * was fetched from get_user_pages(write = 1).
         */
        mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
                     is_dirty_gpte(gpte), NULL, PT_PAGE_TABLE_LEVEL,
                     gpte_to_gfn(gpte), pfn, true, true);
static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
                                struct guest_walker *gw, int level)
        pt_element_t curr_pte;
        gpa_t base_gpa, pte_gpa = gw->pte_gpa[level - 1];

        if (level == PT_PAGE_TABLE_LEVEL) {
                mask = PTE_PREFETCH_NUM * sizeof(pt_element_t) - 1;
                base_gpa = pte_gpa & ~mask;
                index = (pte_gpa - base_gpa) / sizeof(pt_element_t);

                r = kvm_read_guest_atomic(vcpu->kvm, base_gpa,
                                          gw->prefetch_ptes, sizeof(gw->prefetch_ptes));
                curr_pte = gw->prefetch_ptes[index];
        } else
                r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa,
                                          &curr_pte, sizeof(curr_pte));

        return r || curr_pte != gw->ptes[level - 1];
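/*
 * FNAME(gpte_changed) re-reads the guest pte for the given level and returns
 * true if the read failed or the value no longer matches what the walker
 * cached in gw->ptes[].  At the leaf level it reads a whole aligned block of
 * PTE_PREFETCH_NUM ptes into gw->prefetch_ptes so that FNAME(pte_prefetch)
 * can reuse them without touching guest memory again.
 */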
static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
        struct kvm_mmu_page *sp;
        pt_element_t *gptep = gw->prefetch_ptes;

        sp = page_header(__pa(sptep));

        if (sp->role.level > PT_PAGE_TABLE_LEVEL)
                return;

        if (sp->role.direct)
                return __direct_pte_prefetch(vcpu, sp, sptep);

        i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);

        for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
                if (*spte != shadow_trap_nonpresent_pte)

                if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))

                pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
                gfn = gpte_to_gfn(gpte);
                dirty = is_dirty_gpte(gpte);
                pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
                                              (pte_access & ACC_WRITE_MASK) && dirty);
                if (is_error_pfn(pfn)) {
                        kvm_release_pfn_clean(pfn);

                mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
                             dirty, NULL, PT_PAGE_TABLE_LEVEL, gfn,
/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 */
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                         struct guest_walker *gw,
                         int user_fault, int write_fault, int hlevel,
                         int *ptwrite, pfn_t pfn, bool map_writable,
= gw
->pt_access
;
463 struct kvm_mmu_page
*sp
= NULL
;
464 bool dirty
= is_dirty_gpte(gw
->ptes
[gw
->level
- 1]);
466 unsigned direct_access
;
467 struct kvm_shadow_walk_iterator it
;
469 if (!is_present_gpte(gw
->ptes
[gw
->level
- 1]))
472 direct_access
= gw
->pt_access
& gw
->pte_access
;
474 direct_access
&= ~ACC_WRITE_MASK
;
476 top_level
= vcpu
->arch
.mmu
.root_level
;
477 if (top_level
== PT32E_ROOT_LEVEL
)
478 top_level
= PT32_ROOT_LEVEL
;
480 * Verify that the top-level gpte is still there. Since the page
481 * is a root page, it is either write protected (and cannot be
482 * changed from now on) or it is invalid (in which case, we don't
483 * really care if it changes underneath us after this point).
485 if (FNAME(gpte_changed
)(vcpu
, gw
, top_level
))
486 goto out_gpte_changed
;
        for (shadow_walk_init(&it, vcpu, addr);
             shadow_walk_okay(&it) && it.level > gw->level;
             shadow_walk_next(&it)) {

                drop_large_spte(vcpu, it.sptep);

                if (!is_shadow_present_pte(*it.sptep)) {
                        table_gfn = gw->table_gfn[it.level - 2];
                        sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
                                              false, access, it.sptep);

                /*
                 * Verify that the gpte in the page we've just write
                 * protected is still there.
                 */
                if (FNAME(gpte_changed)(vcpu, gw, it.level - 1))
                        goto out_gpte_changed;

                link_shadow_page(it.sptep, sp);
        for (;
             shadow_walk_okay(&it) && it.level > hlevel;
             shadow_walk_next(&it)) {

                validate_direct_spte(vcpu, it.sptep, direct_access);

                drop_large_spte(vcpu, it.sptep);

                if (is_shadow_present_pte(*it.sptep))

                direct_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);

                sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, it.level-1,
                                      true, direct_access, it.sptep);
                link_shadow_page(it.sptep, sp);

        mmu_set_spte(vcpu, it.sptep, access, gw->pte_access & access,
                     user_fault, write_fault, dirty, ptwrite, it.level,
                     gw->gfn, pfn, prefault, map_writable);
        FNAME(pte_prefetch)(vcpu, gw, it.sptep);

        kvm_mmu_put_page(sp, it.sptep);
        kvm_release_pfn_clean(pfn);
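/*
 * Summary of FNAME(fetch): it walks the shadow page table in step with the
 * cached guest walk.  For levels above the guest's mapping level it installs
 * indirect shadow pages keyed by the guest table gfn (write-protecting the
 * guest table), for the remaining levels down to hlevel it installs direct
 * pages, and finally it creates the leaf spte with mmu_set_spte() and
 * prefetches neighbouring ptes.  If a cached guest pte is seen to have
 * changed at any point, it bails out via out_gpte_changed.
 */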
/*
 * Page fault handler.  There are several causes for a page fault:
 *           - there is no shadow pte for the guest pte
 *           - write access through a shadow pte marked read only so that we can set
 *             the dirty bit
 *           - write access to a shadow pte marked read only so we can update the page
 *             dirty bitmap, when userspace requests it
 *           - mmio access; in this case we will never install a present shadow pte
 *           - normal guest page fault due to the guest pte marked not present, not
 *             writable, or not executable
 *
 *  Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *           a negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
        int write_fault = error_code & PFERR_WRITE_MASK;
        int user_fault = error_code & PFERR_USER_MASK;
        struct guest_walker walker;
        int level = PT_PAGE_TABLE_LEVEL;
        unsigned long mmu_seq;

        pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);

        r = mmu_topup_memory_caches(vcpu);

        /*
         * Look up the guest pte for the faulting address.
         */
        r = FNAME(walk_addr)(&walker, vcpu, addr, error_code);

        /*
         * The page is not mapped by the guest.  Let the guest handle it.
         */
                pgprintk("%s: guest page fault\n", __func__);
                inject_page_fault(vcpu, &walker.fault);
                /* reset fork detector */
                vcpu->arch.last_pt_write_count = 0;
        if (walker.level >= PT_DIRECTORY_LEVEL)
                force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn);

        if (!force_pt_level) {
                level = min(walker.level, mapping_level(vcpu, walker.gfn));
                walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);

        mmu_seq = vcpu->kvm->mmu_notifier_seq;

        if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault,

        if (is_error_pfn(pfn))
                return kvm_handle_bad_page(vcpu->kvm, walker.gfn, pfn);

        spin_lock(&vcpu->kvm->mmu_lock);
        if (mmu_notifier_retry(vcpu, mmu_seq))

        trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
        kvm_mmu_free_some_pages(vcpu);
                transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
        sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
                             level, &write_pt, pfn, map_writable, prefault);
        pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
                 sptep, *sptep, write_pt);

                vcpu->arch.last_pt_write_count = 0; /* reset fork detector */

        ++vcpu->stat.pf_fixed;
        trace_kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
        spin_unlock(&vcpu->kvm->mmu_lock);

        spin_unlock(&vcpu->kvm->mmu_lock);
        kvm_release_pfn_clean(pfn);
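/*
 * Overall flow of FNAME(page_fault): top up the MMU memory caches, walk the
 * guest page tables for the faulting address (injecting the fault back into
 * the guest if the walk fails), pick a mapping level, resolve the pfn
 * (possibly via the async page fault path), then take mmu_lock, recheck the
 * mmu_notifier sequence number, and build the shadow mapping with
 * FNAME(fetch).  The return value indicates whether the faulting instruction
 * must be emulated.
 */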
static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
        struct kvm_shadow_walk_iterator iterator;
        struct kvm_mmu_page *sp;

        spin_lock(&vcpu->kvm->mmu_lock);

        for_each_shadow_entry(vcpu, gva, iterator) {
                level = iterator.level;
                sptep = iterator.sptep;

                sp = page_header(__pa(sptep));
                if (is_last_spte(*sptep, level)) {
                                (PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;
                        offset = sp->role.quadrant << shift;

                        pte_gpa = (sp->gfn << PAGE_SHIFT) + offset;
                        pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);

                        if (is_shadow_present_pte(*sptep)) {
                                if (is_large_pte(*sptep))
                                        --vcpu->kvm->stat.lpages;
                                drop_spte(vcpu->kvm, sptep,
                                          shadow_trap_nonpresent_pte);
                                __set_spte(sptep, shadow_trap_nonpresent_pte);

                if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)

                        kvm_flush_remote_tlbs(vcpu->kvm);

        atomic_inc(&vcpu->kvm->arch.invlpg_counter);

        spin_unlock(&vcpu->kvm->mmu_lock);

        if (mmu_topup_memory_caches(vcpu))

        kvm_mmu_pte_write(vcpu, pte_gpa, NULL, sizeof(pt_element_t), 0);
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
                               struct x86_exception *exception)
        struct guest_walker walker;
        gpa_t gpa = UNMAPPED_GVA;

        r = FNAME(walk_addr)(&walker, vcpu, vaddr, access);

                gpa = gfn_to_gpa(walker.gfn);
                gpa |= vaddr & ~PAGE_MASK;
        } else if (exception)
                *exception = walker.fault;
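/*
 * Rough usage sketch (illustrative only): FNAME(gva_to_gpa) is installed as
 * the mmu's gva_to_gpa hook, and a caller translating a guest virtual
 * address might do something like:
 *
 *	struct x86_exception exception;
 *	gpa_t gpa;
 *
 *	gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, vaddr,
 *					      PFERR_WRITE_MASK, &exception);
 *	if (gpa == UNMAPPED_GVA)
 *		kvm_inject_page_fault(vcpu, &exception);
 *
 * i.e. on failure the walker's fault, copied into 'exception', is what gets
 * injected into the guest.
 */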
static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
                                      struct x86_exception *exception)
        struct guest_walker walker;
        gpa_t gpa = UNMAPPED_GVA;

        r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access);

                gpa = gfn_to_gpa(walker.gfn);
                gpa |= vaddr & ~PAGE_MASK;
        } else if (exception)
                *exception = walker.fault;
static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
                                 struct kvm_mmu_page *sp)
        pt_element_t pt[256 / sizeof(pt_element_t)];

            || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
                nonpaging_prefetch_page(vcpu, sp);

        pte_gpa = gfn_to_gpa(sp->gfn);
                offset = sp->role.quadrant << PT64_LEVEL_BITS;
                pte_gpa += offset * sizeof(pt_element_t);

        for (i = 0; i < PT64_ENT_PER_PAGE; i += ARRAY_SIZE(pt)) {
                r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, pt, sizeof pt);
                pte_gpa += ARRAY_SIZE(pt) * sizeof(pt_element_t);
                for (j = 0; j < ARRAY_SIZE(pt); ++j)
                        if (r || is_present_gpte(pt[j]))
                                sp->spt[i+j] = shadow_trap_nonpresent_pte;
                        else
                                sp->spt[i+j] = shadow_notrap_nonpresent_pte;
/*
 * Using the cached information from sp->gfns is safe because:
 * - The spte has a reference to the struct page, so the pfn for a given gfn
 *   can't change unless all sptes pointing to it are nuked first.
 *
 * Note: we should flush all tlbs if an spte is dropped even though the guest
 * is responsible for it.  If we don't, kvm_mmu_notifier_invalidate_page and
 * kvm_mmu_notifier_invalidate_range_start may see that the mapped page is no
 * longer used by the guest, in which case the tlbs are not flushed and the
 * guest could still access the freed page.
 * We increase kvm->tlbs_dirty to delay the tlb flush in this case.
 */
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
        int i, offset, nr_present;

        offset = nr_present = 0;

        /* A direct kvm_mmu_page cannot be unsync. */
        BUG_ON(sp->role.direct);

                offset = sp->role.quadrant << PT64_LEVEL_BITS;

        first_pte_gpa = gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);

        for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
                if (!is_shadow_present_pte(sp->spt[i]))

                pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);

                if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
                                          sizeof(pt_element_t)))

                gfn = gpte_to_gfn(gpte);

                if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
                        vcpu->kvm->tlbs_dirty++;

                if (gfn != sp->gfns[i]) {
                        drop_spte(vcpu->kvm, &sp->spt[i],
                                  shadow_trap_nonpresent_pte);
                        vcpu->kvm->tlbs_dirty++;

                pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
                host_writable = sp->spt[i] & SPTE_HOST_WRITEABLE;

                set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
                         is_dirty_gpte(gpte), PT_PAGE_TABLE_LEVEL, gfn,
                         spte_to_pfn(sp->spt[i]), true, false,
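/*
 * Summary of FNAME(sync_page): for an unsync shadow page, every present spte
 * is re-validated against the current guest pte in the corresponding slot.
 * Entries whose gpte has become invalid, or whose target gfn has changed,
 * are dropped and kvm->tlbs_dirty is bumped so the TLB flush can be
 * deferred; the remaining entries are refreshed in place with set_spte()
 * using the current access bits, and nr_present tracks how many sptes stay
 * mapped.
 */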
#undef PT_BASE_ADDR_MASK
#undef PT_LVL_ADDR_MASK
#undef PT_LVL_OFFSET_MASK
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn_lvl