/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Yaniv Kamay <yaniv@qumranet.com>
 * Avi Kivity <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */
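/*
 * Illustrative sketch (added note, not part of the original header): mmu.c
 * instantiates this template once per guest pte size, roughly like so; the
 * exact guard lines vary between kernel versions.
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 */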
#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS 4
	#define CMPXCHG cmpxchg
	#else
	#define CMPXCHG cmpxchg64
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT32_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT32_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
	#define PT_LEVEL_BITS PT32_LEVEL_BITS
	#define PT_MAX_FULL_LEVELS 2
	#define CMPXCHG cmpxchg
#else
	#error Invalid PTTYPE value
#endif
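/*
 * Example (added note): with PTTYPE == 64, FNAME(page_fault) expands to
 * paging64_page_fault; with PTTYPE == 32 it becomes paging32_page_fault,
 * so both instantiations can live side by side in the same object file.
 */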
#define gpte_to_gfn_lvl FNAME(gpte_to_gfn_lvl)
#define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PT_PAGE_TABLE_LEVEL)
/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
	pt_element_t ptes[PT_MAX_FULL_LEVELS];
	pt_element_t prefetch_ptes[PTE_PREFETCH_NUM];
	gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
	pt_element_t __user *ptep_user[PT_MAX_FULL_LEVELS];
	struct x86_exception fault;
static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
{
	return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
}
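/*
 * Worked example (added note): with 64-bit ptes and 4KB pages, a leaf gpte
 * of 0x00000000abcd1027 and lvl == PT_PAGE_TABLE_LEVEL give a
 * PT_LVL_ADDR_MASK(lvl) covering bits 51:12, so the function returns gfn
 * 0xabcd1; the low attribute bits (present/rw/user/accessed, 0x27 here)
 * are masked away.
 */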
static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			       pt_element_t __user *ptep_user, unsigned index,
			       pt_element_t orig_pte, pt_element_t new_pte)
	npages = get_user_pages_fast((unsigned long)ptep_user, 1, 1, &page);
	/* Check if the user is doing something meaningless. */
	if (unlikely(npages != 1))

	table = kmap_atomic(page);
	ret = CMPXCHG(&table[index], orig_pte, new_pte);

	kvm_release_page_dirty(page);

	return (ret != orig_pte);
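/*
 * Added note: the return value is 0 when the cmpxchg saw the expected
 * orig_pte (the gpte was updated atomically) and non-zero when another
 * writer changed the gpte underneath us, in which case the cached walk
 * is stale.
 */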
static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
					     struct kvm_mmu *mmu,
					     struct guest_walker *walker,
					     int write_fault)
{
	unsigned level, index;
	pt_element_t pte, orig_pte;
	pt_element_t __user *ptep_user;

	for (level = walker->max_level; level >= walker->level; --level) {
		pte = orig_pte = walker->ptes[level - 1];
		table_gfn = walker->table_gfn[level - 1];
		ptep_user = walker->ptep_user[level - 1];
		index = offset_in_page(ptep_user) / sizeof(pt_element_t);
		if (!(pte & PT_ACCESSED_MASK)) {
			trace_kvm_mmu_set_accessed_bit(table_gfn, index, sizeof(pte));
			pte |= PT_ACCESSED_MASK;
		}
		if (level == walker->level && write_fault && !is_dirty_gpte(pte)) {
			trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
			pte |= PT_DIRTY_MASK;
		}

		ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte);

		mark_page_dirty(vcpu->kvm, table_gfn);
		walker->ptes[level - 1] = pte;
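	/*
	 * Added note: the loop above mirrors what hardware would have done
	 * during the original walk, using the gptes cached in the walker
	 * instead of re-walking the guest tables -- the accessed bit is set
	 * at every traversed level that was missing it, while the dirty bit
	 * is set only in the leaf gpte and only for a write fault.
	 */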
/*
 * Fetch a guest pte for a guest virtual address
 */
static int FNAME(walk_addr_generic)(struct guest_walker *walker,
				    struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				    gva_t addr, u32 access)
	pt_element_t __user *uninitialized_var(ptep_user);
	unsigned index, pt_access, pte_access, accessed_dirty;
	const int write_fault = access & PFERR_WRITE_MASK;
	const int user_fault  = access & PFERR_USER_MASK;
	const int fetch_fault = access & PFERR_FETCH_MASK;

	trace_kvm_mmu_pagetable_walk(addr, access);

	walker->level = mmu->root_level;
	pte           = mmu->get_cr3(vcpu);

	if (walker->level == PT32E_ROOT_LEVEL) {
		pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3);
		trace_kvm_mmu_paging_element(pte, walker->level);
		if (!is_present_gpte(pte))

	walker->max_level = walker->level;
	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (mmu->get_cr3(vcpu) & CR3_NONPAE_RESERVED_BITS) == 0);

	accessed_dirty = PT_ACCESSED_MASK;
	pt_access = pte_access = ACC_ALL;

		unsigned long host_addr;

		pt_access &= pte_access;

		index = PT_INDEX(addr, walker->level);

		table_gfn = gpte_to_gfn(pte);
		offset    = index * sizeof(pt_element_t);
		pte_gpa   = gfn_to_gpa(table_gfn) + offset;
		walker->table_gfn[walker->level - 1] = table_gfn;
		walker->pte_gpa[walker->level - 1] = pte_gpa;

		real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
					      PFERR_USER_MASK|PFERR_WRITE_MASK);
		if (unlikely(real_gfn == UNMAPPED_GVA))

		real_gfn = gpa_to_gfn(real_gfn);

		host_addr = gfn_to_hva(vcpu->kvm, real_gfn);
		if (unlikely(kvm_is_error_hva(host_addr)))

		ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
		if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))

		walker->ptep_user[walker->level - 1] = ptep_user;

		trace_kvm_mmu_paging_element(pte, walker->level);

		if (unlikely(!is_present_gpte(pte)))

		if (unlikely(is_rsvd_bits_set(&vcpu->arch.mmu, pte,

			errcode |= PFERR_RSVD_MASK | PFERR_PRESENT_MASK;

		accessed_dirty &= pte;
		pte_access = pt_access & gpte_access(vcpu, pte);

		walker->ptes[walker->level - 1] = pte;
	} while (!is_last_gpte(mmu, walker->level, pte));

	if (unlikely(permission_fault(mmu, pte_access, access))) {
		errcode |= PFERR_PRESENT_MASK;

	gfn = gpte_to_gfn_lvl(pte, walker->level);
	gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT;

	if (PTTYPE == 32 && walker->level == PT_DIRECTORY_LEVEL && is_cpuid_PSE36())
		gfn += pse36_gfn_delta(pte);

	real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access);
	if (real_gpa == UNMAPPED_GVA)

	walker->gfn = real_gpa >> PAGE_SHIFT;

	protect_clean_gpte(&pte_access, pte);

	/*
	 * On a write fault, fold the dirty bit into accessed_dirty by
	 * shifting it one place right.
	 */
	accessed_dirty &= pte >> (PT_DIRTY_SHIFT - PT_ACCESSED_SHIFT);
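	/*
	 * Added note: with the usual x86 encodings PT_ACCESSED_SHIFT is 5 and
	 * PT_DIRTY_SHIFT is 6, so the shift above is by one bit and moves the
	 * gpte's dirty bit into the accessed-bit position.  accessed_dirty,
	 * which started as PT_ACCESSED_MASK, therefore ends up zero only when
	 * an accessed bit (or, on a write fault, the dirty bit) still needs
	 * to be set, which is exactly what the slow-path check below tests.
	 */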
	if (unlikely(!accessed_dirty)) {
		ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault);
		if (unlikely(ret < 0))

	walker->pt_access = pt_access;
	walker->pte_access = pte_access;
	pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
		 __func__, (u64)pte, pte_access, pt_access);

	errcode |= write_fault | user_fault;
	if (fetch_fault && (mmu->nx ||
			    kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)))
		errcode |= PFERR_FETCH_MASK;

	walker->fault.vector = PF_VECTOR;
	walker->fault.error_code_valid = true;
	walker->fault.error_code = errcode;
	walker->fault.address = addr;
	walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;

	trace_kvm_mmu_walker_error(walker->fault.error_code);
static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gva_t addr, u32 access)
{
	return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.mmu, addr,
					access);
}
static int FNAME(walk_addr_nested)(struct guest_walker *walker,
				   struct kvm_vcpu *vcpu, gva_t addr,
				   u32 access)
{
	return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu,
					addr, access);
}
FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
		     u64 *spte, pt_element_t gpte, bool no_dirty_log)
	if (prefetch_invalid_gpte(vcpu, sp, spte, gpte))

	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);

	gfn = gpte_to_gfn(gpte);
	pte_access = sp->role.access & gpte_access(vcpu, gpte);
	protect_clean_gpte(&pte_access, gpte);
	pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
			no_dirty_log && (pte_access & ACC_WRITE_MASK));
	if (is_error_pfn(pfn))

	/*
	 * we call mmu_set_spte() with host_writable = true because
	 * pte_prefetch_gfn_to_pfn always gets a writable pfn.
	 */
	mmu_set_spte(vcpu, spte, pte_access, 0, NULL, PT_PAGE_TABLE_LEVEL,
		     gfn, pfn, true, true);
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			      u64 *spte, const void *pte)
{
	pt_element_t gpte = *(const pt_element_t *)pte;

	FNAME(prefetch_gpte)(vcpu, sp, spte, gpte, false);
}
static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
				struct guest_walker *gw, int level)
{
	pt_element_t curr_pte;
	gpa_t base_gpa, pte_gpa = gw->pte_gpa[level - 1];

	if (level == PT_PAGE_TABLE_LEVEL) {
		mask = PTE_PREFETCH_NUM * sizeof(pt_element_t) - 1;
		base_gpa = pte_gpa & ~mask;
		index = (pte_gpa - base_gpa) / sizeof(pt_element_t);

		r = kvm_read_guest_atomic(vcpu->kvm, base_gpa,
				gw->prefetch_ptes, sizeof(gw->prefetch_ptes));
		curr_pte = gw->prefetch_ptes[index];
	} else
		r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa,
				  &curr_pte, sizeof(curr_pte));

	return r || curr_pte != gw->ptes[level - 1];
}
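/*
 * Added note on FNAME(gpte_changed) above: at the last translation level a
 * whole naturally aligned group of PTE_PREFETCH_NUM gptes is re-read in one
 * go and cached in gw->prefetch_ptes so that FNAME(pte_prefetch)() can reuse
 * them later; for the upper levels only the single gpte of interest is
 * re-read and compared against the value cached during the walk.
 */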
static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
				u64 *sptep)
	struct kvm_mmu_page *sp;
	pt_element_t *gptep = gw->prefetch_ptes;

	sp = page_header(__pa(sptep));

	if (sp->role.level > PT_PAGE_TABLE_LEVEL)

		return __direct_pte_prefetch(vcpu, sp, sptep);

	i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);

	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
		if (is_shadow_present_pte(*spte))

		if (!FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i], true))
/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 * If the guest tries to write a write-protected page, we need to
 * emulate this operation, return 1 to indicate this case.
 */
static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			struct guest_walker *gw,
			int write_fault, int hlevel,
			pfn_t pfn, bool map_writable, bool prefault)
	struct kvm_mmu_page *sp = NULL;
	struct kvm_shadow_walk_iterator it;
	unsigned direct_access, access = gw->pt_access;
	int top_level, emulate = 0;

	direct_access = gw->pte_access;

	top_level = vcpu->arch.mmu.root_level;
	if (top_level == PT32E_ROOT_LEVEL)
		top_level = PT32_ROOT_LEVEL;
	/*
	 * Verify that the top-level gpte is still there.  Since the page
	 * is a root page, it is either write protected (and cannot be
	 * changed from now on) or it is invalid (in which case, we don't
	 * really care if it changes underneath us after this point).
	 */
	if (FNAME(gpte_changed)(vcpu, gw, top_level))
		goto out_gpte_changed;

	for (shadow_walk_init(&it, vcpu, addr);
	     shadow_walk_okay(&it) && it.level > gw->level;
	     shadow_walk_next(&it)) {

		clear_sp_write_flooding_count(it.sptep);
		drop_large_spte(vcpu, it.sptep);

		if (!is_shadow_present_pte(*it.sptep)) {
			table_gfn = gw->table_gfn[it.level - 2];
			sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
					      false, access, it.sptep);
		}

		/*
		 * Verify that the gpte in the page we've just write
		 * protected is still there.
		 */
		if (FNAME(gpte_changed)(vcpu, gw, it.level - 1))
			goto out_gpte_changed;

		link_shadow_page(it.sptep, sp);
	}

	for (;
	     shadow_walk_okay(&it) && it.level > hlevel;
	     shadow_walk_next(&it)) {

		clear_sp_write_flooding_count(it.sptep);
		validate_direct_spte(vcpu, it.sptep, direct_access);

		drop_large_spte(vcpu, it.sptep);

		if (is_shadow_present_pte(*it.sptep))

		direct_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);

		sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, it.level-1,
				      true, direct_access, it.sptep);
		link_shadow_page(it.sptep, sp);
	}

	clear_sp_write_flooding_count(it.sptep);
	mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault, &emulate,
		     it.level, gw->gfn, pfn, prefault, map_writable);
	FNAME(pte_prefetch)(vcpu, gw, it.sptep);

	return emulate;

out_gpte_changed:
	kvm_mmu_put_page(sp, it.sptep);
	kvm_release_pfn_clean(pfn);
/*
 * To see whether the mapped gfn can write its own page table in the current
 * mapping.
 *
 * This is a helper for FNAME(page_fault).  When the guest uses a large page
 * to map a writable gfn that is currently in use as a page table, we must
 * force kvm to map it with a small page, because the new shadow page created
 * when kvm builds the shadow page table would stop kvm from using the large
 * page anyway.  Doing this early avoids unnecessary #PF and emulation.
 *
 * @write_fault_to_shadow_pgtable is set to true if the faulting gfn is
 * currently used as its own page table.
 *
 * Note: the PDPT page table is not checked for PAE 32-bit guests.  That is
 * fine because the PDPT is always shadowed, which means a large page can
 * never be used to map the gfn that holds the PDPT.
 */
FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
			      struct guest_walker *walker, int user_fault,
			      bool *write_fault_to_shadow_pgtable)
	gfn_t mask = ~(KVM_PAGES_PER_HPAGE(walker->level) - 1);
	bool self_changed = false;

	if (!(walker->pte_access & ACC_WRITE_MASK ||
	      (!is_write_protection(vcpu) && !user_fault)))

	for (level = walker->level; level <= walker->max_level; level++) {
		gfn_t gfn = walker->gfn ^ walker->table_gfn[level - 1];

		self_changed |= !(gfn & mask);
		*write_fault_to_shadow_pgtable |= !gfn;
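		/*
		 * Added note: gfn here is the XOR of the faulting gfn and the
		 * page-table gfn at this level, so !gfn means the fault hit
		 * the page-table page itself, while !(gfn & mask) only means
		 * both fall within the same large-page frame, so a large
		 * mapping would let the guest modify its own page tables
		 * through it.
		 */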
/*
 * Page fault handler.  There are several causes for a page fault:
 *   - there is no shadow pte for the guest pte
 *   - write access through a shadow pte marked read only so that we can set
 *     the dirty bit
 *   - write access to a shadow pte marked read only so we can update the page
 *     dirty bitmap, when userspace requests it
 *   - mmio access; in this case we will never install a present shadow pte
 *   - normal guest page fault due to the guest pte marked not present, not
 *     writable, or not executable
 *
 * Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *          a negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
			     bool prefault)
	int write_fault = error_code & PFERR_WRITE_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	struct guest_walker walker;
	int level = PT_PAGE_TABLE_LEVEL;
	unsigned long mmu_seq;
	bool map_writable, is_self_change_mapping;

	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);

	if (unlikely(error_code & PFERR_RSVD_MASK)) {
		r = handle_mmio_page_fault(vcpu, addr, error_code,
					   mmu_is_nested(vcpu));
		if (likely(r != RET_MMIO_PF_INVALID))

	r = mmu_topup_memory_caches(vcpu);

	/*
	 * Look up the guest pte for the faulting address.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, addr, error_code);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
		pgprintk("%s: guest page fault\n", __func__);
		inject_page_fault(vcpu, &walker.fault);

	vcpu->arch.write_fault_to_shadow_pgtable = false;

	is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu,
	      &walker, user_fault, &vcpu->arch.write_fault_to_shadow_pgtable);

	if (walker.level >= PT_DIRECTORY_LEVEL)
		force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn)
		   || is_self_change_mapping;

	if (!force_pt_level) {
		level = min(walker.level, mapping_level(vcpu, walker.gfn));
		walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);

	mmu_seq = vcpu->kvm->mmu_notifier_seq;

	if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault,

	if (handle_abnormal_pfn(vcpu, mmu_is_nested(vcpu) ? 0 : addr,
				walker.gfn, pfn, walker.pte_access, &r))

	/*
	 * Do not change pte_access if the pfn is a mmio page, otherwise
	 * we will cache the incorrect access into mmio spte.
	 */
	if (write_fault && !(walker.pte_access & ACC_WRITE_MASK) &&
	    !is_write_protection(vcpu) && !user_fault &&
	    !is_noslot_pfn(pfn)) {
		walker.pte_access |= ACC_WRITE_MASK;
		walker.pte_access &= ~ACC_USER_MASK;

		/*
		 * If we converted a user page to a kernel page,
		 * so that the kernel can write to it when cr0.wp=0,
		 * then we should prevent the kernel from executing it
		 * if SMEP is enabled.
		 */
		if (kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
			walker.pte_access &= ~ACC_EXEC_MASK;

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))

	kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
	make_mmu_pages_available(vcpu);
	transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
	r = FNAME(fetch)(vcpu, addr, &walker, write_fault,
			 level, pfn, map_writable, prefault);
	++vcpu->stat.pf_fixed;
	kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
	spin_unlock(&vcpu->kvm->mmu_lock);

	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
	WARN_ON(sp->role.level != PT_PAGE_TABLE_LEVEL);

		offset = sp->role.quadrant << PT64_LEVEL_BITS;

	return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
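/*
 * Added note on FNAME(get_level1_sp_gpa) above: for 32-bit non-PAE guests a
 * guest page table holds 1024 4-byte entries while a shadow page maps only
 * 512, so each guest table is split into quadrants; sp->role.quadrant picks
 * which part of the guest page this shadow page mirrors, hence the extra
 * offset.
 */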
static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
	struct kvm_shadow_walk_iterator iterator;
	struct kvm_mmu_page *sp;

	vcpu_clear_mmio_info(vcpu, gva);

	/*
	 * No need to check return value here, rmap_can_add() can
	 * help us to skip pte prefetch later.
	 */
	mmu_topup_memory_caches(vcpu);

	spin_lock(&vcpu->kvm->mmu_lock);
	for_each_shadow_entry(vcpu, gva, iterator) {
		level = iterator.level;
		sptep = iterator.sptep;

		sp = page_header(__pa(sptep));
		if (is_last_spte(*sptep, level)) {
			pte_gpa = FNAME(get_level1_sp_gpa)(sp);
			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);

			if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
				kvm_flush_remote_tlbs(vcpu->kvm);

			if (!rmap_can_add(vcpu))

			if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
						  sizeof(pt_element_t)))

			FNAME(update_pte)(vcpu, sp, sptep, &gpte);

		if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
	spin_unlock(&vcpu->kvm->mmu_lock);
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
			       struct x86_exception *exception)
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;

	r = FNAME(walk_addr)(&walker, vcpu, vaddr, access);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	} else if (exception)
		*exception = walker.fault;
static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
				      u32 access,
				      struct x86_exception *exception)
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;

	r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	} else if (exception)
		*exception = walker.fault;
/*
 * Using the cached information from sp->gfns is safe because:
 * - The spte has a reference to the struct page, so the pfn for a given gfn
 *   can't change unless all sptes pointing to it are nuked first.
 *
 * We should flush all TLBs if a spte is dropped even though the guest is
 * responsible for it.  If we don't, kvm_mmu_notifier_invalidate_page and
 * kvm_mmu_notifier_invalidate_range_start see that the mapped page is no
 * longer used by the guest and skip the flush, and the guest could then
 * access a freed page.  We increase kvm->tlbs_dirty to delay the TLB flush
 * in this case.
 */
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
	int i, nr_present = 0;

	/* direct kvm_mmu_page can not be unsync. */
	BUG_ON(sp->role.direct);

	first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);

	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
		pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);

		if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
					  sizeof(pt_element_t)))

		if (prefetch_invalid_gpte(vcpu, sp, &sp->spt[i], gpte)) {
			vcpu->kvm->tlbs_dirty++;

		gfn = gpte_to_gfn(gpte);
		pte_access = sp->role.access;
		pte_access &= gpte_access(vcpu, gpte);
		protect_clean_gpte(&pte_access, gpte);

		if (sync_mmio_spte(vcpu->kvm, &sp->spt[i], gfn, pte_access,

		if (gfn != sp->gfns[i]) {
			drop_spte(vcpu->kvm, &sp->spt[i]);
			vcpu->kvm->tlbs_dirty++;

		host_writable = sp->spt[i] & SPTE_HOST_WRITEABLE;

		set_spte(vcpu, &sp->spt[i], pte_access,
			 PT_PAGE_TABLE_LEVEL, gfn,
			 spte_to_pfn(sp->spt[i]), true, false,
			 host_writable);
#undef PT_BASE_ADDR_MASK
#undef PT_LVL_ADDR_MASK
#undef PT_LVL_OFFSET_MASK
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn_lvl