2 * Kernel-based Virtual Machine driver for Linux
4 * This module enables machines with Intel VT-x extensions to run virtual
5 * machines without emulation or binary translation.
9 * Copyright (C) 2006 Qumranet, Inc.
10 * Copyright 2010 Red Hat, Inc. and/or its affilates.
13 * Yaniv Kamay <yaniv@qumranet.com>
14 * Avi Kivity <avi@qumranet.com>
16 * This work is licensed under the terms of the GNU GPL, version 2. See
17 * the COPYING file in the top-level directory.
23 #include "kvm_cache_regs.h"
25 #include <linux/kvm_host.h>
26 #include <linux/types.h>
27 #include <linux/string.h>
29 #include <linux/highmem.h>
30 #include <linux/module.h>
31 #include <linux/swap.h>
32 #include <linux/hugetlb.h>
33 #include <linux/compiler.h>
34 #include <linux/srcu.h>
35 #include <linux/slab.h>
36 #include <linux/uaccess.h>
39 #include <asm/cmpxchg.h>
44 * When setting this variable to true it enables Two-Dimensional-Paging
45 * where the hardware walks 2 page tables:
46 * 1. the guest-virtual to guest-physical
47 * 2. while doing 1. it walks guest-physical to host-physical
48 * If the hardware supports that we don't need to do shadow paging.
50 bool tdp_enabled
= false;
57 static void kvm_mmu_audit(struct kvm_vcpu
*vcpu
, const char *msg
);
59 static void kvm_mmu_audit(struct kvm_vcpu
*vcpu
, const char *msg
) {}
64 #define pgprintk(x...) do { if (dbg) printk(x); } while (0)
65 #define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
69 #define pgprintk(x...) do { } while (0)
70 #define rmap_printk(x...) do { } while (0)
74 #if defined(MMU_DEBUG) || defined(AUDIT)
76 module_param(dbg
, bool, 0644);
79 static int oos_shadow
= 1;
80 module_param(oos_shadow
, bool, 0644);
83 #define ASSERT(x) do { } while (0)
87 printk(KERN_WARNING "assertion failed %s:%d: %s\n", \
88 __FILE__, __LINE__, #x); \
92 #define PT_FIRST_AVAIL_BITS_SHIFT 9
93 #define PT64_SECOND_AVAIL_BITS_SHIFT 52
95 #define VALID_PAGE(x) ((x) != INVALID_PAGE)
97 #define PT64_LEVEL_BITS 9
99 #define PT64_LEVEL_SHIFT(level) \
100 (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)
102 #define PT64_LEVEL_MASK(level) \
103 (((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))
105 #define PT64_INDEX(address, level)\
106 (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
109 #define PT32_LEVEL_BITS 10
111 #define PT32_LEVEL_SHIFT(level) \
112 (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
114 #define PT32_LEVEL_MASK(level) \
115 (((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))
116 #define PT32_LVL_OFFSET_MASK(level) \
117 (PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
118 * PT32_LEVEL_BITS))) - 1))
120 #define PT32_INDEX(address, level)\
121 (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
124 #define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
125 #define PT64_DIR_BASE_ADDR_MASK \
126 (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
127 #define PT64_LVL_ADDR_MASK(level) \
128 (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
129 * PT64_LEVEL_BITS))) - 1))
130 #define PT64_LVL_OFFSET_MASK(level) \
131 (PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
132 * PT64_LEVEL_BITS))) - 1))
134 #define PT32_BASE_ADDR_MASK PAGE_MASK
135 #define PT32_DIR_BASE_ADDR_MASK \
136 (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
137 #define PT32_LVL_ADDR_MASK(level) \
138 (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
139 * PT32_LEVEL_BITS))) - 1))
141 #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
146 #define ACC_EXEC_MASK 1
147 #define ACC_WRITE_MASK PT_WRITABLE_MASK
148 #define ACC_USER_MASK PT_USER_MASK
149 #define ACC_ALL (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
151 #include <trace/events/kvm.h>
153 #define CREATE_TRACE_POINTS
154 #include "mmutrace.h"
156 #define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
158 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
160 struct kvm_rmap_desc
{
161 u64
*sptes
[RMAP_EXT
];
162 struct kvm_rmap_desc
*more
;
165 struct kvm_shadow_walk_iterator
{
173 #define for_each_shadow_entry(_vcpu, _addr, _walker) \
174 for (shadow_walk_init(&(_walker), _vcpu, _addr); \
175 shadow_walk_okay(&(_walker)); \
176 shadow_walk_next(&(_walker)))
178 typedef int (*mmu_parent_walk_fn
) (struct kvm_mmu_page
*sp
);
180 static struct kmem_cache
*pte_chain_cache
;
181 static struct kmem_cache
*rmap_desc_cache
;
182 static struct kmem_cache
*mmu_page_header_cache
;
184 static u64 __read_mostly shadow_trap_nonpresent_pte
;
185 static u64 __read_mostly shadow_notrap_nonpresent_pte
;
186 static u64 __read_mostly shadow_base_present_pte
;
187 static u64 __read_mostly shadow_nx_mask
;
188 static u64 __read_mostly shadow_x_mask
; /* mutual exclusive with nx_mask */
189 static u64 __read_mostly shadow_user_mask
;
190 static u64 __read_mostly shadow_accessed_mask
;
191 static u64 __read_mostly shadow_dirty_mask
;
193 static inline u64
rsvd_bits(int s
, int e
)
195 return ((1ULL << (e
- s
+ 1)) - 1) << s
;
198 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte
, u64 notrap_pte
)
200 shadow_trap_nonpresent_pte
= trap_pte
;
201 shadow_notrap_nonpresent_pte
= notrap_pte
;
203 EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes
);
205 void kvm_mmu_set_base_ptes(u64 base_pte
)
207 shadow_base_present_pte
= base_pte
;
209 EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes
);
211 void kvm_mmu_set_mask_ptes(u64 user_mask
, u64 accessed_mask
,
212 u64 dirty_mask
, u64 nx_mask
, u64 x_mask
)
214 shadow_user_mask
= user_mask
;
215 shadow_accessed_mask
= accessed_mask
;
216 shadow_dirty_mask
= dirty_mask
;
217 shadow_nx_mask
= nx_mask
;
218 shadow_x_mask
= x_mask
;
220 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes
);
222 static bool is_write_protection(struct kvm_vcpu
*vcpu
)
224 return kvm_read_cr0_bits(vcpu
, X86_CR0_WP
);
227 static int is_cpuid_PSE36(void)
232 static int is_nx(struct kvm_vcpu
*vcpu
)
234 return vcpu
->arch
.efer
& EFER_NX
;
237 static int is_shadow_present_pte(u64 pte
)
239 return pte
!= shadow_trap_nonpresent_pte
240 && pte
!= shadow_notrap_nonpresent_pte
;
243 static int is_large_pte(u64 pte
)
245 return pte
& PT_PAGE_SIZE_MASK
;
248 static int is_writable_pte(unsigned long pte
)
250 return pte
& PT_WRITABLE_MASK
;
253 static int is_dirty_gpte(unsigned long pte
)
255 return pte
& PT_DIRTY_MASK
;
258 static int is_rmap_spte(u64 pte
)
260 return is_shadow_present_pte(pte
);
263 static int is_last_spte(u64 pte
, int level
)
265 if (level
== PT_PAGE_TABLE_LEVEL
)
267 if (is_large_pte(pte
))
272 static pfn_t
spte_to_pfn(u64 pte
)
274 return (pte
& PT64_BASE_ADDR_MASK
) >> PAGE_SHIFT
;
277 static gfn_t
pse36_gfn_delta(u32 gpte
)
279 int shift
= 32 - PT32_DIR_PSE36_SHIFT
- PAGE_SHIFT
;
281 return (gpte
& PT32_DIR_PSE36_MASK
) << shift
;
284 static void __set_spte(u64
*sptep
, u64 spte
)
287 set_64bit((unsigned long *)sptep
, spte
);
289 set_64bit((unsigned long long *)sptep
, spte
);
293 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache
*cache
,
294 struct kmem_cache
*base_cache
, int min
)
298 if (cache
->nobjs
>= min
)
300 while (cache
->nobjs
< ARRAY_SIZE(cache
->objects
)) {
301 obj
= kmem_cache_zalloc(base_cache
, GFP_KERNEL
);
304 cache
->objects
[cache
->nobjs
++] = obj
;
309 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache
*mc
,
310 struct kmem_cache
*cache
)
313 kmem_cache_free(cache
, mc
->objects
[--mc
->nobjs
]);
316 static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache
*cache
,
321 if (cache
->nobjs
>= min
)
323 while (cache
->nobjs
< ARRAY_SIZE(cache
->objects
)) {
324 page
= alloc_page(GFP_KERNEL
);
327 cache
->objects
[cache
->nobjs
++] = page_address(page
);
332 static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache
*mc
)
335 free_page((unsigned long)mc
->objects
[--mc
->nobjs
]);
338 static int mmu_topup_memory_caches(struct kvm_vcpu
*vcpu
)
342 r
= mmu_topup_memory_cache(&vcpu
->arch
.mmu_pte_chain_cache
,
346 r
= mmu_topup_memory_cache(&vcpu
->arch
.mmu_rmap_desc_cache
,
350 r
= mmu_topup_memory_cache_page(&vcpu
->arch
.mmu_page_cache
, 8);
353 r
= mmu_topup_memory_cache(&vcpu
->arch
.mmu_page_header_cache
,
354 mmu_page_header_cache
, 4);
359 static void mmu_free_memory_caches(struct kvm_vcpu
*vcpu
)
361 mmu_free_memory_cache(&vcpu
->arch
.mmu_pte_chain_cache
, pte_chain_cache
);
362 mmu_free_memory_cache(&vcpu
->arch
.mmu_rmap_desc_cache
, rmap_desc_cache
);
363 mmu_free_memory_cache_page(&vcpu
->arch
.mmu_page_cache
);
364 mmu_free_memory_cache(&vcpu
->arch
.mmu_page_header_cache
,
365 mmu_page_header_cache
);
368 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache
*mc
,
374 p
= mc
->objects
[--mc
->nobjs
];
378 static struct kvm_pte_chain
*mmu_alloc_pte_chain(struct kvm_vcpu
*vcpu
)
380 return mmu_memory_cache_alloc(&vcpu
->arch
.mmu_pte_chain_cache
,
381 sizeof(struct kvm_pte_chain
));
384 static void mmu_free_pte_chain(struct kvm_pte_chain
*pc
)
386 kmem_cache_free(pte_chain_cache
, pc
);
389 static struct kvm_rmap_desc
*mmu_alloc_rmap_desc(struct kvm_vcpu
*vcpu
)
391 return mmu_memory_cache_alloc(&vcpu
->arch
.mmu_rmap_desc_cache
,
392 sizeof(struct kvm_rmap_desc
));
395 static void mmu_free_rmap_desc(struct kvm_rmap_desc
*rd
)
397 kmem_cache_free(rmap_desc_cache
, rd
);
400 static gfn_t
kvm_mmu_page_get_gfn(struct kvm_mmu_page
*sp
, int index
)
402 if (!sp
->role
.direct
)
403 return sp
->gfns
[index
];
405 return sp
->gfn
+ (index
<< ((sp
->role
.level
- 1) * PT64_LEVEL_BITS
));
408 static void kvm_mmu_page_set_gfn(struct kvm_mmu_page
*sp
, int index
, gfn_t gfn
)
411 BUG_ON(gfn
!= kvm_mmu_page_get_gfn(sp
, index
));
413 sp
->gfns
[index
] = gfn
;
417 * Return the pointer to the largepage write count for a given
418 * gfn, handling slots that are not large page aligned.
420 static int *slot_largepage_idx(gfn_t gfn
,
421 struct kvm_memory_slot
*slot
,
426 idx
= (gfn
/ KVM_PAGES_PER_HPAGE(level
)) -
427 (slot
->base_gfn
/ KVM_PAGES_PER_HPAGE(level
));
428 return &slot
->lpage_info
[level
- 2][idx
].write_count
;
431 static void account_shadowed(struct kvm
*kvm
, gfn_t gfn
)
433 struct kvm_memory_slot
*slot
;
437 gfn
= unalias_gfn(kvm
, gfn
);
439 slot
= gfn_to_memslot_unaliased(kvm
, gfn
);
440 for (i
= PT_DIRECTORY_LEVEL
;
441 i
< PT_PAGE_TABLE_LEVEL
+ KVM_NR_PAGE_SIZES
; ++i
) {
442 write_count
= slot_largepage_idx(gfn
, slot
, i
);
447 static void unaccount_shadowed(struct kvm
*kvm
, gfn_t gfn
)
449 struct kvm_memory_slot
*slot
;
453 gfn
= unalias_gfn(kvm
, gfn
);
454 slot
= gfn_to_memslot_unaliased(kvm
, gfn
);
455 for (i
= PT_DIRECTORY_LEVEL
;
456 i
< PT_PAGE_TABLE_LEVEL
+ KVM_NR_PAGE_SIZES
; ++i
) {
457 write_count
= slot_largepage_idx(gfn
, slot
, i
);
459 WARN_ON(*write_count
< 0);
463 static int has_wrprotected_page(struct kvm
*kvm
,
467 struct kvm_memory_slot
*slot
;
470 gfn
= unalias_gfn(kvm
, gfn
);
471 slot
= gfn_to_memslot_unaliased(kvm
, gfn
);
473 largepage_idx
= slot_largepage_idx(gfn
, slot
, level
);
474 return *largepage_idx
;
480 static int host_mapping_level(struct kvm
*kvm
, gfn_t gfn
)
482 unsigned long page_size
;
485 page_size
= kvm_host_page_size(kvm
, gfn
);
487 for (i
= PT_PAGE_TABLE_LEVEL
;
488 i
< (PT_PAGE_TABLE_LEVEL
+ KVM_NR_PAGE_SIZES
); ++i
) {
489 if (page_size
>= KVM_HPAGE_SIZE(i
))
498 static int mapping_level(struct kvm_vcpu
*vcpu
, gfn_t large_gfn
)
500 struct kvm_memory_slot
*slot
;
501 int host_level
, level
, max_level
;
503 slot
= gfn_to_memslot(vcpu
->kvm
, large_gfn
);
504 if (slot
&& slot
->dirty_bitmap
)
505 return PT_PAGE_TABLE_LEVEL
;
507 host_level
= host_mapping_level(vcpu
->kvm
, large_gfn
);
509 if (host_level
== PT_PAGE_TABLE_LEVEL
)
512 max_level
= kvm_x86_ops
->get_lpage_level() < host_level
?
513 kvm_x86_ops
->get_lpage_level() : host_level
;
515 for (level
= PT_DIRECTORY_LEVEL
; level
<= max_level
; ++level
)
516 if (has_wrprotected_page(vcpu
->kvm
, large_gfn
, level
))
523 * Take gfn and return the reverse mapping to it.
524 * Note: gfn must be unaliased before this function get called
527 static unsigned long *gfn_to_rmap(struct kvm
*kvm
, gfn_t gfn
, int level
)
529 struct kvm_memory_slot
*slot
;
532 slot
= gfn_to_memslot(kvm
, gfn
);
533 if (likely(level
== PT_PAGE_TABLE_LEVEL
))
534 return &slot
->rmap
[gfn
- slot
->base_gfn
];
536 idx
= (gfn
/ KVM_PAGES_PER_HPAGE(level
)) -
537 (slot
->base_gfn
/ KVM_PAGES_PER_HPAGE(level
));
539 return &slot
->lpage_info
[level
- 2][idx
].rmap_pde
;
543 * Reverse mapping data structures:
545 * If rmapp bit zero is zero, then rmapp point to the shadw page table entry
546 * that points to page_address(page).
548 * If rmapp bit zero is one, (then rmap & ~1) points to a struct kvm_rmap_desc
549 * containing more mappings.
551 * Returns the number of rmap entries before the spte was added or zero if
552 * the spte was not added.
555 static int rmap_add(struct kvm_vcpu
*vcpu
, u64
*spte
, gfn_t gfn
)
557 struct kvm_mmu_page
*sp
;
558 struct kvm_rmap_desc
*desc
;
559 unsigned long *rmapp
;
562 if (!is_rmap_spte(*spte
))
564 gfn
= unalias_gfn(vcpu
->kvm
, gfn
);
565 sp
= page_header(__pa(spte
));
566 kvm_mmu_page_set_gfn(sp
, spte
- sp
->spt
, gfn
);
567 rmapp
= gfn_to_rmap(vcpu
->kvm
, gfn
, sp
->role
.level
);
569 rmap_printk("rmap_add: %p %llx 0->1\n", spte
, *spte
);
570 *rmapp
= (unsigned long)spte
;
571 } else if (!(*rmapp
& 1)) {
572 rmap_printk("rmap_add: %p %llx 1->many\n", spte
, *spte
);
573 desc
= mmu_alloc_rmap_desc(vcpu
);
574 desc
->sptes
[0] = (u64
*)*rmapp
;
575 desc
->sptes
[1] = spte
;
576 *rmapp
= (unsigned long)desc
| 1;
578 rmap_printk("rmap_add: %p %llx many->many\n", spte
, *spte
);
579 desc
= (struct kvm_rmap_desc
*)(*rmapp
& ~1ul);
580 while (desc
->sptes
[RMAP_EXT
-1] && desc
->more
) {
584 if (desc
->sptes
[RMAP_EXT
-1]) {
585 desc
->more
= mmu_alloc_rmap_desc(vcpu
);
588 for (i
= 0; desc
->sptes
[i
]; ++i
)
590 desc
->sptes
[i
] = spte
;
595 static void rmap_desc_remove_entry(unsigned long *rmapp
,
596 struct kvm_rmap_desc
*desc
,
598 struct kvm_rmap_desc
*prev_desc
)
602 for (j
= RMAP_EXT
- 1; !desc
->sptes
[j
] && j
> i
; --j
)
604 desc
->sptes
[i
] = desc
->sptes
[j
];
605 desc
->sptes
[j
] = NULL
;
608 if (!prev_desc
&& !desc
->more
)
609 *rmapp
= (unsigned long)desc
->sptes
[0];
612 prev_desc
->more
= desc
->more
;
614 *rmapp
= (unsigned long)desc
->more
| 1;
615 mmu_free_rmap_desc(desc
);
618 static void rmap_remove(struct kvm
*kvm
, u64
*spte
)
620 struct kvm_rmap_desc
*desc
;
621 struct kvm_rmap_desc
*prev_desc
;
622 struct kvm_mmu_page
*sp
;
625 unsigned long *rmapp
;
628 if (!is_rmap_spte(*spte
))
630 sp
= page_header(__pa(spte
));
631 pfn
= spte_to_pfn(*spte
);
632 if (*spte
& shadow_accessed_mask
)
633 kvm_set_pfn_accessed(pfn
);
634 if (is_writable_pte(*spte
))
635 kvm_set_pfn_dirty(pfn
);
636 gfn
= kvm_mmu_page_get_gfn(sp
, spte
- sp
->spt
);
637 rmapp
= gfn_to_rmap(kvm
, gfn
, sp
->role
.level
);
639 printk(KERN_ERR
"rmap_remove: %p %llx 0->BUG\n", spte
, *spte
);
641 } else if (!(*rmapp
& 1)) {
642 rmap_printk("rmap_remove: %p %llx 1->0\n", spte
, *spte
);
643 if ((u64
*)*rmapp
!= spte
) {
644 printk(KERN_ERR
"rmap_remove: %p %llx 1->BUG\n",
650 rmap_printk("rmap_remove: %p %llx many->many\n", spte
, *spte
);
651 desc
= (struct kvm_rmap_desc
*)(*rmapp
& ~1ul);
654 for (i
= 0; i
< RMAP_EXT
&& desc
->sptes
[i
]; ++i
)
655 if (desc
->sptes
[i
] == spte
) {
656 rmap_desc_remove_entry(rmapp
,
664 pr_err("rmap_remove: %p %llx many->many\n", spte
, *spte
);
669 static u64
*rmap_next(struct kvm
*kvm
, unsigned long *rmapp
, u64
*spte
)
671 struct kvm_rmap_desc
*desc
;
677 else if (!(*rmapp
& 1)) {
679 return (u64
*)*rmapp
;
682 desc
= (struct kvm_rmap_desc
*)(*rmapp
& ~1ul);
685 for (i
= 0; i
< RMAP_EXT
&& desc
->sptes
[i
]; ++i
) {
686 if (prev_spte
== spte
)
687 return desc
->sptes
[i
];
688 prev_spte
= desc
->sptes
[i
];
695 static int rmap_write_protect(struct kvm
*kvm
, u64 gfn
)
697 unsigned long *rmapp
;
699 int i
, write_protected
= 0;
701 gfn
= unalias_gfn(kvm
, gfn
);
702 rmapp
= gfn_to_rmap(kvm
, gfn
, PT_PAGE_TABLE_LEVEL
);
704 spte
= rmap_next(kvm
, rmapp
, NULL
);
707 BUG_ON(!(*spte
& PT_PRESENT_MASK
));
708 rmap_printk("rmap_write_protect: spte %p %llx\n", spte
, *spte
);
709 if (is_writable_pte(*spte
)) {
710 __set_spte(spte
, *spte
& ~PT_WRITABLE_MASK
);
713 spte
= rmap_next(kvm
, rmapp
, spte
);
715 if (write_protected
) {
718 spte
= rmap_next(kvm
, rmapp
, NULL
);
719 pfn
= spte_to_pfn(*spte
);
720 kvm_set_pfn_dirty(pfn
);
723 /* check for huge page mappings */
724 for (i
= PT_DIRECTORY_LEVEL
;
725 i
< PT_PAGE_TABLE_LEVEL
+ KVM_NR_PAGE_SIZES
; ++i
) {
726 rmapp
= gfn_to_rmap(kvm
, gfn
, i
);
727 spte
= rmap_next(kvm
, rmapp
, NULL
);
730 BUG_ON(!(*spte
& PT_PRESENT_MASK
));
731 BUG_ON((*spte
& (PT_PAGE_SIZE_MASK
|PT_PRESENT_MASK
)) != (PT_PAGE_SIZE_MASK
|PT_PRESENT_MASK
));
732 pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte
, *spte
, gfn
);
733 if (is_writable_pte(*spte
)) {
734 rmap_remove(kvm
, spte
);
736 __set_spte(spte
, shadow_trap_nonpresent_pte
);
740 spte
= rmap_next(kvm
, rmapp
, spte
);
744 return write_protected
;
747 static int kvm_unmap_rmapp(struct kvm
*kvm
, unsigned long *rmapp
,
751 int need_tlb_flush
= 0;
753 while ((spte
= rmap_next(kvm
, rmapp
, NULL
))) {
754 BUG_ON(!(*spte
& PT_PRESENT_MASK
));
755 rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte
, *spte
);
756 rmap_remove(kvm
, spte
);
757 __set_spte(spte
, shadow_trap_nonpresent_pte
);
760 return need_tlb_flush
;
763 static int kvm_set_pte_rmapp(struct kvm
*kvm
, unsigned long *rmapp
,
768 pte_t
*ptep
= (pte_t
*)data
;
771 WARN_ON(pte_huge(*ptep
));
772 new_pfn
= pte_pfn(*ptep
);
773 spte
= rmap_next(kvm
, rmapp
, NULL
);
775 BUG_ON(!is_shadow_present_pte(*spte
));
776 rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", spte
, *spte
);
778 if (pte_write(*ptep
)) {
779 rmap_remove(kvm
, spte
);
780 __set_spte(spte
, shadow_trap_nonpresent_pte
);
781 spte
= rmap_next(kvm
, rmapp
, NULL
);
783 new_spte
= *spte
&~ (PT64_BASE_ADDR_MASK
);
784 new_spte
|= (u64
)new_pfn
<< PAGE_SHIFT
;
786 new_spte
&= ~PT_WRITABLE_MASK
;
787 new_spte
&= ~SPTE_HOST_WRITEABLE
;
788 if (is_writable_pte(*spte
))
789 kvm_set_pfn_dirty(spte_to_pfn(*spte
));
790 __set_spte(spte
, new_spte
);
791 spte
= rmap_next(kvm
, rmapp
, spte
);
795 kvm_flush_remote_tlbs(kvm
);
800 static int kvm_handle_hva(struct kvm
*kvm
, unsigned long hva
,
802 int (*handler
)(struct kvm
*kvm
, unsigned long *rmapp
,
808 struct kvm_memslots
*slots
;
810 slots
= kvm_memslots(kvm
);
812 for (i
= 0; i
< slots
->nmemslots
; i
++) {
813 struct kvm_memory_slot
*memslot
= &slots
->memslots
[i
];
814 unsigned long start
= memslot
->userspace_addr
;
817 end
= start
+ (memslot
->npages
<< PAGE_SHIFT
);
818 if (hva
>= start
&& hva
< end
) {
819 gfn_t gfn_offset
= (hva
- start
) >> PAGE_SHIFT
;
821 ret
= handler(kvm
, &memslot
->rmap
[gfn_offset
], data
);
823 for (j
= 0; j
< KVM_NR_PAGE_SIZES
- 1; ++j
) {
824 int idx
= gfn_offset
;
825 idx
/= KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL
+ j
);
827 &memslot
->lpage_info
[j
][idx
].rmap_pde
,
830 trace_kvm_age_page(hva
, memslot
, ret
);
838 int kvm_unmap_hva(struct kvm
*kvm
, unsigned long hva
)
840 return kvm_handle_hva(kvm
, hva
, 0, kvm_unmap_rmapp
);
843 void kvm_set_spte_hva(struct kvm
*kvm
, unsigned long hva
, pte_t pte
)
845 kvm_handle_hva(kvm
, hva
, (unsigned long)&pte
, kvm_set_pte_rmapp
);
848 static int kvm_age_rmapp(struct kvm
*kvm
, unsigned long *rmapp
,
855 * Emulate the accessed bit for EPT, by checking if this page has
856 * an EPT mapping, and clearing it if it does. On the next access,
857 * a new EPT mapping will be established.
858 * This has some overhead, but not as much as the cost of swapping
859 * out actively used pages or breaking up actively used hugepages.
861 if (!shadow_accessed_mask
)
862 return kvm_unmap_rmapp(kvm
, rmapp
, data
);
864 spte
= rmap_next(kvm
, rmapp
, NULL
);
868 BUG_ON(!(_spte
& PT_PRESENT_MASK
));
869 _young
= _spte
& PT_ACCESSED_MASK
;
872 clear_bit(PT_ACCESSED_SHIFT
, (unsigned long *)spte
);
874 spte
= rmap_next(kvm
, rmapp
, spte
);
879 #define RMAP_RECYCLE_THRESHOLD 1000
881 static void rmap_recycle(struct kvm_vcpu
*vcpu
, u64
*spte
, gfn_t gfn
)
883 unsigned long *rmapp
;
884 struct kvm_mmu_page
*sp
;
886 sp
= page_header(__pa(spte
));
888 gfn
= unalias_gfn(vcpu
->kvm
, gfn
);
889 rmapp
= gfn_to_rmap(vcpu
->kvm
, gfn
, sp
->role
.level
);
891 kvm_unmap_rmapp(vcpu
->kvm
, rmapp
, 0);
892 kvm_flush_remote_tlbs(vcpu
->kvm
);
895 int kvm_age_hva(struct kvm
*kvm
, unsigned long hva
)
897 return kvm_handle_hva(kvm
, hva
, 0, kvm_age_rmapp
);
901 static int is_empty_shadow_page(u64
*spt
)
906 for (pos
= spt
, end
= pos
+ PAGE_SIZE
/ sizeof(u64
); pos
!= end
; pos
++)
907 if (is_shadow_present_pte(*pos
)) {
908 printk(KERN_ERR
"%s: %p %llx\n", __func__
,
916 static void kvm_mmu_free_page(struct kvm
*kvm
, struct kvm_mmu_page
*sp
)
918 ASSERT(is_empty_shadow_page(sp
->spt
));
919 hlist_del(&sp
->hash_link
);
921 __free_page(virt_to_page(sp
->spt
));
922 if (!sp
->role
.direct
)
923 __free_page(virt_to_page(sp
->gfns
));
924 kmem_cache_free(mmu_page_header_cache
, sp
);
925 ++kvm
->arch
.n_free_mmu_pages
;
928 static unsigned kvm_page_table_hashfn(gfn_t gfn
)
930 return gfn
& ((1 << KVM_MMU_HASH_SHIFT
) - 1);
933 static struct kvm_mmu_page
*kvm_mmu_alloc_page(struct kvm_vcpu
*vcpu
,
934 u64
*parent_pte
, int direct
)
936 struct kvm_mmu_page
*sp
;
938 sp
= mmu_memory_cache_alloc(&vcpu
->arch
.mmu_page_header_cache
, sizeof *sp
);
939 sp
->spt
= mmu_memory_cache_alloc(&vcpu
->arch
.mmu_page_cache
, PAGE_SIZE
);
941 sp
->gfns
= mmu_memory_cache_alloc(&vcpu
->arch
.mmu_page_cache
,
943 set_page_private(virt_to_page(sp
->spt
), (unsigned long)sp
);
944 list_add(&sp
->link
, &vcpu
->kvm
->arch
.active_mmu_pages
);
945 bitmap_zero(sp
->slot_bitmap
, KVM_MEMORY_SLOTS
+ KVM_PRIVATE_MEM_SLOTS
);
947 sp
->parent_pte
= parent_pte
;
948 --vcpu
->kvm
->arch
.n_free_mmu_pages
;
952 static void mmu_page_add_parent_pte(struct kvm_vcpu
*vcpu
,
953 struct kvm_mmu_page
*sp
, u64
*parent_pte
)
955 struct kvm_pte_chain
*pte_chain
;
956 struct hlist_node
*node
;
961 if (!sp
->multimapped
) {
962 u64
*old
= sp
->parent_pte
;
965 sp
->parent_pte
= parent_pte
;
969 pte_chain
= mmu_alloc_pte_chain(vcpu
);
970 INIT_HLIST_HEAD(&sp
->parent_ptes
);
971 hlist_add_head(&pte_chain
->link
, &sp
->parent_ptes
);
972 pte_chain
->parent_ptes
[0] = old
;
974 hlist_for_each_entry(pte_chain
, node
, &sp
->parent_ptes
, link
) {
975 if (pte_chain
->parent_ptes
[NR_PTE_CHAIN_ENTRIES
-1])
977 for (i
= 0; i
< NR_PTE_CHAIN_ENTRIES
; ++i
)
978 if (!pte_chain
->parent_ptes
[i
]) {
979 pte_chain
->parent_ptes
[i
] = parent_pte
;
983 pte_chain
= mmu_alloc_pte_chain(vcpu
);
985 hlist_add_head(&pte_chain
->link
, &sp
->parent_ptes
);
986 pte_chain
->parent_ptes
[0] = parent_pte
;
989 static void mmu_page_remove_parent_pte(struct kvm_mmu_page
*sp
,
992 struct kvm_pte_chain
*pte_chain
;
993 struct hlist_node
*node
;
996 if (!sp
->multimapped
) {
997 BUG_ON(sp
->parent_pte
!= parent_pte
);
998 sp
->parent_pte
= NULL
;
1001 hlist_for_each_entry(pte_chain
, node
, &sp
->parent_ptes
, link
)
1002 for (i
= 0; i
< NR_PTE_CHAIN_ENTRIES
; ++i
) {
1003 if (!pte_chain
->parent_ptes
[i
])
1005 if (pte_chain
->parent_ptes
[i
] != parent_pte
)
1007 while (i
+ 1 < NR_PTE_CHAIN_ENTRIES
1008 && pte_chain
->parent_ptes
[i
+ 1]) {
1009 pte_chain
->parent_ptes
[i
]
1010 = pte_chain
->parent_ptes
[i
+ 1];
1013 pte_chain
->parent_ptes
[i
] = NULL
;
1015 hlist_del(&pte_chain
->link
);
1016 mmu_free_pte_chain(pte_chain
);
1017 if (hlist_empty(&sp
->parent_ptes
)) {
1018 sp
->multimapped
= 0;
1019 sp
->parent_pte
= NULL
;
1028 static void mmu_parent_walk(struct kvm_mmu_page
*sp
, mmu_parent_walk_fn fn
)
1030 struct kvm_pte_chain
*pte_chain
;
1031 struct hlist_node
*node
;
1032 struct kvm_mmu_page
*parent_sp
;
1035 if (!sp
->multimapped
&& sp
->parent_pte
) {
1036 parent_sp
= page_header(__pa(sp
->parent_pte
));
1038 mmu_parent_walk(parent_sp
, fn
);
1041 hlist_for_each_entry(pte_chain
, node
, &sp
->parent_ptes
, link
)
1042 for (i
= 0; i
< NR_PTE_CHAIN_ENTRIES
; ++i
) {
1043 if (!pte_chain
->parent_ptes
[i
])
1045 parent_sp
= page_header(__pa(pte_chain
->parent_ptes
[i
]));
1047 mmu_parent_walk(parent_sp
, fn
);
1051 static void kvm_mmu_update_unsync_bitmap(u64
*spte
)
1054 struct kvm_mmu_page
*sp
= page_header(__pa(spte
));
1056 index
= spte
- sp
->spt
;
1057 if (!__test_and_set_bit(index
, sp
->unsync_child_bitmap
))
1058 sp
->unsync_children
++;
1059 WARN_ON(!sp
->unsync_children
);
1062 static void kvm_mmu_update_parents_unsync(struct kvm_mmu_page
*sp
)
1064 struct kvm_pte_chain
*pte_chain
;
1065 struct hlist_node
*node
;
1068 if (!sp
->parent_pte
)
1071 if (!sp
->multimapped
) {
1072 kvm_mmu_update_unsync_bitmap(sp
->parent_pte
);
1076 hlist_for_each_entry(pte_chain
, node
, &sp
->parent_ptes
, link
)
1077 for (i
= 0; i
< NR_PTE_CHAIN_ENTRIES
; ++i
) {
1078 if (!pte_chain
->parent_ptes
[i
])
1080 kvm_mmu_update_unsync_bitmap(pte_chain
->parent_ptes
[i
]);
1084 static int unsync_walk_fn(struct kvm_mmu_page
*sp
)
1086 kvm_mmu_update_parents_unsync(sp
);
1090 static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page
*sp
)
1092 mmu_parent_walk(sp
, unsync_walk_fn
);
1093 kvm_mmu_update_parents_unsync(sp
);
1096 static void nonpaging_prefetch_page(struct kvm_vcpu
*vcpu
,
1097 struct kvm_mmu_page
*sp
)
1101 for (i
= 0; i
< PT64_ENT_PER_PAGE
; ++i
)
1102 sp
->spt
[i
] = shadow_trap_nonpresent_pte
;
1105 static int nonpaging_sync_page(struct kvm_vcpu
*vcpu
,
1106 struct kvm_mmu_page
*sp
)
1111 static void nonpaging_invlpg(struct kvm_vcpu
*vcpu
, gva_t gva
)
1115 #define KVM_PAGE_ARRAY_NR 16
1117 struct kvm_mmu_pages
{
1118 struct mmu_page_and_offset
{
1119 struct kvm_mmu_page
*sp
;
1121 } page
[KVM_PAGE_ARRAY_NR
];
1125 #define for_each_unsync_children(bitmap, idx) \
1126 for (idx = find_first_bit(bitmap, 512); \
1128 idx = find_next_bit(bitmap, 512, idx+1))
1130 static int mmu_pages_add(struct kvm_mmu_pages
*pvec
, struct kvm_mmu_page
*sp
,
1136 for (i
=0; i
< pvec
->nr
; i
++)
1137 if (pvec
->page
[i
].sp
== sp
)
1140 pvec
->page
[pvec
->nr
].sp
= sp
;
1141 pvec
->page
[pvec
->nr
].idx
= idx
;
1143 return (pvec
->nr
== KVM_PAGE_ARRAY_NR
);
1146 static int __mmu_unsync_walk(struct kvm_mmu_page
*sp
,
1147 struct kvm_mmu_pages
*pvec
)
1149 int i
, ret
, nr_unsync_leaf
= 0;
1151 for_each_unsync_children(sp
->unsync_child_bitmap
, i
) {
1152 u64 ent
= sp
->spt
[i
];
1154 if (is_shadow_present_pte(ent
) && !is_large_pte(ent
)) {
1155 struct kvm_mmu_page
*child
;
1156 child
= page_header(ent
& PT64_BASE_ADDR_MASK
);
1158 if (child
->unsync_children
) {
1159 if (mmu_pages_add(pvec
, child
, i
))
1162 ret
= __mmu_unsync_walk(child
, pvec
);
1164 __clear_bit(i
, sp
->unsync_child_bitmap
);
1166 nr_unsync_leaf
+= ret
;
1171 if (child
->unsync
) {
1173 if (mmu_pages_add(pvec
, child
, i
))
1179 if (find_first_bit(sp
->unsync_child_bitmap
, 512) == 512)
1180 sp
->unsync_children
= 0;
1182 return nr_unsync_leaf
;
1185 static int mmu_unsync_walk(struct kvm_mmu_page
*sp
,
1186 struct kvm_mmu_pages
*pvec
)
1188 if (!sp
->unsync_children
)
1191 mmu_pages_add(pvec
, sp
, 0);
1192 return __mmu_unsync_walk(sp
, pvec
);
1195 static void kvm_unlink_unsync_page(struct kvm
*kvm
, struct kvm_mmu_page
*sp
)
1197 WARN_ON(!sp
->unsync
);
1198 trace_kvm_mmu_sync_page(sp
);
1200 --kvm
->stat
.mmu_unsync
;
1203 static int kvm_mmu_prepare_zap_page(struct kvm
*kvm
, struct kvm_mmu_page
*sp
,
1204 struct list_head
*invalid_list
);
1205 static void kvm_mmu_commit_zap_page(struct kvm
*kvm
,
1206 struct list_head
*invalid_list
);
1208 #define for_each_gfn_sp(kvm, sp, gfn, pos) \
1209 hlist_for_each_entry(sp, pos, \
1210 &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link) \
1211 if ((sp)->gfn != (gfn)) {} else
1213 #define for_each_gfn_indirect_valid_sp(kvm, sp, gfn, pos) \
1214 hlist_for_each_entry(sp, pos, \
1215 &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link) \
1216 if ((sp)->gfn != (gfn) || (sp)->role.direct || \
1217 (sp)->role.invalid) {} else
1219 static int __kvm_sync_page(struct kvm_vcpu
*vcpu
, struct kvm_mmu_page
*sp
,
1220 struct list_head
*invalid_list
, bool clear_unsync
)
1222 if (sp
->role
.cr4_pae
!= !!is_pae(vcpu
)) {
1223 kvm_mmu_prepare_zap_page(vcpu
->kvm
, sp
, invalid_list
);
1228 if (rmap_write_protect(vcpu
->kvm
, sp
->gfn
))
1229 kvm_flush_remote_tlbs(vcpu
->kvm
);
1230 kvm_unlink_unsync_page(vcpu
->kvm
, sp
);
1233 if (vcpu
->arch
.mmu
.sync_page(vcpu
, sp
)) {
1234 kvm_mmu_prepare_zap_page(vcpu
->kvm
, sp
, invalid_list
);
1238 kvm_mmu_flush_tlb(vcpu
);
1242 static void mmu_convert_notrap(struct kvm_mmu_page
*sp
);
1243 static int kvm_sync_page_transient(struct kvm_vcpu
*vcpu
,
1244 struct kvm_mmu_page
*sp
)
1246 LIST_HEAD(invalid_list
);
1249 ret
= __kvm_sync_page(vcpu
, sp
, &invalid_list
, false);
1251 mmu_convert_notrap(sp
);
1253 kvm_mmu_commit_zap_page(vcpu
->kvm
, &invalid_list
);
1258 static int kvm_sync_page(struct kvm_vcpu
*vcpu
, struct kvm_mmu_page
*sp
,
1259 struct list_head
*invalid_list
)
1261 return __kvm_sync_page(vcpu
, sp
, invalid_list
, true);
1264 /* @gfn should be write-protected at the call site */
1265 static void kvm_sync_pages(struct kvm_vcpu
*vcpu
, gfn_t gfn
)
1267 struct kvm_mmu_page
*s
;
1268 struct hlist_node
*node
;
1269 LIST_HEAD(invalid_list
);
1272 for_each_gfn_indirect_valid_sp(vcpu
->kvm
, s
, gfn
, node
) {
1276 WARN_ON(s
->role
.level
!= PT_PAGE_TABLE_LEVEL
);
1277 if ((s
->role
.cr4_pae
!= !!is_pae(vcpu
)) ||
1278 (vcpu
->arch
.mmu
.sync_page(vcpu
, s
))) {
1279 kvm_mmu_prepare_zap_page(vcpu
->kvm
, s
, &invalid_list
);
1282 kvm_unlink_unsync_page(vcpu
->kvm
, s
);
1286 kvm_mmu_commit_zap_page(vcpu
->kvm
, &invalid_list
);
1288 kvm_mmu_flush_tlb(vcpu
);
1291 struct mmu_page_path
{
1292 struct kvm_mmu_page
*parent
[PT64_ROOT_LEVEL
-1];
1293 unsigned int idx
[PT64_ROOT_LEVEL
-1];
1296 #define for_each_sp(pvec, sp, parents, i) \
1297 for (i = mmu_pages_next(&pvec, &parents, -1), \
1298 sp = pvec.page[i].sp; \
1299 i < pvec.nr && ({ sp = pvec.page[i].sp; 1;}); \
1300 i = mmu_pages_next(&pvec, &parents, i))
1302 static int mmu_pages_next(struct kvm_mmu_pages
*pvec
,
1303 struct mmu_page_path
*parents
,
1308 for (n
= i
+1; n
< pvec
->nr
; n
++) {
1309 struct kvm_mmu_page
*sp
= pvec
->page
[n
].sp
;
1311 if (sp
->role
.level
== PT_PAGE_TABLE_LEVEL
) {
1312 parents
->idx
[0] = pvec
->page
[n
].idx
;
1316 parents
->parent
[sp
->role
.level
-2] = sp
;
1317 parents
->idx
[sp
->role
.level
-1] = pvec
->page
[n
].idx
;
1323 static void mmu_pages_clear_parents(struct mmu_page_path
*parents
)
1325 struct kvm_mmu_page
*sp
;
1326 unsigned int level
= 0;
1329 unsigned int idx
= parents
->idx
[level
];
1331 sp
= parents
->parent
[level
];
1335 --sp
->unsync_children
;
1336 WARN_ON((int)sp
->unsync_children
< 0);
1337 __clear_bit(idx
, sp
->unsync_child_bitmap
);
1339 } while (level
< PT64_ROOT_LEVEL
-1 && !sp
->unsync_children
);
1342 static void kvm_mmu_pages_init(struct kvm_mmu_page
*parent
,
1343 struct mmu_page_path
*parents
,
1344 struct kvm_mmu_pages
*pvec
)
1346 parents
->parent
[parent
->role
.level
-1] = NULL
;
1350 static void mmu_sync_children(struct kvm_vcpu
*vcpu
,
1351 struct kvm_mmu_page
*parent
)
1354 struct kvm_mmu_page
*sp
;
1355 struct mmu_page_path parents
;
1356 struct kvm_mmu_pages pages
;
1357 LIST_HEAD(invalid_list
);
1359 kvm_mmu_pages_init(parent
, &parents
, &pages
);
1360 while (mmu_unsync_walk(parent
, &pages
)) {
1363 for_each_sp(pages
, sp
, parents
, i
)
1364 protected |= rmap_write_protect(vcpu
->kvm
, sp
->gfn
);
1367 kvm_flush_remote_tlbs(vcpu
->kvm
);
1369 for_each_sp(pages
, sp
, parents
, i
) {
1370 kvm_sync_page(vcpu
, sp
, &invalid_list
);
1371 mmu_pages_clear_parents(&parents
);
1373 kvm_mmu_commit_zap_page(vcpu
->kvm
, &invalid_list
);
1374 cond_resched_lock(&vcpu
->kvm
->mmu_lock
);
1375 kvm_mmu_pages_init(parent
, &parents
, &pages
);
1379 static struct kvm_mmu_page
*kvm_mmu_get_page(struct kvm_vcpu
*vcpu
,
1387 union kvm_mmu_page_role role
;
1389 struct kvm_mmu_page
*sp
;
1390 struct hlist_node
*node
;
1391 bool need_sync
= false;
1393 role
= vcpu
->arch
.mmu
.base_role
;
1395 role
.direct
= direct
;
1398 role
.access
= access
;
1399 if (!tdp_enabled
&& vcpu
->arch
.mmu
.root_level
<= PT32_ROOT_LEVEL
) {
1400 quadrant
= gaddr
>> (PAGE_SHIFT
+ (PT64_PT_BITS
* level
));
1401 quadrant
&= (1 << ((PT32_PT_BITS
- PT64_PT_BITS
) * level
)) - 1;
1402 role
.quadrant
= quadrant
;
1404 for_each_gfn_sp(vcpu
->kvm
, sp
, gfn
, node
) {
1405 if (!need_sync
&& sp
->unsync
)
1408 if (sp
->role
.word
!= role
.word
)
1411 if (sp
->unsync
&& kvm_sync_page_transient(vcpu
, sp
))
1414 mmu_page_add_parent_pte(vcpu
, sp
, parent_pte
);
1415 if (sp
->unsync_children
) {
1416 set_bit(KVM_REQ_MMU_SYNC
, &vcpu
->requests
);
1417 kvm_mmu_mark_parents_unsync(sp
);
1418 } else if (sp
->unsync
)
1419 kvm_mmu_mark_parents_unsync(sp
);
1421 trace_kvm_mmu_get_page(sp
, false);
1424 ++vcpu
->kvm
->stat
.mmu_cache_miss
;
1425 sp
= kvm_mmu_alloc_page(vcpu
, parent_pte
, direct
);
1430 hlist_add_head(&sp
->hash_link
,
1431 &vcpu
->kvm
->arch
.mmu_page_hash
[kvm_page_table_hashfn(gfn
)]);
1433 if (rmap_write_protect(vcpu
->kvm
, gfn
))
1434 kvm_flush_remote_tlbs(vcpu
->kvm
);
1435 if (level
> PT_PAGE_TABLE_LEVEL
&& need_sync
)
1436 kvm_sync_pages(vcpu
, gfn
);
1438 account_shadowed(vcpu
->kvm
, gfn
);
1440 if (shadow_trap_nonpresent_pte
!= shadow_notrap_nonpresent_pte
)
1441 vcpu
->arch
.mmu
.prefetch_page(vcpu
, sp
);
1443 nonpaging_prefetch_page(vcpu
, sp
);
1444 trace_kvm_mmu_get_page(sp
, true);
1448 static void shadow_walk_init(struct kvm_shadow_walk_iterator
*iterator
,
1449 struct kvm_vcpu
*vcpu
, u64 addr
)
1451 iterator
->addr
= addr
;
1452 iterator
->shadow_addr
= vcpu
->arch
.mmu
.root_hpa
;
1453 iterator
->level
= vcpu
->arch
.mmu
.shadow_root_level
;
1454 if (iterator
->level
== PT32E_ROOT_LEVEL
) {
1455 iterator
->shadow_addr
1456 = vcpu
->arch
.mmu
.pae_root
[(addr
>> 30) & 3];
1457 iterator
->shadow_addr
&= PT64_BASE_ADDR_MASK
;
1459 if (!iterator
->shadow_addr
)
1460 iterator
->level
= 0;
1464 static bool shadow_walk_okay(struct kvm_shadow_walk_iterator
*iterator
)
1466 if (iterator
->level
< PT_PAGE_TABLE_LEVEL
)
1469 if (iterator
->level
== PT_PAGE_TABLE_LEVEL
)
1470 if (is_large_pte(*iterator
->sptep
))
1473 iterator
->index
= SHADOW_PT_INDEX(iterator
->addr
, iterator
->level
);
1474 iterator
->sptep
= ((u64
*)__va(iterator
->shadow_addr
)) + iterator
->index
;
1478 static void shadow_walk_next(struct kvm_shadow_walk_iterator
*iterator
)
1480 iterator
->shadow_addr
= *iterator
->sptep
& PT64_BASE_ADDR_MASK
;
1484 static void kvm_mmu_page_unlink_children(struct kvm
*kvm
,
1485 struct kvm_mmu_page
*sp
)
1493 for (i
= 0; i
< PT64_ENT_PER_PAGE
; ++i
) {
1496 if (is_shadow_present_pte(ent
)) {
1497 if (!is_last_spte(ent
, sp
->role
.level
)) {
1498 ent
&= PT64_BASE_ADDR_MASK
;
1499 mmu_page_remove_parent_pte(page_header(ent
),
1502 if (is_large_pte(ent
))
1504 rmap_remove(kvm
, &pt
[i
]);
1507 pt
[i
] = shadow_trap_nonpresent_pte
;
1511 static void kvm_mmu_put_page(struct kvm_mmu_page
*sp
, u64
*parent_pte
)
1513 mmu_page_remove_parent_pte(sp
, parent_pte
);
1516 static void kvm_mmu_reset_last_pte_updated(struct kvm
*kvm
)
1519 struct kvm_vcpu
*vcpu
;
1521 kvm_for_each_vcpu(i
, vcpu
, kvm
)
1522 vcpu
->arch
.last_pte_updated
= NULL
;
1525 static void kvm_mmu_unlink_parents(struct kvm
*kvm
, struct kvm_mmu_page
*sp
)
1529 while (sp
->multimapped
|| sp
->parent_pte
) {
1530 if (!sp
->multimapped
)
1531 parent_pte
= sp
->parent_pte
;
1533 struct kvm_pte_chain
*chain
;
1535 chain
= container_of(sp
->parent_ptes
.first
,
1536 struct kvm_pte_chain
, link
);
1537 parent_pte
= chain
->parent_ptes
[0];
1539 BUG_ON(!parent_pte
);
1540 kvm_mmu_put_page(sp
, parent_pte
);
1541 __set_spte(parent_pte
, shadow_trap_nonpresent_pte
);
1545 static int mmu_zap_unsync_children(struct kvm
*kvm
,
1546 struct kvm_mmu_page
*parent
,
1547 struct list_head
*invalid_list
)
1550 struct mmu_page_path parents
;
1551 struct kvm_mmu_pages pages
;
1553 if (parent
->role
.level
== PT_PAGE_TABLE_LEVEL
)
1556 kvm_mmu_pages_init(parent
, &parents
, &pages
);
1557 while (mmu_unsync_walk(parent
, &pages
)) {
1558 struct kvm_mmu_page
*sp
;
1560 for_each_sp(pages
, sp
, parents
, i
) {
1561 kvm_mmu_prepare_zap_page(kvm
, sp
, invalid_list
);
1562 mmu_pages_clear_parents(&parents
);
1565 kvm_mmu_pages_init(parent
, &parents
, &pages
);
1571 static int kvm_mmu_prepare_zap_page(struct kvm
*kvm
, struct kvm_mmu_page
*sp
,
1572 struct list_head
*invalid_list
)
1576 trace_kvm_mmu_prepare_zap_page(sp
);
1577 ++kvm
->stat
.mmu_shadow_zapped
;
1578 ret
= mmu_zap_unsync_children(kvm
, sp
, invalid_list
);
1579 kvm_mmu_page_unlink_children(kvm
, sp
);
1580 kvm_mmu_unlink_parents(kvm
, sp
);
1581 if (!sp
->role
.invalid
&& !sp
->role
.direct
)
1582 unaccount_shadowed(kvm
, sp
->gfn
);
1584 kvm_unlink_unsync_page(kvm
, sp
);
1585 if (!sp
->root_count
) {
1588 list_move(&sp
->link
, invalid_list
);
1590 list_move(&sp
->link
, &kvm
->arch
.active_mmu_pages
);
1591 kvm_reload_remote_mmus(kvm
);
1594 sp
->role
.invalid
= 1;
1595 kvm_mmu_reset_last_pte_updated(kvm
);
1599 static void kvm_mmu_commit_zap_page(struct kvm
*kvm
,
1600 struct list_head
*invalid_list
)
1602 struct kvm_mmu_page
*sp
;
1604 if (list_empty(invalid_list
))
1607 kvm_flush_remote_tlbs(kvm
);
1610 sp
= list_first_entry(invalid_list
, struct kvm_mmu_page
, link
);
1611 WARN_ON(!sp
->role
.invalid
|| sp
->root_count
);
1612 kvm_mmu_free_page(kvm
, sp
);
1613 } while (!list_empty(invalid_list
));
1618 * Changing the number of mmu pages allocated to the vm
1619 * Note: if kvm_nr_mmu_pages is too small, you will get dead lock
1621 void kvm_mmu_change_mmu_pages(struct kvm
*kvm
, unsigned int kvm_nr_mmu_pages
)
1624 LIST_HEAD(invalid_list
);
1626 used_pages
= kvm
->arch
.n_alloc_mmu_pages
- kvm
->arch
.n_free_mmu_pages
;
1627 used_pages
= max(0, used_pages
);
1630 * If we set the number of mmu pages to be smaller be than the
1631 * number of actived pages , we must to free some mmu pages before we
1635 if (used_pages
> kvm_nr_mmu_pages
) {
1636 while (used_pages
> kvm_nr_mmu_pages
&&
1637 !list_empty(&kvm
->arch
.active_mmu_pages
)) {
1638 struct kvm_mmu_page
*page
;
1640 page
= container_of(kvm
->arch
.active_mmu_pages
.prev
,
1641 struct kvm_mmu_page
, link
);
1642 used_pages
-= kvm_mmu_prepare_zap_page(kvm
, page
,
1645 kvm_mmu_commit_zap_page(kvm
, &invalid_list
);
1646 kvm_nr_mmu_pages
= used_pages
;
1647 kvm
->arch
.n_free_mmu_pages
= 0;
1650 kvm
->arch
.n_free_mmu_pages
+= kvm_nr_mmu_pages
1651 - kvm
->arch
.n_alloc_mmu_pages
;
1653 kvm
->arch
.n_alloc_mmu_pages
= kvm_nr_mmu_pages
;
1656 static int kvm_mmu_unprotect_page(struct kvm
*kvm
, gfn_t gfn
)
1658 struct kvm_mmu_page
*sp
;
1659 struct hlist_node
*node
;
1660 LIST_HEAD(invalid_list
);
1663 pgprintk("%s: looking for gfn %lx\n", __func__
, gfn
);
1666 for_each_gfn_indirect_valid_sp(kvm
, sp
, gfn
, node
) {
1667 pgprintk("%s: gfn %lx role %x\n", __func__
, gfn
,
1670 kvm_mmu_prepare_zap_page(kvm
, sp
, &invalid_list
);
1672 kvm_mmu_commit_zap_page(kvm
, &invalid_list
);
1676 static void mmu_unshadow(struct kvm
*kvm
, gfn_t gfn
)
1678 struct kvm_mmu_page
*sp
;
1679 struct hlist_node
*node
;
1680 LIST_HEAD(invalid_list
);
1682 for_each_gfn_indirect_valid_sp(kvm
, sp
, gfn
, node
) {
1683 pgprintk("%s: zap %lx %x\n",
1684 __func__
, gfn
, sp
->role
.word
);
1685 kvm_mmu_prepare_zap_page(kvm
, sp
, &invalid_list
);
1687 kvm_mmu_commit_zap_page(kvm
, &invalid_list
);
1690 static void page_header_update_slot(struct kvm
*kvm
, void *pte
, gfn_t gfn
)
1692 int slot
= memslot_id(kvm
, gfn
);
1693 struct kvm_mmu_page
*sp
= page_header(__pa(pte
));
1695 __set_bit(slot
, sp
->slot_bitmap
);
1698 static void mmu_convert_notrap(struct kvm_mmu_page
*sp
)
1703 if (shadow_trap_nonpresent_pte
== shadow_notrap_nonpresent_pte
)
1706 for (i
= 0; i
< PT64_ENT_PER_PAGE
; ++i
) {
1707 if (pt
[i
] == shadow_notrap_nonpresent_pte
)
1708 __set_spte(&pt
[i
], shadow_trap_nonpresent_pte
);
1713 * The function is based on mtrr_type_lookup() in
1714 * arch/x86/kernel/cpu/mtrr/generic.c
1716 static int get_mtrr_type(struct mtrr_state_type
*mtrr_state
,
1721 u8 prev_match
, curr_match
;
1722 int num_var_ranges
= KVM_NR_VAR_MTRR
;
1724 if (!mtrr_state
->enabled
)
1727 /* Make end inclusive end, instead of exclusive */
1730 /* Look in fixed ranges. Just return the type as per start */
1731 if (mtrr_state
->have_fixed
&& (start
< 0x100000)) {
1734 if (start
< 0x80000) {
1736 idx
+= (start
>> 16);
1737 return mtrr_state
->fixed_ranges
[idx
];
1738 } else if (start
< 0xC0000) {
1740 idx
+= ((start
- 0x80000) >> 14);
1741 return mtrr_state
->fixed_ranges
[idx
];
1742 } else if (start
< 0x1000000) {
1744 idx
+= ((start
- 0xC0000) >> 12);
1745 return mtrr_state
->fixed_ranges
[idx
];
1750 * Look in variable ranges
1751 * Look of multiple ranges matching this address and pick type
1752 * as per MTRR precedence
1754 if (!(mtrr_state
->enabled
& 2))
1755 return mtrr_state
->def_type
;
1758 for (i
= 0; i
< num_var_ranges
; ++i
) {
1759 unsigned short start_state
, end_state
;
1761 if (!(mtrr_state
->var_ranges
[i
].mask_lo
& (1 << 11)))
1764 base
= (((u64
)mtrr_state
->var_ranges
[i
].base_hi
) << 32) +
1765 (mtrr_state
->var_ranges
[i
].base_lo
& PAGE_MASK
);
1766 mask
= (((u64
)mtrr_state
->var_ranges
[i
].mask_hi
) << 32) +
1767 (mtrr_state
->var_ranges
[i
].mask_lo
& PAGE_MASK
);
1769 start_state
= ((start
& mask
) == (base
& mask
));
1770 end_state
= ((end
& mask
) == (base
& mask
));
1771 if (start_state
!= end_state
)
1774 if ((start
& mask
) != (base
& mask
))
1777 curr_match
= mtrr_state
->var_ranges
[i
].base_lo
& 0xff;
1778 if (prev_match
== 0xFF) {
1779 prev_match
= curr_match
;
1783 if (prev_match
== MTRR_TYPE_UNCACHABLE
||
1784 curr_match
== MTRR_TYPE_UNCACHABLE
)
1785 return MTRR_TYPE_UNCACHABLE
;
1787 if ((prev_match
== MTRR_TYPE_WRBACK
&&
1788 curr_match
== MTRR_TYPE_WRTHROUGH
) ||
1789 (prev_match
== MTRR_TYPE_WRTHROUGH
&&
1790 curr_match
== MTRR_TYPE_WRBACK
)) {
1791 prev_match
= MTRR_TYPE_WRTHROUGH
;
1792 curr_match
= MTRR_TYPE_WRTHROUGH
;
1795 if (prev_match
!= curr_match
)
1796 return MTRR_TYPE_UNCACHABLE
;
1799 if (prev_match
!= 0xFF)
1802 return mtrr_state
->def_type
;
1805 u8
kvm_get_guest_memory_type(struct kvm_vcpu
*vcpu
, gfn_t gfn
)
1809 mtrr
= get_mtrr_type(&vcpu
->arch
.mtrr_state
, gfn
<< PAGE_SHIFT
,
1810 (gfn
<< PAGE_SHIFT
) + PAGE_SIZE
);
1811 if (mtrr
== 0xfe || mtrr
== 0xff)
1812 mtrr
= MTRR_TYPE_WRBACK
;
1815 EXPORT_SYMBOL_GPL(kvm_get_guest_memory_type
);
1817 static void __kvm_unsync_page(struct kvm_vcpu
*vcpu
, struct kvm_mmu_page
*sp
)
1819 trace_kvm_mmu_unsync_page(sp
);
1820 ++vcpu
->kvm
->stat
.mmu_unsync
;
1823 kvm_mmu_mark_parents_unsync(sp
);
1824 mmu_convert_notrap(sp
);
1827 static void kvm_unsync_pages(struct kvm_vcpu
*vcpu
, gfn_t gfn
)
1829 struct kvm_mmu_page
*s
;
1830 struct hlist_node
*node
;
1832 for_each_gfn_indirect_valid_sp(vcpu
->kvm
, s
, gfn
, node
) {
1835 WARN_ON(s
->role
.level
!= PT_PAGE_TABLE_LEVEL
);
1836 __kvm_unsync_page(vcpu
, s
);
1840 static int mmu_need_write_protect(struct kvm_vcpu
*vcpu
, gfn_t gfn
,
1843 struct kvm_mmu_page
*s
;
1844 struct hlist_node
*node
;
1845 bool need_unsync
= false;
1847 for_each_gfn_indirect_valid_sp(vcpu
->kvm
, s
, gfn
, node
) {
1848 if (s
->role
.level
!= PT_PAGE_TABLE_LEVEL
)
1851 if (!need_unsync
&& !s
->unsync
) {
1852 if (!can_unsync
|| !oos_shadow
)
1858 kvm_unsync_pages(vcpu
, gfn
);
1862 static int set_spte(struct kvm_vcpu
*vcpu
, u64
*sptep
,
1863 unsigned pte_access
, int user_fault
,
1864 int write_fault
, int dirty
, int level
,
1865 gfn_t gfn
, pfn_t pfn
, bool speculative
,
1866 bool can_unsync
, bool reset_host_protection
)
1872 * We don't set the accessed bit, since we sometimes want to see
1873 * whether the guest actually used the pte (in order to detect
1876 spte
= shadow_base_present_pte
| shadow_dirty_mask
;
1878 spte
|= shadow_accessed_mask
;
1880 pte_access
&= ~ACC_WRITE_MASK
;
1881 if (pte_access
& ACC_EXEC_MASK
)
1882 spte
|= shadow_x_mask
;
1884 spte
|= shadow_nx_mask
;
1885 if (pte_access
& ACC_USER_MASK
)
1886 spte
|= shadow_user_mask
;
1887 if (level
> PT_PAGE_TABLE_LEVEL
)
1888 spte
|= PT_PAGE_SIZE_MASK
;
1890 spte
|= kvm_x86_ops
->get_mt_mask(vcpu
, gfn
,
1891 kvm_is_mmio_pfn(pfn
));
1893 if (reset_host_protection
)
1894 spte
|= SPTE_HOST_WRITEABLE
;
1896 spte
|= (u64
)pfn
<< PAGE_SHIFT
;
1898 if ((pte_access
& ACC_WRITE_MASK
)
1899 || (!tdp_enabled
&& write_fault
&& !is_write_protection(vcpu
)
1902 if (level
> PT_PAGE_TABLE_LEVEL
&&
1903 has_wrprotected_page(vcpu
->kvm
, gfn
, level
)) {
1905 rmap_remove(vcpu
->kvm
, sptep
);
1906 spte
= shadow_trap_nonpresent_pte
;
1910 spte
|= PT_WRITABLE_MASK
;
1912 if (!tdp_enabled
&& !(pte_access
& ACC_WRITE_MASK
))
1913 spte
&= ~PT_USER_MASK
;
1916 * Optimization: for pte sync, if spte was writable the hash
1917 * lookup is unnecessary (and expensive). Write protection
1918 * is responsibility of mmu_get_page / kvm_sync_page.
1919 * Same reasoning can be applied to dirty page accounting.
1921 if (!can_unsync
&& is_writable_pte(*sptep
))
1924 if (mmu_need_write_protect(vcpu
, gfn
, can_unsync
)) {
1925 pgprintk("%s: found shadow page for %lx, marking ro\n",
1928 pte_access
&= ~ACC_WRITE_MASK
;
1929 if (is_writable_pte(spte
))
1930 spte
&= ~PT_WRITABLE_MASK
;
1934 if (pte_access
& ACC_WRITE_MASK
)
1935 mark_page_dirty(vcpu
->kvm
, gfn
);
1938 __set_spte(sptep
, spte
);
1942 static void mmu_set_spte(struct kvm_vcpu
*vcpu
, u64
*sptep
,
1943 unsigned pt_access
, unsigned pte_access
,
1944 int user_fault
, int write_fault
, int dirty
,
1945 int *ptwrite
, int level
, gfn_t gfn
,
1946 pfn_t pfn
, bool speculative
,
1947 bool reset_host_protection
)
1949 int was_rmapped
= 0;
1950 int was_writable
= is_writable_pte(*sptep
);
1953 pgprintk("%s: spte %llx access %x write_fault %d"
1954 " user_fault %d gfn %lx\n",
1955 __func__
, *sptep
, pt_access
,
1956 write_fault
, user_fault
, gfn
);
1958 if (is_rmap_spte(*sptep
)) {
1960 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
1961 * the parent of the now unreachable PTE.
1963 if (level
> PT_PAGE_TABLE_LEVEL
&&
1964 !is_large_pte(*sptep
)) {
1965 struct kvm_mmu_page
*child
;
1968 child
= page_header(pte
& PT64_BASE_ADDR_MASK
);
1969 mmu_page_remove_parent_pte(child
, sptep
);
1970 __set_spte(sptep
, shadow_trap_nonpresent_pte
);
1971 kvm_flush_remote_tlbs(vcpu
->kvm
);
1972 } else if (pfn
!= spte_to_pfn(*sptep
)) {
1973 pgprintk("hfn old %lx new %lx\n",
1974 spte_to_pfn(*sptep
), pfn
);
1975 rmap_remove(vcpu
->kvm
, sptep
);
1976 __set_spte(sptep
, shadow_trap_nonpresent_pte
);
1977 kvm_flush_remote_tlbs(vcpu
->kvm
);
1982 if (set_spte(vcpu
, sptep
, pte_access
, user_fault
, write_fault
,
1983 dirty
, level
, gfn
, pfn
, speculative
, true,
1984 reset_host_protection
)) {
1987 kvm_mmu_flush_tlb(vcpu
);
1990 pgprintk("%s: setting spte %llx\n", __func__
, *sptep
);
1991 pgprintk("instantiating %s PTE (%s) at %ld (%llx) addr %p\n",
1992 is_large_pte(*sptep
)? "2MB" : "4kB",
1993 *sptep
& PT_PRESENT_MASK
?"RW":"R", gfn
,
1995 if (!was_rmapped
&& is_large_pte(*sptep
))
1996 ++vcpu
->kvm
->stat
.lpages
;
1998 page_header_update_slot(vcpu
->kvm
, sptep
, gfn
);
2000 rmap_count
= rmap_add(vcpu
, sptep
, gfn
);
2001 kvm_release_pfn_clean(pfn
);
2002 if (rmap_count
> RMAP_RECYCLE_THRESHOLD
)
2003 rmap_recycle(vcpu
, sptep
, gfn
);
2006 kvm_release_pfn_dirty(pfn
);
2008 kvm_release_pfn_clean(pfn
);
2011 vcpu
->arch
.last_pte_updated
= sptep
;
2012 vcpu
->arch
.last_pte_gfn
= gfn
;
2016 static void nonpaging_new_cr3(struct kvm_vcpu
*vcpu
)
2020 static int __direct_map(struct kvm_vcpu
*vcpu
, gpa_t v
, int write
,
2021 int level
, gfn_t gfn
, pfn_t pfn
)
2023 struct kvm_shadow_walk_iterator iterator
;
2024 struct kvm_mmu_page
*sp
;
2028 for_each_shadow_entry(vcpu
, (u64
)gfn
<< PAGE_SHIFT
, iterator
) {
2029 if (iterator
.level
== level
) {
2030 mmu_set_spte(vcpu
, iterator
.sptep
, ACC_ALL
, ACC_ALL
,
2031 0, write
, 1, &pt_write
,
2032 level
, gfn
, pfn
, false, true);
2033 ++vcpu
->stat
.pf_fixed
;
2037 if (*iterator
.sptep
== shadow_trap_nonpresent_pte
) {
2038 u64 base_addr
= iterator
.addr
;
2040 base_addr
&= PT64_LVL_ADDR_MASK(iterator
.level
);
2041 pseudo_gfn
= base_addr
>> PAGE_SHIFT
;
2042 sp
= kvm_mmu_get_page(vcpu
, pseudo_gfn
, iterator
.addr
,
2044 1, ACC_ALL
, iterator
.sptep
);
2046 pgprintk("nonpaging_map: ENOMEM\n");
2047 kvm_release_pfn_clean(pfn
);
2051 __set_spte(iterator
.sptep
,
2053 | PT_PRESENT_MASK
| PT_WRITABLE_MASK
2054 | shadow_user_mask
| shadow_x_mask
);
2060 static void kvm_send_hwpoison_signal(struct kvm
*kvm
, gfn_t gfn
)
2066 /* Touch the page, so send SIGBUS */
2067 hva
= (void __user
*)gfn_to_hva(kvm
, gfn
);
2068 r
= copy_from_user(buf
, hva
, 1);
2071 static int kvm_handle_bad_page(struct kvm
*kvm
, gfn_t gfn
, pfn_t pfn
)
2073 kvm_release_pfn_clean(pfn
);
2074 if (is_hwpoison_pfn(pfn
)) {
2075 kvm_send_hwpoison_signal(kvm
, gfn
);
2081 static int nonpaging_map(struct kvm_vcpu
*vcpu
, gva_t v
, int write
, gfn_t gfn
)
2086 unsigned long mmu_seq
;
2088 level
= mapping_level(vcpu
, gfn
);
2091 * This path builds a PAE pagetable - so we can map 2mb pages at
2092 * maximum. Therefore check if the level is larger than that.
2094 if (level
> PT_DIRECTORY_LEVEL
)
2095 level
= PT_DIRECTORY_LEVEL
;
2097 gfn
&= ~(KVM_PAGES_PER_HPAGE(level
) - 1);
2099 mmu_seq
= vcpu
->kvm
->mmu_notifier_seq
;
2101 pfn
= gfn_to_pfn(vcpu
->kvm
, gfn
);
2104 if (is_error_pfn(pfn
))
2105 return kvm_handle_bad_page(vcpu
->kvm
, gfn
, pfn
);
2107 spin_lock(&vcpu
->kvm
->mmu_lock
);
2108 if (mmu_notifier_retry(vcpu
, mmu_seq
))
2110 kvm_mmu_free_some_pages(vcpu
);
2111 r
= __direct_map(vcpu
, v
, write
, level
, gfn
, pfn
);
2112 spin_unlock(&vcpu
->kvm
->mmu_lock
);
2118 spin_unlock(&vcpu
->kvm
->mmu_lock
);
2119 kvm_release_pfn_clean(pfn
);
2124 static void mmu_free_roots(struct kvm_vcpu
*vcpu
)
2127 struct kvm_mmu_page
*sp
;
2128 LIST_HEAD(invalid_list
);
2130 if (!VALID_PAGE(vcpu
->arch
.mmu
.root_hpa
))
2132 spin_lock(&vcpu
->kvm
->mmu_lock
);
2133 if (vcpu
->arch
.mmu
.shadow_root_level
== PT64_ROOT_LEVEL
) {
2134 hpa_t root
= vcpu
->arch
.mmu
.root_hpa
;
2136 sp
= page_header(root
);
2138 if (!sp
->root_count
&& sp
->role
.invalid
) {
2139 kvm_mmu_prepare_zap_page(vcpu
->kvm
, sp
, &invalid_list
);
2140 kvm_mmu_commit_zap_page(vcpu
->kvm
, &invalid_list
);
2142 vcpu
->arch
.mmu
.root_hpa
= INVALID_PAGE
;
2143 spin_unlock(&vcpu
->kvm
->mmu_lock
);
2146 for (i
= 0; i
< 4; ++i
) {
2147 hpa_t root
= vcpu
->arch
.mmu
.pae_root
[i
];
2150 root
&= PT64_BASE_ADDR_MASK
;
2151 sp
= page_header(root
);
2153 if (!sp
->root_count
&& sp
->role
.invalid
)
2154 kvm_mmu_prepare_zap_page(vcpu
->kvm
, sp
,
2157 vcpu
->arch
.mmu
.pae_root
[i
] = INVALID_PAGE
;
2159 kvm_mmu_commit_zap_page(vcpu
->kvm
, &invalid_list
);
2160 spin_unlock(&vcpu
->kvm
->mmu_lock
);
2161 vcpu
->arch
.mmu
.root_hpa
= INVALID_PAGE
;
2164 static int mmu_check_root(struct kvm_vcpu
*vcpu
, gfn_t root_gfn
)
2168 if (!kvm_is_visible_gfn(vcpu
->kvm
, root_gfn
)) {
2169 set_bit(KVM_REQ_TRIPLE_FAULT
, &vcpu
->requests
);
2176 static int mmu_alloc_roots(struct kvm_vcpu
*vcpu
)
2180 struct kvm_mmu_page
*sp
;
2184 root_gfn
= vcpu
->arch
.cr3
>> PAGE_SHIFT
;
2186 if (vcpu
->arch
.mmu
.shadow_root_level
== PT64_ROOT_LEVEL
) {
2187 hpa_t root
= vcpu
->arch
.mmu
.root_hpa
;
2189 ASSERT(!VALID_PAGE(root
));
2190 if (mmu_check_root(vcpu
, root_gfn
))
2196 spin_lock(&vcpu
->kvm
->mmu_lock
);
2197 kvm_mmu_free_some_pages(vcpu
);
2198 sp
= kvm_mmu_get_page(vcpu
, root_gfn
, 0,
2199 PT64_ROOT_LEVEL
, direct
,
2201 root
= __pa(sp
->spt
);
2203 spin_unlock(&vcpu
->kvm
->mmu_lock
);
2204 vcpu
->arch
.mmu
.root_hpa
= root
;
2207 direct
= !is_paging(vcpu
);
2208 for (i
= 0; i
< 4; ++i
) {
2209 hpa_t root
= vcpu
->arch
.mmu
.pae_root
[i
];
2211 ASSERT(!VALID_PAGE(root
));
2212 if (vcpu
->arch
.mmu
.root_level
== PT32E_ROOT_LEVEL
) {
2213 pdptr
= kvm_pdptr_read(vcpu
, i
);
2214 if (!is_present_gpte(pdptr
)) {
2215 vcpu
->arch
.mmu
.pae_root
[i
] = 0;
2218 root_gfn
= pdptr
>> PAGE_SHIFT
;
2219 } else if (vcpu
->arch
.mmu
.root_level
== 0)
2221 if (mmu_check_root(vcpu
, root_gfn
))
2227 spin_lock(&vcpu
->kvm
->mmu_lock
);
2228 kvm_mmu_free_some_pages(vcpu
);
2229 sp
= kvm_mmu_get_page(vcpu
, root_gfn
, i
<< 30,
2230 PT32_ROOT_LEVEL
, direct
,
2232 root
= __pa(sp
->spt
);
2234 spin_unlock(&vcpu
->kvm
->mmu_lock
);
2236 vcpu
->arch
.mmu
.pae_root
[i
] = root
| PT_PRESENT_MASK
;
2238 vcpu
->arch
.mmu
.root_hpa
= __pa(vcpu
->arch
.mmu
.pae_root
);
2242 static void mmu_sync_roots(struct kvm_vcpu
*vcpu
)
2245 struct kvm_mmu_page
*sp
;
2247 if (!VALID_PAGE(vcpu
->arch
.mmu
.root_hpa
))
2249 if (vcpu
->arch
.mmu
.shadow_root_level
== PT64_ROOT_LEVEL
) {
2250 hpa_t root
= vcpu
->arch
.mmu
.root_hpa
;
2251 sp
= page_header(root
);
2252 mmu_sync_children(vcpu
, sp
);
2255 for (i
= 0; i
< 4; ++i
) {
2256 hpa_t root
= vcpu
->arch
.mmu
.pae_root
[i
];
2258 if (root
&& VALID_PAGE(root
)) {
2259 root
&= PT64_BASE_ADDR_MASK
;
2260 sp
= page_header(root
);
2261 mmu_sync_children(vcpu
, sp
);
2266 void kvm_mmu_sync_roots(struct kvm_vcpu
*vcpu
)
2268 spin_lock(&vcpu
->kvm
->mmu_lock
);
2269 mmu_sync_roots(vcpu
);
2270 spin_unlock(&vcpu
->kvm
->mmu_lock
);
2273 static gpa_t
nonpaging_gva_to_gpa(struct kvm_vcpu
*vcpu
, gva_t vaddr
,
2274 u32 access
, u32
*error
)
2281 static int nonpaging_page_fault(struct kvm_vcpu
*vcpu
, gva_t gva
,
2287 pgprintk("%s: gva %lx error %x\n", __func__
, gva
, error_code
);
2288 r
= mmu_topup_memory_caches(vcpu
);
2293 ASSERT(VALID_PAGE(vcpu
->arch
.mmu
.root_hpa
));
2295 gfn
= gva
>> PAGE_SHIFT
;
2297 return nonpaging_map(vcpu
, gva
& PAGE_MASK
,
2298 error_code
& PFERR_WRITE_MASK
, gfn
);
2301 static int tdp_page_fault(struct kvm_vcpu
*vcpu
, gva_t gpa
,
2307 gfn_t gfn
= gpa
>> PAGE_SHIFT
;
2308 unsigned long mmu_seq
;
2311 ASSERT(VALID_PAGE(vcpu
->arch
.mmu
.root_hpa
));
2313 r
= mmu_topup_memory_caches(vcpu
);
2317 level
= mapping_level(vcpu
, gfn
);
2319 gfn
&= ~(KVM_PAGES_PER_HPAGE(level
) - 1);
2321 mmu_seq
= vcpu
->kvm
->mmu_notifier_seq
;
2323 pfn
= gfn_to_pfn(vcpu
->kvm
, gfn
);
2324 if (is_error_pfn(pfn
))
2325 return kvm_handle_bad_page(vcpu
->kvm
, gfn
, pfn
);
2326 spin_lock(&vcpu
->kvm
->mmu_lock
);
2327 if (mmu_notifier_retry(vcpu
, mmu_seq
))
2329 kvm_mmu_free_some_pages(vcpu
);
2330 r
= __direct_map(vcpu
, gpa
, error_code
& PFERR_WRITE_MASK
,
2332 spin_unlock(&vcpu
->kvm
->mmu_lock
);
2337 spin_unlock(&vcpu
->kvm
->mmu_lock
);
2338 kvm_release_pfn_clean(pfn
);
2342 static void nonpaging_free(struct kvm_vcpu
*vcpu
)
2344 mmu_free_roots(vcpu
);
2347 static int nonpaging_init_context(struct kvm_vcpu
*vcpu
)
2349 struct kvm_mmu
*context
= &vcpu
->arch
.mmu
;
2351 context
->new_cr3
= nonpaging_new_cr3
;
2352 context
->page_fault
= nonpaging_page_fault
;
2353 context
->gva_to_gpa
= nonpaging_gva_to_gpa
;
2354 context
->free
= nonpaging_free
;
2355 context
->prefetch_page
= nonpaging_prefetch_page
;
2356 context
->sync_page
= nonpaging_sync_page
;
2357 context
->invlpg
= nonpaging_invlpg
;
2358 context
->root_level
= 0;
2359 context
->shadow_root_level
= PT32E_ROOT_LEVEL
;
2360 context
->root_hpa
= INVALID_PAGE
;
2364 void kvm_mmu_flush_tlb(struct kvm_vcpu
*vcpu
)
2366 ++vcpu
->stat
.tlb_flush
;
2367 set_bit(KVM_REQ_TLB_FLUSH
, &vcpu
->requests
);
2370 static void paging_new_cr3(struct kvm_vcpu
*vcpu
)
2372 pgprintk("%s: cr3 %lx\n", __func__
, vcpu
->arch
.cr3
);
2373 mmu_free_roots(vcpu
);
2376 static void inject_page_fault(struct kvm_vcpu
*vcpu
,
2380 kvm_inject_page_fault(vcpu
, addr
, err_code
);
2383 static void paging_free(struct kvm_vcpu
*vcpu
)
2385 nonpaging_free(vcpu
);
2388 static bool is_rsvd_bits_set(struct kvm_vcpu
*vcpu
, u64 gpte
, int level
)
2392 bit7
= (gpte
>> 7) & 1;
2393 return (gpte
& vcpu
->arch
.mmu
.rsvd_bits_mask
[bit7
][level
-1]) != 0;
2397 #include "paging_tmpl.h"
2401 #include "paging_tmpl.h"
static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;
	int maxphyaddr = cpuid_maxphyaddr(vcpu);
	u64 exb_bit_rsvd = 0;

	if (!is_nx(vcpu))
		exb_bit_rsvd = rsvd_bits(63, 63);

	switch (level) {
	case PT32_ROOT_LEVEL:
		/* no rsvd bits for 2 level 4K page table entries */
		context->rsvd_bits_mask[0][1] = 0;
		context->rsvd_bits_mask[0][0] = 0;
		context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];

		if (!is_pse(vcpu)) {
			context->rsvd_bits_mask[1][1] = 0;
			break;
		}

		if (is_cpuid_PSE36())
			/* 36bits PSE 4MB page */
			context->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
		else
			/* 32 bits PSE 4MB page */
			context->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
		break;
	case PT32E_ROOT_LEVEL:
		context->rsvd_bits_mask[0][2] =
			rsvd_bits(maxphyaddr, 63) |
			rsvd_bits(7, 8) | rsvd_bits(1, 2);	/* PDPTE */
		context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 62);	/* PDE */
		context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 62);	/* PTE */
		context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 62) |
			rsvd_bits(13, 20);		/* large page */
		context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
		break;
	case PT64_ROOT_LEVEL:
		context->rsvd_bits_mask[0][3] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
		context->rsvd_bits_mask[0][2] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
		context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51);
		context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51);
		context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3];
		context->rsvd_bits_mask[1][2] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51) |
			rsvd_bits(13, 29);
		context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51) |
			rsvd_bits(13, 20);		/* large page */
		context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
		break;
	}
}
static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	ASSERT(is_pae(vcpu));
	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging64_page_fault;
	context->gva_to_gpa = paging64_gva_to_gpa;
	context->prefetch_page = paging64_prefetch_page;
	context->sync_page = paging64_sync_page;
	context->invlpg = paging64_invlpg;
	context->free = paging_free;
	context->root_level = level;
	context->shadow_root_level = level;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

static int paging64_init_context(struct kvm_vcpu *vcpu)
{
	reset_rsvds_bits_mask(vcpu, PT64_ROOT_LEVEL);
	return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
}

static int paging32_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	reset_rsvds_bits_mask(vcpu, PT32_ROOT_LEVEL);
	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging32_page_fault;
	context->gva_to_gpa = paging32_gva_to_gpa;
	context->free = paging_free;
	context->prefetch_page = paging32_prefetch_page;
	context->sync_page = paging32_sync_page;
	context->invlpg = paging32_invlpg;
	context->root_level = PT32_ROOT_LEVEL;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

static int paging32E_init_context(struct kvm_vcpu *vcpu)
{
	reset_rsvds_bits_mask(vcpu, PT32E_ROOT_LEVEL);
	return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
}
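
/*
 * With TDP enabled the context only has to map gpa->hpa, but
 * gva_to_gpa must still follow the guest's own paging mode so that
 * emulated instructions can translate guest-virtual addresses.
 */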
static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = tdp_page_fault;
	context->free = nonpaging_free;
	context->prefetch_page = nonpaging_prefetch_page;
	context->sync_page = nonpaging_sync_page;
	context->invlpg = nonpaging_invlpg;
	context->shadow_root_level = kvm_x86_ops->get_tdp_level();
	context->root_hpa = INVALID_PAGE;

	if (!is_paging(vcpu)) {
		context->gva_to_gpa = nonpaging_gva_to_gpa;
		context->root_level = 0;
	} else if (is_long_mode(vcpu)) {
		reset_rsvds_bits_mask(vcpu, PT64_ROOT_LEVEL);
		context->gva_to_gpa = paging64_gva_to_gpa;
		context->root_level = PT64_ROOT_LEVEL;
	} else if (is_pae(vcpu)) {
		reset_rsvds_bits_mask(vcpu, PT32E_ROOT_LEVEL);
		context->gva_to_gpa = paging64_gva_to_gpa;
		context->root_level = PT32E_ROOT_LEVEL;
	} else {
		reset_rsvds_bits_mask(vcpu, PT32_ROOT_LEVEL);
		context->gva_to_gpa = paging32_gva_to_gpa;
		context->root_level = PT32_ROOT_LEVEL;
	}

	return 0;
}
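
/*
 * Shadow-paging setup: pick a context that matches the guest's
 * paging mode (none, 32-bit, PAE or long mode).
 */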
static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
{
	int r;

	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

	if (!is_paging(vcpu))
		r = nonpaging_init_context(vcpu);
	else if (is_long_mode(vcpu))
		r = paging64_init_context(vcpu);
	else if (is_pae(vcpu))
		r = paging32E_init_context(vcpu);
	else
		r = paging32_init_context(vcpu);

	vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu);
	vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu);

	return r;
}

static int init_kvm_mmu(struct kvm_vcpu *vcpu)
{
	vcpu->arch.update_pte.pfn = bad_pfn;

	if (tdp_enabled)
		return init_kvm_tdp_mmu(vcpu);
	else
		return init_kvm_softmmu(vcpu);
}

static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
{
	if (VALID_PAGE(vcpu->arch.mmu.root_hpa))
		/* mmu.free() should set root_hpa = INVALID_PAGE */
		vcpu->arch.mmu.free(vcpu);
}

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
	destroy_kvm_mmu(vcpu);
	return init_kvm_mmu(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
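
/*
 * (Re)build the shadow/TDP root and point the hardware at it via
 * set_cr3().  Memory caches are topped up first so that allocations
 * needed later (typically under mmu_lock) do not have to sleep.
 */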
int kvm_mmu_load(struct kvm_vcpu *vcpu)
{
	int r;

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		goto out;
	r = mmu_alloc_roots(vcpu);
	spin_lock(&vcpu->kvm->mmu_lock);
	mmu_sync_roots(vcpu);
	spin_unlock(&vcpu->kvm->mmu_lock);
	if (r)
		goto out;
	/* set_cr3() should ensure TLB has been flushed */
	kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_load);

void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}
static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp,
				  u64 *spte)
{
	u64 pte;
	struct kvm_mmu_page *child;

	pte = *spte;
	if (is_shadow_present_pte(pte)) {
		if (is_last_spte(pte, sp->role.level))
			rmap_remove(vcpu->kvm, spte);
		else {
			child = page_header(pte & PT64_BASE_ADDR_MASK);
			mmu_page_remove_parent_pte(child, spte);
		}
	}
	__set_spte(spte, shadow_trap_nonpresent_pte);
	if (is_large_pte(pte))
		--vcpu->kvm->stat.lpages;
}

static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp,
				  u64 *spte,
				  const void *new)
{
	if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
		++vcpu->kvm->stat.mmu_pde_zapped;
		return;
	}

	++vcpu->kvm->stat.mmu_pte_updated;
	if (!sp->role.cr4_pae)
		paging32_update_pte(vcpu, sp, spte, new);
	else
		paging64_update_pte(vcpu, sp, spte, new);
}

static bool need_remote_flush(u64 old, u64 new)
{
	if (!is_shadow_present_pte(old))
		return false;
	if (!is_shadow_present_pte(new))
		return true;
	if ((old ^ new) & PT64_BASE_ADDR_MASK)
		return true;
	old ^= PT64_NX_MASK;
	new ^= PT64_NX_MASK;
	return (old & ~new & PT64_PERM_MASK) != 0;
}
static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, bool zap_page,
				    bool remote_flush, bool local_flush)
{
	if (zap_page)
		return;

	if (remote_flush)
		kvm_flush_remote_tlbs(vcpu->kvm);
	else if (local_flush)
		kvm_mmu_flush_tlb(vcpu);
}

static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
{
	u64 *spte = vcpu->arch.last_pte_updated;

	return !!(spte && (*spte & shadow_accessed_mask));
}

static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
					  u64 gpte)
{
	gfn_t gfn;
	pfn_t pfn;

	if (!is_present_gpte(gpte))
		return;
	gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;

	vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();
	pfn = gfn_to_pfn(vcpu->kvm, gfn);

	if (is_error_pfn(pfn)) {
		kvm_release_pfn_clean(pfn);
		return;
	}
	vcpu->arch.update_pte.gfn = gfn;
	vcpu->arch.update_pte.pfn = pfn;
}

static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	u64 *spte = vcpu->arch.last_pte_updated;

	if (spte
	    && vcpu->arch.last_pte_gfn == gfn
	    && shadow_accessed_mask
	    && !(*spte & shadow_accessed_mask)
	    && is_shadow_present_pte(*spte))
		set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
}
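
/*
 * Called when the guest writes to a page that is shadowed as a page
 * table.  Either the affected sptes are updated in place from the new
 * guest pte, or the whole shadow page is zapped when the write looks
 * misaligned or the page is being written too frequently (write
 * flooding), in which case it is probably no longer a page table.
 */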
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		       const u8 *new, int bytes,
		       bool guest_initiated)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct kvm_mmu_page *sp;
	struct hlist_node *node;
	LIST_HEAD(invalid_list);
	u64 entry, gentry;
	u64 *spte;
	unsigned offset = offset_in_page(gpa);
	unsigned pte_size;
	unsigned page_offset;
	unsigned misaligned;
	unsigned quadrant;
	int level;
	int flooded = 0;
	int npte;
	int r;
	int invlpg_counter;
	bool remote_flush, local_flush, zap_page;

	zap_page = remote_flush = local_flush = false;

	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);

	invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);

	/*
	 * Assume that the pte write is on a page table of the same type
	 * as the current vcpu paging mode.  This is nearly always true
	 * (might be false while changing modes).  Note it is verified later
	 * by update_pte().
	 */
	if ((is_pae(vcpu) && bytes == 4) || !new) {
		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
		if (is_pae(vcpu)) {
			gpa &= ~(gpa_t)7;
			bytes = 8;
		}
		r = kvm_read_guest(vcpu->kvm, gpa, &gentry, min(bytes, 8));
		if (r)
			gentry = 0;
		new = (const u8 *)&gentry;
	}

	switch (bytes) {
	case 4:
		gentry = *(const u32 *)new;
		break;
	case 8:
		gentry = *(const u64 *)new;
		break;
	default:
		gentry = 0;
		break;
	}

	mmu_guess_page_from_pte_write(vcpu, gpa, gentry);
	spin_lock(&vcpu->kvm->mmu_lock);
	if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
		gentry = 0;
	kvm_mmu_access_page(vcpu, gfn);
	kvm_mmu_free_some_pages(vcpu);
	++vcpu->kvm->stat.mmu_pte_write;
	kvm_mmu_audit(vcpu, "pre pte write");
	if (guest_initiated) {
		if (gfn == vcpu->arch.last_pt_write_gfn
		    && !last_updated_pte_accessed(vcpu)) {
			++vcpu->arch.last_pt_write_count;
			if (vcpu->arch.last_pt_write_count >= 3)
				flooded = 1;
		} else {
			vcpu->arch.last_pt_write_gfn = gfn;
			vcpu->arch.last_pt_write_count = 1;
			vcpu->arch.last_pte_updated = NULL;
		}
	}

	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) {
		pte_size = sp->role.cr4_pae ? 8 : 4;
		misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
		misaligned |= bytes < 4;
		if (misaligned || flooded) {
			/*
			 * Misaligned accesses are too much trouble to fix
			 * up; also, they usually indicate a page is not used
			 * as a page table.
			 *
			 * If we're seeing too many writes to a page,
			 * it may no longer be a page table, or we may be
			 * forking, in which case it is better to unmap the
			 * page.
			 */
			pgprintk("misaligned: gpa %llx bytes %d role %x\n",
				 gpa, bytes, sp->role.word);
			zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
							       &invalid_list);
			++vcpu->kvm->stat.mmu_flooded;
			continue;
		}
		page_offset = offset;
		level = sp->role.level;
		npte = 1;
		if (!sp->role.cr4_pae) {
			page_offset <<= 1;	/* 32->64 */
			/*
			 * A 32-bit pde maps 4MB while the shadow pdes map
			 * only 2MB.  So we need to double the offset again
			 * and zap two pdes instead of one.
			 */
			if (level == PT32_ROOT_LEVEL) {
				page_offset &= ~7; /* kill rounding error */
				page_offset <<= 1;
				npte = 2;
			}
			quadrant = page_offset >> PAGE_SHIFT;
			page_offset &= ~PAGE_MASK;
			if (quadrant != sp->role.quadrant)
				continue;
		}
		local_flush = true;
		spte = &sp->spt[page_offset / sizeof(*spte)];
		while (npte--) {
			entry = *spte;
			mmu_pte_write_zap_pte(vcpu, sp, spte);
			if (gentry)
				mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
			if (!remote_flush && need_remote_flush(entry, *spte))
				remote_flush = true;
			++spte;
		}
	}
	mmu_pte_write_flush_tlb(vcpu, zap_page, remote_flush, local_flush);
	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
	kvm_mmu_audit(vcpu, "post pte write");
	spin_unlock(&vcpu->kvm->mmu_lock);
	if (!is_error_pfn(vcpu->arch.update_pte.pfn)) {
		kvm_release_pfn_clean(vcpu->arch.update_pte.pfn);
		vcpu->arch.update_pte.pfn = bad_pfn;
	}
}
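
/*
 * Resolve a guest-virtual address and unprotect the shadow page for
 * the corresponding gfn, so that the guest page is no longer
 * write-protected.
 */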
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa;
	int r;

	if (tdp_enabled)
		return 0;

	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);

	spin_lock(&vcpu->kvm->mmu_lock);
	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
	spin_unlock(&vcpu->kvm->mmu_lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);

void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
	int free_pages;
	LIST_HEAD(invalid_list);

	free_pages = vcpu->kvm->arch.n_free_mmu_pages;
	while (free_pages < KVM_REFILL_PAGES &&
	       !list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
		struct kvm_mmu_page *sp;

		sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
				  struct kvm_mmu_page, link);
		free_pages += kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
						       &invalid_list);
		++vcpu->kvm->stat.mmu_recycled;
	}
	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
}
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
{
	int r;
	enum emulation_result er;

	r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
	if (r < 0)
		goto out;

	if (!r) {
		r = 1;
		goto out;
	}

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		goto out;

	er = emulate_instruction(vcpu, cr2, error_code, 0);

	switch (er) {
	case EMULATE_DONE:
		return 1;
	case EMULATE_DO_MMIO:
		++vcpu->stat.mmio_exits;
		/* fall through */
	case EMULATE_FAIL:
		return 0;
	default:
		BUG();
	}
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);

void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
{
	vcpu->arch.mmu.invlpg(vcpu, gva);
	kvm_mmu_flush_tlb(vcpu);
	++vcpu->stat.invlpg;
}
EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
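
/*
 * tdp_enabled is expected to be set by the vendor module (VMX/SVM) at
 * init time, depending on whether EPT/NPT is usable; it selects
 * between init_kvm_tdp_mmu() and init_kvm_softmmu() above.
 */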
void kvm_enable_tdp(void)
{
	tdp_enabled = true;
}
EXPORT_SYMBOL_GPL(kvm_enable_tdp);

void kvm_disable_tdp(void)
{
	tdp_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_tdp);

static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
	free_page((unsigned long)vcpu->arch.mmu.pae_root);
}
static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
{
	struct page *page;
	int i;

	/*
	 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
	 * Therefore we need to allocate shadow page tables in the first
	 * 4GB of memory, which happens to fit the DMA32 zone.
	 */
	page = alloc_page(GFP_KERNEL | __GFP_DMA32);
	if (!page)
		return -ENOMEM;

	vcpu->arch.mmu.pae_root = page_address(page);
	for (i = 0; i < 4; ++i)
		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;

	return 0;
}
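
/*
 * Per-vcpu MMU setup: kvm_mmu_create() allocates the pae_root page
 * used for 32-bit/PAE shadow roots, kvm_mmu_setup() installs the
 * initial paging context via init_kvm_mmu(), and kvm_mmu_destroy()
 * tears both down again.
 */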
int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

	return alloc_mmu_pages(vcpu);
}

int kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

	return init_kvm_mmu(vcpu);
}

void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
	destroy_kvm_mmu(vcpu);
	free_mmu_pages(vcpu);
	mmu_free_memory_caches(vcpu);
}
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
{
	struct kvm_mmu_page *sp;

	list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
		int i;
		u64 *pt;

		if (!test_bit(slot, sp->slot_bitmap))
			continue;

		pt = sp->spt;
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
			/* avoid RMW */
			if (is_writable_pte(pt[i]))
				pt[i] &= ~PT_WRITABLE_MASK;
	}
	kvm_flush_remote_tlbs(kvm);
}

void kvm_mmu_zap_all(struct kvm *kvm)
{
	struct kvm_mmu_page *sp, *node;
	LIST_HEAD(invalid_list);

	spin_lock(&kvm->mmu_lock);
restart:
	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
		if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
			goto restart;

	kvm_mmu_commit_zap_page(kvm, &invalid_list);
	spin_unlock(&kvm->mmu_lock);
}
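
/*
 * Slab-shrinker integration: under memory pressure the shrinker walks
 * vm_list and zaps shadow pages from one VM at a time, returning the
 * total number of allocated-but-not-yet-freed shadow pages as its
 * cache size estimate.
 */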
static int kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
					       struct list_head *invalid_list)
{
	struct kvm_mmu_page *page;

	page = container_of(kvm->arch.active_mmu_pages.prev,
			    struct kvm_mmu_page, link);
	return kvm_mmu_prepare_zap_page(kvm, page, invalid_list);
}

static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
{
	struct kvm *kvm;
	struct kvm *kvm_freed = NULL;
	int cache_count = 0;

	spin_lock(&kvm_lock);

	list_for_each_entry(kvm, &vm_list, vm_list) {
		int npages, idx, freed_pages;
		LIST_HEAD(invalid_list);

		idx = srcu_read_lock(&kvm->srcu);
		spin_lock(&kvm->mmu_lock);
		npages = kvm->arch.n_alloc_mmu_pages -
			 kvm->arch.n_free_mmu_pages;
		cache_count += npages;
		if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
			freed_pages = kvm_mmu_remove_some_alloc_mmu_pages(kvm,
							  &invalid_list);
			cache_count -= freed_pages;
			kvm_freed = kvm;
		}
		nr_to_scan--;

		kvm_mmu_commit_zap_page(kvm, &invalid_list);
		spin_unlock(&kvm->mmu_lock);
		srcu_read_unlock(&kvm->srcu, idx);
	}
	if (kvm_freed)
		list_move_tail(&kvm_freed->vm_list, &vm_list);

	spin_unlock(&kvm_lock);

	return cache_count;
}

static struct shrinker mmu_shrinker = {
	.shrink = mmu_shrink,
	.seeks = DEFAULT_SEEKS * 10,
};
static void mmu_destroy_caches(void)
{
	if (pte_chain_cache)
		kmem_cache_destroy(pte_chain_cache);
	if (rmap_desc_cache)
		kmem_cache_destroy(rmap_desc_cache);
	if (mmu_page_header_cache)
		kmem_cache_destroy(mmu_page_header_cache);
}

void kvm_mmu_module_exit(void)
{
	mmu_destroy_caches();
	unregister_shrinker(&mmu_shrinker);
}

int kvm_mmu_module_init(void)
{
	pte_chain_cache = kmem_cache_create("kvm_pte_chain",
					    sizeof(struct kvm_pte_chain),
					    0, 0, NULL);
	if (!pte_chain_cache)
		goto nomem;
	rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
					    sizeof(struct kvm_rmap_desc),
					    0, 0, NULL);
	if (!rmap_desc_cache)
		goto nomem;

	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
						  sizeof(struct kvm_mmu_page),
						  0, 0, NULL);
	if (!mmu_page_header_cache)
		goto nomem;

	register_shrinker(&mmu_shrinker);

	return 0;

nomem:
	mmu_destroy_caches();
	return -ENOMEM;
}
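
/*
 * The shadow page budget is a fixed fraction (KVM_PERMILLE_MMU_PAGES
 * per thousand) of the guest's memory slot pages, with a floor of
 * KVM_MIN_ALLOC_MMU_PAGES.  For example, assuming the permille value
 * is 20, a 4GB guest (1048576 4K pages) would be allowed
 * 1048576 * 20 / 1000 = 20971 shadow pages.
 */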
/*
 * Calculate mmu pages needed for kvm.
 */
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
{
	int i;
	unsigned int nr_mmu_pages;
	unsigned int nr_pages = 0;
	struct kvm_memslots *slots;

	slots = kvm_memslots(kvm);

	for (i = 0; i < slots->nmemslots; i++)
		nr_pages += slots->memslots[i].npages;

	nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
	nr_mmu_pages = max(nr_mmu_pages,
			   (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);

	return nr_mmu_pages;
}
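
/*
 * Paravirtual MMU operations: the guest queues KVM_MMU_OP_* records
 * in a buffer, kvm_pv_mmu_op() copies that buffer in, and the helpers
 * below consume it one record at a time (write a pte, flush the TLB,
 * or release a shadowed page table).
 */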
static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer,
				unsigned len)
{
	if (len > buffer->len)
		return NULL;
	return buffer->ptr;
}

static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer,
				unsigned len)
{
	void *ret;

	ret = pv_mmu_peek_buffer(buffer, len);
	if (!ret)
		return ret;
	buffer->ptr += len;
	buffer->len -= len;
	buffer->processed += len;
	return ret;
}

static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
			    gpa_t addr, gpa_t value)
{
	int bytes = 8;
	int r;

	if (!is_long_mode(vcpu) && !is_pae(vcpu))
		bytes = 4;

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	if (!emulator_write_phys(vcpu, addr, &value, bytes))
		return -EFAULT;

	return 1;
}

static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
	(void)kvm_set_cr3(vcpu, vcpu->arch.cr3);
	return 1;
}

static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr)
{
	spin_lock(&vcpu->kvm->mmu_lock);
	mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT);
	spin_unlock(&vcpu->kvm->mmu_lock);
	return 1;
}
static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu,
			     struct kvm_pv_mmu_op_buffer *buffer)
{
	struct kvm_mmu_op_header *header;

	header = pv_mmu_peek_buffer(buffer, sizeof *header);
	if (!header)
		return 0;
	switch (header->op) {
	case KVM_MMU_OP_WRITE_PTE: {
		struct kvm_mmu_op_write_pte *wpte;

		wpte = pv_mmu_read_buffer(buffer, sizeof *wpte);
		if (!wpte)
			return 0;
		return kvm_pv_mmu_write(vcpu, wpte->pte_phys,
					wpte->pte_val);
	}
	case KVM_MMU_OP_FLUSH_TLB: {
		struct kvm_mmu_op_flush_tlb *ftlb;

		ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb);
		if (!ftlb)
			return 0;
		return kvm_pv_mmu_flush_tlb(vcpu);
	}
	case KVM_MMU_OP_RELEASE_PT: {
		struct kvm_mmu_op_release_pt *rpt;

		rpt = pv_mmu_read_buffer(buffer, sizeof *rpt);
		if (!rpt)
			return 0;
		return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys);
	}
	default: return 0;
	}
}

int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
		  gpa_t addr, unsigned long *ret)
{
	int r;
	struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer;

	buffer->ptr = buffer->buf;
	buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf);
	buffer->processed = 0;

	r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len);
	if (r)
		goto out;

	while (buffer->len) {
		r = kvm_pv_mmu_op_one(vcpu, buffer);
		if (r < 0)
			goto out;
		if (r == 0)
			break;
	}

	r = 1;
out:
	*ret = buffer->processed;
	return r;
}
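
/*
 * Walk the shadow page table for @addr under mmu_lock and copy the
 * spte found at each level into @sptes, returning the number of
 * levels recorded.
 */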
int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
{
	struct kvm_shadow_walk_iterator iterator;
	int nr_sptes = 0;

	spin_lock(&vcpu->kvm->mmu_lock);
	for_each_shadow_entry(vcpu, addr, iterator) {
		sptes[iterator.level-1] = *iterator.sptep;
		nr_sptes++;
		if (!is_shadow_present_pte(*iterator.sptep))
			break;
	}
	spin_unlock(&vcpu->kvm->mmu_lock);

	return nr_sptes;
}
EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy);
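
/*
 * The remainder of this file is the optional AUDIT machinery: with
 * AUDIT defined, kvm_mmu_audit() cross-checks shadow mappings, rmaps
 * and write protection against the guest state at selected points
 * (see the "pre/post pte write" calls above).
 */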
#ifdef AUDIT

static const char *audit_msg;

static gva_t canonicalize(gva_t gva)
{
#ifdef CONFIG_X86_64
	gva = (long long)(gva << 16) >> 16;
#endif
	return gva;
}

typedef void (*inspect_spte_fn) (struct kvm *kvm, u64 *sptep);

static void __mmu_spte_walk(struct kvm *kvm, struct kvm_mmu_page *sp,
			    inspect_spte_fn fn)
{
	int i;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		u64 ent = sp->spt[i];

		if (is_shadow_present_pte(ent)) {
			if (!is_last_spte(ent, sp->role.level)) {
				struct kvm_mmu_page *child;
				child = page_header(ent & PT64_BASE_ADDR_MASK);
				__mmu_spte_walk(kvm, child, fn);
			} else
				fn(kvm, &sp->spt[i]);
		}
	}
}

static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
{
	int i;
	struct kvm_mmu_page *sp;

	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return;
	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;
		sp = page_header(root);
		__mmu_spte_walk(vcpu->kvm, sp, fn);
		return;
	}
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		if (root && VALID_PAGE(root)) {
			root &= PT64_BASE_ADDR_MASK;
			sp = page_header(root);
			__mmu_spte_walk(vcpu->kvm, sp, fn);
		}
	}
}
static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
				gva_t va, int level)
{
	u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
	int i;
	gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
		u64 ent = pt[i];

		if (ent == shadow_trap_nonpresent_pte)
			continue;

		va = canonicalize(va);
		if (is_shadow_present_pte(ent) && !is_last_spte(ent, level))
			audit_mappings_page(vcpu, ent, va, level - 1);
		else {
			gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, va, NULL);
			gfn_t gfn = gpa >> PAGE_SHIFT;
			pfn_t pfn = gfn_to_pfn(vcpu->kvm, gfn);
			hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT;

			if (is_error_pfn(pfn)) {
				kvm_release_pfn_clean(pfn);
				continue;
			}

			if (is_shadow_present_pte(ent)
			    && (ent & PT64_BASE_ADDR_MASK) != hpa)
				printk(KERN_ERR "xx audit error: (%s) levels %d"
				       " gva %lx gpa %llx hpa %llx ent %llx %d\n",
				       audit_msg, vcpu->arch.mmu.root_level,
				       va, gpa, hpa, ent,
				       is_shadow_present_pte(ent));
			else if (ent == shadow_notrap_nonpresent_pte
				 && !is_error_hpa(hpa))
				printk(KERN_ERR "audit: (%s) notrap shadow,"
				       " valid guest gva %lx\n", audit_msg, va);
			kvm_release_pfn_clean(pfn);
		}
	}
}

static void audit_mappings(struct kvm_vcpu *vcpu)
{
	unsigned i;

	if (vcpu->arch.mmu.root_level == 4)
		audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
	else
		for (i = 0; i < 4; ++i)
			if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
				audit_mappings_page(vcpu,
						    vcpu->arch.mmu.pae_root[i],
						    i << 30, 2);
}
static int count_rmaps(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_memslots *slots;
	int nmaps = 0;
	int i, j, k, idx;

	idx = srcu_read_lock(&kvm->srcu);
	slots = kvm_memslots(kvm);
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *m = &slots->memslots[i];
		struct kvm_rmap_desc *d;

		for (j = 0; j < m->npages; ++j) {
			unsigned long *rmapp = &m->rmap[j];

			if (!*rmapp)
				continue;
			if (!(*rmapp & 1)) {
				++nmaps;
				continue;
			}
			d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
			while (d) {
				for (k = 0; k < RMAP_EXT; ++k)
					if (d->sptes[k])
						++nmaps;
					else
						break;
				d = d->more;
			}
		}
	}
	srcu_read_unlock(&kvm->srcu, idx);
	return nmaps;
}

void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
{
	unsigned long *rmapp;
	struct kvm_mmu_page *rev_sp;
	gfn_t gfn;

	if (is_writable_pte(*sptep)) {
		rev_sp = page_header(__pa(sptep));
		gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);

		if (!gfn_to_memslot(kvm, gfn)) {
			if (!printk_ratelimit())
				return;
			printk(KERN_ERR "%s: no memslot for gfn %ld\n",
			       audit_msg, gfn);
			printk(KERN_ERR "%s: index %ld of sp (gfn=%lx)\n",
			       audit_msg, (long int)(sptep - rev_sp->spt),
			       rev_sp->gfn);
			dump_stack();
			return;
		}

		rmapp = gfn_to_rmap(kvm, gfn, rev_sp->role.level);
		if (!*rmapp) {
			if (!printk_ratelimit())
				return;
			printk(KERN_ERR "%s: no rmap for writable spte %llx\n",
			       audit_msg, *sptep);
			dump_stack();
		}
	}
}
void audit_writable_sptes_have_rmaps(struct kvm_vcpu *vcpu)
{
	mmu_spte_walk(vcpu, inspect_spte_has_rmap);
}

static void check_writable_mappings_rmap(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *sp;
	int i;

	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
		u64 *pt = sp->spt;

		if (sp->role.level != PT_PAGE_TABLE_LEVEL)
			continue;

		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
			u64 ent = pt[i];

			if (!(ent & PT_PRESENT_MASK))
				continue;
			if (!is_writable_pte(ent))
				continue;
			inspect_spte_has_rmap(vcpu->kvm, &pt[i]);
		}
	}
}

static void audit_rmap(struct kvm_vcpu *vcpu)
{
	check_writable_mappings_rmap(vcpu);
	count_rmaps(vcpu);
}

static void audit_write_protection(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *sp;
	struct kvm_memory_slot *slot;
	unsigned long *rmapp;
	u64 *spte;
	gfn_t gfn;

	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
		if (sp->role.direct)
			continue;
		if (sp->unsync)
			continue;

		gfn = unalias_gfn(vcpu->kvm, sp->gfn);
		slot = gfn_to_memslot_unaliased(vcpu->kvm, sp->gfn);
		rmapp = &slot->rmap[gfn - slot->base_gfn];

		spte = rmap_next(vcpu->kvm, rmapp, NULL);
		while (spte) {
			if (is_writable_pte(*spte))
				printk(KERN_ERR "%s: (%s) shadow page has "
				       "writable mappings: gfn %lx role %x\n",
				       __func__, audit_msg, sp->gfn,
				       sp->role.word);
			spte = rmap_next(vcpu->kvm, rmapp, spte);
		}
	}
}

static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
{
	int olddbg = dbg;

	dbg = 0;
	audit_msg = msg;
	audit_rmap(vcpu);
	audit_write_protection(vcpu);
	if (strcmp("pre pte write", audit_msg) != 0)
		audit_mappings(vcpu);
	audit_writable_sptes_have_rmaps(vcpu);
	dbg = olddbg;
}

#endif