/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Yaniv Kamay <yaniv@qumranet.com>
 * Avi Kivity <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
#include "kvm_cache_regs.h"

#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/hugetlb.h>
#include <linux/compiler.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include <asm/cmpxchg.h>

/*
 * When setting this variable to true it enables Two-Dimensional-Paging
 * where the hardware walks 2 page tables:
 * 1. the guest-virtual to guest-physical
 * 2. while doing 1. it walks guest-physical to host-physical
 * If the hardware supports that we don't need to do shadow paging.
 */
bool tdp_enabled = false;
	AUDIT_POST_PAGE_FAULT,

char *audit_point_name[] = {

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)

#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)

module_param(dbg, bool, 0644);

static int oos_shadow = 1;
module_param(oos_shadow, bool, 0644);

#define ASSERT(x) do { } while (0)

		printk(KERN_WARNING "assertion failed %s:%d: %s\n", \
		       __FILE__, __LINE__, #x); \

#define PTE_PREFETCH_NUM 8
#define PT_FIRST_AVAIL_BITS_SHIFT 9
#define PT64_SECOND_AVAIL_BITS_SHIFT 52

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_INDEX(address, level)\
	(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
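/*
 * For example, with 4K pages (PAGE_SHIFT == 12) PT64_LEVEL_SHIFT(1) == 12 and
 * PT64_LEVEL_SHIFT(2) == 21, so PT64_INDEX(addr, 2) extracts bits 21..29 of
 * the address: the 9-bit index into the level-2 shadow page table.
 */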
#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LVL_OFFSET_MASK(level) \
	(PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT32_LEVEL_BITS))) - 1))

#define PT32_INDEX(address, level)\
	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))

#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#define PT64_DIR_BASE_ADDR_MASK \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
#define PT64_LVL_ADDR_MASK(level) \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT64_LEVEL_BITS))) - 1))
#define PT64_LVL_OFFSET_MASK(level) \
	(PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT64_LEVEL_BITS))) - 1))

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
#define PT32_LVL_ADDR_MASK(level) \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
					    * PT32_LEVEL_BITS))) - 1))

#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \

#define PTE_LIST_EXT 4

#define ACC_EXEC_MASK    1
#define ACC_WRITE_MASK   PT_WRITABLE_MASK
#define ACC_USER_MASK    PT_USER_MASK
#define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)

#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS
#include "mmutrace.h"

#define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)

#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)

struct pte_list_desc {
	u64 *sptes[PTE_LIST_EXT];
	struct pte_list_desc *more;
};

struct kvm_shadow_walk_iterator {

#define for_each_shadow_entry(_vcpu, _addr, _walker)    \
	for (shadow_walk_init(&(_walker), _vcpu, _addr);	\
	     shadow_walk_okay(&(_walker));			\
	     shadow_walk_next(&(_walker)))
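/*
 * Typical use, sketched here for illustration (real callers such as
 * __direct_map() appear later in this file):
 *
 *	struct kvm_shadow_walk_iterator it;
 *
 *	for_each_shadow_entry(vcpu, addr, it) {
 *		// it.sptep points at the shadow PTE for 'addr' at it.level
 *	}
 */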
static struct kmem_cache *pte_list_desc_cache;
static struct kmem_cache *mmu_page_header_cache;
static struct percpu_counter kvm_total_used_mmu_pages;

static u64 __read_mostly shadow_nx_mask;
static u64 __read_mostly shadow_x_mask;	/* mutually exclusive with nx_mask */
static u64 __read_mostly shadow_user_mask;
static u64 __read_mostly shadow_accessed_mask;
static u64 __read_mostly shadow_dirty_mask;

static inline u64 rsvd_bits(int s, int e)
	return ((1ULL << (e - s + 1)) - 1) << s;
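/*
 * For example, rsvd_bits(52, 62) yields a mask with bits 52..62 set, the
 * usual way callers describe a reserved-bit range inside a PTE.
 */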
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
		u64 dirty_mask, u64 nx_mask, u64 x_mask)
	shadow_user_mask = user_mask;
	shadow_accessed_mask = accessed_mask;
	shadow_dirty_mask = dirty_mask;
	shadow_nx_mask = nx_mask;
	shadow_x_mask = x_mask;

EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
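/*
 * The vendor modules use this hook to describe their PTE format to the
 * common MMU code; with EPT enabled, for instance, the Intel module is
 * expected to pass the EPT read/write/execute bits here rather than the
 * legacy PT_* bits, and an accessed_mask of 0 is what later makes this file
 * emulate the accessed bit (see kvm_age_rmapp()).
 */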
static int is_cpuid_PSE36(void)

static int is_nx(struct kvm_vcpu *vcpu)
	return vcpu->arch.efer & EFER_NX;

static int is_shadow_present_pte(u64 pte)
	return pte & PT_PRESENT_MASK;

static int is_large_pte(u64 pte)
	return pte & PT_PAGE_SIZE_MASK;

static int is_dirty_gpte(unsigned long pte)
	return pte & PT_DIRTY_MASK;

static int is_rmap_spte(u64 pte)
	return is_shadow_present_pte(pte);

static int is_last_spte(u64 pte, int level)
	if (level == PT_PAGE_TABLE_LEVEL)
	if (is_large_pte(pte))

static pfn_t spte_to_pfn(u64 pte)
	return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;

static gfn_t pse36_gfn_delta(u32 gpte)
	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

	return (gpte & PT32_DIR_PSE36_MASK) << shift;

static void __set_spte(u64 *sptep, u64 spte)

static void __update_clear_spte_fast(u64 *sptep, u64 spte)

static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
	return xchg(sptep, spte);

static void __set_spte(u64 *sptep, u64 spte)
	union split_spte *ssptep, sspte;

	ssptep = (union split_spte *)sptep;
	sspte = (union split_spte)spte;

	ssptep->spte_high = sspte.spte_high;

	/*
	 * If we map the spte from nonpresent to present, we should store
	 * the high bits first and then set the present bit, so the CPU
	 * cannot fetch this spte while we are setting it.
	 */

	ssptep->spte_low = sspte.spte_low;
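/*
 * On 32-bit hosts a 64-bit spte is written as two 32-bit halves; writing the
 * half that holds the present bit last keeps the hardware from walking a
 * half-written entry.  The converse order is used below when tearing an
 * spte down.
 */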
static void __update_clear_spte_fast(u64 *sptep, u64 spte)
	union split_spte *ssptep, sspte;

	ssptep = (union split_spte *)sptep;
	sspte = (union split_spte)spte;

	ssptep->spte_low = sspte.spte_low;

	/*
	 * If we map the spte from present to nonpresent, we should clear
	 * the present bit first to avoid the vcpu fetching the old high bits.
	 */

	ssptep->spte_high = sspte.spte_high;

static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
	union split_spte *ssptep, sspte, orig;

	ssptep = (union split_spte *)sptep;
	sspte = (union split_spte)spte;

	/* xchg acts as a barrier before the setting of the high bits */
	orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
	orig.spte_high = ssptep->spte_high = sspte.spte_high;

static bool spte_has_volatile_bits(u64 spte)
	if (!shadow_accessed_mask)

	if (!is_shadow_present_pte(spte))

	if ((spte & shadow_accessed_mask) &&
	      (!is_writable_pte(spte) || (spte & shadow_dirty_mask)))

static bool spte_is_bit_cleared(u64 old_spte, u64 new_spte, u64 bit_mask)
	return (old_spte & bit_mask) && !(new_spte & bit_mask);
/* Rules for using mmu_spte_set:
 * Set the sptep from nonpresent to present.
 * Note: the sptep being assigned *must* be either not present
 * or in a state where the hardware will not attempt to update
 * the spte.
 */
static void mmu_spte_set(u64 *sptep, u64 new_spte)
	WARN_ON(is_shadow_present_pte(*sptep));
	__set_spte(sptep, new_spte);

/* Rules for using mmu_spte_update:
 * Update the state bits; this means the mapped pfn is not changed.
 */
static void mmu_spte_update(u64 *sptep, u64 new_spte)
	u64 mask, old_spte = *sptep;

	WARN_ON(!is_rmap_spte(new_spte));

	if (!is_shadow_present_pte(old_spte))
		return mmu_spte_set(sptep, new_spte);

	new_spte |= old_spte & shadow_dirty_mask;

	mask = shadow_accessed_mask;
	if (is_writable_pte(old_spte))
		mask |= shadow_dirty_mask;

	if (!spte_has_volatile_bits(old_spte) || (new_spte & mask) == mask)
		__update_clear_spte_fast(sptep, new_spte);

		old_spte = __update_clear_spte_slow(sptep, new_spte);

	if (!shadow_accessed_mask)

	if (spte_is_bit_cleared(old_spte, new_spte, shadow_accessed_mask))
		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
	if (spte_is_bit_cleared(old_spte, new_spte, shadow_dirty_mask))
		kvm_set_pfn_dirty(spte_to_pfn(old_spte));

/*
 * Rules for using mmu_spte_clear_track_bits:
 * It sets the sptep from present to nonpresent, and tracks the
 * state bits; it is used to clear the last-level sptep.
 */
static int mmu_spte_clear_track_bits(u64 *sptep)
	u64 old_spte = *sptep;

	if (!spte_has_volatile_bits(old_spte))
		__update_clear_spte_fast(sptep, 0ull);

		old_spte = __update_clear_spte_slow(sptep, 0ull);

	if (!is_rmap_spte(old_spte))

	pfn = spte_to_pfn(old_spte);
	if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
		kvm_set_pfn_accessed(pfn);
	if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
		kvm_set_pfn_dirty(pfn);

/*
 * Rules for using mmu_spte_clear_no_track:
 * Directly clear the spte without caring about the state bits of
 * the sptep; it is used to set the upper-level spte.
 */
static void mmu_spte_clear_no_track(u64 *sptep)
	__update_clear_spte_fast(sptep, 0ull);
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  struct kmem_cache *base_cache, int min)
	if (cache->nobjs >= min)
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
		cache->objects[cache->nobjs++] = obj;

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc,
				  struct kmem_cache *cache)
	kmem_cache_free(cache, mc->objects[--mc->nobjs]);

static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
	if (cache->nobjs >= min)
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		page = (void *)__get_free_page(GFP_KERNEL);
		cache->objects[cache->nobjs++] = page;

static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
	free_page((unsigned long)mc->objects[--mc->nobjs]);

static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
	r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
				   pte_list_desc_cache, 8 + PTE_PREFETCH_NUM);
	r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
	r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
				   mmu_page_header_cache, 4);

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
	mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
				pte_list_desc_cache);
	mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
	mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache,
				mmu_page_header_cache);

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
	p = mc->objects[--mc->nobjs];

static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu)
	return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache,
				      sizeof(struct pte_list_desc));

static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
	kmem_cache_free(pte_list_desc_cache, pte_list_desc);
static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
	if (!sp->role.direct)
		return sp->gfns[index];

	return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));

static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
	BUG_ON(gfn != kvm_mmu_page_get_gfn(sp, index));
	sp->gfns[index] = gfn;

/*
 * Return the pointer to the large page information for a given gfn,
 * handling slots that are not large page aligned.
 */
static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
					      struct kvm_memory_slot *slot,
	idx = (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
	      (slot->base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
	return &slot->lpage_info[level - 2][idx];
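/*
 * For example, at the 2MB level (PT_DIRECTORY_LEVEL) the gfn shift is 9, so
 * gfns are grouped 512 at a time and 'idx' is the offset of that group from
 * the first (possibly unaligned) group covered by the memslot.
 */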
static void account_shadowed(struct kvm *kvm, gfn_t gfn)
	struct kvm_memory_slot *slot;
	struct kvm_lpage_info *linfo;

	slot = gfn_to_memslot(kvm, gfn);
	for (i = PT_DIRECTORY_LEVEL;
	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
		linfo = lpage_info_slot(gfn, slot, i);
		linfo->write_count += 1;
	kvm->arch.indirect_shadow_pages++;

static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
	struct kvm_memory_slot *slot;
	struct kvm_lpage_info *linfo;

	slot = gfn_to_memslot(kvm, gfn);
	for (i = PT_DIRECTORY_LEVEL;
	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
		linfo = lpage_info_slot(gfn, slot, i);
		linfo->write_count -= 1;
		WARN_ON(linfo->write_count < 0);
	kvm->arch.indirect_shadow_pages--;

static int has_wrprotected_page(struct kvm *kvm,
	struct kvm_memory_slot *slot;
	struct kvm_lpage_info *linfo;

	slot = gfn_to_memslot(kvm, gfn);
		linfo = lpage_info_slot(gfn, slot, level);
		return linfo->write_count;

static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
	unsigned long page_size;

	page_size = kvm_host_page_size(kvm, gfn);

	for (i = PT_PAGE_TABLE_LEVEL;
	     i < (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES); ++i) {
		if (page_size >= KVM_HPAGE_SIZE(i))

static struct kvm_memory_slot *
gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
	struct kvm_memory_slot *slot;

	slot = gfn_to_memslot(vcpu->kvm, gfn);
	if (!slot || slot->flags & KVM_MEMSLOT_INVALID ||
	      (no_dirty_log && slot->dirty_bitmap))

static bool mapping_level_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t large_gfn)
	return !gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true);

static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
	int host_level, level, max_level;

	host_level = host_mapping_level(vcpu->kvm, large_gfn);

	if (host_level == PT_PAGE_TABLE_LEVEL)

	max_level = kvm_x86_ops->get_lpage_level() < host_level ?
		kvm_x86_ops->get_lpage_level() : host_level;

	for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level)
		if (has_wrprotected_page(vcpu->kvm, large_gfn, level))

/*
 * Pte mapping structures:
 *
 * If pte_list bit zero is zero, then pte_list points to the spte.
 *
 * If pte_list bit zero is one, then (pte_list & ~1) points to a struct
 * pte_list_desc containing more mappings.
 *
 * Returns the number of pte entries before the spte was added or zero if
 * the spte was not added.
 */
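/*
 * Concretely: a gfn mapped by a single spte stores that spte pointer
 * directly in its rmap slot; once a second spte shows up, the slot becomes
 * (pointer to a pte_list_desc | 1), and each desc holds up to PTE_LIST_EXT
 * sptes plus a 'more' link to further overflow descriptors.
 */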
static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
			unsigned long *pte_list)
	struct pte_list_desc *desc;

		rmap_printk("pte_list_add: %p %llx 0->1\n", spte, *spte);
		*pte_list = (unsigned long)spte;
	} else if (!(*pte_list & 1)) {
		rmap_printk("pte_list_add: %p %llx 1->many\n", spte, *spte);
		desc = mmu_alloc_pte_list_desc(vcpu);
		desc->sptes[0] = (u64 *)*pte_list;
		desc->sptes[1] = spte;
		*pte_list = (unsigned long)desc | 1;
		rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte);
		desc = (struct pte_list_desc *)(*pte_list & ~1ul);
		while (desc->sptes[PTE_LIST_EXT-1] && desc->more) {
			count += PTE_LIST_EXT;
		if (desc->sptes[PTE_LIST_EXT-1]) {
			desc->more = mmu_alloc_pte_list_desc(vcpu);
		for (i = 0; desc->sptes[i]; ++i)
		desc->sptes[i] = spte;

static u64 *pte_list_next(unsigned long *pte_list, u64 *spte)
	struct pte_list_desc *desc;

	else if (!(*pte_list & 1)) {
			return (u64 *)*pte_list;

	desc = (struct pte_list_desc *)(*pte_list & ~1ul);
		for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i) {
			if (prev_spte == spte)
				return desc->sptes[i];
			prev_spte = desc->sptes[i];

pte_list_desc_remove_entry(unsigned long *pte_list, struct pte_list_desc *desc,
			   int i, struct pte_list_desc *prev_desc)
	for (j = PTE_LIST_EXT - 1; !desc->sptes[j] && j > i; --j)
	desc->sptes[i] = desc->sptes[j];
	desc->sptes[j] = NULL;
	if (!prev_desc && !desc->more)
		*pte_list = (unsigned long)desc->sptes[0];
		prev_desc->more = desc->more;
		*pte_list = (unsigned long)desc->more | 1;
	mmu_free_pte_list_desc(desc);

static void pte_list_remove(u64 *spte, unsigned long *pte_list)
	struct pte_list_desc *desc;
	struct pte_list_desc *prev_desc;

		printk(KERN_ERR "pte_list_remove: %p 0->BUG\n", spte);
	} else if (!(*pte_list & 1)) {
		rmap_printk("pte_list_remove: %p 1->0\n", spte);
		if ((u64 *)*pte_list != spte) {
			printk(KERN_ERR "pte_list_remove: %p 1->BUG\n", spte);
		rmap_printk("pte_list_remove: %p many->many\n", spte);
		desc = (struct pte_list_desc *)(*pte_list & ~1ul);
		for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i)
			if (desc->sptes[i] == spte) {
				pte_list_desc_remove_entry(pte_list,
		pr_err("pte_list_remove: %p many->many\n", spte);

typedef void (*pte_list_walk_fn) (u64 *spte);
static void pte_list_walk(unsigned long *pte_list, pte_list_walk_fn fn)
	struct pte_list_desc *desc;

	if (!(*pte_list & 1))
		return fn((u64 *)*pte_list);

	desc = (struct pte_list_desc *)(*pte_list & ~1ul);
		for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i)

/*
 * Take gfn and return the reverse mapping to it.
 */
static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
	struct kvm_memory_slot *slot;
	struct kvm_lpage_info *linfo;

	slot = gfn_to_memslot(kvm, gfn);
	if (likely(level == PT_PAGE_TABLE_LEVEL))
		return &slot->rmap[gfn - slot->base_gfn];

	linfo = lpage_info_slot(gfn, slot, level);

	return &linfo->rmap_pde;

static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
	struct kvm_mmu_page *sp;
	unsigned long *rmapp;

	sp = page_header(__pa(spte));
	kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
	return pte_list_add(vcpu, spte, rmapp);

static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
	return pte_list_next(rmapp, spte);

static void rmap_remove(struct kvm *kvm, u64 *spte)
	struct kvm_mmu_page *sp;
	unsigned long *rmapp;

	sp = page_header(__pa(spte));
	gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
	rmapp = gfn_to_rmap(kvm, gfn, sp->role.level);
	pte_list_remove(spte, rmapp);

static void drop_spte(struct kvm *kvm, u64 *sptep)
	if (mmu_spte_clear_track_bits(sptep))
		rmap_remove(kvm, sptep);
static int rmap_write_protect(struct kvm *kvm, u64 gfn)
	unsigned long *rmapp;
	int i, write_protected = 0;

	rmapp = gfn_to_rmap(kvm, gfn, PT_PAGE_TABLE_LEVEL);

	spte = rmap_next(kvm, rmapp, NULL);
		BUG_ON(!(*spte & PT_PRESENT_MASK));
		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
		if (is_writable_pte(*spte)) {
			mmu_spte_update(spte, *spte & ~PT_WRITABLE_MASK);
		spte = rmap_next(kvm, rmapp, spte);

	/* check for huge page mappings */
	for (i = PT_DIRECTORY_LEVEL;
	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
		rmapp = gfn_to_rmap(kvm, gfn, i);
		spte = rmap_next(kvm, rmapp, NULL);
			BUG_ON(!(*spte & PT_PRESENT_MASK));
			BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
			pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
			if (is_writable_pte(*spte)) {
				drop_spte(kvm, spte);
			spte = rmap_next(kvm, rmapp, spte);

	return write_protected;

static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
	int need_tlb_flush = 0;

	while ((spte = rmap_next(kvm, rmapp, NULL))) {
		BUG_ON(!(*spte & PT_PRESENT_MASK));
		rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
		drop_spte(kvm, spte);

	return need_tlb_flush;

static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
	pte_t *ptep = (pte_t *)data;

	WARN_ON(pte_huge(*ptep));
	new_pfn = pte_pfn(*ptep);
	spte = rmap_next(kvm, rmapp, NULL);
		BUG_ON(!is_shadow_present_pte(*spte));
		rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", spte, *spte);
		if (pte_write(*ptep)) {
			drop_spte(kvm, spte);
			spte = rmap_next(kvm, rmapp, NULL);
			new_spte = *spte &~ (PT64_BASE_ADDR_MASK);
			new_spte |= (u64)new_pfn << PAGE_SHIFT;

			new_spte &= ~PT_WRITABLE_MASK;
			new_spte &= ~SPTE_HOST_WRITEABLE;
			new_spte &= ~shadow_accessed_mask;
			mmu_spte_clear_track_bits(spte);
			mmu_spte_set(spte, new_spte);
			spte = rmap_next(kvm, rmapp, spte);

		kvm_flush_remote_tlbs(kvm);
static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
			  int (*handler)(struct kvm *kvm, unsigned long *rmapp,
	struct kvm_memslots *slots;

	slots = kvm_memslots(kvm);

	for (i = 0; i < slots->nmemslots; i++) {
		struct kvm_memory_slot *memslot = &slots->memslots[i];
		unsigned long start = memslot->userspace_addr;

		end = start + (memslot->npages << PAGE_SHIFT);
		if (hva >= start && hva < end) {
			gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
			gfn_t gfn = memslot->base_gfn + gfn_offset;

			ret = handler(kvm, &memslot->rmap[gfn_offset], data);

			for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) {
				struct kvm_lpage_info *linfo;

				linfo = lpage_info_slot(gfn, memslot,
							PT_DIRECTORY_LEVEL + j);
				ret |= handler(kvm, &linfo->rmap_pde, data);
			trace_kvm_age_page(hva, memslot, ret);

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
	return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp);

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
	kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);

static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
	/*
	 * Emulate the accessed bit for EPT, by checking if this page has
	 * an EPT mapping, and clearing it if it does. On the next access,
	 * a new EPT mapping will be established.
	 * This has some overhead, but not as much as the cost of swapping
	 * out actively used pages or breaking up actively used hugepages.
	 */
	if (!shadow_accessed_mask)
		return kvm_unmap_rmapp(kvm, rmapp, data);

	spte = rmap_next(kvm, rmapp, NULL);
		BUG_ON(!(_spte & PT_PRESENT_MASK));
		_young = _spte & PT_ACCESSED_MASK;
			clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
		spte = rmap_next(kvm, rmapp, spte);
static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
	/*
	 * If there's no access bit in the secondary pte set by the
	 * hardware it's up to gup-fast/gup to set the access bit in
	 * the primary pte or in the page structure.
	 */
	if (!shadow_accessed_mask)

	spte = rmap_next(kvm, rmapp, NULL);
		BUG_ON(!(_spte & PT_PRESENT_MASK));
		young = _spte & PT_ACCESSED_MASK;
		spte = rmap_next(kvm, rmapp, spte);

#define RMAP_RECYCLE_THRESHOLD 1000

static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
	unsigned long *rmapp;
	struct kvm_mmu_page *sp;

	sp = page_header(__pa(spte));

	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);

	kvm_unmap_rmapp(vcpu->kvm, rmapp, 0);
	kvm_flush_remote_tlbs(vcpu->kvm);

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
	return kvm_handle_hva(kvm, hva, 0, kvm_age_rmapp);

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
	return kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp);

static int is_empty_shadow_page(u64 *spt)
	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
		if (is_shadow_present_pte(*pos)) {
			printk(KERN_ERR "%s: %p %llx\n", __func__,

/*
 * This value is the sum of all of the kvm instances'
 * kvm->arch.n_used_mmu_pages values.  We need a global,
 * aggregate version in order to make the slab shrinker
 * work.
 */
static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr)
	kvm->arch.n_used_mmu_pages += nr;
	percpu_counter_add(&kvm_total_used_mmu_pages, nr);

/*
 * Remove the sp from the shadow page cache: after calling this,
 * we can no longer find the sp in the cache, but the shadow
 * page table is still valid.
 * It should be called under the protection of the mmu lock.
 */
static void kvm_mmu_isolate_page(struct kvm_mmu_page *sp)
	ASSERT(is_empty_shadow_page(sp->spt));
	hlist_del(&sp->hash_link);
	if (!sp->role.direct)
		free_page((unsigned long)sp->gfns);

/*
 * Free the shadow page table and the sp; this can be done outside
 * the protection of the mmu lock.
 */
static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
	list_del(&sp->link);
	free_page((unsigned long)sp->spt);
	kmem_cache_free(mmu_page_header_cache, sp);

static unsigned kvm_page_table_hashfn(gfn_t gfn)
	return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
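/*
 * i.e. the gfn is folded into one of 2^KVM_MMU_HASH_SHIFT buckets of
 * kvm->arch.mmu_page_hash; the for_each_gfn_* walkers below iterate the
 * resulting hash chain.
 */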
static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp, u64 *parent_pte)
	pte_list_add(vcpu, parent_pte, &sp->parent_ptes);

static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
	pte_list_remove(parent_pte, &sp->parent_ptes);

static void drop_parent_pte(struct kvm_mmu_page *sp,
	mmu_page_remove_parent_pte(sp, parent_pte);
	mmu_spte_clear_no_track(parent_pte);

static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
					       u64 *parent_pte, int direct)
	struct kvm_mmu_page *sp;

	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache,
	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
		sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache,
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
	bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
	sp->parent_ptes = 0;
	mmu_page_add_parent_pte(vcpu, sp, parent_pte);
	kvm_mod_used_mmu_pages(vcpu->kvm, +1);

static void mark_unsync(u64 *spte);
static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
	pte_list_walk(&sp->parent_ptes, mark_unsync);

static void mark_unsync(u64 *spte)
	struct kvm_mmu_page *sp;

	sp = page_header(__pa(spte));
	index = spte - sp->spt;
	if (__test_and_set_bit(index, sp->unsync_child_bitmap))
	if (sp->unsync_children++)
	kvm_mmu_mark_parents_unsync(sp);

static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
			       struct kvm_mmu_page *sp)

static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)

static void nonpaging_update_pte(struct kvm_vcpu *vcpu,
				 struct kvm_mmu_page *sp, u64 *spte,

#define KVM_PAGE_ARRAY_NR 16

struct kvm_mmu_pages {
	struct mmu_page_and_offset {
		struct kvm_mmu_page *sp;
	} page[KVM_PAGE_ARRAY_NR];
#define for_each_unsync_children(bitmap, idx)		\
	for (idx = find_first_bit(bitmap, 512);		\
	     idx = find_next_bit(bitmap, 512, idx+1))

static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
		for (i = 0; i < pvec->nr; i++)
			if (pvec->page[i].sp == sp)

	pvec->page[pvec->nr].sp = sp;
	pvec->page[pvec->nr].idx = idx;
	return (pvec->nr == KVM_PAGE_ARRAY_NR);

static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
			     struct kvm_mmu_pages *pvec)
	int i, ret, nr_unsync_leaf = 0;

	for_each_unsync_children(sp->unsync_child_bitmap, i) {
		struct kvm_mmu_page *child;
		u64 ent = sp->spt[i];

		if (!is_shadow_present_pte(ent) || is_large_pte(ent))
			goto clear_child_bitmap;

		child = page_header(ent & PT64_BASE_ADDR_MASK);

		if (child->unsync_children) {
			if (mmu_pages_add(pvec, child, i))

			ret = __mmu_unsync_walk(child, pvec);
				goto clear_child_bitmap;
				nr_unsync_leaf += ret;
		} else if (child->unsync) {
			if (mmu_pages_add(pvec, child, i))
				goto clear_child_bitmap;

		__clear_bit(i, sp->unsync_child_bitmap);
		sp->unsync_children--;
		WARN_ON((int)sp->unsync_children < 0);

	return nr_unsync_leaf;

static int mmu_unsync_walk(struct kvm_mmu_page *sp,
			   struct kvm_mmu_pages *pvec)
	if (!sp->unsync_children)

	mmu_pages_add(pvec, sp, 0);
	return __mmu_unsync_walk(sp, pvec);

static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
	WARN_ON(!sp->unsync);
	trace_kvm_mmu_sync_page(sp);
	--kvm->stat.mmu_unsync;

static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				    struct list_head *invalid_list);
static void kvm_mmu_commit_zap_page(struct kvm *kvm,
				    struct list_head *invalid_list);

#define for_each_gfn_sp(kvm, sp, gfn, pos)				\
  hlist_for_each_entry(sp, pos,						\
   &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link)	\
	if ((sp)->gfn != (gfn)) {} else

#define for_each_gfn_indirect_valid_sp(kvm, sp, gfn, pos)		\
  hlist_for_each_entry(sp, pos,						\
   &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link)	\
		if ((sp)->gfn != (gfn) || (sp)->role.direct ||		\
			(sp)->role.invalid) {} else
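/*
 * The trailing "{} else" turns the gfn/role checks into a guard, so a caller
 * can attach a body directly, e.g. (handle() standing in for whatever per-sp
 * action the caller needs):
 *
 *	for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node)
 *		handle(sp);
 */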
/* @sp->gfn should be write-protected at the call site */
static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			   struct list_head *invalid_list, bool clear_unsync)
	if (sp->role.cr4_pae != !!is_pae(vcpu)) {
		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);

		kvm_unlink_unsync_page(vcpu->kvm, sp);

	if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);

	kvm_mmu_flush_tlb(vcpu);

static int kvm_sync_page_transient(struct kvm_vcpu *vcpu,
				   struct kvm_mmu_page *sp)
	LIST_HEAD(invalid_list);

	ret = __kvm_sync_page(vcpu, sp, &invalid_list, false);
		kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);

static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			 struct list_head *invalid_list)
	return __kvm_sync_page(vcpu, sp, invalid_list, true);

/* @gfn should be write-protected at the call site */
static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
	struct kvm_mmu_page *s;
	struct hlist_node *node;
	LIST_HEAD(invalid_list);

	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
		WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
		kvm_unlink_unsync_page(vcpu->kvm, s);
		if ((s->role.cr4_pae != !!is_pae(vcpu)) ||
			(vcpu->arch.mmu.sync_page(vcpu, s))) {
			kvm_mmu_prepare_zap_page(vcpu->kvm, s, &invalid_list);

	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
		kvm_mmu_flush_tlb(vcpu);

struct mmu_page_path {
	struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1];
	unsigned int idx[PT64_ROOT_LEVEL-1];

#define for_each_sp(pvec, sp, parents, i)			\
		for (i = mmu_pages_next(&pvec, &parents, -1),	\
			sp = pvec.page[i].sp;			\
			i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});	\
			i = mmu_pages_next(&pvec, &parents, i))
static int mmu_pages_next(struct kvm_mmu_pages *pvec,
			  struct mmu_page_path *parents,
	for (n = i+1; n < pvec->nr; n++) {
		struct kvm_mmu_page *sp = pvec->page[n].sp;

		if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
			parents->idx[0] = pvec->page[n].idx;

		parents->parent[sp->role.level-2] = sp;
		parents->idx[sp->role.level-1] = pvec->page[n].idx;

static void mmu_pages_clear_parents(struct mmu_page_path *parents)
	struct kvm_mmu_page *sp;
	unsigned int level = 0;

		unsigned int idx = parents->idx[level];

		sp = parents->parent[level];

		--sp->unsync_children;
		WARN_ON((int)sp->unsync_children < 0);
		__clear_bit(idx, sp->unsync_child_bitmap);
	} while (level < PT64_ROOT_LEVEL-1 && !sp->unsync_children);

static void kvm_mmu_pages_init(struct kvm_mmu_page *parent,
			       struct mmu_page_path *parents,
			       struct kvm_mmu_pages *pvec)
	parents->parent[parent->role.level-1] = NULL;

static void mmu_sync_children(struct kvm_vcpu *vcpu,
			      struct kvm_mmu_page *parent)
	struct kvm_mmu_page *sp;
	struct mmu_page_path parents;
	struct kvm_mmu_pages pages;
	LIST_HEAD(invalid_list);

	kvm_mmu_pages_init(parent, &parents, &pages);
	while (mmu_unsync_walk(parent, &pages)) {
		for_each_sp(pages, sp, parents, i)
			protected |= rmap_write_protect(vcpu->kvm, sp->gfn);

			kvm_flush_remote_tlbs(vcpu->kvm);

		for_each_sp(pages, sp, parents, i) {
			kvm_sync_page(vcpu, sp, &invalid_list);
			mmu_pages_clear_parents(&parents);
		kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
		cond_resched_lock(&vcpu->kvm->mmu_lock);
		kvm_mmu_pages_init(parent, &parents, &pages);

static void init_shadow_page_table(struct kvm_mmu_page *sp)
	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
	union kvm_mmu_page_role role;
	struct kvm_mmu_page *sp;
	struct hlist_node *node;
	bool need_sync = false;

	role = vcpu->arch.mmu.base_role;
	role.direct = direct;
	role.access = access;
	if (!vcpu->arch.mmu.direct_map
	    && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
		role.quadrant = quadrant;
	for_each_gfn_sp(vcpu->kvm, sp, gfn, node) {
		if (!need_sync && sp->unsync)

		if (sp->role.word != role.word)

		if (sp->unsync && kvm_sync_page_transient(vcpu, sp))

		mmu_page_add_parent_pte(vcpu, sp, parent_pte);
		if (sp->unsync_children) {
			kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
			kvm_mmu_mark_parents_unsync(sp);
		} else if (sp->unsync)
			kvm_mmu_mark_parents_unsync(sp);

		trace_kvm_mmu_get_page(sp, false);
	++vcpu->kvm->stat.mmu_cache_miss;
	sp = kvm_mmu_alloc_page(vcpu, parent_pte, direct);
	hlist_add_head(&sp->hash_link,
		&vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]);
		if (rmap_write_protect(vcpu->kvm, gfn))
			kvm_flush_remote_tlbs(vcpu->kvm);
		if (level > PT_PAGE_TABLE_LEVEL && need_sync)
			kvm_sync_pages(vcpu, gfn);

		account_shadowed(vcpu->kvm, gfn);
	init_shadow_page_table(sp);
	trace_kvm_mmu_get_page(sp, true);
static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
			     struct kvm_vcpu *vcpu, u64 addr)
	iterator->addr = addr;
	iterator->shadow_addr = vcpu->arch.mmu.root_hpa;
	iterator->level = vcpu->arch.mmu.shadow_root_level;

	if (iterator->level == PT64_ROOT_LEVEL &&
	    vcpu->arch.mmu.root_level < PT64_ROOT_LEVEL &&
	    !vcpu->arch.mmu.direct_map)

	if (iterator->level == PT32E_ROOT_LEVEL) {
		iterator->shadow_addr
			= vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
		iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
		if (!iterator->shadow_addr)
			iterator->level = 0;

static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
	if (iterator->level < PT_PAGE_TABLE_LEVEL)

	iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
	iterator->sptep	= ((u64 *)__va(iterator->shadow_addr)) + iterator->index;

static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
	if (is_last_spte(*iterator->sptep, iterator->level)) {
		iterator->level = 0;

	iterator->shadow_addr = *iterator->sptep & PT64_BASE_ADDR_MASK;

static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp)
	spte = __pa(sp->spt)
		| PT_PRESENT_MASK | PT_ACCESSED_MASK
		| PT_WRITABLE_MASK | PT_USER_MASK;
	mmu_spte_set(sptep, spte);
static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
	if (is_large_pte(*sptep)) {
		drop_spte(vcpu->kvm, sptep);
		kvm_flush_remote_tlbs(vcpu->kvm);

static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
				 unsigned direct_access)
	if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
		struct kvm_mmu_page *child;

		/*
		 * For the direct sp, if the guest pte's dirty bit
		 * changed from clean to dirty, it will corrupt the
		 * sp's access: allow writable in the read-only sp,
		 * so we should update the spte at this point to get
		 * a new sp with the correct access.
		 */
		child = page_header(*sptep & PT64_BASE_ADDR_MASK);
		if (child->role.access == direct_access)

		drop_parent_pte(child, sptep);
		kvm_flush_remote_tlbs(vcpu->kvm);

static void mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
	struct kvm_mmu_page *child;

	if (is_shadow_present_pte(pte)) {
		if (is_last_spte(pte, sp->role.level))
			drop_spte(kvm, spte);
			child = page_header(pte & PT64_BASE_ADDR_MASK);
			drop_parent_pte(child, spte);

	if (is_large_pte(pte))

static void kvm_mmu_page_unlink_children(struct kvm *kvm,
					  struct kvm_mmu_page *sp)
	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
		mmu_page_zap_pte(kvm, sp, sp->spt + i);
static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
	mmu_page_remove_parent_pte(sp, parent_pte);

static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		vcpu->arch.last_pte_updated = NULL;

static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
	while ((parent_pte = pte_list_next(&sp->parent_ptes, NULL)))
		drop_parent_pte(sp, parent_pte);

static int mmu_zap_unsync_children(struct kvm *kvm,
				   struct kvm_mmu_page *parent,
				   struct list_head *invalid_list)
	struct mmu_page_path parents;
	struct kvm_mmu_pages pages;

	if (parent->role.level == PT_PAGE_TABLE_LEVEL)

	kvm_mmu_pages_init(parent, &parents, &pages);
	while (mmu_unsync_walk(parent, &pages)) {
		struct kvm_mmu_page *sp;

		for_each_sp(pages, sp, parents, i) {
			kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
			mmu_pages_clear_parents(&parents);
		kvm_mmu_pages_init(parent, &parents, &pages);

static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				    struct list_head *invalid_list)
	trace_kvm_mmu_prepare_zap_page(sp);
	++kvm->stat.mmu_shadow_zapped;
	ret = mmu_zap_unsync_children(kvm, sp, invalid_list);
	kvm_mmu_page_unlink_children(kvm, sp);
	kvm_mmu_unlink_parents(kvm, sp);
	if (!sp->role.invalid && !sp->role.direct)
		unaccount_shadowed(kvm, sp->gfn);
		kvm_unlink_unsync_page(kvm, sp);
	if (!sp->root_count) {
		list_move(&sp->link, invalid_list);
		kvm_mod_used_mmu_pages(kvm, -1);
		list_move(&sp->link, &kvm->arch.active_mmu_pages);
		kvm_reload_remote_mmus(kvm);

	sp->role.invalid = 1;
	kvm_mmu_reset_last_pte_updated(kvm);

static void kvm_mmu_commit_zap_page(struct kvm *kvm,
				    struct list_head *invalid_list)
	struct kvm_mmu_page *sp;

	if (list_empty(invalid_list))

	kvm_flush_remote_tlbs(kvm);

		sp = list_first_entry(invalid_list, struct kvm_mmu_page, link);
		WARN_ON(!sp->role.invalid || sp->root_count);
		kvm_mmu_isolate_page(sp);
		kvm_mmu_free_page(sp);
	} while (!list_empty(invalid_list));
/*
 * Changing the number of mmu pages allocated to the vm.
 * Note: if goal_nr_mmu_pages is too small, you will get a deadlock.
 */
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
	LIST_HEAD(invalid_list);

	/*
	 * If we set the number of mmu pages to be smaller than the
	 * number of active pages, we must free some mmu pages before we
	 * can change it.
	 */

	if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
		while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages &&
			!list_empty(&kvm->arch.active_mmu_pages)) {
			struct kvm_mmu_page *page;

			page = container_of(kvm->arch.active_mmu_pages.prev,
					    struct kvm_mmu_page, link);
			kvm_mmu_prepare_zap_page(kvm, page, &invalid_list);
		kvm_mmu_commit_zap_page(kvm, &invalid_list);
		goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;

	kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
	struct kvm_mmu_page *sp;
	struct hlist_node *node;
	LIST_HEAD(invalid_list);

	pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
	for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
		pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
	kvm_mmu_commit_zap_page(kvm, &invalid_list);

static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
	struct kvm_mmu_page *sp;
	struct hlist_node *node;
	LIST_HEAD(invalid_list);

	for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
		pgprintk("%s: zap %llx %x\n",
			 __func__, gfn, sp->role.word);
		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
	kvm_mmu_commit_zap_page(kvm, &invalid_list);

static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
	int slot = memslot_id(kvm, gfn);
	struct kvm_mmu_page *sp = page_header(__pa(pte));

	__set_bit(slot, sp->slot_bitmap);
/*
 * The function is based on mtrr_type_lookup() in
 * arch/x86/kernel/cpu/mtrr/generic.c
 */
static int get_mtrr_type(struct mtrr_state_type *mtrr_state,
	u8 prev_match, curr_match;
	int num_var_ranges = KVM_NR_VAR_MTRR;

	if (!mtrr_state->enabled)

	/* Make end inclusive, instead of exclusive */

	/* Look in fixed ranges. Just return the type as per start */
	if (mtrr_state->have_fixed && (start < 0x100000)) {
		if (start < 0x80000) {
			idx += (start >> 16);
			return mtrr_state->fixed_ranges[idx];
		} else if (start < 0xC0000) {
			idx += ((start - 0x80000) >> 14);
			return mtrr_state->fixed_ranges[idx];
		} else if (start < 0x1000000) {
			idx += ((start - 0xC0000) >> 12);
			return mtrr_state->fixed_ranges[idx];
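	/*
	 * Worked example for the fixed ranges (assuming the usual x86 MTRR
	 * layout): start == 0xA0000 lands in the 16K fixed ranges, so idx is
	 * advanced by (0xA0000 - 0x80000) >> 14 == 8 entries past the start
	 * of that block of fixed_ranges[], i.e. the first entry backed by
	 * MTRRfix16K_A0000.
	 */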
	/*
	 * Look in variable ranges.
	 * Look for multiple ranges matching this address and pick the type
	 * as per MTRR precedence.
	 */
	if (!(mtrr_state->enabled & 2))
		return mtrr_state->def_type;

	for (i = 0; i < num_var_ranges; ++i) {
		unsigned short start_state, end_state;

		if (!(mtrr_state->var_ranges[i].mask_lo & (1 << 11)))

		base = (((u64)mtrr_state->var_ranges[i].base_hi) << 32) +
		       (mtrr_state->var_ranges[i].base_lo & PAGE_MASK);
		mask = (((u64)mtrr_state->var_ranges[i].mask_hi) << 32) +
		       (mtrr_state->var_ranges[i].mask_lo & PAGE_MASK);

		start_state = ((start & mask) == (base & mask));
		end_state = ((end & mask) == (base & mask));
		if (start_state != end_state)

		if ((start & mask) != (base & mask))

		curr_match = mtrr_state->var_ranges[i].base_lo & 0xff;
		if (prev_match == 0xFF) {
			prev_match = curr_match;

		if (prev_match == MTRR_TYPE_UNCACHABLE ||
		    curr_match == MTRR_TYPE_UNCACHABLE)
			return MTRR_TYPE_UNCACHABLE;

		if ((prev_match == MTRR_TYPE_WRBACK &&
		     curr_match == MTRR_TYPE_WRTHROUGH) ||
		    (prev_match == MTRR_TYPE_WRTHROUGH &&
		     curr_match == MTRR_TYPE_WRBACK)) {
			prev_match = MTRR_TYPE_WRTHROUGH;
			curr_match = MTRR_TYPE_WRTHROUGH;

		if (prev_match != curr_match)
			return MTRR_TYPE_UNCACHABLE;

	if (prev_match != 0xFF)

	return mtrr_state->def_type;

u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
	mtrr = get_mtrr_type(&vcpu->arch.mtrr_state, gfn << PAGE_SHIFT,
			     (gfn << PAGE_SHIFT) + PAGE_SIZE);
	if (mtrr == 0xfe || mtrr == 0xff)
		mtrr = MTRR_TYPE_WRBACK;

EXPORT_SYMBOL_GPL(kvm_get_guest_memory_type);
static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
	trace_kvm_mmu_unsync_page(sp);
	++vcpu->kvm->stat.mmu_unsync;

	kvm_mmu_mark_parents_unsync(sp);

static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
	struct kvm_mmu_page *s;
	struct hlist_node *node;

	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
		WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
		__kvm_unsync_page(vcpu, s);

static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
	struct kvm_mmu_page *s;
	struct hlist_node *node;
	bool need_unsync = false;

	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
		if (s->role.level != PT_PAGE_TABLE_LEVEL)

		if (!need_unsync && !s->unsync) {
		kvm_unsync_pages(vcpu, gfn);
static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
		    unsigned pte_access, int user_fault,
		    int write_fault, int level,
		    gfn_t gfn, pfn_t pfn, bool speculative,
		    bool can_unsync, bool host_writable)
	u64 spte, entry = *sptep;

	/*
	 * We don't set the accessed bit, since we sometimes want to see
	 * whether the guest actually used the pte (in order to detect
	 * demand paging).
	 */
	spte = PT_PRESENT_MASK;
		spte |= shadow_accessed_mask;

	if (pte_access & ACC_EXEC_MASK)
		spte |= shadow_x_mask;
		spte |= shadow_nx_mask;
	if (pte_access & ACC_USER_MASK)
		spte |= shadow_user_mask;
	if (level > PT_PAGE_TABLE_LEVEL)
		spte |= PT_PAGE_SIZE_MASK;
		spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
			kvm_is_mmio_pfn(pfn));

		spte |= SPTE_HOST_WRITEABLE;
		pte_access &= ~ACC_WRITE_MASK;

	spte |= (u64)pfn << PAGE_SHIFT;

	if ((pte_access & ACC_WRITE_MASK)
	    || (!vcpu->arch.mmu.direct_map && write_fault
		&& !is_write_protection(vcpu) && !user_fault)) {

		if (level > PT_PAGE_TABLE_LEVEL &&
		    has_wrprotected_page(vcpu->kvm, gfn, level)) {
			drop_spte(vcpu->kvm, sptep);

		spte |= PT_WRITABLE_MASK;

		if (!vcpu->arch.mmu.direct_map
		    && !(pte_access & ACC_WRITE_MASK)) {
			spte &= ~PT_USER_MASK;
			/*
			 * If we converted a user page to a kernel page
			 * so that the kernel can write to it when cr0.wp=0,
			 * then we should prevent the kernel from executing it
			 * if SMEP is enabled.
			 */
			if (kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
				spte |= PT64_NX_MASK;

		/*
		 * Optimization: for pte sync, if the spte was writable the
		 * hash lookup is unnecessary (and expensive). Write protection
		 * is the responsibility of mmu_get_page / kvm_sync_page.
		 * The same reasoning can be applied to dirty page accounting.
		 */
		if (!can_unsync && is_writable_pte(*sptep))

		if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
			pgprintk("%s: found shadow page for %llx, marking ro\n",
			pte_access &= ~ACC_WRITE_MASK;
			if (is_writable_pte(spte))
				spte &= ~PT_WRITABLE_MASK;

	if (pte_access & ACC_WRITE_MASK)
		mark_page_dirty(vcpu->kvm, gfn);

	mmu_spte_update(sptep, spte);
	/*
	 * If we overwrite a writable spte with a read-only one we
	 * should flush remote TLBs. Otherwise rmap_write_protect
	 * will find a read-only spte, even though the writable spte
	 * might be cached on a CPU's TLB.
	 */
	if (is_writable_pte(entry) && !is_writable_pte(*sptep))
		kvm_flush_remote_tlbs(vcpu->kvm);
static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
			 unsigned pt_access, unsigned pte_access,
			 int user_fault, int write_fault,
			 int *emulate, int level, gfn_t gfn,
			 pfn_t pfn, bool speculative,
	int was_rmapped = 0;

	pgprintk("%s: spte %llx access %x write_fault %d"
		 " user_fault %d gfn %llx\n",
		 __func__, *sptep, pt_access,
		 write_fault, user_fault, gfn);

	if (is_rmap_spte(*sptep)) {
		/*
		 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
		 * the parent of the now unreachable PTE.
		 */
		if (level > PT_PAGE_TABLE_LEVEL &&
		    !is_large_pte(*sptep)) {
			struct kvm_mmu_page *child;

			child = page_header(pte & PT64_BASE_ADDR_MASK);
			drop_parent_pte(child, sptep);
			kvm_flush_remote_tlbs(vcpu->kvm);
		} else if (pfn != spte_to_pfn(*sptep)) {
			pgprintk("hfn old %llx new %llx\n",
				 spte_to_pfn(*sptep), pfn);
			drop_spte(vcpu->kvm, sptep);
			kvm_flush_remote_tlbs(vcpu->kvm);

	if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault,
		      level, gfn, pfn, speculative, true,
			kvm_mmu_flush_tlb(vcpu);

	pgprintk("%s: setting spte %llx\n", __func__, *sptep);
	pgprintk("instantiating %s PTE (%s) at %llx (%llx) addr %p\n",
		 is_large_pte(*sptep)? "2MB" : "4kB",
		 *sptep & PT_PRESENT_MASK ?"RW":"R", gfn,
	if (!was_rmapped && is_large_pte(*sptep))
		++vcpu->kvm->stat.lpages;

	if (is_shadow_present_pte(*sptep)) {
		page_header_update_slot(vcpu->kvm, sptep, gfn);
			rmap_count = rmap_add(vcpu, sptep, gfn);
			if (rmap_count > RMAP_RECYCLE_THRESHOLD)
				rmap_recycle(vcpu, sptep, gfn);
	kvm_release_pfn_clean(pfn);
		vcpu->arch.last_pte_updated = sptep;
		vcpu->arch.last_pte_gfn = gfn;

static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)

static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
	struct kvm_memory_slot *slot;

	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log);
		get_page(fault_page);
		return page_to_pfn(fault_page);

	hva = gfn_to_hva_memslot(slot, gfn);

	return hva_to_pfn_atomic(vcpu->kvm, hva);
static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp,
				    u64 *start, u64 *end)
	struct page *pages[PTE_PREFETCH_NUM];
	unsigned access = sp->role.access;

	gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
	if (!gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK))

	ret = gfn_to_page_many_atomic(vcpu->kvm, gfn, pages, end - start);

	for (i = 0; i < ret; i++, gfn++, start++)
		mmu_set_spte(vcpu, start, ACC_ALL,
			     sp->role.level, gfn,
			     page_to_pfn(pages[i]), true, true);

static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp, u64 *sptep)
	u64 *spte, *start = NULL;

	WARN_ON(!sp->role.direct);

	i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);

	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
		if (is_shadow_present_pte(*spte) || spte == sptep) {
			if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0)

static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
	struct kvm_mmu_page *sp;

	/*
	 * Since there is no accessed bit on EPT, there is no way to
	 * distinguish between actually accessed translations
	 * and prefetched ones, so disable pte prefetch if EPT is
	 * enabled.
	 */
	if (!shadow_accessed_mask)

	sp = page_header(__pa(sptep));
	if (sp->role.level > PT_PAGE_TABLE_LEVEL)

	__direct_pte_prefetch(vcpu, sp, sptep);
static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
			int map_writable, int level, gfn_t gfn, pfn_t pfn,
	struct kvm_shadow_walk_iterator iterator;
	struct kvm_mmu_page *sp;

	for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
		if (iterator.level == level) {
			unsigned pte_access = ACC_ALL;

			mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, pte_access,
				     level, gfn, pfn, prefault, map_writable);
			direct_pte_prefetch(vcpu, iterator.sptep);
			++vcpu->stat.pf_fixed;

		if (!is_shadow_present_pte(*iterator.sptep)) {
			u64 base_addr = iterator.addr;

			base_addr &= PT64_LVL_ADDR_MASK(iterator.level);
			pseudo_gfn = base_addr >> PAGE_SHIFT;
			sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr,
					      1, ACC_ALL, iterator.sptep);
				pgprintk("nonpaging_map: ENOMEM\n");
				kvm_release_pfn_clean(pfn);

			mmu_spte_set(iterator.sptep,
				     | PT_PRESENT_MASK | PT_WRITABLE_MASK
				     | shadow_user_mask | shadow_x_mask
				     | shadow_accessed_mask);

static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk)
	info.si_signo	= SIGBUS;
	info.si_code	= BUS_MCEERR_AR;
	info.si_addr	= (void __user *)address;
	info.si_addr_lsb = PAGE_SHIFT;

	send_sig_info(SIGBUS, &info, tsk);

static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, pfn_t pfn)
	kvm_release_pfn_clean(pfn);
	if (is_hwpoison_pfn(pfn)) {
		kvm_send_hwpoison_signal(gfn_to_hva(vcpu->kvm, gfn), current);
static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
					gfn_t *gfnp, pfn_t *pfnp, int *levelp)
	int level = *levelp;

	/*
	 * Check if it's a transparent hugepage. If this were a
	 * hugetlbfs page, level wouldn't be set to
	 * PT_PAGE_TABLE_LEVEL and there would be no adjustment done.
	 */
	if (!is_error_pfn(pfn) && !kvm_is_mmio_pfn(pfn) &&
	    level == PT_PAGE_TABLE_LEVEL &&
	    PageTransCompound(pfn_to_page(pfn)) &&
	    !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) {
		/*
		 * mmu_notifier_retry was successful and we hold the
		 * mmu_lock here, so the pmd can't become splitting
		 * from under us, and in turn
		 * __split_huge_page_refcount() can't run from under
		 * us and we can safely transfer the refcount from
		 * PG_tail to PG_head as we switch the pfn from tail to
		 * head.
		 */
		*levelp = level = PT_DIRECTORY_LEVEL;
		mask = KVM_PAGES_PER_HPAGE(level) - 1;
		VM_BUG_ON((gfn & mask) != (pfn & mask));
			kvm_release_pfn_clean(pfn);

			if (!get_page_unless_zero(pfn_to_page(pfn)))

static bool mmu_invalid_pfn(pfn_t pfn)
	return unlikely(is_invalid_pfn(pfn) || is_noslot_pfn(pfn));

static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
				pfn_t pfn, unsigned access, int *ret_val)
	/* The pfn is invalid, report the error! */
	if (unlikely(is_invalid_pfn(pfn))) {
		*ret_val = kvm_handle_bad_page(vcpu, gfn, pfn);

	if (unlikely(is_noslot_pfn(pfn))) {
		vcpu_cache_mmio_info(vcpu, gva, gfn, access);
static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
			 gva_t gva, pfn_t *pfn, bool write, bool *writable);

static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn,
	unsigned long mmu_seq;

	force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn);
	if (likely(!force_pt_level)) {
		level = mapping_level(vcpu, gfn);
		/*
		 * This path builds a PAE pagetable - so we can map
		 * 2mb pages at maximum. Therefore check if the level
		 * is larger than that.
		 */
		if (level > PT_DIRECTORY_LEVEL)
			level = PT_DIRECTORY_LEVEL;

		gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
		level = PT_PAGE_TABLE_LEVEL;

	mmu_seq = vcpu->kvm->mmu_notifier_seq;

	if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable))

	if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r))

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq))
	kvm_mmu_free_some_pages(vcpu);
	if (likely(!force_pt_level))
		transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
	r = __direct_map(vcpu, v, write, map_writable, level, gfn, pfn,
	spin_unlock(&vcpu->kvm->mmu_lock);

	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
static void mmu_free_roots(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_mmu_page *sp;
        LIST_HEAD(invalid_list);

        if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
                return;
        spin_lock(&vcpu->kvm->mmu_lock);
        if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL &&
            (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL ||
             vcpu->arch.mmu.direct_map)) {
                hpa_t root = vcpu->arch.mmu.root_hpa;

                sp = page_header(root);
                --sp->root_count;
                if (!sp->root_count && sp->role.invalid) {
                        kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
                        kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
                }
                vcpu->arch.mmu.root_hpa = INVALID_PAGE;
                spin_unlock(&vcpu->kvm->mmu_lock);
                return;
        }
        for (i = 0; i < 4; ++i) {
                hpa_t root = vcpu->arch.mmu.pae_root[i];

                if (root) {
                        root &= PT64_BASE_ADDR_MASK;
                        sp = page_header(root);
                        --sp->root_count;
                        if (!sp->root_count && sp->role.invalid)
                                kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
                                                         &invalid_list);
                }
                vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
        }
        kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
        spin_unlock(&vcpu->kvm->mmu_lock);
        vcpu->arch.mmu.root_hpa = INVALID_PAGE;
}
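/*
 * Root shadow pages are reference counted via sp->root_count; a root
 * dropped above is only zapped once its count reaches zero and its role
 * has been marked invalid, otherwise it stays cached for reuse by
 * kvm_mmu_get_page().
 */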
static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
{
        int ret = 0;

        if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) {
                kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
                ret = 1;
        }

        return ret;
}
static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
{
        struct kvm_mmu_page *sp;
        unsigned i;

        if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
                spin_lock(&vcpu->kvm->mmu_lock);
                kvm_mmu_free_some_pages(vcpu);
                sp = kvm_mmu_get_page(vcpu, 0, 0, PT64_ROOT_LEVEL,
                                      1, ACC_ALL, NULL);
                ++sp->root_count;
                spin_unlock(&vcpu->kvm->mmu_lock);
                vcpu->arch.mmu.root_hpa = __pa(sp->spt);
        } else if (vcpu->arch.mmu.shadow_root_level == PT32E_ROOT_LEVEL) {
                for (i = 0; i < 4; ++i) {
                        hpa_t root = vcpu->arch.mmu.pae_root[i];

                        ASSERT(!VALID_PAGE(root));
                        spin_lock(&vcpu->kvm->mmu_lock);
                        kvm_mmu_free_some_pages(vcpu);
                        sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT),
                                              i << 30,
                                              PT32_ROOT_LEVEL, 1, ACC_ALL,
                                              NULL);
                        root = __pa(sp->spt);
                        ++sp->root_count;
                        spin_unlock(&vcpu->kvm->mmu_lock);
                        vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
                }
                vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
        } else
                BUG();

        return 0;
}
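/*
 * Worked example for the PAE case above: each of the four pae_root
 * entries covers 1GB of guest physical address space, so root i is
 * created for gaddr i << 30 and gfn i << (30 - PAGE_SHIFT); with 4k
 * pages that is a stride of 262144 frames per root.
 */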
static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
{
        struct kvm_mmu_page *sp;
        u64 pdptr, pm_mask;
        gfn_t root_gfn;
        int i;

        root_gfn = vcpu->arch.mmu.get_cr3(vcpu) >> PAGE_SHIFT;

        if (mmu_check_root(vcpu, root_gfn))
                return 1;

        /*
         * Do we shadow a long mode page table? If so we need to
         * write-protect the guests page table root.
         */
        if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
                hpa_t root = vcpu->arch.mmu.root_hpa;

                ASSERT(!VALID_PAGE(root));

                spin_lock(&vcpu->kvm->mmu_lock);
                kvm_mmu_free_some_pages(vcpu);
                sp = kvm_mmu_get_page(vcpu, root_gfn, 0, PT64_ROOT_LEVEL,
                                      0, ACC_ALL, NULL);
                root = __pa(sp->spt);
                ++sp->root_count;
                spin_unlock(&vcpu->kvm->mmu_lock);
                vcpu->arch.mmu.root_hpa = root;
                return 0;
        }

        /*
         * We shadow a 32 bit page table. This may be a legacy 2-level
         * or a PAE 3-level page table. In either case we need to be aware that
         * the shadow page table may be a PAE or a long mode page table.
         */
        pm_mask = PT_PRESENT_MASK;
        if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL)
                pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;

        for (i = 0; i < 4; ++i) {
                hpa_t root = vcpu->arch.mmu.pae_root[i];

                ASSERT(!VALID_PAGE(root));
                if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
                        pdptr = kvm_pdptr_read_mmu(vcpu, &vcpu->arch.mmu, i);
                        if (!is_present_gpte(pdptr)) {
                                vcpu->arch.mmu.pae_root[i] = 0;
                                continue;
                        }
                        root_gfn = pdptr >> PAGE_SHIFT;
                        if (mmu_check_root(vcpu, root_gfn))
                                return 1;
                }
                spin_lock(&vcpu->kvm->mmu_lock);
                kvm_mmu_free_some_pages(vcpu);
                sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
                                      PT32_ROOT_LEVEL, 0,
                                      ACC_ALL, NULL);
                root = __pa(sp->spt);
                ++sp->root_count;
                spin_unlock(&vcpu->kvm->mmu_lock);

                vcpu->arch.mmu.pae_root[i] = root | pm_mask;
        }
        vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);

        /*
         * If we shadow a 32 bit page table with a long mode page
         * table we enter this path.
         */
        if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
                if (vcpu->arch.mmu.lm_root == NULL) {
                        /*
                         * The additional page necessary for this is only
                         * allocated on demand.
                         */
                        u64 *lm_root;

                        lm_root = (void*)get_zeroed_page(GFP_KERNEL);
                        if (lm_root == NULL)
                                return 1;

                        lm_root[0] = __pa(vcpu->arch.mmu.pae_root) | pm_mask;

                        vcpu->arch.mmu.lm_root = lm_root;
                }

                vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.lm_root);
        }

        return 0;
}
static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
{
        if (vcpu->arch.mmu.direct_map)
                return mmu_alloc_direct_roots(vcpu);
        else
                return mmu_alloc_shadow_roots(vcpu);
}
static void mmu_sync_roots(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_mmu_page *sp;

        if (vcpu->arch.mmu.direct_map)
                return;

        if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
                return;

        vcpu_clear_mmio_info(vcpu, ~0ul);
        trace_kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
        if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
                hpa_t root = vcpu->arch.mmu.root_hpa;
                sp = page_header(root);
                mmu_sync_children(vcpu, sp);
                trace_kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
                return;
        }
        for (i = 0; i < 4; ++i) {
                hpa_t root = vcpu->arch.mmu.pae_root[i];

                if (root && VALID_PAGE(root)) {
                        root &= PT64_BASE_ADDR_MASK;
                        sp = page_header(root);
                        mmu_sync_children(vcpu, sp);
                }
        }
        trace_kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
}
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
{
        spin_lock(&vcpu->kvm->mmu_lock);
        mmu_sync_roots(vcpu);
        spin_unlock(&vcpu->kvm->mmu_lock);
}
static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
                                  u32 access, struct x86_exception *exception)
{
        if (exception)
                exception->error_code = 0;
        return vaddr;
}
static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr,
                                         u32 access,
                                         struct x86_exception *exception)
{
        if (exception)
                exception->error_code = 0;
        return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access);
}
static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
                                u32 error_code, bool prefault)
{
        gfn_t gfn;
        int r;

        pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
        r = mmu_topup_memory_caches(vcpu);
        if (r)
                return r;

        ASSERT(vcpu);
        ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));

        gfn = gva >> PAGE_SHIFT;

        return nonpaging_map(vcpu, gva & PAGE_MASK,
                             error_code & PFERR_WRITE_MASK, gfn, prefault);
}
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
{
        struct kvm_arch_async_pf arch;

        arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
        arch.gfn = gfn;
        arch.direct_map = vcpu->arch.mmu.direct_map;
        arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu);

        return kvm_setup_async_pf(vcpu, gva, gfn, &arch);
}
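/*
 * Token layout used above, for illustration: the low 12 bits carry the
 * vcpu id and the upper bits an incrementing per-vcpu counter, e.g.
 * apf.id == 5 on vcpu 3 gives token (5 << 12) | 3 = 0x5003.
 */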
static bool can_do_async_pf(struct kvm_vcpu *vcpu)
{
        if (unlikely(!irqchip_in_kernel(vcpu->kvm) ||
                     kvm_event_needs_reinjection(vcpu)))
                return false;

        return kvm_x86_ops->interrupt_allowed(vcpu);
}
static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
                         gva_t gva, pfn_t *pfn, bool write, bool *writable)
{
        bool async;

        *pfn = gfn_to_pfn_async(vcpu->kvm, gfn, &async, write, writable);

        if (!async)
                return false; /* *pfn has correct page already */

        put_page(pfn_to_page(*pfn));

        if (!prefault && can_do_async_pf(vcpu)) {
                trace_kvm_try_async_get_page(gva, gfn);
                if (kvm_find_async_pf_gfn(vcpu, gfn)) {
                        trace_kvm_async_pf_doublefault(gva, gfn);
                        kvm_make_request(KVM_REQ_APF_HALT, vcpu);
                        return true;
                } else if (kvm_arch_setup_async_pf(vcpu, gva, gfn))
                        return true;
        }

        *pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write, writable);

        return false;
}
static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
                          bool prefault)
{
        pfn_t pfn;
        int r;
        int level;
        int force_pt_level;
        gfn_t gfn = gpa >> PAGE_SHIFT;
        unsigned long mmu_seq;
        int write = error_code & PFERR_WRITE_MASK;
        bool map_writable;

        ASSERT(vcpu);
        ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));

        r = mmu_topup_memory_caches(vcpu);
        if (r)
                return r;

        force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn);
        if (likely(!force_pt_level)) {
                level = mapping_level(vcpu, gfn);
                gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
        } else
                level = PT_PAGE_TABLE_LEVEL;

        mmu_seq = vcpu->kvm->mmu_notifier_seq;
        smp_rmb();

        if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
                return 0;

        if (handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r))
                return r;

        spin_lock(&vcpu->kvm->mmu_lock);
        if (mmu_notifier_retry(vcpu, mmu_seq))
                goto out_unlock;
        kvm_mmu_free_some_pages(vcpu);
        if (likely(!force_pt_level))
                transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
        r = __direct_map(vcpu, gpa, write, map_writable,
                         level, gfn, pfn, prefault);
        spin_unlock(&vcpu->kvm->mmu_lock);

        return r;

out_unlock:
        spin_unlock(&vcpu->kvm->mmu_lock);
        kvm_release_pfn_clean(pfn);
        return 0;
}
static void nonpaging_free(struct kvm_vcpu *vcpu)
{
        mmu_free_roots(vcpu);
}
static int nonpaging_init_context(struct kvm_vcpu *vcpu,
                                  struct kvm_mmu *context)
{
        context->new_cr3 = nonpaging_new_cr3;
        context->page_fault = nonpaging_page_fault;
        context->gva_to_gpa = nonpaging_gva_to_gpa;
        context->free = nonpaging_free;
        context->sync_page = nonpaging_sync_page;
        context->invlpg = nonpaging_invlpg;
        context->update_pte = nonpaging_update_pte;
        context->root_level = 0;
        context->shadow_root_level = PT32E_ROOT_LEVEL;
        context->root_hpa = INVALID_PAGE;
        context->direct_map = true;
        context->nx = false;
        return 0;
}
void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
        ++vcpu->stat.tlb_flush;
        kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
}
static void paging_new_cr3(struct kvm_vcpu *vcpu)
{
        pgprintk("%s: cr3 %lx\n", __func__, kvm_read_cr3(vcpu));
        mmu_free_roots(vcpu);
}
static unsigned long get_cr3(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr3(vcpu);
}
static void inject_page_fault(struct kvm_vcpu *vcpu,
                              struct x86_exception *fault)
{
        vcpu->arch.mmu.inject_page_fault(vcpu, fault);
}
static void paging_free(struct kvm_vcpu *vcpu)
{
        nonpaging_free(vcpu);
}
static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
{
        int bit7;

        bit7 = (gpte >> 7) & 1;
        return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0;
}
#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE
static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
                                  struct kvm_mmu *context,
                                  int level)
{
        int maxphyaddr = cpuid_maxphyaddr(vcpu);
        u64 exb_bit_rsvd = 0;

        if (!context->nx)
                exb_bit_rsvd = rsvd_bits(63, 63);
        switch (level) {
        case PT32_ROOT_LEVEL:
                /* no rsvd bits for 2 level 4K page table entries */
                context->rsvd_bits_mask[0][1] = 0;
                context->rsvd_bits_mask[0][0] = 0;
                context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];

                if (!is_pse(vcpu)) {
                        context->rsvd_bits_mask[1][1] = 0;
                        break;
                }

                if (is_cpuid_PSE36())
                        /* 36bits PSE 4MB page */
                        context->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
                else
                        /* 32 bits PSE 4MB page */
                        context->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
                break;
        case PT32E_ROOT_LEVEL:
                context->rsvd_bits_mask[0][2] =
                        rsvd_bits(maxphyaddr, 63) |
                        rsvd_bits(7, 8) | rsvd_bits(1, 2);      /* PDPTE */
                context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
                        rsvd_bits(maxphyaddr, 62);      /* PDE */
                context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
                        rsvd_bits(maxphyaddr, 62);      /* PTE */
                context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
                        rsvd_bits(maxphyaddr, 62) |
                        rsvd_bits(13, 20);              /* large page */
                context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
                break;
        case PT64_ROOT_LEVEL:
                context->rsvd_bits_mask[0][3] = exb_bit_rsvd |
                        rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
                context->rsvd_bits_mask[0][2] = exb_bit_rsvd |
                        rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
                context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
                        rsvd_bits(maxphyaddr, 51);
                context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
                        rsvd_bits(maxphyaddr, 51);
                context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3];
                context->rsvd_bits_mask[1][2] = exb_bit_rsvd |
                        rsvd_bits(maxphyaddr, 51) |
                        rsvd_bits(13, 29);
                context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
                        rsvd_bits(maxphyaddr, 51) |
                        rsvd_bits(13, 20);              /* large page */
                context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
                break;
        }
}
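/*
 * rsvd_bits(a, b) builds a mask covering bits a..b inclusive.  As an
 * example, with a guest MAXPHYADDR of 40 the PT64 entries above treat
 * physical-address bits 40-51 as reserved; is_rsvd_bits_set() then picks
 * the mask by the page-size bit (bit 7 of the gpte) and by level.
 */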
static int paging64_init_context_common(struct kvm_vcpu *vcpu,
                                        struct kvm_mmu *context,
                                        int level)
{
        context->nx = is_nx(vcpu);

        reset_rsvds_bits_mask(vcpu, context, level);

        ASSERT(is_pae(vcpu));
        context->new_cr3 = paging_new_cr3;
        context->page_fault = paging64_page_fault;
        context->gva_to_gpa = paging64_gva_to_gpa;
        context->sync_page = paging64_sync_page;
        context->invlpg = paging64_invlpg;
        context->update_pte = paging64_update_pte;
        context->free = paging_free;
        context->root_level = level;
        context->shadow_root_level = level;
        context->root_hpa = INVALID_PAGE;
        context->direct_map = false;
        return 0;
}
static int paging64_init_context(struct kvm_vcpu *vcpu,
                                 struct kvm_mmu *context)
{
        return paging64_init_context_common(vcpu, context, PT64_ROOT_LEVEL);
}
static int paging32_init_context(struct kvm_vcpu *vcpu,
                                 struct kvm_mmu *context)
{
        context->nx = false;

        reset_rsvds_bits_mask(vcpu, context, PT32_ROOT_LEVEL);

        context->new_cr3 = paging_new_cr3;
        context->page_fault = paging32_page_fault;
        context->gva_to_gpa = paging32_gva_to_gpa;
        context->free = paging_free;
        context->sync_page = paging32_sync_page;
        context->invlpg = paging32_invlpg;
        context->update_pte = paging32_update_pte;
        context->root_level = PT32_ROOT_LEVEL;
        context->shadow_root_level = PT32E_ROOT_LEVEL;
        context->root_hpa = INVALID_PAGE;
        context->direct_map = false;
        return 0;
}
static int paging32E_init_context(struct kvm_vcpu *vcpu,
                                  struct kvm_mmu *context)
{
        return paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
}
static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
{
        struct kvm_mmu *context = vcpu->arch.walk_mmu;

        context->base_role.word = 0;
        context->new_cr3 = nonpaging_new_cr3;
        context->page_fault = tdp_page_fault;
        context->free = nonpaging_free;
        context->sync_page = nonpaging_sync_page;
        context->invlpg = nonpaging_invlpg;
        context->update_pte = nonpaging_update_pte;
        context->shadow_root_level = kvm_x86_ops->get_tdp_level();
        context->root_hpa = INVALID_PAGE;
        context->direct_map = true;
        context->set_cr3 = kvm_x86_ops->set_tdp_cr3;
        context->get_cr3 = get_cr3;
        context->inject_page_fault = kvm_inject_page_fault;
        context->nx = is_nx(vcpu);

        if (!is_paging(vcpu)) {
                context->nx = false;
                context->gva_to_gpa = nonpaging_gva_to_gpa;
                context->root_level = 0;
        } else if (is_long_mode(vcpu)) {
                context->nx = is_nx(vcpu);
                reset_rsvds_bits_mask(vcpu, context, PT64_ROOT_LEVEL);
                context->gva_to_gpa = paging64_gva_to_gpa;
                context->root_level = PT64_ROOT_LEVEL;
        } else if (is_pae(vcpu)) {
                context->nx = is_nx(vcpu);
                reset_rsvds_bits_mask(vcpu, context, PT32E_ROOT_LEVEL);
                context->gva_to_gpa = paging64_gva_to_gpa;
                context->root_level = PT32E_ROOT_LEVEL;
        } else {
                context->nx = false;
                reset_rsvds_bits_mask(vcpu, context, PT32_ROOT_LEVEL);
                context->gva_to_gpa = paging32_gva_to_gpa;
                context->root_level = PT32_ROOT_LEVEL;
        }

        return 0;
}
int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
{
        int r;
        bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);

        ASSERT(vcpu);
        ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

        if (!is_paging(vcpu))
                r = nonpaging_init_context(vcpu, context);
        else if (is_long_mode(vcpu))
                r = paging64_init_context(vcpu, context);
        else if (is_pae(vcpu))
                r = paging32E_init_context(vcpu, context);
        else
                r = paging32_init_context(vcpu, context);

        vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu);
        vcpu->arch.mmu.base_role.cr0_wp  = is_write_protection(vcpu);
        vcpu->arch.mmu.base_role.smep_andnot_wp
                = smep && !is_write_protection(vcpu);

        return r;
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
{
        int r = kvm_init_shadow_mmu(vcpu, vcpu->arch.walk_mmu);

        vcpu->arch.walk_mmu->set_cr3           = kvm_x86_ops->set_cr3;
        vcpu->arch.walk_mmu->get_cr3           = get_cr3;
        vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;

        return r;
}
static int init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
{
        struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;

        g_context->get_cr3           = get_cr3;
        g_context->inject_page_fault = kvm_inject_page_fault;

        /*
         * Note that arch.mmu.gva_to_gpa translates l2_gva to l1_gpa. The
         * translation of l2_gpa to l1_gpa addresses is done using the
         * arch.nested_mmu.gva_to_gpa function. Basically the gva_to_gpa
         * functions between mmu and nested_mmu are swapped.
         */
        if (!is_paging(vcpu)) {
                g_context->nx = false;
                g_context->root_level = 0;
                g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
        } else if (is_long_mode(vcpu)) {
                g_context->nx = is_nx(vcpu);
                reset_rsvds_bits_mask(vcpu, g_context, PT64_ROOT_LEVEL);
                g_context->root_level = PT64_ROOT_LEVEL;
                g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
        } else if (is_pae(vcpu)) {
                g_context->nx = is_nx(vcpu);
                reset_rsvds_bits_mask(vcpu, g_context, PT32E_ROOT_LEVEL);
                g_context->root_level = PT32E_ROOT_LEVEL;
                g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
        } else {
                g_context->nx = false;
                reset_rsvds_bits_mask(vcpu, g_context, PT32_ROOT_LEVEL);
                g_context->root_level = PT32_ROOT_LEVEL;
                g_context->gva_to_gpa = paging32_gva_to_gpa_nested;
        }

        return 0;
}
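/*
 * Example of the swap described above: the *_gva_to_gpa_nested variants
 * walk the L2 page tables to turn an L2 gva into an L2 gpa and then use
 * translate_gpa() to map every guest-physical access to an L1 gpa, while
 * arch.mmu.gva_to_gpa goes from l2_gva straight to l1_gpa, as noted above.
 */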
static int init_kvm_mmu(struct kvm_vcpu *vcpu)
{
        if (mmu_is_nested(vcpu))
                return init_kvm_nested_mmu(vcpu);
        else if (tdp_enabled)
                return init_kvm_tdp_mmu(vcpu);
        else
                return init_kvm_softmmu(vcpu);
}
static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
{
        ASSERT(vcpu);
        if (VALID_PAGE(vcpu->arch.mmu.root_hpa))
                /* mmu.free() should set root_hpa = INVALID_PAGE */
                vcpu->arch.mmu.free(vcpu);
}
int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
        destroy_kvm_mmu(vcpu);
        return init_kvm_mmu(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
int kvm_mmu_load(struct kvm_vcpu *vcpu)
{
        int r;

        r = mmu_topup_memory_caches(vcpu);
        if (r)
                goto out;
        r = mmu_alloc_roots(vcpu);
        spin_lock(&vcpu->kvm->mmu_lock);
        mmu_sync_roots(vcpu);
        spin_unlock(&vcpu->kvm->mmu_lock);
        if (r)
                goto out;
        /* set_cr3() should ensure TLB has been flushed */
        vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
out:
        return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_load);
void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
        mmu_free_roots(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_mmu_unload);
static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
                                  struct kvm_mmu_page *sp, u64 *spte,
                                  const void *new)
{
        if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
                ++vcpu->kvm->stat.mmu_pde_zapped;
                return;
        }

        ++vcpu->kvm->stat.mmu_pte_updated;
        vcpu->arch.mmu.update_pte(vcpu, sp, spte, new);
}
static bool need_remote_flush(u64 old, u64 new)
{
        if (!is_shadow_present_pte(old))
                return false;
        if (!is_shadow_present_pte(new))
                return true;
        if ((old ^ new) & PT64_BASE_ADDR_MASK)
                return true;
        old ^= PT64_NX_MASK;
        new ^= PT64_NX_MASK;
        return (old & ~new & PT64_PERM_MASK) != 0;
}
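/*
 * Example: if the old spte was present and writable and the new one only
 * drops write permission for the same frame, the expression above yields
 * a non-zero PT_WRITABLE_MASK bit, so other vcpus that may still hold the
 * writable translation in their TLBs must be flushed remotely.
 */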
static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, bool zap_page,
                                    bool remote_flush, bool local_flush)
{
        if (zap_page)
                return;

        if (remote_flush)
                kvm_flush_remote_tlbs(vcpu->kvm);
        else if (local_flush)
                kvm_mmu_flush_tlb(vcpu);
}
static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
{
        u64 *spte = vcpu->arch.last_pte_updated;

        return !!(spte && (*spte & shadow_accessed_mask));
}
static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
{
        u64 *spte = vcpu->arch.last_pte_updated;

        if (spte
            && vcpu->arch.last_pte_gfn == gfn
            && shadow_accessed_mask
            && !(*spte & shadow_accessed_mask)
            && is_shadow_present_pte(*spte))
                set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
}
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                       const u8 *new, int bytes,
                       bool guest_initiated)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        union kvm_mmu_page_role mask = { .word = 0 };
        struct kvm_mmu_page *sp;
        struct hlist_node *node;
        LIST_HEAD(invalid_list);
        u64 entry, gentry, *spte;
        unsigned pte_size, page_offset, misaligned, quadrant, offset;
        int level, npte, invlpg_counter, r, flooded = 0;
        bool remote_flush, local_flush, zap_page;

        /*
         * If we don't have indirect shadow pages, it means no page is
         * write-protected, so we can exit simply.
         */
        if (!ACCESS_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
                return;

        zap_page = remote_flush = local_flush = false;
        offset = offset_in_page(gpa);

        pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);

        invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);

        /*
         * Assume that the pte write on a page table of the same type
         * as the current vcpu paging mode since we update the sptes only
         * when they have the same mode.
         */
        if ((is_pae(vcpu) && bytes == 4) || !new) {
                /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
                if (is_pae(vcpu)) {
                        gpa &= ~(gpa_t)7;
                        bytes = 8;
                }
                r = kvm_read_guest(vcpu->kvm, gpa, &gentry, min(bytes, 8));
                if (r)
                        gentry = 0;
                new = (const u8 *)&gentry;
        }

        switch (bytes) {
        case 4:
                gentry = *(const u32 *)new;
                break;
        case 8:
                gentry = *(const u64 *)new;
                break;
        default:
                gentry = 0;
                break;
        }

        spin_lock(&vcpu->kvm->mmu_lock);
        if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
                gentry = 0;
        kvm_mmu_free_some_pages(vcpu);
        ++vcpu->kvm->stat.mmu_pte_write;
        trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
        if (guest_initiated) {
                kvm_mmu_access_page(vcpu, gfn);
                if (gfn == vcpu->arch.last_pt_write_gfn
                    && !last_updated_pte_accessed(vcpu)) {
                        ++vcpu->arch.last_pt_write_count;
                        if (vcpu->arch.last_pt_write_count >= 3)
                                flooded = 1;
                } else {
                        vcpu->arch.last_pt_write_gfn = gfn;
                        vcpu->arch.last_pt_write_count = 1;
                        vcpu->arch.last_pte_updated = NULL;
                }
        }

        mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
        for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) {
                pte_size = sp->role.cr4_pae ? 8 : 4;
                misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
                misaligned |= bytes < 4;
                if (misaligned || flooded) {
                        /*
                         * Misaligned accesses are too much trouble to fix
                         * up; also, they usually indicate a page is not used
                         * as a page table.
                         *
                         * If we're seeing too many writes to a page,
                         * it may no longer be a page table, or we may be
                         * forking, in which case it is better to unmap the
                         * page.
                         */
                        pgprintk("misaligned: gpa %llx bytes %d role %x\n",
                                 gpa, bytes, sp->role.word);
                        zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
                                                     &invalid_list);
                        ++vcpu->kvm->stat.mmu_flooded;
                        continue;
                }
                page_offset = offset;
                level = sp->role.level;
                npte = 1;
                if (!sp->role.cr4_pae) {
                        page_offset <<= 1;      /* 32->64 */
                        /*
                         * A 32-bit pde maps 4MB while the shadow pdes map
                         * only 2MB.  So we need to double the offset again
                         * and zap two pdes instead of one.
                         */
                        if (level == PT32_ROOT_LEVEL) {
                                page_offset &= ~7; /* kill rounding error */
                                page_offset <<= 1;
                                npte = 2;
                        }
                        quadrant = page_offset >> PAGE_SHIFT;
                        page_offset &= ~PAGE_MASK;
                        if (quadrant != sp->role.quadrant)
                                continue;
                }
                local_flush = true;
                spte = &sp->spt[page_offset / sizeof(*spte)];
                while (npte--) {
                        entry = *spte;
                        mmu_page_zap_pte(vcpu->kvm, sp, spte);
                        if (gentry &&
                              !((sp->role.word ^ vcpu->arch.mmu.base_role.word)
                              & mask.word))
                                mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
                        if (!remote_flush && need_remote_flush(entry, *spte))
                                remote_flush = true;
                        ++spte;
                }
        }
        mmu_pte_write_flush_tlb(vcpu, zap_page, remote_flush, local_flush);
        kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
        trace_kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
        spin_unlock(&vcpu->kvm->mmu_lock);
}
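/*
 * Worked example for the misaligned check above: a 4-byte write at offset
 * 6 into a page backed by 8-byte gptes gives (6 ^ 9) & ~7 = 0x8 != 0,
 * i.e. the write straddles two gptes, so the shadow page is zapped rather
 * than patched in place.
 */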
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
        gpa_t gpa;
        int r;

        if (vcpu->arch.mmu.direct_map)
                return 0;

        gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);

        spin_lock(&vcpu->kvm->mmu_lock);
        r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
        spin_unlock(&vcpu->kvm->mmu_lock);

        return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
        LIST_HEAD(invalid_list);

        while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES &&
               !list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
                struct kvm_mmu_page *sp;

                sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
                                  struct kvm_mmu_page, link);
                kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
                ++vcpu->kvm->stat.mmu_recycled;
        }
        kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
}
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
                       void *insn, int insn_len)
{
        int r;
        enum emulation_result er;

        r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);
        if (r < 0)
                goto out;

        if (!r) {
                r = 1;
                goto out;
        }

        r = mmu_topup_memory_caches(vcpu);
        if (r)
                goto out;

        er = x86_emulate_instruction(vcpu, cr2, 0, insn, insn_len);

        switch (er) {
        case EMULATE_DONE:
                return 1;
        case EMULATE_DO_MMIO:
                ++vcpu->stat.mmio_exits;
                /* fall through */
        case EMULATE_FAIL:
                return 0;
        default:
                BUG();
        }
out:
        return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
{
        vcpu->arch.mmu.invlpg(vcpu, gva);
        kvm_mmu_flush_tlb(vcpu);
        ++vcpu->stat.invlpg;
}
EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
void kvm_enable_tdp(void)
{
        tdp_enabled = true;
}
EXPORT_SYMBOL_GPL(kvm_enable_tdp);

void kvm_disable_tdp(void)
{
        tdp_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_tdp);
static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
        free_page((unsigned long)vcpu->arch.mmu.pae_root);
        if (vcpu->arch.mmu.lm_root != NULL)
                free_page((unsigned long)vcpu->arch.mmu.lm_root);
}
static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
{
        struct page *page;
        int i;

        ASSERT(vcpu);

        /*
         * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
         * Therefore we need to allocate shadow page tables in the first
         * 4GB of memory, which happens to fit the DMA32 zone.
         */
        page = alloc_page(GFP_KERNEL | __GFP_DMA32);
        if (!page)
                return -ENOMEM;

        vcpu->arch.mmu.pae_root = page_address(page);
        for (i = 0; i < 4; ++i)
                vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;

        return 0;
}
int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
        ASSERT(vcpu);
        ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

        return alloc_mmu_pages(vcpu);
}
int kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
        ASSERT(vcpu);
        ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

        return init_kvm_mmu(vcpu);
}
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
{
        struct kvm_mmu_page *sp;

        list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
                int i;
                u64 *pt;

                if (!test_bit(slot, sp->slot_bitmap))
                        continue;

                pt = sp->spt;
                for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
                        if (!is_shadow_present_pte(pt[i]) ||
                              !is_last_spte(pt[i], sp->role.level))
                                continue;

                        if (is_large_pte(pt[i])) {
                                drop_spte(kvm, &pt[i]);
                                --kvm->stat.lpages;
                                continue;
                        }

                        /* avoid RMW */
                        if (is_writable_pte(pt[i]))
                                mmu_spte_update(&pt[i],
                                                pt[i] & ~PT_WRITABLE_MASK);
                }
        }
        kvm_flush_remote_tlbs(kvm);
}
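/*
 * This is used when dirty logging is enabled for a memslot: every
 * last-level spte in the slot loses write access (large sptes are simply
 * dropped), so subsequent guest writes fault and can be recorded in the
 * dirty bitmap.
 */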
void kvm_mmu_zap_all(struct kvm *kvm)
{
        struct kvm_mmu_page *sp, *node;
        LIST_HEAD(invalid_list);

        spin_lock(&kvm->mmu_lock);
restart:
        list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
                if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
                        goto restart;

        kvm_mmu_commit_zap_page(kvm, &invalid_list);
        spin_unlock(&kvm->mmu_lock);
}
static int kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
                                               struct list_head *invalid_list)
{
        struct kvm_mmu_page *page;

        page = container_of(kvm->arch.active_mmu_pages.prev,
                            struct kvm_mmu_page, link);
        return kvm_mmu_prepare_zap_page(kvm, page, invalid_list);
}
static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
        struct kvm *kvm;
        struct kvm *kvm_freed = NULL;
        int nr_to_scan = sc->nr_to_scan;

        if (nr_to_scan == 0)
                goto out;

        raw_spin_lock(&kvm_lock);

        list_for_each_entry(kvm, &vm_list, vm_list) {
                int idx, freed_pages;
                LIST_HEAD(invalid_list);

                idx = srcu_read_lock(&kvm->srcu);
                spin_lock(&kvm->mmu_lock);
                if (!kvm_freed && nr_to_scan > 0 &&
                    kvm->arch.n_used_mmu_pages > 0) {
                        freed_pages = kvm_mmu_remove_some_alloc_mmu_pages(kvm,
                                                          &invalid_list);
                        kvm_freed = kvm;
                }
                nr_to_scan--;

                kvm_mmu_commit_zap_page(kvm, &invalid_list);
                spin_unlock(&kvm->mmu_lock);
                srcu_read_unlock(&kvm->srcu, idx);
        }
        if (kvm_freed)
                list_move_tail(&kvm_freed->vm_list, &vm_list);

        raw_spin_unlock(&kvm_lock);

out:
        return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
}
static struct shrinker mmu_shrinker = {
        .shrink = mmu_shrink,
        .seeks = DEFAULT_SEEKS * 10,
};
static void mmu_destroy_caches(void)
{
        if (pte_list_desc_cache)
                kmem_cache_destroy(pte_list_desc_cache);
        if (mmu_page_header_cache)
                kmem_cache_destroy(mmu_page_header_cache);
}
int kvm_mmu_module_init(void)
{
        pte_list_desc_cache = kmem_cache_create("pte_list_desc",
                                            sizeof(struct pte_list_desc),
                                            0, 0, NULL);
        if (!pte_list_desc_cache)
                goto nomem;

        mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
                                                  sizeof(struct kvm_mmu_page),
                                                  0, 0, NULL);
        if (!mmu_page_header_cache)
                goto nomem;

        if (percpu_counter_init(&kvm_total_used_mmu_pages, 0))
                goto nomem;

        register_shrinker(&mmu_shrinker);

        return 0;

nomem:
        mmu_destroy_caches();
        return -ENOMEM;
}
/*
 * Calculate the number of mmu pages needed for kvm.
 */
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
{
        int i;
        unsigned int nr_mmu_pages;
        unsigned int nr_pages = 0;
        struct kvm_memslots *slots;

        slots = kvm_memslots(kvm);

        for (i = 0; i < slots->nmemslots; i++)
                nr_pages += slots->memslots[i].npages;

        nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
        nr_mmu_pages = max(nr_mmu_pages,
                        (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);

        return nr_mmu_pages;
}
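/*
 * Example, assuming KVM_PERMILLE_MMU_PAGES is 20 and
 * KVM_MIN_ALLOC_MMU_PAGES is 64: a guest with 1,000,000 pages (~4GB with
 * 4k pages) is allowed up to 20,000 shadow pages, and a tiny guest still
 * gets at least 64.
 */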
static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer,
                                unsigned len)
{
        if (len > buffer->len)
                return NULL;
        return buffer->ptr;
}

static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer,
                                unsigned len)
{
        void *ret;

        ret = pv_mmu_peek_buffer(buffer, len);
        if (!ret)
                return ret;
        buffer->ptr += len;
        buffer->len -= len;
        buffer->processed += len;
        return ret;
}
static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
                            gpa_t addr, gpa_t value)
{
        int bytes = 8;
        int r;

        if (!is_long_mode(vcpu) && !is_pae(vcpu))
                bytes = 4;

        r = mmu_topup_memory_caches(vcpu);
        if (r)
                return r;

        if (!emulator_write_phys(vcpu, addr, &value, bytes))
                return -EFAULT;

        return 1;
}
static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
        (void)kvm_set_cr3(vcpu, kvm_read_cr3(vcpu));
        return 1;
}
static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr)
{
        spin_lock(&vcpu->kvm->mmu_lock);
        mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT);
        spin_unlock(&vcpu->kvm->mmu_lock);
        return 1;
}
static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu,
                             struct kvm_pv_mmu_op_buffer *buffer)
{
        struct kvm_mmu_op_header *header;

        header = pv_mmu_peek_buffer(buffer, sizeof *header);
        if (!header)
                return 0;
        switch (header->op) {
        case KVM_MMU_OP_WRITE_PTE: {
                struct kvm_mmu_op_write_pte *wpte;

                wpte = pv_mmu_read_buffer(buffer, sizeof *wpte);
                if (!wpte)
                        return 0;
                return kvm_pv_mmu_write(vcpu, wpte->pte_phys,
                                        wpte->pte_val);
        }
        case KVM_MMU_OP_FLUSH_TLB: {
                struct kvm_mmu_op_flush_tlb *ftlb;

                ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb);
                if (!ftlb)
                        return 0;
                return kvm_pv_mmu_flush_tlb(vcpu);
        }
        case KVM_MMU_OP_RELEASE_PT: {
                struct kvm_mmu_op_release_pt *rpt;

                rpt = pv_mmu_read_buffer(buffer, sizeof *rpt);
                if (!rpt)
                        return 0;
                return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys);
        }
        default:
                return 0;
        }
}
int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
                  gpa_t addr, unsigned long *ret)
{
        int r;
        struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer;

        buffer->ptr = buffer->buf;
        buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf);
        buffer->processed = 0;

        r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len);
        if (r)
                goto out;

        while (buffer->len) {
                r = kvm_pv_mmu_op_one(vcpu, buffer);
                if (r < 0)
                        goto out;
                if (r == 0)
                        break;
        }

        r = 1;
out:
        *ret = buffer->processed;
        return r;
}
*vcpu
, u64 addr
, u64 sptes
[4])
3786 struct kvm_shadow_walk_iterator iterator
;
3789 spin_lock(&vcpu
->kvm
->mmu_lock
);
3790 for_each_shadow_entry(vcpu
, addr
, iterator
) {
3791 sptes
[iterator
.level
-1] = *iterator
.sptep
;
3793 if (!is_shadow_present_pte(*iterator
.sptep
))
3796 spin_unlock(&vcpu
->kvm
->mmu_lock
);
3800 EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy
);
void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
        ASSERT(vcpu);

        destroy_kvm_mmu(vcpu);
        free_mmu_pages(vcpu);
        mmu_free_memory_caches(vcpu);
}
#ifdef CONFIG_KVM_MMU_AUDIT
#include "mmu_audit.c"
#else
static void mmu_audit_disable(void) { }
#endif
void kvm_mmu_module_exit(void)
{
        mmu_destroy_caches();
        percpu_counter_destroy(&kvm_total_used_mmu_pages);
        unregister_shrinker(&mmu_shrinker);
        mmu_audit_disable();
}