KVM: MMU: speed up mmu_unsync_walk
[deliverable/linux.git] / arch / x86 / kvm / mmu.c
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *	Yaniv Kamay  <yaniv@qumranet.com>
 *	Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "vmx.h"
#include "mmu.h"

#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/hugetlb.h>
#include <linux/compiler.h>

#include <asm/page.h>
#include <asm/cmpxchg.h>
#include <asm/io.h>

/*
 * When setting this variable to true it enables Two-Dimensional-Paging
 * where the hardware walks 2 page tables:
 * 1. the guest-virtual to guest-physical
 * 2. while doing 1. it walks guest-physical to host-physical
 * If the hardware supports that we don't need to do shadow paging.
 */
bool tdp_enabled = false;

#undef MMU_DEBUG

#undef AUDIT

#ifdef AUDIT
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
#else
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
#endif

#ifdef MMU_DEBUG

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)

#else

#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)

#endif

#if defined(MMU_DEBUG) || defined(AUDIT)
static int dbg = 0;
module_param(dbg, bool, 0644);
#endif

#ifndef MMU_DEBUG
#define ASSERT(x) do { } while (0)
#else
#define ASSERT(x)							\
	if (!(x)) {							\
		printk(KERN_WARNING "assertion failed %s:%d: %s\n",	\
		       __FILE__, __LINE__, #x);				\
	}
#endif

#define PT_FIRST_AVAIL_BITS_SHIFT 9
#define PT64_SECOND_AVAIL_BITS_SHIFT 52

#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_LEVEL_MASK(level) \
		(((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))

#define PT64_INDEX(address, level)\
	(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))


#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LEVEL_MASK(level) \
		(((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))

#define PT32_INDEX(address, level)\
	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))

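/*
 * Worked example (illustration, assuming the usual PAGE_SHIFT == 12):
 * each 64-bit level indexes with 9 bits, so PT64_LEVEL_SHIFT(1..4) is
 * 12, 21, 30, 39.  For addr = 0x00007f8000201000,
 * PT64_INDEX(addr, 1) == (addr >> 12) & 511 == 1, i.e. the second
 * entry of the lowest-level page table.
 */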

#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#define PT64_DIR_BASE_ADDR_MASK \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))

#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
			| PT64_NX_MASK)

#define PFERR_PRESENT_MASK (1U << 0)
#define PFERR_WRITE_MASK (1U << 1)
#define PFERR_USER_MASK (1U << 2)
#define PFERR_FETCH_MASK (1U << 4)

#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1

#define RMAP_EXT 4

#define ACC_EXEC_MASK    1
#define ACC_WRITE_MASK   PT_WRITABLE_MASK
#define ACC_USER_MASK    PT_USER_MASK
#define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)

#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)

struct kvm_rmap_desc {
	u64 *shadow_ptes[RMAP_EXT];
	struct kvm_rmap_desc *more;
};

struct kvm_shadow_walk {
	int (*entry)(struct kvm_shadow_walk *walk, struct kvm_vcpu *vcpu,
		     u64 addr, u64 *spte, int level);
};

struct kvm_unsync_walk {
	int (*entry) (struct kvm_mmu_page *sp, struct kvm_unsync_walk *walk);
};

typedef int (*mmu_parent_walk_fn) (struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp);

static struct kmem_cache *pte_chain_cache;
static struct kmem_cache *rmap_desc_cache;
static struct kmem_cache *mmu_page_header_cache;

static u64 __read_mostly shadow_trap_nonpresent_pte;
static u64 __read_mostly shadow_notrap_nonpresent_pte;
static u64 __read_mostly shadow_base_present_pte;
static u64 __read_mostly shadow_nx_mask;
static u64 __read_mostly shadow_x_mask;	/* mutually exclusive with nx_mask */
static u64 __read_mostly shadow_user_mask;
static u64 __read_mostly shadow_accessed_mask;
static u64 __read_mostly shadow_dirty_mask;

void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
{
	shadow_trap_nonpresent_pte = trap_pte;
	shadow_notrap_nonpresent_pte = notrap_pte;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);

void kvm_mmu_set_base_ptes(u64 base_pte)
{
	shadow_base_present_pte = base_pte;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);

void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
		u64 dirty_mask, u64 nx_mask, u64 x_mask)
{
	shadow_user_mask = user_mask;
	shadow_accessed_mask = accessed_mask;
	shadow_dirty_mask = dirty_mask;
	shadow_nx_mask = nx_mask;
	shadow_x_mask = x_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);

static int is_write_protection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.cr0 & X86_CR0_WP;
}

static int is_cpuid_PSE36(void)
{
	return 1;
}

static int is_nx(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.shadow_efer & EFER_NX;
}

static int is_present_pte(unsigned long pte)
{
	return pte & PT_PRESENT_MASK;
}

static int is_shadow_present_pte(u64 pte)
{
	return pte != shadow_trap_nonpresent_pte
		&& pte != shadow_notrap_nonpresent_pte;
}

static int is_large_pte(u64 pte)
{
	return pte & PT_PAGE_SIZE_MASK;
}

static int is_writeble_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

static int is_dirty_pte(unsigned long pte)
{
	return pte & shadow_dirty_mask;
}

static int is_rmap_pte(u64 pte)
{
	return is_shadow_present_pte(pte);
}

static pfn_t spte_to_pfn(u64 pte)
{
	return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

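/*
 * PSE-36: a 32-bit large-page PTE carries physical address bits 32-39
 * in PTE bits 13-20.  The shift below moves those bits into the right
 * position of the gfn (gfn bit 20 corresponds to physical bit 32).
 */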
static gfn_t pse36_gfn_delta(u32 gpte)
{
	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

	return (gpte & PT32_DIR_PSE36_MASK) << shift;
}

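/*
 * Update an spte with a single atomic 64-bit store (set_64bit), so that
 * neither the hardware walker nor another vcpu can observe a
 * half-written shadow pte, even on 32-bit hosts.
 */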
static void set_shadow_pte(u64 *sptep, u64 spte)
{
#ifdef CONFIG_X86_64
	set_64bit((unsigned long *)sptep, spte);
#else
	set_64bit((unsigned long long *)sptep, spte);
#endif
}

static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  struct kmem_cache *base_cache, int min)
{
	void *obj;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
		if (!obj)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = obj;
	}
	return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		kfree(mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
				       int min)
{
	struct page *page;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		page = alloc_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		set_page_private(page, 0);
		cache->objects[cache->nobjs++] = page_address(page);
	}
	return 0;
}

static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free_page((unsigned long)mc->objects[--mc->nobjs]);
}

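/*
 * The per-vcpu caches below are topped up before mmu_lock is taken (see
 * the page fault paths), so the fault handlers can pull pte chains,
 * rmap descriptors and shadow pages out of them without sleeping while
 * the spinlock is held.
 */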
static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
{
	int r;

	r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
				   pte_chain_cache, 4);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
				   rmap_desc_cache, 1);
	if (r)
		goto out;
	r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
				   mmu_page_header_cache, 4);
out:
	return r;
}

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache);
	mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache);
	mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
	mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
				    size_t size)
{
	void *p;

	BUG_ON(!mc->nobjs);
	p = mc->objects[--mc->nobjs];
	memset(p, 0, size);
	return p;
}

static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
				      sizeof(struct kvm_pte_chain));
}

static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
{
	kfree(pc);
}

static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
				      sizeof(struct kvm_rmap_desc));
}

static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
{
	kfree(rd);
}

/*
 * Return the pointer to the largepage write count for a given
 * gfn, handling slots that are not large page aligned.
 */
static int *slot_largepage_idx(gfn_t gfn, struct kvm_memory_slot *slot)
{
	unsigned long idx;

	idx = (gfn / KVM_PAGES_PER_HPAGE) -
	      (slot->base_gfn / KVM_PAGES_PER_HPAGE);
	return &slot->lpage_info[idx].write_count;
}

static void account_shadowed(struct kvm *kvm, gfn_t gfn)
{
	int *write_count;

	write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
	*write_count += 1;
}

static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
{
	int *write_count;

	write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
	*write_count -= 1;
	WARN_ON(*write_count < 0);
}

static int has_wrprotected_page(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
	int *largepage_idx;

	if (slot) {
		largepage_idx = slot_largepage_idx(gfn, slot);
		return *largepage_idx;
	}

	return 1;
}

static int host_largepage_backed(struct kvm *kvm, gfn_t gfn)
{
	struct vm_area_struct *vma;
	unsigned long addr;
	int ret = 0;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return ret;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, addr);
	if (vma && is_vm_hugetlb_page(vma))
		ret = 1;
	up_read(&current->mm->mmap_sem);

	return ret;
}

static int is_largepage_backed(struct kvm_vcpu *vcpu, gfn_t large_gfn)
{
	struct kvm_memory_slot *slot;

	if (has_wrprotected_page(vcpu->kvm, large_gfn))
		return 0;

	if (!host_largepage_backed(vcpu->kvm, large_gfn))
		return 0;

	slot = gfn_to_memslot(vcpu->kvm, large_gfn);
	if (slot && slot->dirty_bitmap)
		return 0;

	return 1;
}

/*
 * Take gfn and return the reverse mapping to it.
 * Note: gfn must be unaliased before this function gets called.
 */
static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int lpage)
{
	struct kvm_memory_slot *slot;
	unsigned long idx;

	slot = gfn_to_memslot(kvm, gfn);
	if (!lpage)
		return &slot->rmap[gfn - slot->base_gfn];

	idx = (gfn / KVM_PAGES_PER_HPAGE) -
	      (slot->base_gfn / KVM_PAGES_PER_HPAGE);

	return &slot->lpage_info[idx].rmap_pde;
}

/*
 * Reverse mapping data structures:
 *
 * If rmapp bit zero is zero, then rmapp points to the shadow page table
 * entry that points to page_address(page).
 *
 * If rmapp bit zero is one, then (rmapp & ~1) points to a struct
 * kvm_rmap_desc containing more mappings.
 */
static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
{
	struct kvm_mmu_page *sp;
	struct kvm_rmap_desc *desc;
	unsigned long *rmapp;
	int i;

	if (!is_rmap_pte(*spte))
		return;
	gfn = unalias_gfn(vcpu->kvm, gfn);
	sp = page_header(__pa(spte));
	sp->gfns[spte - sp->spt] = gfn;
	rmapp = gfn_to_rmap(vcpu->kvm, gfn, lpage);
	if (!*rmapp) {
		rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
		*rmapp = (unsigned long)spte;
	} else if (!(*rmapp & 1)) {
		rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
		desc = mmu_alloc_rmap_desc(vcpu);
		desc->shadow_ptes[0] = (u64 *)*rmapp;
		desc->shadow_ptes[1] = spte;
		*rmapp = (unsigned long)desc | 1;
	} else {
		rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
		while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
			desc = desc->more;
		if (desc->shadow_ptes[RMAP_EXT-1]) {
			desc->more = mmu_alloc_rmap_desc(vcpu);
			desc = desc->more;
		}
		for (i = 0; desc->shadow_ptes[i]; ++i)
			;
		desc->shadow_ptes[i] = spte;
	}
}

static void rmap_desc_remove_entry(unsigned long *rmapp,
				   struct kvm_rmap_desc *desc,
				   int i,
				   struct kvm_rmap_desc *prev_desc)
{
	int j;

	for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
		;
	desc->shadow_ptes[i] = desc->shadow_ptes[j];
	desc->shadow_ptes[j] = NULL;
	if (j != 0)
		return;
	if (!prev_desc && !desc->more)
		*rmapp = (unsigned long)desc->shadow_ptes[0];
	else
		if (prev_desc)
			prev_desc->more = desc->more;
		else
			*rmapp = (unsigned long)desc->more | 1;
	mmu_free_rmap_desc(desc);
}

static void rmap_remove(struct kvm *kvm, u64 *spte)
{
	struct kvm_rmap_desc *desc;
	struct kvm_rmap_desc *prev_desc;
	struct kvm_mmu_page *sp;
	pfn_t pfn;
	unsigned long *rmapp;
	int i;

	if (!is_rmap_pte(*spte))
		return;
	sp = page_header(__pa(spte));
	pfn = spte_to_pfn(*spte);
	if (*spte & shadow_accessed_mask)
		kvm_set_pfn_accessed(pfn);
	if (is_writeble_pte(*spte))
		kvm_release_pfn_dirty(pfn);
	else
		kvm_release_pfn_clean(pfn);
	rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], is_large_pte(*spte));
	if (!*rmapp) {
		printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
		BUG();
	} else if (!(*rmapp & 1)) {
		rmap_printk("rmap_remove:  %p %llx 1->0\n", spte, *spte);
		if ((u64 *)*rmapp != spte) {
			printk(KERN_ERR "rmap_remove:  %p %llx 1->BUG\n",
			       spte, *spte);
			BUG();
		}
		*rmapp = 0;
	} else {
		rmap_printk("rmap_remove:  %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
		prev_desc = NULL;
		while (desc) {
			for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
				if (desc->shadow_ptes[i] == spte) {
					rmap_desc_remove_entry(rmapp,
							       desc, i,
							       prev_desc);
					return;
				}
			prev_desc = desc;
			desc = desc->more;
		}
		BUG();
	}
}

static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
{
	struct kvm_rmap_desc *desc;
	struct kvm_rmap_desc *prev_desc;
	u64 *prev_spte;
	int i;

	if (!*rmapp)
		return NULL;
	else if (!(*rmapp & 1)) {
		if (!spte)
			return (u64 *)*rmapp;
		return NULL;
	}
	desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
	prev_desc = NULL;
	prev_spte = NULL;
	while (desc) {
		for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) {
			if (prev_spte == spte)
				return desc->shadow_ptes[i];
			prev_spte = desc->shadow_ptes[i];
		}
		desc = desc->more;
	}
	return NULL;
}

static void rmap_write_protect(struct kvm *kvm, u64 gfn)
{
	unsigned long *rmapp;
	u64 *spte;
	int write_protected = 0;

	gfn = unalias_gfn(kvm, gfn);
	rmapp = gfn_to_rmap(kvm, gfn, 0);

	spte = rmap_next(kvm, rmapp, NULL);
	while (spte) {
		BUG_ON(!spte);
		BUG_ON(!(*spte & PT_PRESENT_MASK));
		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
		if (is_writeble_pte(*spte)) {
			set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
			write_protected = 1;
		}
		spte = rmap_next(kvm, rmapp, spte);
	}
	if (write_protected) {
		pfn_t pfn;

		spte = rmap_next(kvm, rmapp, NULL);
		pfn = spte_to_pfn(*spte);
		kvm_set_pfn_dirty(pfn);
	}

	/* check for huge page mappings */
	rmapp = gfn_to_rmap(kvm, gfn, 1);
	spte = rmap_next(kvm, rmapp, NULL);
	while (spte) {
		BUG_ON(!spte);
		BUG_ON(!(*spte & PT_PRESENT_MASK));
		BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
		pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
		if (is_writeble_pte(*spte)) {
			rmap_remove(kvm, spte);
			--kvm->stat.lpages;
			set_shadow_pte(spte, shadow_trap_nonpresent_pte);
			spte = NULL;
			write_protected = 1;
		}
		spte = rmap_next(kvm, rmapp, spte);
	}

	if (write_protected)
		kvm_flush_remote_tlbs(kvm);
}

static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp)
{
	u64 *spte;
	int need_tlb_flush = 0;

	while ((spte = rmap_next(kvm, rmapp, NULL))) {
		BUG_ON(!(*spte & PT_PRESENT_MASK));
		rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
		rmap_remove(kvm, spte);
		set_shadow_pte(spte, shadow_trap_nonpresent_pte);
		need_tlb_flush = 1;
	}
	return need_tlb_flush;
}

static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
			  int (*handler)(struct kvm *kvm, unsigned long *rmapp))
{
	int i;
	int retval = 0;

	/*
	 * If mmap_sem isn't taken, we can look at the memslots with only
	 * the mmu_lock by skipping over the slots with userspace_addr == 0.
	 */
	for (i = 0; i < kvm->nmemslots; i++) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];
		unsigned long start = memslot->userspace_addr;
		unsigned long end;

		/* mmu_lock protects userspace_addr */
		if (!start)
			continue;

		end = start + (memslot->npages << PAGE_SHIFT);
		if (hva >= start && hva < end) {
			gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
			retval |= handler(kvm, &memslot->rmap[gfn_offset]);
			retval |= handler(kvm,
					  &memslot->lpage_info[
						  gfn_offset /
						  KVM_PAGES_PER_HPAGE].rmap_pde);
		}
	}

	return retval;
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
}

static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp)
{
	u64 *spte;
	int young = 0;

	/* always return old for EPT */
	if (!shadow_accessed_mask)
		return 0;

	spte = rmap_next(kvm, rmapp, NULL);
	while (spte) {
		int _young;
		u64 _spte = *spte;
		BUG_ON(!(_spte & PT_PRESENT_MASK));
		_young = _spte & PT_ACCESSED_MASK;
		if (_young) {
			young = 1;
			clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
		}
		spte = rmap_next(kvm, rmapp, spte);
	}
	return young;
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
}

#ifdef MMU_DEBUG
static int is_empty_shadow_page(u64 *spt)
{
	u64 *pos;
	u64 *end;

	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
		if (is_shadow_present_pte(*pos)) {
			printk(KERN_ERR "%s: %p %llx\n", __func__,
			       pos, *pos);
			return 0;
		}
	return 1;
}
#endif

static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	ASSERT(is_empty_shadow_page(sp->spt));
	list_del(&sp->link);
	__free_page(virt_to_page(sp->spt));
	__free_page(virt_to_page(sp->gfns));
	kfree(sp);
	++kvm->arch.n_free_mmu_pages;
}

static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
	return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
}

static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
					       u64 *parent_pte)
{
	struct kvm_mmu_page *sp;

	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
	sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
	ASSERT(is_empty_shadow_page(sp->spt));
	sp->slot_bitmap = 0;
	sp->multimapped = 0;
	sp->parent_pte = parent_pte;
	--vcpu->kvm->arch.n_free_mmu_pages;
	return sp;
}

static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp, u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!parent_pte)
		return;
	if (!sp->multimapped) {
		u64 *old = sp->parent_pte;

		if (!old) {
			sp->parent_pte = parent_pte;
			return;
		}
		sp->multimapped = 1;
		pte_chain = mmu_alloc_pte_chain(vcpu);
		INIT_HLIST_HEAD(&sp->parent_ptes);
		hlist_add_head(&pte_chain->link, &sp->parent_ptes);
		pte_chain->parent_ptes[0] = old;
	}
	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
		if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
			continue;
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
			if (!pte_chain->parent_ptes[i]) {
				pte_chain->parent_ptes[i] = parent_pte;
				return;
			}
	}
	pte_chain = mmu_alloc_pte_chain(vcpu);
	BUG_ON(!pte_chain);
	hlist_add_head(&pte_chain->link, &sp->parent_ptes);
	pte_chain->parent_ptes[0] = parent_pte;
}

static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
				       u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!sp->multimapped) {
		BUG_ON(sp->parent_pte != parent_pte);
		sp->parent_pte = NULL;
		return;
	}
	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
			if (!pte_chain->parent_ptes[i])
				break;
			if (pte_chain->parent_ptes[i] != parent_pte)
				continue;
			while (i + 1 < NR_PTE_CHAIN_ENTRIES
				&& pte_chain->parent_ptes[i + 1]) {
				pte_chain->parent_ptes[i]
					= pte_chain->parent_ptes[i + 1];
				++i;
			}
			pte_chain->parent_ptes[i] = NULL;
			if (i == 0) {
				hlist_del(&pte_chain->link);
				mmu_free_pte_chain(pte_chain);
				if (hlist_empty(&sp->parent_ptes)) {
					sp->multimapped = 0;
					sp->parent_pte = NULL;
				}
			}
			return;
		}
	BUG();
}


static void mmu_parent_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			    mmu_parent_walk_fn fn)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	struct kvm_mmu_page *parent_sp;
	int i;

	if (!sp->multimapped && sp->parent_pte) {
		parent_sp = page_header(__pa(sp->parent_pte));
		fn(vcpu, parent_sp);
		mmu_parent_walk(vcpu, parent_sp, fn);
		return;
	}
	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
			if (!pte_chain->parent_ptes[i])
				break;
			parent_sp = page_header(__pa(pte_chain->parent_ptes[i]));
			fn(vcpu, parent_sp);
			mmu_parent_walk(vcpu, parent_sp, fn);
		}
}

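/*
 * The helpers below propagate the "some child is unsync" hint up the
 * shadow page hierarchy: each parent records, in unsync_child_bitmap,
 * which of its 512 sptes leads towards an unsync page, so later walks
 * only descend through marked entries instead of scanning whole pages.
 */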
static void kvm_mmu_update_unsync_bitmap(u64 *spte)
{
	unsigned int index;
	struct kvm_mmu_page *sp = page_header(__pa(spte));

	index = spte - sp->spt;
	__set_bit(index, sp->unsync_child_bitmap);
	sp->unsync_children = 1;
}

static void kvm_mmu_update_parents_unsync(struct kvm_mmu_page *sp)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!sp->parent_pte)
		return;

	if (!sp->multimapped) {
		kvm_mmu_update_unsync_bitmap(sp->parent_pte);
		return;
	}

	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
			if (!pte_chain->parent_ptes[i])
				break;
			kvm_mmu_update_unsync_bitmap(pte_chain->parent_ptes[i]);
		}
}

static int unsync_walk_fn(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	sp->unsync_children = 1;
	kvm_mmu_update_parents_unsync(sp);
	return 1;
}

static void kvm_mmu_mark_parents_unsync(struct kvm_vcpu *vcpu,
					struct kvm_mmu_page *sp)
{
	mmu_parent_walk(vcpu, sp, unsync_walk_fn);
	kvm_mmu_update_parents_unsync(sp);
}

static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp)
{
	int i;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
		sp->spt[i] = shadow_trap_nonpresent_pte;
}

static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
			       struct kvm_mmu_page *sp)
{
	return 1;
}

static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
{
}

#define for_each_unsync_children(bitmap, idx)		\
	for (idx = find_first_bit(bitmap, 512);		\
	     idx < 512;					\
	     idx = find_next_bit(bitmap, 512, idx+1))

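/*
 * Depth-first walk over the unsync children of @sp.  Only bits set in
 * unsync_child_bitmap are visited; a bit is cleared once the subtree
 * below it has been fully handled, and unsync_children is dropped when
 * the bitmap becomes empty, so repeated walks skip already-synced
 * subtrees.  A non-zero return from walker->entry aborts the walk.
 */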
static int mmu_unsync_walk(struct kvm_mmu_page *sp,
			   struct kvm_unsync_walk *walker)
{
	int i, ret;

	if (!sp->unsync_children)
		return 0;

	for_each_unsync_children(sp->unsync_child_bitmap, i) {
		u64 ent = sp->spt[i];

		if (is_shadow_present_pte(ent)) {
			struct kvm_mmu_page *child;
			child = page_header(ent & PT64_BASE_ADDR_MASK);

			if (child->unsync_children) {
				ret = mmu_unsync_walk(child, walker);
				if (ret)
					return ret;
				__clear_bit(i, sp->unsync_child_bitmap);
			}

			if (child->unsync) {
				ret = walker->entry(child, walker);
				__clear_bit(i, sp->unsync_child_bitmap);
				if (ret)
					return ret;
			}
		}
	}

	if (find_first_bit(sp->unsync_child_bitmap, 512) == 512)
		sp->unsync_children = 0;

	return 0;
}

static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *sp;
	struct hlist_node *node;

	pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
	index = kvm_page_table_hashfn(gfn);
	bucket = &kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry(sp, node, bucket, hash_link)
		if (sp->gfn == gfn && !sp->role.metaphysical
		    && !sp->role.invalid) {
			pgprintk("%s: found role %x\n",
				 __func__, sp->role.word);
			return sp;
		}
	return NULL;
}

static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	WARN_ON(!sp->unsync);
	sp->unsync = 0;
	--kvm->stat.mmu_unsync;
}

static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp);

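/*
 * Bring an unsync shadow page back in sync with the guest page table:
 * pages whose role no longer matches the current paging mode are simply
 * zapped; otherwise the gfn is write-protected again and sync_page()
 * replays the guest ptes into the shadow page.
 */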
static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	if (sp->role.glevels != vcpu->arch.mmu.root_level) {
		kvm_mmu_zap_page(vcpu->kvm, sp);
		return 1;
	}

	rmap_write_protect(vcpu->kvm, sp->gfn);
	if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
		kvm_mmu_zap_page(vcpu->kvm, sp);
		return 1;
	}

	kvm_mmu_flush_tlb(vcpu);
	kvm_unlink_unsync_page(vcpu->kvm, sp);
	return 0;
}

struct sync_walker {
	struct kvm_vcpu *vcpu;
	struct kvm_unsync_walk walker;
};

static int mmu_sync_fn(struct kvm_mmu_page *sp, struct kvm_unsync_walk *walk)
{
	struct sync_walker *sync_walk = container_of(walk, struct sync_walker,
						     walker);
	struct kvm_vcpu *vcpu = sync_walk->vcpu;

	kvm_sync_page(vcpu, sp);
	return (need_resched() || spin_needbreak(&vcpu->kvm->mmu_lock));
}

static void mmu_sync_children(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	struct sync_walker walker = {
		.walker = { .entry = mmu_sync_fn, },
		.vcpu = vcpu,
	};

	while (mmu_unsync_walk(sp, &walker.walker))
		cond_resched_lock(&vcpu->kvm->mmu_lock);
}

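/*
 * Look up (or create) the shadow page for @gfn with the given role.
 * For 32-bit guests shadowed by 64-bit page tables, role.quadrant
 * records which portion of the wider guest table this 512-entry
 * shadow page maps, so each portion gets its own shadow page.
 */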
static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
					     gfn_t gfn,
					     gva_t gaddr,
					     unsigned level,
					     int metaphysical,
					     unsigned access,
					     u64 *parent_pte)
{
	union kvm_mmu_page_role role;
	unsigned index;
	unsigned quadrant;
	struct hlist_head *bucket;
	struct kvm_mmu_page *sp;
	struct hlist_node *node, *tmp;

	role.word = 0;
	role.glevels = vcpu->arch.mmu.root_level;
	role.level = level;
	role.metaphysical = metaphysical;
	role.access = access;
	if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
		role.quadrant = quadrant;
	}
	pgprintk("%s: looking gfn %lx role %x\n", __func__,
		 gfn, role.word);
	index = kvm_page_table_hashfn(gfn);
	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry_safe(sp, node, tmp, bucket, hash_link)
		if (sp->gfn == gfn) {
			if (sp->unsync)
				if (kvm_sync_page(vcpu, sp))
					continue;

			if (sp->role.word != role.word)
				continue;

			mmu_page_add_parent_pte(vcpu, sp, parent_pte);
			if (sp->unsync_children) {
				set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
				kvm_mmu_mark_parents_unsync(vcpu, sp);
			}
			pgprintk("%s: found\n", __func__);
			return sp;
		}
	++vcpu->kvm->stat.mmu_cache_miss;
	sp = kvm_mmu_alloc_page(vcpu, parent_pte);
	if (!sp)
		return sp;
	pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
	sp->gfn = gfn;
	sp->role = role;
	hlist_add_head(&sp->hash_link, bucket);
	if (!metaphysical) {
		rmap_write_protect(vcpu->kvm, gfn);
		account_shadowed(vcpu->kvm, gfn);
	}
	if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte)
		vcpu->arch.mmu.prefetch_page(vcpu, sp);
	else
		nonpaging_prefetch_page(vcpu, sp);
	return sp;
}

static int walk_shadow(struct kvm_shadow_walk *walker,
		       struct kvm_vcpu *vcpu, u64 addr)
{
	hpa_t shadow_addr;
	int level;
	int r;
	u64 *sptep;
	unsigned index;

	shadow_addr = vcpu->arch.mmu.root_hpa;
	level = vcpu->arch.mmu.shadow_root_level;
	if (level == PT32E_ROOT_LEVEL) {
		shadow_addr = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
		shadow_addr &= PT64_BASE_ADDR_MASK;
		--level;
	}

	while (level >= PT_PAGE_TABLE_LEVEL) {
		index = SHADOW_PT_INDEX(addr, level);
		sptep = ((u64 *)__va(shadow_addr)) + index;
		r = walker->entry(walker, vcpu, addr, sptep, level);
		if (r)
			return r;
		shadow_addr = *sptep & PT64_BASE_ADDR_MASK;
		--level;
	}
	return 0;
}

static void kvm_mmu_page_unlink_children(struct kvm *kvm,
					 struct kvm_mmu_page *sp)
{
	unsigned i;
	u64 *pt;
	u64 ent;

	pt = sp->spt;

	if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
			if (is_shadow_present_pte(pt[i]))
				rmap_remove(kvm, &pt[i]);
			pt[i] = shadow_trap_nonpresent_pte;
		}
		return;
	}

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		ent = pt[i];

		if (is_shadow_present_pte(ent)) {
			if (!is_large_pte(ent)) {
				ent &= PT64_BASE_ADDR_MASK;
				mmu_page_remove_parent_pte(page_header(ent),
							   &pt[i]);
			} else {
				--kvm->stat.lpages;
				rmap_remove(kvm, &pt[i]);
			}
		}
		pt[i] = shadow_trap_nonpresent_pte;
	}
}

static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
{
	mmu_page_remove_parent_pte(sp, parent_pte);
}

static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
{
	int i;

	for (i = 0; i < KVM_MAX_VCPUS; ++i)
		if (kvm->vcpus[i])
			kvm->vcpus[i]->arch.last_pte_updated = NULL;
}

static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	u64 *parent_pte;

	while (sp->multimapped || sp->parent_pte) {
		if (!sp->multimapped)
			parent_pte = sp->parent_pte;
		else {
			struct kvm_pte_chain *chain;

			chain = container_of(sp->parent_ptes.first,
					     struct kvm_pte_chain, link);
			parent_pte = chain->parent_ptes[0];
		}
		BUG_ON(!parent_pte);
		kvm_mmu_put_page(sp, parent_pte);
		set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
	}
}

struct zap_walker {
	struct kvm_unsync_walk walker;
	struct kvm *kvm;
	int zapped;
};

static int mmu_zap_fn(struct kvm_mmu_page *sp, struct kvm_unsync_walk *walk)
{
	struct zap_walker *zap_walk = container_of(walk, struct zap_walker,
						   walker);
	kvm_mmu_zap_page(zap_walk->kvm, sp);
	zap_walk->zapped = 1;
	return 0;
}

static int mmu_zap_unsync_children(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	struct zap_walker walker = {
		.walker = { .entry = mmu_zap_fn, },
		.kvm = kvm,
		.zapped = 0,
	};

	if (sp->role.level == PT_PAGE_TABLE_LEVEL)
		return 0;
	mmu_unsync_walk(sp, &walker.walker);
	return walker.zapped;
}

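/*
 * Tear down a shadow page: any unsync children are zapped first (via
 * the walker above), the page's sptes and parent links are severed,
 * and the page is freed unless it is still in use as a root, in which
 * case it is only marked invalid and reaped once the root count drops
 * to zero.
 */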
static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	int ret;
	++kvm->stat.mmu_shadow_zapped;
	ret = mmu_zap_unsync_children(kvm, sp);
	kvm_mmu_page_unlink_children(kvm, sp);
	kvm_mmu_unlink_parents(kvm, sp);
	kvm_flush_remote_tlbs(kvm);
	if (!sp->role.invalid && !sp->role.metaphysical)
		unaccount_shadowed(kvm, sp->gfn);
	if (sp->unsync)
		kvm_unlink_unsync_page(kvm, sp);
	if (!sp->root_count) {
		hlist_del(&sp->hash_link);
		kvm_mmu_free_page(kvm, sp);
	} else {
		sp->role.invalid = 1;
		list_move(&sp->link, &kvm->arch.active_mmu_pages);
		kvm_reload_remote_mmus(kvm);
	}
	kvm_mmu_reset_last_pte_updated(kvm);
	return ret;
}

/*
 * Changing the number of mmu pages allocated to the vm
 * Note: if kvm_nr_mmu_pages is too small, you will get deadlock
 */
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
{
	/*
	 * If we set the number of mmu pages to be smaller than the number
	 * of active pages, we must free some mmu pages before we change
	 * the value.
	 */

	if ((kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages) >
	    kvm_nr_mmu_pages) {
		int n_used_mmu_pages = kvm->arch.n_alloc_mmu_pages
				       - kvm->arch.n_free_mmu_pages;

		while (n_used_mmu_pages > kvm_nr_mmu_pages) {
			struct kvm_mmu_page *page;

			page = container_of(kvm->arch.active_mmu_pages.prev,
					    struct kvm_mmu_page, link);
			kvm_mmu_zap_page(kvm, page);
			n_used_mmu_pages--;
		}
		kvm->arch.n_free_mmu_pages = 0;
	}
	else
		kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
					 - kvm->arch.n_alloc_mmu_pages;

	kvm->arch.n_alloc_mmu_pages = kvm_nr_mmu_pages;
}

static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *sp;
	struct hlist_node *node, *n;
	int r;

	pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
	r = 0;
	index = kvm_page_table_hashfn(gfn);
	bucket = &kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
		if (sp->gfn == gfn && !sp->role.metaphysical) {
			pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
				 sp->role.word);
			r = 1;
			if (kvm_mmu_zap_page(kvm, sp))
				n = bucket->first;
		}
	return r;
}

static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_mmu_page *sp;

	while ((sp = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
		pgprintk("%s: zap %lx %x\n", __func__, gfn, sp->role.word);
		kvm_mmu_zap_page(kvm, sp);
	}
}

static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
{
	int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
	struct kvm_mmu_page *sp = page_header(__pa(pte));

	__set_bit(slot, &sp->slot_bitmap);
}

static void mmu_convert_notrap(struct kvm_mmu_page *sp)
{
	int i;
	u64 *pt = sp->spt;

	if (shadow_trap_nonpresent_pte == shadow_notrap_nonpresent_pte)
		return;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		if (pt[i] == shadow_notrap_nonpresent_pte)
			set_shadow_pte(&pt[i], shadow_trap_nonpresent_pte);
	}
}

struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
{
	struct page *page;

	gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);

	if (gpa == UNMAPPED_GVA)
		return NULL;

	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);

	return page;
}

static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *s;
	struct hlist_node *node, *n;

	index = kvm_page_table_hashfn(sp->gfn);
	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
	/* don't unsync if pagetable is shadowed with multiple roles */
	hlist_for_each_entry_safe(s, node, n, bucket, hash_link) {
		if (s->gfn != sp->gfn || s->role.metaphysical)
			continue;
		if (s->role.word != sp->role.word)
			return 1;
	}
	kvm_mmu_mark_parents_unsync(vcpu, sp);
	++vcpu->kvm->stat.mmu_unsync;
	sp->unsync = 1;
	mmu_convert_notrap(sp);
	return 0;
}

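/*
 * Decide whether a gfn that is about to become writable must stay
 * write-protected: leaf shadow pages may instead be marked unsync
 * (letting the guest write freely and syncing later), while gfns
 * shadowed as page directories must keep the write protection.
 */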
static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
				  bool can_unsync)
{
	struct kvm_mmu_page *shadow;

	shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
	if (shadow) {
		if (shadow->role.level != PT_PAGE_TABLE_LEVEL)
			return 1;
		if (shadow->unsync)
			return 0;
		if (can_unsync)
			return kvm_unsync_page(vcpu, shadow);
		return 1;
	}
	return 0;
}

static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
		    unsigned pte_access, int user_fault,
		    int write_fault, int dirty, int largepage,
		    gfn_t gfn, pfn_t pfn, bool speculative,
		    bool can_unsync)
{
	u64 spte;
	int ret = 0;
	/*
	 * We don't set the accessed bit, since we sometimes want to see
	 * whether the guest actually used the pte (in order to detect
	 * demand paging).
	 */
	spte = shadow_base_present_pte | shadow_dirty_mask;
	if (!speculative)
		spte |= shadow_accessed_mask;
	if (!dirty)
		pte_access &= ~ACC_WRITE_MASK;
	if (pte_access & ACC_EXEC_MASK)
		spte |= shadow_x_mask;
	else
		spte |= shadow_nx_mask;
	if (pte_access & ACC_USER_MASK)
		spte |= shadow_user_mask;
	if (largepage)
		spte |= PT_PAGE_SIZE_MASK;

	spte |= (u64)pfn << PAGE_SHIFT;

	if ((pte_access & ACC_WRITE_MASK)
	    || (write_fault && !is_write_protection(vcpu) && !user_fault)) {

		if (largepage && has_wrprotected_page(vcpu->kvm, gfn)) {
			ret = 1;
			spte = shadow_trap_nonpresent_pte;
			goto set_pte;
		}

		spte |= PT_WRITABLE_MASK;

		if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
			pgprintk("%s: found shadow page for %lx, marking ro\n",
				 __func__, gfn);
			ret = 1;
			pte_access &= ~ACC_WRITE_MASK;
			if (is_writeble_pte(spte))
				spte &= ~PT_WRITABLE_MASK;
		}
	}

	if (pte_access & ACC_WRITE_MASK)
		mark_page_dirty(vcpu->kvm, gfn);

set_pte:
	set_shadow_pte(shadow_pte, spte);
	return ret;
}

static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
			 unsigned pt_access, unsigned pte_access,
			 int user_fault, int write_fault, int dirty,
			 int *ptwrite, int largepage, gfn_t gfn,
			 pfn_t pfn, bool speculative)
{
	int was_rmapped = 0;
	int was_writeble = is_writeble_pte(*shadow_pte);

	pgprintk("%s: spte %llx access %x write_fault %d"
		 " user_fault %d gfn %lx\n",
		 __func__, *shadow_pte, pt_access,
		 write_fault, user_fault, gfn);

	if (is_rmap_pte(*shadow_pte)) {
		/*
		 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
		 * the parent of the now unreachable PTE.
		 */
		if (largepage && !is_large_pte(*shadow_pte)) {
			struct kvm_mmu_page *child;
			u64 pte = *shadow_pte;

			child = page_header(pte & PT64_BASE_ADDR_MASK);
			mmu_page_remove_parent_pte(child, shadow_pte);
		} else if (pfn != spte_to_pfn(*shadow_pte)) {
			pgprintk("hfn old %lx new %lx\n",
				 spte_to_pfn(*shadow_pte), pfn);
			rmap_remove(vcpu->kvm, shadow_pte);
		} else {
			if (largepage)
				was_rmapped = is_large_pte(*shadow_pte);
			else
				was_rmapped = 1;
		}
	}
	if (set_spte(vcpu, shadow_pte, pte_access, user_fault, write_fault,
		      dirty, largepage, gfn, pfn, speculative, true)) {
		if (write_fault)
			*ptwrite = 1;
		kvm_x86_ops->tlb_flush(vcpu);
	}

	pgprintk("%s: setting spte %llx\n", __func__, *shadow_pte);
	pgprintk("instantiating %s PTE (%s) at %ld (%llx) addr %p\n",
		 is_large_pte(*shadow_pte)? "2MB" : "4kB",
		 is_present_pte(*shadow_pte)?"RW":"R", gfn,
		 *shadow_pte, shadow_pte);
	if (!was_rmapped && is_large_pte(*shadow_pte))
		++vcpu->kvm->stat.lpages;

	page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
	if (!was_rmapped) {
		rmap_add(vcpu, shadow_pte, gfn, largepage);
		if (!is_rmap_pte(*shadow_pte))
			kvm_release_pfn_clean(pfn);
	} else {
		if (was_writeble)
			kvm_release_pfn_dirty(pfn);
		else
			kvm_release_pfn_clean(pfn);
	}
	if (speculative) {
		vcpu->arch.last_pte_updated = shadow_pte;
		vcpu->arch.last_pte_gfn = gfn;
	}
}

static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
{
}

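/*
 * Direct (real-mode / TDP) mapping: walk_shadow() drives the
 * direct_map_entry() callback down the shadow hierarchy, installing
 * missing intermediate shadow pages and finally the leaf spte for the
 * faulting address.
 */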
struct direct_shadow_walk {
	struct kvm_shadow_walk walker;
	pfn_t pfn;
	int write;
	int largepage;
	int pt_write;
};

static int direct_map_entry(struct kvm_shadow_walk *_walk,
			    struct kvm_vcpu *vcpu,
			    u64 addr, u64 *sptep, int level)
{
	struct direct_shadow_walk *walk =
		container_of(_walk, struct direct_shadow_walk, walker);
	struct kvm_mmu_page *sp;
	gfn_t pseudo_gfn;
	gfn_t gfn = addr >> PAGE_SHIFT;

	if (level == PT_PAGE_TABLE_LEVEL
	    || (walk->largepage && level == PT_DIRECTORY_LEVEL)) {
		mmu_set_spte(vcpu, sptep, ACC_ALL, ACC_ALL,
			     0, walk->write, 1, &walk->pt_write,
			     walk->largepage, gfn, walk->pfn, false);
		++vcpu->stat.pf_fixed;
		return 1;
	}

	if (*sptep == shadow_trap_nonpresent_pte) {
		pseudo_gfn = (addr & PT64_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
		sp = kvm_mmu_get_page(vcpu, pseudo_gfn, (gva_t)addr, level - 1,
				      1, ACC_ALL, sptep);
		if (!sp) {
			pgprintk("nonpaging_map: ENOMEM\n");
			kvm_release_pfn_clean(walk->pfn);
			return -ENOMEM;
		}

		set_shadow_pte(sptep,
			       __pa(sp->spt)
			       | PT_PRESENT_MASK | PT_WRITABLE_MASK
			       | shadow_user_mask | shadow_x_mask);
	}
	return 0;
}

static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
			int largepage, gfn_t gfn, pfn_t pfn)
{
	int r;
	struct direct_shadow_walk walker = {
		.walker = { .entry = direct_map_entry, },
		.pfn = pfn,
		.largepage = largepage,
		.write = write,
		.pt_write = 0,
	};

	r = walk_shadow(&walker.walker, vcpu, gfn << PAGE_SHIFT);
	if (r < 0)
		return r;
	return walker.pt_write;
}

static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
{
	int r;
	int largepage = 0;
	pfn_t pfn;
	unsigned long mmu_seq;

	if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
		gfn &= ~(KVM_PAGES_PER_HPAGE-1);
		largepage = 1;
	}

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();
	pfn = gfn_to_pfn(vcpu->kvm, gfn);

	/* mmio */
	if (is_error_pfn(pfn)) {
		kvm_release_pfn_clean(pfn);
		return 1;
	}

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq))
		goto out_unlock;
	kvm_mmu_free_some_pages(vcpu);
	r = __direct_map(vcpu, v, write, largepage, gfn, pfn);
	spin_unlock(&vcpu->kvm->mmu_lock);

	return r;

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}

static void mmu_free_roots(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_mmu_page *sp;

	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return;
	spin_lock(&vcpu->kvm->mmu_lock);
	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;

		sp = page_header(root);
		--sp->root_count;
		if (!sp->root_count && sp->role.invalid)
			kvm_mmu_zap_page(vcpu->kvm, sp);
		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
		spin_unlock(&vcpu->kvm->mmu_lock);
		return;
	}
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		if (root) {
			root &= PT64_BASE_ADDR_MASK;
			sp = page_header(root);
			--sp->root_count;
			if (!sp->root_count && sp->role.invalid)
				kvm_mmu_zap_page(vcpu->kvm, sp);
		}
		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
	}
	spin_unlock(&vcpu->kvm->mmu_lock);
	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
}

static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
{
	int i;
	gfn_t root_gfn;
	struct kvm_mmu_page *sp;
	int metaphysical = 0;

	root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;

	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;

		ASSERT(!VALID_PAGE(root));
		if (tdp_enabled)
			metaphysical = 1;
		sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
				      PT64_ROOT_LEVEL, metaphysical,
				      ACC_ALL, NULL);
		root = __pa(sp->spt);
		++sp->root_count;
		vcpu->arch.mmu.root_hpa = root;
		return;
	}
	metaphysical = !is_paging(vcpu);
	if (tdp_enabled)
		metaphysical = 1;
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		ASSERT(!VALID_PAGE(root));
		if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
			if (!is_present_pte(vcpu->arch.pdptrs[i])) {
				vcpu->arch.mmu.pae_root[i] = 0;
				continue;
			}
			root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
		} else if (vcpu->arch.mmu.root_level == 0)
			root_gfn = 0;
		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
				      PT32_ROOT_LEVEL, metaphysical,
				      ACC_ALL, NULL);
		root = __pa(sp->spt);
		++sp->root_count;
		vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
	}
	vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
}

static void mmu_sync_roots(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_mmu_page *sp;

	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return;
	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;
		sp = page_header(root);
		mmu_sync_children(vcpu, sp);
		return;
	}
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		if (root) {
			root &= PT64_BASE_ADDR_MASK;
			sp = page_header(root);
			mmu_sync_children(vcpu, sp);
		}
	}
}

void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
{
	spin_lock(&vcpu->kvm->mmu_lock);
	mmu_sync_roots(vcpu);
	spin_unlock(&vcpu->kvm->mmu_lock);
}

static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	return vaddr;
}

static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
				u32 error_code)
{
	gfn_t gfn;
	int r;

	pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	ASSERT(vcpu);
	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));

	gfn = gva >> PAGE_SHIFT;

	return nonpaging_map(vcpu, gva & PAGE_MASK,
			     error_code & PFERR_WRITE_MASK, gfn);
}

static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
			  u32 error_code)
{
	pfn_t pfn;
	int r;
	int largepage = 0;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	unsigned long mmu_seq;

	ASSERT(vcpu);
	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
		gfn &= ~(KVM_PAGES_PER_HPAGE-1);
		largepage = 1;
	}
	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();
	pfn = gfn_to_pfn(vcpu->kvm, gfn);
	if (is_error_pfn(pfn)) {
		kvm_release_pfn_clean(pfn);
		return 1;
	}
	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq))
		goto out_unlock;
	kvm_mmu_free_some_pages(vcpu);
	r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
			 largepage, gfn, pfn);
	spin_unlock(&vcpu->kvm->mmu_lock);

	return r;

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}

static void nonpaging_free(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}

static int nonpaging_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = nonpaging_page_fault;
	context->gva_to_gpa = nonpaging_gva_to_gpa;
	context->free = nonpaging_free;
	context->prefetch_page = nonpaging_prefetch_page;
	context->sync_page = nonpaging_sync_page;
	context->invlpg = nonpaging_invlpg;
	context->root_level = 0;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.tlb_flush;
	kvm_x86_ops->tlb_flush(vcpu);
}

static void paging_new_cr3(struct kvm_vcpu *vcpu)
{
	pgprintk("%s: cr3 %lx\n", __func__, vcpu->arch.cr3);
	mmu_free_roots(vcpu);
}

static void inject_page_fault(struct kvm_vcpu *vcpu,
			      u64 addr,
			      u32 err_code)
{
	kvm_inject_page_fault(vcpu, addr, err_code);
}

static void paging_free(struct kvm_vcpu *vcpu)
{
	nonpaging_free(vcpu);
}

#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE

static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	ASSERT(is_pae(vcpu));
	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging64_page_fault;
	context->gva_to_gpa = paging64_gva_to_gpa;
	context->prefetch_page = paging64_prefetch_page;
	context->sync_page = paging64_sync_page;
	context->invlpg = paging64_invlpg;
	context->free = paging_free;
	context->root_level = level;
	context->shadow_root_level = level;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

static int paging64_init_context(struct kvm_vcpu *vcpu)
{
	return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
}

static int paging32_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging32_page_fault;
	context->gva_to_gpa = paging32_gva_to_gpa;
	context->free = paging_free;
	context->prefetch_page = paging32_prefetch_page;
	context->sync_page = paging32_sync_page;
	context->invlpg = paging32_invlpg;
	context->root_level = PT32_ROOT_LEVEL;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

static int paging32E_init_context(struct kvm_vcpu *vcpu)
{
	return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
}

fb72d167
JR
1947static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
1948{
1949 struct kvm_mmu *context = &vcpu->arch.mmu;
1950
1951 context->new_cr3 = nonpaging_new_cr3;
1952 context->page_fault = tdp_page_fault;
1953 context->free = nonpaging_free;
1954 context->prefetch_page = nonpaging_prefetch_page;
e8bc217a 1955 context->sync_page = nonpaging_sync_page;
a7052897 1956 context->invlpg = nonpaging_invlpg;
67253af5 1957 context->shadow_root_level = kvm_x86_ops->get_tdp_level();
fb72d167
JR
1958 context->root_hpa = INVALID_PAGE;
1959
1960 if (!is_paging(vcpu)) {
1961 context->gva_to_gpa = nonpaging_gva_to_gpa;
1962 context->root_level = 0;
1963 } else if (is_long_mode(vcpu)) {
1964 context->gva_to_gpa = paging64_gva_to_gpa;
1965 context->root_level = PT64_ROOT_LEVEL;
1966 } else if (is_pae(vcpu)) {
1967 context->gva_to_gpa = paging64_gva_to_gpa;
1968 context->root_level = PT32E_ROOT_LEVEL;
1969 } else {
1970 context->gva_to_gpa = paging32_gva_to_gpa;
1971 context->root_level = PT32_ROOT_LEVEL;
1972 }
1973
1974 return 0;
1975}
1976
1977static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
6aa8b732
AK
1978{
1979 ASSERT(vcpu);
ad312c7c 1980 ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
6aa8b732
AK
1981
1982 if (!is_paging(vcpu))
1983 return nonpaging_init_context(vcpu);
a9058ecd 1984 else if (is_long_mode(vcpu))
6aa8b732
AK
1985 return paging64_init_context(vcpu);
1986 else if (is_pae(vcpu))
1987 return paging32E_init_context(vcpu);
1988 else
1989 return paging32_init_context(vcpu);
1990}
1991
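/*
 * Both init paths above pick the guest walker in the same order: no
 * paging, then long mode, then PAE, then plain 32-bit.  The order
 * matters because a long-mode guest also has CR4.PAE set, so long mode
 * must be tested before PAE.  A standalone restatement of the ladder
 * (sketch only):
 */
#include <stdbool.h>
#include <stdio.h>

enum guest_walk { WALK_NONE, WALK_64, WALK_PAE, WALK_32 };

static enum guest_walk pick_walker(bool paging, bool long_mode, bool pae)
{
	if (!paging)
		return WALK_NONE;
	if (long_mode)
		return WALK_64;		/* implies pae, so test it first */
	return pae ? WALK_PAE : WALK_32;
}

int main(void)
{
	printf("%d\n", pick_walker(true, false, true));	/* WALK_PAE */
	return 0;
}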
fb72d167
JR
1992static int init_kvm_mmu(struct kvm_vcpu *vcpu)
1993{
35149e21
AL
1994 vcpu->arch.update_pte.pfn = bad_pfn;
1995
fb72d167
JR
1996 if (tdp_enabled)
1997 return init_kvm_tdp_mmu(vcpu);
1998 else
1999 return init_kvm_softmmu(vcpu);
2000}
2001
6aa8b732
AK
2002static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
2003{
2004 ASSERT(vcpu);
ad312c7c
ZX
2005 if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
2006 vcpu->arch.mmu.free(vcpu);
2007 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
6aa8b732
AK
2008 }
2009}
2010
2011int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
17c3ba9d
AK
2012{
2013 destroy_kvm_mmu(vcpu);
2014 return init_kvm_mmu(vcpu);
2015}
8668a3c4 2016EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
17c3ba9d
AK
2017
2018int kvm_mmu_load(struct kvm_vcpu *vcpu)
6aa8b732 2019{
714b93da
AK
2020 int r;
2021
e2dec939 2022 r = mmu_topup_memory_caches(vcpu);
17c3ba9d
AK
2023 if (r)
2024 goto out;
aaee2c94 2025 spin_lock(&vcpu->kvm->mmu_lock);
eb787d10 2026 kvm_mmu_free_some_pages(vcpu);
17c3ba9d 2027 mmu_alloc_roots(vcpu);
0ba73cda 2028 mmu_sync_roots(vcpu);
aaee2c94 2029 spin_unlock(&vcpu->kvm->mmu_lock);
ad312c7c 2030 kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
17c3ba9d 2031 kvm_mmu_flush_tlb(vcpu);
714b93da
AK
2032out:
2033 return r;
6aa8b732 2034}
17c3ba9d
AK
2035EXPORT_SYMBOL_GPL(kvm_mmu_load);
2036
2037void kvm_mmu_unload(struct kvm_vcpu *vcpu)
2038{
2039 mmu_free_roots(vcpu);
2040}
6aa8b732 2041
09072daf 2042static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
4db35314 2043 struct kvm_mmu_page *sp,
ac1b714e
AK
2044 u64 *spte)
2045{
2046 u64 pte;
2047 struct kvm_mmu_page *child;
2048
2049 pte = *spte;
c7addb90 2050 if (is_shadow_present_pte(pte)) {
05da4558
MT
2051 if (sp->role.level == PT_PAGE_TABLE_LEVEL ||
2052 is_large_pte(pte))
290fc38d 2053 rmap_remove(vcpu->kvm, spte);
ac1b714e
AK
2054 else {
2055 child = page_header(pte & PT64_BASE_ADDR_MASK);
90cb0529 2056 mmu_page_remove_parent_pte(child, spte);
ac1b714e
AK
2057 }
2058 }
c7addb90 2059 set_shadow_pte(spte, shadow_trap_nonpresent_pte);
05da4558
MT
2060 if (is_large_pte(pte))
2061 --vcpu->kvm->stat.lpages;
ac1b714e
AK
2062}
2063
0028425f 2064static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
4db35314 2065 struct kvm_mmu_page *sp,
0028425f 2066 u64 *spte,
489f1d65 2067 const void *new)
0028425f 2068{
30945387
MT
2069 if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
2070 if (!vcpu->arch.update_pte.largepage ||
2071 sp->role.glevels == PT32_ROOT_LEVEL) {
2072 ++vcpu->kvm->stat.mmu_pde_zapped;
2073 return;
2074 }
2075 }
0028425f 2076
4cee5764 2077 ++vcpu->kvm->stat.mmu_pte_updated;
4db35314 2078 if (sp->role.glevels == PT32_ROOT_LEVEL)
489f1d65 2079 paging32_update_pte(vcpu, sp, spte, new);
0028425f 2080 else
489f1d65 2081 paging64_update_pte(vcpu, sp, spte, new);
0028425f
AK
2082}
2083
79539cec
AK
2084static bool need_remote_flush(u64 old, u64 new)
2085{
2086 if (!is_shadow_present_pte(old))
2087 return false;
2088 if (!is_shadow_present_pte(new))
2089 return true;
2090 if ((old ^ new) & PT64_BASE_ADDR_MASK)
2091 return true;
2092 old ^= PT64_NX_MASK;
2093 new ^= PT64_NX_MASK;
2094 return (old & ~new & PT64_PERM_MASK) != 0;
2095}
2096
2097static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
2098{
2099 if (need_remote_flush(old, new))
2100 kvm_flush_remote_tlbs(vcpu->kvm);
2101 else
2102 kvm_mmu_flush_tlb(vcpu);
2103}
2104
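/*
 * need_remote_flush() above asks: could another vcpu still be using a
 * translation that the new spte no longer grants?  Losing the frame or
 * any permission forces a remote flush; NX is inverted first because
 * for NX a *set* bit means less access, not more.  A user-space model
 * with everything defined locally (presence simplified to bit 0, and
 * PERM_MASK assumed to be present|writable|user|NX as in the kernel's
 * definition; both are assumptions of this sketch):
 */
#include <stdbool.h>
#include <stdint.h>

#define BASE_ADDR_MASK ((((uint64_t)1 << 52) - 1) & ~(uint64_t)4095)
#define NX_MASK        ((uint64_t)1 << 63)
#define PERM_MASK      (1ULL | 2ULL | 4ULL | NX_MASK)

static bool need_remote_flush_model(uint64_t old, uint64_t new)
{
	if (!(old & 1))
		return false;	/* nothing was mapped; no stale entries */
	if (!(new & 1))
		return true;	/* mapping torn down */
	if ((old ^ new) & BASE_ADDR_MASK)
		return true;	/* points at a different frame now */
	old ^= NX_MASK;		/* flip NX so a set NX bit reads as a  */
	new ^= NX_MASK;		/* missing permission, like the others */
	return (old & ~new & PERM_MASK) != 0;	/* any right revoked? */
}

int main(void)
{
	/* write permission removed -> remote flush needed */
	return need_remote_flush_model(0x1003, 0x1001) ? 0 : 1;
}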
12b7d28f
AK
2105static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
2106{
ad312c7c 2107 u64 *spte = vcpu->arch.last_pte_updated;
12b7d28f 2108
7b52345e 2109 return !!(spte && (*spte & shadow_accessed_mask));
12b7d28f
AK
2110}
2111
d7824fff
AK
2112static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
2113 const u8 *new, int bytes)
2114{
2115 gfn_t gfn;
2116 int r;
2117 u64 gpte = 0;
35149e21 2118 pfn_t pfn;
d7824fff 2119
05da4558
MT
2120 vcpu->arch.update_pte.largepage = 0;
2121
d7824fff
AK
2122 if (bytes != 4 && bytes != 8)
2123 return;
2124
2125 /*
2126 * Assume that the pte write is on a page table of the same type
2127 * as the current vcpu paging mode. This is nearly always true
2128 * (might be false while changing modes). Note it is verified later
2129 * by update_pte().
2130 */
2131 if (is_pae(vcpu)) {
2132 /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
2133 if ((bytes == 4) && (gpa % 4 == 0)) {
2134 r = kvm_read_guest(vcpu->kvm, gpa & ~(u64)7, &gpte, 8);
2135 if (r)
2136 return;
2137 memcpy((void *)&gpte + (gpa % 8), new, 4);
2138 } else if ((bytes == 8) && (gpa % 8 == 0)) {
2139 memcpy((void *)&gpte, new, 8);
2140 }
2141 } else {
2142 if ((bytes == 4) && (gpa % 4 == 0))
2143 memcpy((void *)&gpte, new, 4);
2144 }
2145 if (!is_present_pte(gpte))
2146 return;
2147 gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
72dc67a6 2148
05da4558
MT
2149 if (is_large_pte(gpte) && is_largepage_backed(vcpu, gfn)) {
2150 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
2151 vcpu->arch.update_pte.largepage = 1;
2152 }
e930bffe 2153 vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;
4c2155ce 2154 smp_rmb();
35149e21 2155 pfn = gfn_to_pfn(vcpu->kvm, gfn);
72dc67a6 2156
35149e21
AL
2157 if (is_error_pfn(pfn)) {
2158 kvm_release_pfn_clean(pfn);
d196e343
AK
2159 return;
2160 }
d7824fff 2161 vcpu->arch.update_pte.gfn = gfn;
35149e21 2162 vcpu->arch.update_pte.pfn = pfn;
d7824fff
AK
2163}
2164
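/*
 * A PAE guest updates one 64-bit gpte with two 4-byte stores, so a
 * 4-byte write only shows half of the new entry.  The guess above
 * reads the aligned 8 bytes back from guest memory and overlays the
 * written half at gpa % 8.  A self-contained model of that overlay
 * (little-endian, as on x86):
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	uint64_t in_guest_memory = 0x0000000012345067ULL; /* current gpte */
	uint32_t written = 0x56789067;	/* the 4-byte store, gpa % 8 == 0 */
	uint64_t gpte;

	memcpy(&gpte, &in_guest_memory, 8);	/* kvm_read_guest() of 8 bytes */
	memcpy((char *)&gpte + 0, &written, 4);	/* overlay at (gpa % 8) */
	printf("reconstructed gpte %016llx\n", (unsigned long long)gpte);
	return 0;
}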
1b7fcd32
AK
2165static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
2166{
2167 u64 *spte = vcpu->arch.last_pte_updated;
2168
2169 if (spte
2170 && vcpu->arch.last_pte_gfn == gfn
2171 && shadow_accessed_mask
2172 && !(*spte & shadow_accessed_mask)
2173 && is_shadow_present_pte(*spte))
2174 set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
2175}
2176
09072daf 2177void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
fe551881 2178 const u8 *new, int bytes)
da4a00f0 2179{
9b7a0325 2180 gfn_t gfn = gpa >> PAGE_SHIFT;
4db35314 2181 struct kvm_mmu_page *sp;
0e7bc4b9 2182 struct hlist_node *node, *n;
9b7a0325
AK
2183 struct hlist_head *bucket;
2184 unsigned index;
489f1d65 2185 u64 entry, gentry;
9b7a0325 2186 u64 *spte;
9b7a0325 2187 unsigned offset = offset_in_page(gpa);
0e7bc4b9 2188 unsigned pte_size;
9b7a0325 2189 unsigned page_offset;
0e7bc4b9 2190 unsigned misaligned;
fce0657f 2191 unsigned quadrant;
9b7a0325 2192 int level;
86a5ba02 2193 int flooded = 0;
ac1b714e 2194 int npte;
489f1d65 2195 int r;
9b7a0325 2196
b8688d51 2197 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
d7824fff 2198 mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
aaee2c94 2199 spin_lock(&vcpu->kvm->mmu_lock);
1b7fcd32 2200 kvm_mmu_access_page(vcpu, gfn);
eb787d10 2201 kvm_mmu_free_some_pages(vcpu);
4cee5764 2202 ++vcpu->kvm->stat.mmu_pte_write;
c7addb90 2203 kvm_mmu_audit(vcpu, "pre pte write");
ad312c7c 2204 if (gfn == vcpu->arch.last_pt_write_gfn
12b7d28f 2205 && !last_updated_pte_accessed(vcpu)) {
ad312c7c
ZX
2206 ++vcpu->arch.last_pt_write_count;
2207 if (vcpu->arch.last_pt_write_count >= 3)
86a5ba02
AK
2208 flooded = 1;
2209 } else {
ad312c7c
ZX
2210 vcpu->arch.last_pt_write_gfn = gfn;
2211 vcpu->arch.last_pt_write_count = 1;
2212 vcpu->arch.last_pte_updated = NULL;
86a5ba02 2213 }
1ae0a13d 2214 index = kvm_page_table_hashfn(gfn);
f05e70ac 2215 bucket = &vcpu->kvm->arch.mmu_page_hash[index];
4db35314 2216 hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
5b5c6a5a 2217 if (sp->gfn != gfn || sp->role.metaphysical || sp->role.invalid)
9b7a0325 2218 continue;
4db35314 2219 pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
0e7bc4b9 2220 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
e925c5ba 2221 misaligned |= bytes < 4;
86a5ba02 2222 if (misaligned || flooded) {
0e7bc4b9
AK
2223 /*
2224 * Misaligned accesses are too much trouble to fix
2225 * up; also, they usually indicate a page is not used
2226 * as a page table.
86a5ba02
AK
2227 *
2228 * If we're seeing too many writes to a page,
2229 * it may no longer be a page table, or we may be
2230 * forking, in which case it is better to unmap the
2231 * page.
0e7bc4b9
AK
2232 */
2233 pgprintk("misaligned: gpa %llx bytes %d role %x\n",
4db35314 2234 gpa, bytes, sp->role.word);
07385413
MT
2235 if (kvm_mmu_zap_page(vcpu->kvm, sp))
2236 n = bucket->first;
4cee5764 2237 ++vcpu->kvm->stat.mmu_flooded;
0e7bc4b9
AK
2238 continue;
2239 }
9b7a0325 2240 page_offset = offset;
4db35314 2241 level = sp->role.level;
ac1b714e 2242 npte = 1;
4db35314 2243 if (sp->role.glevels == PT32_ROOT_LEVEL) {
ac1b714e
AK
2244 page_offset <<= 1; /* 32->64 */
2245 /*
2246 * A 32-bit pde maps 4MB while the shadow pdes map
2247 * only 2MB. So we need to double the offset again
2248 * and zap two pdes instead of one.
2249 */
2250 if (level == PT32_ROOT_LEVEL) {
6b8d0f9b 2251 page_offset &= ~7; /* kill rounding error */
ac1b714e
AK
2252 page_offset <<= 1;
2253 npte = 2;
2254 }
fce0657f 2255 quadrant = page_offset >> PAGE_SHIFT;
9b7a0325 2256 page_offset &= ~PAGE_MASK;
4db35314 2257 if (quadrant != sp->role.quadrant)
fce0657f 2258 continue;
9b7a0325 2259 }
4db35314 2260 spte = &sp->spt[page_offset / sizeof(*spte)];
489f1d65
DE
2261 if ((gpa & (pte_size - 1)) || (bytes < pte_size)) {
2262 gentry = 0;
2263 r = kvm_read_guest_atomic(vcpu->kvm,
2264 gpa & ~(u64)(pte_size - 1),
2265 &gentry, pte_size);
2266 new = (const void *)&gentry;
2267 if (r < 0)
2268 new = NULL;
2269 }
ac1b714e 2270 while (npte--) {
79539cec 2271 entry = *spte;
4db35314 2272 mmu_pte_write_zap_pte(vcpu, sp, spte);
489f1d65
DE
2273 if (new)
2274 mmu_pte_write_new_pte(vcpu, sp, spte, new);
79539cec 2275 mmu_pte_write_flush_tlb(vcpu, entry, *spte);
ac1b714e 2276 ++spte;
9b7a0325 2277 }
9b7a0325 2278 }
c7addb90 2279 kvm_mmu_audit(vcpu, "post pte write");
aaee2c94 2280 spin_unlock(&vcpu->kvm->mmu_lock);
35149e21
AL
2281 if (!is_error_pfn(vcpu->arch.update_pte.pfn)) {
2282 kvm_release_pfn_clean(vcpu->arch.update_pte.pfn);
2283 vcpu->arch.update_pte.pfn = bad_pfn;
d7824fff 2284 }
da4a00f0
AK
2285}
2286
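/*
 * The offset fixup inside kvm_mmu_pte_write() for 32-bit guests is
 * easy to get lost in: guest entries are 4 bytes but shadow entries
 * are 8, so the byte offset doubles, and a root-level 4MB pde becomes
 * two 2MB shadow pdes, doubling it again and zapping two sptes.  The
 * quadrant then names which shadow page of the set the write hit.
 * Worked standalone (PAGE_SHIFT == 12):
 */
#include <stdio.h>

int main(void)
{
	unsigned page_offset = 0x14;	/* byte offset of write in gpte page */
	int root_level = 1, npte = 1;
	unsigned quadrant;

	page_offset <<= 1;		/* 32->64: 4-byte -> 8-byte entries */
	if (root_level) {
		page_offset &= ~7;	/* kill rounding error */
		page_offset <<= 1;	/* one 4MB pde -> two 2MB pdes */
		npte = 2;
	}
	quadrant = page_offset >> 12;
	page_offset &= 0xfff;
	printf("quadrant %u, spte index %u, npte %d\n",
	       quadrant, page_offset / 8, npte);	/* 0, 10, 2 */
	return 0;
}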
a436036b
AK
2287int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
2288{
10589a46
MT
2289 gpa_t gpa;
2290 int r;
a436036b 2291
10589a46 2292 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
10589a46 2293
aaee2c94 2294 spin_lock(&vcpu->kvm->mmu_lock);
10589a46 2295 r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
aaee2c94 2296 spin_unlock(&vcpu->kvm->mmu_lock);
10589a46 2297 return r;
a436036b 2298}
577bdc49 2299EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
a436036b 2300
22d95b12 2301void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
ebeace86 2302{
f05e70ac 2303 while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES) {
4db35314 2304 struct kvm_mmu_page *sp;
ebeace86 2305
f05e70ac 2306 sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
4db35314
AK
2307 struct kvm_mmu_page, link);
2308 kvm_mmu_zap_page(vcpu->kvm, sp);
4cee5764 2309 ++vcpu->kvm->stat.mmu_recycled;
ebeace86
AK
2310 }
2311}
ebeace86 2312
3067714c
AK
2313int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
2314{
2315 int r;
2316 enum emulation_result er;
2317
ad312c7c 2318 r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
3067714c
AK
2319 if (r < 0)
2320 goto out;
2321
2322 if (!r) {
2323 r = 1;
2324 goto out;
2325 }
2326
b733bfb5
AK
2327 r = mmu_topup_memory_caches(vcpu);
2328 if (r)
2329 goto out;
2330
3067714c 2331 er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);
3067714c
AK
2332
2333 switch (er) {
2334 case EMULATE_DONE:
2335 return 1;
2336 case EMULATE_DO_MMIO:
2337 ++vcpu->stat.mmio_exits;
2338 return 0;
2339 case EMULATE_FAIL:
2340 kvm_report_emulation_failure(vcpu, "pagetable");
2341 return 1;
2342 default:
2343 BUG();
2344 }
2345out:
3067714c
AK
2346 return r;
2347}
2348EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
2349
a7052897
MT
2350void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
2351{
2352 spin_lock(&vcpu->kvm->mmu_lock);
2353 vcpu->arch.mmu.invlpg(vcpu, gva);
2354 spin_unlock(&vcpu->kvm->mmu_lock);
2355 kvm_mmu_flush_tlb(vcpu);
2356 ++vcpu->stat.invlpg;
2357}
2358EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
2359
18552672
JR
2360void kvm_enable_tdp(void)
2361{
2362 tdp_enabled = true;
2363}
2364EXPORT_SYMBOL_GPL(kvm_enable_tdp);
2365
5f4cb662
JR
2366void kvm_disable_tdp(void)
2367{
2368 tdp_enabled = false;
2369}
2370EXPORT_SYMBOL_GPL(kvm_disable_tdp);
2371
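/*
 * These two toggles are meant for the vendor modules: at hardware
 * setup time they select TDP when the CPU can walk nested page tables
 * and fall back to shadow paging otherwise.  A sketch of such a
 * caller, with a hypothetical has_hw_nested_paging() probe standing
 * in for the real EPT/NPT capability check:
 */
static void hardware_setup_sketch(void)
{
	if (has_hw_nested_paging())	/* hypothetical capability probe */
		kvm_enable_tdp();
	else
		kvm_disable_tdp();
}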
6aa8b732
AK
2372static void free_mmu_pages(struct kvm_vcpu *vcpu)
2373{
4db35314 2374 struct kvm_mmu_page *sp;
6aa8b732 2375
f05e70ac
ZX
2376 while (!list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
2377 sp = container_of(vcpu->kvm->arch.active_mmu_pages.next,
4db35314
AK
2378 struct kvm_mmu_page, link);
2379 kvm_mmu_zap_page(vcpu->kvm, sp);
8d2d73b9 2380 cond_resched();
f51234c2 2381 }
ad312c7c 2382 free_page((unsigned long)vcpu->arch.mmu.pae_root);
6aa8b732
AK
2383}
2384
2385static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
2386{
17ac10ad 2387 struct page *page;
6aa8b732
AK
2388 int i;
2389
2390 ASSERT(vcpu);
2391
f05e70ac
ZX
2392 if (vcpu->kvm->arch.n_requested_mmu_pages)
2393 vcpu->kvm->arch.n_free_mmu_pages =
2394 vcpu->kvm->arch.n_requested_mmu_pages;
82ce2c96 2395 else
f05e70ac
ZX
2396 vcpu->kvm->arch.n_free_mmu_pages =
2397 vcpu->kvm->arch.n_alloc_mmu_pages;
17ac10ad
AK
2398 /*
2399 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
2400 * Therefore we need to allocate shadow page tables in the first
2401 * 4GB of memory, which happens to fit the DMA32 zone.
2402 */
2403 page = alloc_page(GFP_KERNEL | __GFP_DMA32);
2404 if (!page)
2405 goto error_1;
ad312c7c 2406 vcpu->arch.mmu.pae_root = page_address(page);
17ac10ad 2407 for (i = 0; i < 4; ++i)
ad312c7c 2408 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
17ac10ad 2409
6aa8b732
AK
2410 return 0;
2411
2412error_1:
2413 free_mmu_pages(vcpu);
2414 return -ENOMEM;
2415}
2416
8018c27b 2417int kvm_mmu_create(struct kvm_vcpu *vcpu)
6aa8b732 2418{
6aa8b732 2419 ASSERT(vcpu);
ad312c7c 2420 ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
6aa8b732 2421
8018c27b
IM
2422 return alloc_mmu_pages(vcpu);
2423}
6aa8b732 2424
8018c27b
IM
2425int kvm_mmu_setup(struct kvm_vcpu *vcpu)
2426{
2427 ASSERT(vcpu);
ad312c7c 2428 ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
2c264957 2429
8018c27b 2430 return init_kvm_mmu(vcpu);
6aa8b732
AK
2431}
2432
2433void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
2434{
2435 ASSERT(vcpu);
2436
2437 destroy_kvm_mmu(vcpu);
2438 free_mmu_pages(vcpu);
714b93da 2439 mmu_free_memory_caches(vcpu);
6aa8b732
AK
2440}
2441
90cb0529 2442void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
6aa8b732 2443{
4db35314 2444 struct kvm_mmu_page *sp;
6aa8b732 2445
2245a28f 2446 spin_lock(&kvm->mmu_lock);
f05e70ac 2447 list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
6aa8b732
AK
2448 int i;
2449 u64 *pt;
2450
4db35314 2451 if (!test_bit(slot, &sp->slot_bitmap))
6aa8b732
AK
2452 continue;
2453
4db35314 2454 pt = sp->spt;
6aa8b732
AK
2455 for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
2456 /* avoid RMW */
9647c14c 2457 if (pt[i] & PT_WRITABLE_MASK)
6aa8b732 2458 pt[i] &= ~PT_WRITABLE_MASK;
6aa8b732 2459 }
171d595d 2460 kvm_flush_remote_tlbs(kvm);
2245a28f 2461 spin_unlock(&kvm->mmu_lock);
6aa8b732 2462}
37a7d8b0 2463
90cb0529 2464void kvm_mmu_zap_all(struct kvm *kvm)
e0fa826f 2465{
4db35314 2466 struct kvm_mmu_page *sp, *node;
e0fa826f 2467
aaee2c94 2468 spin_lock(&kvm->mmu_lock);
f05e70ac 2469 list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
07385413
MT
2470 if (kvm_mmu_zap_page(kvm, sp))
2471 node = container_of(kvm->arch.active_mmu_pages.next,
2472 struct kvm_mmu_page, link);
aaee2c94 2473 spin_unlock(&kvm->mmu_lock);
e0fa826f 2474
90cb0529 2475 kvm_flush_remote_tlbs(kvm);
e0fa826f
DL
2476}
2477
8b2cf73c 2478static void kvm_mmu_remove_one_alloc_mmu_page(struct kvm *kvm)
3ee16c81
IE
2479{
2480 struct kvm_mmu_page *page;
2481
2482 page = container_of(kvm->arch.active_mmu_pages.prev,
2483 struct kvm_mmu_page, link);
2484 kvm_mmu_zap_page(kvm, page);
2485}
2486
2487static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
2488{
2489 struct kvm *kvm;
2490 struct kvm *kvm_freed = NULL;
2491 int cache_count = 0;
2492
2493 spin_lock(&kvm_lock);
2494
2495 list_for_each_entry(kvm, &vm_list, vm_list) {
2496 int npages;
2497
5a4c9288
MT
2498 if (!down_read_trylock(&kvm->slots_lock))
2499 continue;
3ee16c81
IE
2500 spin_lock(&kvm->mmu_lock);
2501 npages = kvm->arch.n_alloc_mmu_pages -
2502 kvm->arch.n_free_mmu_pages;
2503 cache_count += npages;
2504 if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
2505 kvm_mmu_remove_one_alloc_mmu_page(kvm);
2506 cache_count--;
2507 kvm_freed = kvm;
2508 }
2509 nr_to_scan--;
2510
2511 spin_unlock(&kvm->mmu_lock);
5a4c9288 2512 up_read(&kvm->slots_lock);
3ee16c81
IE
2513 }
2514 if (kvm_freed)
2515 list_move_tail(&kvm_freed->vm_list, &vm_list);
2516
2517 spin_unlock(&kvm_lock);
2518
2519 return cache_count;
2520}
2521
2522static struct shrinker mmu_shrinker = {
2523 .shrink = mmu_shrink,
2524 .seeks = DEFAULT_SEEKS * 10,
2525};
2526
2ddfd20e 2527static void mmu_destroy_caches(void)
b5a33a75
AK
2528{
2529 if (pte_chain_cache)
2530 kmem_cache_destroy(pte_chain_cache);
2531 if (rmap_desc_cache)
2532 kmem_cache_destroy(rmap_desc_cache);
d3d25b04
AK
2533 if (mmu_page_header_cache)
2534 kmem_cache_destroy(mmu_page_header_cache);
b5a33a75
AK
2535}
2536
3ee16c81
IE
2537void kvm_mmu_module_exit(void)
2538{
2539 mmu_destroy_caches();
2540 unregister_shrinker(&mmu_shrinker);
2541}
2542
b5a33a75
AK
2543int kvm_mmu_module_init(void)
2544{
2545 pte_chain_cache = kmem_cache_create("kvm_pte_chain",
2546 sizeof(struct kvm_pte_chain),
20c2df83 2547 0, 0, NULL);
b5a33a75
AK
2548 if (!pte_chain_cache)
2549 goto nomem;
2550 rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
2551 sizeof(struct kvm_rmap_desc),
20c2df83 2552 0, 0, NULL);
b5a33a75
AK
2553 if (!rmap_desc_cache)
2554 goto nomem;
2555
d3d25b04
AK
2556 mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
2557 sizeof(struct kvm_mmu_page),
20c2df83 2558 0, 0, NULL);
d3d25b04
AK
2559 if (!mmu_page_header_cache)
2560 goto nomem;
2561
3ee16c81
IE
2562 register_shrinker(&mmu_shrinker);
2563
b5a33a75
AK
2564 return 0;
2565
2566nomem:
3ee16c81 2567 mmu_destroy_caches();
b5a33a75
AK
2568 return -ENOMEM;
2569}
2570
3ad82a7e
ZX
2571/*
2572 * Calculate mmu pages needed for kvm.
2573 */
2574unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
2575{
2576 int i;
2577 unsigned int nr_mmu_pages;
2578 unsigned int nr_pages = 0;
2579
2580 for (i = 0; i < kvm->nmemslots; i++)
2581 nr_pages += kvm->memslots[i].npages;
2582
2583 nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
2584 nr_mmu_pages = max(nr_mmu_pages,
2585 (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
2586
2587 return nr_mmu_pages;
2588}
2589
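/*
 * Worked example for the sizing above, assuming the values these
 * constants have elsewhere in the tree (KVM_PERMILLE_MMU_PAGES == 20,
 * KVM_MIN_ALLOC_MMU_PAGES == 64): a guest with 1GB of memory gets a
 * shadow-page budget of 2% of its page count.
 */
#include <stdio.h>

int main(void)
{
	unsigned int nr_pages = 1u << 18;	/* 1GB of 4KB guest pages */
	unsigned int nr_mmu_pages = nr_pages * 20 / 1000;

	if (nr_mmu_pages < 64)
		nr_mmu_pages = 64;	/* floor for tiny guests */
	printf("%u shadow pages\n", nr_mmu_pages);	/* prints 5242 */
	return 0;
}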
2f333bcb
MT
2590static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer,
2591 unsigned len)
2592{
2593 if (len > buffer->len)
2594 return NULL;
2595 return buffer->ptr;
2596}
2597
2598static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer,
2599 unsigned len)
2600{
2601 void *ret;
2602
2603 ret = pv_mmu_peek_buffer(buffer, len);
2604 if (!ret)
2605 return ret;
2606 buffer->ptr += len;
2607 buffer->len -= len;
2608 buffer->processed += len;
2609 return ret;
2610}
2611
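/*
 * The two helpers above implement peek-then-consume over the guest's
 * op buffer: peek validates that a full record is available, read
 * advances ptr/len and accounts it in processed (which kvm_pv_mmu_op()
 * later reports back).  A self-contained model of the consume step:
 */
#include <stddef.h>

struct op_buffer {
	const char *ptr;
	size_t len;
	size_t processed;
};

static const void *read_buffer(struct op_buffer *b, size_t n)
{
	const void *rec;

	if (n > b->len)
		return NULL;	/* truncated record: caller stops cleanly */
	rec = b->ptr;
	b->ptr += n;		/* consume the record */
	b->len -= n;
	b->processed += n;	/* total handed back to the guest */
	return rec;
}

int main(void)
{
	struct op_buffer b = { "12345678", 8, 0 };

	return read_buffer(&b, 8) ? 0 : 1;
}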
2612static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
2613 gpa_t addr, gpa_t value)
2614{
2615 int bytes = 8;
2616 int r;
2617
2618 if (!is_long_mode(vcpu) && !is_pae(vcpu))
2619 bytes = 4;
2620
2621 r = mmu_topup_memory_caches(vcpu);
2622 if (r)
2623 return r;
2624
3200f405 2625 if (!emulator_write_phys(vcpu, addr, &value, bytes))
2f333bcb
MT
2626 return -EFAULT;
2627
2628 return 1;
2629}
2630
2631static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
2632{
2633 kvm_x86_ops->tlb_flush(vcpu);
2634 return 1;
2635}
2636
2637static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr)
2638{
2639 spin_lock(&vcpu->kvm->mmu_lock);
2640 mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT);
2641 spin_unlock(&vcpu->kvm->mmu_lock);
2642 return 1;
2643}
2644
2645static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu,
2646 struct kvm_pv_mmu_op_buffer *buffer)
2647{
2648 struct kvm_mmu_op_header *header;
2649
2650 header = pv_mmu_peek_buffer(buffer, sizeof *header);
2651 if (!header)
2652 return 0;
2653 switch (header->op) {
2654 case KVM_MMU_OP_WRITE_PTE: {
2655 struct kvm_mmu_op_write_pte *wpte;
2656
2657 wpte = pv_mmu_read_buffer(buffer, sizeof *wpte);
2658 if (!wpte)
2659 return 0;
2660 return kvm_pv_mmu_write(vcpu, wpte->pte_phys,
2661 wpte->pte_val);
2662 }
2663 case KVM_MMU_OP_FLUSH_TLB: {
2664 struct kvm_mmu_op_flush_tlb *ftlb;
2665
2666 ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb);
2667 if (!ftlb)
2668 return 0;
2669 return kvm_pv_mmu_flush_tlb(vcpu);
2670 }
2671 case KVM_MMU_OP_RELEASE_PT: {
2672 struct kvm_mmu_op_release_pt *rpt;
2673
2674 rpt = pv_mmu_read_buffer(buffer, sizeof *rpt);
2675 if (!rpt)
2676 return 0;
2677 return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys);
2678 }
2679 default: return 0;
2680 }
2681}
2682
2683int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
2684 gpa_t addr, unsigned long *ret)
2685{
2686 int r;
6ad18fba 2687 struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer;
2f333bcb 2688
6ad18fba
DH
2689 buffer->ptr = buffer->buf;
2690 buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf);
2691 buffer->processed = 0;
2f333bcb 2692
6ad18fba 2693 r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len);
2f333bcb
MT
2694 if (r)
2695 goto out;
2696
6ad18fba
DH
2697 while (buffer->len) {
2698 r = kvm_pv_mmu_op_one(vcpu, buffer);
2f333bcb
MT
2699 if (r < 0)
2700 goto out;
2701 if (r == 0)
2702 break;
2703 }
2704
2705 r = 1;
2706out:
6ad18fba 2707 *ret = buffer->processed;
2f333bcb
MT
2708 return r;
2709}
2710
37a7d8b0
AK
2711#ifdef AUDIT
2712
2713static const char *audit_msg;
2714
2715static gva_t canonicalize(gva_t gva)
2716{
2717#ifdef CONFIG_X86_64
2718 gva = (long long)(gva << 16) >> 16;
2719#endif
2720 return gva;
2721}
2722
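/*
 * canonicalize() sign-extends bit 47: shifting the address up 16 bits
 * and arithmetically back down replicates bit 47 through the top 16
 * bits, yielding a canonical x86_64 address.  A worked example:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t gva = 0x0000ffffffffffffULL;	/* bit 47 set, top bits clear */
	uint64_t canon = (uint64_t)(((int64_t)(gva << 16)) >> 16);

	printf("%016llx -> %016llx\n",	/* 0000ffffffffffff -> ffff... */
	       (unsigned long long)gva, (unsigned long long)canon);
	return 0;
}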
2723static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
2724 gva_t va, int level)
2725{
2726 u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
2727 int i;
2728 gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));
2729
2730 for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
2731 u64 ent = pt[i];
2732
c7addb90 2733 if (ent == shadow_trap_nonpresent_pte)
37a7d8b0
AK
2734 continue;
2735
2736 va = canonicalize(va);
c7addb90
AK
2737 if (level > 1) {
2738 if (ent == shadow_notrap_nonpresent_pte)
2739 printk(KERN_ERR "audit: (%s) nontrapping pte"
2740 " in nonleaf level: levels %d gva %lx"
2741 " level %d pte %llx\n", audit_msg,
ad312c7c 2742 vcpu->arch.mmu.root_level, va, level, ent);
c7addb90 2743
37a7d8b0 2744 audit_mappings_page(vcpu, ent, va, level - 1);
c7addb90 2745 } else {
ad312c7c 2746 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
35149e21 2747 pfn_t pfn = gpa_to_pfn(vcpu, gpa);
35149e21 2748 hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT;
c7addb90 2749 if (is_shadow_present_pte(ent)
37a7d8b0 2750 && (ent & PT64_BASE_ADDR_MASK) != hpa)
c7addb90
AK
2751 printk(KERN_ERR "xx audit error: (%s) levels %d"
2752 " gva %lx gpa %llx hpa %llx ent %llx %d\n",
ad312c7c 2753 audit_msg, vcpu->arch.mmu.root_level,
d77c26fc
MD
2754 va, gpa, hpa, ent,
2755 is_shadow_present_pte(ent));
c7addb90
AK
2756 else if (ent == shadow_notrap_nonpresent_pte
2757 && !is_error_hpa(hpa))
2758 printk(KERN_ERR "audit: (%s) notrap shadow,"
2759 " valid guest gva %lx\n", audit_msg, va);
35149e21 2760 kvm_release_pfn_clean(pfn);
c7addb90 2761
37a7d8b0
AK
2762 }
2763 }
2764}
2765
2766static void audit_mappings(struct kvm_vcpu *vcpu)
2767{
1ea252af 2768 unsigned i;
37a7d8b0 2769
ad312c7c
ZX
2770 if (vcpu->arch.mmu.root_level == 4)
2771 audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
37a7d8b0
AK
2772 else
2773 for (i = 0; i < 4; ++i)
ad312c7c 2774 if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
37a7d8b0 2775 audit_mappings_page(vcpu,
ad312c7c 2776 vcpu->arch.mmu.pae_root[i],
37a7d8b0
AK
2777 i << 30,
2778 2);
2779}
2780
2781static int count_rmaps(struct kvm_vcpu *vcpu)
2782{
2783 int nmaps = 0;
2784 int i, j, k;
2785
2786 for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
2787 struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
2788 struct kvm_rmap_desc *d;
2789
2790 for (j = 0; j < m->npages; ++j) {
290fc38d 2791 unsigned long *rmapp = &m->rmap[j];
37a7d8b0 2792
290fc38d 2793 if (!*rmapp)
37a7d8b0 2794 continue;
290fc38d 2795 if (!(*rmapp & 1)) {
37a7d8b0
AK
2796 ++nmaps;
2797 continue;
2798 }
290fc38d 2799 d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
37a7d8b0
AK
2800 while (d) {
2801 for (k = 0; k < RMAP_EXT; ++k)
2802 if (d->shadow_ptes[k])
2803 ++nmaps;
2804 else
2805 break;
2806 d = d->more;
2807 }
2808 }
2809 }
2810 return nmaps;
2811}
2812
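/*
 * The walk above decodes the rmap's tagged pointer: with the low bit
 * clear, *rmapp is itself the single spte pointer; with it set, the
 * remaining bits point at a kvm_rmap_desc chain holding up to RMAP_EXT
 * sptes per node.  A minimal model of the decode (addresses made up):
 */
#include <stdio.h>

int main(void)
{
	unsigned long rmapp = 0x12345008UL | 1;	/* desc chain, tag bit set */

	if (!rmapp)
		printf("no mappings\n");
	else if (!(rmapp & 1))
		printf("single spte at %lx\n", rmapp);
	else
		printf("desc chain at %lx\n", rmapp & ~1UL);
	return 0;
}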
2813static int count_writable_mappings(struct kvm_vcpu *vcpu)
2814{
2815 int nmaps = 0;
4db35314 2816 struct kvm_mmu_page *sp;
37a7d8b0
AK
2817 int i;
2818
f05e70ac 2819 list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
4db35314 2820 u64 *pt = sp->spt;
37a7d8b0 2821
4db35314 2822 if (sp->role.level != PT_PAGE_TABLE_LEVEL)
37a7d8b0
AK
2823 continue;
2824
2825 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
2826 u64 ent = pt[i];
2827
2828 if (!(ent & PT_PRESENT_MASK))
2829 continue;
2830 if (!(ent & PT_WRITABLE_MASK))
2831 continue;
2832 ++nmaps;
2833 }
2834 }
2835 return nmaps;
2836}
2837
2838static void audit_rmap(struct kvm_vcpu *vcpu)
2839{
2840 int n_rmap = count_rmaps(vcpu);
2841 int n_actual = count_writable_mappings(vcpu);
2842
2843 if (n_rmap != n_actual)
2844 printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
b8688d51 2845 __func__, audit_msg, n_rmap, n_actual);
37a7d8b0
AK
2846}
2847
2848static void audit_write_protection(struct kvm_vcpu *vcpu)
2849{
4db35314 2850 struct kvm_mmu_page *sp;
290fc38d
IE
2851 struct kvm_memory_slot *slot;
2852 unsigned long *rmapp;
2853 gfn_t gfn;
37a7d8b0 2854
f05e70ac 2855 list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
4db35314 2856 if (sp->role.metaphysical)
37a7d8b0
AK
2857 continue;
2858
4db35314
AK
2859 slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
2860 gfn = unalias_gfn(vcpu->kvm, sp->gfn);
290fc38d
IE
2861 rmapp = &slot->rmap[gfn - slot->base_gfn];
2862 if (*rmapp)
37a7d8b0
AK
2863 printk(KERN_ERR "%s: (%s) shadow page has writable"
2864 " mappings: gfn %lx role %x\n",
b8688d51 2865 __func__, audit_msg, sp->gfn,
4db35314 2866 sp->role.word);
37a7d8b0
AK
2867 }
2868}
2869
2870static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
2871{
2872 int olddbg = dbg;
2873
2874 dbg = 0;
2875 audit_msg = msg;
2876 audit_rmap(vcpu);
2877 audit_write_protection(vcpu);
2878 audit_mappings(vcpu);
2879 dbg = olddbg;
2880}
2881
2882#endif