KVM: MMU: Keep a reverse mapping of non-writable translations
drivers/kvm/mmu.c
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "vmx.h"
#include "kvm.h"

#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/module.h>

#include <asm/page.h>
#include <asm/cmpxchg.h>

#undef MMU_DEBUG

#undef AUDIT

#ifdef AUDIT
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
#else
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
#endif

#ifdef MMU_DEBUG

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)

#else

#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)

#endif

#if defined(MMU_DEBUG) || defined(AUDIT)
static int dbg = 1;
#endif

#ifndef MMU_DEBUG
#define ASSERT(x) do { } while (0)
#else
#define ASSERT(x) \
        if (!(x)) { \
                printk(KERN_WARNING "assertion failed %s:%d: %s\n", \
                       __FILE__, __LINE__, #x); \
        }
#endif

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << 2)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_MASK (1ULL << 5)
#define PT_DIRTY_MASK (1ULL << 6)
#define PT_PAGE_SIZE_MASK (1ULL << 7)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_MASK (1ULL << 63)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
        (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)


#define PT_FIRST_AVAIL_BITS_SHIFT 9
#define PT64_SECOND_AVAIL_BITS_SHIFT 52

#define PT_SHADOW_IO_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)

#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
        (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_LEVEL_MASK(level) \
        (((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))

#define PT64_INDEX(address, level)\
        (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))


#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
        (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LEVEL_MASK(level) \
        (((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))

#define PT32_INDEX(address, level)\
        (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))


#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#define PT64_DIR_BASE_ADDR_MASK \
        (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
        (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))


#define PFERR_PRESENT_MASK (1U << 0)
#define PFERR_WRITE_MASK (1U << 1)
#define PFERR_USER_MASK (1U << 2)
#define PFERR_FETCH_MASK (1U << 4)

#define PT64_ROOT_LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1

#define RMAP_EXT 4

struct kvm_rmap_desc {
        u64 *shadow_ptes[RMAP_EXT];
        struct kvm_rmap_desc *more;
};

static struct kmem_cache *pte_chain_cache;
static struct kmem_cache *rmap_desc_cache;
static struct kmem_cache *mmu_page_header_cache;

static u64 __read_mostly shadow_trap_nonpresent_pte;
static u64 __read_mostly shadow_notrap_nonpresent_pte;

void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
{
        shadow_trap_nonpresent_pte = trap_pte;
        shadow_notrap_nonpresent_pte = notrap_pte;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);

static int is_write_protection(struct kvm_vcpu *vcpu)
{
        return vcpu->cr0 & X86_CR0_WP;
}

static int is_cpuid_PSE36(void)
{
        return 1;
}

static int is_nx(struct kvm_vcpu *vcpu)
{
        return vcpu->shadow_efer & EFER_NX;
}

static int is_present_pte(unsigned long pte)
{
        return pte & PT_PRESENT_MASK;
}

static int is_shadow_present_pte(u64 pte)
{
        pte &= ~PT_SHADOW_IO_MARK;
        return pte != shadow_trap_nonpresent_pte
                && pte != shadow_notrap_nonpresent_pte;
}

static int is_writeble_pte(unsigned long pte)
{
        return pte & PT_WRITABLE_MASK;
}

static int is_dirty_pte(unsigned long pte)
{
        return pte & PT_DIRTY_MASK;
}

static int is_io_pte(unsigned long pte)
{
        return pte & PT_SHADOW_IO_MARK;
}

static int is_rmap_pte(u64 pte)
{
        return pte != shadow_trap_nonpresent_pte
                && pte != shadow_notrap_nonpresent_pte;
}

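/*
 * Shadow ptes are 64 bits wide even on a 32-bit host, so they are
 * written with set_64bit() to keep the hardware page-table walker from
 * observing a half-updated entry.
 */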
static void set_shadow_pte(u64 *sptep, u64 spte)
{
#ifdef CONFIG_X86_64
        set_64bit((unsigned long *)sptep, spte);
#else
        set_64bit((unsigned long long *)sptep, spte);
#endif
}

static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
                                  struct kmem_cache *base_cache, int min)
{
        void *obj;

        if (cache->nobjs >= min)
                return 0;
        while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
                obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
                if (!obj)
                        return -ENOMEM;
                cache->objects[cache->nobjs++] = obj;
        }
        return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
        while (mc->nobjs)
                kfree(mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
                                       int min)
{
        struct page *page;

        if (cache->nobjs >= min)
                return 0;
        while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
                page = alloc_page(GFP_KERNEL);
                if (!page)
                        return -ENOMEM;
                set_page_private(page, 0);
                cache->objects[cache->nobjs++] = page_address(page);
        }
        return 0;
}

static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
{
        while (mc->nobjs)
                free_page((unsigned long)mc->objects[--mc->nobjs]);
}

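/*
 * Pre-fill the per-vcpu object caches before entering the shadow MMU
 * proper; the update paths can then take objects via
 * mmu_memory_cache_alloc() without risking an allocation failure (or a
 * sleeping allocation) in the middle of a shadow page table update.
 */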
static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
{
        int r;

        kvm_mmu_free_some_pages(vcpu);
        r = mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache,
                                   pte_chain_cache, 4);
        if (r)
                goto out;
        r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
                                   rmap_desc_cache, 1);
        if (r)
                goto out;
        r = mmu_topup_memory_cache_page(&vcpu->mmu_page_cache, 8);
        if (r)
                goto out;
        r = mmu_topup_memory_cache(&vcpu->mmu_page_header_cache,
                                   mmu_page_header_cache, 4);
out:
        return r;
}

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
        mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);
        mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache);
        mmu_free_memory_cache_page(&vcpu->mmu_page_cache);
        mmu_free_memory_cache(&vcpu->mmu_page_header_cache);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
                                    size_t size)
{
        void *p;

        BUG_ON(!mc->nobjs);
        p = mc->objects[--mc->nobjs];
        memset(p, 0, size);
        return p;
}

static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
{
        return mmu_memory_cache_alloc(&vcpu->mmu_pte_chain_cache,
                                      sizeof(struct kvm_pte_chain));
}

static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
{
        kfree(pc);
}

static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
{
        return mmu_memory_cache_alloc(&vcpu->mmu_rmap_desc_cache,
                                      sizeof(struct kvm_rmap_desc));
}

static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
{
        kfree(rd);
}

/*
 * Take gfn and return the reverse mapping to it.
 * Note: gfn must be unaliased before this function gets called.
 */

static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *slot;

        slot = gfn_to_memslot(kvm, gfn);
        return &slot->rmap[gfn - slot->base_gfn];
}

/*
 * Reverse mapping data structures:
 *
 * If rmapp bit zero is zero, then rmapp points to the shadow page table
 * entry that points to page_address(page).
 *
 * If rmapp bit zero is one, (rmapp & ~1) points to a struct kvm_rmap_desc
 * containing more mappings.
 */
static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
        struct kvm_mmu_page *page;
        struct kvm_rmap_desc *desc;
        unsigned long *rmapp;
        int i;

        if (!is_rmap_pte(*spte))
                return;
        gfn = unalias_gfn(vcpu->kvm, gfn);
        page = page_header(__pa(spte));
        page->gfns[spte - page->spt] = gfn;
        rmapp = gfn_to_rmap(vcpu->kvm, gfn);
        if (!*rmapp) {
                rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
                *rmapp = (unsigned long)spte;
        } else if (!(*rmapp & 1)) {
                rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
                desc = mmu_alloc_rmap_desc(vcpu);
                desc->shadow_ptes[0] = (u64 *)*rmapp;
                desc->shadow_ptes[1] = spte;
                *rmapp = (unsigned long)desc | 1;
        } else {
                rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
                desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
                while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
                        desc = desc->more;
                if (desc->shadow_ptes[RMAP_EXT-1]) {
                        desc->more = mmu_alloc_rmap_desc(vcpu);
                        desc = desc->more;
                }
                for (i = 0; desc->shadow_ptes[i]; ++i)
                        ;
                desc->shadow_ptes[i] = spte;
        }
}

static void rmap_desc_remove_entry(unsigned long *rmapp,
                                   struct kvm_rmap_desc *desc,
                                   int i,
                                   struct kvm_rmap_desc *prev_desc)
{
        int j;

        for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
                ;
        desc->shadow_ptes[i] = desc->shadow_ptes[j];
        desc->shadow_ptes[j] = NULL;
        if (j != 0)
                return;
        if (!prev_desc && !desc->more)
                *rmapp = (unsigned long)desc->shadow_ptes[0];
        else
                if (prev_desc)
                        prev_desc->more = desc->more;
                else
                        *rmapp = (unsigned long)desc->more | 1;
        mmu_free_rmap_desc(desc);
}

static void rmap_remove(struct kvm *kvm, u64 *spte)
{
        struct kvm_rmap_desc *desc;
        struct kvm_rmap_desc *prev_desc;
        struct kvm_mmu_page *page;
        unsigned long *rmapp;
        int i;

        if (!is_rmap_pte(*spte))
                return;
        page = page_header(__pa(spte));
        rmapp = gfn_to_rmap(kvm, page->gfns[spte - page->spt]);
        if (!*rmapp) {
                printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
                BUG();
        } else if (!(*rmapp & 1)) {
                rmap_printk("rmap_remove: %p %llx 1->0\n", spte, *spte);
                if ((u64 *)*rmapp != spte) {
                        printk(KERN_ERR "rmap_remove: %p %llx 1->BUG\n",
                               spte, *spte);
                        BUG();
                }
                *rmapp = 0;
        } else {
                rmap_printk("rmap_remove: %p %llx many->many\n", spte, *spte);
                desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
                prev_desc = NULL;
                while (desc) {
                        for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
                                if (desc->shadow_ptes[i] == spte) {
                                        rmap_desc_remove_entry(rmapp,
                                                               desc, i,
                                                               prev_desc);
                                        return;
                                }
                        prev_desc = desc;
                        desc = desc->more;
                }
                BUG();
        }
}

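/*
 * Iterate over the sptes that map a gfn: pass spte == NULL to get the
 * first mapping, then pass the previous return value to get the next
 * one.  Returns NULL when the chain is exhausted.
 */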
static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
{
        struct kvm_rmap_desc *desc;
        struct kvm_rmap_desc *prev_desc;
        u64 *prev_spte;
        int i;

        if (!*rmapp)
                return NULL;
        else if (!(*rmapp & 1)) {
                if (!spte)
                        return (u64 *)*rmapp;
                return NULL;
        }
        desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
        prev_desc = NULL;
        prev_spte = NULL;
        while (desc) {
                for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) {
                        if (prev_spte == spte)
                                return desc->shadow_ptes[i];
                        prev_spte = desc->shadow_ptes[i];
                }
                desc = desc->more;
        }
        return NULL;
}

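/*
 * Clear the writable bit from every spte that maps @gfn, so that the
 * next guest write to the page traps into the host and can be handled
 * by the shadow MMU.
 */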
static void rmap_write_protect(struct kvm *kvm, u64 gfn)
{
        unsigned long *rmapp;
        u64 *spte;

        gfn = unalias_gfn(kvm, gfn);
        rmapp = gfn_to_rmap(kvm, gfn);

        spte = rmap_next(kvm, rmapp, NULL);
        while (spte) {
                BUG_ON(!spte);
                BUG_ON(!(*spte & PT_PRESENT_MASK));
                rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
                if (is_writeble_pte(*spte))
                        set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
                kvm_flush_remote_tlbs(kvm);
                spte = rmap_next(kvm, rmapp, spte);
        }
}

#ifdef MMU_DEBUG
static int is_empty_shadow_page(u64 *spt)
{
        u64 *pos;
        u64 *end;

        for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
                if ((*pos & ~PT_SHADOW_IO_MARK) != shadow_trap_nonpresent_pte) {
                        printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
                               pos, *pos);
                        return 0;
                }
        return 1;
}
#endif

static void kvm_mmu_free_page(struct kvm *kvm,
                              struct kvm_mmu_page *page_head)
{
        ASSERT(is_empty_shadow_page(page_head->spt));
        list_del(&page_head->link);
        __free_page(virt_to_page(page_head->spt));
        __free_page(virt_to_page(page_head->gfns));
        kfree(page_head);
        ++kvm->n_free_mmu_pages;
}

static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
        return gfn;
}

static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
                                               u64 *parent_pte)
{
        struct kvm_mmu_page *page;

        if (!vcpu->kvm->n_free_mmu_pages)
                return NULL;

        page = mmu_memory_cache_alloc(&vcpu->mmu_page_header_cache,
                                      sizeof *page);
        page->spt = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
        page->gfns = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
        set_page_private(virt_to_page(page->spt), (unsigned long)page);
        list_add(&page->link, &vcpu->kvm->active_mmu_pages);
        ASSERT(is_empty_shadow_page(page->spt));
        page->slot_bitmap = 0;
        page->multimapped = 0;
        page->parent_pte = parent_pte;
        --vcpu->kvm->n_free_mmu_pages;
        return page;
}

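/*
 * A shadow page starts out with a single parent_pte pointer; only when
 * a second parent appears is it switched to "multimapped" mode, where
 * the parents are kept in a chain of kvm_pte_chain blocks.
 */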
static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
                                    struct kvm_mmu_page *page, u64 *parent_pte)
{
        struct kvm_pte_chain *pte_chain;
        struct hlist_node *node;
        int i;

        if (!parent_pte)
                return;
        if (!page->multimapped) {
                u64 *old = page->parent_pte;

                if (!old) {
                        page->parent_pte = parent_pte;
                        return;
                }
                page->multimapped = 1;
                pte_chain = mmu_alloc_pte_chain(vcpu);
                INIT_HLIST_HEAD(&page->parent_ptes);
                hlist_add_head(&pte_chain->link, &page->parent_ptes);
                pte_chain->parent_ptes[0] = old;
        }
        hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link) {
                if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
                        continue;
                for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
                        if (!pte_chain->parent_ptes[i]) {
                                pte_chain->parent_ptes[i] = parent_pte;
                                return;
                        }
        }
        pte_chain = mmu_alloc_pte_chain(vcpu);
        BUG_ON(!pte_chain);
        hlist_add_head(&pte_chain->link, &page->parent_ptes);
        pte_chain->parent_ptes[0] = parent_pte;
}

static void mmu_page_remove_parent_pte(struct kvm_mmu_page *page,
                                       u64 *parent_pte)
{
        struct kvm_pte_chain *pte_chain;
        struct hlist_node *node;
        int i;

        if (!page->multimapped) {
                BUG_ON(page->parent_pte != parent_pte);
                page->parent_pte = NULL;
                return;
        }
        hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link)
                for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
                        if (!pte_chain->parent_ptes[i])
                                break;
                        if (pte_chain->parent_ptes[i] != parent_pte)
                                continue;
                        while (i + 1 < NR_PTE_CHAIN_ENTRIES
                               && pte_chain->parent_ptes[i + 1]) {
                                pte_chain->parent_ptes[i]
                                        = pte_chain->parent_ptes[i + 1];
                                ++i;
                        }
                        pte_chain->parent_ptes[i] = NULL;
                        if (i == 0) {
                                hlist_del(&pte_chain->link);
                                mmu_free_pte_chain(pte_chain);
                                if (hlist_empty(&page->parent_ptes)) {
                                        page->multimapped = 0;
                                        page->parent_pte = NULL;
                                }
                        }
                        return;
                }
        BUG();
}

static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm,
                                                gfn_t gfn)
{
        unsigned index;
        struct hlist_head *bucket;
        struct kvm_mmu_page *page;
        struct hlist_node *node;

        pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
        index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
        bucket = &kvm->mmu_page_hash[index];
        hlist_for_each_entry(page, node, bucket, hash_link)
                if (page->gfn == gfn && !page->role.metaphysical) {
                        pgprintk("%s: found role %x\n",
                                 __FUNCTION__, page->role.word);
                        return page;
                }
        return NULL;
}

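/*
 * Find a shadow page for (gfn, role), or create one.  The role encodes
 * the guest paging level and, for 32-bit guests shadowed by 64-bit
 * page tables, a quadrant number, since one guest page table then
 * corresponds to several shadow pages.  New non-metaphysical pages are
 * write-protected so that guest pte updates can be trapped and
 * reflected into the shadow.
 */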
static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                                             gfn_t gfn,
                                             gva_t gaddr,
                                             unsigned level,
                                             int metaphysical,
                                             unsigned hugepage_access,
                                             u64 *parent_pte)
{
        union kvm_mmu_page_role role;
        unsigned index;
        unsigned quadrant;
        struct hlist_head *bucket;
        struct kvm_mmu_page *page;
        struct hlist_node *node;

        role.word = 0;
        role.glevels = vcpu->mmu.root_level;
        role.level = level;
        role.metaphysical = metaphysical;
        role.hugepage_access = hugepage_access;
        if (vcpu->mmu.root_level <= PT32_ROOT_LEVEL) {
                quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
                quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
                role.quadrant = quadrant;
        }
        pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
                 gfn, role.word);
        index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
        bucket = &vcpu->kvm->mmu_page_hash[index];
        hlist_for_each_entry(page, node, bucket, hash_link)
                if (page->gfn == gfn && page->role.word == role.word) {
                        mmu_page_add_parent_pte(vcpu, page, parent_pte);
                        pgprintk("%s: found\n", __FUNCTION__);
                        return page;
                }
        page = kvm_mmu_alloc_page(vcpu, parent_pte);
        if (!page)
                return page;
        pgprintk("%s: adding gfn %lx role %x\n", __FUNCTION__, gfn, role.word);
        page->gfn = gfn;
        page->role = role;
        hlist_add_head(&page->hash_link, bucket);
        vcpu->mmu.prefetch_page(vcpu, page);
        if (!metaphysical)
                rmap_write_protect(vcpu->kvm, gfn);
        return page;
}

static void kvm_mmu_page_unlink_children(struct kvm *kvm,
                                         struct kvm_mmu_page *page)
{
        unsigned i;
        u64 *pt;
        u64 ent;

        pt = page->spt;

        if (page->role.level == PT_PAGE_TABLE_LEVEL) {
                for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
                        if (is_shadow_present_pte(pt[i]))
                                rmap_remove(kvm, &pt[i]);
                        pt[i] = shadow_trap_nonpresent_pte;
                }
                kvm_flush_remote_tlbs(kvm);
                return;
        }

        for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
                ent = pt[i];

                pt[i] = shadow_trap_nonpresent_pte;
                if (!is_shadow_present_pte(ent))
                        continue;
                ent &= PT64_BASE_ADDR_MASK;
                mmu_page_remove_parent_pte(page_header(ent), &pt[i]);
        }
        kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_put_page(struct kvm_mmu_page *page,
                             u64 *parent_pte)
{
        mmu_page_remove_parent_pte(page, parent_pte);
}

static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
{
        int i;

        for (i = 0; i < KVM_MAX_VCPUS; ++i)
                if (kvm->vcpus[i])
                        kvm->vcpus[i]->last_pte_updated = NULL;
}

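/*
 * Zap a shadow page: detach it from all of its parent sptes, unlink
 * its children, and either free it immediately or, if it is still in
 * use as a root, keep it on the active list until the root is dropped.
 */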
static void kvm_mmu_zap_page(struct kvm *kvm,
                             struct kvm_mmu_page *page)
{
        u64 *parent_pte;

        while (page->multimapped || page->parent_pte) {
                if (!page->multimapped)
                        parent_pte = page->parent_pte;
                else {
                        struct kvm_pte_chain *chain;

                        chain = container_of(page->parent_ptes.first,
                                             struct kvm_pte_chain, link);
                        parent_pte = chain->parent_ptes[0];
                }
                BUG_ON(!parent_pte);
                kvm_mmu_put_page(page, parent_pte);
                set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
        }
        kvm_mmu_page_unlink_children(kvm, page);
        if (!page->root_count) {
                hlist_del(&page->hash_link);
                kvm_mmu_free_page(kvm, page);
        } else
                list_move(&page->link, &kvm->active_mmu_pages);
        kvm_mmu_reset_last_pte_updated(kvm);
}

/*
 * Changing the number of mmu pages allocated to the vm
 * Note: if kvm_nr_mmu_pages is too small, you will get a deadlock
 */
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
{
        /*
         * If we set the number of mmu pages to be smaller than the
         * number of active pages, we must free some mmu pages before we
         * can change the value.
         */

        if ((kvm->n_alloc_mmu_pages - kvm->n_free_mmu_pages) >
            kvm_nr_mmu_pages) {
                int n_used_mmu_pages = kvm->n_alloc_mmu_pages
                                       - kvm->n_free_mmu_pages;

                while (n_used_mmu_pages > kvm_nr_mmu_pages) {
                        struct kvm_mmu_page *page;

                        page = container_of(kvm->active_mmu_pages.prev,
                                            struct kvm_mmu_page, link);
                        kvm_mmu_zap_page(kvm, page);
                        n_used_mmu_pages--;
                }
                kvm->n_free_mmu_pages = 0;
        } else
                kvm->n_free_mmu_pages += kvm_nr_mmu_pages
                                         - kvm->n_alloc_mmu_pages;

        kvm->n_alloc_mmu_pages = kvm_nr_mmu_pages;
}

static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
{
        unsigned index;
        struct hlist_head *bucket;
        struct kvm_mmu_page *page;
        struct hlist_node *node, *n;
        int r;

        pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
        r = 0;
        index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
        bucket = &kvm->mmu_page_hash[index];
        hlist_for_each_entry_safe(page, node, n, bucket, hash_link)
                if (page->gfn == gfn && !page->role.metaphysical) {
                        pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
                                 page->role.word);
                        kvm_mmu_zap_page(kvm, page);
                        r = 1;
                }
        return r;
}

static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_mmu_page *page;

        while ((page = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
                pgprintk("%s: zap %lx %x\n",
                         __FUNCTION__, gfn, page->role.word);
                kvm_mmu_zap_page(kvm, page);
        }
}

static void page_header_update_slot(struct kvm *kvm, void *pte, gpa_t gpa)
{
        int slot = memslot_id(kvm, gfn_to_memslot(kvm, gpa >> PAGE_SHIFT));
        struct kvm_mmu_page *page_head = page_header(__pa(pte));

        __set_bit(slot, &page_head->slot_bitmap);
}

hpa_t safe_gpa_to_hpa(struct kvm *kvm, gpa_t gpa)
{
        hpa_t hpa = gpa_to_hpa(kvm, gpa);

        return is_error_hpa(hpa) ? bad_page_address | (gpa & ~PAGE_MASK) : hpa;
}

hpa_t gpa_to_hpa(struct kvm *kvm, gpa_t gpa)
{
        struct page *page;

        ASSERT((gpa & HPA_ERR_MASK) == 0);
        page = gfn_to_page(kvm, gpa >> PAGE_SHIFT);
        if (!page)
                return gpa | HPA_ERR_MASK;
        return ((hpa_t)page_to_pfn(page) << PAGE_SHIFT)
                | (gpa & (PAGE_SIZE-1));
}

hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva)
{
        gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);

        if (gpa == UNMAPPED_GVA)
                return UNMAPPED_GVA;
        return gpa_to_hpa(vcpu->kvm, gpa);
}

struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
{
        gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);

        if (gpa == UNMAPPED_GVA)
                return NULL;
        return pfn_to_page(gpa_to_hpa(vcpu->kvm, gpa) >> PAGE_SHIFT);
}

static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
{
}

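/*
 * Walk the shadow hierarchy for a guest-physical address when the
 * guest has paging disabled, allocating intermediate shadow pages on
 * demand and installing a writable leaf pte at the last level.
 */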
static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
{
        int level = PT32E_ROOT_LEVEL;
        hpa_t table_addr = vcpu->mmu.root_hpa;

        for (; ; level--) {
                u32 index = PT64_INDEX(v, level);
                u64 *table;
                u64 pte;

                ASSERT(VALID_PAGE(table_addr));
                table = __va(table_addr);

                if (level == 1) {
                        int was_rmapped;

                        pte = table[index];
                        was_rmapped = is_rmap_pte(pte);
                        if (is_shadow_present_pte(pte) && is_writeble_pte(pte))
                                return 0;
                        mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
                        page_header_update_slot(vcpu->kvm, table, v);
                        table[index] = p | PT_PRESENT_MASK | PT_WRITABLE_MASK |
                                       PT_USER_MASK;
                        if (!was_rmapped)
                                rmap_add(vcpu, &table[index], v >> PAGE_SHIFT);
                        return 0;
                }

                if (table[index] == shadow_trap_nonpresent_pte) {
                        struct kvm_mmu_page *new_table;
                        gfn_t pseudo_gfn;

                        pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK)
                                >> PAGE_SHIFT;
                        new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
                                                     v, level - 1,
                                                     1, 3, &table[index]);
                        if (!new_table) {
                                pgprintk("nonpaging_map: ENOMEM\n");
                                return -ENOMEM;
                        }

                        table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
                                | PT_WRITABLE_MASK | PT_USER_MASK;
                }
                table_addr = table[index] & PT64_BASE_ADDR_MASK;
        }
}

static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
                                    struct kvm_mmu_page *sp)
{
        int i;

        for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
                sp->spt[i] = shadow_trap_nonpresent_pte;
}

static void mmu_free_roots(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_mmu_page *page;

        if (!VALID_PAGE(vcpu->mmu.root_hpa))
                return;
#ifdef CONFIG_X86_64
        if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
                hpa_t root = vcpu->mmu.root_hpa;

                page = page_header(root);
                --page->root_count;
                vcpu->mmu.root_hpa = INVALID_PAGE;
                return;
        }
#endif
        for (i = 0; i < 4; ++i) {
                hpa_t root = vcpu->mmu.pae_root[i];

                if (root) {
                        root &= PT64_BASE_ADDR_MASK;
                        page = page_header(root);
                        --page->root_count;
                }
                vcpu->mmu.pae_root[i] = INVALID_PAGE;
        }
        vcpu->mmu.root_hpa = INVALID_PAGE;
}

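/*
 * Allocate the shadow root(s) for the current mode: a single level-4
 * root for long mode, or four PAE directory roots (one per pdptr) for
 * 32-bit and PAE guests.  Root pages are kept alive via root_count.
 */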
static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
{
        int i;
        gfn_t root_gfn;
        struct kvm_mmu_page *page;

        root_gfn = vcpu->cr3 >> PAGE_SHIFT;

#ifdef CONFIG_X86_64
        if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
                hpa_t root = vcpu->mmu.root_hpa;

                ASSERT(!VALID_PAGE(root));
                page = kvm_mmu_get_page(vcpu, root_gfn, 0,
                                        PT64_ROOT_LEVEL, 0, 0, NULL);
                root = __pa(page->spt);
                ++page->root_count;
                vcpu->mmu.root_hpa = root;
                return;
        }
#endif
        for (i = 0; i < 4; ++i) {
                hpa_t root = vcpu->mmu.pae_root[i];

                ASSERT(!VALID_PAGE(root));
                if (vcpu->mmu.root_level == PT32E_ROOT_LEVEL) {
                        if (!is_present_pte(vcpu->pdptrs[i])) {
                                vcpu->mmu.pae_root[i] = 0;
                                continue;
                        }
                        root_gfn = vcpu->pdptrs[i] >> PAGE_SHIFT;
                } else if (vcpu->mmu.root_level == 0)
                        root_gfn = 0;
                page = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
                                        PT32_ROOT_LEVEL, !is_paging(vcpu),
                                        0, NULL);
                root = __pa(page->spt);
                ++page->root_count;
                vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
        }
        vcpu->mmu.root_hpa = __pa(vcpu->mmu.pae_root);
}

static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
{
        return vaddr;
}

static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
                                u32 error_code)
{
        gpa_t addr = gva;
        hpa_t paddr;
        int r;

        r = mmu_topup_memory_caches(vcpu);
        if (r)
                return r;

        ASSERT(vcpu);
        ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));

        paddr = gpa_to_hpa(vcpu->kvm, addr & PT64_BASE_ADDR_MASK);

        if (is_error_hpa(paddr))
                return 1;

        return nonpaging_map(vcpu, addr & PAGE_MASK, paddr);
}

static void nonpaging_free(struct kvm_vcpu *vcpu)
{
        mmu_free_roots(vcpu);
}

static int nonpaging_init_context(struct kvm_vcpu *vcpu)
{
        struct kvm_mmu *context = &vcpu->mmu;

        context->new_cr3 = nonpaging_new_cr3;
        context->page_fault = nonpaging_page_fault;
        context->gva_to_gpa = nonpaging_gva_to_gpa;
        context->free = nonpaging_free;
        context->prefetch_page = nonpaging_prefetch_page;
        context->root_level = 0;
        context->shadow_root_level = PT32E_ROOT_LEVEL;
        context->root_hpa = INVALID_PAGE;
        return 0;
}

static void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
        ++vcpu->stat.tlb_flush;
        kvm_x86_ops->tlb_flush(vcpu);
}

static void paging_new_cr3(struct kvm_vcpu *vcpu)
{
        pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->cr3);
        mmu_free_roots(vcpu);
}

static void inject_page_fault(struct kvm_vcpu *vcpu,
                              u64 addr,
                              u32 err_code)
{
        kvm_x86_ops->inject_page_fault(vcpu, addr, err_code);
}

static void paging_free(struct kvm_vcpu *vcpu)
{
        nonpaging_free(vcpu);
}

#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE

static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
{
        struct kvm_mmu *context = &vcpu->mmu;

        ASSERT(is_pae(vcpu));
        context->new_cr3 = paging_new_cr3;
        context->page_fault = paging64_page_fault;
        context->gva_to_gpa = paging64_gva_to_gpa;
        context->prefetch_page = paging64_prefetch_page;
        context->free = paging_free;
        context->root_level = level;
        context->shadow_root_level = level;
        context->root_hpa = INVALID_PAGE;
        return 0;
}

static int paging64_init_context(struct kvm_vcpu *vcpu)
{
        return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
}

static int paging32_init_context(struct kvm_vcpu *vcpu)
{
        struct kvm_mmu *context = &vcpu->mmu;

        context->new_cr3 = paging_new_cr3;
        context->page_fault = paging32_page_fault;
        context->gva_to_gpa = paging32_gva_to_gpa;
        context->free = paging_free;
        context->prefetch_page = paging32_prefetch_page;
        context->root_level = PT32_ROOT_LEVEL;
        context->shadow_root_level = PT32E_ROOT_LEVEL;
        context->root_hpa = INVALID_PAGE;
        return 0;
}

static int paging32E_init_context(struct kvm_vcpu *vcpu)
{
        return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
}

static int init_kvm_mmu(struct kvm_vcpu *vcpu)
{
        ASSERT(vcpu);
        ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));

        if (!is_paging(vcpu))
                return nonpaging_init_context(vcpu);
        else if (is_long_mode(vcpu))
                return paging64_init_context(vcpu);
        else if (is_pae(vcpu))
                return paging32E_init_context(vcpu);
        else
                return paging32_init_context(vcpu);
}

static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
{
        ASSERT(vcpu);
        if (VALID_PAGE(vcpu->mmu.root_hpa)) {
                vcpu->mmu.free(vcpu);
                vcpu->mmu.root_hpa = INVALID_PAGE;
        }
}

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
        destroy_kvm_mmu(vcpu);
        return init_kvm_mmu(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);

int kvm_mmu_load(struct kvm_vcpu *vcpu)
{
        int r;

        mutex_lock(&vcpu->kvm->lock);
        r = mmu_topup_memory_caches(vcpu);
        if (r)
                goto out;
        mmu_alloc_roots(vcpu);
        kvm_x86_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
        kvm_mmu_flush_tlb(vcpu);
out:
        mutex_unlock(&vcpu->kvm->lock);
        return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_load);

void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
        mmu_free_roots(vcpu);
}

static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
                                  struct kvm_mmu_page *page,
                                  u64 *spte)
{
        u64 pte;
        struct kvm_mmu_page *child;

        pte = *spte;
        if (is_shadow_present_pte(pte)) {
                if (page->role.level == PT_PAGE_TABLE_LEVEL)
                        rmap_remove(vcpu->kvm, spte);
                else {
                        child = page_header(pte & PT64_BASE_ADDR_MASK);
                        mmu_page_remove_parent_pte(child, spte);
                }
        }
        set_shadow_pte(spte, shadow_trap_nonpresent_pte);
        kvm_flush_remote_tlbs(vcpu->kvm);
}

static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
                                  struct kvm_mmu_page *page,
                                  u64 *spte,
                                  const void *new, int bytes,
                                  int offset_in_pte)
{
        if (page->role.level != PT_PAGE_TABLE_LEVEL)
                return;

        if (page->role.glevels == PT32_ROOT_LEVEL)
                paging32_update_pte(vcpu, page, spte, new, bytes,
                                    offset_in_pte);
        else
                paging64_update_pte(vcpu, page, spte, new, bytes,
                                    offset_in_pte);
}

static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
{
        u64 *spte = vcpu->last_pte_updated;

        return !!(spte && (*spte & PT_ACCESSED_MASK));
}

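/*
 * Called when the guest writes to a page that is shadowed as a page
 * table.  Updates the affected sptes in place, but zaps the whole
 * shadow page if the write is misaligned (probably not a pte update)
 * or if the same gfn keeps getting written (write flooding suggests
 * the page is no longer a page table, or the guest is forking).
 */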
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                       const u8 *new, int bytes)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        struct kvm_mmu_page *page;
        struct hlist_node *node, *n;
        struct hlist_head *bucket;
        unsigned index;
        u64 *spte;
        unsigned offset = offset_in_page(gpa);
        unsigned pte_size;
        unsigned page_offset;
        unsigned misaligned;
        unsigned quadrant;
        int level;
        int flooded = 0;
        int npte;

        pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
        kvm_mmu_audit(vcpu, "pre pte write");
        if (gfn == vcpu->last_pt_write_gfn
            && !last_updated_pte_accessed(vcpu)) {
                ++vcpu->last_pt_write_count;
                if (vcpu->last_pt_write_count >= 3)
                        flooded = 1;
        } else {
                vcpu->last_pt_write_gfn = gfn;
                vcpu->last_pt_write_count = 1;
                vcpu->last_pte_updated = NULL;
        }
        index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
        bucket = &vcpu->kvm->mmu_page_hash[index];
        hlist_for_each_entry_safe(page, node, n, bucket, hash_link) {
                if (page->gfn != gfn || page->role.metaphysical)
                        continue;
                pte_size = page->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
                misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
                misaligned |= bytes < 4;
                if (misaligned || flooded) {
                        /*
                         * Misaligned accesses are too much trouble to fix
                         * up; also, they usually indicate a page is not used
                         * as a page table.
                         *
                         * If we're seeing too many writes to a page,
                         * it may no longer be a page table, or we may be
                         * forking, in which case it is better to unmap the
                         * page.
                         */
                        pgprintk("misaligned: gpa %llx bytes %d role %x\n",
                                 gpa, bytes, page->role.word);
                        kvm_mmu_zap_page(vcpu->kvm, page);
                        continue;
                }
                page_offset = offset;
                level = page->role.level;
                npte = 1;
                if (page->role.glevels == PT32_ROOT_LEVEL) {
                        page_offset <<= 1;      /* 32->64 */
                        /*
                         * A 32-bit pde maps 4MB while the shadow pdes map
                         * only 2MB.  So we need to double the offset again
                         * and zap two pdes instead of one.
                         */
                        if (level == PT32_ROOT_LEVEL) {
                                page_offset &= ~7; /* kill rounding error */
                                page_offset <<= 1;
                                npte = 2;
                        }
                        quadrant = page_offset >> PAGE_SHIFT;
                        page_offset &= ~PAGE_MASK;
                        if (quadrant != page->role.quadrant)
                                continue;
                }
                spte = &page->spt[page_offset / sizeof(*spte)];
                while (npte--) {
                        mmu_pte_write_zap_pte(vcpu, page, spte);
                        mmu_pte_write_new_pte(vcpu, page, spte, new, bytes,
                                              page_offset & (pte_size - 1));
                        ++spte;
                }
        }
        kvm_mmu_audit(vcpu, "post pte write");
}

int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
        gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);

        return kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
}

void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
        while (vcpu->kvm->n_free_mmu_pages < KVM_REFILL_PAGES) {
                struct kvm_mmu_page *page;

                page = container_of(vcpu->kvm->active_mmu_pages.prev,
                                    struct kvm_mmu_page, link);
                kvm_mmu_zap_page(vcpu->kvm, page);
        }
}

static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
        struct kvm_mmu_page *page;

        while (!list_empty(&vcpu->kvm->active_mmu_pages)) {
                page = container_of(vcpu->kvm->active_mmu_pages.next,
                                    struct kvm_mmu_page, link);
                kvm_mmu_zap_page(vcpu->kvm, page);
        }
        free_page((unsigned long)vcpu->mmu.pae_root);
}

static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
{
        struct page *page;
        int i;

        ASSERT(vcpu);

        if (vcpu->kvm->n_requested_mmu_pages)
                vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_requested_mmu_pages;
        else
                vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_alloc_mmu_pages;
        /*
         * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
         * Therefore we need to allocate shadow page tables in the first
         * 4GB of memory, which happens to fit the DMA32 zone.
         */
        page = alloc_page(GFP_KERNEL | __GFP_DMA32);
        if (!page)
                goto error_1;
        vcpu->mmu.pae_root = page_address(page);
        for (i = 0; i < 4; ++i)
                vcpu->mmu.pae_root[i] = INVALID_PAGE;

        return 0;

error_1:
        free_mmu_pages(vcpu);
        return -ENOMEM;
}

int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
        ASSERT(vcpu);
        ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));

        return alloc_mmu_pages(vcpu);
}

int kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
        ASSERT(vcpu);
        ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));

        return init_kvm_mmu(vcpu);
}

void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
        ASSERT(vcpu);

        destroy_kvm_mmu(vcpu);
        free_mmu_pages(vcpu);
        mmu_free_memory_caches(vcpu);
}

void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
{
        struct kvm_mmu_page *page;

        list_for_each_entry(page, &kvm->active_mmu_pages, link) {
                int i;
                u64 *pt;

                if (!test_bit(slot, &page->slot_bitmap))
                        continue;

                pt = page->spt;
                for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
                        /* avoid RMW */
                        if (pt[i] & PT_WRITABLE_MASK)
                                pt[i] &= ~PT_WRITABLE_MASK;
        }
}

void kvm_mmu_zap_all(struct kvm *kvm)
{
        struct kvm_mmu_page *page, *node;

        list_for_each_entry_safe(page, node, &kvm->active_mmu_pages, link)
                kvm_mmu_zap_page(kvm, page);

        kvm_flush_remote_tlbs(kvm);
}

void kvm_mmu_module_exit(void)
{
        if (pte_chain_cache)
                kmem_cache_destroy(pte_chain_cache);
        if (rmap_desc_cache)
                kmem_cache_destroy(rmap_desc_cache);
        if (mmu_page_header_cache)
                kmem_cache_destroy(mmu_page_header_cache);
}

int kvm_mmu_module_init(void)
{
        pte_chain_cache = kmem_cache_create("kvm_pte_chain",
                                            sizeof(struct kvm_pte_chain),
                                            0, 0, NULL);
        if (!pte_chain_cache)
                goto nomem;
        rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
                                            sizeof(struct kvm_rmap_desc),
                                            0, 0, NULL);
        if (!rmap_desc_cache)
                goto nomem;

        mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
                                                  sizeof(struct kvm_mmu_page),
                                                  0, 0, NULL);
        if (!mmu_page_header_cache)
                goto nomem;

        return 0;

nomem:
        kvm_mmu_module_exit();
        return -ENOMEM;
}

#ifdef AUDIT

static const char *audit_msg;

static gva_t canonicalize(gva_t gva)
{
#ifdef CONFIG_X86_64
        gva = (long long)(gva << 16) >> 16;
#endif
        return gva;
}

static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
                                gva_t va, int level)
{
        u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
        int i;
        gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));

        for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
                u64 ent = pt[i];

                if (ent == shadow_trap_nonpresent_pte)
                        continue;

                va = canonicalize(va);
                if (level > 1) {
                        if (ent == shadow_notrap_nonpresent_pte)
                                printk(KERN_ERR "audit: (%s) nontrapping pte"
                                       " in nonleaf level: levels %d gva %lx"
                                       " level %d pte %llx\n", audit_msg,
                                       vcpu->mmu.root_level, va, level, ent);

                        audit_mappings_page(vcpu, ent, va, level - 1);
                } else {
                        gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, va);
                        hpa_t hpa = gpa_to_hpa(vcpu->kvm, gpa);

                        if (is_shadow_present_pte(ent)
                            && (ent & PT64_BASE_ADDR_MASK) != hpa)
                                printk(KERN_ERR "xx audit error: (%s) levels %d"
                                       " gva %lx gpa %llx hpa %llx ent %llx %d\n",
                                       audit_msg, vcpu->mmu.root_level,
                                       va, gpa, hpa, ent,
                                       is_shadow_present_pte(ent));
                        else if (ent == shadow_notrap_nonpresent_pte
                                 && !is_error_hpa(hpa))
                                printk(KERN_ERR "audit: (%s) notrap shadow,"
                                       " valid guest gva %lx\n", audit_msg, va);

                }
        }
}

static void audit_mappings(struct kvm_vcpu *vcpu)
{
        unsigned i;

        if (vcpu->mmu.root_level == 4)
                audit_mappings_page(vcpu, vcpu->mmu.root_hpa, 0, 4);
        else
                for (i = 0; i < 4; ++i)
                        if (vcpu->mmu.pae_root[i] & PT_PRESENT_MASK)
                                audit_mappings_page(vcpu,
                                                    vcpu->mmu.pae_root[i],
                                                    i << 30,
                                                    2);
}

static int count_rmaps(struct kvm_vcpu *vcpu)
{
        int nmaps = 0;
        int i, j, k;

        for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
                struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
                struct kvm_rmap_desc *d;

                for (j = 0; j < m->npages; ++j) {
                        unsigned long *rmapp = &m->rmap[j];

                        if (!*rmapp)
                                continue;
                        if (!(*rmapp & 1)) {
                                ++nmaps;
                                continue;
                        }
                        d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
                        while (d) {
                                for (k = 0; k < RMAP_EXT; ++k)
                                        if (d->shadow_ptes[k])
                                                ++nmaps;
                                        else
                                                break;
                                d = d->more;
                        }
                }
        }
        return nmaps;
}

static int count_writable_mappings(struct kvm_vcpu *vcpu)
{
        int nmaps = 0;
        struct kvm_mmu_page *page;
        int i;

        list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
                u64 *pt = page->spt;

                if (page->role.level != PT_PAGE_TABLE_LEVEL)
                        continue;

                for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
                        u64 ent = pt[i];

                        if (!(ent & PT_PRESENT_MASK))
                                continue;
                        if (!(ent & PT_WRITABLE_MASK))
                                continue;
                        ++nmaps;
                }
        }
        return nmaps;
}

static void audit_rmap(struct kvm_vcpu *vcpu)
{
        int n_rmap = count_rmaps(vcpu);
        int n_actual = count_writable_mappings(vcpu);

        if (n_rmap != n_actual)
                printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
                       __FUNCTION__, audit_msg, n_rmap, n_actual);
}

static void audit_write_protection(struct kvm_vcpu *vcpu)
{
        struct kvm_mmu_page *page;
        struct kvm_memory_slot *slot;
        unsigned long *rmapp;
        gfn_t gfn;

        list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
                if (page->role.metaphysical)
                        continue;

                slot = gfn_to_memslot(vcpu->kvm, page->gfn);
                gfn = unalias_gfn(vcpu->kvm, page->gfn);
                rmapp = &slot->rmap[gfn - slot->base_gfn];
                if (*rmapp)
                        printk(KERN_ERR "%s: (%s) shadow page has writable"
                               " mappings: gfn %lx role %x\n",
                               __FUNCTION__, audit_msg, page->gfn,
                               page->role.word);
        }
}

static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
{
        int olddbg = dbg;

        dbg = 0;
        audit_msg = msg;
        audit_rmap(vcpu);
        audit_write_protection(vcpu);
        audit_mappings(vcpu);
        dbg = olddbg;
}

#endif