KVM: MMU: Add rmap_next(), a helper for walking kvm rmaps
drivers/kvm/mmu.c
1/*
2 * Kernel-based Virtual Machine driver for Linux
3 *
4 * This module enables machines with Intel VT-x extensions to run virtual
5 * machines without emulation or binary translation.
6 *
7 * MMU support
8 *
9 * Copyright (C) 2006 Qumranet, Inc.
10 *
11 * Authors:
12 * Yaniv Kamay <yaniv@qumranet.com>
13 * Avi Kivity <avi@qumranet.com>
14 *
15 * This work is licensed under the terms of the GNU GPL, version 2. See
16 * the COPYING file in the top-level directory.
17 *
18 */
19
20#include "vmx.h"
21#include "kvm.h"
22
23#include <linux/types.h>
24#include <linux/string.h>
25#include <linux/mm.h>
26#include <linux/highmem.h>
27#include <linux/module.h>
28
29#include <asm/page.h>
30#include <asm/cmpxchg.h>
 31
32#undef MMU_DEBUG
33
34#undef AUDIT
35
36#ifdef AUDIT
37static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
38#else
39static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
40#endif
41
42#ifdef MMU_DEBUG
43
44#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
45#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
46
47#else
48
49#define pgprintk(x...) do { } while (0)
50#define rmap_printk(x...) do { } while (0)
51
52#endif
53
54#if defined(MMU_DEBUG) || defined(AUDIT)
55static int dbg = 1;
56#endif
 57
58#ifndef MMU_DEBUG
59#define ASSERT(x) do { } while (0)
60#else
61#define ASSERT(x) \
62 if (!(x)) { \
63 printk(KERN_WARNING "assertion failed %s:%d: %s\n", \
64 __FILE__, __LINE__, #x); \
65 }
 66#endif
 67
68#define PT64_PT_BITS 9
69#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
70#define PT32_PT_BITS 10
71#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)
72
73#define PT_WRITABLE_SHIFT 1
74
75#define PT_PRESENT_MASK (1ULL << 0)
76#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
77#define PT_USER_MASK (1ULL << 2)
78#define PT_PWT_MASK (1ULL << 3)
79#define PT_PCD_MASK (1ULL << 4)
80#define PT_ACCESSED_MASK (1ULL << 5)
81#define PT_DIRTY_MASK (1ULL << 6)
82#define PT_PAGE_SIZE_MASK (1ULL << 7)
83#define PT_PAT_MASK (1ULL << 7)
84#define PT_GLOBAL_MASK (1ULL << 8)
85#define PT64_NX_MASK (1ULL << 63)
86
87#define PT_PAT_SHIFT 7
88#define PT_DIR_PAT_SHIFT 12
89#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)
90
91#define PT32_DIR_PSE36_SIZE 4
92#define PT32_DIR_PSE36_SHIFT 13
93#define PT32_DIR_PSE36_MASK \
94 (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
95
96
97#define PT_FIRST_AVAIL_BITS_SHIFT 9
98#define PT64_SECOND_AVAIL_BITS_SHIFT 52
99
100#define PT_SHADOW_IO_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
101
102#define VALID_PAGE(x) ((x) != INVALID_PAGE)
103
104#define PT64_LEVEL_BITS 9
105
106#define PT64_LEVEL_SHIFT(level) \
 107 (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)
108
109#define PT64_LEVEL_MASK(level) \
110 (((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))
111
112#define PT64_INDEX(address, level)\
113 (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
114
115
116#define PT32_LEVEL_BITS 10
117
118#define PT32_LEVEL_SHIFT(level) \
 119 (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
120
121#define PT32_LEVEL_MASK(level) \
122 (((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))
123
124#define PT32_INDEX(address, level)\
125 (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
126
127
 128#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
129#define PT64_DIR_BASE_ADDR_MASK \
130 (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
131
132#define PT32_BASE_ADDR_MASK PAGE_MASK
133#define PT32_DIR_BASE_ADDR_MASK \
134 (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
135
136
137#define PFERR_PRESENT_MASK (1U << 0)
138#define PFERR_WRITE_MASK (1U << 1)
139#define PFERR_USER_MASK (1U << 2)
 140#define PFERR_FETCH_MASK (1U << 4)
141
142#define PT64_ROOT_LEVEL 4
143#define PT32_ROOT_LEVEL 2
144#define PT32E_ROOT_LEVEL 3
145
146#define PT_DIRECTORY_LEVEL 2
147#define PT_PAGE_TABLE_LEVEL 1
148
149#define RMAP_EXT 4
150
151struct kvm_rmap_desc {
152 u64 *shadow_ptes[RMAP_EXT];
153 struct kvm_rmap_desc *more;
154};
155
156static struct kmem_cache *pte_chain_cache;
157static struct kmem_cache *rmap_desc_cache;
 158static struct kmem_cache *mmu_page_header_cache;
 159
160static u64 __read_mostly shadow_trap_nonpresent_pte;
161static u64 __read_mostly shadow_notrap_nonpresent_pte;
162
163void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
164{
165 shadow_trap_nonpresent_pte = trap_pte;
166 shadow_notrap_nonpresent_pte = notrap_pte;
167}
168EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
169
170static int is_write_protection(struct kvm_vcpu *vcpu)
171{
 172 return vcpu->cr0 & X86_CR0_WP;
173}
174
175static int is_cpuid_PSE36(void)
176{
177 return 1;
178}
179
180static int is_nx(struct kvm_vcpu *vcpu)
181{
182 return vcpu->shadow_efer & EFER_NX;
183}
184
185static int is_present_pte(unsigned long pte)
186{
187 return pte & PT_PRESENT_MASK;
188}
189
190static int is_shadow_present_pte(u64 pte)
191{
192 pte &= ~PT_SHADOW_IO_MARK;
193 return pte != shadow_trap_nonpresent_pte
194 && pte != shadow_notrap_nonpresent_pte;
195}
196
197static int is_writeble_pte(unsigned long pte)
198{
199 return pte & PT_WRITABLE_MASK;
200}
201
202static int is_dirty_pte(unsigned long pte)
203{
204 return pte & PT_DIRTY_MASK;
205}
206
207static int is_io_pte(unsigned long pte)
208{
209 return pte & PT_SHADOW_IO_MARK;
210}
211
212static int is_rmap_pte(u64 pte)
213{
214 return (pte & (PT_WRITABLE_MASK | PT_PRESENT_MASK))
215 == (PT_WRITABLE_MASK | PT_PRESENT_MASK);
216}
217
218static void set_shadow_pte(u64 *sptep, u64 spte)
219{
220#ifdef CONFIG_X86_64
221 set_64bit((unsigned long *)sptep, spte);
222#else
223 set_64bit((unsigned long long *)sptep, spte);
224#endif
225}
226
 227static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
 228 struct kmem_cache *base_cache, int min)
229{
230 void *obj;
231
232 if (cache->nobjs >= min)
 233 return 0;
 234 while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
 235 obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
 236 if (!obj)
 237 return -ENOMEM;
238 cache->objects[cache->nobjs++] = obj;
239 }
 240 return 0;
241}
242
243static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
244{
245 while (mc->nobjs)
246 kfree(mc->objects[--mc->nobjs]);
247}
248
 249static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
 250 int min)
251{
252 struct page *page;
253
254 if (cache->nobjs >= min)
255 return 0;
256 while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
 257 page = alloc_page(GFP_KERNEL);
258 if (!page)
259 return -ENOMEM;
260 set_page_private(page, 0);
261 cache->objects[cache->nobjs++] = page_address(page);
262 }
263 return 0;
264}
265
266static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
267{
268 while (mc->nobjs)
 269 free_page((unsigned long)mc->objects[--mc->nobjs]);
270}
271
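/*
 * Pre-fill the per-vcpu object caches (pte chains, rmap descriptors,
 * shadow page memory and page headers) with GFP_KERNEL allocations, so
 * that the fault and shadowing paths can later take objects out of the
 * caches via mmu_memory_cache_alloc() instead of allocating on the spot.
 */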
 272static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
 273{
274 int r;
275
 276 kvm_mmu_free_some_pages(vcpu);
 277 r = mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache,
 278 pte_chain_cache, 4);
279 if (r)
280 goto out;
281 r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
 282 rmap_desc_cache, 1);
283 if (r)
284 goto out;
 285 r = mmu_topup_memory_cache_page(&vcpu->mmu_page_cache, 8);
286 if (r)
287 goto out;
288 r = mmu_topup_memory_cache(&vcpu->mmu_page_header_cache,
 289 mmu_page_header_cache, 4);
290out:
291 return r;
292}
293
294static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
295{
296 mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);
297 mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache);
 298 mmu_free_memory_cache_page(&vcpu->mmu_page_cache);
 299 mmu_free_memory_cache(&vcpu->mmu_page_header_cache);
300}
301
302static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
303 size_t size)
304{
305 void *p;
306
307 BUG_ON(!mc->nobjs);
308 p = mc->objects[--mc->nobjs];
309 memset(p, 0, size);
310 return p;
311}
312
313static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
314{
315 return mmu_memory_cache_alloc(&vcpu->mmu_pte_chain_cache,
316 sizeof(struct kvm_pte_chain));
317}
318
 319static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
 320{
 321 kfree(pc);
322}
323
324static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
325{
326 return mmu_memory_cache_alloc(&vcpu->mmu_rmap_desc_cache,
327 sizeof(struct kvm_rmap_desc));
328}
329
 330static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
 331{
 332 kfree(rd);
333}
334
335/*
336 * Take gfn and return the reverse mapping to it.
 337 * Note: gfn must be unaliased before this function gets called
338 */
339
340static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn)
341{
342 struct kvm_memory_slot *slot;
343
344 slot = gfn_to_memslot(kvm, gfn);
345 return &slot->rmap[gfn - slot->base_gfn];
346}
347
348/*
349 * Reverse mapping data structures:
350 *
 351 * If rmapp bit zero is zero, then *rmapp points to the shadow page table
 352 * entry that points to page_address(page).
 353 *
 354 * If rmapp bit zero is one, then (*rmapp & ~1) points to a struct
 355 * kvm_rmap_desc containing more mappings.
 356 */
 357static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 358{
 359 struct kvm_mmu_page *page;
 360 struct kvm_rmap_desc *desc;
 361 unsigned long *rmapp;
362 int i;
363
364 if (!is_rmap_pte(*spte))
365 return;
366 gfn = unalias_gfn(vcpu->kvm, gfn);
367 page = page_header(__pa(spte));
368 page->gfns[spte - page->spt] = gfn;
369 rmapp = gfn_to_rmap(vcpu->kvm, gfn);
370 if (!*rmapp) {
 371 rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
372 *rmapp = (unsigned long)spte;
373 } else if (!(*rmapp & 1)) {
 374 rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
 375 desc = mmu_alloc_rmap_desc(vcpu);
 376 desc->shadow_ptes[0] = (u64 *)*rmapp;
 377 desc->shadow_ptes[1] = spte;
 378 *rmapp = (unsigned long)desc | 1;
379 } else {
380 rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
 381 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
382 while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
383 desc = desc->more;
384 if (desc->shadow_ptes[RMAP_EXT-1]) {
 385 desc->more = mmu_alloc_rmap_desc(vcpu);
386 desc = desc->more;
387 }
388 for (i = 0; desc->shadow_ptes[i]; ++i)
389 ;
390 desc->shadow_ptes[i] = spte;
391 }
392}
393
 394static void rmap_desc_remove_entry(unsigned long *rmapp,
395 struct kvm_rmap_desc *desc,
396 int i,
397 struct kvm_rmap_desc *prev_desc)
398{
399 int j;
400
401 for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
402 ;
403 desc->shadow_ptes[i] = desc->shadow_ptes[j];
 404 desc->shadow_ptes[j] = NULL;
405 if (j != 0)
406 return;
407 if (!prev_desc && !desc->more)
 408 *rmapp = (unsigned long)desc->shadow_ptes[0];
409 else
410 if (prev_desc)
411 prev_desc->more = desc->more;
412 else
 413 *rmapp = (unsigned long)desc->more | 1;
 414 mmu_free_rmap_desc(desc);
415}
416
 417static void rmap_remove(struct kvm *kvm, u64 *spte)
 418{
419 struct kvm_rmap_desc *desc;
420 struct kvm_rmap_desc *prev_desc;
421 struct kvm_mmu_page *page;
422 unsigned long *rmapp;
423 int i;
424
425 if (!is_rmap_pte(*spte))
426 return;
427 page = page_header(__pa(spte));
428 rmapp = gfn_to_rmap(kvm, page->gfns[spte - page->spt]);
429 if (!*rmapp) {
430 printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
431 BUG();
 432 } else if (!(*rmapp & 1)) {
 433 rmap_printk("rmap_remove: %p %llx 1->0\n", spte, *spte);
 434 if ((u64 *)*rmapp != spte) {
435 printk(KERN_ERR "rmap_remove: %p %llx 1->BUG\n",
436 spte, *spte);
437 BUG();
438 }
 439 *rmapp = 0;
440 } else {
441 rmap_printk("rmap_remove: %p %llx many->many\n", spte, *spte);
 442 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
443 prev_desc = NULL;
444 while (desc) {
445 for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
446 if (desc->shadow_ptes[i] == spte) {
 447 rmap_desc_remove_entry(rmapp,
 448 desc, i,
449 prev_desc);
450 return;
451 }
452 prev_desc = desc;
453 desc = desc->more;
454 }
455 BUG();
456 }
457}
458
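/*
 * Iterator over the rmap chain for a gfn: pass spte == NULL to get the
 * first shadow pte that maps the gfn, then pass the previously returned
 * spte to get the next one; returns NULL when the chain is exhausted.
 * A typical walk (see rmap_write_protect() below) looks like:
 *
 *	spte = rmap_next(kvm, rmapp, NULL);
 *	while (spte) {
 *		... use *spte ...
 *		spte = rmap_next(kvm, rmapp, spte);
 *	}
 */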
 459static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
 460{
 461 struct kvm_rmap_desc *desc;
462 struct kvm_rmap_desc *prev_desc;
463 u64 *prev_spte;
464 int i;
465
466 if (!*rmapp)
467 return NULL;
468 else if (!(*rmapp & 1)) {
469 if (!spte)
470 return (u64 *)*rmapp;
471 return NULL;
472 }
473 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
474 prev_desc = NULL;
475 prev_spte = NULL;
476 while (desc) {
477 for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) {
478 if (prev_spte == spte)
479 return desc->shadow_ptes[i];
480 prev_spte = desc->shadow_ptes[i];
481 }
482 desc = desc->more;
483 }
484 return NULL;
485}
486
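/*
 * Write-protect every shadow pte that maps @gfn: each spte found through
 * the rmap chain is dropped from the chain and has its writable bit
 * cleared, with a remote TLB flush after each update.
 */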
487static void rmap_write_protect(struct kvm *kvm, u64 gfn)
488{
 489 unsigned long *rmapp;
 490 u64 *spte;
 491 u64 *prev_spte;
 492
493 gfn = unalias_gfn(kvm, gfn);
494 rmapp = gfn_to_rmap(kvm, gfn);
 495
496 spte = rmap_next(kvm, rmapp, NULL);
497 while (spte) {
 498 BUG_ON(!spte);
499 BUG_ON(!(*spte & PT_PRESENT_MASK));
500 BUG_ON(!(*spte & PT_WRITABLE_MASK));
501 rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
502 prev_spte = spte;
503 spte = rmap_next(kvm, rmapp, spte);
504 rmap_remove(kvm, prev_spte);
505 set_shadow_pte(prev_spte, *prev_spte & ~PT_WRITABLE_MASK);
 506 kvm_flush_remote_tlbs(kvm);
507 }
508}
509
 510#ifdef MMU_DEBUG
 511static int is_empty_shadow_page(u64 *spt)
 512{
513 u64 *pos;
514 u64 *end;
515
 516 for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
 517 if ((*pos & ~PT_SHADOW_IO_MARK) != shadow_trap_nonpresent_pte) {
518 printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
519 pos, *pos);
 520 return 0;
 521 }
522 return 1;
523}
 524#endif
 525
 526static void kvm_mmu_free_page(struct kvm *kvm,
 527 struct kvm_mmu_page *page_head)
 528{
 529 ASSERT(is_empty_shadow_page(page_head->spt));
 530 list_del(&page_head->link);
 531 __free_page(virt_to_page(page_head->spt));
 532 __free_page(virt_to_page(page_head->gfns));
533 kfree(page_head);
534 ++kvm->n_free_mmu_pages;
535}
536
537static unsigned kvm_page_table_hashfn(gfn_t gfn)
538{
539 return gfn;
540}
541
542static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
543 u64 *parent_pte)
544{
545 struct kvm_mmu_page *page;
546
 547 if (!vcpu->kvm->n_free_mmu_pages)
 548 return NULL;
 549
550 page = mmu_memory_cache_alloc(&vcpu->mmu_page_header_cache,
551 sizeof *page);
552 page->spt = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
 553 page->gfns = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
554 set_page_private(virt_to_page(page->spt), (unsigned long)page);
555 list_add(&page->link, &vcpu->kvm->active_mmu_pages);
 556 ASSERT(is_empty_shadow_page(page->spt));
 557 page->slot_bitmap = 0;
 558 page->multimapped = 0;
 559 page->parent_pte = parent_pte;
 560 --vcpu->kvm->n_free_mmu_pages;
 561 return page;
562}
563
564static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
565 struct kvm_mmu_page *page, u64 *parent_pte)
566{
567 struct kvm_pte_chain *pte_chain;
568 struct hlist_node *node;
569 int i;
570
571 if (!parent_pte)
572 return;
573 if (!page->multimapped) {
574 u64 *old = page->parent_pte;
575
576 if (!old) {
577 page->parent_pte = parent_pte;
578 return;
579 }
580 page->multimapped = 1;
 581 pte_chain = mmu_alloc_pte_chain(vcpu);
582 INIT_HLIST_HEAD(&page->parent_ptes);
583 hlist_add_head(&pte_chain->link, &page->parent_ptes);
584 pte_chain->parent_ptes[0] = old;
585 }
586 hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link) {
587 if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
588 continue;
589 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
590 if (!pte_chain->parent_ptes[i]) {
591 pte_chain->parent_ptes[i] = parent_pte;
592 return;
593 }
594 }
 595 pte_chain = mmu_alloc_pte_chain(vcpu);
596 BUG_ON(!pte_chain);
597 hlist_add_head(&pte_chain->link, &page->parent_ptes);
598 pte_chain->parent_ptes[0] = parent_pte;
599}
600
 601static void mmu_page_remove_parent_pte(struct kvm_mmu_page *page,
602 u64 *parent_pte)
603{
604 struct kvm_pte_chain *pte_chain;
605 struct hlist_node *node;
606 int i;
607
608 if (!page->multimapped) {
609 BUG_ON(page->parent_pte != parent_pte);
610 page->parent_pte = NULL;
611 return;
612 }
613 hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link)
614 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
615 if (!pte_chain->parent_ptes[i])
616 break;
617 if (pte_chain->parent_ptes[i] != parent_pte)
618 continue;
619 while (i + 1 < NR_PTE_CHAIN_ENTRIES
620 && pte_chain->parent_ptes[i + 1]) {
621 pte_chain->parent_ptes[i]
622 = pte_chain->parent_ptes[i + 1];
623 ++i;
624 }
625 pte_chain->parent_ptes[i] = NULL;
626 if (i == 0) {
627 hlist_del(&pte_chain->link);
 628 mmu_free_pte_chain(pte_chain);
629 if (hlist_empty(&page->parent_ptes)) {
630 page->multimapped = 0;
631 page->parent_pte = NULL;
632 }
633 }
634 return;
635 }
636 BUG();
637}
638
 639static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm,
640 gfn_t gfn)
641{
642 unsigned index;
643 struct hlist_head *bucket;
644 struct kvm_mmu_page *page;
645 struct hlist_node *node;
646
647 pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
648 index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
 649 bucket = &kvm->mmu_page_hash[index];
650 hlist_for_each_entry(page, node, bucket, hash_link)
651 if (page->gfn == gfn && !page->role.metaphysical) {
652 pgprintk("%s: found role %x\n",
653 __FUNCTION__, page->role.word);
654 return page;
655 }
656 return NULL;
657}
658
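/*
 * Look up the shadow page for (gfn, role) in the hash table, creating it
 * if it does not exist yet.  Newly created non-metaphysical pages have
 * the guest page they shadow write-protected, so that guest writes to
 * that page table can be caught and reflected into the shadow.
 */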
659static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
660 gfn_t gfn,
661 gva_t gaddr,
662 unsigned level,
663 int metaphysical,
 664 unsigned hugepage_access,
665 u64 *parent_pte)
666{
667 union kvm_mmu_page_role role;
668 unsigned index;
669 unsigned quadrant;
670 struct hlist_head *bucket;
671 struct kvm_mmu_page *page;
672 struct hlist_node *node;
673
674 role.word = 0;
675 role.glevels = vcpu->mmu.root_level;
676 role.level = level;
677 role.metaphysical = metaphysical;
 678 role.hugepage_access = hugepage_access;
679 if (vcpu->mmu.root_level <= PT32_ROOT_LEVEL) {
680 quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
681 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
682 role.quadrant = quadrant;
683 }
684 pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
685 gfn, role.word);
686 index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
687 bucket = &vcpu->kvm->mmu_page_hash[index];
688 hlist_for_each_entry(page, node, bucket, hash_link)
689 if (page->gfn == gfn && page->role.word == role.word) {
 690 mmu_page_add_parent_pte(vcpu, page, parent_pte);
691 pgprintk("%s: found\n", __FUNCTION__);
692 return page;
693 }
694 page = kvm_mmu_alloc_page(vcpu, parent_pte);
695 if (!page)
696 return page;
697 pgprintk("%s: adding gfn %lx role %x\n", __FUNCTION__, gfn, role.word);
698 page->gfn = gfn;
699 page->role = role;
700 hlist_add_head(&page->hash_link, bucket);
 701 vcpu->mmu.prefetch_page(vcpu, page);
 702 if (!metaphysical)
 703 rmap_write_protect(vcpu->kvm, gfn);
704 return page;
705}
706
 707static void kvm_mmu_page_unlink_children(struct kvm *kvm,
708 struct kvm_mmu_page *page)
709{
710 unsigned i;
711 u64 *pt;
712 u64 ent;
713
 714 pt = page->spt;
715
716 if (page->role.level == PT_PAGE_TABLE_LEVEL) {
717 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
 718 if (is_shadow_present_pte(pt[i]))
 719 rmap_remove(kvm, &pt[i]);
 720 pt[i] = shadow_trap_nonpresent_pte;
 721 }
 722 kvm_flush_remote_tlbs(kvm);
723 return;
724 }
725
726 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
727 ent = pt[i];
728
729 pt[i] = shadow_trap_nonpresent_pte;
730 if (!is_shadow_present_pte(ent))
731 continue;
732 ent &= PT64_BASE_ADDR_MASK;
 733 mmu_page_remove_parent_pte(page_header(ent), &pt[i]);
 734 }
 735 kvm_flush_remote_tlbs(kvm);
736}
737
 738static void kvm_mmu_put_page(struct kvm_mmu_page *page,
739 u64 *parent_pte)
740{
 741 mmu_page_remove_parent_pte(page, parent_pte);
742}
743
744static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
745{
746 int i;
747
748 for (i = 0; i < KVM_MAX_VCPUS; ++i)
749 if (kvm->vcpus[i])
750 kvm->vcpus[i]->last_pte_updated = NULL;
751}
752
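/*
 * Tear down a shadow page: detach it from every parent pte, zap its
 * children, and free it unless it is still referenced as a root, in
 * which case it is kept on the active list.
 */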
 753static void kvm_mmu_zap_page(struct kvm *kvm,
754 struct kvm_mmu_page *page)
755{
756 u64 *parent_pte;
757
758 while (page->multimapped || page->parent_pte) {
759 if (!page->multimapped)
760 parent_pte = page->parent_pte;
761 else {
762 struct kvm_pte_chain *chain;
763
764 chain = container_of(page->parent_ptes.first,
765 struct kvm_pte_chain, link);
766 parent_pte = chain->parent_ptes[0];
767 }
 768 BUG_ON(!parent_pte);
 769 kvm_mmu_put_page(page, parent_pte);
 770 set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
 771 }
 772 kvm_mmu_page_unlink_children(kvm, page);
773 if (!page->root_count) {
774 hlist_del(&page->hash_link);
 775 kvm_mmu_free_page(kvm, page);
 776 } else
 777 list_move(&page->link, &kvm->active_mmu_pages);
 778 kvm_mmu_reset_last_pte_updated(kvm);
779}
780
781/*
782 * Changing the number of mmu pages allocated to the vm
 783 * Note: if kvm_nr_mmu_pages is too small, you will get a deadlock
784 */
785void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
786{
787 /*
 788 * If we set the number of mmu pages to be smaller than the number
 789 * of active pages, we must free some mmu pages before we change
 790 * the value.
791 */
792
793 if ((kvm->n_alloc_mmu_pages - kvm->n_free_mmu_pages) >
794 kvm_nr_mmu_pages) {
795 int n_used_mmu_pages = kvm->n_alloc_mmu_pages
796 - kvm->n_free_mmu_pages;
797
798 while (n_used_mmu_pages > kvm_nr_mmu_pages) {
799 struct kvm_mmu_page *page;
800
801 page = container_of(kvm->active_mmu_pages.prev,
802 struct kvm_mmu_page, link);
803 kvm_mmu_zap_page(kvm, page);
804 n_used_mmu_pages--;
805 }
806 kvm->n_free_mmu_pages = 0;
807 }
808 else
809 kvm->n_free_mmu_pages += kvm_nr_mmu_pages
810 - kvm->n_alloc_mmu_pages;
811
812 kvm->n_alloc_mmu_pages = kvm_nr_mmu_pages;
813}
814
 815static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
816{
817 unsigned index;
818 struct hlist_head *bucket;
819 struct kvm_mmu_page *page;
820 struct hlist_node *node, *n;
821 int r;
822
823 pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
824 r = 0;
825 index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
 826 bucket = &kvm->mmu_page_hash[index];
827 hlist_for_each_entry_safe(page, node, n, bucket, hash_link)
828 if (page->gfn == gfn && !page->role.metaphysical) {
829 pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
830 page->role.word);
 831 kvm_mmu_zap_page(kvm, page);
832 r = 1;
833 }
834 return r;
835}
836
 837static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
838{
839 struct kvm_mmu_page *page;
840
 841 while ((page = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
842 pgprintk("%s: zap %lx %x\n",
843 __FUNCTION__, gfn, page->role.word);
 844 kvm_mmu_zap_page(kvm, page);
845 }
846}
847
848static void page_header_update_slot(struct kvm *kvm, void *pte, gpa_t gpa)
849{
850 int slot = memslot_id(kvm, gfn_to_memslot(kvm, gpa >> PAGE_SHIFT));
851 struct kvm_mmu_page *page_head = page_header(__pa(pte));
852
853 __set_bit(slot, &page_head->slot_bitmap);
854}
855
 856hpa_t safe_gpa_to_hpa(struct kvm *kvm, gpa_t gpa)
 857{
 858 hpa_t hpa = gpa_to_hpa(kvm, gpa);
859
860 return is_error_hpa(hpa) ? bad_page_address | (gpa & ~PAGE_MASK): hpa;
861}
862
 863hpa_t gpa_to_hpa(struct kvm *kvm, gpa_t gpa)
 864{
865 struct page *page;
866
867 ASSERT((gpa & HPA_ERR_MASK) == 0);
 868 page = gfn_to_page(kvm, gpa >> PAGE_SHIFT);
 869 if (!page)
 870 return gpa | HPA_ERR_MASK;
871 return ((hpa_t)page_to_pfn(page) << PAGE_SHIFT)
872 | (gpa & (PAGE_SIZE-1));
873}
874
875hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva)
876{
877 gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
878
879 if (gpa == UNMAPPED_GVA)
880 return UNMAPPED_GVA;
 881 return gpa_to_hpa(vcpu->kvm, gpa);
882}
883
884struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
885{
886 gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
887
888 if (gpa == UNMAPPED_GVA)
889 return NULL;
 890 return pfn_to_page(gpa_to_hpa(vcpu->kvm, gpa) >> PAGE_SHIFT);
891}
892
893static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
894{
895}
896
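/*
 * Map a guest-physical address while the guest has paging disabled:
 * walk the shadow page table from the root, allocating intermediate
 * shadow pages as needed, and install a writable leaf pte pointing at
 * the host physical address p.
 */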
897static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
898{
899 int level = PT32E_ROOT_LEVEL;
900 hpa_t table_addr = vcpu->mmu.root_hpa;
901
902 for (; ; level--) {
903 u32 index = PT64_INDEX(v, level);
904 u64 *table;
 905 u64 pte;
906
907 ASSERT(VALID_PAGE(table_addr));
908 table = __va(table_addr);
909
910 if (level == 1) {
 911 pte = table[index];
 912 if (is_shadow_present_pte(pte) && is_writeble_pte(pte))
 913 return 0;
914 mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
915 page_header_update_slot(vcpu->kvm, table, v);
916 table[index] = p | PT_PRESENT_MASK | PT_WRITABLE_MASK |
917 PT_USER_MASK;
 918 rmap_add(vcpu, &table[index], v >> PAGE_SHIFT);
919 return 0;
920 }
921
 922 if (table[index] == shadow_trap_nonpresent_pte) {
 923 struct kvm_mmu_page *new_table;
 924 gfn_t pseudo_gfn;
 925
926 pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK)
927 >> PAGE_SHIFT;
928 new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
929 v, level - 1,
 930 1, 3, &table[index]);
 931 if (!new_table) {
932 pgprintk("nonpaging_map: ENOMEM\n");
933 return -ENOMEM;
934 }
935
 936 table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
 937 | PT_WRITABLE_MASK | PT_USER_MASK;
938 }
939 table_addr = table[index] & PT64_BASE_ADDR_MASK;
940 }
941}
942
943static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
944 struct kvm_mmu_page *sp)
945{
946 int i;
947
948 for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
949 sp->spt[i] = shadow_trap_nonpresent_pte;
950}
951
952static void mmu_free_roots(struct kvm_vcpu *vcpu)
953{
954 int i;
 955 struct kvm_mmu_page *page;
 956
957 if (!VALID_PAGE(vcpu->mmu.root_hpa))
958 return;
959#ifdef CONFIG_X86_64
960 if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
961 hpa_t root = vcpu->mmu.root_hpa;
962
963 page = page_header(root);
964 --page->root_count;
965 vcpu->mmu.root_hpa = INVALID_PAGE;
966 return;
967 }
968#endif
969 for (i = 0; i < 4; ++i) {
970 hpa_t root = vcpu->mmu.pae_root[i];
971
 972 if (root) {
973 root &= PT64_BASE_ADDR_MASK;
974 page = page_header(root);
975 --page->root_count;
976 }
977 vcpu->mmu.pae_root[i] = INVALID_PAGE;
978 }
979 vcpu->mmu.root_hpa = INVALID_PAGE;
980}
981
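/*
 * Allocate the shadow root(s) for the current mode: a single 4-level
 * root when shadow_root_level is PT64_ROOT_LEVEL, otherwise four PAE
 * roots, one per 1GB quadrant of the 32-bit guest address space.
 */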
982static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
983{
984 int i;
 985 gfn_t root_gfn;
986 struct kvm_mmu_page *page;
987
 988 root_gfn = vcpu->cr3 >> PAGE_SHIFT;
989
990#ifdef CONFIG_X86_64
991 if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
992 hpa_t root = vcpu->mmu.root_hpa;
993
994 ASSERT(!VALID_PAGE(root));
 995 page = kvm_mmu_get_page(vcpu, root_gfn, 0,
 996 PT64_ROOT_LEVEL, 0, 0, NULL);
 997 root = __pa(page->spt);
 998 ++page->root_count;
999 vcpu->mmu.root_hpa = root;
1000 return;
1001 }
1002#endif
1003 for (i = 0; i < 4; ++i) {
1004 hpa_t root = vcpu->mmu.pae_root[i];
1005
1006 ASSERT(!VALID_PAGE(root));
1007 if (vcpu->mmu.root_level == PT32E_ROOT_LEVEL) {
1008 if (!is_present_pte(vcpu->pdptrs[i])) {
1009 vcpu->mmu.pae_root[i] = 0;
1010 continue;
1011 }
 1012 root_gfn = vcpu->pdptrs[i] >> PAGE_SHIFT;
 1013 } else if (vcpu->mmu.root_level == 0)
 1014 root_gfn = 0;
 1015 page = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
 1016 PT32_ROOT_LEVEL, !is_paging(vcpu),
 1017 0, NULL);
 1018 root = __pa(page->spt);
 1019 ++page->root_count;
1020 vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
1021 }
1022 vcpu->mmu.root_hpa = __pa(vcpu->mmu.pae_root);
1023}
1024
1025static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
1026{
1027 return vaddr;
1028}
1029
1030static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
1031 u32 error_code)
1032{
 1033 gpa_t addr = gva;
 1034 hpa_t paddr;
 1035 int r;
 1036
1037 r = mmu_topup_memory_caches(vcpu);
1038 if (r)
1039 return r;
 1040
1041 ASSERT(vcpu);
1042 ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));
1043
 1044
 1045 paddr = gpa_to_hpa(vcpu->kvm, addr & PT64_BASE_ADDR_MASK);
 1046
1047 if (is_error_hpa(paddr))
1048 return 1;
6aa8b732 1049
 1050 return nonpaging_map(vcpu, addr & PAGE_MASK, paddr);
1051}
1052
1053static void nonpaging_free(struct kvm_vcpu *vcpu)
1054{
 1055 mmu_free_roots(vcpu);
1056}
1057
1058static int nonpaging_init_context(struct kvm_vcpu *vcpu)
1059{
1060 struct kvm_mmu *context = &vcpu->mmu;
1061
1062 context->new_cr3 = nonpaging_new_cr3;
1063 context->page_fault = nonpaging_page_fault;
1064 context->gva_to_gpa = nonpaging_gva_to_gpa;
1065 context->free = nonpaging_free;
 1066 context->prefetch_page = nonpaging_prefetch_page;
 1067 context->root_level = 0;
 1068 context->shadow_root_level = PT32E_ROOT_LEVEL;
 1069 context->root_hpa = INVALID_PAGE;
1070 return 0;
1071}
1072
1073static void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
1074{
 1075 ++vcpu->stat.tlb_flush;
 1076 kvm_x86_ops->tlb_flush(vcpu);
1077}
1078
1079static void paging_new_cr3(struct kvm_vcpu *vcpu)
1080{
 1081 pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->cr3);
 1082 mmu_free_roots(vcpu);
1083}
1084
1085static void inject_page_fault(struct kvm_vcpu *vcpu,
1086 u64 addr,
1087 u32 err_code)
1088{
 1089 kvm_x86_ops->inject_page_fault(vcpu, addr, err_code);
1090}
1091
1092static void paging_free(struct kvm_vcpu *vcpu)
1093{
1094 nonpaging_free(vcpu);
1095}
1096
1097#define PTTYPE 64
1098#include "paging_tmpl.h"
1099#undef PTTYPE
1100
1101#define PTTYPE 32
1102#include "paging_tmpl.h"
1103#undef PTTYPE
1104
 1105static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
1106{
1107 struct kvm_mmu *context = &vcpu->mmu;
1108
1109 ASSERT(is_pae(vcpu));
1110 context->new_cr3 = paging_new_cr3;
1111 context->page_fault = paging64_page_fault;
 1112 context->gva_to_gpa = paging64_gva_to_gpa;
 1113 context->prefetch_page = paging64_prefetch_page;
 1114 context->free = paging_free;
1115 context->root_level = level;
1116 context->shadow_root_level = level;
 1117 context->root_hpa = INVALID_PAGE;
1118 return 0;
1119}
1120
1121static int paging64_init_context(struct kvm_vcpu *vcpu)
1122{
1123 return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
1124}
1125
1126static int paging32_init_context(struct kvm_vcpu *vcpu)
1127{
1128 struct kvm_mmu *context = &vcpu->mmu;
1129
1130 context->new_cr3 = paging_new_cr3;
1131 context->page_fault = paging32_page_fault;
1132 context->gva_to_gpa = paging32_gva_to_gpa;
1133 context->free = paging_free;
 1134 context->prefetch_page = paging32_prefetch_page;
1135 context->root_level = PT32_ROOT_LEVEL;
1136 context->shadow_root_level = PT32E_ROOT_LEVEL;
 1137 context->root_hpa = INVALID_PAGE;
1138 return 0;
1139}
1140
1141static int paging32E_init_context(struct kvm_vcpu *vcpu)
1142{
 1143 return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
1144}
1145
1146static int init_kvm_mmu(struct kvm_vcpu *vcpu)
1147{
1148 ASSERT(vcpu);
1149 ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
1150
1151 if (!is_paging(vcpu))
1152 return nonpaging_init_context(vcpu);
 1153 else if (is_long_mode(vcpu))
1154 return paging64_init_context(vcpu);
1155 else if (is_pae(vcpu))
1156 return paging32E_init_context(vcpu);
1157 else
1158 return paging32_init_context(vcpu);
1159}
1160
1161static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
1162{
1163 ASSERT(vcpu);
1164 if (VALID_PAGE(vcpu->mmu.root_hpa)) {
1165 vcpu->mmu.free(vcpu);
1166 vcpu->mmu.root_hpa = INVALID_PAGE;
1167 }
1168}
1169
1170int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
1171{
1172 destroy_kvm_mmu(vcpu);
1173 return init_kvm_mmu(vcpu);
1174}
 1175EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
1176
1177int kvm_mmu_load(struct kvm_vcpu *vcpu)
 1178{
1179 int r;
1180
 1181 mutex_lock(&vcpu->kvm->lock);
 1182 r = mmu_topup_memory_caches(vcpu);
1183 if (r)
1184 goto out;
1185 mmu_alloc_roots(vcpu);
 1186 kvm_x86_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
 1187 kvm_mmu_flush_tlb(vcpu);
 1188out:
 1189 mutex_unlock(&vcpu->kvm->lock);
 1190 return r;
 1191}
1192EXPORT_SYMBOL_GPL(kvm_mmu_load);
1193
1194void kvm_mmu_unload(struct kvm_vcpu *vcpu)
1195{
1196 mmu_free_roots(vcpu);
1197}
 1198
 1199static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
1200 struct kvm_mmu_page *page,
1201 u64 *spte)
1202{
1203 u64 pte;
1204 struct kvm_mmu_page *child;
1205
1206 pte = *spte;
 1207 if (is_shadow_present_pte(pte)) {
 1208 if (page->role.level == PT_PAGE_TABLE_LEVEL)
 1209 rmap_remove(vcpu->kvm, spte);
1210 else {
1211 child = page_header(pte & PT64_BASE_ADDR_MASK);
 1212 mmu_page_remove_parent_pte(child, spte);
1213 }
1214 }
 1215 set_shadow_pte(spte, shadow_trap_nonpresent_pte);
 1216 kvm_flush_remote_tlbs(vcpu->kvm);
1217}
1218
1219static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
1220 struct kvm_mmu_page *page,
1221 u64 *spte,
1222 const void *new, int bytes,
1223 int offset_in_pte)
1224{
1225 if (page->role.level != PT_PAGE_TABLE_LEVEL)
1226 return;
1227
1228 if (page->role.glevels == PT32_ROOT_LEVEL)
1229 paging32_update_pte(vcpu, page, spte, new, bytes,
1230 offset_in_pte);
 1231 else
1232 paging64_update_pte(vcpu, page, spte, new, bytes,
1233 offset_in_pte);
1234}
1235
1236static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
1237{
1238 u64 *spte = vcpu->last_pte_updated;
1239
1240 return !!(spte && (*spte & PT_ACCESSED_MASK));
1241}
1242
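/*
 * Called after an emulated guest write to a gpa that may back a guest
 * page table: zap (and, for last-level pages, update) the shadow ptes
 * covered by the written bytes.  Misaligned writes and pages written
 * too frequently are simply unshadowed instead.
 */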
 1243void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 1244 const u8 *new, int bytes)
 1245{
1246 gfn_t gfn = gpa >> PAGE_SHIFT;
1247 struct kvm_mmu_page *page;
 1248 struct hlist_node *node, *n;
1249 struct hlist_head *bucket;
1250 unsigned index;
1251 u64 *spte;
 1252 unsigned offset = offset_in_page(gpa);
 1253 unsigned pte_size;
 1254 unsigned page_offset;
 1255 unsigned misaligned;
 1256 unsigned quadrant;
 1257 int level;
 1258 int flooded = 0;
 1259 int npte;
 1260
 1261 pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
 1262 kvm_mmu_audit(vcpu, "pre pte write");
1263 if (gfn == vcpu->last_pt_write_gfn
1264 && !last_updated_pte_accessed(vcpu)) {
1265 ++vcpu->last_pt_write_count;
1266 if (vcpu->last_pt_write_count >= 3)
1267 flooded = 1;
1268 } else {
1269 vcpu->last_pt_write_gfn = gfn;
1270 vcpu->last_pt_write_count = 1;
 1271 vcpu->last_pte_updated = NULL;
 1272 }
1273 index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
1274 bucket = &vcpu->kvm->mmu_page_hash[index];
 1275 hlist_for_each_entry_safe(page, node, n, bucket, hash_link) {
1276 if (page->gfn != gfn || page->role.metaphysical)
1277 continue;
1278 pte_size = page->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
1279 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
 1280 misaligned |= bytes < 4;
 1281 if (misaligned || flooded) {
1282 /*
1283 * Misaligned accesses are too much trouble to fix
1284 * up; also, they usually indicate a page is not used
1285 * as a page table.
1286 *
1287 * If we're seeing too many writes to a page,
1288 * it may no longer be a page table, or we may be
1289 * forking, in which case it is better to unmap the
1290 * page.
1291 */
1292 pgprintk("misaligned: gpa %llx bytes %d role %x\n",
1293 gpa, bytes, page->role.word);
 1294 kvm_mmu_zap_page(vcpu->kvm, page);
1295 continue;
1296 }
1297 page_offset = offset;
1298 level = page->role.level;
 1299 npte = 1;
 1300 if (page->role.glevels == PT32_ROOT_LEVEL) {
1301 page_offset <<= 1; /* 32->64 */
1302 /*
1303 * A 32-bit pde maps 4MB while the shadow pdes map
1304 * only 2MB. So we need to double the offset again
1305 * and zap two pdes instead of one.
1306 */
1307 if (level == PT32_ROOT_LEVEL) {
 1308 page_offset &= ~7; /* kill rounding error */
1309 page_offset <<= 1;
1310 npte = 2;
1311 }
 1312 quadrant = page_offset >> PAGE_SHIFT;
 1313 page_offset &= ~PAGE_MASK;
1314 if (quadrant != page->role.quadrant)
1315 continue;
 1316 }
 1317 spte = &page->spt[page_offset / sizeof(*spte)];
 1318 while (npte--) {
 1319 mmu_pte_write_zap_pte(vcpu, page, spte);
1320 mmu_pte_write_new_pte(vcpu, page, spte, new, bytes,
1321 page_offset & (pte_size - 1));
 1322 ++spte;
 1323 }
 1324 }
 1325 kvm_mmu_audit(vcpu, "post pte write");
1326}
1327
1328int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
1329{
1330 gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
1331
 1332 return kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
1333}
1334
 1335void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
1336{
1337 while (vcpu->kvm->n_free_mmu_pages < KVM_REFILL_PAGES) {
1338 struct kvm_mmu_page *page;
1339
1340 page = container_of(vcpu->kvm->active_mmu_pages.prev,
1341 struct kvm_mmu_page, link);
 1342 kvm_mmu_zap_page(vcpu->kvm, page);
1343 }
1344}
 1345
1346static void free_mmu_pages(struct kvm_vcpu *vcpu)
1347{
 1348 struct kvm_mmu_page *page;
 1349
1350 while (!list_empty(&vcpu->kvm->active_mmu_pages)) {
1351 page = container_of(vcpu->kvm->active_mmu_pages.next,
1352 struct kvm_mmu_page, link);
 1353 kvm_mmu_zap_page(vcpu->kvm, page);
 1354 }
 1355 free_page((unsigned long)vcpu->mmu.pae_root);
1356}
1357
1358static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
1359{
 1360 struct page *page;
1361 int i;
1362
1363 ASSERT(vcpu);
1364
1365 if (vcpu->kvm->n_requested_mmu_pages)
1366 vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_requested_mmu_pages;
1367 else
1368 vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_alloc_mmu_pages;
1369 /*
1370 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
1371 * Therefore we need to allocate shadow page tables in the first
1372 * 4GB of memory, which happens to fit the DMA32 zone.
1373 */
1374 page = alloc_page(GFP_KERNEL | __GFP_DMA32);
1375 if (!page)
1376 goto error_1;
1377 vcpu->mmu.pae_root = page_address(page);
1378 for (i = 0; i < 4; ++i)
1379 vcpu->mmu.pae_root[i] = INVALID_PAGE;
1380
1381 return 0;
1382
1383error_1:
1384 free_mmu_pages(vcpu);
1385 return -ENOMEM;
1386}
1387
 1388int kvm_mmu_create(struct kvm_vcpu *vcpu)
 1389{
1390 ASSERT(vcpu);
1391 ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
 1392
1393 return alloc_mmu_pages(vcpu);
1394}
 1395
1396int kvm_mmu_setup(struct kvm_vcpu *vcpu)
1397{
1398 ASSERT(vcpu);
1399 ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
 1400
 1401 return init_kvm_mmu(vcpu);
1402}
1403
1404void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
1405{
1406 ASSERT(vcpu);
1407
1408 destroy_kvm_mmu(vcpu);
1409 free_mmu_pages(vcpu);
 1410 mmu_free_memory_caches(vcpu);
1411}
1412
 1413void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
1414{
1415 struct kvm_mmu_page *page;
1416
1417 list_for_each_entry(page, &kvm->active_mmu_pages, link) {
1418 int i;
1419 u64 *pt;
1420
1421 if (!test_bit(slot, &page->slot_bitmap))
1422 continue;
1423
 1424 pt = page->spt;
1425 for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
1426 /* avoid RMW */
 1427 if (pt[i] & PT_WRITABLE_MASK) {
 1428 rmap_remove(kvm, &pt[i]);
 1429 pt[i] &= ~PT_WRITABLE_MASK;
 1430 }
1431 }
1432}
 1433
 1434void kvm_mmu_zap_all(struct kvm *kvm)
 1435{
 1436 struct kvm_mmu_page *page, *node;
 1437
1438 list_for_each_entry_safe(page, node, &kvm->active_mmu_pages, link)
1439 kvm_mmu_zap_page(kvm, page);
 1440
 1441 kvm_flush_remote_tlbs(kvm);
1442}
1443
1444void kvm_mmu_module_exit(void)
1445{
1446 if (pte_chain_cache)
1447 kmem_cache_destroy(pte_chain_cache);
1448 if (rmap_desc_cache)
1449 kmem_cache_destroy(rmap_desc_cache);
1450 if (mmu_page_header_cache)
1451 kmem_cache_destroy(mmu_page_header_cache);
1452}
1453
1454int kvm_mmu_module_init(void)
1455{
1456 pte_chain_cache = kmem_cache_create("kvm_pte_chain",
1457 sizeof(struct kvm_pte_chain),
 1458 0, 0, NULL);
1459 if (!pte_chain_cache)
1460 goto nomem;
1461 rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
1462 sizeof(struct kvm_rmap_desc),
 1463 0, 0, NULL);
1464 if (!rmap_desc_cache)
1465 goto nomem;
1466
1467 mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
1468 sizeof(struct kvm_mmu_page),
 1469 0, 0, NULL);
1470 if (!mmu_page_header_cache)
1471 goto nomem;
1472
1473 return 0;
1474
1475nomem:
1476 kvm_mmu_module_exit();
1477 return -ENOMEM;
1478}
1479
1480#ifdef AUDIT
1481
1482static const char *audit_msg;
1483
1484static gva_t canonicalize(gva_t gva)
1485{
1486#ifdef CONFIG_X86_64
1487 gva = (long long)(gva << 16) >> 16;
1488#endif
1489 return gva;
1490}
1491
1492static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
1493 gva_t va, int level)
1494{
1495 u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
1496 int i;
1497 gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));
1498
1499 for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
1500 u64 ent = pt[i];
1501
 1502 if (ent == shadow_trap_nonpresent_pte)
1503 continue;
1504
1505 va = canonicalize(va);
1506 if (level > 1) {
1507 if (ent == shadow_notrap_nonpresent_pte)
1508 printk(KERN_ERR "audit: (%s) nontrapping pte"
1509 " in nonleaf level: levels %d gva %lx"
1510 " level %d pte %llx\n", audit_msg,
1511 vcpu->mmu.root_level, va, level, ent);
1512
 1513 audit_mappings_page(vcpu, ent, va, level - 1);
 1514 } else {
1515 gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, va);
1516 hpa_t hpa = gpa_to_hpa(vcpu, gpa);
1517
 1518 if (is_shadow_present_pte(ent)
 1519 && (ent & PT64_BASE_ADDR_MASK) != hpa)
1520 printk(KERN_ERR "xx audit error: (%s) levels %d"
1521 " gva %lx gpa %llx hpa %llx ent %llx %d\n",
 1522 audit_msg, vcpu->mmu.root_level,
1523 va, gpa, hpa, ent,
1524 is_shadow_present_pte(ent));
1525 else if (ent == shadow_notrap_nonpresent_pte
1526 && !is_error_hpa(hpa))
1527 printk(KERN_ERR "audit: (%s) notrap shadow,"
1528 " valid guest gva %lx\n", audit_msg, va);
1529
1530 }
1531 }
1532}
1533
1534static void audit_mappings(struct kvm_vcpu *vcpu)
1535{
 1536 unsigned i;
1537
1538 if (vcpu->mmu.root_level == 4)
1539 audit_mappings_page(vcpu, vcpu->mmu.root_hpa, 0, 4);
1540 else
1541 for (i = 0; i < 4; ++i)
1542 if (vcpu->mmu.pae_root[i] & PT_PRESENT_MASK)
1543 audit_mappings_page(vcpu,
1544 vcpu->mmu.pae_root[i],
1545 i << 30,
1546 2);
1547}
1548
1549static int count_rmaps(struct kvm_vcpu *vcpu)
1550{
1551 int nmaps = 0;
1552 int i, j, k;
1553
1554 for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
1555 struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
1556 struct kvm_rmap_desc *d;
1557
1558 for (j = 0; j < m->npages; ++j) {
 1559 unsigned long *rmapp = &m->rmap[j];
 1560
 1561 if (!*rmapp)
 1562 continue;
 1563 if (!(*rmapp & 1)) {
1564 ++nmaps;
1565 continue;
1566 }
 1567 d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
1568 while (d) {
1569 for (k = 0; k < RMAP_EXT; ++k)
1570 if (d->shadow_ptes[k])
1571 ++nmaps;
1572 else
1573 break;
1574 d = d->more;
1575 }
1576 }
1577 }
1578 return nmaps;
1579}
1580
1581static int count_writable_mappings(struct kvm_vcpu *vcpu)
1582{
1583 int nmaps = 0;
1584 struct kvm_mmu_page *page;
1585 int i;
1586
1587 list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
 1588 u64 *pt = page->spt;
1589
1590 if (page->role.level != PT_PAGE_TABLE_LEVEL)
1591 continue;
1592
1593 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
1594 u64 ent = pt[i];
1595
1596 if (!(ent & PT_PRESENT_MASK))
1597 continue;
1598 if (!(ent & PT_WRITABLE_MASK))
1599 continue;
1600 ++nmaps;
1601 }
1602 }
1603 return nmaps;
1604}
1605
1606static void audit_rmap(struct kvm_vcpu *vcpu)
1607{
1608 int n_rmap = count_rmaps(vcpu);
1609 int n_actual = count_writable_mappings(vcpu);
1610
1611 if (n_rmap != n_actual)
1612 printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
1613 __FUNCTION__, audit_msg, n_rmap, n_actual);
1614}
1615
1616static void audit_write_protection(struct kvm_vcpu *vcpu)
1617{
1618 struct kvm_mmu_page *page;
1619 struct kvm_memory_slot *slot;
1620 unsigned long *rmapp;
1621 gfn_t gfn;
1622
1623 list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
1624 if (page->role.metaphysical)
1625 continue;
1626
1627 slot = gfn_to_memslot(vcpu->kvm, page->gfn);
1628 gfn = unalias_gfn(vcpu->kvm, page->gfn);
1629 rmapp = &slot->rmap[gfn - slot->base_gfn];
1630 if (*rmapp)
1631 printk(KERN_ERR "%s: (%s) shadow page has writable"
1632 " mappings: gfn %lx role %x\n",
1633 __FUNCTION__, audit_msg, page->gfn,
1634 page->role.word);
1635 }
1636}
1637
1638static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
1639{
1640 int olddbg = dbg;
1641
1642 dbg = 0;
1643 audit_msg = msg;
1644 audit_rmap(vcpu);
1645 audit_write_protection(vcpu);
1646 audit_mappings(vcpu);
1647 dbg = olddbg;
1648}
1649
1650#endif