KVM: MMU: Remove gva_to_hpa()
drivers/kvm/mmu.c
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay <yaniv@qumranet.com>
 *   Avi Kivity <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include "vmx.h"
#include "kvm.h"
#include "x86.h"

#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/module.h>

#include <asm/page.h>
#include <asm/cmpxchg.h>
#include <asm/io.h>

#undef MMU_DEBUG

#undef AUDIT

#ifdef AUDIT
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
#else
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
#endif

#ifdef MMU_DEBUG

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)

#else

#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)

#endif

#if defined(MMU_DEBUG) || defined(AUDIT)
static int dbg = 1;
#endif

#ifndef MMU_DEBUG
#define ASSERT(x) do { } while (0)
#else
#define ASSERT(x)							\
	if (!(x)) {							\
		printk(KERN_WARNING "assertion failed %s:%d: %s\n",	\
		       __FILE__, __LINE__, #x);				\
	}
#endif

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << 2)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_MASK (1ULL << 5)
#define PT_DIRTY_MASK (1ULL << 6)
#define PT_PAGE_SIZE_MASK (1ULL << 7)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_MASK (1ULL << 63)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)


#define PT_FIRST_AVAIL_BITS_SHIFT 9
#define PT64_SECOND_AVAIL_BITS_SHIFT 52

#define PT_SHADOW_IO_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)

#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
	(PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_LEVEL_MASK(level) \
	(((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))

#define PT64_INDEX(address, level)\
	(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))


#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
	(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LEVEL_MASK(level) \
	(((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))

#define PT32_INDEX(address, level)\
	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))

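/*
 * Worked example (illustrative, not part of the original source): with
 * 4KB pages (PAGE_SHIFT == 12), PT64_INDEX() slices an address into one
 * 9-bit index per level:
 *
 *	PT64_INDEX(addr, 1) == (addr >> 12) & 0x1ff	(page table)
 *	PT64_INDEX(addr, 2) == (addr >> 21) & 0x1ff	(page directory)
 *	PT64_INDEX(addr, 3) == (addr >> 30) & 0x1ff	(pdpt)
 *	PT64_INDEX(addr, 4) == (addr >> 39) & 0x1ff	(pml4)
 */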

#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#define PT64_DIR_BASE_ADDR_MASK \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))

#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
			| PT64_NX_MASK)

#define PFERR_PRESENT_MASK (1U << 0)
#define PFERR_WRITE_MASK (1U << 1)
#define PFERR_USER_MASK (1U << 2)
#define PFERR_FETCH_MASK (1U << 4)

#define PT64_ROOT_LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1

#define RMAP_EXT 4

struct kvm_rmap_desc {
	u64 *shadow_ptes[RMAP_EXT];
	struct kvm_rmap_desc *more;
};

static struct kmem_cache *pte_chain_cache;
static struct kmem_cache *rmap_desc_cache;
static struct kmem_cache *mmu_page_header_cache;

static u64 __read_mostly shadow_trap_nonpresent_pte;
static u64 __read_mostly shadow_notrap_nonpresent_pte;

void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
{
	shadow_trap_nonpresent_pte = trap_pte;
	shadow_notrap_nonpresent_pte = notrap_pte;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);

static int is_write_protection(struct kvm_vcpu *vcpu)
{
	return vcpu->cr0 & X86_CR0_WP;
}

static int is_cpuid_PSE36(void)
{
	return 1;
}

static int is_nx(struct kvm_vcpu *vcpu)
{
	return vcpu->shadow_efer & EFER_NX;
}

static int is_present_pte(unsigned long pte)
{
	return pte & PT_PRESENT_MASK;
}

static int is_shadow_present_pte(u64 pte)
{
	pte &= ~PT_SHADOW_IO_MARK;
	return pte != shadow_trap_nonpresent_pte
		&& pte != shadow_notrap_nonpresent_pte;
}

static int is_writeble_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

static int is_dirty_pte(unsigned long pte)
{
	return pte & PT_DIRTY_MASK;
}

static int is_io_pte(unsigned long pte)
{
	return pte & PT_SHADOW_IO_MARK;
}

static int is_rmap_pte(u64 pte)
{
	return pte != shadow_trap_nonpresent_pte
		&& pte != shadow_notrap_nonpresent_pte;
}

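/*
 * Background for pse36_gfn_delta() below (a sketch of the arithmetic,
 * not from the original source): with PSE-36, bits 13-16 of a 4MB guest
 * pde carry physical address bits 32-35.  The shift works out to
 * 32 - 13 - 12 = 7, so e.g. a gpte with bit 13 set contributes
 * (1 << 13) << 7 == 1 << 20 to the gfn, i.e. physical address bit 32.
 */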
static gfn_t pse36_gfn_delta(u32 gpte)
{
	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

	return (gpte & PT32_DIR_PSE36_MASK) << shift;
}

static void set_shadow_pte(u64 *sptep, u64 spte)
{
#ifdef CONFIG_X86_64
	set_64bit((unsigned long *)sptep, spte);
#else
	set_64bit((unsigned long long *)sptep, spte);
#endif
}
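/*
 * Note on set_shadow_pte() above (reasoning added here, not in the
 * original source): set_64bit() stores all eight bytes atomically, so a
 * hardware page walk racing on another CPU never observes a torn spte;
 * on 32-bit hosts a plain u64 assignment would be two 4-byte stores.
 */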

static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  struct kmem_cache *base_cache, int min)
{
	void *obj;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
		if (!obj)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = obj;
	}
	return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		kfree(mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
				       int min)
{
	struct page *page;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		page = alloc_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		set_page_private(page, 0);
		cache->objects[cache->nobjs++] = page_address(page);
	}
	return 0;
}

static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
{
	int r;

	kvm_mmu_free_some_pages(vcpu);
	r = mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache,
				   pte_chain_cache, 4);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
				   rmap_desc_cache, 1);
	if (r)
		goto out;
	r = mmu_topup_memory_cache_page(&vcpu->mmu_page_cache, 8);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->mmu_page_header_cache,
				   mmu_page_header_cache, 4);
out:
	return r;
}

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);
	mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache);
	mmu_free_memory_cache_page(&vcpu->mmu_page_cache);
	mmu_free_memory_cache(&vcpu->mmu_page_header_cache);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
				    size_t size)
{
	void *p;

	BUG_ON(!mc->nobjs);
	p = mc->objects[--mc->nobjs];
	memset(p, 0, size);
	return p;
}

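/*
 * Usage sketch for the caches above (illustrative only): callers refill
 * with GFP_KERNEL while sleeping is still allowed, so that allocations
 * taken later, deep in the fault path, cannot fail:
 *
 *	r = mmu_topup_memory_caches(vcpu);	(may sleep)
 *	if (r)
 *		return r;
 *	...
 *	pc = mmu_alloc_pte_chain(vcpu);		(pops the cache, no failure)
 */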
static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->mmu_pte_chain_cache,
				      sizeof(struct kvm_pte_chain));
}

static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
{
	kfree(pc);
}

static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->mmu_rmap_desc_cache,
				      sizeof(struct kvm_rmap_desc));
}

static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
{
	kfree(rd);
}

/*
 * Take gfn and return the reverse mapping to it.
 * Note: gfn must be unaliased before this function gets called.
 */
static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	slot = gfn_to_memslot(kvm, gfn);
	return &slot->rmap[gfn - slot->base_gfn];
}

/*
 * Reverse mapping data structures:
 *
 * If rmapp bit zero is zero, then rmapp points to the shadow page table
 * entry that points to page_address(page).
 *
 * If rmapp bit zero is one, then (rmapp & ~1) points to a struct
 * kvm_rmap_desc containing more mappings.
 */
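/*
 * Decoding sketch for the tagged word described above (illustrative, not
 * part of the driver):
 *
 *	if (!*rmapp)
 *		;					(no mappings)
 *	else if (!(*rmapp & 1))
 *		spte = (u64 *)*rmapp;			(exactly one spte)
 *	else
 *		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
 *							(descriptor chain)
 */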
static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
	struct kvm_mmu_page *page;
	struct kvm_rmap_desc *desc;
	unsigned long *rmapp;
	int i;

	if (!is_rmap_pte(*spte))
		return;
	gfn = unalias_gfn(vcpu->kvm, gfn);
	page = page_header(__pa(spte));
	page->gfns[spte - page->spt] = gfn;
	rmapp = gfn_to_rmap(vcpu->kvm, gfn);
	if (!*rmapp) {
		rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
		*rmapp = (unsigned long)spte;
	} else if (!(*rmapp & 1)) {
		rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
		desc = mmu_alloc_rmap_desc(vcpu);
		desc->shadow_ptes[0] = (u64 *)*rmapp;
		desc->shadow_ptes[1] = spte;
		*rmapp = (unsigned long)desc | 1;
	} else {
		rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
		while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
			desc = desc->more;
		if (desc->shadow_ptes[RMAP_EXT-1]) {
			desc->more = mmu_alloc_rmap_desc(vcpu);
			desc = desc->more;
		}
		for (i = 0; desc->shadow_ptes[i]; ++i)
			;
		desc->shadow_ptes[i] = spte;
	}
}

static void rmap_desc_remove_entry(unsigned long *rmapp,
				   struct kvm_rmap_desc *desc,
				   int i,
				   struct kvm_rmap_desc *prev_desc)
{
	int j;

	for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
		;
	desc->shadow_ptes[i] = desc->shadow_ptes[j];
	desc->shadow_ptes[j] = NULL;
	if (j != 0)
		return;
	if (!prev_desc && !desc->more)
		*rmapp = (unsigned long)desc->shadow_ptes[0];
	else
		if (prev_desc)
			prev_desc->more = desc->more;
		else
			*rmapp = (unsigned long)desc->more | 1;
	mmu_free_rmap_desc(desc);
}

static void rmap_remove(struct kvm *kvm, u64 *spte)
{
	struct kvm_rmap_desc *desc;
	struct kvm_rmap_desc *prev_desc;
	struct kvm_mmu_page *page;
	struct page *release_page;
	unsigned long *rmapp;
	int i;

	if (!is_rmap_pte(*spte))
		return;
	page = page_header(__pa(spte));
	release_page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
	if (is_writeble_pte(*spte))
		kvm_release_page_dirty(release_page);
	else
		kvm_release_page_clean(release_page);
	rmapp = gfn_to_rmap(kvm, page->gfns[spte - page->spt]);
	if (!*rmapp) {
		printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
		BUG();
	} else if (!(*rmapp & 1)) {
		rmap_printk("rmap_remove: %p %llx 1->0\n", spte, *spte);
		if ((u64 *)*rmapp != spte) {
			printk(KERN_ERR "rmap_remove: %p %llx 1->BUG\n",
			       spte, *spte);
			BUG();
		}
		*rmapp = 0;
	} else {
		rmap_printk("rmap_remove: %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
		prev_desc = NULL;
		while (desc) {
			for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
				if (desc->shadow_ptes[i] == spte) {
					rmap_desc_remove_entry(rmapp,
							       desc, i,
							       prev_desc);
					return;
				}
			prev_desc = desc;
			desc = desc->more;
		}
		BUG();
	}
}

static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
{
	struct kvm_rmap_desc *desc;
	struct kvm_rmap_desc *prev_desc;
	u64 *prev_spte;
	int i;

	if (!*rmapp)
		return NULL;
	else if (!(*rmapp & 1)) {
		if (!spte)
			return (u64 *)*rmapp;
		return NULL;
	}
	desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
	prev_desc = NULL;
	prev_spte = NULL;
	while (desc) {
		for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) {
			if (prev_spte == spte)
				return desc->shadow_ptes[i];
			prev_spte = desc->shadow_ptes[i];
		}
		desc = desc->more;
	}
	return NULL;
}

static void rmap_write_protect(struct kvm *kvm, u64 gfn)
{
	unsigned long *rmapp;
	u64 *spte;

	gfn = unalias_gfn(kvm, gfn);
	rmapp = gfn_to_rmap(kvm, gfn);

	spte = rmap_next(kvm, rmapp, NULL);
	while (spte) {
		BUG_ON(!spte);
		BUG_ON(!(*spte & PT_PRESENT_MASK));
		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
		if (is_writeble_pte(*spte))
			set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
		kvm_flush_remote_tlbs(kvm);
		spte = rmap_next(kvm, rmapp, spte);
	}
}

#ifdef MMU_DEBUG
static int is_empty_shadow_page(u64 *spt)
{
	u64 *pos;
	u64 *end;

	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
		if ((*pos & ~PT_SHADOW_IO_MARK) != shadow_trap_nonpresent_pte) {
			printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
			       pos, *pos);
			return 0;
		}
	return 1;
}
#endif

static void kvm_mmu_free_page(struct kvm *kvm,
			      struct kvm_mmu_page *page_head)
{
	ASSERT(is_empty_shadow_page(page_head->spt));
	list_del(&page_head->link);
	__free_page(virt_to_page(page_head->spt));
	__free_page(virt_to_page(page_head->gfns));
	kfree(page_head);
	++kvm->n_free_mmu_pages;
}

static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
	return gfn;
}

static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
					       u64 *parent_pte)
{
	struct kvm_mmu_page *page;

	if (!vcpu->kvm->n_free_mmu_pages)
		return NULL;

	page = mmu_memory_cache_alloc(&vcpu->mmu_page_header_cache,
				      sizeof *page);
	page->spt = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
	page->gfns = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
	set_page_private(virt_to_page(page->spt), (unsigned long)page);
	list_add(&page->link, &vcpu->kvm->active_mmu_pages);
	ASSERT(is_empty_shadow_page(page->spt));
	page->slot_bitmap = 0;
	page->multimapped = 0;
	page->parent_pte = parent_pte;
	--vcpu->kvm->n_free_mmu_pages;
	return page;
}

static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *page, u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!parent_pte)
		return;
	if (!page->multimapped) {
		u64 *old = page->parent_pte;

		if (!old) {
			page->parent_pte = parent_pte;
			return;
		}
		page->multimapped = 1;
		pte_chain = mmu_alloc_pte_chain(vcpu);
		INIT_HLIST_HEAD(&page->parent_ptes);
		hlist_add_head(&pte_chain->link, &page->parent_ptes);
		pte_chain->parent_ptes[0] = old;
	}
	hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link) {
		if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
			continue;
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
			if (!pte_chain->parent_ptes[i]) {
				pte_chain->parent_ptes[i] = parent_pte;
				return;
			}
	}
	pte_chain = mmu_alloc_pte_chain(vcpu);
	BUG_ON(!pte_chain);
	hlist_add_head(&pte_chain->link, &page->parent_ptes);
	pte_chain->parent_ptes[0] = parent_pte;
}

static void mmu_page_remove_parent_pte(struct kvm_mmu_page *page,
				       u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!page->multimapped) {
		BUG_ON(page->parent_pte != parent_pte);
		page->parent_pte = NULL;
		return;
	}
	hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link)
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
			if (!pte_chain->parent_ptes[i])
				break;
			if (pte_chain->parent_ptes[i] != parent_pte)
				continue;
			while (i + 1 < NR_PTE_CHAIN_ENTRIES
				&& pte_chain->parent_ptes[i + 1]) {
				pte_chain->parent_ptes[i]
					= pte_chain->parent_ptes[i + 1];
				++i;
			}
			pte_chain->parent_ptes[i] = NULL;
			if (i == 0) {
				hlist_del(&pte_chain->link);
				mmu_free_pte_chain(pte_chain);
				if (hlist_empty(&page->parent_ptes)) {
					page->multimapped = 0;
					page->parent_pte = NULL;
				}
			}
			return;
		}
	BUG();
}

static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm,
						gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *page;
	struct hlist_node *node;

	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &kvm->mmu_page_hash[index];
	hlist_for_each_entry(page, node, bucket, hash_link)
		if (page->gfn == gfn && !page->role.metaphysical) {
			pgprintk("%s: found role %x\n",
				 __FUNCTION__, page->role.word);
			return page;
		}
	return NULL;
}

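/*
 * Note on the quadrant computation in kvm_mmu_get_page() below (an
 * explanatory sketch, not from the original source): a 32-bit guest page
 * table packs 1024 entries into a page while a shadow page holds only
 * 512, so each guest table is shadowed by two pages (four for a 32-bit
 * pgd under the 3-level shadow); role.quadrant records which slice of
 * the guest table a given shadow page covers, e.g. quadrant 1 of a guest
 * page table maps its upper 512 gptes.
 */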
static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
					     gfn_t gfn,
					     gva_t gaddr,
					     unsigned level,
					     int metaphysical,
					     unsigned hugepage_access,
					     u64 *parent_pte)
{
	union kvm_mmu_page_role role;
	unsigned index;
	unsigned quadrant;
	struct hlist_head *bucket;
	struct kvm_mmu_page *page;
	struct hlist_node *node;

	role.word = 0;
	role.glevels = vcpu->mmu.root_level;
	role.level = level;
	role.metaphysical = metaphysical;
	role.hugepage_access = hugepage_access;
	if (vcpu->mmu.root_level <= PT32_ROOT_LEVEL) {
		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
		role.quadrant = quadrant;
	}
	pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
		 gfn, role.word);
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &vcpu->kvm->mmu_page_hash[index];
	hlist_for_each_entry(page, node, bucket, hash_link)
		if (page->gfn == gfn && page->role.word == role.word) {
			mmu_page_add_parent_pte(vcpu, page, parent_pte);
			pgprintk("%s: found\n", __FUNCTION__);
			return page;
		}
	page = kvm_mmu_alloc_page(vcpu, parent_pte);
	if (!page)
		return page;
	pgprintk("%s: adding gfn %lx role %x\n", __FUNCTION__, gfn, role.word);
	page->gfn = gfn;
	page->role = role;
	hlist_add_head(&page->hash_link, bucket);
	vcpu->mmu.prefetch_page(vcpu, page);
	if (!metaphysical)
		rmap_write_protect(vcpu->kvm, gfn);
	return page;
}

static void kvm_mmu_page_unlink_children(struct kvm *kvm,
					 struct kvm_mmu_page *page)
{
	unsigned i;
	u64 *pt;
	u64 ent;

	pt = page->spt;

	if (page->role.level == PT_PAGE_TABLE_LEVEL) {
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
			if (is_shadow_present_pte(pt[i]))
				rmap_remove(kvm, &pt[i]);
			pt[i] = shadow_trap_nonpresent_pte;
		}
		kvm_flush_remote_tlbs(kvm);
		return;
	}

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		ent = pt[i];

		pt[i] = shadow_trap_nonpresent_pte;
		if (!is_shadow_present_pte(ent))
			continue;
		ent &= PT64_BASE_ADDR_MASK;
		mmu_page_remove_parent_pte(page_header(ent), &pt[i]);
	}
	kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_put_page(struct kvm_mmu_page *page,
			     u64 *parent_pte)
{
	mmu_page_remove_parent_pte(page, parent_pte);
}

static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
{
	int i;

	for (i = 0; i < KVM_MAX_VCPUS; ++i)
		if (kvm->vcpus[i])
			kvm->vcpus[i]->last_pte_updated = NULL;
}

static void kvm_mmu_zap_page(struct kvm *kvm,
			     struct kvm_mmu_page *page)
{
	u64 *parent_pte;

	++kvm->stat.mmu_shadow_zapped;
	while (page->multimapped || page->parent_pte) {
		if (!page->multimapped)
			parent_pte = page->parent_pte;
		else {
			struct kvm_pte_chain *chain;

			chain = container_of(page->parent_ptes.first,
					     struct kvm_pte_chain, link);
			parent_pte = chain->parent_ptes[0];
		}
		BUG_ON(!parent_pte);
		kvm_mmu_put_page(page, parent_pte);
		set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
	}
	kvm_mmu_page_unlink_children(kvm, page);
	if (!page->root_count) {
		hlist_del(&page->hash_link);
		kvm_mmu_free_page(kvm, page);
	} else
		list_move(&page->link, &kvm->active_mmu_pages);
	kvm_mmu_reset_last_pte_updated(kvm);
}

/*
 * Changing the number of mmu pages allocated to the vm.
 * Note: if kvm_nr_mmu_pages is too small, you will get a deadlock.
 */
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
{
	/*
	 * If we set the number of mmu pages to be smaller than the
	 * number of active pages, we must free some mmu pages before we
	 * change the value.
	 */

	if ((kvm->n_alloc_mmu_pages - kvm->n_free_mmu_pages) >
	    kvm_nr_mmu_pages) {
		int n_used_mmu_pages = kvm->n_alloc_mmu_pages
				       - kvm->n_free_mmu_pages;

		while (n_used_mmu_pages > kvm_nr_mmu_pages) {
			struct kvm_mmu_page *page;

			page = container_of(kvm->active_mmu_pages.prev,
					    struct kvm_mmu_page, link);
			kvm_mmu_zap_page(kvm, page);
			n_used_mmu_pages--;
		}
		kvm->n_free_mmu_pages = 0;
	} else
		kvm->n_free_mmu_pages += kvm_nr_mmu_pages
					 - kvm->n_alloc_mmu_pages;

	kvm->n_alloc_mmu_pages = kvm_nr_mmu_pages;
}

static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *page;
	struct hlist_node *node, *n;
	int r;

	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
	r = 0;
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &kvm->mmu_page_hash[index];
	hlist_for_each_entry_safe(page, node, n, bucket, hash_link)
		if (page->gfn == gfn && !page->role.metaphysical) {
			pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
				 page->role.word);
			kvm_mmu_zap_page(kvm, page);
			r = 1;
		}
	return r;
}

static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_mmu_page *page;

	while ((page = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
		pgprintk("%s: zap %lx %x\n",
			 __FUNCTION__, gfn, page->role.word);
		kvm_mmu_zap_page(kvm, page);
	}
}

static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
{
	int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
	struct kvm_mmu_page *page_head = page_header(__pa(pte));

	__set_bit(slot, &page_head->slot_bitmap);
}

hpa_t gpa_to_hpa(struct kvm *kvm, gpa_t gpa)
{
	struct page *page;
	hpa_t hpa;

	ASSERT((gpa & HPA_ERR_MASK) == 0);
	page = gfn_to_page(kvm, gpa >> PAGE_SHIFT);
	hpa = ((hpa_t)page_to_pfn(page) << PAGE_SHIFT) | (gpa & (PAGE_SIZE-1));
	if (is_error_page(page))
		return hpa | HPA_ERR_MASK;
	return hpa;
}

struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);

	if (gpa == UNMAPPED_GVA)
		return NULL;
	return pfn_to_page(gpa_to_hpa(vcpu->kvm, gpa) >> PAGE_SHIFT);
}

static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
{
}

static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, struct page *page)
{
	int level = PT32E_ROOT_LEVEL;
	hpa_t table_addr = vcpu->mmu.root_hpa;

	for (; ; level--) {
		u32 index = PT64_INDEX(v, level);
		u64 *table;
		u64 pte;

		ASSERT(VALID_PAGE(table_addr));
		table = __va(table_addr);

		if (level == 1) {
			int was_rmapped;

			pte = table[index];
			was_rmapped = is_rmap_pte(pte);
			if (is_shadow_present_pte(pte) && is_writeble_pte(pte)) {
				kvm_release_page_clean(page);
				return 0;
			}
			mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
			page_header_update_slot(vcpu->kvm, table,
						v >> PAGE_SHIFT);
			table[index] = page_to_phys(page)
				| PT_PRESENT_MASK | PT_WRITABLE_MASK
				| PT_USER_MASK;
			if (!was_rmapped)
				rmap_add(vcpu, &table[index], v >> PAGE_SHIFT);
			else
				kvm_release_page_clean(page);

			return 0;
		}

		if (table[index] == shadow_trap_nonpresent_pte) {
			struct kvm_mmu_page *new_table;
			gfn_t pseudo_gfn;

			pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
			new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
						     v, level - 1,
						     1, 3, &table[index]);
			if (!new_table) {
				pgprintk("nonpaging_map: ENOMEM\n");
				kvm_release_page_clean(page);
				return -ENOMEM;
			}

			table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
				| PT_WRITABLE_MASK | PT_USER_MASK;
		}
		table_addr = table[index] & PT64_BASE_ADDR_MASK;
	}
}

static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp)
{
	int i;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
		sp->spt[i] = shadow_trap_nonpresent_pte;
}

static void mmu_free_roots(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_mmu_page *page;

	if (!VALID_PAGE(vcpu->mmu.root_hpa))
		return;
#ifdef CONFIG_X86_64
	if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->mmu.root_hpa;

		page = page_header(root);
		--page->root_count;
		vcpu->mmu.root_hpa = INVALID_PAGE;
		return;
	}
#endif
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->mmu.pae_root[i];

		if (root) {
			root &= PT64_BASE_ADDR_MASK;
			page = page_header(root);
			--page->root_count;
		}
		vcpu->mmu.pae_root[i] = INVALID_PAGE;
	}
	vcpu->mmu.root_hpa = INVALID_PAGE;
}

static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
{
	int i;
	gfn_t root_gfn;
	struct kvm_mmu_page *page;

	root_gfn = vcpu->cr3 >> PAGE_SHIFT;

#ifdef CONFIG_X86_64
	if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->mmu.root_hpa;

		ASSERT(!VALID_PAGE(root));
		page = kvm_mmu_get_page(vcpu, root_gfn, 0,
					PT64_ROOT_LEVEL, 0, 0, NULL);
		root = __pa(page->spt);
		++page->root_count;
		vcpu->mmu.root_hpa = root;
		return;
	}
#endif
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->mmu.pae_root[i];

		ASSERT(!VALID_PAGE(root));
		if (vcpu->mmu.root_level == PT32E_ROOT_LEVEL) {
			if (!is_present_pte(vcpu->pdptrs[i])) {
				vcpu->mmu.pae_root[i] = 0;
				continue;
			}
			root_gfn = vcpu->pdptrs[i] >> PAGE_SHIFT;
		} else if (vcpu->mmu.root_level == 0)
			root_gfn = 0;
		page = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
					PT32_ROOT_LEVEL, !is_paging(vcpu),
					0, NULL);
		root = __pa(page->spt);
		++page->root_count;
		vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
	}
	vcpu->mmu.root_hpa = __pa(vcpu->mmu.pae_root);
}

static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	return vaddr;
}

static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
				u32 error_code)
{
	struct page *page;
	int r;

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	ASSERT(vcpu);
	ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));

	page = gfn_to_page(vcpu->kvm, gva >> PAGE_SHIFT);

	if (is_error_page(page)) {
		kvm_release_page_clean(page);
		return 1;
	}

	return nonpaging_map(vcpu, gva & PAGE_MASK, page);
}

static void nonpaging_free(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}

static int nonpaging_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->mmu;

	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = nonpaging_page_fault;
	context->gva_to_gpa = nonpaging_gva_to_gpa;
	context->free = nonpaging_free;
	context->prefetch_page = nonpaging_prefetch_page;
	context->root_level = 0;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.tlb_flush;
	kvm_x86_ops->tlb_flush(vcpu);
}

static void paging_new_cr3(struct kvm_vcpu *vcpu)
{
	pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->cr3);
	mmu_free_roots(vcpu);
}

static void inject_page_fault(struct kvm_vcpu *vcpu,
			      u64 addr,
			      u32 err_code)
{
	kvm_x86_ops->inject_page_fault(vcpu, addr, err_code);
}

static void paging_free(struct kvm_vcpu *vcpu)
{
	nonpaging_free(vcpu);
}

#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE

static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
{
	struct kvm_mmu *context = &vcpu->mmu;

	ASSERT(is_pae(vcpu));
	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging64_page_fault;
	context->gva_to_gpa = paging64_gva_to_gpa;
	context->prefetch_page = paging64_prefetch_page;
	context->free = paging_free;
	context->root_level = level;
	context->shadow_root_level = level;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

static int paging64_init_context(struct kvm_vcpu *vcpu)
{
	return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
}

static int paging32_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->mmu;

	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging32_page_fault;
	context->gva_to_gpa = paging32_gva_to_gpa;
	context->free = paging_free;
	context->prefetch_page = paging32_prefetch_page;
	context->root_level = PT32_ROOT_LEVEL;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

static int paging32E_init_context(struct kvm_vcpu *vcpu)
{
	return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
}

static int init_kvm_mmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));

	if (!is_paging(vcpu))
		return nonpaging_init_context(vcpu);
	else if (is_long_mode(vcpu))
		return paging64_init_context(vcpu);
	else if (is_pae(vcpu))
		return paging32E_init_context(vcpu);
	else
		return paging32_init_context(vcpu);
}

static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	if (VALID_PAGE(vcpu->mmu.root_hpa)) {
		vcpu->mmu.free(vcpu);
		vcpu->mmu.root_hpa = INVALID_PAGE;
	}
}

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
	destroy_kvm_mmu(vcpu);
	return init_kvm_mmu(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);

int kvm_mmu_load(struct kvm_vcpu *vcpu)
{
	int r;

	mutex_lock(&vcpu->kvm->lock);
	r = mmu_topup_memory_caches(vcpu);
	if (r)
		goto out;
	mmu_alloc_roots(vcpu);
	kvm_x86_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
	kvm_mmu_flush_tlb(vcpu);
out:
	mutex_unlock(&vcpu->kvm->lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_load);

void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}

static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *page,
				  u64 *spte)
{
	u64 pte;
	struct kvm_mmu_page *child;

	pte = *spte;
	if (is_shadow_present_pte(pte)) {
		if (page->role.level == PT_PAGE_TABLE_LEVEL)
			rmap_remove(vcpu->kvm, spte);
		else {
			child = page_header(pte & PT64_BASE_ADDR_MASK);
			mmu_page_remove_parent_pte(child, spte);
		}
	}
	set_shadow_pte(spte, shadow_trap_nonpresent_pte);
}

static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *page,
				  u64 *spte,
				  const void *new, int bytes,
				  int offset_in_pte)
{
	if (page->role.level != PT_PAGE_TABLE_LEVEL) {
		++vcpu->kvm->stat.mmu_pde_zapped;
		return;
	}

	++vcpu->kvm->stat.mmu_pte_updated;
	if (page->role.glevels == PT32_ROOT_LEVEL)
		paging32_update_pte(vcpu, page, spte, new, bytes,
				    offset_in_pte);
	else
		paging64_update_pte(vcpu, page, spte, new, bytes,
				    offset_in_pte);
}

static bool need_remote_flush(u64 old, u64 new)
{
	if (!is_shadow_present_pte(old))
		return false;
	if (!is_shadow_present_pte(new))
		return true;
	if ((old ^ new) & PT64_BASE_ADDR_MASK)
		return true;
	old ^= PT64_NX_MASK;
	new ^= PT64_NX_MASK;
	return (old & ~new & PT64_PERM_MASK) != 0;
}
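/*
 * Example of the rule implemented above (illustrative): clearing
 * PT_WRITABLE_MASK in a present spte takes a permission away, so remote
 * TLBs that may still cache the old, more permissive entry must be
 * flushed.  old and new have PT64_NX_MASK flipped first because for NX a
 * *set* bit is the restrictive state, unlike the other PT64_PERM_MASK
 * bits, where a set bit grants access.
 */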

static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
{
	if (need_remote_flush(old, new))
		kvm_flush_remote_tlbs(vcpu->kvm);
	else
		kvm_mmu_flush_tlb(vcpu);
}

static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
{
	u64 *spte = vcpu->last_pte_updated;

	return !!(spte && (*spte & PT_ACCESSED_MASK));
}

void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		       const u8 *new, int bytes)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct kvm_mmu_page *page;
	struct hlist_node *node, *n;
	struct hlist_head *bucket;
	unsigned index;
	u64 entry;
	u64 *spte;
	unsigned offset = offset_in_page(gpa);
	unsigned pte_size;
	unsigned page_offset;
	unsigned misaligned;
	unsigned quadrant;
	int level;
	int flooded = 0;
	int npte;

	pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
	++vcpu->kvm->stat.mmu_pte_write;
	kvm_mmu_audit(vcpu, "pre pte write");
	if (gfn == vcpu->last_pt_write_gfn
	    && !last_updated_pte_accessed(vcpu)) {
		++vcpu->last_pt_write_count;
		if (vcpu->last_pt_write_count >= 3)
			flooded = 1;
	} else {
		vcpu->last_pt_write_gfn = gfn;
		vcpu->last_pt_write_count = 1;
		vcpu->last_pte_updated = NULL;
	}
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &vcpu->kvm->mmu_page_hash[index];
	hlist_for_each_entry_safe(page, node, n, bucket, hash_link) {
		if (page->gfn != gfn || page->role.metaphysical)
			continue;
		pte_size = page->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
		misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
		misaligned |= bytes < 4;
		if (misaligned || flooded) {
			/*
			 * Misaligned accesses are too much trouble to fix
			 * up; also, they usually indicate a page is not used
			 * as a page table.
			 *
			 * If we're seeing too many writes to a page,
			 * it may no longer be a page table, or we may be
			 * forking, in which case it is better to unmap the
			 * page.
			 */
			pgprintk("misaligned: gpa %llx bytes %d role %x\n",
				 gpa, bytes, page->role.word);
			kvm_mmu_zap_page(vcpu->kvm, page);
			++vcpu->kvm->stat.mmu_flooded;
			continue;
		}
		page_offset = offset;
		level = page->role.level;
		npte = 1;
		if (page->role.glevels == PT32_ROOT_LEVEL) {
			page_offset <<= 1;	/* 32->64 */
			/*
			 * A 32-bit pde maps 4MB while the shadow pdes map
			 * only 2MB.  So we need to double the offset again
			 * and zap two pdes instead of one.
			 */
			if (level == PT32_ROOT_LEVEL) {
				page_offset &= ~7; /* kill rounding error */
				page_offset <<= 1;
				npte = 2;
			}
			quadrant = page_offset >> PAGE_SHIFT;
			page_offset &= ~PAGE_MASK;
			if (quadrant != page->role.quadrant)
				continue;
		}
		spte = &page->spt[page_offset / sizeof(*spte)];
		while (npte--) {
			entry = *spte;
			mmu_pte_write_zap_pte(vcpu, page, spte);
			mmu_pte_write_new_pte(vcpu, page, spte, new, bytes,
					      page_offset & (pte_size - 1));
			mmu_pte_write_flush_tlb(vcpu, entry, *spte);
			++spte;
		}
	}
	kvm_mmu_audit(vcpu, "post pte write");
}

int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);

	return kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
}

void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
	while (vcpu->kvm->n_free_mmu_pages < KVM_REFILL_PAGES) {
		struct kvm_mmu_page *page;

		page = container_of(vcpu->kvm->active_mmu_pages.prev,
				    struct kvm_mmu_page, link);
		kvm_mmu_zap_page(vcpu->kvm, page);
		++vcpu->kvm->stat.mmu_recycled;
	}
}

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
{
	int r;
	enum emulation_result er;

	mutex_lock(&vcpu->kvm->lock);
	r = vcpu->mmu.page_fault(vcpu, cr2, error_code);
	if (r < 0)
		goto out;

	if (!r) {
		r = 1;
		goto out;
	}

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		goto out;

	er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);
	mutex_unlock(&vcpu->kvm->lock);

	switch (er) {
	case EMULATE_DONE:
		return 1;
	case EMULATE_DO_MMIO:
		++vcpu->stat.mmio_exits;
		return 0;
	case EMULATE_FAIL:
		kvm_report_emulation_failure(vcpu, "pagetable");
		return 1;
	default:
		BUG();
	}
out:
	mutex_unlock(&vcpu->kvm->lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);

static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *page;

	while (!list_empty(&vcpu->kvm->active_mmu_pages)) {
		page = container_of(vcpu->kvm->active_mmu_pages.next,
				    struct kvm_mmu_page, link);
		kvm_mmu_zap_page(vcpu->kvm, page);
	}
	free_page((unsigned long)vcpu->mmu.pae_root);
}

static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
{
	struct page *page;
	int i;

	ASSERT(vcpu);

	if (vcpu->kvm->n_requested_mmu_pages)
		vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_requested_mmu_pages;
	else
		vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_alloc_mmu_pages;
	/*
	 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
	 * Therefore we need to allocate shadow page tables in the first
	 * 4GB of memory, which happens to fit the DMA32 zone.
	 */
	page = alloc_page(GFP_KERNEL | __GFP_DMA32);
	if (!page)
		goto error_1;
	vcpu->mmu.pae_root = page_address(page);
	for (i = 0; i < 4; ++i)
		vcpu->mmu.pae_root[i] = INVALID_PAGE;

	return 0;

error_1:
	free_mmu_pages(vcpu);
	return -ENOMEM;
}

int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));

	return alloc_mmu_pages(vcpu);
}

int kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));

	return init_kvm_mmu(vcpu);
}

void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);

	destroy_kvm_mmu(vcpu);
	free_mmu_pages(vcpu);
	mmu_free_memory_caches(vcpu);
}

void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
{
	struct kvm_mmu_page *page;

	list_for_each_entry(page, &kvm->active_mmu_pages, link) {
		int i;
		u64 *pt;

		if (!test_bit(slot, &page->slot_bitmap))
			continue;

		pt = page->spt;
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
			/* avoid RMW */
			if (pt[i] & PT_WRITABLE_MASK)
				pt[i] &= ~PT_WRITABLE_MASK;
	}
}

void kvm_mmu_zap_all(struct kvm *kvm)
{
	struct kvm_mmu_page *page, *node;

	list_for_each_entry_safe(page, node, &kvm->active_mmu_pages, link)
		kvm_mmu_zap_page(kvm, page);

	kvm_flush_remote_tlbs(kvm);
}

void kvm_mmu_module_exit(void)
{
	if (pte_chain_cache)
		kmem_cache_destroy(pte_chain_cache);
	if (rmap_desc_cache)
		kmem_cache_destroy(rmap_desc_cache);
	if (mmu_page_header_cache)
		kmem_cache_destroy(mmu_page_header_cache);
}

int kvm_mmu_module_init(void)
{
	pte_chain_cache = kmem_cache_create("kvm_pte_chain",
					    sizeof(struct kvm_pte_chain),
					    0, 0, NULL);
	if (!pte_chain_cache)
		goto nomem;
	rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
					    sizeof(struct kvm_rmap_desc),
					    0, 0, NULL);
	if (!rmap_desc_cache)
		goto nomem;

	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
						  sizeof(struct kvm_mmu_page),
						  0, 0, NULL);
	if (!mmu_page_header_cache)
		goto nomem;

	return 0;

nomem:
	kvm_mmu_module_exit();
	return -ENOMEM;
}

/*
 * Calculate mmu pages needed for kvm.
 */
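/*
 * Illustrative sizing (assumes KVM_PERMILLE_MMU_PAGES is 20, its value at
 * the time of writing): a guest with 512MB of memory, i.e. 131072 4KB
 * pages, gets 131072 * 20 / 1000 = 2621 shadow pages, subject to the
 * KVM_MIN_ALLOC_MMU_PAGES floor applied below.
 */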
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
{
	int i;
	unsigned int nr_mmu_pages;
	unsigned int nr_pages = 0;

	for (i = 0; i < kvm->nmemslots; i++)
		nr_pages += kvm->memslots[i].npages;

	nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
	nr_mmu_pages = max(nr_mmu_pages,
			   (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);

	return nr_mmu_pages;
}

#ifdef AUDIT

static const char *audit_msg;

static gva_t canonicalize(gva_t gva)
{
#ifdef CONFIG_X86_64
	gva = (long long)(gva << 16) >> 16;
#endif
	return gva;
}

static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
				gva_t va, int level)
{
	u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
	int i;
	gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
		u64 ent = pt[i];

		if (ent == shadow_trap_nonpresent_pte)
			continue;

		va = canonicalize(va);
		if (level > 1) {
			if (ent == shadow_notrap_nonpresent_pte)
				printk(KERN_ERR "audit: (%s) nontrapping pte"
				       " in nonleaf level: levels %d gva %lx"
				       " level %d pte %llx\n", audit_msg,
				       vcpu->mmu.root_level, va, level, ent);

			audit_mappings_page(vcpu, ent, va, level - 1);
		} else {
			/* gpa_to_hpa() takes a struct kvm *, not a vcpu */
			gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, va);
			hpa_t hpa = gpa_to_hpa(vcpu->kvm, gpa);
			struct page *page;

			if (is_shadow_present_pte(ent)
			    && (ent & PT64_BASE_ADDR_MASK) != hpa)
				printk(KERN_ERR "xx audit error: (%s) levels %d"
				       " gva %lx gpa %llx hpa %llx ent %llx %d\n",
				       audit_msg, vcpu->mmu.root_level,
				       va, gpa, hpa, ent,
				       is_shadow_present_pte(ent));
			else if (ent == shadow_notrap_nonpresent_pte
				 && !is_error_hpa(hpa))
				printk(KERN_ERR "audit: (%s) notrap shadow,"
				       " valid guest gva %lx\n", audit_msg, va);
			page = pfn_to_page((gpa & PT64_BASE_ADDR_MASK)
					   >> PAGE_SHIFT);
			kvm_release_page_clean(page);
		}
	}
}

static void audit_mappings(struct kvm_vcpu *vcpu)
{
	unsigned i;

	if (vcpu->mmu.root_level == 4)
		audit_mappings_page(vcpu, vcpu->mmu.root_hpa, 0, 4);
	else
		for (i = 0; i < 4; ++i)
			if (vcpu->mmu.pae_root[i] & PT_PRESENT_MASK)
				audit_mappings_page(vcpu,
						    vcpu->mmu.pae_root[i],
						    i << 30,
						    2);
}

static int count_rmaps(struct kvm_vcpu *vcpu)
{
	int nmaps = 0;
	int i, j, k;

	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
		struct kvm_rmap_desc *d;

		for (j = 0; j < m->npages; ++j) {
			unsigned long *rmapp = &m->rmap[j];

			if (!*rmapp)
				continue;
			if (!(*rmapp & 1)) {
				++nmaps;
				continue;
			}
			d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
			while (d) {
				for (k = 0; k < RMAP_EXT; ++k)
					if (d->shadow_ptes[k])
						++nmaps;
					else
						break;
				d = d->more;
			}
		}
	}
	return nmaps;
}

static int count_writable_mappings(struct kvm_vcpu *vcpu)
{
	int nmaps = 0;
	struct kvm_mmu_page *page;
	int i;

	list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
		u64 *pt = page->spt;

		if (page->role.level != PT_PAGE_TABLE_LEVEL)
			continue;

		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
			u64 ent = pt[i];

			if (!(ent & PT_PRESENT_MASK))
				continue;
			if (!(ent & PT_WRITABLE_MASK))
				continue;
			++nmaps;
		}
	}
	return nmaps;
}

static void audit_rmap(struct kvm_vcpu *vcpu)
{
	int n_rmap = count_rmaps(vcpu);
	int n_actual = count_writable_mappings(vcpu);

	if (n_rmap != n_actual)
		printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
		       __FUNCTION__, audit_msg, n_rmap, n_actual);
}

static void audit_write_protection(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *page;
	struct kvm_memory_slot *slot;
	unsigned long *rmapp;
	gfn_t gfn;

	list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
		if (page->role.metaphysical)
			continue;

		slot = gfn_to_memslot(vcpu->kvm, page->gfn);
		gfn = unalias_gfn(vcpu->kvm, page->gfn);
		rmapp = &slot->rmap[gfn - slot->base_gfn];
		if (*rmapp)
			printk(KERN_ERR "%s: (%s) shadow page has writable"
			       " mappings: gfn %lx role %x\n",
			       __FUNCTION__, audit_msg, page->gfn,
			       page->role.word);
	}
}

static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
{
	int olddbg = dbg;

	dbg = 0;
	audit_msg = msg;
	audit_rmap(vcpu);
	audit_write_protection(vcpu);
	audit_mappings(vcpu);
	dbg = olddbg;
}

#endif