KVM: Add statistic for remote tlb flushes
[deliverable/linux.git] drivers/kvm/mmu.c
index bbf5eb427dc6c88390c9170bdc544f3ffa7a3713..101cd5377a89716377021baebe68733b14e5ca85 100644
@@ -19,6 +19,7 @@
 
 #include "vmx.h"
 #include "kvm.h"
+#include "x86.h"
 
 #include <linux/types.h>
 #include <linux/string.h>
@@ -419,12 +420,18 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
        struct kvm_rmap_desc *desc;
        struct kvm_rmap_desc *prev_desc;
        struct kvm_mmu_page *page;
+       struct page *release_page;
        unsigned long *rmapp;
        int i;
 
        if (!is_rmap_pte(*spte))
                return;
        page = page_header(__pa(spte));
+       release_page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
+       if (is_writeble_pte(*spte))
+               kvm_release_page_dirty(release_page);
+       else
+               kvm_release_page_clean(release_page);
        rmapp = gfn_to_rmap(kvm, page->gfns[spte - page->spt]);
        if (!*rmapp) {
                printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
@@ -752,6 +759,7 @@ static void kvm_mmu_zap_page(struct kvm *kvm,
 {
        u64 *parent_pte;
 
+       ++kvm->stat.mmu_shadow_zapped;
        while (page->multimapped || page->parent_pte) {
                if (!page->multimapped)
                        parent_pte = page->parent_pte;
@@ -850,23 +858,17 @@ static void page_header_update_slot(struct kvm *kvm, void *pte, gpa_t gpa)
        __set_bit(slot, &page_head->slot_bitmap);
 }
 
-hpa_t safe_gpa_to_hpa(struct kvm *kvm, gpa_t gpa)
-{
-       hpa_t hpa = gpa_to_hpa(kvm, gpa);
-
-       return is_error_hpa(hpa) ? bad_page_address | (gpa & ~PAGE_MASK): hpa;
-}
-
 hpa_t gpa_to_hpa(struct kvm *kvm, gpa_t gpa)
 {
        struct page *page;
+       hpa_t hpa;
 
        ASSERT((gpa & HPA_ERR_MASK) == 0);
        page = gfn_to_page(kvm, gpa >> PAGE_SHIFT);
-       if (!page)
-               return gpa | HPA_ERR_MASK;
-       return ((hpa_t)page_to_pfn(page) << PAGE_SHIFT)
-               | (gpa & (PAGE_SIZE-1));
+       hpa = ((hpa_t)page_to_pfn(page) << PAGE_SHIFT) | (gpa & (PAGE_SIZE-1));
+       if (is_error_page(page))
+               return hpa | HPA_ERR_MASK;
+       return hpa;
 }
 
 hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva)
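With this change gpa_to_hpa() no longer encodes failure as gpa | HPA_ERR_MASK: it always translates through gfn_to_page(), so even the failure case refers to a real struct page whose reference the caller now owns and must drop. A minimal sketch of the resulting caller pattern, mirroring the nonpaging_page_fault() hunk further down (illustrative only, not part of the patch):

	hpa_t paddr = gpa_to_hpa(vcpu->kvm, gpa);
	struct page *page = pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
					>> PAGE_SHIFT);

	if (is_error_hpa(paddr)) {
		/* drop the reference taken by gfn_to_page() even on failure */
		kvm_release_page_clean(page);
		return 1;
	}
	/*
	 * On success the reference is handed to the shadow pte and is
	 * released later by rmap_remove().
	 */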
@@ -895,7 +897,9 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
 {
        int level = PT32E_ROOT_LEVEL;
        hpa_t table_addr = vcpu->mmu.root_hpa;
+       struct page *page;
 
+       page = pfn_to_page(p >> PAGE_SHIFT);
        for (; ; level--) {
                u32 index = PT64_INDEX(v, level);
                u64 *table;
@@ -909,14 +913,19 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
 
                        pte = table[index];
                        was_rmapped = is_rmap_pte(pte);
-                       if (is_shadow_present_pte(pte) && is_writeble_pte(pte))
+                       if (is_shadow_present_pte(pte) && is_writeble_pte(pte)) {
+                               kvm_release_page_clean(page);
                                return 0;
+                       }
                        mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
                        page_header_update_slot(vcpu->kvm, table, v);
                        table[index] = p | PT_PRESENT_MASK | PT_WRITABLE_MASK |
                                                                PT_USER_MASK;
                        if (!was_rmapped)
                                rmap_add(vcpu, &table[index], v >> PAGE_SHIFT);
+                       else
+                               kvm_release_page_clean(page);
+
                        return 0;
                }
 
@@ -931,6 +940,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
                                                     1, 3, &table[index]);
                        if (!new_table) {
                                pgprintk("nonpaging_map: ENOMEM\n");
+                               kvm_release_page_clean(page);
                                return -ENOMEM;
                        }
 
@@ -1045,8 +1055,11 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 
        paddr = gpa_to_hpa(vcpu->kvm, addr & PT64_BASE_ADDR_MASK);
 
-       if (is_error_hpa(paddr))
+       if (is_error_hpa(paddr)) {
+               kvm_release_page_clean(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
+                                      >> PAGE_SHIFT));
                return 1;
+       }
 
        return nonpaging_map(vcpu, addr & PAGE_MASK, paddr);
 }
@@ -1223,9 +1236,12 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
                                  const void *new, int bytes,
                                  int offset_in_pte)
 {
-       if (page->role.level != PT_PAGE_TABLE_LEVEL)
+       if (page->role.level != PT_PAGE_TABLE_LEVEL) {
+               ++vcpu->kvm->stat.mmu_pde_zapped;
                return;
+       }
 
+       ++vcpu->kvm->stat.mmu_pte_updated;
        if (page->role.glevels == PT32_ROOT_LEVEL)
                paging32_update_pte(vcpu, page, spte, new, bytes,
                                    offset_in_pte);
@@ -1260,6 +1276,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
        int npte;
 
        pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
+       ++vcpu->kvm->stat.mmu_pte_write;
        kvm_mmu_audit(vcpu, "pre pte write");
        if (gfn == vcpu->last_pt_write_gfn
            && !last_updated_pte_accessed(vcpu)) {
@@ -1293,6 +1310,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                        pgprintk("misaligned: gpa %llx bytes %d role %x\n",
                                 gpa, bytes, page->role.word);
                        kvm_mmu_zap_page(vcpu->kvm, page);
+                       ++vcpu->kvm->stat.mmu_flooded;
                        continue;
                }
                page_offset = offset;
@@ -1341,9 +1359,50 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
                page = container_of(vcpu->kvm->active_mmu_pages.prev,
                                    struct kvm_mmu_page, link);
                kvm_mmu_zap_page(vcpu->kvm, page);
+               ++vcpu->kvm->stat.mmu_recycled;
        }
 }
 
+int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
+{
+       int r;
+       enum emulation_result er;
+
+       mutex_lock(&vcpu->kvm->lock);
+       r = vcpu->mmu.page_fault(vcpu, cr2, error_code);
+       if (r < 0)
+               goto out;
+
+       if (!r) {
+               r = 1;
+               goto out;
+       }
+
+       r = mmu_topup_memory_caches(vcpu);
+       if (r)
+               goto out;
+
+       er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);
+       mutex_unlock(&vcpu->kvm->lock);
+
+       switch (er) {
+       case EMULATE_DONE:
+               return 1;
+       case EMULATE_DO_MMIO:
+               ++vcpu->stat.mmio_exits;
+               return 0;
+       case EMULATE_FAIL:
+               kvm_report_emulation_failure(vcpu, "pagetable");
+               return 1;
+       default:
+               BUG();
+       }
+out:
+       mutex_unlock(&vcpu->kvm->lock);
+       return r;
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
+
 static void free_mmu_pages(struct kvm_vcpu *vcpu)
 {
        struct kvm_mmu_page *page;
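The new kvm_mmu_page_fault() above gives arch code a single entry point for guest page faults: a negative return is an error to propagate, 0 means the emulator set up an exit (e.g. MMIO) that must be completed in userspace, and 1 means the fault was resolved and the guest can resume. A hedged sketch of a call site in an exit handler (the surrounding handler is illustrative, not taken from this patch):

	/* inside an arch page-fault intercept (vmx/svm exception handler) */
	int r = kvm_mmu_page_fault(vcpu, cr2, error_code);

	if (r < 0)
		return r;	/* internal error: propagate to the ioctl caller */
	if (r == 0)
		return 0;	/* exit to userspace; kvm_run describes the exit */
	return 1;		/* handled or emulated: re-enter the guest */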
@@ -1476,6 +1535,25 @@ nomem:
        return -ENOMEM;
 }
 
+/*
+ * Calculate mmu pages needed for kvm.
+ */
+unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
+{
+       int i;
+       unsigned int nr_mmu_pages;
+       unsigned int  nr_pages = 0;
+
+       for (i = 0; i < kvm->nmemslots; i++)
+               nr_pages += kvm->memslots[i].npages;
+
+       nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
+       nr_mmu_pages = max(nr_mmu_pages,
+                       (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
+
+       return nr_mmu_pages;
+}
+
 #ifdef AUDIT
 
 static const char *audit_msg;
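For a rough feel of the kvm_mmu_calculate_mmu_pages() heuristic above, assuming the contemporaneous constants KVM_PERMILLE_MMU_PAGES == 20 and KVM_MIN_ALLOC_MMU_PAGES == 64 (defined in kvm.h; treat the values as an assumption here, not part of this hunk): a 1 GiB guest has 262144 guest pages, so 262144 * 20 / 1000 = 5242 shadow pages are budgeted, while an 8 MiB guest computes 2048 * 20 / 1000 = 40 and is clamped up to the 64-page floor.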
@@ -1513,6 +1591,7 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
                } else {
                        gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, va);
                        hpa_t hpa = gpa_to_hpa(vcpu->kvm, gpa);
+                       struct page *page;
 
                        if (is_shadow_present_pte(ent)
                            && (ent & PT64_BASE_ADDR_MASK) != hpa)
@@ -1525,6 +1604,9 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
                                 && !is_error_hpa(hpa))
                                printk(KERN_ERR "audit: (%s) notrap shadow,"
                                       " valid guest gva %lx\n", audit_msg, va);
+                       page = pfn_to_page((gpa & PT64_BASE_ADDR_MASK)
+                                          >> PAGE_SHIFT);
+                       kvm_release_page_clean(page);
 
                }
        }