KVM: Add statistic for remote tlb flushes
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index ace3cb86214b2979aaad93b580f6b20088c24154..101cd5377a89716377021baebe68733b14e5ca85 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -420,14 +420,18 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
        struct kvm_rmap_desc *desc;
        struct kvm_rmap_desc *prev_desc;
        struct kvm_mmu_page *page;
+       struct page *release_page;
        unsigned long *rmapp;
        int i;
 
        if (!is_rmap_pte(*spte))
                return;
        page = page_header(__pa(spte));
-       kvm_release_page(pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >>
-                        PAGE_SHIFT));
+       release_page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
+       if (is_writeble_pte(*spte))
+               kvm_release_page_dirty(release_page);
+       else
+               kvm_release_page_clean(release_page);
        rmapp = gfn_to_rmap(kvm, page->gfns[spte - page->spt]);
        if (!*rmapp) {
                printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
@@ -755,6 +759,7 @@ static void kvm_mmu_zap_page(struct kvm *kvm,
 {
        u64 *parent_pte;
 
+       ++kvm->stat.mmu_shadow_zapped;
        while (page->multimapped || page->parent_pte) {
                if (!page->multimapped)
                        parent_pte = page->parent_pte;
@@ -892,7 +897,9 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
 {
        int level = PT32E_ROOT_LEVEL;
        hpa_t table_addr = vcpu->mmu.root_hpa;
+       struct page *page;
 
+       page = pfn_to_page(p >> PAGE_SHIFT);
        for (; ; level--) {
                u32 index = PT64_INDEX(v, level);
                u64 *table;
@@ -906,8 +913,10 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
 
                        pte = table[index];
                        was_rmapped = is_rmap_pte(pte);
-                       if (is_shadow_present_pte(pte) && is_writeble_pte(pte))
+                       if (is_shadow_present_pte(pte) && is_writeble_pte(pte)) {
+                               kvm_release_page_clean(page);
                                return 0;
+                       }
                        mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
                        page_header_update_slot(vcpu->kvm, table, v);
                        table[index] = p | PT_PRESENT_MASK | PT_WRITABLE_MASK |
@@ -915,7 +924,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
                        if (!was_rmapped)
                                rmap_add(vcpu, &table[index], v >> PAGE_SHIFT);
                        else
-                               kvm_release_page(pfn_to_page(p >> PAGE_SHIFT));
+                               kvm_release_page_clean(page);
+
                        return 0;
                }
 
@@ -930,7 +940,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
                                                     1, 3, &table[index]);
                        if (!new_table) {
                                pgprintk("nonpaging_map: ENOMEM\n");
-                               kvm_release_page(pfn_to_page(p >> PAGE_SHIFT));
+                               kvm_release_page_clean(page);
                                return -ENOMEM;
                        }
 
@@ -1046,8 +1056,8 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
        paddr = gpa_to_hpa(vcpu->kvm, addr & PT64_BASE_ADDR_MASK);
 
        if (is_error_hpa(paddr)) {
-               kvm_release_page(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
-                                >> PAGE_SHIFT));
+               kvm_release_page_clean(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
+                                      >> PAGE_SHIFT));
                return 1;
        }
 
@@ -1226,9 +1236,12 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
                                  const void *new, int bytes,
                                  int offset_in_pte)
 {
-       if (page->role.level != PT_PAGE_TABLE_LEVEL)
+       if (page->role.level != PT_PAGE_TABLE_LEVEL) {
+               ++vcpu->kvm->stat.mmu_pde_zapped;
                return;
+       }
 
+       ++vcpu->kvm->stat.mmu_pte_updated;
        if (page->role.glevels == PT32_ROOT_LEVEL)
                paging32_update_pte(vcpu, page, spte, new, bytes,
                                    offset_in_pte);
@@ -1263,6 +1276,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
        int npte;
 
        pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
+       ++vcpu->kvm->stat.mmu_pte_write;
        kvm_mmu_audit(vcpu, "pre pte write");
        if (gfn == vcpu->last_pt_write_gfn
            && !last_updated_pte_accessed(vcpu)) {
@@ -1296,6 +1310,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                        pgprintk("misaligned: gpa %llx bytes %d role %x\n",
                                 gpa, bytes, page->role.word);
                        kvm_mmu_zap_page(vcpu->kvm, page);
+                       ++vcpu->kvm->stat.mmu_flooded;
                        continue;
                }
                page_offset = offset;
@@ -1344,6 +1359,7 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
                page = container_of(vcpu->kvm->active_mmu_pages.prev,
                                    struct kvm_mmu_page, link);
                kvm_mmu_zap_page(vcpu->kvm, page);
+               ++vcpu->kvm->stat.mmu_recycled;
        }
 }
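
The mmu_* counters incremented throughout these hunks are plain per-VM statistics; their declarations live outside mmu.c, so the lines below are only a sketch of what the kvm->stat fields referenced above are assumed to look like (the struct name and u32 type are assumptions; the field names come from this diff):

        /* illustrative sketch only, not part of this patch */
        struct kvm_vm_stat {
                u32 mmu_shadow_zapped;  /* shadow pages torn down in kvm_mmu_zap_page() */
                u32 mmu_pte_write;      /* emulated guest page-table writes */
                u32 mmu_pte_updated;    /* leaf sptes updated by mmu_pte_write_new_pte() */
                u32 mmu_pde_zapped;     /* non-leaf guest updates skipped rather than patched */
                u32 mmu_flooded;        /* shadow pages zapped on misaligned guest pte writes */
                u32 mmu_recycled;       /* shadow pages reclaimed by __kvm_mmu_free_some_pages() */
        };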
 
@@ -1362,6 +1378,10 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
                goto out;
        }
 
+       r = mmu_topup_memory_caches(vcpu);
+       if (r)
+               goto out;
+
        er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);
        mutex_unlock(&vcpu->kvm->lock);
 
@@ -1515,6 +1535,25 @@ nomem:
        return -ENOMEM;
 }
 
+/*
+ * Calculate mmu pages needed for kvm.
+ */
+unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
+{
+       int i;
+       unsigned int nr_mmu_pages;
+       unsigned int  nr_pages = 0;
+
+       for (i = 0; i < kvm->nmemslots; i++)
+               nr_pages += kvm->memslots[i].npages;
+
+       nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
+       nr_mmu_pages = max(nr_mmu_pages,
+                       (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
+
+       return nr_mmu_pages;
+}
+
 #ifdef AUDIT
 
 static const char *audit_msg;
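
To make the sizing rule in kvm_mmu_calculate_mmu_pages() above concrete (the constant values here are assumptions for illustration; the patch only references the macros): with KVM_PERMILLE_MMU_PAGES = 20 and KVM_MIN_ALLOC_MMU_PAGES = 64, a guest whose memory slots total 1 GiB (262144 4 KiB pages) gets 262144 * 20 / 1000 = 5242 shadow pages, while an 8 MiB guest (2048 pages) computes 2048 * 20 / 1000 = 40 and is rounded up to the 64-page floor by the max().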
@@ -1567,7 +1606,7 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
                                       " valid guest gva %lx\n", audit_msg, va);
                        page = pfn_to_page((gpa & PT64_BASE_ADDR_MASK)
                                           >> PAGE_SHIFT);
-                       kvm_release_page(page);
+                       kvm_release_page_clean(page);
 
                }
        }