KVM: MMU: do not record gfn in kvm_mmu_pte_write
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index f02b8edc3d449c41550a6f1899abb8a576cb0ddf..91a194667432f34d76075cff241698c12563bd7b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3228,7 +3228,6 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                kvm_release_pfn_clean(pfn);
                return;
        }
-       vcpu->arch.update_pte.gfn = gfn;
        vcpu->arch.update_pte.pfn = pfn;
 }
 
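The per-vcpu update_pte.gfn cache goes away; only the guessed pfn is kept. A minimal userspace sketch of why the cached gfn is redundant: by the time the spte is actually updated, the shadow page containing it already identifies the guest frames it maps, so the gfn can be derived on demand. struct shadow_page, base_gfn and entry_gfn below are invented for illustration, not the kernel's types:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t gfn_t;

    /* Invented model: a last-level shadow page maps a naturally aligned
     * run of 512 guest frames starting at base_gfn. */
    struct shadow_page {
            gfn_t base_gfn;
    };

    /* The gfn covered by entry 'index' is derivable on demand, so there
     * is no need to stash it in per-vcpu state when the write is seen. */
    static gfn_t entry_gfn(const struct shadow_page *sp, int index)
    {
            return sp->base_gfn + index;
    }

    int main(void)
    {
            struct shadow_page sp = { .base_gfn = 0x1000 };

            printf("entry 5 maps gfn 0x%llx\n",
                   (unsigned long long)entry_gfn(&sp, 5));
            return 0;
    }
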
@@ -3275,9 +3274,8 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 
        /*
         * Assume that the pte write on a page table of the same type
-        * as the current vcpu paging mode.  This is nearly always true
-        * (might be false while changing modes).  Note it is verified later
-        * by update_pte().
+        * as the current vcpu paging mode, since sptes are updated only
+        * when the mode of the written gpte matches the vcpu's.
         */
        if ((is_pae(vcpu) && bytes == 4) || !new) {
                /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
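For context on the comment above: a 32-bit PAE guest can only store a 64-bit gpte with two 4-byte writes, so the full gpte is reconstructed by reading back the naturally aligned 8 bytes once the partial write has landed in guest memory. A standalone sketch under that assumption; guest_mem and read_guest() are toy stand-ins for kvm_read_guest(), and the layout assumes little-endian, as on x86:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Toy guest physical memory. */
    static uint8_t guest_mem[4096];

    static void read_guest(uint64_t gpa, void *buf, int len)
    {
            memcpy(buf, guest_mem + gpa, len);
    }

    /* After a partial (4-byte) write lands, the full 64-bit gpte is
     * recovered by reading the aligned 8 bytes that contain it. */
    static uint64_t reconstruct_gpte(uint64_t gpa, const void *new, int bytes)
    {
            uint64_t gpte;

            memcpy(guest_mem + gpa, new, bytes);
            read_guest(gpa & ~7ull, &gpte, 8);
            return gpte;
    }

    int main(void)
    {
            uint32_t lo = 0x12345067, hi = 0x00000089;

            reconstruct_gpte(16, &lo, 4);   /* first half: gpte incomplete */
            printf("gpte = 0x%016llx\n",    /* second half completes it    */
                   (unsigned long long)reconstruct_gpte(20, &hi, 4));
            return 0;
    }
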
@@ -3307,11 +3305,11 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
        spin_lock(&vcpu->kvm->mmu_lock);
        if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
                gentry = 0;
-       kvm_mmu_access_page(vcpu, gfn);
        kvm_mmu_free_some_pages(vcpu);
        ++vcpu->kvm->stat.mmu_pte_write;
        trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
        if (guest_initiated) {
+               kvm_mmu_access_page(vcpu, gfn);
                if (gfn == vcpu->arch.last_pt_write_gfn
                    && !last_updated_pte_accessed(vcpu)) {
                        ++vcpu->arch.last_pt_write_count;
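With this hunk, kvm_mmu_access_page() runs only for guest-initiated writes, so host-side writes no longer feed the write-flood heuristic that follows it. A simplified model of that heuristic (detect_write_flood and FLOOD_THRESHOLD are invented names; the kernel keeps the equivalent state in vcpu->arch): repeated writes to the same page-table gfn whose last written pte was never accessed suggest the guest has stopped using the page as a page table:

    #include <stdbool.h>
    #include <stdint.h>

    struct flood_state {
            uint64_t last_gfn;
            int      count;
    };

    #define FLOOD_THRESHOLD 3

    /* Returns true when the same page-table gfn keeps being rewritten
     * while the last pte written there was never used (accessed bit
     * clear): a hint that the page is no longer a live page table. */
    static bool detect_write_flood(struct flood_state *s, uint64_t gfn,
                                   bool last_pte_accessed)
    {
            if (gfn == s->last_gfn && !last_pte_accessed)
                    return ++s->count >= FLOOD_THRESHOLD;

            s->last_gfn = gfn;
            s->count = 1;
            return false;
    }

    int main(void)
    {
            struct flood_state s = { 0 };

            for (int i = 0; i < 4; i++)
                    if (detect_write_flood(&s, 0x42, false))
                            return 0;       /* would unshadow the page */
            return 1;
    }
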
@@ -3538,14 +3536,23 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
                if (!test_bit(slot, sp->slot_bitmap))
                        continue;
 
-               if (sp->role.level != PT_PAGE_TABLE_LEVEL)
-                       continue;
-
                pt = sp->spt;
-               for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
+               for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
+                       if (!is_shadow_present_pte(pt[i]) ||
+                             !is_last_spte(pt[i], sp->role.level))
+                               continue;
+
+                       if (is_large_pte(pt[i])) {
+                               drop_spte(kvm, &pt[i],
+                                         shadow_trap_nonpresent_pte);
+                               --kvm->stat.lpages;
+                               continue;
+                       }
+
                        /* avoid RMW */
                        if (is_writable_pte(pt[i]))
                                update_spte(&pt[i], pt[i] & ~PT_WRITABLE_MASK);
+               }
        }
        kvm_flush_remote_tlbs(kvm);
 }
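The rewritten loop no longer skips shadow pages above PT_PAGE_TABLE_LEVEL: it visits every entry, ignores entries that are not present leaf sptes, drops large leaf sptes outright (adjusting the lpages statistic; they can be refaulted later at 4K granularity), and merely clears the write bit on ordinary leaves. A compact userspace model of the same control flow, with invented names and bit layout:

    #include <stdbool.h>
    #include <stdint.h>

    #define ENT_PER_PAGE 512
    #define PRESENT      (1ull << 0)
    #define WRITABLE     (1ull << 1)
    #define LARGE        (1ull << 7)

    static bool is_leaf(uint64_t pte, int level)
    {
            return level == 1 || (pte & LARGE);
    }

    /* Visit every entry at every level; only present leaf entries matter.
     * Large leaves are dropped whole (and the large-page count adjusted),
     * 4K leaves just lose their write permission. */
    static void remove_write_access(uint64_t *pt, int level, long *lpages)
    {
            for (int i = 0; i < ENT_PER_PAGE; i++) {
                    if (!(pt[i] & PRESENT) || !is_leaf(pt[i], level))
                            continue;

                    if (pt[i] & LARGE) {
                            pt[i] = 0;
                            --*lpages;
                            continue;
                    }

                    if (pt[i] & WRITABLE)   /* avoid a needless RMW */
                            pt[i] &= ~WRITABLE;
            }
    }

    int main(void)
    {
            static uint64_t l1[ENT_PER_PAGE], l2[ENT_PER_PAGE];
            long lpages = 1;

            l1[0] = PRESENT | WRITABLE;          /* 4K leaf             */
            l2[0] = PRESENT | WRITABLE | LARGE;  /* large leaf          */
            l2[1] = PRESENT;                     /* non-leaf: untouched */

            remove_write_access(l1, 1, &lpages);
            remove_write_access(l2, 2, &lpages);
            return !(l1[0] == PRESENT && l2[0] == 0 && lpages == 0);
    }
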
@@ -3583,7 +3590,7 @@ static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
        if (nr_to_scan == 0)
                goto out;
 
-       spin_lock(&kvm_lock);
+       raw_spin_lock(&kvm_lock);
 
        list_for_each_entry(kvm, &vm_list, vm_list) {
                int idx, freed_pages;
@@ -3606,7 +3613,7 @@ static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
        if (kvm_freed)
                list_move_tail(&kvm_freed->vm_list, &vm_list);
 
-       spin_unlock(&kvm_lock);
+       raw_spin_unlock(&kvm_lock);
 
 out:
        return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
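The spin_lock()/spin_unlock() to raw_spin_lock()/raw_spin_unlock() switch matches kvm_lock being declared as a raw_spinlock_t: a raw spinlock is never converted into a sleeping lock by PREEMPT_RT, which keeps the shrinker path safe where sleeping is not allowed. The generic usage pattern, sketched in kernel style; example_lock and touch_shared_state are placeholders, not kvm code:

    #include <linux/spinlock.h>

    /* A raw_spinlock_t always spins, even on PREEMPT_RT where ordinary
     * spinlocks become sleeping locks, so it suits short critical
     * sections that may run in atomic context. */
    static DEFINE_RAW_SPINLOCK(example_lock);

    static void touch_shared_state(void)
    {
            raw_spin_lock(&example_lock);
            /* ... short, non-sleeping critical section ... */
            raw_spin_unlock(&example_lock);
    }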