From 5befdc385ddb2d5ae8995ad89004529a3acf58fc Mon Sep 17 00:00:00 2001
From: Takuya Yoshikawa
Date: Tue, 18 Feb 2014 17:22:47 +0900
Subject: [PATCH] KVM: Simplify kvm->tlbs_dirty handling

When this was introduced, kvm_flush_remote_tlbs() could be called
without holding mmu_lock. It is now acknowledged that the function
must be called before releasing mmu_lock, and all callers have already
been changed to do so.

There is no need to use smp_mb() and cmpxchg() any more.

Signed-off-by: Takuya Yoshikawa
Signed-off-by: Paolo Bonzini
---
 arch/x86/kvm/paging_tmpl.h | 7 ++++---
 include/linux/kvm_host.h   | 4 +++-
 virt/kvm/kvm_main.c        | 5 +----
 3 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index cba218a2f08d..b1e6c1bf68d3 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -913,7 +913,8 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
  * and kvm_mmu_notifier_invalidate_range_start detect the mapping page isn't
  * used by guest then tlbs are not flushed, so guest is allowed to access the
  * freed pages.
- * And we increase kvm->tlbs_dirty to delay tlbs flush in this case.
+ * We set tlbs_dirty to let the notifier know this change and delay the flush
+ * until such a case actually happens.
  */
 static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
@@ -942,7 +943,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 			return -EINVAL;
 
 		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
-			vcpu->kvm->tlbs_dirty++;
+			vcpu->kvm->tlbs_dirty = true;
 			continue;
 		}
 
@@ -957,7 +958,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 
 		if (gfn != sp->gfns[i]) {
 			drop_spte(vcpu->kvm, &sp->spt[i]);
-			vcpu->kvm->tlbs_dirty++;
+			vcpu->kvm->tlbs_dirty = true;
 			continue;
 		}
 
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index f5937b8188b4..9816b68b085f 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -401,7 +401,9 @@ struct kvm {
 	unsigned long mmu_notifier_seq;
 	long mmu_notifier_count;
 #endif
-	long tlbs_dirty;
+	/* Protected by mmu_lock */
+	bool tlbs_dirty;
+
 	struct list_head devices;
 };
 
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a9e999a48e43..f5668a431d54 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -186,12 +186,9 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
 
 void kvm_flush_remote_tlbs(struct kvm *kvm)
 {
-	long dirty_count = kvm->tlbs_dirty;
-
-	smp_mb();
 	if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
 		++kvm->stat.remote_tlb_flush;
-	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
+	kvm->tlbs_dirty = false;
 }
 EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
-- 
2.34.1
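
A minimal sketch of the locking rule this patch relies on: every reader and
writer of tlbs_dirty holds mmu_lock, and the flush is issued before the lock
is released, so a plain bool store needs no smp_mb()/cmpxchg() ordering. The
userspace C sketch below models that rule with a pthread mutex; struct
kvm_sketch, flush_remote_tlbs_sketch, and drop_stale_spte_sketch are
hypothetical names invented for illustration, not kernel APIs.

#include <stdbool.h>
#include <stdio.h>
#include <pthread.h>

/* Stand-in for struct kvm; every name here is made up for illustration. */
struct kvm_sketch {
	pthread_mutex_t mmu_lock;       /* plays the role of kvm->mmu_lock */
	bool tlbs_dirty;                /* protected by mmu_lock */
	unsigned long remote_tlb_flush; /* stat counter, as in kvm->stat */
};

/*
 * Hypothetical flush, called with mmu_lock held, mirroring the invariant
 * the changelog states: because no other thread can touch tlbs_dirty
 * while we hold the lock, a plain store clears the flag race-free.
 */
static void flush_remote_tlbs_sketch(struct kvm_sketch *kvm)
{
	kvm->remote_tlb_flush++;   /* pretend the remote TLB flush IPIs go here */
	kvm->tlbs_dirty = false;
}

/* Hypothetical caller: dirty the TLBs, then flush *before* unlocking. */
static void drop_stale_spte_sketch(struct kvm_sketch *kvm)
{
	pthread_mutex_lock(&kvm->mmu_lock);
	kvm->tlbs_dirty = true;                /* e.g. an invalid gpte was found */
	if (kvm->tlbs_dirty)
		flush_remote_tlbs_sketch(kvm); /* still holding mmu_lock */
	pthread_mutex_unlock(&kvm->mmu_lock);
}

int main(void)
{
	struct kvm_sketch kvm = {
		.mmu_lock = PTHREAD_MUTEX_INITIALIZER,
		.tlbs_dirty = false,
	};

	drop_stale_spte_sketch(&kvm);
	printf("flushes: %lu, dirty: %d\n", kvm.remote_tlb_flush, kvm.tlbs_dirty);
	return 0;
}

Build with cc -pthread. The point is only that once the flush-before-unlock
ordering is guaranteed by the lock itself, the old smp_mb()/cmpxchg() dance
in kvm_flush_remote_tlbs() buys nothing.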