Merge commit 'ftrace/function-graph' into next
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 58c35dead321e74987161bb37b5f91f2460ddd30..2d4477c7147372c28ed84f5a513ca553dc46979c 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -793,9 +793,11 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
        sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
        set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
        list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
+       INIT_LIST_HEAD(&sp->oos_link);
        ASSERT(is_empty_shadow_page(sp->spt));
        bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
        sp->multimapped = 0;
+       sp->global = 1;
        sp->parent_pte = parent_pte;
        --vcpu->kvm->arch.n_free_mmu_pages;
        return sp;
@@ -1005,7 +1007,7 @@ static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
        for_each_unsync_children(sp->unsync_child_bitmap, i) {
                u64 ent = sp->spt[i];
 
-               if (is_shadow_present_pte(ent)) {
+               if (is_shadow_present_pte(ent) && !is_large_pte(ent)) {
                        struct kvm_mmu_page *child;
                        child = page_header(ent & PT64_BASE_ADDR_MASK);
 
@@ -1066,10 +1068,18 @@ static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
        return NULL;
 }
 
+static void kvm_unlink_unsync_global(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+       list_del(&sp->oos_link);
+       --kvm->stat.mmu_unsync_global;
+}
+
 static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
        WARN_ON(!sp->unsync);
        sp->unsync = 0;
+       if (sp->global)
+               kvm_unlink_unsync_global(kvm, sp);
        --kvm->stat.mmu_unsync;
 }
 
@@ -1259,6 +1269,8 @@ static int walk_shadow(struct kvm_shadow_walk *walker,
        if (level == PT32E_ROOT_LEVEL) {
                shadow_addr = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
                shadow_addr &= PT64_BASE_ADDR_MASK;
+               if (!shadow_addr)
+                       return 1;
                --level;
        }
 
@@ -1615,9 +1627,15 @@ static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
                if (s->role.word != sp->role.word)
                        return 1;
        }
-       kvm_mmu_mark_parents_unsync(vcpu, sp);
        ++vcpu->kvm->stat.mmu_unsync;
        sp->unsync = 1;
+
+       if (sp->global) {
+               list_add(&sp->oos_link, &vcpu->kvm->arch.oos_global_pages);
+               ++vcpu->kvm->stat.mmu_unsync_global;
+       } else
+               kvm_mmu_mark_parents_unsync(vcpu, sp);
+
        mmu_convert_notrap(sp);
        return 0;
 }
@@ -1643,12 +1661,23 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
 static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
                    unsigned pte_access, int user_fault,
                    int write_fault, int dirty, int largepage,
-                   gfn_t gfn, pfn_t pfn, bool speculative,
+                   int global, gfn_t gfn, pfn_t pfn, bool speculative,
                    bool can_unsync)
 {
        u64 spte;
        int ret = 0;
        u64 mt_mask = shadow_mt_mask;
+       struct kvm_mmu_page *sp = page_header(__pa(shadow_pte));
+
+       if (!(vcpu->arch.cr4 & X86_CR4_PGE))
+               global = 0;
+       if (!global && sp->global) {
+               sp->global = 0;
+               if (sp->unsync) {
+                       kvm_unlink_unsync_global(vcpu->kvm, sp);
+                       kvm_mmu_mark_parents_unsync(vcpu, sp);
+               }
+       }
 
        /*
         * We don't set the accessed bit, since we sometimes want to see
@@ -1669,8 +1698,13 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
        if (largepage)
                spte |= PT_PAGE_SIZE_MASK;
        if (mt_mask) {
-               mt_mask = get_memory_type(vcpu, gfn) <<
-                         kvm_x86_ops->get_mt_mask_shift();
+               if (!kvm_is_mmio_pfn(pfn)) {
+                       mt_mask = get_memory_type(vcpu, gfn) <<
+                               kvm_x86_ops->get_mt_mask_shift();
+                       mt_mask |= VMX_EPT_IGMT_BIT;
+               } else
+                       mt_mask = MTRR_TYPE_UNCACHABLE <<
+                               kvm_x86_ops->get_mt_mask_shift();
                spte |= mt_mask;
        }
 
@@ -1717,8 +1751,8 @@ set_pte:
 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
                         unsigned pt_access, unsigned pte_access,
                         int user_fault, int write_fault, int dirty,
-                        int *ptwrite, int largepage, gfn_t gfn,
-                        pfn_t pfn, bool speculative)
+                        int *ptwrite, int largepage, int global,
+                        gfn_t gfn, pfn_t pfn, bool speculative)
 {
        int was_rmapped = 0;
        int was_writeble = is_writeble_pte(*shadow_pte);
@@ -1751,7 +1785,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
                }
        }
        if (set_spte(vcpu, shadow_pte, pte_access, user_fault, write_fault,
-                     dirty, largepage, gfn, pfn, speculative, true)) {
+                     dirty, largepage, global, gfn, pfn, speculative, true)) {
                if (write_fault)
                        *ptwrite = 1;
                kvm_x86_ops->tlb_flush(vcpu);
@@ -1808,7 +1842,7 @@ static int direct_map_entry(struct kvm_shadow_walk *_walk,
            || (walk->largepage && level == PT_DIRECTORY_LEVEL)) {
                mmu_set_spte(vcpu, sptep, ACC_ALL, ACC_ALL,
                             0, walk->write, 1, &walk->pt_write,
-                            walk->largepage, gfn, walk->pfn, false);
+                            walk->largepage, 0, gfn, walk->pfn, false);
                ++vcpu->stat.pf_fixed;
                return 1;
        }
@@ -1995,6 +2029,15 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
        }
 }
 
+static void mmu_sync_global(struct kvm_vcpu *vcpu)
+{
+       struct kvm *kvm = vcpu->kvm;
+       struct kvm_mmu_page *sp, *n;
+
+       list_for_each_entry_safe(sp, n, &kvm->arch.oos_global_pages, oos_link)
+               kvm_sync_page(vcpu, sp);
+}
+
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
 {
        spin_lock(&vcpu->kvm->mmu_lock);
@@ -2002,6 +2045,13 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
        spin_unlock(&vcpu->kvm->mmu_lock);
 }
 
+void kvm_mmu_sync_global(struct kvm_vcpu *vcpu)
+{
+       spin_lock(&vcpu->kvm->mmu_lock);
+       mmu_sync_global(vcpu);
+       spin_unlock(&vcpu->kvm->mmu_lock);
+}
+
 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
 {
        return vaddr;
@@ -2400,7 +2450,8 @@ static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
 }
 
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
-                      const u8 *new, int bytes)
+                      const u8 *new, int bytes,
+                      bool guest_initiated)
 {
        gfn_t gfn = gpa >> PAGE_SHIFT;
        struct kvm_mmu_page *sp;
@@ -2426,15 +2477,17 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
        kvm_mmu_free_some_pages(vcpu);
        ++vcpu->kvm->stat.mmu_pte_write;
        kvm_mmu_audit(vcpu, "pre pte write");
-       if (gfn == vcpu->arch.last_pt_write_gfn
-           && !last_updated_pte_accessed(vcpu)) {
-               ++vcpu->arch.last_pt_write_count;
-               if (vcpu->arch.last_pt_write_count >= 3)
-                       flooded = 1;
-       } else {
-               vcpu->arch.last_pt_write_gfn = gfn;
-               vcpu->arch.last_pt_write_count = 1;
-               vcpu->arch.last_pte_updated = NULL;
+       if (guest_initiated) {
+               if (gfn == vcpu->arch.last_pt_write_gfn
+                   && !last_updated_pte_accessed(vcpu)) {
+                       ++vcpu->arch.last_pt_write_count;
+                       if (vcpu->arch.last_pt_write_count >= 3)
+                               flooded = 1;
+               } else {
+                       vcpu->arch.last_pt_write_gfn = gfn;
+                       vcpu->arch.last_pt_write_count = 1;
+                       vcpu->arch.last_pte_updated = NULL;
+               }
        }
        index = kvm_page_table_hashfn(gfn);
        bucket = &vcpu->kvm->arch.mmu_page_hash[index];
@@ -2574,9 +2627,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
 
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
 {
-       spin_lock(&vcpu->kvm->mmu_lock);
        vcpu->arch.mmu.invlpg(vcpu, gva);
-       spin_unlock(&vcpu->kvm->mmu_lock);
        kvm_mmu_flush_tlb(vcpu);
        ++vcpu->stat.invlpg;
 }