diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 35ff447d8d1426355d8ea06feec092e63ff559e9..479a1e751a73fb4781e1b856db60b2381909c9e6 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -20,7 +20,7 @@
 #include <linux/slab.h>
 
 /* global SRCU for all MMs */
-struct srcu_struct srcu;
+static struct srcu_struct srcu;
 
 /*
  * This function can't run concurrently against mmu_notifier_register
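For context, a minimal sketch of how a file-local SRCU domain like the one made static above is typically brought up; the initcall name below is illustrative and not taken from this diff:

#include <linux/init.h>
#include <linux/srcu.h>

/* file-scoped SRCU domain, now static as in the hunk above */
static struct srcu_struct srcu;

static int __init example_srcu_init(void)
{
	/*
	 * init_srcu_struct() sets up the per-CPU reader state and can
	 * fail, so its return value must be propagated.
	 */
	return init_srcu_struct(&srcu);
}
subsys_initcall(example_srcu_init);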
@@ -41,7 +41,7 @@ void __mmu_notifier_release(struct mm_struct *mm)
        int id;
 
        /*
-        * RCU here will block mmu_notifier_unregister until
+        * SRCU here will block mmu_notifier_unregister until
         * ->release returns.
         */
        id = srcu_read_lock(&srcu);
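The comment fix above reflects that this is an SRCU read-side critical section rather than plain RCU. A hedged, illustrative sketch of the idiom (the function name is made up):

static void example_reader(void)
{
	int id;

	id = srcu_read_lock(&srcu);	/* returns a token for this SRCU domain */
	/*
	 * Work done here may sleep, which plain rcu_read_lock() would forbid;
	 * mmu_notifier_unregister()'s synchronize_srcu(&srcu) cannot return
	 * until this section is exited below.
	 */
	srcu_read_unlock(&srcu, id);	/* must pass back the same token */
}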
@@ -137,12 +137,6 @@ void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
        hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
                if (mn->ops->change_pte)
                        mn->ops->change_pte(mn, mm, address, pte);
-               /*
-                * Some drivers don't have change_pte,
-                * so we must call invalidate_page in that case.
-                */
-               else if (mn->ops->invalidate_page)
-                       mn->ops->invalidate_page(mn, mm, address);
        }
        srcu_read_unlock(&srcu, id);
 }
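With the fallback above removed, a driver that still wants per-PTE updates has to provide .change_pte itself. A hedged driver-side sketch against the mmu_notifier_ops of this kernel generation; all "my_" names are hypothetical:

#include <linux/mm.h>
#include <linux/mmu_notifier.h>

static void my_change_pte(struct mmu_notifier *mn, struct mm_struct *mm,
			  unsigned long address, pte_t pte)
{
	/* update the secondary MMU mapping for 'address' to the new pte */
}

static void my_invalidate_page(struct mmu_notifier *mn, struct mm_struct *mm,
			       unsigned long address)
{
	/* drop the secondary MMU mapping for 'address' */
}

static const struct mmu_notifier_ops my_mmu_notifier_ops = {
	.change_pte	 = my_change_pte,
	.invalidate_page = my_invalidate_page,
};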
@@ -207,22 +201,23 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
        */
        BUG_ON(!srcu.per_cpu_ref);
 
-       ret = -ENOMEM;
-       mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
-       if (unlikely(!mmu_notifier_mm))
-               goto out;
-
        if (take_mmap_sem)
                down_write(&mm->mmap_sem);
        ret = mm_take_all_locks(mm);
        if (unlikely(ret))
-               goto out_cleanup;
+               goto out;
 
        if (!mm_has_notifiers(mm)) {
+               mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm),
+                                       GFP_KERNEL);
+               if (unlikely(!mmu_notifier_mm)) {
+                       ret = -ENOMEM;
+                       goto out_of_mem;
+               }
                INIT_HLIST_HEAD(&mmu_notifier_mm->list);
                spin_lock_init(&mmu_notifier_mm->lock);
+
                mm->mmu_notifier_mm = mmu_notifier_mm;
-               mmu_notifier_mm = NULL;
        }
        atomic_inc(&mm->mm_count);
 
@@ -238,13 +233,12 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
        hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
        spin_unlock(&mm->mmu_notifier_mm->lock);
 
+out_of_mem:
        mm_drop_all_locks(mm);
-out_cleanup:
+out:
        if (take_mmap_sem)
                up_write(&mm->mmap_sem);
-       /* kfree() does nothing if mmu_notifier_mm is NULL */
-       kfree(mmu_notifier_mm);
-out:
+
        BUG_ON(atomic_read(&mm->mm_users) <= 0);
        return ret;
 }
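A hedged usage sketch for the registration path reworked above: the caller embeds a struct mmu_notifier, must not already hold mmap_sem (mmu_notifier_register() takes it itself), and has to handle failure, e.g. -ENOMEM from the allocation now done under the locks or -EINTR from mm_take_all_locks(). The "my_" names, including my_mmu_notifier_ops from the earlier sketch, are hypothetical:

#include <linux/mmu_notifier.h>

struct my_context {
	struct mmu_notifier mn;
};

static int my_attach(struct my_context *ctx, struct mm_struct *mm)
{
	int ret;

	ctx->mn.ops = &my_mmu_notifier_ops;
	ret = mmu_notifier_register(&ctx->mn, mm);	/* takes mmap_sem internally */
	if (ret)
		return ret;	/* -ENOMEM or -EINTR, as described above */
	return 0;
}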
@@ -302,7 +296,7 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
 
        if (!hlist_unhashed(&mn->hlist)) {
                /*
-                * RCU here will force exit_mmap to wait ->release to finish
+                * SRCU here will force exit_mmap to wait ->release to finish
                 * before freeing the pages.
                 */
                int id;
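A hedged teardown sketch matching the hunk above: because mmu_notifier_unregister() ends with a synchronize_srcu(&srcu), no callback can still be running once it returns, so the embedding object may then be freed. Again, the "my_" names are hypothetical:

#include <linux/slab.h>

static void my_detach(struct my_context *ctx, struct mm_struct *mm)
{
	mmu_notifier_unregister(&ctx->mn, mm);	/* waits for in-flight callbacks */
	kfree(ctx);				/* safe: ->release etc. have finished */
}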