mm: migration: take a reference to the anon_vma before migrating
[deliverable/linux.git] / mm / rmap.c
index 0feeef860a8f5b5d61081234f40e8e149e2696f5..f522cb008646df170ef0f235a121467409a7c9a0 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -250,7 +250,8 @@ static void anon_vma_unlink(struct anon_vma_chain *anon_vma_chain)
        list_del(&anon_vma_chain->same_anon_vma);
 
        /* We must garbage collect the anon_vma if it's empty */
-       empty = list_empty(&anon_vma->head) && !ksm_refcount(anon_vma);
+       empty = list_empty(&anon_vma->head) && !ksm_refcount(anon_vma) &&
+                                       !migrate_refcount(anon_vma);
        spin_unlock(&anon_vma->lock);
 
        if (empty)
@@ -275,6 +276,7 @@ static void anon_vma_ctor(void *data)
 
        spin_lock_init(&anon_vma->lock);
        ksm_refcount_init(anon_vma);
+       migrate_refcount_init(anon_vma);
        INIT_LIST_HEAD(&anon_vma->head);
 }
 
@@ -1355,10 +1357,8 @@ static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
        /*
         * Note: remove_migration_ptes() cannot use page_lock_anon_vma()
         * because that depends on page_mapped(); but not all its usages
-        * are holding mmap_sem, which also gave the necessary guarantee
-        * (that this anon_vma's slab has not already been destroyed).
-        * This needs to be reviewed later: avoiding page_lock_anon_vma()
-        * is risky, and currently limits the usefulness of rmap_walk().
+        * are holding mmap_sem. Users without mmap_sem are required to
+        * take a reference count to prevent the anon_vma disappearing.
         */
        anon_vma = page_anon_vma(page);
        if (!anon_vma)
This page took 0.03474 seconds and 5 git commands to generate.