[PATCH] page migration: handle freeing of pages in migrate_pages()
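
It is no longer the caller's job to sort migrated and failed pages into
separate lists: the "moved" and "failed" parameters are dropped from
migrate_pages(), which now returns every remaining page to the LRU (or
frees it) through putback_lru_pages() and simply reports the number of
pages that could not be migrated.  The per-page work moves into the new
unmap_and_move() and move_to_new_page() helpers, migration ptes are
restored for file-backed as well as anonymous pages, and the dirty-page
fallback writes the page out through a local writeout() helper instead
of pageout().

A minimal sketch of the new calling convention follows; everything
except migrate_pages() itself (the example_move_pages() wrapper and its
list names) is illustrative and not part of this patch:

	/*
	 * Hypothetical caller, assuming kernel context where
	 * migrate_pages() and struct list_head are declared.  Pages
	 * have already been isolated from the LRU onto "pagelist" and
	 * target pages allocated onto "newlist"; no "moved"/"failed"
	 * result lists are needed anymore.
	 */
	static int example_move_pages(struct list_head *pagelist,
				      struct list_head *newlist)
	{
		/*
		 * migrate_pages() drains "pagelist" itself: pages that
		 * were migrated have lost their references and will be
		 * freed, the rest are put back on the LRU.  The return
		 * value is the number of pages that were not migrated.
		 */
		return migrate_pages(pagelist, newlist);
	}
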
diff --git a/mm/migrate.c b/mm/migrate.c
index 81721a061d50a4711e40a3663bd1a43529d85e45..d3a1810a4c9fe01bd32e46c763ccaea86fdbfcef 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -24,6 +24,7 @@
 #include <linux/topology.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
+#include <linux/writeback.h>
 
 #include "internal.h"
 
@@ -83,7 +84,6 @@ int migrate_prep(void)
 
 static inline void move_to_lru(struct page *page)
 {
-       list_del(&page->lru);
        if (PageActive(page)) {
                /*
                 * lru_cache_add_active checks that
@@ -109,6 +109,7 @@ int putback_lru_pages(struct list_head *l)
        int count = 0;
 
        list_for_each_entry_safe(page, page2, l, lru) {
+               list_del(&page->lru);
                move_to_lru(page);
                count++;
        }
@@ -123,7 +124,7 @@ static inline int is_swap_pte(pte_t pte)
 /*
  * Restore a potential migration pte to a working pte entry
  */
-static void remove_migration_pte(struct vm_area_struct *vma, unsigned long addr,
+static void remove_migration_pte(struct vm_area_struct *vma,
                struct page *old, struct page *new)
 {
        struct mm_struct *mm = vma->vm_mm;
@@ -133,6 +134,10 @@ static void remove_migration_pte(struct vm_area_struct *vma, unsigned long addr,
        pmd_t *pmd;
        pte_t *ptep, pte;
        spinlock_t *ptl;
+       unsigned long addr = page_address_in_vma(new, vma);
+
+       if (addr == -EFAULT)
+               return;
 
        pgd = pgd_offset(mm, addr);
        if (!pgd_present(*pgd))
@@ -164,25 +169,52 @@ static void remove_migration_pte(struct vm_area_struct *vma, unsigned long addr,
        if (!is_migration_entry(entry) || migration_entry_to_page(entry) != old)
                goto out;
 
-       inc_mm_counter(mm, anon_rss);
        get_page(new);
        pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
        if (is_write_migration_entry(entry))
                pte = pte_mkwrite(pte);
        set_pte_at(mm, addr, ptep, pte);
-       page_add_anon_rmap(new, vma, addr);
+
+       if (PageAnon(new))
+               page_add_anon_rmap(new, vma, addr);
+       else
+               page_add_file_rmap(new);
+
+       /* No need to invalidate - it was non-present before */
+       update_mmu_cache(vma, addr, pte);
+       lazy_mmu_prot_update(pte);
+
 out:
        pte_unmap_unlock(ptep, ptl);
 }
 
 /*
- * Get rid of all migration entries and replace them by
- * references to the indicated page.
- *
+ * Note that remove_file_migration_ptes will only work on regular mappings.
+ * Nonlinear mappings do not use migration entries.
+ */
+static void remove_file_migration_ptes(struct page *old, struct page *new)
+{
+       struct vm_area_struct *vma;
+       struct address_space *mapping = page_mapping(new);
+       struct prio_tree_iter iter;
+       pgoff_t pgoff = new->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+
+       if (!mapping)
+               return;
+
+       spin_lock(&mapping->i_mmap_lock);
+
+       vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff)
+               remove_migration_pte(vma, old, new);
+
+       spin_unlock(&mapping->i_mmap_lock);
+}
+
+/*
  * Must hold mmap_sem lock on at least one of the vmas containing
  * the page so that the anon_vma cannot vanish.
  */
-static void remove_migration_ptes(struct page *old, struct page *new)
+static void remove_anon_migration_ptes(struct page *old, struct page *new)
 {
        struct anon_vma *anon_vma;
        struct vm_area_struct *vma;
@@ -200,12 +232,23 @@ static void remove_migration_ptes(struct page *old, struct page *new)
        spin_lock(&anon_vma->lock);
 
        list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
-               remove_migration_pte(vma, page_address_in_vma(new, vma),
-                                       old, new);
+               remove_migration_pte(vma, old, new);
 
        spin_unlock(&anon_vma->lock);
 }
 
+/*
+ * Get rid of all migration entries and replace them by
+ * references to the indicated page.
+ */
+static void remove_migration_ptes(struct page *old, struct page *new)
+{
+       if (PageAnon(new))
+               remove_anon_migration_ptes(old, new);
+       else
+               remove_file_migration_ptes(old, new);
+}
+
 /*
  * Something used the pte of a page under migration. We need to
  * get to the page and wait until migration is finished.
@@ -254,14 +297,20 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 {
        struct page **radix_pointer;
 
+       if (!mapping) {
+               /* Anonymous page */
+               if (page_count(page) != 1)
+                       return -EAGAIN;
+               return 0;
+       }
+
        write_lock_irq(&mapping->tree_lock);
 
        radix_pointer = (struct page **)radix_tree_lookup_slot(
                                                &mapping->page_tree,
                                                page_index(page));
 
-       if (!page_mapping(page) ||
-                       page_count(page) != 2 + !!PagePrivate(page) ||
+       if (page_count(page) != 2 + !!PagePrivate(page) ||
                        *radix_pointer != page) {
                write_unlock_irq(&mapping->tree_lock);
                return -EAGAIN;
@@ -271,10 +320,12 @@ static int migrate_page_move_mapping(struct address_space *mapping,
         * Now we know that no one else is looking at the page.
         */
        get_page(newpage);
+#ifdef CONFIG_SWAP
        if (PageSwapCache(page)) {
                SetPageSwapCache(newpage);
                set_page_private(newpage, page_private(page));
        }
+#endif
 
        *radix_pointer = newpage;
        __put_page(page);
@@ -308,7 +359,9 @@ static void migrate_page_copy(struct page *newpage, struct page *page)
                set_page_dirty(newpage);
        }
 
+#ifdef CONFIG_SWAP
        ClearPageSwapCache(page);
+#endif
        ClearPageActive(page);
        ClearPagePrivate(page);
        set_page_private(page, 0);
@@ -353,16 +406,6 @@ int migrate_page(struct address_space *mapping,
                return rc;
 
        migrate_page_copy(newpage, page);
-
-       /*
-        * Remove auxiliary swap entries and replace
-        * them with real ptes.
-        *
-        * Note that a real pte entry will allow processes that are not
-        * waiting on the page lock to use the new page via the page tables
-        * before the new page is unlocked.
-        */
-       remove_from_swap(newpage);
        return 0;
 }
 EXPORT_SYMBOL(migrate_page);
@@ -425,30 +468,59 @@ int buffer_migrate_page(struct address_space *mapping,
 }
 EXPORT_SYMBOL(buffer_migrate_page);
 
-static int fallback_migrate_page(struct address_space *mapping,
-       struct page *newpage, struct page *page)
+/*
+ * Writeback a page to clean the dirty state
+ */
+static int writeout(struct address_space *mapping, struct page *page)
 {
+       struct writeback_control wbc = {
+               .sync_mode = WB_SYNC_NONE,
+               .nr_to_write = 1,
+               .range_start = 0,
+               .range_end = LLONG_MAX,
+               .nonblocking = 1,
+               .for_reclaim = 1
+       };
+       int rc;
+
+       if (!mapping->a_ops->writepage)
+               /* No write method for the address space */
+               return -EINVAL;
+
+       if (!clear_page_dirty_for_io(page))
+               /* Someone else already triggered a write */
+               return -EAGAIN;
+
        /*
-        * Default handling if a filesystem does not provide
-        * a migration function. We can only migrate clean
-        * pages so try to write out any dirty pages first.
+        * A dirty page may imply that the underlying filesystem has
+        * the page on some queue. So the page must be clean for
+        * migration. Writeout may mean we lose the lock and the
+        * page state is no longer what we checked for earlier.
+        * At this point we know that the migration attempt cannot
+        * be successful.
         */
-       if (PageDirty(page)) {
-               switch (pageout(page, mapping)) {
-               case PAGE_KEEP:
-               case PAGE_ACTIVATE:
-                       return -EAGAIN;
+       remove_migration_ptes(page, page);
 
-               case PAGE_SUCCESS:
-                       /* Relock since we lost the lock */
-                       lock_page(page);
-                       /* Must retry since page state may have changed */
-                       return -EAGAIN;
+       rc = mapping->a_ops->writepage(page, &wbc);
+       if (rc < 0)
+               /* I/O Error writing */
+               return -EIO;
 
-               case PAGE_CLEAN:
-                       ; /* try to migrate the page below */
-               }
-       }
+       if (rc != AOP_WRITEPAGE_ACTIVATE)
+               /* unlocked. Relock */
+               lock_page(page);
+
+       return -EAGAIN;
+}
+
+/*
+ * Default handling if a filesystem does not provide a migration function.
+ */
+static int fallback_migrate_page(struct address_space *mapping,
+       struct page *newpage, struct page *page)
+{
+       if (PageDirty(page))
+               return writeout(mapping, page);
 
        /*
         * Buffers may be managed in a filesystem specific way.
@@ -461,24 +533,130 @@ static int fallback_migrate_page(struct address_space *mapping,
        return migrate_page(mapping, newpage, page);
 }
 
+/*
+ * Move a page to a newly allocated page
+ * The page is locked and all ptes have been successfully removed.
+ *
+ * The new page will have replaced the old page if this function
+ * is successful.
+ */
+static int move_to_new_page(struct page *newpage, struct page *page)
+{
+       struct address_space *mapping;
+       int rc;
+
+       /*
+        * Block others from accessing the page when we get around to
+        * establishing additional references. We are the only one
+        * holding a reference to the new page at this point.
+        */
+       if (TestSetPageLocked(newpage))
+               BUG();
+
+       /* Prepare mapping for the new page.*/
+       newpage->index = page->index;
+       newpage->mapping = page->mapping;
+
+       mapping = page_mapping(page);
+       if (!mapping)
+               rc = migrate_page(mapping, newpage, page);
+       else if (mapping->a_ops->migratepage)
+               /*
+                * Most pages have a mapping and most filesystems
+                * should provide a migration function. Anonymous
+                * pages are part of swap space which also has its
+                * own migration function. This is the most common
+                * path for page migration.
+                */
+               rc = mapping->a_ops->migratepage(mapping,
+                                               newpage, page);
+       else
+               rc = fallback_migrate_page(mapping, newpage, page);
+
+       if (!rc)
+               remove_migration_ptes(page, newpage);
+       else
+               newpage->mapping = NULL;
+
+       unlock_page(newpage);
+
+       return rc;
+}
+
+/*
+ * Obtain the lock on page, remove all ptes and migrate the page
+ * to the newly allocated page in newpage.
+ */
+static int unmap_and_move(struct page *newpage, struct page *page, int force)
+{
+       int rc = 0;
+
+       if (page_count(page) == 1)
+               /* page was freed from under us. So we are done. */
+               goto ret;
+
+       rc = -EAGAIN;
+       if (TestSetPageLocked(page)) {
+               if (!force)
+                       goto ret;
+               lock_page(page);
+       }
+
+       if (PageWriteback(page)) {
+               if (!force)
+                       goto unlock;
+               wait_on_page_writeback(page);
+       }
+
+       /*
+        * Establish migration ptes or remove ptes
+        */
+       if (try_to_unmap(page, 1) != SWAP_FAIL) {
+               if (!page_mapped(page))
+                       rc = move_to_new_page(newpage, page);
+       } else
+               /* A vma has VM_LOCKED set -> permanent failure */
+               rc = -EPERM;
+
+       if (rc)
+               remove_migration_ptes(page, page);
+unlock:
+       unlock_page(page);
+ret:
+       if (rc != -EAGAIN) {
+               /*
+                * A page that has been migrated has all references
+                * removed and will be freed. A page that has not been
+                * migrated will have kept its references and be
+                * restored.
+                */
+               list_del(&page->lru);
+               move_to_lru(page);
+
+               list_del(&newpage->lru);
+               move_to_lru(newpage);
+       }
+       return rc;
+}
+
 /*
  * migrate_pages
  *
  * Two lists are passed to this function. The first list
  * contains the pages isolated from the LRU to be migrated.
- * The second list contains new pages that the pages isolated
+ * The second list contains new pages that the isolated pages
  * can be moved to.
  *
  * The function returns after 10 attempts or if no pages
  * are movable anymore because to has become empty
- * or no retryable pages exist anymore.
+ * or no retryable pages exist anymore. All pages will be
+ * returned to the LRU or freed.
  *
- * Return: Number of pages not migrated when "to" ran empty.
+ * Return: Number of pages not migrated.
  */
-int migrate_pages(struct list_head *from, struct list_head *to,
-                 struct list_head *moved, struct list_head *failed)
+int migrate_pages(struct list_head *from, struct list_head *to)
 {
-       int retry;
+       int retry = 1;
        int nr_failed = 0;
        int pass = 0;
        struct page *page;
@@ -489,133 +667,36 @@ int migrate_pages(struct list_head *from, struct list_head *to,
        if (!swapwrite)
                current->flags |= PF_SWAPWRITE;
 
-redo:
-       retry = 0;
-
-       list_for_each_entry_safe(page, page2, from, lru) {
-               struct page *newpage = NULL;
-               struct address_space *mapping;
-
-               cond_resched();
+       for(pass = 0; pass < 10 && retry; pass++) {
+               retry = 0;
 
-               rc = 0;
-               if (page_count(page) == 1)
-                       /* page was freed from under us. So we are done. */
-                       goto next;
+               list_for_each_entry_safe(page, page2, from, lru) {
 
-               if (to && list_empty(to))
-                       break;
-
-               /*
-                * Skip locked pages during the first two passes to give the
-                * functions holding the lock time to release the page. Later we
-                * use lock_page() to have a higher chance of acquiring the
-                * lock.
-                */
-               rc = -EAGAIN;
-               if (pass > 2)
-                       lock_page(page);
-               else
-                       if (TestSetPageLocked(page))
-                               goto next;
-
-               /*
-                * Only wait on writeback if we have already done a pass where
-                * we we may have triggered writeouts for lots of pages.
-                */
-               if (pass > 0)
-                       wait_on_page_writeback(page);
-               else
-                       if (PageWriteback(page))
-                               goto unlock_page;
-
-               /*
-                * Establish swap ptes for anonymous pages or destroy pte
-                * maps for files.
-                *
-                * In order to reestablish file backed mappings the fault handlers
-                * will take the radix tree_lock which may then be used to stop
-                * processses from accessing this page until the new page is ready.
-                *
-                * A process accessing via a swap pte (an anonymous page) will take a
-                * page_lock on the old page which will block the process until the
-                * migration attempt is complete. At that time the PageSwapCache bit
-                * will be examined. If the page was migrated then the PageSwapCache
-                * bit will be clear and the operation to retrieve the page will be
-                * retried which will find the new page in the radix tree. Then a new
-                * direct mapping may be generated based on the radix tree contents.
-                *
-                * If the page was not migrated then the PageSwapCache bit
-                * is still set and the operation may continue.
-                */
-               rc = -EPERM;
-               if (try_to_unmap(page, 1) == SWAP_FAIL)
-                       /* A vma has VM_LOCKED set -> permanent failure */
-                       goto unlock_page;
-
-               rc = -EAGAIN;
-               if (page_mapped(page))
-                       goto unlock_page;
+                       if (list_empty(to))
+                               break;
 
-               newpage = lru_to_page(to);
-               lock_page(newpage);
-               /* Prepare mapping for the new page.*/
-               newpage->index = page->index;
-               newpage->mapping = page->mapping;
+                       cond_resched();
 
-               /*
-                * Pages are properly locked and writeback is complete.
-                * Try to migrate the page.
-                */
-               mapping = page_mapping(page);
-               if (!mapping)
-                       goto unlock_both;
+                       rc = unmap_and_move(lru_to_page(to), page, pass > 2);
 
-               if (mapping->a_ops->migratepage)
-                       /*
-                        * Most pages have a mapping and most filesystems
-                        * should provide a migration function. Anonymous
-                        * pages are part of swap space which also has its
-                        * own migration function. This is the most common
-                        * path for page migration.
-                        */
-                       rc = mapping->a_ops->migratepage(mapping,
-                                                       newpage, page);
-               else
-                       rc = fallback_migrate_page(mapping, newpage, page);
-
-unlock_both:
-               unlock_page(newpage);
-
-unlock_page:
-               unlock_page(page);
-
-next:
-               if (rc) {
-                       if (newpage)
-                               newpage->mapping = NULL;
-
-                       if (rc == -EAGAIN)
+                       switch(rc) {
+                       case -EAGAIN:
                                retry++;
-                       else {
+                               break;
+                       case 0:
+                               break;
+                       default:
                                /* Permanent failure */
-                               list_move(&page->lru, failed);
                                nr_failed++;
+                               break;
                        }
-               } else {
-                       if (newpage) {
-                               /* Successful migration. Return page to LRU */
-                               move_to_lru(newpage);
-                       }
-                       list_move(&page->lru, moved);
                }
        }
-       if (retry && pass++ < 10)
-               goto redo;
 
        if (!swapwrite)
                current->flags &= ~PF_SWAPWRITE;
 
+       putback_lru_pages(from);
        return nr_failed + retry;
 }
 
@@ -629,11 +710,10 @@ int migrate_pages_to(struct list_head *pagelist,
                        struct vm_area_struct *vma, int dest)
 {
        LIST_HEAD(newlist);
-       LIST_HEAD(moved);
-       LIST_HEAD(failed);
        int err = 0;
        unsigned long offset = 0;
        int nr_pages;
+       int nr_failed = 0;
        struct page *page;
        struct list_head *p;
 
@@ -667,26 +747,17 @@ redo:
                if (nr_pages > MIGRATE_CHUNK_SIZE)
                        break;
        }
-       err = migrate_pages(pagelist, &newlist, &moved, &failed);
-
-       putback_lru_pages(&moved);      /* Call release pages instead ?? */
+       err = migrate_pages(pagelist, &newlist);
 
-       if (err >= 0 && list_empty(&newlist) && !list_empty(pagelist))
-               goto redo;
-out:
-       /* Return leftover allocated pages */
-       while (!list_empty(&newlist)) {
-               page = list_entry(newlist.next, struct page, lru);
-               list_del(&page->lru);
-               __free_page(page);
+       if (err >= 0) {
+               nr_failed += err;
+               if (list_empty(&newlist) && !list_empty(pagelist))
+                       goto redo;
        }
-       list_splice(&failed, pagelist);
-       if (err < 0)
-               return err;
+out:
 
        /* Calculate number of leftover pages */
-       nr_pages = 0;
        list_for_each(p, pagelist)
-               nr_pages++;
-       return nr_pages;
+               nr_failed++;
+       return nr_failed;
 }