swap: redirty page if page write fails on swap file
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 6001ee6347a9694f4a9b31ef9060913ff30440bf..03a89a2f464bef283770e84ad7186a5cc0915924 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -20,6 +20,7 @@
 #include <linux/mman.h>
 #include <linux/pagemap.h>
 #include <linux/migrate.h>
+#include <linux/hashtable.h>
 
 #include <asm/tlb.h>
 #include <asm/pgalloc.h>
@@ -62,12 +63,11 @@ static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
 static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;
 
 static int khugepaged(void *none);
-static int mm_slots_hash_init(void);
 static int khugepaged_slab_init(void);
-static void khugepaged_slab_free(void);
 
-#define MM_SLOTS_HASH_HEADS 1024
-static struct hlist_head *mm_slots_hash __read_mostly;
+#define MM_SLOTS_HASH_BITS 10
+static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
+
 static struct kmem_cache *mm_slot_cache __read_mostly;
 
 /**
@@ -105,7 +105,6 @@ static int set_recommended_min_free_kbytes(void)
        struct zone *zone;
        int nr_zones = 0;
        unsigned long recommended_min;
-       extern int min_free_kbytes;
 
        if (!khugepaged_enabled())
                return 0;
@@ -164,35 +163,34 @@ static int start_khugepaged(void)
 }
 
 static atomic_t huge_zero_refcount;
-static unsigned long huge_zero_pfn __read_mostly;
+static struct page *huge_zero_page __read_mostly;
 
-static inline bool is_huge_zero_pfn(unsigned long pfn)
+static inline bool is_huge_zero_page(struct page *page)
 {
-       unsigned long zero_pfn = ACCESS_ONCE(huge_zero_pfn);
-       return zero_pfn && pfn == zero_pfn;
+       return ACCESS_ONCE(huge_zero_page) == page;
 }
 
 static inline bool is_huge_zero_pmd(pmd_t pmd)
 {
-       return is_huge_zero_pfn(pmd_pfn(pmd));
+       return is_huge_zero_page(pmd_page(pmd));
 }
 
-static unsigned long get_huge_zero_page(void)
+static struct page *get_huge_zero_page(void)
 {
        struct page *zero_page;
 retry:
        if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
-               return ACCESS_ONCE(huge_zero_pfn);
+               return ACCESS_ONCE(huge_zero_page);
 
        zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
                        HPAGE_PMD_ORDER);
        if (!zero_page) {
                count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
-               return 0;
+               return NULL;
        }
        count_vm_event(THP_ZERO_PAGE_ALLOC);
        preempt_disable();
-       if (cmpxchg(&huge_zero_pfn, 0, page_to_pfn(zero_page))) {
+       if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
                preempt_enable();
                __free_page(zero_page);
                goto retry;
@@ -201,7 +199,7 @@ retry:
        /* We take additional reference here. It will be put back by shrinker */
        atomic_set(&huge_zero_refcount, 2);
        preempt_enable();
-       return ACCESS_ONCE(huge_zero_pfn);
+       return ACCESS_ONCE(huge_zero_page);
 }
 
 static void put_huge_zero_page(void)
@@ -221,9 +219,9 @@ static int shrink_huge_zero_page(struct shrinker *shrink,
                return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
 
        if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
-               unsigned long zero_pfn = xchg(&huge_zero_pfn, 0);
-               BUG_ON(zero_pfn == 0);
-               __free_page(__pfn_to_page(zero_pfn));
+               struct page *zero_page = xchg(&huge_zero_page, NULL);
+               BUG_ON(zero_page == NULL);
+               __free_page(zero_page);
        }
 
        return 0;
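
The two hunks above only change the type of the cached object (a pfn becomes a struct page pointer); the reference-counting scheme itself is untouched: a count of zero means no zero page exists, the allocating path sets the count to 2 (one reference for the caller plus one that keeps the page cached), and the shrinker frees the page only when that cached reference is the last one left. The put side is not part of this diff; it is assumed to look roughly like:

	/*
	 * Sketch of the assumed counterpart to get_huge_zero_page(); not part
	 * of this diff. The count can never reach zero here, because the last
	 * (cached) reference is only dropped by shrink_huge_zero_page() above.
	 */
	static void put_huge_zero_page(void)
	{
		BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
	}
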
@@ -634,12 +632,6 @@ static int __init hugepage_init(void)
        if (err)
                goto out;
 
-       err = mm_slots_hash_init();
-       if (err) {
-               khugepaged_slab_free();
-               goto out;
-       }
-
        register_shrinker(&huge_zero_page_shrinker);
 
        /*
@@ -720,6 +712,11 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
                return VM_FAULT_OOM;
 
        clear_huge_page(page, haddr, HPAGE_PMD_NR);
+       /*
+        * The memory barrier inside __SetPageUptodate makes sure that
+        * clear_huge_page writes become visible before the set_pmd_at()
+        * write.
+        */
        __SetPageUptodate(page);
 
        spin_lock(&mm->page_table_lock);
@@ -731,12 +728,6 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
        } else {
                pmd_t entry;
                entry = mk_huge_pmd(page, vma);
-               /*
-                * The spinlocking to take the lru_lock inside
-                * page_add_new_anon_rmap() acts as a full memory
-                * barrier to be sure clear_huge_page writes become
-                * visible after the set_pmd_at() write.
-                */
                page_add_new_anon_rmap(page, vma, haddr);
                set_pmd_at(mm, haddr, pmd, entry);
                pgtable_trans_huge_deposit(mm, pgtable);
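
The moved comment changes which operation is credited with ordering the page clear ahead of the pmd install: instead of relying on the lru_lock taken inside page_add_new_anon_rmap(), the ordering is attributed to the write barrier inside __SetPageUptodate(). For context, that helper is assumed to be essentially the following (its exact definition lives in include/linux/page-flags.h and is not part of this diff):

	/*
	 * Assumed definition, shown for illustration only: the smp_wmb()
	 * orders the clear_huge_page() stores before the PG_uptodate bit
	 * and, transitively, before the later set_pmd_at() that publishes
	 * the mapping to other threads.
	 */
	static inline void __SetPageUptodate(struct page *page)
	{
		smp_wmb();
		__set_bit(PG_uptodate, &page->flags);
	}
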
@@ -772,12 +763,12 @@ static inline struct page *alloc_hugepage(int defrag)
 
 static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
                struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
-               unsigned long zero_pfn)
+               struct page *zero_page)
 {
        pmd_t entry;
        if (!pmd_none(*pmd))
                return false;
-       entry = pfn_pmd(zero_pfn, vma->vm_page_prot);
+       entry = mk_pmd(zero_page, vma->vm_page_prot);
        entry = pmd_wrprotect(entry);
        entry = pmd_mkhuge(entry);
        set_pmd_at(mm, haddr, pmd, entry);
@@ -802,20 +793,20 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                if (!(flags & FAULT_FLAG_WRITE) &&
                                transparent_hugepage_use_zero_page()) {
                        pgtable_t pgtable;
-                       unsigned long zero_pfn;
+                       struct page *zero_page;
                        bool set;
                        pgtable = pte_alloc_one(mm, haddr);
                        if (unlikely(!pgtable))
                                return VM_FAULT_OOM;
-                       zero_pfn = get_huge_zero_page();
-                       if (unlikely(!zero_pfn)) {
+                       zero_page = get_huge_zero_page();
+                       if (unlikely(!zero_page)) {
                                pte_free(mm, pgtable);
                                count_vm_event(THP_FAULT_FALLBACK);
                                goto out;
                        }
                        spin_lock(&mm->page_table_lock);
                        set = set_huge_zero_page(pgtable, mm, vma, haddr, pmd,
-                                       zero_pfn);
+                                       zero_page);
                        spin_unlock(&mm->page_table_lock);
                        if (!set) {
                                pte_free(mm, pgtable);
@@ -894,16 +885,16 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
         * a page table.
         */
        if (is_huge_zero_pmd(pmd)) {
-               unsigned long zero_pfn;
+               struct page *zero_page;
                bool set;
                /*
                 * get_huge_zero_page() will never allocate a new page here,
                 * since we already have a zero page to copy. It just takes a
                 * reference.
                 */
-               zero_pfn = get_huge_zero_page();
+               zero_page = get_huge_zero_page();
                set = set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
-                               zero_pfn);
+                               zero_page);
                BUG_ON(!set); /* unexpected !pmd_none(dst_pmd) */
                ret = 0;
                goto out_unlock;
@@ -1257,6 +1248,10 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
        if (flags & FOLL_WRITE && !pmd_write(*pmd))
                goto out;
 
+       /* Avoid dumping huge zero page */
+       if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
+               return ERR_PTR(-EFAULT);
+
        page = pmd_page(*pmd);
        VM_BUG_ON(!PageHead(page));
        if (flags & FOLL_TOUCH) {
@@ -1298,7 +1293,6 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
        int target_nid;
        int current_nid = -1;
        bool migrated;
-       bool page_locked = false;
 
        spin_lock(&mm->page_table_lock);
        if (unlikely(!pmd_same(pmd, *pmdp)))
@@ -1320,7 +1314,6 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
        /* Acquire the page lock to serialise THP migrations */
        spin_unlock(&mm->page_table_lock);
        lock_page(page);
-       page_locked = true;
 
        /* Confirm the PTE did not change while locked */
        spin_lock(&mm->page_table_lock);
@@ -1333,34 +1326,26 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
        /* Migrate the THP to the requested node */
        migrated = migrate_misplaced_transhuge_page(mm, vma,
-                               pmdp, pmd, addr,
-                               page, target_nid);
-       if (migrated)
-               current_nid = target_nid;
-       else {
-               spin_lock(&mm->page_table_lock);
-               if (unlikely(!pmd_same(pmd, *pmdp))) {
-                       unlock_page(page);
-                       goto out_unlock;
-               }
-               goto clear_pmdnuma;
-       }
+                               pmdp, pmd, addr, page, target_nid);
+       if (!migrated)
+               goto check_same;
 
-       task_numa_fault(current_nid, HPAGE_PMD_NR, migrated);
+       task_numa_fault(target_nid, HPAGE_PMD_NR, true);
        return 0;
 
+check_same:
+       spin_lock(&mm->page_table_lock);
+       if (unlikely(!pmd_same(pmd, *pmdp)))
+               goto out_unlock;
 clear_pmdnuma:
        pmd = pmd_mknonnuma(pmd);
        set_pmd_at(mm, haddr, pmdp, pmd);
        VM_BUG_ON(pmd_numa(*pmdp));
        update_mmu_cache_pmd(vma, addr, pmdp);
-       if (page_locked)
-               unlock_page(page);
-
 out_unlock:
        spin_unlock(&mm->page_table_lock);
        if (current_nid != -1)
-               task_numa_fault(current_nid, HPAGE_PMD_NR, migrated);
+               task_numa_fault(current_nid, HPAGE_PMD_NR, false);
        return 0;
 }
 
@@ -1573,7 +1558,8 @@ static int __split_huge_page_splitting(struct page *page,
        return ret;
 }
 
-static void __split_huge_page_refcount(struct page *page)
+static void __split_huge_page_refcount(struct page *page,
+                                      struct list_head *list)
 {
        int i;
        struct zone *zone = page_zone(page);
@@ -1652,14 +1638,14 @@ static void __split_huge_page_refcount(struct page *page)
                page_tail->mapping = page->mapping;
 
                page_tail->index = page->index + i;
-               page_xchg_last_nid(page_tail, page_last_nid(page));
+               page_nid_xchg_last(page_tail, page_nid_last(page));
 
                BUG_ON(!PageAnon(page_tail));
                BUG_ON(!PageUptodate(page_tail));
                BUG_ON(!PageDirty(page_tail));
                BUG_ON(!PageSwapBacked(page_tail));
 
-               lru_add_page_tail(page, page_tail, lruvec);
+               lru_add_page_tail(page, page_tail, lruvec, list);
        }
        atomic_sub(tail_count, &page->_count);
        BUG_ON(atomic_read(&page->_count) <= 0);
@@ -1766,7 +1752,8 @@ static int __split_huge_page_map(struct page *page,
 
 /* must be called with anon_vma->root->rwsem held */
 static void __split_huge_page(struct page *page,
-                             struct anon_vma *anon_vma)
+                             struct anon_vma *anon_vma,
+                             struct list_head *list)
 {
        int mapcount, mapcount2;
        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
@@ -1797,7 +1784,7 @@ static void __split_huge_page(struct page *page,
                       mapcount, page_mapcount(page));
        BUG_ON(mapcount != page_mapcount(page));
 
-       __split_huge_page_refcount(page);
+       __split_huge_page_refcount(page, list);
 
        mapcount2 = 0;
        anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
@@ -1812,12 +1799,19 @@ static void __split_huge_page(struct page *page,
        BUG_ON(mapcount != mapcount2);
 }
 
-int split_huge_page(struct page *page)
+/*
+ * Split a hugepage into normal pages. This doesn't change the position of head
+ * page. If @list is null, tail pages will be added to LRU list, otherwise, to
+ * @list. Both head page and tail pages will inherit mapping, flags, and so on
+ * from the hugepage.
+ * Return 0 if the hugepage is split successfully, otherwise return 1.
+ */
+int split_huge_page_to_list(struct page *page, struct list_head *list)
 {
        struct anon_vma *anon_vma;
        int ret = 1;
 
-       BUG_ON(is_huge_zero_pfn(page_to_pfn(page)));
+       BUG_ON(is_huge_zero_page(page));
        BUG_ON(!PageAnon(page));
 
        /*
@@ -1837,12 +1831,12 @@ int split_huge_page(struct page *page)
                goto out_unlock;
 
        BUG_ON(!PageSwapBacked(page));
-       __split_huge_page(page, anon_vma);
+       __split_huge_page(page, anon_vma, list);
        count_vm_event(THP_SPLIT);
 
        BUG_ON(PageCompound(page));
 out_unlock:
-       anon_vma_unlock(anon_vma);
+       anon_vma_unlock_write(anon_vma);
        put_anon_vma(anon_vma);
 out:
        return ret;
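
With the rename above, existing callers of split_huge_page() presumably keep working through a thin wrapper in the header (include/linux/huge_mm.h would be the natural home, but that file is not part of this diff), along the lines of:

	/*
	 * Assumed compatibility wrapper, not shown in this diff: the old
	 * entry point splits to the LRU lists by passing a NULL list.
	 */
	static inline int split_huge_page(struct page *page)
	{
		return split_huge_page_to_list(page, NULL);
	}
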
@@ -1904,12 +1898,6 @@ static int __init khugepaged_slab_init(void)
        return 0;
 }
 
-static void __init khugepaged_slab_free(void)
-{
-       kmem_cache_destroy(mm_slot_cache);
-       mm_slot_cache = NULL;
-}
-
 static inline struct mm_slot *alloc_mm_slot(void)
 {
        if (!mm_slot_cache)     /* initialization failed */
@@ -1922,47 +1910,22 @@ static inline void free_mm_slot(struct mm_slot *mm_slot)
        kmem_cache_free(mm_slot_cache, mm_slot);
 }
 
-static int __init mm_slots_hash_init(void)
-{
-       mm_slots_hash = kzalloc(MM_SLOTS_HASH_HEADS * sizeof(struct hlist_head),
-                               GFP_KERNEL);
-       if (!mm_slots_hash)
-               return -ENOMEM;
-       return 0;
-}
-
-#if 0
-static void __init mm_slots_hash_free(void)
-{
-       kfree(mm_slots_hash);
-       mm_slots_hash = NULL;
-}
-#endif
-
 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
 {
        struct mm_slot *mm_slot;
-       struct hlist_head *bucket;
-       struct hlist_node *node;
 
-       bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
-                               % MM_SLOTS_HASH_HEADS];
-       hlist_for_each_entry(mm_slot, node, bucket, hash) {
+       hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
                if (mm == mm_slot->mm)
                        return mm_slot;
-       }
+
        return NULL;
 }
 
 static void insert_to_mm_slots_hash(struct mm_struct *mm,
                                    struct mm_slot *mm_slot)
 {
-       struct hlist_head *bucket;
-
-       bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
-                               % MM_SLOTS_HASH_HEADS];
        mm_slot->mm = mm;
-       hlist_add_head(&mm_slot->hash, bucket);
+       hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
 }
 
 static inline int khugepaged_test_exit(struct mm_struct *mm)
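
The mm_slot lookup above is a straight conversion to the generic hash table in <linux/hashtable.h>: DEFINE_HASHTABLE() declares a static array of 1 << MM_SLOTS_HASH_BITS buckets, hash_add() hashes the key to pick a bucket, hash_for_each_possible() walks only that bucket, and hash_del() unlinks an entry (as used in the hunks below). A minimal sketch of the same pattern; the struct and names here are illustrative, not taken from the kernel:

	#include <linux/hashtable.h>

	struct demo_slot {				/* hypothetical example entry */
		unsigned long key;
		struct hlist_node hash;
	};

	static DEFINE_HASHTABLE(demo_hash, 10);		/* 2^10 = 1024 buckets */

	static void demo_insert(struct demo_slot *slot, unsigned long key)
	{
		slot->key = key;
		hash_add(demo_hash, &slot->hash, key);	/* key selects the bucket */
	}

	static struct demo_slot *demo_lookup(unsigned long key)
	{
		struct demo_slot *slot;

		/* Walks only the bucket that 'key' hashes to. */
		hash_for_each_possible(demo_hash, slot, hash, key)
			if (slot->key == key)
				return slot;
		return NULL;
	}

	static void demo_remove(struct demo_slot *slot)
	{
		hash_del(&slot->hash);			/* unlink from its bucket */
	}
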
@@ -2031,7 +1994,7 @@ void __khugepaged_exit(struct mm_struct *mm)
        spin_lock(&khugepaged_mm_lock);
        mm_slot = get_mm_slot(mm);
        if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
-               hlist_del(&mm_slot->hash);
+               hash_del(&mm_slot->hash);
                list_del(&mm_slot->mm_node);
                free = 1;
        }
@@ -2364,7 +2327,7 @@ static void collapse_huge_page(struct mm_struct *mm,
                BUG_ON(!pmd_none(*pmd));
                set_pmd_at(mm, address, pmd, _pmd);
                spin_unlock(&mm->page_table_lock);
-               anon_vma_unlock(vma->anon_vma);
+               anon_vma_unlock_write(vma->anon_vma);
                goto out;
        }
 
@@ -2372,7 +2335,7 @@ static void collapse_huge_page(struct mm_struct *mm,
         * All pages are isolated and locked so anon_vma rmap
         * can't run anymore.
         */
-       anon_vma_unlock(vma->anon_vma);
+       anon_vma_unlock_write(vma->anon_vma);
 
        __collapse_huge_page_copy(pte, new_page, vma, address, ptl);
        pte_unmap(pte);
@@ -2419,7 +2382,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
        struct page *page;
        unsigned long _address;
        spinlock_t *ptl;
-       int node = -1;
+       int node = NUMA_NO_NODE;
 
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 
@@ -2449,7 +2412,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
                 * be more sophisticated and look at more pages,
                 * but isn't for now.
                 */
-               if (node == -1)
+               if (node == NUMA_NO_NODE)
                        node = page_to_nid(page);
                VM_BUG_ON(PageCompound(page));
                if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
@@ -2480,7 +2443,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
 
        if (khugepaged_test_exit(mm)) {
                /* free mm_slot */
-               hlist_del(&mm_slot->hash);
+               hash_del(&mm_slot->hash);
                list_del(&mm_slot->mm_node);
 
                /*