diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 121a7f808216fbc157b8b069099935b05b62aa9d..2373f0a7d3405bb869ac4a3a4be175289af38114 100644
@@ -539,23 +539,26 @@ static int __do_huge_pmd_anonymous_page(struct fault_env *fe, struct page *page,
 }
 
 /*
- * If THP is set to always then directly reclaim/compact as necessary
- * If set to defer then do no reclaim and defer to khugepaged
+ * If THP defrag is set to always then directly reclaim/compact as necessary
+ * If set to defer then do only background reclaim/compact and defer to khugepaged
  * If set to madvise and the VMA is flagged then directly reclaim/compact
+ * When direct reclaim/compact is allowed, don't retry except for flagged VMA's
  */
 static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma)
 {
-       gfp_t reclaim_flags = 0;
-
-       if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags) &&
-           (vma->vm_flags & VM_HUGEPAGE))
-               reclaim_flags = __GFP_DIRECT_RECLAIM;
-       else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
-               reclaim_flags = __GFP_KSWAPD_RECLAIM;
-       else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
-               reclaim_flags = __GFP_DIRECT_RECLAIM;
-
-       return GFP_TRANSHUGE | reclaim_flags;
+       bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE);
+
+       if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
+                               &transparent_hugepage_flags) && vma_madvised)
+               return GFP_TRANSHUGE;
+       else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
+                                               &transparent_hugepage_flags))
+               return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;
+       else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
+                                               &transparent_hugepage_flags))
+               return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);
+
+       return GFP_TRANSHUGE_LIGHT;
 }
 
 /* Caller must hold page table lock. */
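
For quick reference, the hunk above boils down to a defrag-setting to gfp-mask mapping. The following user-space sketch is purely illustrative: the enum, the pick_gfp() helper and main() are invented for this note, and the returned strings only name the masks that the real function builds from transparent_hugepage_flags and include/linux/gfp.h.

/*
 * Illustrative stand-in for alloc_hugepage_direct_gfpmask(): maps the THP
 * "defrag" setting plus the VMA's madvise state to the gfp mask the new
 * code returns. Compiles and runs as an ordinary user-space program.
 */
#include <stdbool.h>
#include <stdio.h>

enum thp_defrag { DEFRAG_ALWAYS, DEFRAG_DEFER, DEFRAG_MADVISE, DEFRAG_NEVER };

static const char *pick_gfp(enum thp_defrag mode, bool vma_madvised)
{
	switch (mode) {
	case DEFRAG_MADVISE:
		/* only madvise(MADV_HUGEPAGE) regions may direct reclaim */
		return vma_madvised ? "GFP_TRANSHUGE" : "GFP_TRANSHUGE_LIGHT";
	case DEFRAG_DEFER:
		/* kick background reclaim/compaction, never stall the fault */
		return "GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM";
	case DEFRAG_ALWAYS:
		/* direct reclaim/compact, but only retry hard for madvised VMAs */
		return vma_madvised ? "GFP_TRANSHUGE"
				    : "GFP_TRANSHUGE | __GFP_NORETRY";
	default:
		return "GFP_TRANSHUGE_LIGHT";
	}
}

int main(void)
{
	printf("always, not madvised: %s\n", pick_gfp(DEFRAG_ALWAYS, false));
	printf("defer:                %s\n", pick_gfp(DEFRAG_DEFER, false));
	printf("madvise + madvised:   %s\n", pick_gfp(DEFRAG_MADVISE, true));
	return 0;
}
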
@@ -1249,25 +1252,26 @@ out:
        return 0;
 }
 
-int madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
+/*
+ * Return true if we do MADV_FREE successfully on entire pmd page.
+ * Otherwise, return false.
+ */
+bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                pmd_t *pmd, unsigned long addr, unsigned long next)
-
 {
        spinlock_t *ptl;
        pmd_t orig_pmd;
        struct page *page;
        struct mm_struct *mm = tlb->mm;
-       int ret = 0;
+       bool ret = false;
 
        ptl = pmd_trans_huge_lock(pmd, vma);
        if (!ptl)
                goto out_unlocked;
 
        orig_pmd = *pmd;
-       if (is_huge_zero_pmd(orig_pmd)) {
-               ret = 1;
+       if (is_huge_zero_pmd(orig_pmd))
                goto out;
-       }
 
        page = pmd_page(orig_pmd);
        /*
@@ -1309,7 +1313,7 @@ int madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                set_pmd_at(mm, addr, pmd, orig_pmd);
                tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
        }
-       ret = 1;
+       ret = true;
 out:
        spin_unlock(ptl);
 out_unlocked:
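
The switch from int to bool also documents the contract with the caller: madvise_free_huge_pmd() now reports whether MADV_FREE was applied to the entire pmd page. Below is a hedged sketch of how a pte-range walker can use that result; the function name madvise_free_pte_range_sketch and the elided pte loop are assumptions about the caller in mm/madvise.c, not part of this diff, and the snippet is only meant to build in a kernel context with the usual mm headers.

#include <linux/mm.h>
#include <linux/huge_mm.h>

/* Sketch of the calling convention, not the actual mm/madvise.c code. */
static int madvise_free_pte_range_sketch(struct mmu_gather *tlb,
					 struct vm_area_struct *vma,
					 pmd_t *pmd, unsigned long addr,
					 unsigned long next)
{
	if (pmd_trans_huge(*pmd)) {
		/* true: the entire pmd page was MADV_FREEd, so there is
		 * nothing left for the pte-level walk in this range. */
		if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
			return 0;
		/* false: the huge pmd could not be handled as a unit; fall
		 * through and let the pte-level loop process the range. */
	}
	/* ... per-pte MADV_FREE handling elided ... */
	return 0;
}
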