mm: thp: tail page refcounting fix
[deliverable/linux.git] / mm / vmscan.c
index f51a33e8ed89111089b35dee6b3a6f7d084b07e7..a90c603a8d02937fd41bac6a7e72d2891f86bfaa 100644 (file)
@@ -2103,14 +2103,19 @@ restart:
  *
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
+ *
+ * This function returns true if a zone is being reclaimed for a costly
+ * high-order allocation and compaction is either ready to begin or deferred.
+ * This indicates to the caller that it should retry the allocation or fail.
  */
-static void shrink_zones(int priority, struct zonelist *zonelist,
+static bool shrink_zones(int priority, struct zonelist *zonelist,
                                        struct scan_control *sc)
 {
        struct zoneref *z;
        struct zone *zone;
        unsigned long nr_soft_reclaimed;
        unsigned long nr_soft_scanned;
+       bool should_abort_reclaim = false;
 
        for_each_zone_zonelist_nodemask(zone, z, zonelist,
                                        gfp_zone(sc->gfp_mask), sc->nodemask) {
@@ -2125,6 +2130,23 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
                                continue;
                        if (zone->all_unreclaimable && priority != DEF_PRIORITY)
                                continue;       /* Let kswapd poll it */
+                       if (COMPACTION_BUILD) {
+                               /*
+                                * If we already have plenty of memory free for
+                                * compaction in this zone, don't free any more.
+                                * Even though compaction is invoked for any
+                                * non-zero order, only frequent costly order
+                                * reclamation is disruptive enough to become a
+                                * noticeable problem, like transparent huge page
+                                * allocations.
+                                */
+                               if (sc->order > PAGE_ALLOC_COSTLY_ORDER &&
+                                       (compaction_suitable(zone, sc->order) ||
+                                        compaction_deferred(zone))) {
+                                       should_abort_reclaim = true;
+                                       continue;
+                               }
+                       }
                        /*
                         * This steals pages from memory cgroups over softlimit
                         * and returns the number of reclaimed pages and
@@ -2142,6 +2164,8 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
 
                shrink_zone(priority, zone, sc);
        }
+
+       return should_abort_reclaim;
 }
 
 static bool zone_reclaimable(struct zone *zone)
@@ -2206,7 +2230,9 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                sc->nr_scanned = 0;
                if (!priority)
                        disable_swap_token(sc->mem_cgroup);
-               shrink_zones(priority, zonelist, sc);
+               if (shrink_zones(priority, zonelist, sc))
+                       break;
+
                /*
                 * Don't shrink slabs when reclaiming memory from
                 * over limit cgroups
This page took 0.050708 seconds and 5 git commands to generate.