mm: vmscan: fix do_try_to_free_pages() livelock
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f885eb82715950515ec708a4cde5d654ab53bdf7..ff2782576e394849735ace584866de62adb73ba9 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -56,6 +56,7 @@
 #include <linux/ftrace_event.h>
 #include <linux/memcontrol.h>
 #include <linux/prefetch.h>
+#include <linux/mm_inline.h>
 #include <linux/migrate.h>
 #include <linux/page-debug-flags.h>
 #include <linux/hugetlb.h>
@@ -647,7 +648,6 @@ static void free_pcppages_bulk(struct zone *zone, int count,
        int to_free = count;
 
        spin_lock(&zone->lock);
-       zone->all_unreclaimable = 0;
        zone->pages_scanned = 0;
 
        while (to_free) {
@@ -696,7 +696,6 @@ static void free_one_page(struct zone *zone, struct page *page, int order,
                                int migratetype)
 {
        spin_lock(&zone->lock);
-       zone->all_unreclaimable = 0;
        zone->pages_scanned = 0;
 
        __free_one_page(page, zone, order, migratetype);
@@ -1306,7 +1305,7 @@ void mark_free_pages(struct zone *zone)
        int order, t;
        struct list_head *curr;
 
-       if (!zone->spanned_pages)
+       if (zone_is_empty(zone))
                return;
 
        spin_lock_irqsave(&zone->lock, flags);
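The open-coded test on zone->spanned_pages is replaced by the zone_is_empty() helper from include/linux/mmzone.h. A minimal sketch of what that helper amounts to (body assumed here, not copied from the tree):

        /* Sketch only: a zone spanning no PFNs has nothing to mark. */
        static inline bool zone_is_empty(struct zone *zone)
        {
                return !zone->spanned_pages;
        }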
@@ -1896,7 +1895,7 @@ get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
 zonelist_scan:
        /*
         * Scan zonelist, looking for a zone with enough free.
-        * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
+        * See also __cpuset_node_allowed_softwall() comment in kernel/cpuset.c.
         */
        for_each_zone_zonelist_nodemask(zone, z, zonelist,
                                                high_zoneidx, nodemask) {
@@ -3164,7 +3163,7 @@ void show_free_areas(unsigned int filter)
                        K(zone_page_state(zone, NR_FREE_CMA_PAGES)),
                        K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
                        zone->pages_scanned,
-                       (zone->all_unreclaimable ? "yes" : "no")
+                       (!zone_reclaimable(zone) ? "yes" : "no")
                        );
                printk("lowmem_reserve[]:");
                for (i = 0; i < MAX_NR_ZONES; i++)
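This hunk and the two free-path hunks above all revolve around retiring the zone->all_unreclaimable flag: instead of a stored flag that had to be cleared on every page free, reclaimability is now derived on demand from zone->pages_scanned. A rough sketch of the zone_reclaimable() helper this diff relies on, added to mm/vmscan.c by the same series (treat the exact body and the factor of six as an assumption):

        /*
         * Sketch: a zone still counts as reclaimable until it has been
         * scanned roughly six times the number of pages reclaim could
         * still take from it without making progress.
         */
        bool zone_reclaimable(struct zone *zone)
        {
                return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
        }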
@@ -4306,7 +4305,7 @@ int __meminit init_currently_empty_zone(struct zone *zone,
 int __meminit __early_pfn_to_nid(unsigned long pfn)
 {
        unsigned long start_pfn, end_pfn;
-       int i, nid;
+       int nid;
        /*
         * NOTE: The following SMP-unsafe globals are only used early in boot
         * when the kernel is running single-threaded.
@@ -4317,15 +4316,14 @@ int __meminit __early_pfn_to_nid(unsigned long pfn)
        if (last_start_pfn <= pfn && pfn < last_end_pfn)
                return last_nid;
 
-       for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
-               if (start_pfn <= pfn && pfn < end_pfn) {
-                       last_start_pfn = start_pfn;
-                       last_end_pfn = end_pfn;
-                       last_nid = nid;
-                       return nid;
-               }
-       /* This is a memory hole */
-       return -1;
+       nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
+       if (nid != -1) {
+               last_start_pfn = start_pfn;
+               last_end_pfn = end_pfn;
+               last_nid = nid;
+       }
+
+       return nid;
 }
 #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
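The rewritten lookup keeps the one-entry cache (last_start_pfn/last_end_pfn/last_nid) and pushes the range walk down into memblock_search_pfn_nid(), which searches memblock.memory for the region covering the PFN. A hedged sketch of that helper, assumed from the memblock change paired with this diff (details may differ):

        /*
         * Sketch (assumed): find the memblock.memory region containing @pfn,
         * report its PFN bounds, and return its node id, or -1 for a hole.
         */
        int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
                                unsigned long *start_pfn, unsigned long *end_pfn)
        {
                struct memblock_type *type = &memblock.memory;
                int mid = memblock_search(type, PFN_PHYS(pfn));

                if (mid == -1)
                        return -1;      /* memory hole */

                *start_pfn = PFN_DOWN(type->regions[mid].base);
                *end_pfn = PFN_DOWN(type->regions[mid].base +
                                    type->regions[mid].size);

                return memblock_get_region_node(&type->regions[mid]);
        }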
 
@@ -6008,6 +6006,17 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
                        continue;
 
                page = pfn_to_page(check);
+
+               /*
+                * Hugepages are not in LRU lists, but they're movable.
+                * We need not scan over tail pages because we don't
+                * handle each tail page individually in migration.
+                */
+               if (PageHuge(page)) {
+                       iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
+                       continue;
+               }
+
                /*
                 * We can't use page_count without pinning the page
                 * because another CPU can free compound page.
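The round_up() in the PageHuge() branch above moves iter to the last tail PFN of the compound page, so the loop's iter++ resumes just past the hugepage. A worked example (order and offsets are illustrative only, assuming a pageblock-aligned 2MB hugepage of order 9, i.e. 512 base pages):

                /*
                 * Illustration only: hitting the head page at iter == 0 gives
                 *     round_up(0 + 1, 512) - 1 == 511
                 * and hitting a tail page at, say, iter == 3 gives
                 *     round_up(3 + 1, 512) - 1 == 511
                 * either way the next iteration starts at iter == 512, the
                 * first PFN after the hugepage, so no tail page is inspected.
                 */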