Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 00750bc08a3adbcf852961a5ee7e267988a9510a..bb90971182bd8c833e40f508e3c8b8677d6913c8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -598,17 +598,6 @@ out:
        zone->free_area[order].nr_free++;
 }
 
-/*
- * free_page_mlock() -- clean up attempts to free an mlocked() page.
- * Page should not be on lru, so no need to fix that up.
- * free_pages_check() will verify...
- */
-static inline void free_page_mlock(struct page *page)
-{
-       __dec_zone_page_state(page, NR_MLOCK);
-       __count_vm_event(UNEVICTABLE_MLOCKFREED);
-}
-
 static inline int free_pages_check(struct page *page)
 {
        if (unlikely(page_mapcount(page) |
@@ -728,15 +717,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
 static void __free_pages_ok(struct page *page, unsigned int order)
 {
        unsigned long flags;
-       int wasMlocked = __TestClearPageMlocked(page);
        int migratetype;
 
        if (!free_pages_prepare(page, order))
                return;
 
        local_irq_save(flags);
-       if (unlikely(wasMlocked))
-               free_page_mlock(page);
        __count_vm_events(PGFREE, 1 << order);
        migratetype = get_pageblock_migratetype(page);
        set_freepage_migratetype(page, migratetype);
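
Note: this hunk (and its twin in free_hot_cold_page() below) drops the Mlocked fixup from the free path. A condensed sketch of the logic being removed, combined from the deleted lines for illustration only:

        /*
         * Removed pattern: at free time, test-and-clear PG_mlocked and,
         * if it was set, fix up the zone's NR_MLOCK counter inside the
         * IRQ-disabled section.  The double-underscore (non-atomic)
         * page-flag op is usable because the page has no other users
         * once it reaches the free path.
         */
        int wasMlocked = __TestClearPageMlocked(page);

        local_irq_save(flags);
        if (unlikely(wasMlocked)) {
                __dec_zone_page_state(page, NR_MLOCK);
                __count_vm_event(UNEVICTABLE_MLOCKFREED);
        }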
@@ -1310,7 +1296,6 @@ void free_hot_cold_page(struct page *page, int cold)
        struct per_cpu_pages *pcp;
        unsigned long flags;
        int migratetype;
-       int wasMlocked = __TestClearPageMlocked(page);
 
        if (!free_pages_prepare(page, 0))
                return;
@@ -1318,8 +1303,6 @@ void free_hot_cold_page(struct page *page, int cold)
        migratetype = get_pageblock_migratetype(page);
        set_freepage_migratetype(page, migratetype);
        local_irq_save(flags);
-       if (unlikely(wasMlocked))
-               free_page_mlock(page);
        __count_vm_event(PGFREE);
 
        /*
@@ -1816,6 +1799,22 @@ static void zlc_clear_zones_full(struct zonelist *zonelist)
        bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
 }
 
+static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
+{
+       return node_isset(local_zone->node, zone->zone_pgdat->reclaim_nodes);
+}
+
+static void __paginginit init_zone_allows_reclaim(int nid)
+{
+       int i;
+
+       for_each_online_node(i)
+               if (node_distance(nid, i) <= RECLAIM_DISTANCE) {
+                       node_set(i, NODE_DATA(nid)->reclaim_nodes);
+                       zone_reclaim_mode = 1;
+               }
+}
+
 #else  /* CONFIG_NUMA */
 
 static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
@@ -1836,6 +1835,15 @@ static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
 static void zlc_clear_zones_full(struct zonelist *zonelist)
 {
 }
+
+static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
+{
+       return true;
+}
+
+static inline void init_zone_allows_reclaim(int nid)
+{
+}
 #endif /* CONFIG_NUMA */
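
Two notes on the helpers above: the reclaim_nodes nodemask they consult is presumably added to struct pglist_data by another hunk of this patch (not shown here), and the !CONFIG_NUMA stubs make the predicate vacuously true. A worked example of the mask, assuming the default RECLAIM_DISTANCE of 30 and hypothetical SLIT distances:

        /*
         * Hypothetical distances from node 0:
         *   node_distance(0, 0) == 10   (local)
         *   node_distance(0, 1) == 21   (nearby node)
         *   node_distance(0, 2) == 40   (remote node)
         *
         * init_zone_allows_reclaim(0) sets nodes 0 and 1 in
         * NODE_DATA(0)->reclaim_nodes (both <= 30) but not node 2.
         * An allocation preferring node 0 may then zone-reclaim in
         * node 1's zones, while node 2's zones are skipped via the new
         * zone_allows_reclaim() check (next hunk) rather than being
         * reclaimed from across the larger distance.
         */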
 
 /*
@@ -1920,7 +1928,8 @@ zonelist_scan:
                                did_zlc_setup = 1;
                        }
 
-                       if (zone_reclaim_mode == 0)
+                       if (zone_reclaim_mode == 0 ||
+                           !zone_allows_reclaim(preferred_zone, zone))
                                goto this_zone_full;
 
                        /*
@@ -3381,21 +3390,13 @@ static void build_zonelists(pg_data_t *pgdat)
        j = 0;
 
        while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
-               int distance = node_distance(local_node, node);
-
-               /*
-                * If another node is sufficiently far away then it is better
-                * to reclaim pages in a zone before going off node.
-                */
-               if (distance > RECLAIM_DISTANCE)
-                       zone_reclaim_mode = 1;
-
                /*
                 * We don't want to pressure a particular node.
                 * So adding penalty to the first node in same
                 * distance group to make it round-robin.
                 */
-               if (distance != node_distance(local_node, prev_node))
+               if (node_distance(local_node, node) !=
+                   node_distance(local_node, prev_node))
                        node_load[node] = load;
 
                prev_node = node;
@@ -4569,6 +4570,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
 
        pgdat->node_id = nid;
        pgdat->node_start_pfn = node_start_pfn;
+       init_zone_allows_reclaim(nid);
        calculate_node_totalpages(pgdat, zones_size, zholes_size);
 
        alloc_node_mem_map(pgdat);
@@ -5672,7 +5674,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
                                        unsigned long start, unsigned long end)
 {
        /* This function is based on compact_zone() from compaction.c. */
-
+       unsigned long nr_reclaimed;
        unsigned long pfn = start;
        unsigned int tries = 0;
        int ret = 0;
@@ -5688,7 +5690,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
                if (list_empty(&cc->migratepages)) {
                        cc->nr_migratepages = 0;
                        pfn = isolate_migratepages_range(cc->zone, cc,
-                                                        pfn, end);
+                                                        pfn, end, true);
                        if (!pfn) {
                                ret = -EINTR;
                                break;
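
The extra argument passed to isolate_migratepages_range() above is taken here to be a new boolean allowing unevictable (e.g. mlocked) pages to be isolated as well, which a contiguous-range allocation must be able to move; the parameter name below is an assumption, since the function's declaration is not part of this diff:

        /*
         * Assumed signature (declared elsewhere, not in this diff):
         *
         *   unsigned long isolate_migratepages_range(struct zone *zone,
         *                          struct compact_control *cc,
         *                          unsigned long low_pfn,
         *                          unsigned long end_pfn,
         *                          bool unevictable);
         *
         * Passing true asks the scanner to take pages off the
         * unevictable LRU too, instead of leaving them in place.
         */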
@@ -5699,7 +5701,9 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
                        break;
                }
 
-               reclaim_clean_pages_from_list(cc->zone, &cc->migratepages);
+               nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
+                                                       &cc->migratepages);
+               cc->nr_migratepages -= nr_reclaimed;
 
                ret = migrate_pages(&cc->migratepages,
                                    alloc_migrate_target,
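
The accounting fix above keeps cc->nr_migratepages honest: reclaim_clean_pages_from_list() is assumed to free clean pages directly and unlink them from cc->migratepages, so the in-flight count must shrink by the number reclaimed or the retry logic sees work that no longer exists. A minimal sketch of the invariant, with the helper's behavior stated as an assumption:

        /*
         * Invariant being restored (sketch): cc->nr_migratepages must
         * equal the number of pages still on cc->migratepages.
         */
        nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
                                                     &cc->migratepages);
        /* pages freed by reclaim are gone from the list; forget them */
        cc->nr_migratepages -= nr_reclaimed;
        /* only the remainder is handed to migrate_pages() below */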
@@ -5914,6 +5918,7 @@ static int __meminit __zone_pcp_update(void *data)
                local_irq_save(flags);
                if (pcp->count > 0)
                        free_pcppages_bulk(zone, pcp->count, pcp);
+               drain_zonestat(zone, pset);
                setup_pageset(pset, batch);
                local_irq_restore(flags);
        }
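
drain_zonestat() itself is not defined in this file; a hedged sketch of what it is assumed to do here, folding each pending per-cpu counter delta back into the zone (and global) totals before setup_pageset() wipes the structure:

        /* sketch only; the real helper is expected to live in mm/vmstat.c */
        void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
        {
                int i;

                for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
                        if (pset->vm_stat_diff[i]) {
                                int v = pset->vm_stat_diff[i];

                                pset->vm_stat_diff[i] = 0;
                                atomic_long_add(v, &zone->vm_stat[i]);
                                atomic_long_add(v, &vm_stat[i]);
                        }
        }

zone_pcp_reset() in the final hunk reuses the same helper once per online CPU before free_percpu() discards the pagesets, so no deltas are lost when a zone's pagesets are torn down.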
@@ -5930,10 +5935,16 @@ void __meminit zone_pcp_update(struct zone *zone)
 void zone_pcp_reset(struct zone *zone)
 {
        unsigned long flags;
+       int cpu;
+       struct per_cpu_pageset *pset;
 
        /* avoid races with drain_pages()  */
        local_irq_save(flags);
        if (zone->pageset != &boot_pageset) {
+               for_each_online_cpu(cpu) {
+                       pset = per_cpu_ptr(zone->pageset, cpu);
+                       drain_zonestat(zone, pset);
+               }
                free_percpu(zone->pageset);
                zone->pageset = &boot_pageset;
        }