mm/hwpoison: fix refcount of THP head page in no-injection case
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5b5240b7f642de179efa3552245fbf9326d7a206..252665d553b4874182fc3125512ef9ac78528c31 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -125,6 +125,24 @@ unsigned long dirty_balance_reserve __read_mostly;
 int percpu_pagelist_fraction;
 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
 
+/*
+ * A cached value of the page's pageblock's migratetype, used when the page is
+ * put on a pcplist. Used to avoid the pageblock migratetype lookup when
+ * freeing from pcplists in most cases, at the cost of possibly becoming stale.
+ * Also the migratetype set in the page does not necessarily match the pcplist
+ * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
+ * other index - this ensures that it will be put on the correct CMA freelist.
+ */
+static inline int get_pcppage_migratetype(struct page *page)
+{
+       return page->index;
+}
+
+static inline void set_pcppage_migratetype(struct page *page, int migratetype)
+{
+       page->index = migratetype;
+}
+
 #ifdef CONFIG_PM_SLEEP
 /*
  * The following functions are used by the suspend/hibernate code to temporarily
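
For context, the life cycle of the cached value looks roughly like this. This is a simplified sketch, not part of the patch: free_path() and drain_path() are illustrative stand-ins for free_hot_cold_page() and free_pcppages_bulk() as modified in the hunks below.

	/* Free path: look up the pageblock migratetype once and cache it
	 * in the page before the page goes onto a per-cpu list.
	 */
	void free_path(struct page *page, unsigned long pfn)
	{
		int migratetype = get_pfnblock_migratetype(page, pfn);

		set_pcppage_migratetype(page, migratetype);	/* cache it */
		/* ... list_add(&page->lru, pcplist) ... */
	}

	/* Drain path: reuse the cached value instead of a second lookup,
	 * and only fall back to the authoritative pageblock lookup when
	 * some pageblock in the zone may have been isolated after the
	 * value was cached.
	 */
	void drain_path(struct zone *zone, struct page *page)
	{
		int mt = get_pcppage_migratetype(page);	/* possibly stale */

		if (unlikely(has_isolate_pageblock(zone)))
			mt = get_pageblock_migratetype(page);
		/* ... __free_one_page(page, ..., mt) ... */
	}

The staleness re-check is cheap in the common case: has_isolate_pageblock() is, in kernels of this era, just a test of the zone->nr_isolate_pageblock counter (quoted from memory), so the full pageblock lookup only happens while isolation is actually in progress somewhere in the zone.
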
@@ -788,7 +806,11 @@ static void free_pcppages_bulk(struct zone *zone, int count,
                        page = list_entry(list->prev, struct page, lru);
                        /* must delete as __free_one_page list manipulates */
                        list_del(&page->lru);
-                       mt = get_freepage_migratetype(page);
+
+                       mt = get_pcppage_migratetype(page);
+                       /* MIGRATE_ISOLATE page should not go to pcplists */
+                       VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
+                       /* Pageblock could have been isolated meanwhile */
                        if (unlikely(has_isolate_pageblock(zone)))
                                mt = get_pageblock_migratetype(page);
 
@@ -952,7 +974,6 @@ static void __free_pages_ok(struct page *page, unsigned int order)
        migratetype = get_pfnblock_migratetype(page, pfn);
        local_irq_save(flags);
        __count_vm_events(PGFREE, 1 << order);
-       set_freepage_migratetype(page, migratetype);
        free_one_page(page_zone(page), page, pfn, order, migratetype);
        local_irq_restore(flags);
 }
@@ -1380,7 +1401,7 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
                rmv_page_order(page);
                area->nr_free--;
                expand(zone, page, order, current_order, area, migratetype);
-               set_freepage_migratetype(page, migratetype);
+               set_pcppage_migratetype(page, migratetype);
                return page;
        }
 
@@ -1457,7 +1478,6 @@ int move_freepages(struct zone *zone,
                order = page_order(page);
                list_move(&page->lru,
                          &zone->free_area[order].free_list[migratetype]);
-               set_freepage_migratetype(page, migratetype);
                page += 1 << order;
                pages_moved += 1 << order;
        }
@@ -1627,14 +1647,13 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
                expand(zone, page, order, current_order, area,
                                        start_migratetype);
                /*
-                * The freepage_migratetype may differ from pageblock's
+                * The pcppage_migratetype may differ from pageblock's
                 * migratetype depending on the decisions in
-                * try_to_steal_freepages(). This is OK as long as it
-                * does not differ for MIGRATE_CMA pageblocks. For CMA
-                * we need to make sure unallocated pages flushed from
-                * pcp lists are returned to the correct freelist.
+                * find_suitable_fallback(). This is OK as long as it does not
+                * differ for MIGRATE_CMA pageblocks. Those can be used as
+                * fallback only via the special __rmqueue_cma_fallback() function.
                 */
-               set_freepage_migratetype(page, start_migratetype);
+               set_pcppage_migratetype(page, start_migratetype);
 
                trace_mm_page_alloc_extfrag(page, order, current_order,
                        start_migratetype, fallback_mt);
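
The comment's reasoning holds because MIGRATE_CMA pageblocks never come through this generic fallback path at all. They are allocated only via a dedicated helper, which in this kernel reads roughly as below (quoted from memory, treat it as an approximation). Since it goes through __rmqueue_smallest(), the earlier hunk guarantees the cached pcppage migratetype is MIGRATE_CMA for those pages, so the cache and the pageblock cannot disagree for CMA.

	#ifdef CONFIG_CMA
	static struct page *__rmqueue_cma_fallback(struct zone *zone,
						unsigned int order)
	{
		return __rmqueue_smallest(zone, order, MIGRATE_CMA);
	}
	#endif
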
@@ -1710,7 +1729,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
                else
                        list_add_tail(&page->lru, list);
                list = &page->lru;
-               if (is_migrate_cma(get_freepage_migratetype(page)))
+               if (is_migrate_cma(get_pcppage_migratetype(page)))
                        __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
                                              -(1 << order));
        }
@@ -1907,7 +1926,7 @@ void free_hot_cold_page(struct page *page, bool cold)
                return;
 
        migratetype = get_pfnblock_migratetype(page, pfn);
-       set_freepage_migratetype(page, migratetype);
+       set_pcppage_migratetype(page, migratetype);
        local_irq_save(flags);
        __count_vm_event(PGFREE);
 
@@ -2112,7 +2131,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
                if (!page)
                        goto failed;
                __mod_zone_freepage_state(zone, -(1 << order),
-                                         get_freepage_migratetype(page));
+                                         get_pcppage_migratetype(page));
        }
 
        __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
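
The migratetype matters on this allocation path because of CMA accounting: __mod_zone_freepage_state() adjusts NR_FREE_CMA_PAGES in addition to NR_FREE_PAGES when the type is MIGRATE_CMA. Its definition at the time, quoted from memory from include/linux/vmstat.h:

	static inline void __mod_zone_freepage_state(struct zone *zone,
					int nr_pages, int migratetype)
	{
		__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
		if (is_migrate_cma(migratetype))
			__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
	}

Reading the cached pcppage migratetype here keeps the CMA counter consistent with the value that was cached when the page left the buddy free lists.
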
@@ -2693,6 +2712,12 @@ static inline struct page *
 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
        const struct alloc_context *ac, unsigned long *did_some_progress)
 {
+       struct oom_control oc = {
+               .zonelist = ac->zonelist,
+               .nodemask = ac->nodemask,
+               .gfp_mask = gfp_mask,
+               .order = order,
+       };
        struct page *page;
 
        *did_some_progress = 0;
@@ -2744,8 +2769,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
                        goto out;
        }
        /* Exhausted what can be done so it's blamo time */
-       if (out_of_memory(ac->zonelist, gfp_mask, order, ac->nodemask, false)
-                       || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL))
+       if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL))
                *did_some_progress = 1;
 out:
        mutex_unlock(&oom_lock);
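
The oom_control structure is introduced elsewhere in the same series (in include/linux/oom.h); it packages the arguments that out_of_memory() previously took individually. Reconstructed from the initializer above, its definition is approximately:

	struct oom_control {
		struct zonelist *zonelist;	/* for cpuset checks */
		nodemask_t *nodemask;		/* allocation constraints */
		gfp_t gfp_mask;			/* gfp mask of the failed allocation */
		int order;			/* order of the failed allocation */
	};

Note that the old trailing "false" (the force_kill flag) has no counterpart in the initializer; elsewhere in the series a sysrq-forced kill is reportedly signalled by an order of -1 instead.
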
@@ -5303,8 +5327,7 @@ static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages,
  *
  * NOTE: pgdat should get zeroed by caller.
  */
-static void __paginginit free_area_init_core(struct pglist_data *pgdat,
-               unsigned long node_start_pfn, unsigned long node_end_pfn)
+static void __paginginit free_area_init_core(struct pglist_data *pgdat)
 {
        enum zone_type j;
        int nid = pgdat->node_id;
@@ -5467,7 +5490,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
                (unsigned long)pgdat->node_mem_map);
 #endif
 
-       free_area_init_core(pgdat, start_pfn, end_pfn);
+       free_area_init_core(pgdat);
 }
 
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
@@ -5478,11 +5501,9 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
  */
 void __init setup_nr_node_ids(void)
 {
-       unsigned int node;
-       unsigned int highest = 0;
+       unsigned int highest;
 
-       for_each_node_mask(node, node_possible_map)
-               highest = node;
+       highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
        nr_node_ids = highest + 1;
 }
 #endif
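
find_last_bit() collapses the removed loop, which visited every set bit just to remember the last one, into a single reverse scan of the underlying bitmap. The two forms compute the same value, sketched side by side for illustration:

	/*
	 * old:	for_each_node_mask(node, node_possible_map)
	 *		highest = node;		(ends up as the last set bit)
	 *
	 * new:	highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
	 */

One corner case is worth noting: find_last_bit() returns its size argument (MAX_NUMNODES here) when no bit is set at all, but node 0 is always present in node_possible_map, so that cannot happen in this function.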