oom: move oom_killer_enable()/oom_killer_disable() to where they belong
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b16d636347771016cf8433ccc4a3807bec8ede4e..c001f846f17d1ee1baa6b9036d66ca00490a97a2 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -456,24 +456,6 @@ static void enqueue_huge_page(struct hstate *h, struct page *page)
        h->free_huge_pages_node[nid]++;
 }
 
-static struct page *dequeue_huge_page(struct hstate *h)
-{
-       int nid;
-       struct page *page = NULL;
-
-       for (nid = 0; nid < MAX_NUMNODES; ++nid) {
-               if (!list_empty(&h->hugepage_freelists[nid])) {
-                       page = list_entry(h->hugepage_freelists[nid].next,
-                                         struct page, lru);
-                       list_del(&page->lru);
-                       h->free_huge_pages--;
-                       h->free_huge_pages_node[nid]--;
-                       break;
-               }
-       }
-       return page;
-}
-
 static struct page *dequeue_huge_page_vma(struct hstate *h,
                                struct vm_area_struct *vma,
                                unsigned long address, int avoid_reserve)
@@ -641,7 +623,7 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
 
 /*
  * Use a helper variable to find the next node and then
- * copy it back to hugetlb_next_nid afterwards:
+ * copy it back to next_nid_to_alloc afterwards:
  * otherwise there's a window in which a racer might
  * pass invalid nid MAX_NUMNODES to alloc_pages_exact_node.
  * But we don't need to use a spin_lock here: it really
@@ -650,13 +632,13 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
  * if we just successfully allocated a hugepage so that
  * the next caller gets hugepages on the next node.
  */
-static int hstate_next_node(struct hstate *h)
+static int hstate_next_node_to_alloc(struct hstate *h)
 {
        int next_nid;
-       next_nid = next_node(h->hugetlb_next_nid, node_online_map);
+       next_nid = next_node(h->next_nid_to_alloc, node_online_map);
        if (next_nid == MAX_NUMNODES)
                next_nid = first_node(node_online_map);
-       h->hugetlb_next_nid = next_nid;
+       h->next_nid_to_alloc = next_nid;
        return next_nid;
 }
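
The rotor above simply walks node_online_map and wraps back to the first online node when it falls off the end. Below is a minimal userspace sketch of that advance-and-wrap pattern, not kernel code: the bool array, the NODES constant and the helper names are invented stand-ins for node_online_map, MAX_NUMNODES and the nodemask helpers.

#include <stdbool.h>
#include <stdio.h>

#define NODES 4                                 /* stand-in for MAX_NUMNODES */
static bool node_online[NODES] = { true, false, true, true };

/* next online node strictly after nid, or NODES if there is none */
static int next_online_node(int nid)
{
        for (int n = nid + 1; n < NODES; n++)
                if (node_online[n])
                        return n;
        return NODES;
}

/* mirrors hstate_next_node_to_alloc(): advance the rotor, wrapping around */
static int rotor_advance(int *rotor)
{
        int next = next_online_node(*rotor);

        if (next == NODES)
                next = next_online_node(-1);    /* i.e. first_node() */
        *rotor = next;
        return next;
}

int main(void)
{
        int rotor = next_online_node(-1);       /* first online node: 0 */

        for (int i = 0; i < 6; i++)
                printf("%d ", rotor_advance(&rotor));
        printf("\n");                           /* prints: 2 3 0 2 3 0 */
        return 0;
}
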
 
@@ -667,14 +649,15 @@ static int alloc_fresh_huge_page(struct hstate *h)
        int next_nid;
        int ret = 0;
 
-       start_nid = h->hugetlb_next_nid;
+       start_nid = h->next_nid_to_alloc;
+       next_nid = start_nid;
 
        do {
-               page = alloc_fresh_huge_page_node(h, h->hugetlb_next_nid);
+               page = alloc_fresh_huge_page_node(h, next_nid);
                if (page)
                        ret = 1;
-               next_nid = hstate_next_node(h);
-       } while (!page && h->hugetlb_next_nid != start_nid);
+               next_nid = hstate_next_node_to_alloc(h);
+       } while (!page && next_nid != start_nid);
 
        if (ret)
                count_vm_event(HTLB_BUDDY_PGALLOC);
@@ -684,6 +667,61 @@ static int alloc_fresh_huge_page(struct hstate *h)
        return ret;
 }
 
+/*
+ * helper for free_pool_huge_page() - find next node
+ * from which to free a huge page
+ */
+static int hstate_next_node_to_free(struct hstate *h)
+{
+       int next_nid;
+       next_nid = next_node(h->next_nid_to_free, node_online_map);
+       if (next_nid == MAX_NUMNODES)
+               next_nid = first_node(node_online_map);
+       h->next_nid_to_free = next_nid;
+       return next_nid;
+}
+
+/*
+ * Free a huge page from the pool, starting at the next node to free from.
+ * Attempt to keep persistent huge pages more or less
+ * balanced over the allowed nodes.
+ * Called with hugetlb_lock held.
+ */
+static int free_pool_huge_page(struct hstate *h, bool acct_surplus)
+{
+       int start_nid;
+       int next_nid;
+       int ret = 0;
+
+       start_nid = h->next_nid_to_free;
+       next_nid = start_nid;
+
+       do {
+               /*
+                * If we're returning unused surplus pages, only examine
+                * nodes with surplus pages.
+                */
+               if ((!acct_surplus || h->surplus_huge_pages_node[next_nid]) &&
+                   !list_empty(&h->hugepage_freelists[next_nid])) {
+                       struct page *page =
+                               list_entry(h->hugepage_freelists[next_nid].next,
+                                         struct page, lru);
+                       list_del(&page->lru);
+                       h->free_huge_pages--;
+                       h->free_huge_pages_node[next_nid]--;
+                       if (acct_surplus) {
+                               h->surplus_huge_pages--;
+                               h->surplus_huge_pages_node[next_nid]--;
+                       }
+                       update_and_free_page(h, page);
+                       ret = 1;
+               }
+               next_nid = hstate_next_node_to_free(h);
+       } while (!ret && next_nid != start_nid);
+
+       return ret;
+}
+
 static struct page *alloc_buddy_huge_page(struct hstate *h,
                        struct vm_area_struct *vma, unsigned long address)
 {
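
The new free_pool_huge_page() follows the same one-lap convention as the allocation path: start at the free rotor, try each node once, and stop as soon as a page has been freed or the scan wraps back to its starting node. Here is a rough userspace sketch of that loop shape, with an int array standing in for the per-node free lists and every node assumed online; names and data are illustrative only.

#include <stdbool.h>
#include <stdio.h>

#define NODES 4
static int free_pages[NODES] = { 0, 0, 3, 1 };  /* per-node free counts */
static int next_nid_to_free;

/* advance the free rotor; every node is assumed online in this sketch */
static int advance_free_rotor(void)
{
        next_nid_to_free = (next_nid_to_free + 1) % NODES;
        return next_nid_to_free;
}

/* mirrors the loop shape of free_pool_huge_page(): free one page, balanced */
static bool free_one_pool_page(void)
{
        int start_nid = next_nid_to_free;
        int next_nid = start_nid;
        bool ret = false;

        do {
                if (free_pages[next_nid] > 0) {
                        free_pages[next_nid]--;         /* "free" a page here */
                        ret = true;
                }
                next_nid = advance_free_rotor();
        } while (!ret && next_nid != start_nid);

        return ret;
}

int main(void)
{
        /* successive frees land on nodes 2, 3, 2, 2, then the pool is empty */
        while (free_one_pool_page())
                ;
        printf("free pages left: %d %d %d %d\n",
               free_pages[0], free_pages[1], free_pages[2], free_pages[3]);
        return 0;
}
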
@@ -855,22 +893,13 @@ free:
  * When releasing a hugetlb pool reservation, any surplus pages that were
  * allocated to satisfy the reservation must be explicitly freed if they were
  * never used.
+ * Called with hugetlb_lock held.
  */
 static void return_unused_surplus_pages(struct hstate *h,
                                        unsigned long unused_resv_pages)
 {
-       static int nid = -1;
-       struct page *page;
        unsigned long nr_pages;
 
-       /*
-        * We want to release as many surplus pages as possible, spread
-        * evenly across all nodes. Iterate across all nodes until we
-        * can no longer free unreserved surplus pages. This occurs when
-        * the nodes with surplus pages have no free pages.
-        */
-       unsigned long remaining_iterations = nr_online_nodes;
-
        /* Uncommit the reservation */
        h->resv_huge_pages -= unused_resv_pages;
 
@@ -880,26 +909,17 @@ static void return_unused_surplus_pages(struct hstate *h,
 
        nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
 
-       while (remaining_iterations-- && nr_pages) {
-               nid = next_node(nid, node_online_map);
-               if (nid == MAX_NUMNODES)
-                       nid = first_node(node_online_map);
-
-               if (!h->surplus_huge_pages_node[nid])
-                       continue;
-
-               if (!list_empty(&h->hugepage_freelists[nid])) {
-                       page = list_entry(h->hugepage_freelists[nid].next,
-                                         struct page, lru);
-                       list_del(&page->lru);
-                       update_and_free_page(h, page);
-                       h->free_huge_pages--;
-                       h->free_huge_pages_node[nid]--;
-                       h->surplus_huge_pages--;
-                       h->surplus_huge_pages_node[nid]--;
-                       nr_pages--;
-                       remaining_iterations = nr_online_nodes;
-               }
+       /*
+        * We want to release as many surplus pages as possible, spread
+        * evenly across all nodes. Iterate across all nodes until we
+        * can no longer free unreserved surplus pages. This occurs when
+        * the nodes with surplus pages have no free pages.
+        * free_pool_huge_page() will balance the frees across the
+        * on-line nodes for us and will handle the hstate accounting.
+        */
+       while (nr_pages--) {
+               if (!free_pool_huge_page(h, 1))
+                       break;
        }
 }
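
In userspace terms, the rewritten return_unused_surplus_pages() reduces to: uncommit the reservation, cap the number of pages to release at the current surplus, then release one page per iteration until the cap is reached or no node can give one back. A toy sketch of that control flow follows; the names and the always-succeeding release helper are invented purely for illustration.

#include <stdbool.h>
#include <stdio.h>

static unsigned long resv_pages = 10;
static unsigned long surplus_pages = 3;

/* stand-in for free_pool_huge_page(h, 1): succeeds while surplus remains */
static bool release_one_surplus_page(void)
{
        if (!surplus_pages)
                return false;
        surplus_pages--;
        return true;
}

static void return_unused(unsigned long unused_resv_pages)
{
        unsigned long nr_pages;

        resv_pages -= unused_resv_pages;        /* uncommit the reservation */
        nr_pages = unused_resv_pages < surplus_pages ?
                        unused_resv_pages : surplus_pages;      /* min() */

        while (nr_pages--) {
                if (!release_one_surplus_page())
                        break;  /* no node could free an unreserved page */
        }
}

int main(void)
{
        return_unused(5);       /* only 3 surplus pages exist, so 3 are freed */
        printf("resv=%lu surplus=%lu\n", resv_pages, surplus_pages);
        return 0;
}
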
 
@@ -1008,9 +1028,10 @@ int __weak alloc_bootmem_huge_page(struct hstate *h)
                void *addr;
 
                addr = __alloc_bootmem_node_nopanic(
-                               NODE_DATA(h->hugetlb_next_nid),
+                               NODE_DATA(h->next_nid_to_alloc),
                                huge_page_size(h), huge_page_size(h), 0);
 
+               hstate_next_node_to_alloc(h);
                if (addr) {
                        /*
                         * Use the beginning of the huge page to store the
@@ -1020,7 +1041,6 @@ int __weak alloc_bootmem_huge_page(struct hstate *h)
                        m = addr;
                        goto found;
                }
-               hstate_next_node(h);
                nr_nodes--;
        }
        return 0;
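
The point of moving the rotor advance above the success check is that the rotor now moves even when the bootmem allocation succeeds (the old code jumped to "found" before ever reaching hstate_next_node()), so successive boot-time huge pages are interleaved across nodes instead of piling onto one. A tiny userspace illustration of that ordering, where each loop pass stands for one call into the allocator and every node is assumed to succeed (the helper is invented for the sketch):

#include <stdio.h>

#define NODES 3
static int rotor;       /* stand-in for h->next_nid_to_alloc */

static void pretend_bootmem_alloc(int nid)
{
        printf("boot-time huge page placed on node %d\n", nid);
}

int main(void)
{
        for (int i = 0; i < 3; i++) {
                int nid = rotor;

                rotor = (rotor + 1) % NODES;    /* advance even on success */
                pretend_bootmem_alloc(nid);     /* lands on nodes 0, 1, 2 */
        }
        return 0;
}
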
@@ -1141,31 +1161,43 @@ static inline void try_to_free_low(struct hstate *h, unsigned long count)
  */
 static int adjust_pool_surplus(struct hstate *h, int delta)
 {
-       static int prev_nid;
-       int nid = prev_nid;
+       int start_nid, next_nid;
        int ret = 0;
 
        VM_BUG_ON(delta != -1 && delta != 1);
-       do {
-               nid = next_node(nid, node_online_map);
-               if (nid == MAX_NUMNODES)
-                       nid = first_node(node_online_map);
 
-               /* To shrink on this node, there must be a surplus page */
-               if (delta < 0 && !h->surplus_huge_pages_node[nid])
-                       continue;
-               /* Surplus cannot exceed the total number of pages */
-               if (delta > 0 && h->surplus_huge_pages_node[nid] >=
+       if (delta < 0)
+               start_nid = h->next_nid_to_alloc;
+       else
+               start_nid = h->next_nid_to_free;
+       next_nid = start_nid;
+
+       do {
+               int nid = next_nid;
+               if (delta < 0) {
+                       next_nid = hstate_next_node_to_alloc(h);
+                       /*
+                        * To shrink on this node, there must be a surplus page
+                        */
+                       if (!h->surplus_huge_pages_node[nid])
+                               continue;
+               }
+               if (delta > 0) {
+                       next_nid = hstate_next_node_to_free(h);
+                       /*
+                        * Surplus cannot exceed the total number of pages
+                        */
+                       if (h->surplus_huge_pages_node[nid] >=
                                                h->nr_huge_pages_node[nid])
-                       continue;
+                               continue;
+               }
 
                h->surplus_huge_pages += delta;
                h->surplus_huge_pages_node[nid] += delta;
                ret = 1;
                break;
-       } while (nid != prev_nid);
+       } while (next_nid != start_nid);
 
-       prev_nid = nid;
        return ret;
 }
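
adjust_pool_surplus() now shares the two rotors instead of keeping its own static prev_nid: shrinking the surplus (delta < 0) walks the allocation rotor and skips nodes with no surplus, while growing it (delta > 0) walks the free rotor and skips nodes whose pages are already all surplus, giving up after one full lap in either case. A compressed userspace sketch of that direction-dependent walk, with invented arrays standing in for the per-node counters and every node assumed online:

#include <stdbool.h>
#include <stdio.h>

#define NODES 3
static int surplus_node[NODES]  = { 0, 2, 0 };  /* surplus_huge_pages_node[] */
static int nr_pages_node[NODES] = { 4, 4, 4 };  /* nr_huge_pages_node[] */
static int next_nid_to_alloc, next_nid_to_free;

static int advance(int *rotor)
{
        *rotor = (*rotor + 1) % NODES;
        return *rotor;
}

static bool adjust_surplus(int delta)
{
        int *rotor = delta < 0 ? &next_nid_to_alloc : &next_nid_to_free;
        int start_nid = *rotor;
        int next_nid = start_nid;

        do {
                int nid = next_nid;

                next_nid = advance(rotor);
                if (delta < 0 && !surplus_node[nid])
                        continue;       /* nothing to shrink on this node */
                if (delta > 0 && surplus_node[nid] >= nr_pages_node[nid])
                        continue;       /* surplus cannot exceed total pages */
                surplus_node[nid] += delta;
                return true;
        } while (next_nid != start_nid);

        return false;
}

int main(void)
{
        adjust_surplus(-1);     /* only node 1 has surplus; the walk finds it */
        printf("surplus per node: %d %d %d\n",
               surplus_node[0], surplus_node[1], surplus_node[2]);
        return 0;
}
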
 
@@ -1227,10 +1259,8 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count)
        min_count = max(count, min_count);
        try_to_free_low(h, min_count);
        while (min_count < persistent_huge_pages(h)) {
-               struct page *page = dequeue_huge_page(h);
-               if (!page)
+               if (!free_pool_huge_page(h, 0))
                        break;
-               update_and_free_page(h, page);
        }
        while (count < persistent_huge_pages(h)) {
                if (!adjust_pool_surplus(h, 1))
@@ -1442,7 +1472,8 @@ void __init hugetlb_add_hstate(unsigned order)
        h->free_huge_pages = 0;
        for (i = 0; i < MAX_NUMNODES; ++i)
                INIT_LIST_HEAD(&h->hugepage_freelists[i]);
-       h->hugetlb_next_nid = first_node(node_online_map);
+       h->next_nid_to_alloc = first_node(node_online_map);
+       h->next_nid_to_free = first_node(node_online_map);
        snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
                                        huge_page_size(h)/1024);
 