mm/page_alloc.c: replace set_dma_reserve() with set_memory_reserve()
author    Srikar Dronamraju <srikar@linux.vnet.ibm.com>
          Sat, 10 Sep 2016 10:33:59 +0000 (20:33 +1000)
committer Stephen Rothwell <sfr@canb.auug.org.au>
          Sat, 10 Sep 2016 10:33:59 +0000 (20:33 +1000)
Expand the scope of the existing dma_reserve to accommodate other memory
reserves too.  Accordingly, rename the variable dma_reserve to
nr_memory_reserve.

set_memory_reserve() also takes a new boolean parameter that indicates
whether the passed value should be added to the current reserve or
should replace it.
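
To illustrate the new calling convention, here is a minimal stand-alone
sketch (not part of the patch); the page counts and the user-space
harness are invented purely to show the difference between inc=false
and inc=true:

	/* Stand-alone illustration of set_memory_reserve(nr_reserve, inc);
	 * the values and the main() harness are made up for this example. */
	#include <stdbool.h>
	#include <stdio.h>

	static unsigned long nr_memory_reserve;	/* mirrors the renamed variable */

	/* Same semantics as the patched set_memory_reserve() in mm/page_alloc.c */
	static void set_memory_reserve(unsigned long nr_reserve, bool inc)
	{
		if (inc)
			nr_memory_reserve += nr_reserve;	/* add to the current reserve */
		else
			nr_memory_reserve = nr_reserve;		/* replace the current reserve */
	}

	int main(void)
	{
		set_memory_reserve(1024, false);	/* establish a baseline, as e820 does */
		set_memory_reserve(256, true);		/* a later reserve adds to it */
		printf("%lu pages reserved\n", nr_memory_reserve);	/* prints 1280 */
		return 0;
	}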

Link: http://lkml.kernel.org/r/1470330729-6273-1-git-send-email-srikar@linux.vnet.ibm.com
Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Suggested-by: Mel Gorman <mgorman@techsingularity.net>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
Cc: Hari Bathini <hbathini@linux.vnet.ibm.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Balbir Singh <bsingharora@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
arch/x86/kernel/e820.c
include/linux/mm.h
mm/page_alloc.c

diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 621b501f89351146b84b090b7160166bb9d5907e..d935983ff90eeaf49bf2962185e605ee9b2db2c4 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -1188,6 +1188,6 @@ void __init memblock_find_dma_reserve(void)
                        nr_free_pages += end_pfn - start_pfn;
        }
 
-       set_dma_reserve(nr_pages - nr_free_pages);
+       set_memory_reserve(nr_pages - nr_free_pages, false);
 #endif
 }
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ef815b9cd42696bc70db1e9f35e39a9c295afacd..3bc861c5d7f19b21329e6ade0422442cfa4d0134 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1913,7 +1913,7 @@ extern int __meminit __early_pfn_to_nid(unsigned long pfn,
                                        struct mminit_pfnnid_cache *state);
 #endif
 
-extern void set_dma_reserve(unsigned long new_dma_reserve);
+extern void set_memory_reserve(unsigned long nr_reserve, bool inc);
 extern void memmap_init_zone(unsigned long, int, unsigned long,
                                unsigned long, enum memmap_context);
 extern void setup_per_zone_wmarks(void);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a2214c64ed3cd04dceaed7a579f593852e458df1..88ecf01d44a0e8483336e1534b5a270233d4ee9e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -254,7 +254,7 @@ int watermark_scale_factor = 10;
 
 static unsigned long __meminitdata nr_kernel_pages;
 static unsigned long __meminitdata nr_all_pages;
-static unsigned long __meminitdata dma_reserve;
+static unsigned long __meminitdata nr_memory_reserve;
 
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
@@ -5812,10 +5812,10 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat)
                }
 
                /* Account for reserved pages */
-               if (j == 0 && freesize > dma_reserve) {
-                       freesize -= dma_reserve;
+               if (j == 0 && freesize > nr_memory_reserve) {
+                       freesize -= nr_memory_reserve;
                        printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
-                                       zone_names[0], dma_reserve);
+                                       zone_names[0], nr_memory_reserve);
                }
 
                if (!is_highmem_idx(j))
@@ -6501,8 +6501,9 @@ void __init mem_init_print_info(const char *str)
 }
 
 /**
- * set_dma_reserve - set the specified number of pages reserved in the first zone
- * @new_dma_reserve: The number of pages to mark reserved
+ * set_memory_reserve - set number of pages reserved in the first zone
+ * @nr_reserve: The number of pages to mark reserved
+ * @inc: if true, add @nr_reserve to the current value; if false, replace it
  *
  * The per-cpu batchsize and zone watermarks are determined by managed_pages.
  * In the DMA zone, a significant percentage may be consumed by kernel image
@@ -6511,9 +6512,12 @@ void __init mem_init_print_info(const char *str)
  * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
  * smaller per-cpu batchsize.
  */
-void __init set_dma_reserve(unsigned long new_dma_reserve)
+void __init set_memory_reserve(unsigned long nr_reserve, bool inc)
 {
-       dma_reserve = new_dma_reserve;
+       if (inc)
+               nr_memory_reserve += nr_reserve;
+       else
+               nr_memory_reserve = nr_reserve;
 }
 
 void __init free_area_init(unsigned long *zones_size)