From 659e91ed45a2162ebac7fd1f2374dd6dcb3b2fa4 Mon Sep 17 00:00:00 2001
From: Srikar Dronamraju
Date: Sat, 10 Sep 2016 20:33:59 +1000
Subject: [PATCH] mm/page_alloc.c: replace set_dma_reserve with set_memory_reserve

Expand the scope of the existing dma_reserve to accommodate other memory
reserves too, and accordingly rename the variable dma_reserve to
nr_memory_reserve.

set_memory_reserve() also takes a new boolean parameter that indicates
whether the passed value should be added to the existing reserve or
should replace it.

Link: http://lkml.kernel.org/r/1470330729-6273-1-git-send-email-srikar@linux.vnet.ibm.com
Signed-off-by: Srikar Dronamraju
Suggested-by: Mel Gorman
Cc: Vlastimil Babka
Cc: Michal Hocko
Cc: Michael Ellerman
Cc: Mahesh Salgaonkar
Cc: Hari Bathini
Cc: Dave Hansen
Cc: Balbir Singh
Signed-off-by: Andrew Morton
---
 arch/x86/kernel/e820.c |  2 +-
 include/linux/mm.h     |  2 +-
 mm/page_alloc.c        | 20 ++++++++++++--------
 3 files changed, 14 insertions(+), 10 deletions(-)

diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 621b501f8935..d935983ff90e 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -1188,6 +1188,6 @@ void __init memblock_find_dma_reserve(void)
 		nr_free_pages += end_pfn - start_pfn;
 	}
 
-	set_dma_reserve(nr_pages - nr_free_pages);
+	set_memory_reserve(nr_pages - nr_free_pages, false);
 #endif
 }
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ef815b9cd426..3bc861c5d7f1 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1913,7 +1913,7 @@ extern int __meminit __early_pfn_to_nid(unsigned long pfn,
 					struct mminit_pfnnid_cache *state);
 #endif
 
-extern void set_dma_reserve(unsigned long new_dma_reserve);
+extern void set_memory_reserve(unsigned long nr_reserve, bool inc);
 extern void memmap_init_zone(unsigned long, int, unsigned long,
 				unsigned long, enum memmap_context);
 extern void setup_per_zone_wmarks(void);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a2214c64ed3c..88ecf01d44a0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -254,7 +254,7 @@ int watermark_scale_factor = 10;
 
 static unsigned long __meminitdata nr_kernel_pages;
 static unsigned long __meminitdata nr_all_pages;
-static unsigned long __meminitdata dma_reserve;
+static unsigned long __meminitdata nr_memory_reserve;
 
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
@@ -5812,10 +5812,10 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat)
 		}
 
 		/* Account for reserved pages */
-		if (j == 0 && freesize > dma_reserve) {
-			freesize -= dma_reserve;
+		if (j == 0 && freesize > nr_memory_reserve) {
+			freesize -= nr_memory_reserve;
 			printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
-					zone_names[0], dma_reserve);
+					zone_names[0], nr_memory_reserve);
 		}
 
 		if (!is_highmem_idx(j))
@@ -6501,8 +6501,9 @@ void __init mem_init_print_info(const char *str)
 }
 
 /**
- * set_dma_reserve - set the specified number of pages reserved in the first zone
- * @new_dma_reserve: The number of pages to mark reserved
+ * set_memory_reserve - set number of pages reserved in the first zone
+ * @nr_reserve: The number of pages to mark reserved
+ * @inc: true to increment the existing value; false to set a new value
  *
  * The per-cpu batchsize and zone watermarks are determined by managed_pages.
  * In the DMA zone, a significant percentage may be consumed by kernel image
@@ -6511,9 +6512,12 @@ void __init mem_init_print_info(const char *str)
  * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
  * smaller per-cpu batchsize.
  */
-void __init set_dma_reserve(unsigned long new_dma_reserve)
+void __init set_memory_reserve(unsigned long nr_reserve, bool inc)
 {
-	dma_reserve = new_dma_reserve;
+	if (inc)
+		nr_memory_reserve += nr_reserve;
+	else
+		nr_memory_reserve = nr_reserve;
 }
 
 void __init free_area_init(unsigned long *zones_size)
-- 
2.34.1
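
A usage sketch (illustration only; not part of the patch): the x86 caller
above keeps the old overwrite semantics by passing inc=false, while an
arch or subsystem that wants to account for additional unfreeable pages
on top of an existing reservation would pass inc=true. The caller name
below is hypothetical:

	/* Hypothetical caller, for illustration; not in this patch. */
	void __init arch_account_unfreeable_pages(unsigned long nr_pages)
	{
		/*
		 * Stack nr_pages on top of any pages already accounted
		 * (e.g. by memblock_find_dma_reserve()) rather than
		 * overwriting the earlier value.
		 */
		set_memory_reserve(nr_pages, true);
	}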