/*
 *  bootmem - A boot-time physical memory allocator and configurator
 *
 *  Copyright (C) 1999 Ingo Molnar
 *                1999 Kanoj Sarcar, SGI
 *
 * Access to this subsystem has to be serialized externally (which is true
 * for the boot process anyway).
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/range.h>
#include <linux/memblock.h>

#include <asm/bug.h>
#include <asm/io.h>
#include <asm/processor.h>

#include "internal.h"

#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
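
/*
 * Note (summary added for clarity, not from the original file): these
 * globals are set up by architecture code during early boot. min_low_pfn
 * is the lowest usable page frame number, max_low_pfn the highest page
 * frame of directly addressable low memory, and max_pfn the highest page
 * frame present in the system.
 */
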
static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
                                        u64 goal, u64 limit)
{
        void *ptr;
        u64 addr;

        if (limit > memblock.current_limit)
                limit = memblock.current_limit;

        addr = memblock_find_in_range_node(size, align, goal, limit, nid);
        if (!addr)
                return NULL;

        if (memblock_reserve(addr, size))
                return NULL;

        ptr = phys_to_virt(addr);
        memset(ptr, 0, size);
        /*
         * The min_count is set to 0 so that bootmem allocated blocks
         * are never reported as leaks.
         */
        kmemleak_alloc(ptr, size, 0, 0);

        return ptr;
}

/**
 * free_bootmem_late - free bootmem pages directly to page allocator
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are given directly
 * to the page allocator, no bootmem metadata is updated because it is gone.
 */
void __init free_bootmem_late(unsigned long addr, unsigned long size)
{
        unsigned long cursor, end;

        kmemleak_free_part(__va(addr), size);

        cursor = PFN_UP(addr);
        end = PFN_DOWN(addr + size);

        for (; cursor < end; cursor++) {
                __free_pages_bootmem(pfn_to_page(cursor), 0);
                totalram_pages++;
        }
}

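/*
 * Usage sketch (illustrative, not part of the original file): a typical
 * caller is architecture code releasing an initrd image after the boot
 * allocator has been torn down, assuming `start` and `end` bound the
 * physical range:
 *
 *      free_bootmem_late(start, end - start);
 *
 * Only whole pages inside the range reach the page allocator; partial
 * pages at either end are skipped by the PFN_UP()/PFN_DOWN() rounding.
 */
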
static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
        unsigned long order;

        while (start < end) {
                order = min(MAX_ORDER - 1UL, __ffs(start));

                while (start + (1UL << order) > end)
                        order--;

                __free_pages_bootmem(pfn_to_page(start), order);

                start += (1UL << order);
        }
}

static unsigned long __init __free_memory_core(phys_addr_t start,
                                 phys_addr_t end)
{
        unsigned long start_pfn = PFN_UP(start);
        unsigned long end_pfn = min_t(unsigned long,
                                      PFN_DOWN(end), max_low_pfn);

        if (start_pfn > end_pfn)
                return 0;

        __free_pages_memory(start_pfn, end_pfn);

        return end_pfn - start_pfn;
}

static unsigned long __init free_low_memory_core_early(void)
{
        unsigned long count = 0;
        phys_addr_t start, end, size;
        u64 i;

        for_each_free_mem_range(i, NUMA_NO_NODE, &start, &end, NULL)
                count += __free_memory_core(start, end);

        /* Free memblock.reserved array if it was allocated */
        size = get_allocated_memblock_reserved_regions_info(&start);
        if (size)
                count += __free_memory_core(start, start + size);

#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK

        /* Free memblock.memory array if it was allocated */
        size = get_allocated_memblock_memory_regions_info(&start);
        if (size)
                count += __free_memory_core(start, start + size);
#endif

        return count;
}

static int reset_managed_pages_done __initdata;

static inline void __init reset_node_managed_pages(pg_data_t *pgdat)
{
        struct zone *z;

        if (reset_managed_pages_done)
                return;

        for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
                z->managed_pages = 0;
}

void __init reset_all_zones_managed_pages(void)
{
        struct pglist_data *pgdat;

        for_each_online_pgdat(pgdat)
                reset_node_managed_pages(pgdat);
        reset_managed_pages_done = 1;
}

/**
 * free_all_bootmem - release free pages to the buddy allocator
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
        unsigned long pages;

        reset_all_zones_managed_pages();

        /*
         * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
         * because in some cases, e.g. when node 0 has no RAM installed,
         * the low memory will be on node 1.
         */
        pages = free_low_memory_core_early();
        totalram_pages += pages;

        return pages;
}

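/*
 * Usage sketch (illustrative, not from the original file): architectures
 * typically call this once from their mem_init(), after memblock knows
 * the final memory layout:
 *
 *      void __init mem_init(void)
 *      {
 *              free_all_bootmem();
 *      }
 *
 * totalram_pages is updated here, so callers need not add the return
 * value themselves.
 */
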
/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must reside completely on the specified node.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
                              unsigned long size)
{
        kmemleak_free_part(__va(physaddr), size);
        memblock_free(physaddr, size);
}

/**
 * free_bootmem - mark a page range as usable
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must be contiguous but may span node boundaries.
 */
void __init free_bootmem(unsigned long addr, unsigned long size)
{
        kmemleak_free_part(__va(addr), size);
        memblock_free(addr, size);
}

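/*
 * Usage sketch (illustrative): releasing a boot-time reservation that is
 * no longer needed, where `paddr` and `len` are hypothetical values
 * describing the region:
 *
 *      free_bootmem(paddr, len);
 *
 * Note that with memblock as the backend, free_bootmem_node() above is
 * equivalent; the pgdat argument is not needed to locate the range.
 */
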
static void * __init ___alloc_bootmem_nopanic(unsigned long size,
                                        unsigned long align,
                                        unsigned long goal,
                                        unsigned long limit)
{
        void *ptr;

        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc(size, GFP_NOWAIT);

restart:

        ptr = __alloc_memory_core_early(NUMA_NO_NODE, size, align, goal, limit);

        if (ptr)
                return ptr;

        if (goal != 0) {
                goal = 0;
                goto restart;
        }

        return NULL;
}

/**
 * __alloc_bootmem_nopanic - allocate boot memory without panicking
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * Returns NULL on failure.
 */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
                                        unsigned long goal)
{
        unsigned long limit = -1UL;

        return ___alloc_bootmem_nopanic(size, align, goal, limit);
}

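/*
 * Usage sketch (illustrative): an optional early table where failure is
 * tolerable; `table` and `nslots` are hypothetical:
 *
 *      table = __alloc_bootmem_nopanic(nslots * sizeof(*table),
 *                                      SMP_CACHE_BYTES, 0);
 *      if (!table)
 *              pr_warn("early table disabled\n");
 *
 * A goal of 0 places no lower bound on the search range.
 */
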
static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
                                        unsigned long goal, unsigned long limit)
{
        void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);

        if (mem)
                return mem;
        /*
         * Whoops, we cannot satisfy the allocation request.
         */
        printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
        panic("Out of memory");
        return NULL;
}

/**
 * __alloc_bootmem - allocate boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
                                unsigned long goal)
{
        unsigned long limit = -1UL;

        return ___alloc_bootmem(size, align, goal, limit);
}

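/*
 * Usage sketch (illustrative): the alloc_bootmem() convenience macros in
 * <linux/bootmem.h> reduce to calls of this form; `nr` and `struct entry`
 * are hypothetical:
 *
 *      ptr = __alloc_bootmem(nr * sizeof(struct entry), SMP_CACHE_BYTES, 0);
 *
 * Because the function panics on failure, the return value needs no
 * NULL check.
 */
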
void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
                                                unsigned long size,
                                                unsigned long align,
                                                unsigned long goal,
                                                unsigned long limit)
{
        void *ptr;

again:
        ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
                                        goal, limit);
        if (ptr)
                return ptr;

        ptr = __alloc_memory_core_early(NUMA_NO_NODE, size, align,
                                        goal, limit);
        if (ptr)
                return ptr;

        if (goal) {
                goal = 0;
                goto again;
        }

        return NULL;
}

void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
                                unsigned long align, unsigned long goal)
{
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

        return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
}

void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
                                    unsigned long align, unsigned long goal,
                                    unsigned long limit)
{
        void *ptr;

        ptr = ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, limit);
        if (ptr)
                return ptr;

        printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
        panic("Out of memory");
        return NULL;
}

/**
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
                                   unsigned long align, unsigned long goal)
{
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

        return ___alloc_bootmem_node(pgdat, size, align, goal, 0);
}

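/*
 * Usage sketch (illustrative): placing per-node data on the node it
 * describes, with automatic fallback to other nodes; `nid` and `map_size`
 * are hypothetical:
 *
 *      map = __alloc_bootmem_node(NODE_DATA(nid), map_size,
 *                                 SMP_CACHE_BYTES, 0);
 */
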
void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
                                   unsigned long align, unsigned long goal)
{
        return __alloc_bootmem_node(pgdat, size, align, goal);
}

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
#endif

/**
 * __alloc_bootmem_low - allocate low boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
                                  unsigned long goal)
{
        return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
}

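/*
 * Usage sketch (illustrative): an early buffer that must stay below
 * ARCH_LOW_ADDRESS_LIMIT, e.g. for devices limited to 32-bit DMA;
 * `bytes` is hypothetical:
 *
 *      buf = __alloc_bootmem_low(bytes, PAGE_SIZE, 0);
 */
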
void * __init __alloc_bootmem_low_nopanic(unsigned long size,
                                          unsigned long align,
                                          unsigned long goal)
{
        return ___alloc_bootmem_nopanic(size, align, goal,
                                        ARCH_LOW_ADDRESS_LIMIT);
}

/**
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
                                       unsigned long align, unsigned long goal)
{
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

        return ___alloc_bootmem_node(pgdat, size, align, goal,
                                     ARCH_LOW_ADDRESS_LIMIT);
}

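/*
 * Note on the slab_is_available() guards (commentary added for clarity):
 * once the slab allocator is up, the boot allocator interfaces are no
 * longer the right tool, so the wrappers above warn once and satisfy the
 * request with kzalloc()/kzalloc_node() instead of going through
 * memblock.
 */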