/*
 *  bootmem - A boot-time physical memory allocator and configurator
 *
 *  Copyright (C) 1999 Ingo Molnar
 *                1999 Kanoj Sarcar, SGI
 *
 * Access to this subsystem has to be serialized externally (which is true
 * for the boot process anyway).
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/kmemleak.h>
#include <linux/range.h>
#include <linux/memblock.h>

#include <asm/processor.h>

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;

#ifdef CONFIG_CRASH_DUMP
/*
 * If we have booted due to a crash, max_pfn will be a very low value. We need
 * to know the amount of memory that the previous kernel used.
 */
unsigned long saved_max_pfn;
#endif

/**
 * free_bootmem_late - free bootmem pages directly to the page allocator
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are given directly
 * to the page allocator; no bootmem metadata is updated because it is gone.
 */
void __init free_bootmem_late(unsigned long addr, unsigned long size)
{
	unsigned long cursor, end;

	kmemleak_free_part(__va(addr), size);

	/* free only the fully contained pages; partial pages stay reserved */
	cursor = PFN_UP(addr);
	end = PFN_DOWN(addr + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), 0);
		totalram_pages++;
	}
}

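/*
 * Minimal usage sketch (illustrative, not part of the original file): after
 * the bootmem allocator has been torn down, an early caller could hand a
 * no-longer-needed physical range straight to the buddy allocator.  The
 * address, size and function name below are invented for the example.
 */
#if 0
static void __init example_release_scratch(void)
{
	/* free a hypothetical 64 KiB scratch buffer at 16 MiB */
	free_bootmem_late(16UL << 20, 64UL << 10);
}
#endif
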
static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
	unsigned long i;
	unsigned long start_aligned, end_aligned;
	int order = ilog2(BITS_PER_LONG);

	/* round the range inward to BITS_PER_LONG-page alignment */
	start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
	end_aligned = end & ~(BITS_PER_LONG - 1);

	if (end_aligned <= start_aligned) {
		/* the range is too small to contain an aligned block */
		for (i = start; i < end; i++)
			__free_pages_bootmem(pfn_to_page(i), 0);

		return;
	}

	/* free the unaligned head page by page */
	for (i = start; i < start_aligned; i++)
		__free_pages_bootmem(pfn_to_page(i), 0);

	/* free the aligned middle in order-ilog2(BITS_PER_LONG) blocks */
	for (i = start_aligned; i < end_aligned; i += BITS_PER_LONG)
		__free_pages_bootmem(pfn_to_page(i), order);

	/* free the unaligned tail page by page */
	for (i = end_aligned; i < end; i++)
		__free_pages_bootmem(pfn_to_page(i), 0);
}

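/*
 * Worked example (illustrative comment, not in the original): with 4 KiB
 * pages and BITS_PER_LONG == 64, a call with start = 10 and end = 200 gives
 * start_aligned = 64 and end_aligned = 192.  PFNs 10..63 and 192..199 are
 * freed as order-0 pages, while 64..127 and 128..191 are freed as two
 * order-6 (64-page) blocks, which is far cheaper than 128 order-0 frees.
 */
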
unsigned long __init free_all_memory_core_early(int nodeid)
{
	int i;
	u64 start, end;
	unsigned long count = 0;
	struct range *range = NULL;
	int nr_range;

	nr_range = get_free_all_memory_range(&range, nodeid);

	for (i = 0; i < nr_range; i++) {
		start = range[i].start;
		end = range[i].end;
		count += end - start;
		__free_pages_memory(start, end);
	}

	return count;
}

/**
 * free_all_bootmem_node - release a node's free pages to the buddy allocator
 * @pgdat: node to be released
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
	register_page_bootmem_info_node(pgdat);

	/* free_all_memory_core_early(MAX_NUMNODES) will be called later */
	return 0;
}

/**
 * free_all_bootmem - release free pages to the buddy allocator
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
	/*
	 * We need to use MAX_NUMNODES instead of NODE_DATA(0)->node_id
	 * because in some cases (e.g. when Node0 has no RAM installed)
	 * low ram will be on Node1.  Using MAX_NUMNODES makes sure that
	 * all ranges in early_node_map[] are used, instead of only the
	 * Node0-related ones.
	 */
	return free_all_memory_core_early(MAX_NUMNODES);
}

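/*
 * Usage sketch (illustrative, not from the original file): architecture code
 * typically calls this once, near the end of mem_init(), to hand all
 * remaining boot memory to the buddy allocator.
 */
#if 0
static void __init example_mem_init(void)
{
	unsigned long released;

	released = free_all_bootmem();
	printk(KERN_INFO "released %lu pages to the buddy allocator\n",
	       released);
}
#endif
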
/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must reside completely on the specified node.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
			      unsigned long size)
{
	kmemleak_free_part(__va(physaddr), size);
	memblock_x86_free_range(physaddr, physaddr + size);
}

/**
 * free_bootmem - mark a page range as usable
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must be contiguous but may span node boundaries.
 */
void __init free_bootmem(unsigned long addr, unsigned long size)
{
	kmemleak_free_part(__va(addr), size);
	memblock_x86_free_range(addr, addr + size);
}

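/*
 * Usage sketch (illustrative, not from the original file): boot code that
 * reserved a region early on can return it once it is no longer needed.
 * The 1 MiB region at 8 MiB is invented for the example.
 */
#if 0
static void __init example_return_region(void)
{
	free_bootmem(8UL << 20, 1UL << 20);
}
#endif
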
static void * __init ___alloc_bootmem_nopanic(unsigned long size,
					unsigned long align,
					unsigned long goal,
					unsigned long limit)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc(size, GFP_NOWAIT);

restart:

	ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align, goal, limit);

	if (ptr)
		return ptr;

	/* retry once without the goal before giving up */
	if (goal != 0) {
		goal = 0;
		goto restart;
	}

	return NULL;
}

/**
 * __alloc_bootmem_nopanic - allocate boot memory without panicking
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * Returns NULL on failure.
 */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
					unsigned long goal)
{
	unsigned long limit = -1UL;

	return ___alloc_bootmem_nopanic(size, align, goal, limit);
}

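/*
 * Usage sketch (illustrative, not from the original file): the nopanic
 * variant must be paired with a NULL check, since it returns NULL instead
 * of panicking when boot memory is exhausted.
 */
#if 0
static void __init example_optional_buffer(void)
{
	void *buf;

	buf = __alloc_bootmem_nopanic(4096, SMP_CACHE_BYTES, 0);
	if (!buf)
		printk(KERN_WARNING "optional buffer unavailable, skipping\n");
}
#endif
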
static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);

	if (mem)
		return mem;
	/*
	 * Whoops, we cannot satisfy the allocation request.
	 */
	printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}

/**
 * __alloc_bootmem - allocate boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
				unsigned long goal)
{
	unsigned long limit = -1UL;

	return ___alloc_bootmem(size, align, goal, limit);
}

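/*
 * Usage sketch (illustrative, not from the original file): core code that
 * cannot proceed without its buffer uses the panicking variant and never
 * checks for NULL.
 */
#if 0
static void __init example_mandatory_table(void)
{
	/* the kernel panics if this request cannot be satisfied */
	void *table = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0);

	/* boot memory comes back zeroed, so the table is ready to use */
	(void)table;
}
#endif
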
/**
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * cannot hold the requested memory.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	/* try the requested node first, then fall back to any node */
	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
					goal, -1ULL);
	if (ptr)
		return ptr;

	return __alloc_memory_core_early(MAX_NUMNODES, size, align,
					goal, -1ULL);
}

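/*
 * Usage sketch (illustrative, not from the original file): per-node data is
 * best allocated near the node that will use it, e.g. while setting up
 * node-local structures during early boot.
 */
#if 0
static void __init example_node_local(int nid)
{
	void *data;

	data = __alloc_bootmem_node(NODE_DATA(nid), PAGE_SIZE,
				    SMP_CACHE_BYTES, 0);
	(void)data;
}
#endif
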
void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
#ifdef MAX_DMA32_PFN
	unsigned long end_pfn;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	/* update the goal according to MAX_DMA32_PFN */
	end_pfn = pgdat->node_start_pfn + pgdat->node_spanned_pages;

	if (end_pfn > MAX_DMA32_PFN + (128 >> (20 - PAGE_SHIFT)) &&
	    (goal >> PAGE_SHIFT) < MAX_DMA32_PFN) {
		void *ptr;
		unsigned long new_goal;

		/* prefer memory above the DMA32 boundary */
		new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
		ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
						new_goal, -1ULL);
		if (ptr)
			return ptr;
	}
#endif

	return __alloc_bootmem_node(pgdat, size, align, goal);
}

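/*
 * Worked reading of the condition above (illustrative comment, not in the
 * original): with 4 KiB pages, PAGE_SHIFT == 12, so 128 >> (20 - PAGE_SHIFT)
 * is 128 >> 8 == 0 and the first clause reduces to end_pfn > MAX_DMA32_PFN.
 * The goal is only bumped above 4 GiB when the node actually extends past
 * the DMA32 boundary and the caller asked for memory below it, leaving the
 * scarce memory under 4 GiB for devices limited to DMA32 addressing.
 */
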
#ifdef CONFIG_SPARSEMEM
/**
 * alloc_bootmem_section - allocate boot memory from a specific section
 * @size: size of the request in bytes
 * @section_nr: sparse map section to allocate from
 *
 * Returns NULL on failure.
 */
void * __init alloc_bootmem_section(unsigned long size,
				    unsigned long section_nr)
{
	unsigned long pfn, goal, limit;

	/* constrain the allocation to the physical span of the section */
	pfn = section_nr_to_pfn(section_nr);
	goal = pfn << PAGE_SHIFT;
	limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;

	return __alloc_memory_core_early(early_pfn_to_nid(pfn), size,
					 SMP_CACHE_BYTES, goal, limit);
}
#endif

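/*
 * Usage sketch (illustrative, not from the original file): sparse memory
 * code can place a section's metadata inside the section itself, so that
 * hot-removing the section also removes its bookkeeping.
 */
#if 0
static void __init example_section_private(unsigned long section_nr)
{
	void *priv;

	priv = alloc_bootmem_section(sizeof(unsigned long), section_nr);
	if (!priv)
		printk(KERN_WARNING "section %lu: no room for metadata\n",
		       section_nr);
}
#endif
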
void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	/* try the requested node first, then fall back to any node */
	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
					goal, -1ULL);
	if (ptr)
		return ptr;

	return __alloc_bootmem_nopanic(size, align, goal);
}

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT	0xffffffffUL
#endif

/**
 * __alloc_bootmem_low - allocate low boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
				  unsigned long goal)
{
	return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
}

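/*
 * Usage sketch (illustrative, not from the original file): buffers for
 * devices that can only address 32-bit physical memory must come from below
 * ARCH_LOW_ADDRESS_LIMIT.
 */
#if 0
static void __init example_low_dma_buffer(void)
{
	/* 64 KiB, cache-line aligned, guaranteed below the low-memory limit */
	void *buf = __alloc_bootmem_low(64UL << 10, SMP_CACHE_BYTES, 0);

	(void)buf;
}
#endif
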
/**
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * cannot hold the requested memory.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
				       unsigned long align, unsigned long goal)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	/* try the requested node first, then fall back to any node */
	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
					goal, ARCH_LOW_ADDRESS_LIMIT);
	if (ptr)
		return ptr;

	return __alloc_memory_core_early(MAX_NUMNODES, size, align,
					goal, ARCH_LOW_ADDRESS_LIMIT);
}