/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm-generic/sections.h>
static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_REGIONS,

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};
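/*
 * Illustrative usage sketch (not part of the original file): early
 * architecture setup code typically populates these tables along the
 * following lines; the addresses are purely hypothetical.
 *
 *	memblock_add(0x00000000, SZ_1G);		// register 1 GiB of RAM
 *	memblock_reserve(__pa(_text), _end - _text);	// keep the kernel image
 *	addr = memblock_alloc(SZ_1M, PAGE_SIZE);	// early 1 MiB allocation
 */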
int memblock_debug __initdata_memblock;
#ifdef CONFIG_MOVABLE_NODE
bool movable_node_enabled __initdata_memblock = false;
#endif
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

/* inline so we don't get a warning when pr_debug is compiled out */
static __init_memblock const char *
memblock_type_name(struct memblock_type *type)
{
	if (type == &memblock.memory)
		return "memory";
	else if (type == &memblock.reserved)
		return "reserved";
	else
		return "unknown";
}
/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, (phys_addr_t)ULLONG_MAX - base);
}
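/*
 * Worked example (added for illustration): with a 32-bit phys_addr_t,
 * (phys_addr_t)ULLONG_MAX truncates to 0xffffffff. For base = 0xfffff000
 * and *size = 0x2000 the sum would wrap, so *size is clamped to
 * 0xffffffff - 0xfffff000 = 0xfff and base + *size stays representable.
 */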
/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}
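/*
 * Illustrative note (not in the original source): the test treats regions
 * as half-open intervals, so [0x1000, 0x2000) and [0x2000, 0x3000) merely
 * touch and do not overlap, while [0x1000, 0x2000) and [0x1fff, 0x3000) do.
 */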
static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;
		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < type->cnt) ? i : -1;
}
/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %MAX_NUMNODES for any node
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}

/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %MAX_NUMNODES for any node
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}
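/*
 * Worked example (illustrative): in a free range [0x10000, 0x20000) with
 * size = 0x4000 and align = 0x1000, the bottom-up helper returns
 * round_up(0x10000, 0x1000) = 0x10000, while the top-down helper returns
 * round_down(0x20000 - 0x4000, 0x1000) = 0x1c000.
 */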
/**
 * memblock_find_in_range_node - find free area in given range and node
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %MAX_NUMNODES for any node
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * When allocation direction is bottom-up, @start should be greater
 * than the end of the kernel image. Otherwise, it will be trimmed. The
 * reason is that we want the bottom-up allocation just near the kernel
 * image so it is highly likely that the allocated memory and the kernel
 * will reside in the same node.
 *
 * If bottom-up allocation fails, we will try to allocate memory top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align, int nid)
{
	phys_addr_t ret;
	phys_addr_t kernel_end;

	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);
	kernel_end = __pa_symbol(_end);

	/*
	 * try bottom-up allocation only when bottom-up mode
	 * is set and @end is above the kernel image.
	 */
	if (memblock_bottom_up() && end > kernel_end) {
		phys_addr_t bottom_up_start;

		/* make sure we will allocate above the kernel */
		bottom_up_start = max(start, kernel_end);

		/* ok, try bottom-up allocation first */
		ret = __memblock_find_range_bottom_up(bottom_up_start, end,
						      size, align, nid);
		if (ret)
			return ret;

		/*
		 * we always limit bottom-up allocation above the kernel,
		 * but top-down allocation doesn't have the limit, so
		 * retrying top-down allocation may succeed when bottom-up
		 * allocation failed.
		 *
		 * bottom-up allocation is expected to fail very rarely,
		 * so we use WARN_ONCE() here to see the stack trace if
		 * that happens.
		 */
		WARN_ONCE(1, "memblock: bottom-up allocation failed, "
			     "memory hotunplug may be affected\n");
	}

	return __memblock_find_range_top_down(start, end, size, align, nid);
}

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	return memblock_find_in_range_node(start, end, size, align,
					   MAX_NUMNODES);
}
static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
					phys_addr_t *addr)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	*addr = __pa(memblock.reserved.regions);

	return PAGE_ALIGN(sizeof(struct memblock_region) *
			  memblock.reserved.max);
}
/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start,@new_area_start+@new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * RETURNS:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();
	int *in_slab;

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new array aligned to PAGE_SIZE,
	 * so we can free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab_is_available() and we use it or
	 * we use MEMBLOCK for allocations. That means that this is unsafe to
	 * use when bootmem is currently active (unless bootmem itself is
	 * implemented on top of MEMBLOCK which isn't the case yet)
	 *
	 * This should however not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab
	 * is active for memory hotplug operations
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}

	memblock_dbg("memblock: %s is doubled to %ld at [%#010llx-%#010llx]",
			memblock_type_name(type), type->max * 2, (u64)addr,
			(u64)addr + new_size - 1);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_alloc_size);

	/*
	 * Reserve the new array if that comes from the memblock. Otherwise,
	 * we needn't do it.
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}
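/*
 * Worked example (illustrative, assuming a hypothetical 32-byte
 * sizeof(struct memblock_region) and 4 KiB pages): doubling a type from
 * 128 to 256 entries gives old_size = 4096 and new_size = 8192; both are
 * already page-aligned, which is what later allows the old array to be
 * handed back whole via memblock_free(..., old_alloc_size).
 */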
/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
	}
}
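/*
 * Illustrative example (not in the original source): two regions
 * [0x1000, 0x2000) and [0x2000, 0x3000) with the same node id and flags
 * collapse into a single [0x1000, 0x3000) entry; if either the node ids
 * or the flags differ, both entries are kept and the cursor just advances.
 */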
/**
 * memblock_insert_region - insert new memblock region
 * @type: memblock type to insert into
 * @idx: index for the insertion point
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: node id of the new region
 * @flags: flags of the new region
 *
 * Insert new memblock region [@base,@base+@size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid, unsigned long flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}
/**
 * memblock_add_region - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base,@base+@size) into @type. The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions. @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_add_region(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, unsigned long flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int i, nr_new;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice. Once with %false @insert and
	 * then with %true. The first counts the number of regions needed
	 * to accommodate the new area. The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps. If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
			nr_new++;
			if (insert)
				memblock_insert_region(type, i++, base,
						       rbase - base, nid,
						       flags);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, i, base, end - base,
					       nid, flags);
	}

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}

int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				      int nid)
{
	return memblock_add_region(&memblock.memory, base, size, nid, 0);
}

int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	return memblock_add_region(&memblock.memory, base, size,
				   MAX_NUMNODES, 0);
}
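/*
 * Worked example (illustrative): adding [0x1000, 0x5000) on top of an
 * existing [0x2000, 0x3000) leaves the old entry untouched and inserts the
 * uncovered pieces [0x1000, 0x2000) and [0x3000, 0x5000); the first pass
 * only counts nr_new = 2 so the array can be grown if needed, the second
 * pass inserts, and memblock_merge_regions() then folds compatible
 * neighbours back together.
 */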
/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base,@base+@size). Crossing regions are split at the boundaries,
 * which may create at most two more regions. The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int i;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below. Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, i, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above. Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, i--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = i;
			*end_rgn = i + 1;
		}
	}

	return 0;
}
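/*
 * Worked example (illustrative): isolating [0x1500, 0x2500) inside a single
 * region [0x1000, 0x3000) splits it into [0x1000, 0x1500), [0x1500, 0x2500)
 * and [0x2500, 0x3000); *start_rgn/*end_rgn then bracket just the middle
 * entry so callers can operate on exactly the requested range.
 */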
static int __init_memblock __memblock_remove(struct memblock_type *type,
					     phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.memory, base, size);
}
int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("   memblock_free: [%#016llx-%#016llx] %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size - 1,
		     (void *)_RET_IP_);

	return __memblock_remove(&memblock.reserved, base, size);
}

static int __init_memblock memblock_reserve_region(phys_addr_t base,
						   phys_addr_t size,
						   int nid,
						   unsigned long flags)
{
	struct memblock_type *_rgn = &memblock.reserved;

	memblock_dbg("memblock_reserve: [%#016llx-%#016llx] flags %#02lx %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size - 1,
		     flags, (void *)_RET_IP_);

	return memblock_add_region(_rgn, base, size, nid, flags);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	return memblock_reserve_region(base, size, MAX_NUMNODES, 0);
}
/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * This function isolates region [@base, @base + @size), and marks it with
 * flag MEMBLOCK_HOTPLUG.
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_flags(&type->regions[i], MEMBLOCK_HOTPLUG);

	memblock_merge_regions(type);
	return 0;
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * This function isolates region [@base, @base + @size), and clears flag
 * MEMBLOCK_HOTPLUG for the isolated regions.
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_clear_region_flags(&type->regions[i],
					    MEMBLOCK_HOTPLUG);

	memblock_merge_regions(type);
	return 0;
}
/**
 * __next_free_mem_range - next function for for_each_free_mem_range()
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first free area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration. The lower 32bit of
 * *@idx contains index into memory region and the upper 32bit indexes the
 * areas before each reserved region. For example, if reserved regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __init_memblock __next_free_mem_range(u64 *idx, int nid,
					   phys_addr_t *out_start,
					   phys_addr_t *out_end, int *out_nid)
{
	struct memblock_type *mem = &memblock.memory;
	struct memblock_type *rsv = &memblock.reserved;
	int mi = *idx & 0xffffffff;
	int ri = *idx >> 32;

	for ( ; mi < mem->cnt; mi++) {
		struct memblock_region *m = &mem->regions[mi];
		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;

		/* only memory regions are associated with nodes, check it */
		if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m))
			continue;

		/* scan areas before each reservation for intersection */
		for ( ; ri < rsv->cnt + 1; ri++) {
			struct memblock_region *r = &rsv->regions[ri];
			phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0;
			phys_addr_t r_end = ri < rsv->cnt ? r->base : ULLONG_MAX;

			/* if ri advanced past mi, break out to advance mi */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = memblock_get_region_node(m);
				/*
				 * The region which ends first is advanced
				 * for the next iteration.
				 */
				if (m_end <= r_end)
					mi++;
				else
					ri++;
				*idx = (u32)mi | (u64)ri << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
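/*
 * Worked example (illustrative): *idx = 0x0000000200000001 decodes as
 * mi = 1 (low 32 bits, memory region index) and ri = 2 (high 32 bits,
 * index of the gap before reserved region 2), so iteration resumes
 * exactly where the previous call left off.
 */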
/**
 * __next_free_mem_range_rev - next function for for_each_free_mem_range_reverse()
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Reverse of __next_free_mem_range().
 *
 * Linux kernel cannot migrate pages used by itself. Memory hotplug users won't
 * be able to hot-remove hotpluggable memory used by the kernel. So this
 * function skips hotpluggable regions if needed when allocating memory for the
 * kernel.
 */
void __init_memblock __next_free_mem_range_rev(u64 *idx, int nid,
					   phys_addr_t *out_start,
					   phys_addr_t *out_end, int *out_nid)
{
	struct memblock_type *mem = &memblock.memory;
	struct memblock_type *rsv = &memblock.reserved;
	int mi = *idx & 0xffffffff;
	int ri = *idx >> 32;

	if (*idx == (u64)ULLONG_MAX) {
		mi = mem->cnt - 1;
		ri = rsv->cnt;
	}

	for ( ; mi >= 0; mi--) {
		struct memblock_region *m = &mem->regions[mi];
		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;

		/* only memory regions are associated with nodes, check it */
		if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m))
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		/* scan areas before each reservation for intersection */
		for ( ; ri >= 0; ri--) {
			struct memblock_region *r = &rsv->regions[ri];
			phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0;
			phys_addr_t r_end = ri < rsv->cnt ? r->base : ULLONG_MAX;

			/* if ri advanced past mi, break out to advance mi */
			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = memblock_get_region_node(m);

				if (m_start >= r_start)
					mi--;
				else
					ri--;
				*idx = (u32)mi | (u64)ri << 32;
				return;
			}
		}
	}

	*idx = ULLONG_MAX;
}
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * Common iterator interface used to define for_each_mem_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
				unsigned long *out_start_pfn,
				unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r->nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r->nid;
}

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base,@base+@size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      struct memblock_type *type, int nid)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_node(&type->regions[i], nid);

	memblock_merge_regions(type);
	return 0;
}
#endif	/* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t max_addr,
					int nid)
{
	phys_addr_t found;

	if (!align)
		align = __alignof__(long long);

	/* align @size to avoid excessive fragmentation on reserved array */
	size = round_up(size, align);

	found = memblock_find_in_range_node(0, max_addr, size, align, nid);
	if (found && !memblock_reserve(found, size))
		return found;

	return 0;
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	return memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	return memblock_alloc_base_nid(size, align, max_addr, MAX_NUMNODES);
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
/*
 * Remaining API functions
 */

phys_addr_t __init memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}

phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
{
	unsigned long pages = 0;
	struct memblock_region *r;
	unsigned long start_pfn, end_pfn;

	for_each_memblock(memory, r) {
		start_pfn = memblock_region_memory_base_pfn(r);
		end_pfn = memblock_region_memory_end_pfn(r);
		start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
		end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
		pages += end_pfn - start_pfn;
	}

	return (phys_addr_t)pages << PAGE_SHIFT;
}
/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	unsigned long i;
	phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;

	if (!limit)
		return;

	/* find out max address */
	for (i = 0; i < memblock.memory.cnt; i++) {
		struct memblock_region *r = &memblock.memory.regions[i];

		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	/* truncate both memory and reserved regions */
	__memblock_remove(&memblock.memory, max_addr, (phys_addr_t)ULLONG_MAX);
	__memblock_remove(&memblock.reserved, max_addr, (phys_addr_t)ULLONG_MAX);
}
static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}
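/*
 * Illustrative example (not in the original source): with sorted regions
 * { [0x1000, 0x2000), [0x3000, 0x4000) }, memblock_search() returns 0 for
 * addr 0x1800, 1 for 0x3000, and -1 for 0x2800, which falls in the hole
 * between the two entries.
 */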
int __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

int __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
			 unsigned long *start_pfn, unsigned long *end_pfn)
{
	struct memblock_type *type = &memblock.memory;
	int mid = memblock_search(type, (phys_addr_t)pfn << PAGE_SHIFT);

	if (mid == -1)
		return -1;

	*start_pfn = type->regions[mid].base >> PAGE_SHIFT;
	*end_pfn = (type->regions[mid].base + type->regions[mid].size)
			>> PAGE_SHIFT;

	return type->regions[mid].nid;
}
#endif	/* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) is a subset of a memory block.
 *
 * RETURNS:
 * 0 if false, non-zero if true
 */
int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return 0;
	return memblock.memory.regions[idx].base <= base &&
		(memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= end;
}

/**
 * memblock_is_region_reserved - check if a region intersects reserved memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) intersects a reserved memory block.
 *
 * RETURNS:
 * 0 if false, non-zero if true
 */
int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	memblock_cap_size(base, &size);
	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}
void __init_memblock memblock_trim_memory(phys_addr_t align)
{
	int i;
	phys_addr_t start, end, orig_start, orig_end;
	struct memblock_type *mem = &memblock.memory;

	for (i = 0; i < mem->cnt; i++) {
		orig_start = mem->regions[i].base;
		orig_end = mem->regions[i].base + mem->regions[i].size;
		start = round_up(orig_start, align);
		end = round_down(orig_end, align);

		if (start == orig_start && end == orig_end)
			continue;

		if (start < end) {
			mem->regions[i].base = start;
			mem->regions[i].size = end - start;
		} else {
			memblock_remove_region(mem, i);
			i--;
		}
	}
}

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}
static void __init_memblock memblock_dump(struct memblock_type *type, char *name)
{
	unsigned long long base, size;
	unsigned long flags;
	int i;

	pr_info(" %s.cnt = 0x%lx\n", name, type->cnt);

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
		flags = rgn->flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s flags: %#lx\n",
			name, i, base, base + size - 1, size, nid_buf, flags);
	}
}

void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %#llx reserved size = %#llx\n",
		(unsigned long long)memblock.memory.total_size,
		(unsigned long long)memblock.reserved.total_size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}

void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);
#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		seq_printf(m, "%4d: ", i);
		if (sizeof(phys_addr_t) == 4)
			seq_printf(m, "0x%08lx..0x%08lx\n",
				   (unsigned long)reg->base,
				   (unsigned long)(reg->base + reg->size - 1));
		else
			seq_printf(m, "0x%016llx..0x%016llx\n",
				   (unsigned long long)reg->base,
				   (unsigned long long)(reg->base + reg->size - 1));
	}
	return 0;
}

static int memblock_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, memblock_debug_show, inode->i_private);
}

static const struct file_operations memblock_debug_fops = {
	.open = memblock_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);
	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */