/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.     June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm-generic/sections.h>

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
#endif
struct memblock memblock __initdata_memblock = {
        .memory.regions         = memblock_memory_init_regions,
        .memory.cnt             = 1,    /* empty dummy entry */
        .memory.max             = INIT_MEMBLOCK_REGIONS,

        .reserved.regions       = memblock_reserved_init_regions,
        .reserved.cnt           = 1,    /* empty dummy entry */
        .reserved.max           = INIT_MEMBLOCK_REGIONS,

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
        .physmem.regions        = memblock_physmem_init_regions,
        .physmem.cnt            = 1,    /* empty dummy entry */
        .physmem.max            = INIT_PHYSMEM_REGIONS,
#endif

        .current_limit          = MEMBLOCK_ALLOC_ANYWHERE,
};

int memblock_debug __initdata_memblock;
#ifdef CONFIG_MOVABLE_NODE
bool movable_node_enabled __initdata_memblock = false;
#endif

static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

ulong __init_memblock choose_memblock_flags(void)
{
        return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}
/* inline so we don't get a warning when pr_debug is compiled out */
static __init_memblock const char *
memblock_type_name(struct memblock_type *type)
{
        if (type == &memblock.memory)
                return "memory";
        else if (type == &memblock.reserved)
                return "reserved";
        else
                return "unknown";
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
        return *size = min(*size, (phys_addr_t)ULLONG_MAX - base);
}
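/*
 * Worked example (added for illustration, not part of the original source):
 * with a 64-bit phys_addr_t, a caller passing base = ULLONG_MAX - 0x1000 and
 * *size = 0x3000 would otherwise wrap around the top of the address space;
 * memblock_cap_size() clamps *size to ULLONG_MAX - base = 0x1000 so that the
 * callers below can safely compute end = base + memblock_cap_size(base, &size).
 */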
/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
                                       phys_addr_t base2, phys_addr_t size2)
{
        return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
                                        phys_addr_t base, phys_addr_t size)
{
        unsigned long i;

        for (i = 0; i < type->cnt; i++) {
                phys_addr_t rgnbase = type->regions[i].base;
                phys_addr_t rgnsize = type->regions[i].size;
                if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
                        break;
        }

        return (i < type->cnt) ? i : -1;
}
/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
                                phys_addr_t size, phys_addr_t align, int nid,
                                ulong flags)
{
        phys_addr_t this_start, this_end, cand;
        u64 i;

        for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
                this_start = clamp(this_start, start, end);
                this_end = clamp(this_end, start, end);

                cand = round_up(this_start, align);
                if (cand < this_end && this_end - cand >= size)
                        return cand;
        }

        return 0;
}

/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
                               phys_addr_t size, phys_addr_t align, int nid,
                               ulong flags)
{
        phys_addr_t this_start, this_end, cand;
        u64 i;

        for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
                                        NULL) {
                this_start = clamp(this_start, start, end);
                this_end = clamp(this_end, start, end);

                if (this_end < size)
                        continue;

                cand = round_down(this_end - size, align);
                if (cand >= this_start)
                        return cand;
        }

        return 0;
}
/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * When allocation direction is bottom-up, the @start should be greater
 * than the end of the kernel image. Otherwise, it will be trimmed. The
 * reason is that we want the bottom-up allocation just near the kernel
 * image so it is highly likely that the allocated memory and the kernel
 * will reside in the same node.
 *
 * If bottom-up allocation failed, will try to allocate memory top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
                                        phys_addr_t align, phys_addr_t start,
                                        phys_addr_t end, int nid, ulong flags)
{
        phys_addr_t kernel_end, ret;

        if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
                end = memblock.current_limit;

        /* avoid allocating the first page */
        start = max_t(phys_addr_t, start, PAGE_SIZE);
        end = max(start, end);
        kernel_end = __pa_symbol(_end);

        /*
         * try bottom-up allocation only when bottom-up mode
         * is set and @end is above the kernel image.
         */
        if (memblock_bottom_up() && end > kernel_end) {
                phys_addr_t bottom_up_start;

                /* make sure we will allocate above the kernel */
                bottom_up_start = max(start, kernel_end);

                /* ok, try bottom-up allocation first */
                ret = __memblock_find_range_bottom_up(bottom_up_start, end,
                                                      size, align, nid, flags);
                if (ret)
                        return ret;

                /*
                 * we always limit bottom-up allocation above the kernel,
                 * but top-down allocation doesn't have the limit, so
                 * retrying top-down allocation may succeed when bottom-up
                 * allocation failed.
                 *
                 * bottom-up allocation is expected to fail very rarely,
                 * so we use WARN_ONCE() here to see the stack trace if
                 * it happens.
                 */
                WARN_ONCE(1, "memblock: bottom-up allocation failed, "
                             "memory hotunplug may be affected\n");
        }

        return __memblock_find_range_top_down(start, end, size, align, nid,
                                              flags);
}
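/*
 * Illustrative sketch (not part of the original file): a caller that wants
 * allocations to land close to the kernel image would first enable bottom-up
 * mode and then search as usual. memblock_set_bottom_up() is assumed here to
 * be the helper declared in <linux/memblock.h>, and SZ_1M is used purely as
 * an example size:
 *
 *      memblock_set_bottom_up(true);
 *      addr = memblock_find_in_range_node(SZ_1M, PAGE_SIZE, 0,
 *                                         MEMBLOCK_ALLOC_ACCESSIBLE,
 *                                         NUMA_NO_NODE, MEMBLOCK_NONE);
 *
 * If the bottom-up pass above the kernel fails, the function falls back to
 * the top-down search and emits the WARN_ONCE() seen above.
 */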
/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
                                        phys_addr_t end, phys_addr_t size,
                                        phys_addr_t align)
{
        phys_addr_t ret;
        ulong flags = choose_memblock_flags();

again:
        ret = memblock_find_in_range_node(size, align, start, end,
                                          NUMA_NO_NODE, flags);

        if (!ret && (flags & MEMBLOCK_MIRROR)) {
                pr_warn("Could not allocate %pap bytes of mirrored memory\n",
                        &size);
                flags &= ~MEMBLOCK_MIRROR;
                goto again;
        }

        return ret;
}
static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
        type->total_size -= type->regions[r].size;
        memmove(&type->regions[r], &type->regions[r + 1],
                (type->cnt - (r + 1)) * sizeof(type->regions[r]));
        type->cnt--;

        /* Special case for empty arrays */
        if (type->cnt == 0) {
                WARN_ON(type->total_size != 0);
                type->cnt = 1;
                type->regions[0].base = 0;
                type->regions[0].size = 0;
                type->regions[0].flags = 0;
                memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
        }
}
#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK

phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
                                        phys_addr_t *addr)
{
        if (memblock.reserved.regions == memblock_reserved_init_regions)
                return 0;

        *addr = __pa(memblock.reserved.regions);

        return PAGE_ALIGN(sizeof(struct memblock_region) *
                          memblock.reserved.max);
}

phys_addr_t __init_memblock get_allocated_memblock_memory_regions_info(
                                        phys_addr_t *addr)
{
        if (memblock.memory.regions == memblock_memory_init_regions)
                return 0;

        *addr = __pa(memblock.memory.regions);

        return PAGE_ALIGN(sizeof(struct memblock_region) *
                          memblock.memory.max);
}

#endif
/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start,@new_area_start+@new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * RETURNS:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
                                                phys_addr_t new_area_start,
                                                phys_addr_t new_area_size)
{
        struct memblock_region *new_array, *old_array;
        phys_addr_t old_alloc_size, new_alloc_size;
        phys_addr_t old_size, new_size, addr;
        int use_slab = slab_is_available();
        int *in_slab;

        /* We don't allow resizing until we know about the reserved regions
         * of memory that aren't suitable for allocation
         */
        if (!memblock_can_resize)
                return -1;

        /* Calculate new doubled size */
        old_size = type->max * sizeof(struct memblock_region);
        new_size = old_size << 1;
        /*
         * We need to allocate the new one aligned to PAGE_SIZE,
         * so we can free them completely later.
         */
        old_alloc_size = PAGE_ALIGN(old_size);
        new_alloc_size = PAGE_ALIGN(new_size);

        /* Retrieve the slab flag */
        if (type == &memblock.memory)
                in_slab = &memblock_memory_in_slab;
        else
                in_slab = &memblock_reserved_in_slab;

        /* Try to find some space for it.
         *
         * WARNING: We assume that either slab_is_available() and we use it or
         * we use MEMBLOCK for allocations. That means that this is unsafe to
         * use when bootmem is currently active (unless bootmem itself is
         * implemented on top of MEMBLOCK which isn't the case yet)
         *
         * This should however not be an issue for now, as we currently only
         * call into MEMBLOCK while it's still active, or much later when slab
         * is active for memory hotplug operations
         */
        if (use_slab) {
                new_array = kmalloc(new_size, GFP_KERNEL);
                addr = new_array ? __pa(new_array) : 0;
        } else {
                /* only exclude range when trying to double reserved.regions */
                if (type != &memblock.reserved)
                        new_area_start = new_area_size = 0;

                addr = memblock_find_in_range(new_area_start + new_area_size,
                                                memblock.current_limit,
                                                new_alloc_size, PAGE_SIZE);
                if (!addr && new_area_size)
                        addr = memblock_find_in_range(0,
                                min(new_area_start, memblock.current_limit),
                                new_alloc_size, PAGE_SIZE);

                new_array = addr ? __va(addr) : NULL;
        }
        if (!addr) {
                pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
                       memblock_type_name(type), type->max, type->max * 2);
                return -1;
        }

        memblock_dbg("memblock: %s is doubled to %ld at [%#010llx-%#010llx]",
                        memblock_type_name(type), type->max * 2, (u64)addr,
                        (u64)addr + new_size - 1);

        /*
         * Found space, we now need to move the array over before we add the
         * reserved region since it may be our reserved array itself that is
         * full.
         */
        memcpy(new_array, type->regions, old_size);
        memset(new_array + type->max, 0, old_size);
        old_array = type->regions;
        type->regions = new_array;
        type->max <<= 1;

        /* Free old array. We needn't free it if the array is the static one */
        if (*in_slab)
                kfree(old_array);
        else if (old_array != memblock_memory_init_regions &&
                 old_array != memblock_reserved_init_regions)
                memblock_free(__pa(old_array), old_alloc_size);

        /*
         * Reserve the new array if that comes from the memblock. Otherwise, we
         * needn't do it.
         */
        if (!use_slab)
                BUG_ON(memblock_reserve(addr, new_alloc_size));

        /* Update slab flag */
        *in_slab = use_slab;

        return 0;
}
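/*
 * Sizing note (added for illustration): the arrays always double in entry
 * count, but the backing allocations are rounded up to whole pages so the old
 * array can later be freed page by page. Assuming, for example, a 32-byte
 * struct memblock_region and the default 128 INIT_MEMBLOCK_REGIONS, old_size
 * would be 4 KiB and new_alloc_size PAGE_ALIGN(8 KiB); the exact numbers
 * depend on the architecture and configuration.
 */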
/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
        int i = 0;

        /* cnt never goes below 1 */
        while (i < type->cnt - 1) {
                struct memblock_region *this = &type->regions[i];
                struct memblock_region *next = &type->regions[i + 1];

                if (this->base + this->size != next->base ||
                    memblock_get_region_node(this) !=
                    memblock_get_region_node(next) ||
                    this->flags != next->flags) {
                        BUG_ON(this->base + this->size > next->base);
                        i++;
                        continue;
                }

                this->size += next->size;
                /* move forward from next + 1, index of which is i + 2 */
                memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
                type->cnt--;
        }
}
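/*
 * Example of the merge rule above (illustrative values only): two regions
 * [0x1000-0x2000) and [0x2000-0x3000) with the same node id and flags are
 * collapsed into a single [0x1000-0x3000) entry, while a gap between them, a
 * differing node id, or differing flags (e.g. MEMBLOCK_HOTPLUG set on only
 * one of them) keeps them as separate entries.
 */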
/**
 * memblock_insert_region - insert new memblock region
 * @type: memblock type to insert into
 * @idx: index for the insertion point
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: node id of the new region
 * @flags: flags of the new region
 *
 * Insert new memblock region [@base,@base+@size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
                                                   int idx, phys_addr_t base,
                                                   phys_addr_t size,
                                                   int nid, unsigned long flags)
{
        struct memblock_region *rgn = &type->regions[idx];

        BUG_ON(type->cnt >= type->max);
        memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
        rgn->base = base;
        rgn->size = size;
        rgn->flags = flags;
        memblock_set_region_node(rgn, nid);
        type->cnt++;
        type->total_size += size;
}
/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base,@base+@size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_range(struct memblock_type *type,
                                phys_addr_t base, phys_addr_t size,
                                int nid, unsigned long flags)
{
        bool insert = false;
        phys_addr_t obase = base;
        phys_addr_t end = base + memblock_cap_size(base, &size);
        int i, nr_new;

        if (!size)
                return 0;

        /* special case for empty array */
        if (type->regions[0].size == 0) {
                WARN_ON(type->cnt != 1 || type->total_size);
                type->regions[0].base = base;
                type->regions[0].size = size;
                type->regions[0].flags = flags;
                memblock_set_region_node(&type->regions[0], nid);
                type->total_size = size;
                return 0;
        }
repeat:
        /*
         * The following is executed twice.  Once with %false @insert and
         * then with %true.  The first counts the number of regions needed
         * to accommodate the new area.  The second actually inserts them.
         */
        base = obase;
        nr_new = 0;

        for (i = 0; i < type->cnt; i++) {
                struct memblock_region *rgn = &type->regions[i];
                phys_addr_t rbase = rgn->base;
                phys_addr_t rend = rbase + rgn->size;

                if (rbase >= end)
                        break;
                if (rend <= base)
                        continue;
                /*
                 * @rgn overlaps.  If it separates the lower part of new
                 * area, insert that portion.
                 */
                if (rbase > base) {
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
                        WARN_ON(nid != memblock_get_region_node(rgn));
#endif
                        nr_new++;
                        if (insert)
                                memblock_insert_region(type, i++, base,
                                                       rbase - base, nid,
                                                       flags);
                }
                /* area below @rend is dealt with, forget about it */
                base = min(rend, end);
        }

        /* insert the remaining portion */
        if (base < end) {
                nr_new++;
                if (insert)
                        memblock_insert_region(type, i, base, end - base,
                                               nid, flags);
        }

        /*
         * If this was the first round, resize array and repeat for actual
         * insertions; otherwise, merge and return.
         */
        if (!insert) {
                while (type->cnt + nr_new > type->max)
                        if (memblock_double_array(type, obase, size) < 0)
                                return -ENOMEM;
                insert = true;
                goto repeat;
        } else {
                memblock_merge_regions(type);
                return 0;
        }
}
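/*
 * Illustration of the two-pass scheme above (values are made up): adding
 * [0x1000-0x5000) to a type that already contains [0x2000-0x3000) first
 * counts the two uncovered pieces [0x1000-0x2000) and [0x3000-0x5000)
 * (nr_new == 2) without touching the array, grows the array if needed, and
 * only then repeats the walk with @insert true to actually insert them;
 * memblock_merge_regions() finally collapses the three compatible neighbours
 * into one region.
 */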
int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
                                      int nid)
{
        return memblock_add_range(&memblock.memory, base, size, nid, 0);
}

static int __init_memblock memblock_add_region(phys_addr_t base,
                                                phys_addr_t size,
                                                int nid,
                                                unsigned long flags)
{
        struct memblock_type *_rgn = &memblock.memory;

        memblock_dbg("memblock_add: [%#016llx-%#016llx] flags %#02lx %pF\n",
                     (unsigned long long)base,
                     (unsigned long long)base + size - 1,
                     flags, (void *)_RET_IP_);

        return memblock_add_range(_rgn, base, size, nid, flags);
}

int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
        return memblock_add_region(base, size, MAX_NUMNODES, 0);
}
/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base,@base+@size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
                                        phys_addr_t base, phys_addr_t size,
                                        int *start_rgn, int *end_rgn)
{
        phys_addr_t end = base + memblock_cap_size(base, &size);
        int i;

        *start_rgn = *end_rgn = 0;

        if (!size)
                return 0;

        /* we'll create at most two more regions */
        while (type->cnt + 2 > type->max)
                if (memblock_double_array(type, base, size) < 0)
                        return -ENOMEM;

        for (i = 0; i < type->cnt; i++) {
                struct memblock_region *rgn = &type->regions[i];
                phys_addr_t rbase = rgn->base;
                phys_addr_t rend = rbase + rgn->size;

                if (rbase >= end)
                        break;
                if (rend <= base)
                        continue;

                if (rbase < base) {
                        /*
                         * @rgn intersects from below.  Split and continue
                         * to process the next region - the new top half.
                         */
                        rgn->base = base;
                        rgn->size -= base - rbase;
                        type->total_size -= base - rbase;
                        memblock_insert_region(type, i, rbase, base - rbase,
                                               memblock_get_region_node(rgn),
                                               rgn->flags);
                } else if (rend > end) {
                        /*
                         * @rgn intersects from above.  Split and redo the
                         * current region - the new bottom half.
                         */
                        rgn->base = end;
                        rgn->size -= end - rbase;
                        type->total_size -= end - rbase;
                        memblock_insert_region(type, i--, rbase, end - rbase,
                                               memblock_get_region_node(rgn),
                                               rgn->flags);
                } else {
                        /* @rgn is fully contained, record it */
                        if (!*end_rgn)
                                *start_rgn = i;
                        *end_rgn = i + 1;
                }
        }

        return 0;
}
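/*
 * Example of the splitting behaviour (illustrative numbers): isolating
 * [0x2000-0x6000) in a type holding a single region [0x0000-0x8000) splits it
 * into [0x0000-0x2000), [0x2000-0x6000) and [0x6000-0x8000); *start_rgn then
 * refers to the middle region and *end_rgn to the index one past it, so
 * callers such as memblock_remove_range() below can operate on exactly the
 * isolated slice.
 */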
int __init_memblock memblock_remove_range(struct memblock_type *type,
                                          phys_addr_t base, phys_addr_t size)
{
        int start_rgn, end_rgn;
        int i, ret;

        ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
        if (ret)
                return ret;

        for (i = end_rgn - 1; i >= start_rgn; i--)
                memblock_remove_region(type, i);
        return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
        return memblock_remove_range(&memblock.memory, base, size);
}

int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
        memblock_dbg("   memblock_free: [%#016llx-%#016llx] %pF\n",
                     (unsigned long long)base,
                     (unsigned long long)base + size - 1,
                     (void *)_RET_IP_);

        kmemleak_free_part(__va(base), size);
        return memblock_remove_range(&memblock.reserved, base, size);
}

static int __init_memblock memblock_reserve_region(phys_addr_t base,
                                                    phys_addr_t size,
                                                    int nid,
                                                    unsigned long flags)
{
        struct memblock_type *type = &memblock.reserved;

        memblock_dbg("memblock_reserve: [%#016llx-%#016llx] flags %#02lx %pF\n",
                     (unsigned long long)base,
                     (unsigned long long)base + size - 1,
                     flags, (void *)_RET_IP_);

        return memblock_add_range(type, base, size, nid, flags);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
        return memblock_reserve_region(base, size, MAX_NUMNODES, 0);
}
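/*
 * Typical early-boot usage of the add/reserve pair above (illustrative only,
 * addresses are made up): platform code registers the RAM it discovered and
 * then carves out ranges that must never reach the page allocator, e.g.
 * firmware tables or an initrd image:
 *
 *      memblock_add(0x80000000, SZ_512M);
 *      memblock_reserve(0x80000000, SZ_1M);
 *
 * Anything present in memblock.memory but not in memblock.reserved is later
 * released to the buddy allocator by the architecture's free_all_bootmem()
 * path.
 */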
/*
 * This function isolates region [@base, @base + @size), and sets/clears flag.
 *
 * Return 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
                                phys_addr_t size, int set, int flag)
{
        struct memblock_type *type = &memblock.memory;
        int i, ret, start_rgn, end_rgn;

        ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
        if (ret)
                return ret;

        for (i = start_rgn; i < end_rgn; i++)
                if (set)
                        memblock_set_region_flags(&type->regions[i], flag);
                else
                        memblock_clear_region_flags(&type->regions[i], flag);

        memblock_merge_regions(type);
        return 0;
}
/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
        return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
        return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
        system_has_some_mirror = true;

        return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
}
/**
 * __next_reserved_mem_region - next function for for_each_reserved_region()
 * @idx: pointer to u64 loop variable
 * @out_start: ptr to phys_addr_t for start address of the region, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the region, can be %NULL
 *
 * Iterate over all reserved memory regions.
 */
void __init_memblock __next_reserved_mem_region(u64 *idx,
                                           phys_addr_t *out_start,
                                           phys_addr_t *out_end)
{
        struct memblock_type *rsv = &memblock.reserved;

        if (*idx >= 0 && *idx < rsv->cnt) {
                struct memblock_region *r = &rsv->regions[*idx];
                phys_addr_t base = r->base;
                phys_addr_t size = r->size;

                if (out_start)
                        *out_start = base;
                if (out_end)
                        *out_end = base + size - 1;

                *idx += 1;
                return;
        }

        /* signal end of iteration */
        *idx = ULLONG_MAX;
}
/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b.  For example, if type_b regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __init_memblock __next_mem_range(u64 *idx, int nid, ulong flags,
                                      struct memblock_type *type_a,
                                      struct memblock_type *type_b,
                                      phys_addr_t *out_start,
                                      phys_addr_t *out_end, int *out_nid)
{
        int idx_a = *idx & 0xffffffff;
        int idx_b = *idx >> 32;

        if (WARN_ONCE(nid == MAX_NUMNODES,
            "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
                nid = NUMA_NO_NODE;

        for (; idx_a < type_a->cnt; idx_a++) {
                struct memblock_region *m = &type_a->regions[idx_a];

                phys_addr_t m_start = m->base;
                phys_addr_t m_end = m->base + m->size;
                int m_nid = memblock_get_region_node(m);

                /* only memory regions are associated with nodes, check it */
                if (nid != NUMA_NO_NODE && nid != m_nid)
                        continue;

                /* skip hotpluggable memory regions if needed */
                if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
                        continue;

                /* if we want mirror memory skip non-mirror memory regions */
                if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
                        continue;

                if (!type_b) {
                        if (out_start)
                                *out_start = m_start;
                        if (out_end)
                                *out_end = m_end;
                        if (out_nid)
                                *out_nid = m_nid;
                        idx_a++;
                        *idx = (u32)idx_a | (u64)idx_b << 32;
                        return;
                }

                /* scan areas before each reservation */
                for (; idx_b < type_b->cnt + 1; idx_b++) {
                        struct memblock_region *r;
                        phys_addr_t r_start;
                        phys_addr_t r_end;

                        r = &type_b->regions[idx_b];
                        r_start = idx_b ? r[-1].base + r[-1].size : 0;
                        r_end = idx_b < type_b->cnt ?
                                r->base : ULLONG_MAX;

                        /*
                         * if idx_b advanced past idx_a,
                         * break out to advance idx_a
                         */
                        if (r_start >= m_end)
                                break;
                        /* if the two regions intersect, we're done */
                        if (m_start < r_end) {
                                if (out_start)
                                        *out_start =
                                                max(m_start, r_start);
                                if (out_end)
                                        *out_end = min(m_end, r_end);
                                if (out_nid)
                                        *out_nid = m_nid;
                                /*
                                 * The region which ends first is
                                 * advanced for the next iteration.
                                 */
                                if (m_end <= r_end)
                                        idx_a++;
                                else
                                        idx_b++;
                                *idx = (u32)idx_a | (u64)idx_b << 32;
                                return;
                        }
                }
        }

        /* signal end of iteration */
        *idx = ULLONG_MAX;
}
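/*
 * Note on the *@idx encoding used above (added for clarity): the cursor packs
 * both positions into one u64, so a value of, say, 0x0000000200000001 means
 * idx_a == 1 (second entry of @type_a) and idx_b == 2 (the gap before the
 * third entry of @type_b). Decoding is the same arithmetic the function uses:
 *
 *      idx_a = *idx & 0xffffffff;
 *      idx_b = *idx >> 32;
 */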
/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Reverse of __next_mem_range().
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid, ulong flags,
                                          struct memblock_type *type_a,
                                          struct memblock_type *type_b,
                                          phys_addr_t *out_start,
                                          phys_addr_t *out_end, int *out_nid)
{
        int idx_a = *idx & 0xffffffff;
        int idx_b = *idx >> 32;

        if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
                nid = NUMA_NO_NODE;

        if (*idx == (u64)ULLONG_MAX) {
                idx_a = type_a->cnt - 1;
                idx_b = type_b->cnt;
        }

        for (; idx_a >= 0; idx_a--) {
                struct memblock_region *m = &type_a->regions[idx_a];

                phys_addr_t m_start = m->base;
                phys_addr_t m_end = m->base + m->size;
                int m_nid = memblock_get_region_node(m);

                /* only memory regions are associated with nodes, check it */
                if (nid != NUMA_NO_NODE && nid != m_nid)
                        continue;

                /* skip hotpluggable memory regions if needed */
                if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
                        continue;

                /* if we want mirror memory skip non-mirror memory regions */
                if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
                        continue;

                if (!type_b) {
                        if (out_start)
                                *out_start = m_start;
                        if (out_end)
                                *out_end = m_end;
                        if (out_nid)
                                *out_nid = m_nid;
                        idx_a--;
                        *idx = (u32)idx_a | (u64)idx_b << 32;
                        return;
                }

                /* scan areas before each reservation */
                for (; idx_b >= 0; idx_b--) {
                        struct memblock_region *r;
                        phys_addr_t r_start;
                        phys_addr_t r_end;

                        r = &type_b->regions[idx_b];
                        r_start = idx_b ? r[-1].base + r[-1].size : 0;
                        r_end = idx_b < type_b->cnt ?
                                r->base : ULLONG_MAX;
                        /*
                         * if idx_b advanced past idx_a,
                         * break out to advance idx_a
                         */
                        if (r_end <= m_start)
                                break;
                        /* if the two regions intersect, we're done */
                        if (m_end > r_start) {
                                if (out_start)
                                        *out_start = max(m_start, r_start);
                                if (out_end)
                                        *out_end = min(m_end, r_end);
                                if (out_nid)
                                        *out_nid = m_nid;
                                if (m_start >= r_start)
                                        idx_a--;
                                else
                                        idx_b--;
                                *idx = (u32)idx_a | (u64)idx_b << 32;
                                return;
                        }
                }
        }

        /* signal end of iteration */
        *idx = ULLONG_MAX;
}
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * Common iterator interface used to define for_each_mem_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
                                unsigned long *out_start_pfn,
                                unsigned long *out_end_pfn, int *out_nid)
{
        struct memblock_type *type = &memblock.memory;
        struct memblock_region *r;

        while (++*idx < type->cnt) {
                r = &type->regions[*idx];

                if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
                        continue;
                if (nid == MAX_NUMNODES || nid == r->nid)
                        break;
        }
        if (*idx >= type->cnt) {
                *idx = -1;
                return;
        }

        if (out_start_pfn)
                *out_start_pfn = PFN_UP(r->base);
        if (out_end_pfn)
                *out_end_pfn = PFN_DOWN(r->base + r->size);
        if (out_nid)
                *out_nid = r->nid;
}
/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base,@base+@size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
                                      struct memblock_type *type, int nid)
{
        int start_rgn, end_rgn;
        int i, ret;

        ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
        if (ret)
                return ret;

        for (i = start_rgn; i < end_rgn; i++)
                memblock_set_region_node(&type->regions[i], nid);

        memblock_merge_regions(type);
        return 0;
}
#endif	/* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
                                        phys_addr_t align, phys_addr_t start,
                                        phys_addr_t end, int nid, ulong flags)
{
        phys_addr_t found;

        if (!align)
                align = SMP_CACHE_BYTES;

        found = memblock_find_in_range_node(size, align, start, end, nid,
                                            flags);
        if (found && !memblock_reserve(found, size)) {
                /*
                 * The min_count is set to 0 so that memblock allocations are
                 * never reported as leaks.
                 */
                kmemleak_alloc(__va(found), size, 0, 0);
                return found;
        }
        return 0;
}

phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
                                        phys_addr_t start, phys_addr_t end,
                                        ulong flags)
{
        return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
                                        flags);
}

static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
                                        phys_addr_t align, phys_addr_t max_addr,
                                        int nid, ulong flags)
{
        return memblock_alloc_range_nid(size, align, 0, max_addr, nid, flags);
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
        ulong flags = choose_memblock_flags();
        phys_addr_t ret;

again:
        ret = memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE,
                                      nid, flags);

        if (!ret && (flags & MEMBLOCK_MIRROR)) {
                flags &= ~MEMBLOCK_MIRROR;
                goto again;
        }
        return ret;
}

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
        return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE,
                                       MEMBLOCK_NONE);
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
        phys_addr_t alloc;

        alloc = __memblock_alloc_base(size, align, max_addr);

        if (alloc == 0)
                panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
                      (unsigned long long) size, (unsigned long long) max_addr);

        return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
        return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
        phys_addr_t res = memblock_alloc_nid(size, align, nid);

        if (res)
                return res;
        return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
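/*
 * Illustrative use of the physical allocators above (not from the original
 * source): __memblock_alloc_base() returns 0 on failure and lets the caller
 * decide, while memblock_alloc_base() panics, so a caller with a fallback
 * might write:
 *
 *      phys_addr_t pa = __memblock_alloc_base(SZ_64K, SZ_64K, SZ_4G);
 *      if (!pa)
 *              pa = memblock_alloc(SZ_64K, SZ_64K);
 *
 * SZ_64K and SZ_4G are used purely as example size/limit values.
 */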
/**
 * memblock_virt_alloc_internal - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region to allocate (phys address)
 * @max_addr: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * The @min_addr limit is dropped if it can not be satisfied and the allocation
 * will fall back to memory below @min_addr. Also, allocation may fall back
 * to any node in the system if the specified node can not
 * hold the requested memory.
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE.
 *
 * The memory block is aligned on SMP_CACHE_BYTES if @align == 0.
 *
 * The phys address of allocated boot memory block is converted to virtual and
 * allocated memory is reset to 0.
 *
 * In addition, function sets the min_count to 0 using kmemleak_alloc for
 * allocated boot memory block, so that it is never reported as leaks.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
static void * __init memblock_virt_alloc_internal(
                                phys_addr_t size, phys_addr_t align,
                                phys_addr_t min_addr, phys_addr_t max_addr,
                                int nid)
{
        phys_addr_t alloc;
        void *ptr;
        ulong flags = choose_memblock_flags();

        if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
                nid = NUMA_NO_NODE;

        /*
         * Detect any accidental use of these APIs after slab is ready, as at
         * this moment memblock may be deinitialized already and its
         * internal data may be destroyed (after execution of free_all_bootmem)
         */
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, nid);

        if (!align)
                align = SMP_CACHE_BYTES;

        if (max_addr > memblock.current_limit)
                max_addr = memblock.current_limit;

again:
        alloc = memblock_find_in_range_node(size, align, min_addr, max_addr,
                                            nid, flags);
        if (alloc)
                goto done;

        if (nid != NUMA_NO_NODE) {
                alloc = memblock_find_in_range_node(size, align, min_addr,
                                                    max_addr, NUMA_NO_NODE,
                                                    flags);
                if (alloc)
                        goto done;
        }

        if (min_addr) {
                min_addr = 0;
                goto again;
        }

        if (flags & MEMBLOCK_MIRROR) {
                flags &= ~MEMBLOCK_MIRROR;
                pr_warn("Could not allocate %pap bytes of mirrored memory\n",
                        &size);
                goto again;
        }

        return NULL;
done:
        memblock_reserve(alloc, size);
        ptr = phys_to_virt(alloc);
        memset(ptr, 0, size);

        /*
         * The min_count is set to 0 so that bootmem allocated blocks
         * are never reported as leaks. This is because many of these blocks
         * are only referred via the physical address which is not
         * looked up by kmemleak.
         */
        kmemleak_alloc(ptr, size, 0, 0);

        return ptr;
}
/**
 * memblock_virt_alloc_try_nid_nopanic - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public version of _memblock_virt_alloc_try_nid_nopanic() which provides
 * additional debug information (including caller info), if enabled.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid_nopanic(
                                phys_addr_t size, phys_addr_t align,
                                phys_addr_t min_addr, phys_addr_t max_addr,
                                int nid)
{
        memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
                     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
                     (u64)max_addr, (void *)_RET_IP_);
        return memblock_virt_alloc_internal(size, align, min_addr,
                                             max_addr, nid);
}

/**
 * memblock_virt_alloc_try_nid - allocate boot memory block with panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public panicking version of _memblock_virt_alloc_try_nid_nopanic()
 * which provides debug information (including caller info), if enabled,
 * and panics if the request can not be satisfied.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid(
                        phys_addr_t size, phys_addr_t align,
                        phys_addr_t min_addr, phys_addr_t max_addr,
                        int nid)
{
        void *ptr;

        memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
                     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
                     (u64)max_addr, (void *)_RET_IP_);
        ptr = memblock_virt_alloc_internal(size, align,
                                           min_addr, max_addr, nid);
        if (ptr)
                return ptr;

        panic("%s: Failed to allocate %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx\n",
              __func__, (u64)size, (u64)align, nid, (u64)min_addr,
              (u64)max_addr);
        return NULL;
}
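/*
 * Illustrative use of the boot-time virtual allocators above (not from the
 * original source): the _nopanic variant returns NULL on failure while the
 * plain variant panics, so early code that can degrade gracefully might do:
 *
 *      table = memblock_virt_alloc_try_nid_nopanic(size, PAGE_SIZE, 0,
 *                                                  BOOTMEM_ALLOC_ACCESSIBLE,
 *                                                  NUMA_NO_NODE);
 *      if (!table)
 *              pr_warn("falling back to a smaller table\n");
 *
 * The returned memory is already zeroed, as described in
 * memblock_virt_alloc_internal() above.
 */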
/**
 * __memblock_free_early - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_virt_alloc_xx() API.
 * The freeing memory will not be released to the buddy allocator.
 */
void __init __memblock_free_early(phys_addr_t base, phys_addr_t size)
{
        memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
                     __func__, (u64)base, (u64)base + size - 1,
                     (void *)_RET_IP_);
        kmemleak_free_part(__va(base), size);
        memblock_remove_range(&memblock.reserved, base, size);
}

/**
 * __memblock_free_late - free bootmem block pages directly to buddy allocator
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are released directly
 * to the buddy allocator, no bootmem metadata is updated because it is gone.
 */
void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
{
        u64 cursor, end;

        memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
                     __func__, (u64)base, (u64)base + size - 1,
                     (void *)_RET_IP_);
        kmemleak_free_part(__va(base), size);
        cursor = PFN_UP(base);
        end = PFN_DOWN(base + size);

        for (; cursor < end; cursor++) {
                __free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
                totalram_pages++;
        }
}
/*
 * Remaining API functions
 */

phys_addr_t __init memblock_phys_mem_size(void)
{
        return memblock.memory.total_size;
}

phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
{
        unsigned long pages = 0;
        struct memblock_region *r;
        unsigned long start_pfn, end_pfn;

        for_each_memblock(memory, r) {
                start_pfn = memblock_region_memory_base_pfn(r);
                end_pfn = memblock_region_memory_end_pfn(r);
                start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
                end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
                pages += end_pfn - start_pfn;
        }

        return PFN_PHYS(pages);
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
        return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
        int idx = memblock.memory.cnt - 1;

        return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}
void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
        phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;
        struct memblock_region *r;

        if (!limit)
                return;

        /* find out max address */
        for_each_memblock(memory, r) {
                if (limit <= r->size) {
                        max_addr = r->base + limit;
                        break;
                }
                limit -= r->size;
        }

        /* truncate both memory and reserved regions */
        memblock_remove_range(&memblock.memory, max_addr,
                              (phys_addr_t)ULLONG_MAX);
        memblock_remove_range(&memblock.reserved, max_addr,
                              (phys_addr_t)ULLONG_MAX);
}
static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
        unsigned int left = 0, right = type->cnt;

        do {
                unsigned int mid = (right + left) / 2;

                if (addr < type->regions[mid].base)
                        right = mid;
                else if (addr >= (type->regions[mid].base +
                                  type->regions[mid].size))
                        left = mid + 1;
                else
                        return mid;
        } while (left < right);
        return -1;
}

int __init memblock_is_reserved(phys_addr_t addr)
{
        return memblock_search(&memblock.reserved, addr) != -1;
}

int __init_memblock memblock_is_memory(phys_addr_t addr)
{
        return memblock_search(&memblock.memory, addr) != -1;
}
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
                         unsigned long *start_pfn, unsigned long *end_pfn)
{
        struct memblock_type *type = &memblock.memory;
        int mid = memblock_search(type, PFN_PHYS(pfn));

        if (mid == -1)
                return -1;

        *start_pfn = PFN_DOWN(type->regions[mid].base);
        *end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);

        return type->regions[mid].nid;
}
#endif
/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) is a subset of a memory block.
 *
 * RETURNS:
 * 0 if false, non-zero if true
 */
int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
        int idx = memblock_search(&memblock.memory, base);
        phys_addr_t end = base + memblock_cap_size(base, &size);

        if (idx == -1)
                return 0;
        return memblock.memory.regions[idx].base <= base &&
                (memblock.memory.regions[idx].base +
                 memblock.memory.regions[idx].size) >= end;
}

/**
 * memblock_is_region_reserved - check if a region intersects reserved memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) intersects a reserved memory block.
 *
 * RETURNS:
 * 0 if false, non-zero if true
 */
int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
        memblock_cap_size(base, &size);
        return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}
void __init_memblock memblock_trim_memory(phys_addr_t align)
{
        phys_addr_t start, end, orig_start, orig_end;
        struct memblock_region *r;

        for_each_memblock(memory, r) {
                orig_start = r->base;
                orig_end = r->base + r->size;
                start = round_up(orig_start, align);
                end = round_down(orig_end, align);

                if (start == orig_start && end == orig_end)
                        continue;

                if (start < end) {
                        r->base = start;
                        r->size = end - start;
                } else {
                        memblock_remove_region(&memblock.memory,
                                               r - memblock.memory.regions);
                        r--;
                }
        }
}
void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
        memblock.current_limit = limit;
}

phys_addr_t __init_memblock memblock_get_current_limit(void)
{
        return memblock.current_limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type, char *name)
{
        unsigned long long base, size;
        unsigned long flags;
        int i;

        pr_info(" %s.cnt  = 0x%lx\n", name, type->cnt);

        for (i = 0; i < type->cnt; i++) {
                struct memblock_region *rgn = &type->regions[i];
                char nid_buf[32] = "";

                base = rgn->base;
                size = rgn->size;
                flags = rgn->flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
                if (memblock_get_region_node(rgn) != MAX_NUMNODES)
                        snprintf(nid_buf, sizeof(nid_buf), " on node %d",
                                 memblock_get_region_node(rgn));
#endif
                pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s flags: %#lx\n",
                        name, i, base, base + size - 1, size, nid_buf, flags);
        }
}

void __init_memblock __memblock_dump_all(void)
{
        pr_info("MEMBLOCK configuration:\n");
        pr_info(" memory size = %#llx reserved size = %#llx\n",
                (unsigned long long)memblock.memory.total_size,
                (unsigned long long)memblock.reserved.total_size);

        memblock_dump(&memblock.memory, "memory");
        memblock_dump(&memblock.reserved, "reserved");
}

void __init memblock_allow_resize(void)
{
        memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
        if (p && strstr(p, "debug"))
                memblock_debug = 1;
        return 0;
}
early_param("memblock", early_memblock);
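/*
 * Usage note (added for clarity): booting with "memblock=debug" on the kernel
 * command line sets memblock_debug above, which turns the memblock_dbg()
 * calls sprinkled through this file into printk output so the early
 * add/reserve/free traffic can be inspected from the console log.
 */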
#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
        struct memblock_type *type = m->private;
        struct memblock_region *reg;
        int i;

        for (i = 0; i < type->cnt; i++) {
                reg = &type->regions[i];
                seq_printf(m, "%4d: ", i);
                if (sizeof(phys_addr_t) == 4)
                        seq_printf(m, "0x%08lx..0x%08lx\n",
                                   (unsigned long)reg->base,
                                   (unsigned long)(reg->base + reg->size - 1));
                else
                        seq_printf(m, "0x%016llx..0x%016llx\n",
                                   (unsigned long long)reg->base,
                                   (unsigned long long)(reg->base + reg->size - 1));
        }
        return 0;
}

static int memblock_debug_open(struct inode *inode, struct file *file)
{
        return single_open(file, memblock_debug_show, inode->i_private);
}

static const struct file_operations memblock_debug_fops = {
        .open = memblock_debug_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int __init memblock_init_debugfs(void)
{
        struct dentry *root = debugfs_create_dir("memblock", NULL);

        if (!root)
                return -ENXIO;
        debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
        debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
        debugfs_create_file("physmem", S_IRUGO, root, &memblock.physmem, &memblock_debug_fops);
#endif

        return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */
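/*
 * Usage note (added for clarity): with CONFIG_DEBUG_FS enabled and debugfs
 * mounted in the usual place, the files created above can be read at runtime,
 * e.g.:
 *
 *      # cat /sys/kernel/debug/memblock/memory
 *         0: 0x0000000000001000..0x000000007fffffff
 *
 * The sample output line is illustrative; the real contents depend on the
 * platform's memory map.
 */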