/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

struct memblock memblock __initdata_memblock;

int memblock_debug __initdata_memblock;
int memblock_can_resize __initdata_memblock;
static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock;

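/*
 * Each static array carries one spare slot: memblock_init() writes a
 * RED_INACTIVE poison value into the unused last entry and
 * memblock_analyze() later checks that it is still intact, catching
 * overflows of the INIT_MEMBLOCK_REGIONS-sized tables.
 */
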
/* inline so we don't get a warning when pr_debug is compiled out */
static inline const char *memblock_type_name(struct memblock_type *type)
{
	if (type == &memblock.memory)
		return "memory";
	else if (type == &memblock.reserved)
		return "reserved";
	else
		return "unknown";
}

/*
 * Address comparison utilities
 */

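/*
 * Regions are half-open intervals [base, base + size): two regions
 * overlap iff each begins below the other's end. For example,
 * [0x1000, 0x2000) and [0x1fff, 0x3000) overlap, while
 * [0x1000, 0x2000) and [0x2000, 0x3000) are merely adjacent.
 */
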
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

long __init_memblock memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;
		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < type->cnt) ? i : -1;
}

/*
 * Find, allocate, deallocate or reserve unreserved regions. All allocations
 * are top-down.
 */

static phys_addr_t __init_memblock memblock_find_region(phys_addr_t start, phys_addr_t end,
					  phys_addr_t size, phys_addr_t align)
{
	phys_addr_t base, res_base;
	long j;

	/* In case a huge size is requested */
	if (end < size)
		return 0;

	base = round_down(end - size, align);

	/* Prevent allocations returning 0 as it's also used to
	 * indicate an allocation failure
	 */
	if (start == 0)
		start = PAGE_SIZE;

	while (start <= base) {
		j = memblock_overlaps_region(&memblock.reserved, base, size);
		if (j < 0)
			return base;
		res_base = memblock.reserved.regions[j].base;
		if (res_base < size)
			break;
		base = round_down(res_base - size, align);
	}

	return 0;
}

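/*
 * Worked example for the search above: with end = 0x10000, size =
 * 0x2000 and align = 0x1000, the first candidate is base = 0xe000.
 * Whenever the candidate overlaps a reserved region, base drops to
 * just below that region and is re-aligned, so the function returns
 * the highest aligned free window in [start, end), or 0 on failure.
 */
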
/*
 * Find a free area with specified alignment in a specific range.
 */

phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start, phys_addr_t end,
					phys_addr_t size, phys_addr_t align)
{
	long i;

	BUG_ON(0 == size);

	/* Pump up max_addr */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* We do a top-down search, this tends to limit memory
	 * fragmentation by keeping early boot allocs near the
	 * top of memory
	 */
	for (i = memblock.memory.cnt - 1; i >= 0; i--) {
		phys_addr_t memblockbase = memblock.memory.regions[i].base;
		phys_addr_t memblocksize = memblock.memory.regions[i].size;
		phys_addr_t bottom, top, found;

		if (memblocksize < size)
			continue;
		if ((memblockbase + memblocksize) <= start)
			break;
		bottom = max(memblockbase, start);
		top = min(memblockbase + memblocksize, end);
		if (bottom >= top)
			continue;
		found = memblock_find_region(bottom, top, size, align);
		if (found)
			return found;
	}
	return 0;
}

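/*
 * Illustrative use only (the constants are arbitrary placeholders): an
 * arch needing a 16MB aligned window below 4GB could do
 *
 *	base = memblock_find_in_range(0, 0x100000000ULL,
 *				      0x1000000, 0x1000000);
 *	if (base)
 *		memblock_reserve(base, 0x1000000);
 */
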
/*
 * Free memblock.reserved.regions
 */

int __init_memblock memblock_free_reserved_regions(void)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	return memblock_free(__pa(memblock.reserved.regions),
		 sizeof(struct memblock_region) * memblock.reserved.max);
}

/*
 * Reserve memblock.reserved.regions
 */

int __init_memblock memblock_reserve_reserved_regions(void)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	return memblock_reserve(__pa(memblock.reserved.regions),
		 sizeof(struct memblock_region) * memblock.reserved.max);
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	unsigned long i;

	for (i = r; i < type->cnt - 1; i++) {
		type->regions[i].base = type->regions[i + 1].base;
		type->regions[i].size = type->regions[i + 1].size;
	}
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
	}
}

/* Defined below but needed now */
static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size);

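/*
 * Double the size of a region array. Called by memblock_add_region()
 * when a type's array fills up; memory comes from the slab once it is
 * available and from memblock itself during early boot.
 */
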
static int __init_memblock memblock_double_array(struct memblock_type *type)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab_is_available() and we use it or
	 * we use MEMBLOCK for allocations. That means that this is unsafe to use
	 * when bootmem is currently active (unless bootmem itself is implemented
	 * on top of MEMBLOCK which isn't the case yet)
	 *
	 * This should however not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab is
	 * active for memory hotplug operations
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else
		addr = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE, new_size, sizeof(phys_addr_t));
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}
	new_array = __va(addr);

	memblock_dbg("memblock: %s array is doubled to %ld at [%#010llx-%#010llx]",
		 memblock_type_name(type), type->max * 2, (u64)addr, (u64)addr + new_size - 1);

	/* Found space, we now need to move the array over before
	 * we add the reserved region since it may be our reserved
	 * array itself that is full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* If we use SLAB that's it, we are done */
	if (use_slab)
		return 0;

	/* Add the new reserved region now. Should not fail ! */
	BUG_ON(memblock_add_region(&memblock.reserved, addr, new_size));

	/* If the array wasn't our static init one, then free it. We only do
	 * that before SLAB is available as later on, we don't know whether
	 * to use kfree or free_bootmem_pages(). Shouldn't be a big deal
	 * anyways.
	 */
	if (old_array != memblock_memory_init_regions &&
	    old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_size);

	return 0;
}

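/*
 * Weak default for the coalescing hook below: always allow merging.
 * An architecture that needs certain physical ranges kept in separate
 * regions can override it and return 0 to veto a merge.
 */
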
extern int __init_memblock __weak memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
					  phys_addr_t addr2, phys_addr_t size2)
{
	return 1;
}

static long __init_memblock memblock_add_region(struct memblock_type *type,
						phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size;
	int i, slot = -1;

	/* First try and coalesce this MEMBLOCK with others */
	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rend = rgn->base + rgn->size;

		/* Exit if there's no possible hits */
		if (rgn->base > end || rgn->size == 0)
			break;

		/* Check if we are fully enclosed within an existing
		 * block
		 */
		if (rgn->base <= base && rend >= end)
			return 0;

		/* Check if we overlap or are adjacent with the bottom
		 * of a block.
		 */
		if (base < rgn->base && end >= rgn->base) {
			/* If we can't coalesce, create a new block */
			if (!memblock_memory_can_coalesce(base, size,
							  rgn->base,
							  rgn->size)) {
				/* Overlap & can't coalesce are mutually
				 * exclusive, if you do that, be prepared
				 * for trouble
				 */
				WARN_ON(end != rgn->base);
				goto new_block;
			}
			/* We extend the bottom of the block down to our
			 * base
			 */
			rgn->base = base;
			rgn->size = rend - base;

			/* Return if we have nothing else to allocate
			 * (fully coalesced)
			 */
			if (rend >= end)
				return 0;

			/* We continue processing from the end of the
			 * coalesced block.
			 */
			base = rend;
			size = end - base;
		}

		/* Now check if we overlap or are adjacent with the
		 * top of a block
		 */
		if (base <= rend && end >= rend) {
			/* If we can't coalesce, create a new block */
			if (!memblock_memory_can_coalesce(rgn->base,
							  rgn->size,
							  base, size)) {
				/* Overlap & can't coalesce are mutually
				 * exclusive, if you do that, be prepared
				 * for trouble
				 */
				WARN_ON(rend != base);
				goto new_block;
			}
			/* We adjust our base down to enclose the
			 * original block and destroy it. It will be
			 * part of our new allocation. Since we've
			 * freed an entry, we know we won't fail
			 * to allocate one later, so we won't risk
			 * losing the original block allocation.
			 */
			size += (base - rgn->base);
			base = rgn->base;
			memblock_remove_region(type, i--);
		}
	}

	/* If the array is empty, special case, replace the fake
	 * filler region and return
	 */
	if ((type->cnt == 1) && (type->regions[0].size == 0)) {
		type->regions[0].base = base;
		type->regions[0].size = size;
		return 0;
	}

 new_block:
	/* If we are out of space, we fail. It's too late to resize the array
	 * but then this shouldn't have happened in the first place.
	 */
	if (WARN_ON(type->cnt >= type->max))
		return -1;

	/* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */
	for (i = type->cnt - 1; i >= 0; i--) {
		if (base < type->regions[i].base) {
			type->regions[i+1].base = type->regions[i].base;
			type->regions[i+1].size = type->regions[i].size;
		} else {
			type->regions[i+1].base = base;
			type->regions[i+1].size = size;
			slot = i + 1;
			break;
		}
	}
	if (base < type->regions[0].base) {
		type->regions[0].base = base;
		type->regions[0].size = size;
		slot = 0;
	}
	type->cnt++;

	/* The array is full ? Try to resize it. If that fails, we undo
	 * our allocation and return an error
	 */
	if (type->cnt == type->max && memblock_double_array(type)) {
		BUG_ON(slot < 0);
		memblock_remove_region(type, slot);
		return -1;
	}

	return 0;
}

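/*
 * Coalescing example: adding [0x2000, 0x3000) to a table holding
 * [0x1000, 0x2000) and [0x3000, 0x4000) first absorbs the lower
 * neighbour (the new range grows down to 0x1000 and the old entry is
 * removed), then merges into the upper neighbour, leaving the single
 * region [0x1000, 0x4000).
 */
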
long __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	return memblock_add_region(&memblock.memory, base, size);
}

static long __init_memblock __memblock_remove(struct memblock_type *type,
					      phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size;
	int i;

	/* Walk through the array for collisions */
	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rend = rgn->base + rgn->size;

		/* Nothing more to do, exit */
		if (rgn->base > end || rgn->size == 0)
			break;

		/* If we fully enclose the block, drop it */
		if (base <= rgn->base && end >= rend) {
			memblock_remove_region(type, i--);
			continue;
		}

		/* If we are fully enclosed within a block
		 * then we need to split it and we are done
		 */
		if (base > rgn->base && end < rend) {
			rgn->size = base - rgn->base;
			if (!memblock_add_region(type, end, rend - end))
				return 0;
			/* Failure to split is bad, we at least
			 * restore the block before erroring
			 */
			rgn->size = rend - rgn->base;
			WARN_ON(1);
			return -1;
		}

		/* Check if we need to trim the bottom of a block */
		if (rgn->base < end && rend > end) {
			rgn->size -= end - rgn->base;
			rgn->base = end;
			break;
		}

		/* And check if we need to trim the top of a block */
		if (base < rend)
			rgn->size -= rend - base;
	}
	return 0;
}

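/*
 * Split example: removing [0x2000, 0x3000) from a single region
 * [0x1000, 0x4000) shrinks it to [0x1000, 0x2000) and re-adds
 * [0x3000, 0x4000); punching a hole can therefore consume an extra
 * table slot.
 */
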
long __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.memory, base, size);
}

long __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.reserved, base, size);
}

long __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	struct memblock_type *_rgn = &memblock.reserved;

	BUG_ON(0 == size);

	return memblock_add_region(_rgn, base, size);
}

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t found;

	/* We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = round_up(size, align);

	found = memblock_find_in_range(0, max_addr, size, align);
	if (found && !memblock_add_region(&memblock.reserved, found, size))
		return found;

	return 0;
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

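/*
 * Illustrative boot-time sequence (ram_base, ram_size and fdt_size are
 * made-up placeholders, not symbols defined here):
 *
 *	memblock_init();
 *	memblock_add(ram_base, ram_size);
 *	memblock_analyze();
 *	paddr = memblock_alloc(fdt_size, PAGE_SIZE);
 *
 * memblock_alloc() panics on failure; callers that want to handle
 * failure themselves can use __memblock_alloc_base(), which returns 0.
 */
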
/*
 * Additional node-local allocators. Search for node memory is bottom up
 * and walks memblock regions within that node bottom-up as well, but allocation
 * within a memblock region is top-down. XXX I plan to fix that at some stage
 *
 * WARNING: Only available after early_node_map[] has been populated,
 * on some architectures, that is after all the calls to add_active_range()
 * have been done to populate it.
 */

static phys_addr_t __init memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid)
{
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
	unsigned long start_pfn, end_pfn;
	int i;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, nid)
		if (start >= PFN_PHYS(start_pfn) && start < PFN_PHYS(end_pfn))
			return min(end, PFN_PHYS(end_pfn));
#endif
	*nid = 0;
	return end;
}

static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp,
					       phys_addr_t size,
					       phys_addr_t align, int nid)
{
	phys_addr_t start, end;

	start = mp->base;
	end = start + mp->size;

	start = round_up(start, align);
	while (start < end) {
		phys_addr_t this_end;
		int this_nid;

		this_end = memblock_nid_range(start, end, &this_nid);
		if (this_nid == nid) {
			phys_addr_t ret = memblock_find_region(start, this_end, size, align);
			if (ret &&
			    !memblock_add_region(&memblock.reserved, ret, size))
				return ret;
		}
		start = this_end;
	}

	return 0;
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	struct memblock_type *mem = &memblock.memory;
	int i;

	BUG_ON(0 == size);

	/* We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = round_up(size, align);

	/* We do a bottom-up search for a region with the right
	 * nid since that's easier considering how memblock_nid_range()
	 * works
	 */
	for (i = 0; i < mem->cnt; i++) {
		phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i],
					       size, align, nid);
		if (ret)
			return ret;
	}

	return 0;
}

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

/*
 * Remaining API functions
 */

/* You must call memblock_analyze() before this. */
phys_addr_t __init memblock_phys_mem_size(void)
{
	return memblock.memory_size;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

/* You must call memblock_analyze() after this. */
void __init memblock_enforce_memory_limit(phys_addr_t memory_limit)
{
	unsigned long i;
	phys_addr_t limit;
	struct memblock_region *p;

	if (!memory_limit)
		return;

	/* Truncate the memblock regions to satisfy the memory limit. */
	limit = memory_limit;
	for (i = 0; i < memblock.memory.cnt; i++) {
		if (limit > memblock.memory.regions[i].size) {
			limit -= memblock.memory.regions[i].size;
			continue;
		}

		memblock.memory.regions[i].size = limit;
		memblock.memory.cnt = i + 1;
		break;
	}

	memory_limit = memblock_end_of_DRAM();

	/* And truncate any reserves above the limit also. */
	for (i = 0; i < memblock.reserved.cnt; i++) {
		p = &memblock.reserved.regions[i];

		if (p->base > memory_limit)
			p->size = 0;
		else if ((p->base + p->size) > memory_limit)
			p->size = memory_limit - p->base;

		if (p->size == 0) {
			memblock_remove_region(&memblock.reserved, i);
			i--;
		}
	}
}

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}

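/*
 * The binary search above relies on memblock_add_region() keeping each
 * region array sorted by base and free of overlaps, so halving
 * [left, right) is safe and lookups cost O(log cnt).
 */
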
int __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

int __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);

	if (idx == -1)
		return 0;
	return memblock.memory.regions[idx].base <= base &&
		(memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= (base + size);
}

int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

static void __init_memblock memblock_dump(struct memblock_type *region, char *name)
{
	unsigned long long base, size;
	int i;

	pr_info(" %s.cnt = 0x%lx\n", name, region->cnt);

	for (i = 0; i < region->cnt; i++) {
		base = region->regions[i].base;
		size = region->regions[i].size;

		pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes\n",
			name, i, base, base + size - 1, size);
	}
}

void __init_memblock memblock_dump_all(void)
{
	if (!memblock_debug)
		return;

	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = 0x%llx\n", (unsigned long long)memblock.memory_size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}

void __init memblock_analyze(void)
{
	int i;

	/* Check marker in the unused last array entry */
	WARN_ON(memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS].base
		!= (phys_addr_t)RED_INACTIVE);
	WARN_ON(memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS].base
		!= (phys_addr_t)RED_INACTIVE);

	memblock.memory_size = 0;

	for (i = 0; i < memblock.memory.cnt; i++)
		memblock.memory_size += memblock.memory.regions[i].size;

	/* We allow resizing from there */
	memblock_can_resize = 1;
}

void __init memblock_init(void)
{
	static int init_done __initdata = 0;

	if (init_done)
		return;
	init_done = 1;

	/* Hookup the initial arrays */
	memblock.memory.regions = memblock_memory_init_regions;
	memblock.memory.max = INIT_MEMBLOCK_REGIONS;
	memblock.reserved.regions = memblock_reserved_init_regions;
	memblock.reserved.max = INIT_MEMBLOCK_REGIONS;

	/* Write a marker in the unused last array entry */
	memblock.memory.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;
	memblock.reserved.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;

	/* Create a dummy zero size MEMBLOCK which will get coalesced away later.
	 * This simplifies the memblock_add() code below...
	 */
	memblock.memory.regions[0].base = 0;
	memblock.memory.regions[0].size = 0;
	memblock.memory.cnt = 1;

	/* Ditto. */
	memblock.reserved.regions[0].base = 0;
	memblock.reserved.regions[0].size = 0;
	memblock.reserved.cnt = 1;

	memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);

#if defined(CONFIG_DEBUG_FS) && !defined(ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		seq_printf(m, "%4d: ", i);
		if (sizeof(phys_addr_t) == 4)
			seq_printf(m, "0x%08lx..0x%08lx\n",
				   (unsigned long)reg->base,
				   (unsigned long)(reg->base + reg->size - 1));
		else
			seq_printf(m, "0x%016llx..0x%016llx\n",
				   (unsigned long long)reg->base,
				   (unsigned long long)(reg->base + reg->size - 1));
	}
	return 0;
}

*inode
, struct file
*file
)
810 return single_open(file
, memblock_debug_show
, inode
->i_private
);
static const struct file_operations memblock_debug_fops = {
	.open = memblock_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);

	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */