/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

struct memblock memblock;

int memblock_debug;
int memblock_can_resize;
static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS + 1];
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS + 1];

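/* Note the extra slot in each static array above: memblock_init() poisons it
 * with RED_INACTIVE and memblock_analyze() checks the marker, catching
 * overruns of the initial arrays.
 */

/* Sentinel returned by the region finders when no suitable range exists. */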
#define MEMBLOCK_ERROR	(~(phys_addr_t)0)

/* inline so we don't get a warning when pr_debug is compiled out */
static inline const char *memblock_type_name(struct memblock_type *type)
{
	if (type == &memblock.memory)
		return "memory";
	else if (type == &memblock.reserved)
		return "reserved";
	else
		return "unknown";
}

/*
 * Address comparison utilities
 */

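/* Both alignment helpers below assume the alignment ('size') is a power of
 * two; the mask arithmetic does not work for other values.
 */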
static phys_addr_t memblock_align_down(phys_addr_t addr, phys_addr_t size)
{
	return addr & ~(size - 1);
}

static phys_addr_t memblock_align_up(phys_addr_t addr, phys_addr_t size)
{
	return (addr + (size - 1)) & ~(size - 1);
}

static unsigned long memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
					    phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

static long memblock_addrs_adjacent(phys_addr_t base1, phys_addr_t size1,
				    phys_addr_t base2, phys_addr_t size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}

static long memblock_regions_adjacent(struct memblock_type *type,
				      unsigned long r1, unsigned long r2)
{
	phys_addr_t base1 = type->regions[r1].base;
	phys_addr_t size1 = type->regions[r1].size;
	phys_addr_t base2 = type->regions[r2].base;
	phys_addr_t size2 = type->regions[r2].size;

	return memblock_addrs_adjacent(base1, size1, base2, size2);
}

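/* Return the index of the first region in 'type' that overlaps
 * [base, base + size), or -1 if none does.
 */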
long memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;
		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < type->cnt) ? i : -1;
}

/*
 * Find, allocate, deallocate or reserve unreserved regions. All allocations
 * are top-down.
 */

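/* Scan [start, end) top-down: begin at the highest aligned candidate and,
 * whenever the candidate collides with a reserved region, retry just below
 * that region until an unreserved window of 'size' bytes is found.
 */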
static phys_addr_t __init memblock_find_region(phys_addr_t start, phys_addr_t end,
					       phys_addr_t size, phys_addr_t align)
{
	phys_addr_t base, res_base;
	long j;

	base = memblock_align_down((end - size), align);
	while (start <= base) {
		j = memblock_overlaps_region(&memblock.reserved, base, size);
		if (j < 0)
			return base;
		res_base = memblock.reserved.regions[j].base;
		if (res_base < size)
			break;
		base = memblock_align_down(res_base - size, align);
	}

	return MEMBLOCK_ERROR;
}

static phys_addr_t __init memblock_find_base(phys_addr_t size, phys_addr_t align,
					     phys_addr_t start, phys_addr_t end)
{
	long i;

	BUG_ON(0 == size);

	size = memblock_align_up(size, align);

	/* Pump up max_addr */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* We do a top-down search, this tends to limit memory
	 * fragmentation by keeping early boot allocs near the
	 * top of memory
	 */
	for (i = memblock.memory.cnt - 1; i >= 0; i--) {
		phys_addr_t memblockbase = memblock.memory.regions[i].base;
		phys_addr_t memblocksize = memblock.memory.regions[i].size;
		phys_addr_t bottom, top, found;

		if (memblocksize < size)
			continue;
		if ((memblockbase + memblocksize) <= start)
			break;
		bottom = max(memblockbase, start);
		top = min(memblockbase + memblocksize, end);
		if (bottom >= top)
			continue;
		found = memblock_find_region(bottom, top, size, align);
		if (found != MEMBLOCK_ERROR)
			return found;
	}
	return MEMBLOCK_ERROR;
}

static void memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	unsigned long i;

	for (i = r; i < type->cnt - 1; i++) {
		type->regions[i].base = type->regions[i + 1].base;
		type->regions[i].size = type->regions[i + 1].size;
	}
	type->cnt--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void memblock_coalesce_regions(struct memblock_type *type,
				      unsigned long r1, unsigned long r2)
{
	type->regions[r1].size += type->regions[r2].size;
	memblock_remove_region(type, r2);
}

/* Defined below but needed now */
static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size);

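/* Double the capacity of a region array. Once the slab allocator is up the
 * new array comes from kmalloc(); during early boot it is carved out of
 * memblock itself, which is why the array is copied over before the new
 * reservation is recorded.
 */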
static int memblock_double_array(struct memblock_type *type)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab_is_available() and we use it or
	 * we use MEMBLOCK for allocations. That means that this is unsafe to use
	 * when bootmem is currently active (unless bootmem itself is implemented
	 * on top of MEMBLOCK which isn't the case yet)
	 *
	 * This should however not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab is
	 * active for memory hotplug operations
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array == NULL ? MEMBLOCK_ERROR : __pa(new_array);
	} else
		addr = memblock_find_base(new_size, sizeof(phys_addr_t), 0, MEMBLOCK_ALLOC_ACCESSIBLE);
	if (addr == MEMBLOCK_ERROR) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}
	new_array = __va(addr);

	memblock_dbg("memblock: %s array is doubled to %ld at [%#010llx-%#010llx]",
		     memblock_type_name(type), type->max * 2, (u64)addr, (u64)addr + new_size - 1);

	/* Found space, we now need to move the array over before
	 * we add the reserved region since it may be our reserved
	 * array itself that is full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* If we use SLAB that's it, we are done */
	if (use_slab)
		return 0;

	/* Add the new reserved region now. Should not fail ! */
	BUG_ON(memblock_add_region(&memblock.reserved, addr, new_size) < 0);

	/* If the array wasn't our static init one, then free it. We only do
	 * that before SLAB is available as later on, we don't know whether
	 * to use kfree or free_bootmem_pages(). Shouldn't be a big deal
	 * anyways.
	 */
	if (old_array != memblock_memory_init_regions &&
	    old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_size);

	return 0;
}

extern int __weak memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
					       phys_addr_t addr2, phys_addr_t size2)
{
	return 1;
}

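/* Insert [base, base + size) into 'type'. Adjacent or identical regions are
 * coalesced where the architecture allows it; otherwise the region is slotted
 * into the array, which is kept sorted by base address.
 */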
static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	unsigned long coalesced = 0;
	long adjacent, i;

	if ((type->cnt == 1) && (type->regions[0].size == 0)) {
		type->regions[0].base = base;
		type->regions[0].size = size;
		return 0;
	}

	/* First try and coalesce this MEMBLOCK with another. */
	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;

		if ((rgnbase == base) && (rgnsize == size))
			/* Already have this region, so we're done */
			return 0;

		adjacent = memblock_addrs_adjacent(base, size, rgnbase, rgnsize);
		/* Check if arch allows coalescing */
		if (adjacent != 0 && type == &memblock.memory &&
		    !memblock_memory_can_coalesce(base, size, rgnbase, rgnsize))
			break;
		if (adjacent > 0) {
			type->regions[i].base -= size;
			type->regions[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			type->regions[i].size += size;
			coalesced++;
			break;
		}
	}

	/* If we plugged a hole, we may want to also coalesce with the
	 * next region
	 */
	if ((i < type->cnt - 1) && memblock_regions_adjacent(type, i, i+1) &&
	    ((type != &memblock.memory || memblock_memory_can_coalesce(type->regions[i].base,
								       type->regions[i].size,
								       type->regions[i+1].base,
								       type->regions[i+1].size)))) {
		memblock_coalesce_regions(type, i, i+1);
		coalesced++;
	}

	if (coalesced)
		return coalesced;

	/* If we are out of space, we fail. It's too late to resize the array
	 * but then this shouldn't have happened in the first place.
	 */
	if (WARN_ON(type->cnt >= type->max))
		return -1;

	/* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */
	for (i = type->cnt - 1; i >= 0; i--) {
		if (base < type->regions[i].base) {
			type->regions[i+1].base = type->regions[i].base;
			type->regions[i+1].size = type->regions[i].size;
		} else {
			type->regions[i+1].base = base;
			type->regions[i+1].size = size;
			break;
		}
	}

	if (base < type->regions[0].base) {
		type->regions[0].base = base;
		type->regions[0].size = size;
	}
	type->cnt++;

	/* The array is full ? Try to resize it. If that fails, we undo
	 * our allocation and return an error
	 */
	if (type->cnt == type->max && memblock_double_array(type)) {
		type->cnt--;
		return -1;
	}

	return 0;
}

long memblock_add(phys_addr_t base, phys_addr_t size)
{
	return memblock_add_region(&memblock.memory, base, size);
}

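/* Remove [base, base + size) from 'type'. Depending on how the range lines
 * up with the region that contains it, the region is deleted outright,
 * trimmed at either end, or split in two around the hole.
 */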
static long __memblock_remove(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	phys_addr_t rgnbegin, rgnend;
	phys_addr_t end = base + size;
	int i;

	rgnbegin = rgnend = 0; /* suppress gcc warnings */

	/* Find the region where (base, size) belongs to */
	for (i = 0; i < type->cnt; i++) {
		rgnbegin = type->regions[i].base;
		rgnend = rgnbegin + type->regions[i].size;

		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}

	/* Didn't find the region */
	if (i == type->cnt)
		return -1;

	/* Check to see if we are removing entire region */
	if ((rgnbegin == base) && (rgnend == end)) {
		memblock_remove_region(type, i);
		return 0;
	}

	/* Check to see if region is matching at the front */
	if (rgnbegin == base) {
		type->regions[i].base = end;
		type->regions[i].size -= size;
		return 0;
	}

	/* Check to see if the region is matching at the end */
	if (rgnend == end) {
		type->regions[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after the hole.
	 */
	type->regions[i].size = base - type->regions[i].base;
	return memblock_add_region(type, end, rgnend - end);
}

long memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.memory, base, size);
}

long __init memblock_free(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.reserved, base, size);
}

long __init memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	struct memblock_type *_rgn = &memblock.reserved;

	BUG_ON(0 == size);

	return memblock_add_region(_rgn, base, size);
}

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t found;

	/* We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = memblock_align_up(size, align);

	found = memblock_find_base(size, align, 0, max_addr);
	if (found != MEMBLOCK_ERROR &&
	    memblock_add_region(&memblock.reserved, found, size) >= 0)
		return found;

	return 0;
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

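/* Typical early boot usage: grab a page-aligned scratch page and convert the
 * physical address to a kernel virtual one, e.g.:
 *
 *	buf = __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
 *
 * memblock_alloc() panics on failure (via memblock_alloc_base()), so the
 * result does not need a NULL check.
 */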
/*
 * Additional node-local allocators. Search for node memory is bottom up
 * and walks memblock regions within that node bottom-up as well, but allocation
 * within a memblock region is top-down. XXX I plan to fix that at some stage
 *
 * WARNING: Only available after early_node_map[] has been populated,
 * on some architectures, that is after all the calls to add_active_range()
 * have been done to populate it.
 */

phys_addr_t __weak __init memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid)
{
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
	/*
	 * This code originates from sparc which really wants us to walk by addresses
	 * and returns the nid. This is not very convenient for early_pfn_map[] users
	 * as the map isn't sorted yet, and it really wants to be walked by nid.
	 *
	 * For now, I implement the inefficient method below which walks the early
	 * map multiple times. Eventually we may want to use an ARCH config option
	 * to implement a completely different method for both cases.
	 */
	unsigned long start_pfn, end_pfn;
	int i;

	for (i = 0; i < MAX_NUMNODES; i++) {
		get_pfn_range_for_nid(i, &start_pfn, &end_pfn);
		if (start < PFN_PHYS(start_pfn) || start >= PFN_PHYS(end_pfn))
			continue;
		*nid = i;
		return min(end, PFN_PHYS(end_pfn));
	}
#endif
	*nid = 0;

	return end;
}

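/* Try to satisfy an allocation from the single region 'mp', considering only
 * the sub-ranges that memblock_nid_range() attributes to node 'nid'.
 */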
static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp,
						    phys_addr_t size,
						    phys_addr_t align, int nid)
{
	phys_addr_t start, end;

	start = mp->base;
	end = start + mp->size;

	start = memblock_align_up(start, align);
	while (start < end) {
		phys_addr_t this_end;
		int this_nid;

		this_end = memblock_nid_range(start, end, &this_nid);
		if (this_nid == nid) {
			phys_addr_t ret = memblock_find_region(start, this_end, size, align);
			if (ret != MEMBLOCK_ERROR &&
			    memblock_add_region(&memblock.reserved, ret, size) >= 0)
				return ret;
		}
		start = this_end;
	}

	return MEMBLOCK_ERROR;
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	struct memblock_type *mem = &memblock.memory;
	int i;

	BUG_ON(0 == size);

	/* We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = memblock_align_up(size, align);

	/* We do a bottom-up search for a region with the right
	 * nid since that's easier considering how memblock_nid_range()
	 * works
	 */
	for (i = 0; i < mem->cnt; i++) {
		phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i],
							    size, align, nid);
		if (ret != MEMBLOCK_ERROR)
			return ret;
	}

	return 0;
}

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
}

/*
 * Remaining API functions
 */

/* You must call memblock_analyze() before this. */
phys_addr_t __init memblock_phys_mem_size(void)
{
	return memblock.memory_size;
}

phys_addr_t memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

/* You must call memblock_analyze() after this. */
void __init memblock_enforce_memory_limit(phys_addr_t memory_limit)
{
	unsigned long i;
	phys_addr_t limit;
	struct memblock_region *p;

	if (!memory_limit)
		return;

	/* Truncate the memblock regions to satisfy the memory limit. */
	limit = memory_limit;
	for (i = 0; i < memblock.memory.cnt; i++) {
		if (limit > memblock.memory.regions[i].size) {
			limit -= memblock.memory.regions[i].size;
			continue;
		}

		memblock.memory.regions[i].size = limit;
		memblock.memory.cnt = i + 1;
		break;
	}

	memory_limit = memblock_end_of_DRAM();

	/* And truncate any reserves above the limit also. */
	for (i = 0; i < memblock.reserved.cnt; i++) {
		p = &memblock.reserved.regions[i];

		if (p->base > memory_limit)
			p->size = 0;
		else if ((p->base + p->size) > memory_limit)
			p->size = memory_limit - p->base;

		if (p->size == 0) {
			memblock_remove_region(&memblock.reserved, i);
			i--;
		}
	}
}

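/* Binary search. Relies on the regions array being sorted by base address
 * and non-overlapping. Returns the index of the region containing 'addr',
 * or -1 if no region does.
 */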
static int memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}

int __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

int memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

int memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);

	if (idx == -1)
		return 0;
	return memblock.memory.regions[idx].base <= base &&
		(memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= (base + size);
}

int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}

void __init memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

static void memblock_dump(struct memblock_type *region, char *name)
{
	unsigned long long base, size;
	int i;

	pr_info(" %s.cnt  = 0x%lx\n", name, region->cnt);

	for (i = 0; i < region->cnt; i++) {
		base = region->regions[i].base;
		size = region->regions[i].size;

		pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes\n",
			name, i, base, base + size - 1, size);
	}
}

void memblock_dump_all(void)
{
	if (!memblock_debug)
		return;

	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = 0x%llx\n", (unsigned long long)memblock.memory_size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}

void __init memblock_analyze(void)
{
	int i;

	/* Check marker in the unused last array entry */
	WARN_ON(memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS].base
		!= (phys_addr_t)RED_INACTIVE);
	WARN_ON(memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS].base
		!= (phys_addr_t)RED_INACTIVE);

	memblock.memory_size = 0;

	for (i = 0; i < memblock.memory.cnt; i++)
		memblock.memory_size += memblock.memory.regions[i].size;

	/* We allow resizing from there */
	memblock_can_resize = 1;
}

void __init memblock_init(void)
{
	/* Hookup the initial arrays */
	memblock.memory.regions = memblock_memory_init_regions;
	memblock.memory.max = INIT_MEMBLOCK_REGIONS;
	memblock.reserved.regions = memblock_reserved_init_regions;
	memblock.reserved.max = INIT_MEMBLOCK_REGIONS;

	/* Write a marker in the unused last array entry */
	memblock.memory.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;
	memblock.reserved.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;

	/* Create a dummy zero size MEMBLOCK which will get coalesced away later.
	 * This simplifies the memblock_add() code below...
	 */
	memblock.memory.regions[0].base = 0;
	memblock.memory.regions[0].size = 0;
	memblock.memory.cnt = 1;

	/* Ditto. */
	memblock.reserved.regions[0].base = 0;
	memblock.reserved.regions[0].size = 0;
	memblock.reserved.cnt = 1;

	memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);

#ifdef CONFIG_DEBUG_FS

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		seq_printf(m, "%4d: ", i);
		if (sizeof(phys_addr_t) == 4)
			seq_printf(m, "0x%08lx..0x%08lx\n",
				   (unsigned long)reg->base,
				   (unsigned long)(reg->base + reg->size - 1));
		else
			seq_printf(m, "0x%016llx..0x%016llx\n",
				   (unsigned long long)reg->base,
				   (unsigned long long)(reg->base + reg->size - 1));
	}
	return 0;
}

static int memblock_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, memblock_debug_show, inode->i_private);
}

static const struct file_operations memblock_debug_fops = {
	.open = memblock_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);
	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */