/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/memblock.h>

struct memblock memblock;

static int memblock_debug;

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);

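/*
 * Booting with "memblock=debug" on the kernel command line sets
 * memblock_debug, which lets memblock_dump_all() below print both
 * region tables.
 */
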
static void memblock_dump(struct memblock_type *region, char *name)
{
	unsigned long long base, size;
	int i;

	pr_info(" %s.cnt = 0x%lx\n", name, region->cnt);

	for (i = 0; i < region->cnt; i++) {
		base = region->regions[i].base;
		size = region->regions[i].size;

		pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n",
			name, i, base, base + size - 1, size);
	}
}

void memblock_dump_all(void)
{
	if (!memblock_debug)
		return;

	pr_info("MEMBLOCK configuration:\n");
	pr_info(" rmo_size    = 0x%llx\n", (unsigned long long)memblock.rmo_size);
	pr_info(" memory.size = 0x%llx\n", (unsigned long long)memblock.memory.size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}

static unsigned long memblock_addrs_overlap(u64 base1, u64 size1, u64 base2,
					    u64 size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

static long memblock_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}

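/*
 * For example, memblock_addrs_adjacent(0x1000, 0x1000, 0x2000, 0x1000)
 * returns 1 because region 2 starts exactly where region 1 ends;
 * swapping the two regions returns -1, and 0 means not adjacent
 * (either disjoint with a gap, or overlapping).
 */
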
static long memblock_regions_adjacent(struct memblock_type *type,
				      unsigned long r1, unsigned long r2)
{
	u64 base1 = type->regions[r1].base;
	u64 size1 = type->regions[r1].size;
	u64 base2 = type->regions[r2].base;
	u64 size2 = type->regions[r2].size;

	return memblock_addrs_adjacent(base1, size1, base2, size2);
}

static void memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	unsigned long i;

	for (i = r; i < type->cnt - 1; i++) {
		type->regions[i].base = type->regions[i + 1].base;
		type->regions[i].size = type->regions[i + 1].size;
	}
	type->cnt--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void memblock_coalesce_regions(struct memblock_type *type,
				      unsigned long r1, unsigned long r2)
{
	type->regions[r1].size += type->regions[r2].size;
	memblock_remove_region(type, r2);
}

void __init memblock_init(void)
{
	/* Create a dummy zero size MEMBLOCK which will get coalesced away later.
	 * This simplifies the memblock_add() code below...
	 */
	memblock.memory.regions[0].base = 0;
	memblock.memory.regions[0].size = 0;
	memblock.memory.cnt = 1;

	/* Ditto. */
	memblock.reserved.regions[0].base = 0;
	memblock.reserved.regions[0].size = 0;
	memblock.reserved.cnt = 1;

	memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE;
}

void __init memblock_analyze(void)
{
	int i;

	memblock.memory.size = 0;

	for (i = 0; i < memblock.memory.cnt; i++)
		memblock.memory.size += memblock.memory.regions[i].size;
}

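/*
 * A typical early-boot sequence (hypothetical; the exact calls live in
 * arch setup code, not here) would be:
 *
 *	memblock_init();
 *	memblock_add(base, size);		(one call per RAM range)
 *	memblock_analyze();
 *	memblock_reserve(kern_base, kern_size);
 *	paddr = memblock_alloc(SZ_1M, PAGE_SIZE);
 *
 * kern_base/kern_size are placeholder names, not symbols defined here;
 * memblock_alloc() returns a physical address.
 */
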
static long memblock_add_region(struct memblock_type *type, u64 base, u64 size)
{
	unsigned long coalesced = 0;
	long adjacent, i;

	if ((type->cnt == 1) && (type->regions[0].size == 0)) {
		type->regions[0].base = base;
		type->regions[0].size = size;
		return 0;
	}

	/* First try and coalesce this MEMBLOCK with another. */
	for (i = 0; i < type->cnt; i++) {
		u64 rgnbase = type->regions[i].base;
		u64 rgnsize = type->regions[i].size;

		if ((rgnbase == base) && (rgnsize == size))
			/* Already have this region, so we're done */
			return 0;

		adjacent = memblock_addrs_adjacent(base, size, rgnbase, rgnsize);
		if (adjacent > 0) {
			type->regions[i].base -= size;
			type->regions[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			type->regions[i].size += size;
			coalesced++;
			break;
		}
	}

	if ((i < type->cnt - 1) && memblock_regions_adjacent(type, i, i+1)) {
		memblock_coalesce_regions(type, i, i+1);
		coalesced++;
	}

	if (coalesced)
		return coalesced;
	if (type->cnt >= MAX_MEMBLOCK_REGIONS)
		return -1;

	/* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */
	for (i = type->cnt - 1; i >= 0; i--) {
		if (base < type->regions[i].base) {
			type->regions[i+1].base = type->regions[i].base;
			type->regions[i+1].size = type->regions[i].size;
		} else {
			type->regions[i+1].base = base;
			type->regions[i+1].size = size;
			break;
		}
	}

	if (base < type->regions[0].base) {
		type->regions[0].base = base;
		type->regions[0].size = size;
	}

	type->cnt++;

	return 0;
}

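/*
 * Worked example: with a single region covering [0x1000, 0x2000),
 * memblock_add_region(type, 0x2000, 0x1000) takes the "adjacent < 0"
 * branch and grows the region to [0x1000, 0x3000), while a block at
 * base 0x4000 coalesces with nothing and gets its own slot in the
 * sorted table.
 */
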
long memblock_add(u64 base, u64 size)
{
	/* On pSeries LPAR systems, the first MEMBLOCK is our RMO region. */
	if (base == 0)
		memblock.rmo_size = size;

	return memblock_add_region(&memblock.memory, base, size);
}

static long __memblock_remove(struct memblock_type *type, u64 base, u64 size)
{
	u64 rgnbegin, rgnend;
	u64 end = base + size;
	int i;

	rgnbegin = rgnend = 0; /* suppress gcc warnings */

	/* Find the region where (base, size) belongs to */
	for (i = 0; i < type->cnt; i++) {
		rgnbegin = type->regions[i].base;
		rgnend = rgnbegin + type->regions[i].size;

		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}

	/* Didn't find the region */
	if (i == type->cnt)
		return -1;

	/* Check to see if we are removing entire region */
	if ((rgnbegin == base) && (rgnend == end)) {
		memblock_remove_region(type, i);
		return 0;
	}

	/* Check to see if region is matching at the front */
	if (rgnbegin == base) {
		type->regions[i].base = end;
		type->regions[i].size -= size;
		return 0;
	}

	/* Check to see if the region is matching at the end */
	if (rgnend == end) {
		type->regions[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after the hole.
	 */
	type->regions[i].size = base - type->regions[i].base;
	return memblock_add_region(type, end, rgnend - end);
}

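/*
 * For instance, removing [0x2000, 0x3000) from a region covering
 * [0x1000, 0x4000) takes the split path above: the existing entry is
 * trimmed to [0x1000, 0x2000) and a new entry for [0x3000, 0x4000) is
 * added back via memblock_add_region().
 */
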
long memblock_remove(u64 base, u64 size)
{
	return __memblock_remove(&memblock.memory, base, size);
}

long __init memblock_free(u64 base, u64 size)
{
	return __memblock_remove(&memblock.reserved, base, size);
}

long __init memblock_reserve(u64 base, u64 size)
{
	struct memblock_type *_rgn = &memblock.reserved;

	BUG_ON(0 == size);

	return memblock_add_region(_rgn, base, size);
}

long memblock_overlaps_region(struct memblock_type *type, u64 base, u64 size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++) {
		u64 rgnbase = type->regions[i].base;
		u64 rgnsize = type->regions[i].size;
		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < type->cnt) ? i : -1;
}

static u64 memblock_align_down(u64 addr, u64 size)
{
	return addr & ~(size - 1);
}

static u64 memblock_align_up(u64 addr, u64 size)
{
	return (addr + (size - 1)) & ~(size - 1);
}

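/*
 * Both helpers assume a power-of-two alignment, since they work by
 * masking: memblock_align_up(0x1234, 0x1000) == 0x2000 and
 * memblock_align_down(0x1234, 0x1000) == 0x1000.
 */
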
static u64 __init memblock_alloc_region(u64 start, u64 end,
					u64 size, u64 align)
{
	u64 base, res_base;
	long j;

	base = memblock_align_down((end - size), align);
	while (start <= base) {
		j = memblock_overlaps_region(&memblock.reserved, base, size);
		if (j < 0) {
			/* this area isn't reserved, take it */
			if (memblock_add_region(&memblock.reserved, base, size) < 0)
				base = ~(u64)0;
			return base;
		}
		res_base = memblock.reserved.regions[j].base;
		if (res_base < size)
			break;
		base = memblock_align_down(res_base - size, align);
	}

	return ~(u64)0;
}

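/*
 * The loop above scans downward from the top of [start, end): whenever
 * the candidate block overlaps a reserved region, the next candidate is
 * aligned down from just below that region's base, and ~(u64)0 is
 * returned if nothing fits.
 */
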
u64 __weak __init memblock_nid_range(u64 start, u64 end, int *nid)
{
	*nid = 0;

	return end;
}

static u64 __init memblock_alloc_nid_region(struct memblock_region *mp,
					    u64 size, u64 align, int nid)
{
	u64 start, end;

	start = mp->base;
	end = start + mp->size;

	start = memblock_align_up(start, align);
	while (start < end) {
		u64 this_end;
		int this_nid;

		this_end = memblock_nid_range(start, end, &this_nid);
		if (this_nid == nid) {
			u64 ret = memblock_alloc_region(start, this_end, size, align);
			if (ret != ~(u64)0)
				return ret;
		}
		start = this_end;
	}

	return ~(u64)0;
}

u64 __init memblock_alloc_nid(u64 size, u64 align, int nid)
{
	struct memblock_type *mem = &memblock.memory;
	int i;

	BUG_ON(0 == size);

	/* We do a bottom-up search for a region with the right
	 * nid since that's easier considering how memblock_nid_range()
	 * works
	 */
	size = memblock_align_up(size, align);

	for (i = 0; i < mem->cnt; i++) {
		u64 ret = memblock_alloc_nid_region(&mem->regions[i],
						    size, align, nid);
		if (ret != ~(u64)0)
			return ret;
	}

	return memblock_alloc(size, align);
}

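/*
 * Note the fallback: if no region on the requested node can satisfy
 * the allocation, memblock_alloc_nid() still returns memory from
 * anywhere via memblock_alloc() rather than failing.
 */
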
u64 __init memblock_alloc(u64 size, u64 align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

u64 __init memblock_alloc_base(u64 size, u64 align, u64 max_addr)
{
	u64 alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}

u64 __init __memblock_alloc_base(u64 size, u64 align, u64 max_addr)
{
	long i;
	u64 base = 0;
	u64 res_base;

	BUG_ON(0 == size);

	size = memblock_align_up(size, align);

	/* Pump up max_addr */
	if (max_addr == MEMBLOCK_ALLOC_ACCESSIBLE)
		max_addr = memblock.current_limit;

	/* We do a top-down search, this tends to limit memory
	 * fragmentation by keeping early boot allocs near the
	 * top of memory
	 */
	for (i = memblock.memory.cnt - 1; i >= 0; i--) {
		u64 memblockbase = memblock.memory.regions[i].base;
		u64 memblocksize = memblock.memory.regions[i].size;

		if (memblocksize < size)
			continue;
		base = min(memblockbase + memblocksize, max_addr);
		res_base = memblock_alloc_region(memblockbase, base, size, align);
		if (res_base != ~(u64)0)
			return res_base;
	}
	return 0;
}

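/*
 * Mind the two failure conventions: memblock_alloc_region() reports
 * failure as ~(u64)0, while __memblock_alloc_base() reports it as 0,
 * which is what memblock_alloc_base() checks before panicking.
 */
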
/* You must call memblock_analyze() before this. */
u64 __init memblock_phys_mem_size(void)
{
	return memblock.memory.size;
}

u64 memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

/* You must call memblock_analyze() after this. */
void __init memblock_enforce_memory_limit(u64 memory_limit)
{
	unsigned long i;
	u64 limit;
	struct memblock_region *p;

	if (!memory_limit)
		return;

	/* Truncate the memblock regions to satisfy the memory limit. */
	limit = memory_limit;
	for (i = 0; i < memblock.memory.cnt; i++) {
		if (limit > memblock.memory.regions[i].size) {
			limit -= memblock.memory.regions[i].size;
			continue;
		}

		memblock.memory.regions[i].size = limit;
		memblock.memory.cnt = i + 1;
		break;
	}

	if (memblock.memory.regions[0].size < memblock.rmo_size)
		memblock.rmo_size = memblock.memory.regions[0].size;

	memory_limit = memblock_end_of_DRAM();

	/* And truncate any reserves above the limit also. */
	for (i = 0; i < memblock.reserved.cnt; i++) {
		p = &memblock.reserved.regions[i];

		if (p->base > memory_limit)
			p->size = 0;
		else if ((p->base + p->size) > memory_limit)
			p->size = memory_limit - p->base;

		if (p->size == 0) {
			memblock_remove_region(&memblock.reserved, i);
			i--;
		}
	}
}

static int memblock_search(struct memblock_type *type, u64 addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}

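/*
 * The binary search above relies on memblock_add_region() keeping each
 * regions array sorted by base and non-overlapping. E.g. looking up
 * 0x2800 in { [0x1000, 0x2000), [0x2000, 0x3000) } converges on the
 * second entry and returns index 1.
 */
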
int __init memblock_is_reserved(u64 addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

int memblock_is_memory(u64 addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

int memblock_is_region_memory(u64 base, u64 size)
{
	int idx = memblock_search(&memblock.memory, base);

	if (idx == -1)
		return 0;
	return memblock.memory.regions[idx].base <= base &&
		(memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= (base + size);
}

int memblock_is_region_reserved(u64 base, u64 size)
{
	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}

void __init memblock_set_current_limit(u64 limit)
{
	memblock.current_limit = limit;
}