[LMB]: Make lmb support large physical addressing
lib/lmb.c
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/lmb.h>

#undef DEBUG

#ifdef DEBUG
#define DBG(fmt...) LMB_DBG(fmt)
#else
#define DBG(fmt...)
#endif

#define LMB_ALLOC_ANYWHERE	0

struct lmb lmb;

void lmb_dump_all(void)
{
#ifdef DEBUG
	unsigned long i;

	DBG("lmb_dump_all:\n");
	DBG("    memory.cnt          = 0x%lx\n", lmb.memory.cnt);
	DBG("    memory.size         = 0x%llx\n",
	    (unsigned long long)lmb.memory.size);
	for (i = 0; i < lmb.memory.cnt; i++) {
		DBG("    memory.region[0x%lx].base = 0x%llx\n",
		    i, (unsigned long long)lmb.memory.region[i].base);
		DBG("                 .size = 0x%llx\n",
		    (unsigned long long)lmb.memory.region[i].size);
	}

	DBG("\n    reserved.cnt     = 0x%lx\n", lmb.reserved.cnt);
	DBG("    reserved.size        = 0x%llx\n",
	    (unsigned long long)lmb.reserved.size);
	for (i = 0; i < lmb.reserved.cnt; i++) {
		DBG("    reserved.region[0x%lx].base = 0x%llx\n",
		    i, (unsigned long long)lmb.reserved.region[i].base);
		DBG("                 .size = 0x%llx\n",
		    (unsigned long long)lmb.reserved.region[i].size);
	}
#endif /* DEBUG */
}

static unsigned long __init lmb_addrs_overlap(u64 base1, u64 size1,
					      u64 base2, u64 size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}
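
/*
 * Example (illustrative, not part of the original file): overlap here
 * means the half-open ranges [base1, base1+size1) and [base2, base2+size2)
 * intersect.  (0x1000, 0x1000) vs (0x1800, 0x1000) overlaps, since
 * 0x1000 < 0x2800 and 0x1800 < 0x2000; (0x1000, 0x1000) vs
 * (0x2000, 0x1000) does not: the ranges merely touch, and
 * 0x2000 < 0x2000 is false.
 */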

static long __init lmb_addrs_adjacent(u64 base1, u64 size1,
				      u64 base2, u64 size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}
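
/*
 * Example (illustrative, not part of the original file):
 *	lmb_addrs_adjacent(0x1000, 0x1000, 0x2000, 0x1000) ==  1
 *		(the second region starts exactly where the first ends)
 *	lmb_addrs_adjacent(0x2000, 0x1000, 0x1000, 0x1000) == -1
 *		(the first region starts exactly where the second ends)
 * Any gap or overlap between the two yields 0.
 */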

static long __init lmb_regions_adjacent(struct lmb_region *rgn,
					unsigned long r1, unsigned long r2)
{
	u64 base1 = rgn->region[r1].base;
	u64 size1 = rgn->region[r1].size;
	u64 base2 = rgn->region[r2].base;
	u64 size2 = rgn->region[r2].size;

	return lmb_addrs_adjacent(base1, size1, base2, size2);
}

static void __init lmb_remove_region(struct lmb_region *rgn, unsigned long r)
{
	unsigned long i;

	for (i = r; i < rgn->cnt - 1; i++) {
		rgn->region[i].base = rgn->region[i + 1].base;
		rgn->region[i].size = rgn->region[i + 1].size;
	}
	rgn->cnt--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void __init lmb_coalesce_regions(struct lmb_region *rgn,
					unsigned long r1, unsigned long r2)
{
	rgn->region[r1].size += rgn->region[r2].size;
	lmb_remove_region(rgn, r2);
}

/* This routine is called with relocation disabled. */
void __init lmb_init(void)
{
	/* Create a dummy zero-size LMB which will get coalesced away later.
	 * This simplifies the lmb_add() code below...
	 */
	lmb.memory.region[0].base = 0;
	lmb.memory.region[0].size = 0;
	lmb.memory.cnt = 1;

	/* Ditto. */
	lmb.reserved.region[0].base = 0;
	lmb.reserved.region[0].size = 0;
	lmb.reserved.cnt = 1;
}

/* This routine may be called with relocation disabled. */
void __init lmb_analyze(void)
{
	int i;

	lmb.memory.size = 0;

	for (i = 0; i < lmb.memory.cnt; i++)
		lmb.memory.size += lmb.memory.region[i].size;
}

/* This routine is called with relocation disabled. */
static long __init lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
{
	unsigned long coalesced = 0;
	long adjacent, i;

	if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
		return 0;
	}

	/* First try and coalesce this LMB with another. */
	for (i = 0; i < rgn->cnt; i++) {
		u64 rgnbase = rgn->region[i].base;
		u64 rgnsize = rgn->region[i].size;

		if ((rgnbase == base) && (rgnsize == size))
			/* Already have this region, so we're done */
			return 0;

		adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
		if (adjacent > 0) {
			rgn->region[i].base -= size;
			rgn->region[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			rgn->region[i].size += size;
			coalesced++;
			break;
		}
	}

	if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i + 1)) {
		lmb_coalesce_regions(rgn, i, i + 1);
		coalesced++;
	}

	if (coalesced)
		return coalesced;
	if (rgn->cnt >= MAX_LMB_REGIONS)
		return -1;

	/* Couldn't coalesce the LMB, so add it to the sorted table. */
	for (i = rgn->cnt - 1; i >= 0; i--) {
		if (base < rgn->region[i].base) {
			rgn->region[i + 1].base = rgn->region[i].base;
			rgn->region[i + 1].size = rgn->region[i].size;
		} else {
			rgn->region[i + 1].base = base;
			rgn->region[i + 1].size = size;
			break;
		}
	}

	/* The insertion loop above never writes a block that belongs at
	 * the very head of the table; handle that case here. */
	if (base < rgn->region[0].base) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
	}
	rgn->cnt++;

	return 0;
}
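
/*
 * Worked example for lmb_add_region() (illustrative, not part of the
 * original file).  Suppose the table holds [0x0000, 0x1000) and
 * [0x3000, 0x4000) and we add base = 0x1000, size = 0x2000:
 *
 *   - the new block starts exactly where [0x0000, 0x1000) ends
 *     (adjacent < 0), so that entry grows upward to [0x0000, 0x3000);
 *   - the grown entry now ends exactly where the next one begins, so
 *     the follow-up check coalesces the pair into [0x0000, 0x4000).
 *
 * A block that touches nothing is slotted into the table at its sorted
 * position instead, and -1 is returned only when all MAX_LMB_REGIONS
 * slots are already in use.
 */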

/* This routine may be called with relocation disabled. */
long __init lmb_add(u64 base, u64 size)
{
	struct lmb_region *_rgn = &lmb.memory;

	/* On pSeries LPAR systems, the first LMB is our RMO region. */
	if (base == 0)
		lmb.rmo_size = size;

	return lmb_add_region(_rgn, base, size);
}

long __init lmb_reserve(u64 base, u64 size)
{
	struct lmb_region *_rgn = &lmb.reserved;

	BUG_ON(0 == size);

	return lmb_add_region(_rgn, base, size);
}
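
/*
 * Typical early-boot call sequence (an illustrative sketch only; the
 * symbols ram_start, ram_size, _stext and _end stand in for whatever
 * the platform code actually registers):
 *
 *	lmb_init();
 *	lmb_add(ram_start, ram_size);
 *	lmb_reserve(__pa(_stext), _end - _stext);
 *	lmb_analyze();
 *	lmb_dump_all();
 */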

long __init lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size)
{
	unsigned long i;

	for (i = 0; i < rgn->cnt; i++) {
		u64 rgnbase = rgn->region[i].base;
		u64 rgnsize = rgn->region[i].size;
		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < rgn->cnt) ? i : -1;
}

u64 __init lmb_alloc(u64 size, u64 align)
{
	return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
}

u64 __init lmb_alloc_base(u64 size, u64 align, u64 max_addr)
{
	u64 alloc;

	alloc = __lmb_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long)size, (unsigned long long)max_addr);

	return alloc;
}

static u64 lmb_align_down(u64 addr, u64 size)
{
	return addr & ~(size - 1);
}

static u64 lmb_align_up(u64 addr, u64 size)
{
	return (addr + (size - 1)) & ~(size - 1);
}
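
/*
 * Both helpers assume "size" (the alignment) is a power of two, so the
 * mask arithmetic is exact.  For example (illustrative only):
 *	lmb_align_down(0x12345, 0x1000) == 0x12000
 *	lmb_align_up(0x12345, 0x1000)   == 0x13000
 *	lmb_align_up(0x12000, 0x1000)   == 0x12000	(already aligned)
 */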

u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
{
	long i, j;
	u64 base = 0;

	BUG_ON(0 == size);

	/* On some platforms, make sure we allocate lowmem */
	if (max_addr == LMB_ALLOC_ANYWHERE)
		max_addr = LMB_REAL_LIMIT;

	for (i = lmb.memory.cnt - 1; i >= 0; i--) {
		u64 lmbbase = lmb.memory.region[i].base;
		u64 lmbsize = lmb.memory.region[i].size;

		if (max_addr == LMB_ALLOC_ANYWHERE)
			base = lmb_align_down(lmbbase + lmbsize - size, align);
		else if (lmbbase < max_addr) {
			base = min(lmbbase + lmbsize, max_addr);
			base = lmb_align_down(base - size, align);
		} else
			continue;

		while ((lmbbase <= base) &&
		       ((j = lmb_overlaps_region(&lmb.reserved, base, size)) >= 0))
			base = lmb_align_down(lmb.reserved.region[j].base - size,
					      align);

		if ((base != 0) && (lmbbase <= base))
			break;
	}

	if (i < 0)
		return 0;

	if (lmb_add_region(&lmb.reserved, base, lmb_align_up(size, align)) < 0)
		return 0;

	return base;
}
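
/*
 * Illustrative use of the allocator (a sketch, not code from this file):
 * carve 16MB out of memory below 256MB, aligned to 1MB.  The search
 * walks each memory region from the top down, stepping below any
 * reserved block it collides with, so the address returned is the
 * highest fit under max_addr:
 *
 *	u64 addr = lmb_alloc_base(16 << 20, 1 << 20, 256 << 20);
 *
 * lmb_alloc_base() panics when no fit exists; __lmb_alloc_base()
 * returns 0 instead, letting the caller fall back gracefully.
 */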

/* You must call lmb_analyze() before this. */
u64 __init lmb_phys_mem_size(void)
{
	return lmb.memory.size;
}

u64 __init lmb_end_of_DRAM(void)
{
	int idx = lmb.memory.cnt - 1;

	return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
}

/* You must call lmb_analyze() after this. */
void __init lmb_enforce_memory_limit(u64 memory_limit)
{
	unsigned long i;
	u64 limit;
	struct lmb_property *p;

	if (!memory_limit)
		return;

	/* Truncate the lmb regions to satisfy the memory limit. */
	limit = memory_limit;
	for (i = 0; i < lmb.memory.cnt; i++) {
		if (limit > lmb.memory.region[i].size) {
			limit -= lmb.memory.region[i].size;
			continue;
		}

		lmb.memory.region[i].size = limit;
		lmb.memory.cnt = i + 1;
		break;
	}

	if (lmb.memory.region[0].size < lmb.rmo_size)
		lmb.rmo_size = lmb.memory.region[0].size;

	/* And truncate any reserves above the limit also. */
	for (i = 0; i < lmb.reserved.cnt; i++) {
		p = &lmb.reserved.region[i];

		if (p->base > memory_limit)
			p->size = 0;
		else if ((p->base + p->size) > memory_limit)
			p->size = memory_limit - p->base;

		if (p->size == 0) {
			lmb_remove_region(&lmb.reserved, i);
			i--;
		}
	}
}
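
/*
 * Example of the truncation above (illustrative, not part of the
 * original file): given memory regions [0, 256MB) and [512MB, 768MB)
 * with memory_limit = 384MB, the first region consumes 256MB of the
 * budget and the second is cut down to [512MB, 640MB); any regions
 * after it are dropped by rewriting memory.cnt.  Reserved regions are
 * then clipped where they cross memory_limit taken as an absolute
 * address (exact only when memory is contiguous from 0), and any that
 * end up empty are removed.
 */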

int __init lmb_is_reserved(u64 addr)
{
	int i;

	for (i = 0; i < lmb.reserved.cnt; i++) {
		u64 upper = lmb.reserved.region[i].base +
			    lmb.reserved.region[i].size - 1;
		if ((addr >= lmb.reserved.region[i].base) && (addr <= upper))
			return 1;
	}
	return 0;
}
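
/*
 * Example (illustrative, not part of the original file): after
 * lmb_reserve(0x1000, 0x1000), lmb_is_reserved(0x1000) and
 * lmb_is_reserved(0x1fff) both return 1, while lmb_is_reserved(0x2000)
 * returns 0; the check is inclusive of the last reserved byte.
 */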