/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/types.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/lmb.h>
#ifdef CONFIG_PPC32
#include "mmu_decl.h"		/* for __max_low_memory */
#endif

#undef DEBUG

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#define LMB_ALLOC_ANYWHERE	0

struct lmb lmb;

void lmb_dump_all(void)
{
#ifdef DEBUG
	unsigned long i;

	DBG("lmb_dump_all:\n");
	DBG("    memory.cnt            = 0x%lx\n", lmb.memory.cnt);
	DBG("    memory.size           = 0x%lx\n", lmb.memory.size);
	for (i = 0; i < lmb.memory.cnt; i++) {
		DBG("    memory.region[0x%lx].base = 0x%lx\n",
		    i, lmb.memory.region[i].base);
		DBG("                   .size     = 0x%lx\n",
		    lmb.memory.region[i].size);
	}

	DBG("\n    reserved.cnt        = 0x%lx\n", lmb.reserved.cnt);
	DBG("    reserved.size         = 0x%lx\n", lmb.reserved.size);
	for (i = 0; i < lmb.reserved.cnt; i++) {
		DBG("    reserved.region[0x%lx].base = 0x%lx\n",
		    i, lmb.reserved.region[i].base);
		DBG("                     .size     = 0x%lx\n",
		    lmb.reserved.region[i].size);
	}
#endif /* DEBUG */
}

/* Return non-zero if the two address ranges [base, base+size) intersect. */
static unsigned long __init lmb_addrs_overlap(unsigned long base1,
		unsigned long size1, unsigned long base2, unsigned long size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

/*
 * Return 1 if range 2 immediately follows range 1, -1 if range 1
 * immediately follows range 2, and 0 if the ranges are not adjacent.
 */
static long __init lmb_addrs_adjacent(unsigned long base1, unsigned long size1,
		unsigned long base2, unsigned long size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}

static long __init lmb_regions_adjacent(struct lmb_region *rgn,
		unsigned long r1, unsigned long r2)
{
	unsigned long base1 = rgn->region[r1].base;
	unsigned long size1 = rgn->region[r1].size;
	unsigned long base2 = rgn->region[r2].base;
	unsigned long size2 = rgn->region[r2].size;

	return lmb_addrs_adjacent(base1, size1, base2, size2);
}

static void __init lmb_remove_region(struct lmb_region *rgn, unsigned long r)
{
	unsigned long i;

	for (i = r; i < rgn->cnt - 1; i++) {
		rgn->region[i].base = rgn->region[i + 1].base;
		rgn->region[i].size = rgn->region[i + 1].size;
	}
	rgn->cnt--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void __init lmb_coalesce_regions(struct lmb_region *rgn,
		unsigned long r1, unsigned long r2)
{
	rgn->region[r1].size += rgn->region[r2].size;
	lmb_remove_region(rgn, r2);
}

/* This routine is called with relocation disabled. */
void __init lmb_init(void)
{
	/* Create a dummy zero size LMB which will get coalesced away later.
	 * This simplifies the lmb_add() code below...
	 */
	lmb.memory.region[0].base = 0;
	lmb.memory.region[0].size = 0;
	lmb.memory.cnt = 1;

	/* Ditto. */
	lmb.reserved.region[0].base = 0;
	lmb.reserved.region[0].size = 0;
	lmb.reserved.cnt = 1;
}
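
/*
 * Illustrative trace (an annotation, not part of the original file): the
 * dummy entry is what lets lmb_add_region() below avoid special-casing an
 * empty table.  For a hypothetical first registration, lmb_add(0, 0x10000000),
 * lmb_addrs_adjacent(0, 0x10000000, 0, 0) returns -1 because the new range
 * starts exactly where the zero-size dummy ends, so the adjacent < 0 branch
 * simply grows region[0] from size 0 to 0x10000000 in place: the dummy is
 * absorbed by coalescing rather than left behind or specially removed.
 */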

/* This routine may be called with relocation disabled. */
void __init lmb_analyze(void)
{
	int i;

	lmb.memory.size = 0;

	for (i = 0; i < lmb.memory.cnt; i++)
		lmb.memory.size += lmb.memory.region[i].size;
}
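
/*
 * Usage sketch (illustrative only; the real call sites live in the early
 * platform setup code, not in this file).  On a hypothetical 256MB machine
 * the expected ordering would be roughly:
 *
 *	lmb_init();
 *	lmb_add(0, 0x10000000);
 *	lmb_reserve(0, 0x4000);
 *	lmb_analyze();
 *	addr = lmb_alloc(0x1000, 0x1000);
 *
 * lmb_analyze() must run before lmb_phys_mem_size() is meaningful, since
 * that accessor simply returns the total computed here.
 */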

/* This routine is called with relocation disabled. */
static long __init lmb_add_region(struct lmb_region *rgn, unsigned long base,
		unsigned long size)
{
	unsigned long i, coalesced = 0;
	long adjacent;

	/* First try and coalesce this LMB with another. */
	for (i = 0; i < rgn->cnt; i++) {
		unsigned long rgnbase = rgn->region[i].base;
		unsigned long rgnsize = rgn->region[i].size;

		adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
		if (adjacent > 0) {
			/* New range immediately precedes this region;
			 * extend the region downward. */
			rgn->region[i].base -= size;
			rgn->region[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			/* New range immediately follows this region;
			 * extend the region upward. */
			rgn->region[i].size += size;
			coalesced++;
			break;
		}
	}

	if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i + 1)) {
		lmb_coalesce_regions(rgn, i, i + 1);
		coalesced++;
	}

	if (coalesced)
		return coalesced;
	if (rgn->cnt >= MAX_LMB_REGIONS)
		return -1;

	/* Couldn't coalesce the LMB, so add it to the sorted table. */
	for (i = rgn->cnt - 1; i >= 0; i--) {
		if (base < rgn->region[i].base) {
			rgn->region[i + 1].base = rgn->region[i].base;
			rgn->region[i + 1].size = rgn->region[i].size;
		} else {
			rgn->region[i + 1].base = base;
			rgn->region[i + 1].size = size;
			break;
		}
	}
	rgn->cnt++;

	return 0;
}
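
/*
 * Worked example (illustrative, not from the original source): given an
 * existing entry { base = 0x1000, size = 0x1000 }, adding { 0x2000, 0x1000 }
 * yields adjacent == -1 (the new range starts exactly where the entry ends),
 * so the entry grows in place to { 0x1000, 0x2000 }; adding { 0x0, 0x1000 }
 * instead yields adjacent == 1 and the entry is extended downward to
 * { 0x0, 0x2000 }.  Only ranges that touch no existing entry fall through
 * to the sorted insertion above, and the zero-size dummy created by
 * lmb_init() guarantees that insertion loop always terminates through its
 * "else" branch.
 */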

/* This routine may be called with relocation disabled. */
long __init lmb_add(unsigned long base, unsigned long size)
{
	struct lmb_region *_rgn = &(lmb.memory);

	/* On pSeries LPAR systems, the first LMB is our RMO region. */
	if (base == 0)
		lmb.rmo_size = size;

	return lmb_add_region(_rgn, base, size);
}

long __init lmb_reserve(unsigned long base, unsigned long size)
{
	struct lmb_region *_rgn = &(lmb.reserved);

	BUG_ON(0 == size);

	return lmb_add_region(_rgn, base, size);
}

long __init lmb_overlaps_region(struct lmb_region *rgn, unsigned long base,
		unsigned long size)
{
	unsigned long i;

	for (i = 0; i < rgn->cnt; i++) {
		unsigned long rgnbase = rgn->region[i].base;
		unsigned long rgnsize = rgn->region[i].size;
		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < rgn->cnt) ? i : -1;
}

unsigned long __init lmb_alloc(unsigned long size, unsigned long align)
{
	return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
}

unsigned long __init lmb_alloc_base(unsigned long size, unsigned long align,
		unsigned long max_addr)
{
	unsigned long alloc;

	alloc = __lmb_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
		      size, max_addr);

	return alloc;
}

unsigned long __init __lmb_alloc_base(unsigned long size, unsigned long align,
		unsigned long max_addr)
{
	long i, j;
	unsigned long base = 0;

	BUG_ON(0 == size);

#ifdef CONFIG_PPC32
	/* On 32-bit, make sure we allocate lowmem */
	if (max_addr == LMB_ALLOC_ANYWHERE)
		max_addr = __max_low_memory;
#endif
	/* Scan the memory regions from highest to lowest. */
	for (i = lmb.memory.cnt - 1; i >= 0; i--) {
		unsigned long lmbbase = lmb.memory.region[i].base;
		unsigned long lmbsize = lmb.memory.region[i].size;

		if (max_addr == LMB_ALLOC_ANYWHERE)
			base = _ALIGN_DOWN(lmbbase + lmbsize - size, align);
		else if (lmbbase < max_addr) {
			base = min(lmbbase + lmbsize, max_addr);
			base = _ALIGN_DOWN(base - size, align);
		} else
			continue;

		/* Slide the candidate downward until it no longer
		 * overlaps an existing reservation. */
		while ((lmbbase <= base) &&
		       ((j = lmb_overlaps_region(&lmb.reserved, base, size)) >= 0))
			base = _ALIGN_DOWN(lmb.reserved.region[j].base - size,
					   align);

		if ((base != 0) && (lmbbase <= base))
			break;
	}

	if (i < 0)
		return 0;

	lmb_add_region(&lmb.reserved, base, size);

	return base;
}
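
/*
 * Worked example (illustrative): with memory { 0x0, 0x10000000 } and a
 * reserved entry { 0xff00000, 0x100000 } at the top, a request of
 * __lmb_alloc_base(0x1000, 0x1000, LMB_ALLOC_ANYWHERE) first tries
 * base = 0x10000000 - 0x1000 = 0xffff000; that overlaps the reservation,
 * so the while loop slides the candidate down to
 * _ALIGN_DOWN(0xff00000 - 0x1000, 0x1000) = 0xfeff000, which is clear,
 * gets recorded in lmb.reserved, and is returned.
 */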

/* You must call lmb_analyze() before this. */
unsigned long __init lmb_phys_mem_size(void)
{
	return lmb.memory.size;
}

/* The region table is kept sorted, so the last entry ends DRAM. */
unsigned long __init lmb_end_of_DRAM(void)
{
	int idx = lmb.memory.cnt - 1;

	return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
}

/* You must call lmb_analyze() after this. */
void __init lmb_enforce_memory_limit(unsigned long memory_limit)
{
	unsigned long i, limit;
	struct lmb_property *p;

	if (!memory_limit)
		return;

	/* Truncate the lmb regions to satisfy the memory limit. */
	limit = memory_limit;
	for (i = 0; i < lmb.memory.cnt; i++) {
		if (limit > lmb.memory.region[i].size) {
			limit -= lmb.memory.region[i].size;
			continue;
		}

		lmb.memory.region[i].size = limit;
		lmb.memory.cnt = i + 1;
		break;
	}

	lmb.rmo_size = lmb.memory.region[0].size;

	/* And truncate any reserves above the limit also. */
	for (i = 0; i < lmb.reserved.cnt; i++) {
		p = &lmb.reserved.region[i];

		if (p->base > memory_limit)
			p->size = 0;
		else if ((p->base + p->size) > memory_limit)
			p->size = memory_limit - p->base;

		if (p->size == 0) {
			lmb_remove_region(&lmb.reserved, i);
			i--;
		}
	}
}
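
/*
 * Worked example (illustrative): on a machine that registered regions
 * { 0x0, 0x40000000 } and { 0x40000000, 0x40000000 }, a limit of
 * 0x60000000 leaves the first region untouched (limit drops to
 * 0x20000000), truncates the second to { 0x40000000, 0x20000000 }, and
 * clips or discards any reserved entries above 0x60000000.  Per the
 * comment above, the caller must then re-run lmb_analyze() so that
 * lmb.memory.size matches the clipped map.
 */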