arch/powerpc/mm/lmb.c
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.     June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/types.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/lmb.h>
#ifdef CONFIG_PPC32
#include "mmu_decl.h"           /* for __max_low_memory */
#endif

#undef DEBUG

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#define LMB_ALLOC_ANYWHERE      0

struct lmb lmb;

void lmb_dump_all(void)
{
#ifdef DEBUG
        unsigned long i;

        DBG("lmb_dump_all:\n");
        DBG("    memory.cnt  = 0x%lx\n", lmb.memory.cnt);
        DBG("    memory.size = 0x%lx\n", lmb.memory.size);
        for (i = 0; i < lmb.memory.cnt; i++) {
                DBG("    memory.region[0x%lx].base = 0x%lx\n",
                    i, lmb.memory.region[i].base);
                DBG("                       .size = 0x%lx\n",
                    lmb.memory.region[i].size);
        }

        DBG("\n    reserved.cnt  = 0x%lx\n", lmb.reserved.cnt);
        DBG("    reserved.size = 0x%lx\n", lmb.reserved.size);
        for (i = 0; i < lmb.reserved.cnt; i++) {
                DBG("    reserved.region[0x%lx].base = 0x%lx\n",
                    i, lmb.reserved.region[i].base);
                DBG("                         .size = 0x%lx\n",
                    lmb.reserved.region[i].size);
        }
#endif /* DEBUG */
}

static unsigned long __init lmb_addrs_overlap(unsigned long base1,
                unsigned long size1, unsigned long base2, unsigned long size2)
{
        return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

static long __init lmb_addrs_adjacent(unsigned long base1, unsigned long size1,
                unsigned long base2, unsigned long size2)
{
        if (base2 == base1 + size1)
                return 1;
        else if (base1 == base2 + size2)
                return -1;

        return 0;
}
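
/*
 * For example, lmb_addrs_adjacent(0x1000, 0x1000, 0x2000, 0x1000)
 * returns 1 because the second region starts exactly where the first
 * one ends; swapping the two regions returns -1, and any other layout
 * (disjoint or overlapping) returns 0.  Regions are half-open, i.e. a
 * region covers [base, base + size).
 */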

static long __init lmb_regions_adjacent(struct lmb_region *rgn,
                unsigned long r1, unsigned long r2)
{
        unsigned long base1 = rgn->region[r1].base;
        unsigned long size1 = rgn->region[r1].size;
        unsigned long base2 = rgn->region[r2].base;
        unsigned long size2 = rgn->region[r2].size;

        return lmb_addrs_adjacent(base1, size1, base2, size2);
}

static void __init lmb_remove_region(struct lmb_region *rgn, unsigned long r)
{
        unsigned long i;

        for (i = r; i < rgn->cnt - 1; i++) {
                rgn->region[i].base = rgn->region[i + 1].base;
                rgn->region[i].size = rgn->region[i + 1].size;
        }
        rgn->cnt--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void __init lmb_coalesce_regions(struct lmb_region *rgn,
                unsigned long r1, unsigned long r2)
{
        rgn->region[r1].size += rgn->region[r2].size;
        lmb_remove_region(rgn, r2);
}

/* This routine is called with relocation disabled. */
void __init lmb_init(void)
{
        /* Create a dummy zero size LMB which will get coalesced away later.
         * This simplifies the lmb_add() code below...
         */
        lmb.memory.region[0].base = 0;
        lmb.memory.region[0].size = 0;
        lmb.memory.cnt = 1;

        /* Ditto. */
        lmb.reserved.region[0].base = 0;
        lmb.reserved.region[0].size = 0;
        lmb.reserved.cnt = 1;
}
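
/*
 * After lmb_init() each array holds the single dummy entry
 * { base = 0, size = 0 } with cnt == 1.  A first call such as
 * lmb_add(0, 0x8000000) is absorbed into that dummy entry by the
 * coalescing logic in lmb_add_region() (the dummy ends exactly where
 * the new region starts), which is why the add path never has to
 * special-case an empty table.
 */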

/* This routine may be called with relocation disabled. */
void __init lmb_analyze(void)
{
        int i;

        lmb.memory.size = 0;

        for (i = 0; i < lmb.memory.cnt; i++)
                lmb.memory.size += lmb.memory.region[i].size;
}

/* This routine is called with relocation disabled. */
static long __init lmb_add_region(struct lmb_region *rgn, unsigned long base,
                unsigned long size)
{
        unsigned long coalesced = 0;
        long adjacent, i;

        /* First try and coalesce this LMB with another. */
        for (i = 0; i < rgn->cnt; i++) {
                unsigned long rgnbase = rgn->region[i].base;
                unsigned long rgnsize = rgn->region[i].size;

                if ((rgnbase == base) && (rgnsize == size))
                        /* Already have this region, so we're done */
                        return 0;

                adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
                if (adjacent > 0) {
                        rgn->region[i].base -= size;
                        rgn->region[i].size += size;
                        coalesced++;
                        break;
                } else if (adjacent < 0) {
                        rgn->region[i].size += size;
                        coalesced++;
                        break;
                }
        }

        /* The freshly grown region may now also bridge the gap to the
         * one that follows it. */
        if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i + 1)) {
                lmb_coalesce_regions(rgn, i, i + 1);
                coalesced++;
        }

        if (coalesced)
                return coalesced;
        if (rgn->cnt >= MAX_LMB_REGIONS)
                return -1;

        /* Couldn't coalesce the LMB, so add it to the sorted table. */
        for (i = rgn->cnt - 1; i >= 0; i--) {
                if (base < rgn->region[i].base) {
                        rgn->region[i + 1].base = rgn->region[i].base;
                        rgn->region[i + 1].size = rgn->region[i].size;
                } else {
                        rgn->region[i + 1].base = base;
                        rgn->region[i + 1].size = size;
                        break;
                }
        }
        rgn->cnt++;

        return 0;
}
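
/*
 * Worked example: starting from the post-lmb_init() state, the calls
 *
 *      lmb_add(0x100000, 0x100000);
 *      lmb_add(0x200000, 0x100000);
 *
 * first append { 0x100000, 0x100000 } after the dummy entry, then
 * extend it in place through the "adjacent < 0" path above, leaving
 * the table as { 0, 0 }, { 0x100000, 0x200000 } with cnt == 2.
 */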

/* This routine may be called with relocation disabled. */
long __init lmb_add(unsigned long base, unsigned long size)
{
        struct lmb_region *_rgn = &(lmb.memory);

        /* On pSeries LPAR systems, the first LMB is our RMO region. */
        if (base == 0)
                lmb.rmo_size = size;

        return lmb_add_region(_rgn, base, size);
}

long __init lmb_reserve(unsigned long base, unsigned long size)
{
        struct lmb_region *_rgn = &(lmb.reserved);

        BUG_ON(0 == size);

        return lmb_add_region(_rgn, base, size);
}
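
/*
 * Typical early-boot usage, as a sketch (the exact calls are platform
 * specific and the names below are illustrative): the device-tree
 * memory nodes are fed to lmb_add(), then everything that must not be
 * handed out by the allocator is carved out with lmb_reserve():
 *
 *      lmb_add(membase, memsize);
 *      lmb_reserve(0, PAGE_SIZE);
 *      lmb_reserve(initrd_base, initrd_size);
 */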

long __init lmb_overlaps_region(struct lmb_region *rgn, unsigned long base,
                unsigned long size)
{
        unsigned long i;

        for (i = 0; i < rgn->cnt; i++) {
                unsigned long rgnbase = rgn->region[i].base;
                unsigned long rgnsize = rgn->region[i].size;
                if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
                        break;
        }

        return (i < rgn->cnt) ? i : -1;
}

unsigned long __init lmb_alloc(unsigned long size, unsigned long align)
{
        return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
}

unsigned long __init lmb_alloc_base(unsigned long size, unsigned long align,
                unsigned long max_addr)
{
        unsigned long alloc;

        alloc = __lmb_alloc_base(size, align, max_addr);

        if (alloc == 0)
                panic("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
                      size, max_addr);

        return alloc;
}

unsigned long __init __lmb_alloc_base(unsigned long size, unsigned long align,
                unsigned long max_addr)
{
        long i, j;
        unsigned long base = 0;

        BUG_ON(0 == size);

#ifdef CONFIG_PPC32
        /* On 32-bit, make sure we allocate lowmem */
        if (max_addr == LMB_ALLOC_ANYWHERE)
                max_addr = __max_low_memory;
#endif
        /* Walk the memory regions from highest to lowest. */
        for (i = lmb.memory.cnt - 1; i >= 0; i--) {
                unsigned long lmbbase = lmb.memory.region[i].base;
                unsigned long lmbsize = lmb.memory.region[i].size;

                if (max_addr == LMB_ALLOC_ANYWHERE)
                        base = _ALIGN_DOWN(lmbbase + lmbsize - size, align);
                else if (lmbbase < max_addr) {
                        base = min(lmbbase + lmbsize, max_addr);
                        base = _ALIGN_DOWN(base - size, align);
                } else
                        continue;

                /* Slide downward past any reserved region we collide with. */
                while ((lmbbase <= base) &&
                       ((j = lmb_overlaps_region(&lmb.reserved, base, size)) >= 0))
                        base = _ALIGN_DOWN(lmb.reserved.region[j].base - size,
                                           align);

                if ((base != 0) && (lmbbase <= base))
                        break;
        }

        if (i < 0)
                return 0;

        lmb_add_region(&lmb.reserved, base, size);

        return base;
}
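
/*
 * Example: with a single memory region { 0, 0x10000000 } and nothing
 * yet reserved, __lmb_alloc_base(0x1000, 0x1000, LMB_ALLOC_ANYWHERE)
 * returns 0xffff000, the highest aligned base that still leaves room
 * for the allocation, and records { 0xffff000, 0x1000 } in
 * lmb.reserved so that the next allocation lands just below it.
 */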

/* You must call lmb_analyze() before this. */
unsigned long __init lmb_phys_mem_size(void)
{
        return lmb.memory.size;
}

unsigned long __init lmb_end_of_DRAM(void)
{
        int idx = lmb.memory.cnt - 1;

        return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
}

/* You must call lmb_analyze() after this. */
void __init lmb_enforce_memory_limit(unsigned long memory_limit)
{
        unsigned long i, limit;
        struct lmb_property *p;

        if (!memory_limit)
                return;

        /* Truncate the lmb regions to satisfy the memory limit. */
        limit = memory_limit;
        for (i = 0; i < lmb.memory.cnt; i++) {
                if (limit > lmb.memory.region[i].size) {
                        limit -= lmb.memory.region[i].size;
                        continue;
                }

                lmb.memory.region[i].size = limit;
                lmb.memory.cnt = i + 1;
                break;
        }

        if (lmb.memory.region[0].size < lmb.rmo_size)
                lmb.rmo_size = lmb.memory.region[0].size;

        /* And truncate any reserves above the limit also. */
        for (i = 0; i < lmb.reserved.cnt; i++) {
                p = &lmb.reserved.region[i];

                if (p->base > memory_limit)
                        p->size = 0;
                else if ((p->base + p->size) > memory_limit)
                        p->size = memory_limit - p->base;

                if (p->size == 0) {
                        lmb_remove_region(&lmb.reserved, i);
                        i--;
                }
        }
}
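
/*
 * Example: enforcing a limit of 0x10000000 (256MB, e.g. from mem=256M
 * on the command line) against a single region { 0, 0x40000000 }
 * shrinks it to { 0, 0x10000000 }.  Reserved regions wholly above the
 * limit are deleted, and one straddling the boundary is clipped so
 * that it ends exactly at the limit.
 */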

int __init lmb_is_reserved(unsigned long addr)
{
        int i;

        for (i = 0; i < lmb.reserved.cnt; i++) {
                unsigned long upper = lmb.reserved.region[i].base +
                        lmb.reserved.region[i].size - 1;
                if ((addr >= lmb.reserved.region[i].base) && (addr <= upper))
                        return 1;
        }
        return 0;
}
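
/*
 * A sketch of the intended caller-side use: when handing boot memory
 * over to the page allocator, skip anything LMB has reserved, e.g.
 *
 *      for (addr = start; addr < end; addr += PAGE_SIZE)
 *              if (!lmb_is_reserved(addr))
 *                      release_page(addr);
 *
 * where release_page() is an illustrative stand-in for the real
 * free routine.
 */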