/*
 *  linux/arch/alpha/mm/numa.c
 *
 *  DISCONTIGMEM NUMA alpha support.
 *
 *  Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
 */

#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/swap.h>
#include <linux/initrd.h>
#include <linux/pfn.h>

#include <asm/hwrpb.h>
#include <asm/pgalloc.h>

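/*
 * One pg_data_t and one bootmem descriptor per node, statically
 * allocated and indexed by node id.
 */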
pg_data_t node_data[MAX_NUMNODES];
bootmem_data_t node_bdata[MAX_NUMNODES];

#undef DEBUG_DISCONTIG
#ifdef DEBUG_DISCONTIG
#define DBGDCONT(args...) printk(args)
#else
#define DBGDCONT(args...)
#endif

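/*
 * Walk every memory cluster in the HWRPB memory data descriptor
 * table.  Each cluster describes one physically contiguous range
 * of pages together with its usage flags.
 */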
#define for_each_mem_cluster(memdesc, cluster, i)		\
	for ((cluster) = (memdesc)->cluster, (i) = 0;		\
	     (i) < (memdesc)->numclusters; (i)++, (cluster)++)

static void __init show_mem_layout(void)
{
	struct memclust_struct * cluster;
	struct memdesc_struct * memdesc;
	int i;

	/* Find free clusters, and init and free the bootmem accordingly.  */
	memdesc = (struct memdesc_struct *)
		(hwrpb->mddt_offset + (unsigned long) hwrpb);

	printk("Raw memory layout:\n");
	for_each_mem_cluster(memdesc, cluster, i) {
		printk(" memcluster %2d, usage %1lx, start %8lu, end %8lu\n",
		       i, cluster->usage, cluster->start_pfn,
		       cluster->start_pfn + cluster->numpages);
	}
}

static void __init
setup_memory_node(int nid, void *kernel_end)
{
	extern unsigned long mem_size_limit;
	struct memclust_struct * cluster;
	struct memdesc_struct * memdesc;
	unsigned long start_kernel_pfn, end_kernel_pfn;
	unsigned long bootmap_size, bootmap_pages, bootmap_start;
	unsigned long start, end;
	unsigned long node_pfn_start, node_pfn_end;
	unsigned long node_min_pfn, node_max_pfn;
	int i;
	unsigned long node_datasz = PFN_UP(sizeof(pg_data_t));
	int show_init = 0;

	/* Find the bounds of the current node.  */
	node_pfn_start = (node_mem_start(nid)) >> PAGE_SHIFT;
	node_pfn_end = node_pfn_start + (node_mem_size(nid) >> PAGE_SHIFT);

	/* Find free clusters, and init and free the bootmem accordingly.  */
	memdesc = (struct memdesc_struct *)
		(hwrpb->mddt_offset + (unsigned long) hwrpb);

	/* Find the bounds of this node (node_min_pfn/node_max_pfn).  */
	node_min_pfn = ~0UL;
	node_max_pfn = 0UL;
	for_each_mem_cluster(memdesc, cluster, i) {
		/* Bit 0 is console/PALcode reserved.  Bit 1 is
		   non-volatile memory -- we might want to mark
		   this for later.  */
		if (cluster->usage & 3)
			continue;

		start = cluster->start_pfn;
		end = start + cluster->numpages;

		if (start >= node_pfn_end || end <= node_pfn_start)
			continue;

		if (!show_init) {
			show_init = 1;
			printk("Initializing bootmem allocator on Node ID %d\n", nid);
		}
		printk(" memcluster %2d, usage %1lx, start %8lu, end %8lu\n",
		       i, cluster->usage, cluster->start_pfn,
		       cluster->start_pfn + cluster->numpages);

		if (start < node_pfn_start)
			start = node_pfn_start;
		if (end > node_pfn_end)
			end = node_pfn_end;

		if (start < node_min_pfn)
			node_min_pfn = start;
		if (end > node_max_pfn)
			node_max_pfn = end;
	}

	if (mem_size_limit && node_max_pfn > mem_size_limit) {
		static int msg_shown = 0;
		if (!msg_shown) {
			msg_shown = 1;
			printk("setup: forcing memory size to %ldK (from %ldK).\n",
			       mem_size_limit << (PAGE_SHIFT - 10),
			       node_max_pfn << (PAGE_SHIFT - 10));
		}
		node_max_pfn = mem_size_limit;
	}

	if (node_min_pfn >= node_max_pfn)
		return;

	/* Update global {min,max}_low_pfn from node information.  */
	if (node_min_pfn < min_low_pfn)
		min_low_pfn = node_min_pfn;
	if (node_max_pfn > max_low_pfn)
		max_pfn = max_low_pfn = node_max_pfn;

	num_physpages += node_max_pfn - node_min_pfn;

#if 0 /* we'll try this one again in a little while */
	/* Cute trick to make sure our local node data is on local memory */
	node_data[nid] = (pg_data_t *)(__va(node_min_pfn << PAGE_SHIFT));
#endif
	/* Quasi-mark the pg_data_t as in-use.  */
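	/* Skipping the first node_datasz pages keeps the bootmem
	   allocator away from them, presumably so the node-local
	   placement trick disabled above can reclaim that space.  */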
	node_min_pfn += node_datasz;
	if (node_min_pfn >= node_max_pfn) {
		printk(" not enough mem to reserve NODE_DATA\n");
		return;
	}
	NODE_DATA(nid)->bdata = &node_bdata[nid];

	printk(" Detected node memory: start %8lu, end %8lu\n",
	       node_min_pfn, node_max_pfn);

	DBGDCONT(" DISCONTIG: node_data[%d] is at 0x%p\n", nid, NODE_DATA(nid));
	DBGDCONT(" DISCONTIG: NODE_DATA(%d)->bdata is at 0x%p\n", nid, NODE_DATA(nid)->bdata);

	/* Find the bounds of kernel memory.  */
	start_kernel_pfn = PFN_DOWN(KERNEL_START_PHYS);
	end_kernel_pfn = PFN_UP(virt_to_phys(kernel_end));
	bootmap_start = -1;

	if (!nid && (node_max_pfn < end_kernel_pfn || node_min_pfn > start_kernel_pfn))
		panic("kernel loaded out of ram");

	/* Zone start phys-addr must be 2^(MAX_ORDER-1) aligned.
	   Note that we round this down, not up - node memory
	   has much larger alignment than 8MB, so it's safe.  */
	node_min_pfn &= ~((1UL << (MAX_ORDER-1))-1);

	/* We need to know how many physically contiguous pages
	   we'll need for the bootmap.  */
	bootmap_pages = bootmem_bootmap_pages(node_max_pfn-node_min_pfn);

	/* Now find a good region in which to allocate the bootmap.  */
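	/* A candidate cluster may overlap the kernel image
	   [start_kernel_pfn, end_kernel_pfn); carve that range out
	   and take the first remaining piece large enough to hold
	   bootmap_pages pages.  */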
	for_each_mem_cluster(memdesc, cluster, i) {
		if (cluster->usage & 3)
			continue;

		start = cluster->start_pfn;
		end = start + cluster->numpages;

		if (start >= node_max_pfn || end <= node_min_pfn)
			continue;

		if (end > node_max_pfn)
			end = node_max_pfn;
		if (start < node_min_pfn)
			start = node_min_pfn;

		if (start < start_kernel_pfn) {
			if (end > end_kernel_pfn
			    && end - end_kernel_pfn >= bootmap_pages) {
				bootmap_start = end_kernel_pfn;
				break;
			} else if (end > start_kernel_pfn)
				end = start_kernel_pfn;
		} else if (start < end_kernel_pfn)
			start = end_kernel_pfn;
		if (end - start >= bootmap_pages) {
			bootmap_start = start;
			break;
		}
	}

	if (bootmap_start == -1)
		panic("couldn't find a contiguous place for the bootmap");

	/* Allocate the bootmap and mark the whole MM as reserved.  */
	bootmap_size = init_bootmem_node(NODE_DATA(nid), bootmap_start,
					 node_min_pfn, node_max_pfn);
	DBGDCONT(" bootmap_start %lu, bootmap_size %lu, bootmap_pages %lu\n",
		 bootmap_start, bootmap_size, bootmap_pages);

	/* Mark the free regions.  */
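	/* Walk the clusters once more: every usable range that lies
	   inside this node and outside the kernel image is handed
	   back to the bootmem allocator.  */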
	for_each_mem_cluster(memdesc, cluster, i) {
		if (cluster->usage & 3)
			continue;

		start = cluster->start_pfn;
		end = cluster->start_pfn + cluster->numpages;

		if (start >= node_max_pfn || end <= node_min_pfn)
			continue;

		if (end > node_max_pfn)
			end = node_max_pfn;
		if (start < node_min_pfn)
			start = node_min_pfn;

		if (start < start_kernel_pfn) {
			if (end > end_kernel_pfn) {
				free_bootmem_node(NODE_DATA(nid), PFN_PHYS(start),
						  (PFN_PHYS(start_kernel_pfn)
						   - PFN_PHYS(start)));
				printk(" freeing pages %ld:%ld\n",
				       start, start_kernel_pfn);
				start = end_kernel_pfn;
			} else if (end > start_kernel_pfn)
				end = start_kernel_pfn;
		} else if (start < end_kernel_pfn)
			start = end_kernel_pfn;
		if (start >= end)
			continue;

		free_bootmem_node(NODE_DATA(nid), PFN_PHYS(start),
				  PFN_PHYS(end) - PFN_PHYS(start));
		printk(" freeing pages %ld:%ld\n", start, end);
	}

	/* Reserve the bootmap memory.  */
	reserve_bootmem_node(NODE_DATA(nid), PFN_PHYS(bootmap_start), bootmap_size);
	printk(" reserving pages %ld:%ld\n", bootmap_start,
	       bootmap_start + PFN_UP(bootmap_size));

	node_set_online(nid);
}

void __init
setup_memory(void *kernel_end)
{
	int nid;

	show_mem_layout();

	nodes_clear(node_online_map);

	min_low_pfn = ~0UL;
	max_low_pfn = 0UL;
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		setup_memory_node(nid, kernel_end);

#ifdef CONFIG_BLK_DEV_INITRD
	initrd_start = INITRD_START;
	if (initrd_start) {
		extern void *move_initrd(unsigned long);

		initrd_end = initrd_start + INITRD_SIZE;
		printk("Initial ramdisk at: 0x%p (%lu bytes)\n",
		       (void *) initrd_start, INITRD_SIZE);

		if ((void *)initrd_end > phys_to_virt(PFN_PHYS(max_low_pfn))) {
			if (!move_initrd(PFN_PHYS(max_low_pfn)))
				printk("initrd extends beyond end of memory "
				       "(0x%08lx > 0x%p)\ndisabling initrd\n",
				       initrd_end,
				       phys_to_virt(PFN_PHYS(max_low_pfn)));
		} else {
			nid = kvaddr_to_nid(initrd_start);
			reserve_bootmem_node(NODE_DATA(nid),
					     virt_to_phys((void *)initrd_start),
					     INITRD_SIZE);
		}
	}
#endif /* CONFIG_BLK_DEV_INITRD */
}

void __init paging_init(void)
{
	unsigned int nid;
	unsigned long zones_size[MAX_NR_ZONES] = {0, };
	unsigned long dma_local_pfn;

	/*
	 * The old global MAX_DMA_ADDRESS per-arch API doesn't fit
	 * the NUMA model; for now we convert it to a pfn and
	 * interpret that pfn as per-node information.  This isn't
	 * a serious limitation since none of these machines have
	 * legacy ISA slots anyway.
	 */
	dma_local_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;

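	/*
	 * Split each node's pages between ZONE_DMA and ZONE_NORMAL:
	 * the first dma_local_pfn pages of every node are treated as
	 * DMA-able, the remainder goes into ZONE_NORMAL.
	 */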
	for_each_online_node(nid) {
		unsigned long start_pfn = node_bdata[nid].node_boot_start >> PAGE_SHIFT;
		unsigned long end_pfn = node_bdata[nid].node_low_pfn;

		if (dma_local_pfn >= end_pfn - start_pfn)
			zones_size[ZONE_DMA] = end_pfn - start_pfn;
		else {
			zones_size[ZONE_DMA] = dma_local_pfn;
			zones_size[ZONE_NORMAL] = (end_pfn - start_pfn) - dma_local_pfn;
		}
		free_area_init_node(nid, NODE_DATA(nid), zones_size, start_pfn, NULL);
	}

	/* Initialize the kernel's ZERO_PGE.  */
	memset((void *)ZERO_PGE, 0, PAGE_SIZE);
}

void __init mem_init(void)
{
	unsigned long codesize, reservedpages, datasize, initsize, pfn;
	extern int page_is_ram(unsigned long) __init;
	extern char _text, _etext, _data, _edata;
	extern char __init_begin, __init_end;
	unsigned long nid, i;

	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

	reservedpages = 0;
	for_each_online_node(nid) {
		/*
		 * This will free up the bootmem, i.e., slot 0 memory.
		 */
		totalram_pages += free_all_bootmem_node(NODE_DATA(nid));

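		/* Count the pages that remain marked reserved now
		   that the bootmem allocator has been torn down.  */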
		pfn = NODE_DATA(nid)->node_start_pfn;
		for (i = 0; i < node_spanned_pages(nid); i++, pfn++)
			if (page_is_ram(pfn) &&
			    PageReserved(nid_page_nr(nid, i)))
				reservedpages++;
	}

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_data;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	printk("Memory: %luk/%luk available (%luk kernel code, %luk reserved, "
	       "%luk data, %luk init)\n",
	       (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
	       num_physpages << (PAGE_SHIFT-10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT-10),
	       datasize >> 10,
	       initsize >> 10);
#if 0
	mem_stress();
#endif
}

void
show_mem(void)
{
	long i, free = 0, total = 0, reserved = 0;
	long shared = 0, cached = 0;
	int nid;

	printk("\nMem-info:\n");
	show_free_areas();
	printk("Free swap: %6ldkB\n", nr_swap_pages << (PAGE_SHIFT-10));
	for_each_online_node(nid) {
		unsigned long flags;
		pgdat_resize_lock(NODE_DATA(nid), &flags);
		i = node_spanned_pages(nid);
		while (i-- > 0) {
			struct page *page = nid_page_nr(nid, i);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (!page_count(page))
				free++;
			else
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(NODE_DATA(nid), &flags);
	}
	printk("%ld pages of RAM\n", total);
	printk("%ld free pages\n", free);
	printk("%ld reserved pages\n", reserved);
	printk("%ld pages shared\n", shared);
	printk("%ld pages swap cached\n", cached);
}