Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
[deliverable/linux.git] / arch / mips / sgi-ip27 / ip27-memory.c
1 /*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2000, 05 by Ralf Baechle (ralf@linux-mips.org)
7 * Copyright (C) 2000 by Silicon Graphics, Inc.
8 * Copyright (C) 2004 by Christoph Hellwig
9 *
 * On SGI IP27 the ARC memory configuration data is completely bogus but
11 * alternate easier to use mechanisms are available.
12 */
13 #include <linux/init.h>
14 #include <linux/kernel.h>
15 #include <linux/mm.h>
16 #include <linux/mmzone.h>
17 #include <linux/module.h>
18 #include <linux/nodemask.h>
19 #include <linux/swap.h>
20 #include <linux/bootmem.h>
21 #include <linux/pfn.h>
22 #include <linux/highmem.h>
23 #include <asm/page.h>
24 #include <asm/sections.h>
25
26 #include <asm/sn/arch.h>
27 #include <asm/sn/hub.h>
28 #include <asm/sn/klconfig.h>
29 #include <asm/sn/sn_private.h>
30
31
/* Shift converting a slot index to its base pfn within a node. */
#define SLOT_PFNSHIFT (SLOT_SHIFT - PAGE_SHIFT)
/* Shift placing the nasid into the high bits of a pfn. */
#define PFN_NASIDSHFT (NASID_SHFT - PAGE_SHIFT)

/* Marker for slots dropped because their mem_map would not fit in slot 0. */
#define SLOT_IGNORED 0xffff

/* Highest populated slot per node; filled in by szmem(). */
static short __initdata slot_lastfilled_cache[MAX_COMPACT_NODES];
/* Pages per slot per node (or SLOT_IGNORED); filled in by szmem(). */
static unsigned short __initdata slot_psize_cache[MAX_COMPACT_NODES][MAX_MEM_SLOTS];
/* Per-node bootmem bookkeeping, hooked up in node_mem_init(). */
static struct bootmem_data __initdata plat_node_bdata[MAX_COMPACT_NODES];

/* Per-node data pointers; offline nodes point at null_node. */
struct node_data *__node_data[MAX_COMPACT_NODES];

EXPORT_SYMBOL(__node_data);

/* Non-zero when the hub runs in fine directory (region) mode. */
static int fine_mode;
46
/*
 * Detect whether the local hub is configured for fine region size by
 * reading the region-size field of the NI status/revision register.
 */
static int is_fine_dirmode(void)
{
	return (((LOCAL_HUB_L(NI_STATUS_REV_ID) & NSRI_REGIONSIZE_MASK)
		>> NSRI_REGIONSIZE_SHFT) & REGIONSIZE_FINE);
}
52
53 static hubreg_t get_region(cnodeid_t cnode)
54 {
55 if (fine_mode)
56 return COMPACT_TO_NASID_NODEID(cnode) >> NASID_TO_FINEREG_SHFT;
57 else
58 return COMPACT_TO_NASID_NODEID(cnode) >> NASID_TO_COARSEREG_SHFT;
59 }
60
/* Bitmask of regions containing online nodes; built by gen_region_mask(). */
static hubreg_t region_mask;
62
63 static void gen_region_mask(hubreg_t *region_mask)
64 {
65 cnodeid_t cnode;
66
67 (*region_mask) = 0;
68 for_each_online_node(cnode) {
69 (*region_mask) |= 1ULL << get_region(cnode);
70 }
71 }
72
/* Reuse the klconfig rou_flags field as the DFS "visited" marker. */
#define rou_rflag rou_flags

/* Shortest router hop count found so far by router_recurse(). */
static int router_distance;
76
/*
 * Depth-first search from router_a towards router_b, lowering the
 * global router_distance to the shortest hop count found.  rou_rflag
 * serves as a visited marker to cut cycles and is cleared again on
 * the way out, so the router graph is left unmarked afterwards.
 */
static void router_recurse(klrou_t *router_a, klrou_t *router_b, int depth)
{
	klrou_t *router;
	lboard_t *brd;
	int port;

	/* Already on the current path - avoid looping forever. */
	if (router_a->rou_rflag == 1)
		return;

	/* Prune: this path can't beat the best distance found so far. */
	if (depth >= router_distance)
		return;

	router_a->rou_rflag = 1;

	for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
		/* Skip unconnected ports. */
		if (router_a->rou_port[port].port_nasid == INVALID_NASID)
			continue;

		brd = (lboard_t *)NODE_OFFSET_TO_K0(
			router_a->rou_port[port].port_nasid,
			router_a->rou_port[port].port_offset);

		if (brd->brd_type == KLTYPE_ROUTER) {
			router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]);
			if (router == router_b) {
				if (depth < router_distance)
					router_distance = depth;
			}
			else
				router_recurse(router, router_b, depth + 1);
		}
	}

	/* Unmark so other paths may pass through this router. */
	router_a->rou_rflag = 0;
}
112
/* Distance matrix indexed by compact node ids; -1 (0xff) means unknown. */
unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES];
114
115 static int __init compute_node_distance(nasid_t nasid_a, nasid_t nasid_b)
116 {
117 klrou_t *router, *router_a = NULL, *router_b = NULL;
118 lboard_t *brd, *dest_brd;
119 cnodeid_t cnode;
120 nasid_t nasid;
121 int port;
122
123 /* Figure out which routers nodes in question are connected to */
124 for_each_online_node(cnode) {
125 nasid = COMPACT_TO_NASID_NODEID(cnode);
126
127 if (nasid == -1) continue;
128
129 brd = find_lboard_class((lboard_t *)KL_CONFIG_INFO(nasid),
130 KLTYPE_ROUTER);
131
132 if (!brd)
133 continue;
134
135 do {
136 if (brd->brd_flags & DUPLICATE_BOARD)
137 continue;
138
139 router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]);
140 router->rou_rflag = 0;
141
142 for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
143 if (router->rou_port[port].port_nasid == INVALID_NASID)
144 continue;
145
146 dest_brd = (lboard_t *)NODE_OFFSET_TO_K0(
147 router->rou_port[port].port_nasid,
148 router->rou_port[port].port_offset);
149
150 if (dest_brd->brd_type == KLTYPE_IP27) {
151 if (dest_brd->brd_nasid == nasid_a)
152 router_a = router;
153 if (dest_brd->brd_nasid == nasid_b)
154 router_b = router;
155 }
156 }
157
158 } while ((brd = find_lboard_class(KLCF_NEXT(brd), KLTYPE_ROUTER)));
159 }
160
161 if (router_a == NULL) {
162 printk("node_distance: router_a NULL\n");
163 return -1;
164 }
165 if (router_b == NULL) {
166 printk("node_distance: router_b NULL\n");
167 return -1;
168 }
169
170 if (nasid_a == nasid_b)
171 return 0;
172
173 if (router_a == router_b)
174 return 1;
175
176 router_distance = 100;
177 router_recurse(router_a, router_b, 2);
178
179 return router_distance;
180 }
181
182 static void __init init_topology_matrix(void)
183 {
184 nasid_t nasid, nasid2;
185 cnodeid_t row, col;
186
187 for (row = 0; row < MAX_COMPACT_NODES; row++)
188 for (col = 0; col < MAX_COMPACT_NODES; col++)
189 __node_distances[row][col] = -1;
190
191 for_each_online_node(row) {
192 nasid = COMPACT_TO_NASID_NODEID(row);
193 for_each_online_node(col) {
194 nasid2 = COMPACT_TO_NASID_NODEID(col);
195 __node_distances[row][col] =
196 compute_node_distance(nasid, nasid2);
197 }
198 }
199 }
200
/*
 * Print the node distance matrix and, for every router board found
 * in the klconfig information, the nodes and routers attached to its
 * ports.  Purely informational; called once at boot from mlreset().
 */
static void __init dump_topology(void)
{
	nasid_t nasid;
	cnodeid_t cnode;
	lboard_t *brd, *dest_brd;
	int port;
	int router_num = 0;
	klrou_t *router;
	cnodeid_t row, col;

	printk("************** Topology ********************\n");

	/* Header row: column node ids. */
	printk(" ");
	for_each_online_node(col)
		printk("%02d ", col);
	printk("\n");
	/* One row of distances per online node. */
	for_each_online_node(row) {
		printk("%02d ", row);
		for_each_online_node(col)
			printk("%2d ", node_distance(row, col));
		printk("\n");
	}

	/* Walk every node's klconfig list looking for router boards. */
	for_each_online_node(cnode) {
		nasid = COMPACT_TO_NASID_NODEID(cnode);

		if (nasid == -1) continue;

		brd = find_lboard_class((lboard_t *)KL_CONFIG_INFO(nasid),
					KLTYPE_ROUTER);

		if (!brd)
			continue;

		do {
			if (brd->brd_flags & DUPLICATE_BOARD)
				continue;
			printk("Router %d:", router_num);
			router_num++;

			router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]);

			for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
				/* Skip unconnected ports. */
				if (router->rou_port[port].port_nasid == INVALID_NASID)
					continue;

				dest_brd = (lboard_t *)NODE_OFFSET_TO_K0(
					router->rou_port[port].port_nasid,
					router->rou_port[port].port_offset);

				/* Attached node boards print as a nasid, routers as "r". */
				if (dest_brd->brd_type == KLTYPE_IP27)
					printk(" %d", dest_brd->brd_nasid);
				if (dest_brd->brd_type == KLTYPE_ROUTER)
					printk(" r");
			}
			printk("\n");

		} while ( (brd = find_lboard_class(KLCF_NEXT(brd), KLTYPE_ROUTER)) );
	}
}
261
262 static pfn_t __init slot_getbasepfn(cnodeid_t cnode, int slot)
263 {
264 nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
265
266 return ((pfn_t)nasid << PFN_NASIDSHFT) | (slot << SLOT_PFNSHIFT);
267 }
268
269 /*
270 * Return the number of pages of memory provided by the given slot
271 * on the specified node.
272 */
273 static pfn_t __init slot_getsize(cnodeid_t node, int slot)
274 {
275 return (pfn_t) slot_psize_cache[node][slot];
276 }
277
278 /*
279 * Return highest slot filled
280 */
281 static int __init node_getlastslot(cnodeid_t node)
282 {
283 return (int) slot_lastfilled_cache[node];
284 }
285
286 /*
287 * Return the pfn of the last free page of memory on a node.
288 */
289 static pfn_t __init node_getmaxclick(cnodeid_t node)
290 {
291 pfn_t slot_psize;
292 int slot;
293
294 /*
295 * Start at the top slot. When we find a slot with memory in it,
296 * that's the winner.
297 */
298 for (slot = (MAX_MEM_SLOTS - 1); slot >= 0; slot--) {
299 if ((slot_psize = slot_getsize(node, slot))) {
300 if (slot_psize == SLOT_IGNORED)
301 continue;
302 /* Return the basepfn + the slot size, minus 1. */
303 return slot_getbasepfn(node, slot) + slot_psize - 1;
304 }
305 }
306
307 /*
308 * If there's no memory on the node, return 0. This is likely
309 * to cause problems.
310 */
311 return 0;
312 }
313
314 static pfn_t __init slot_psize_compute(cnodeid_t node, int slot)
315 {
316 nasid_t nasid;
317 lboard_t *brd;
318 klmembnk_t *banks;
319 unsigned long size;
320
321 nasid = COMPACT_TO_NASID_NODEID(node);
322 /* Find the node board */
323 brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_IP27);
324 if (!brd)
325 return 0;
326
327 /* Get the memory bank structure */
328 banks = (klmembnk_t *) find_first_component(brd, KLSTRUCT_MEMBNK);
329 if (!banks)
330 return 0;
331
332 /* Size in _Megabytes_ */
333 size = (unsigned long)banks->membnk_bnksz[slot/4];
334
335 /* hack for 128 dimm banks */
336 if (size <= 128) {
337 if (slot % 4 == 0) {
338 size <<= 20; /* size in bytes */
339 return(size >> PAGE_SHIFT);
340 } else
341 return 0;
342 } else {
343 size /= 4;
344 size <<= 20;
345 return size >> PAGE_SHIFT;
346 }
347 }
348
/*
 * Early per-system setup: record the master nasid, detect the
 * directory mode, probe CPUs, build the node topology matrix and
 * program every hub's region-present and CALIAS registers.
 */
static void __init mlreset(void)
{
	int i;

	master_nasid = get_nasid();
	fine_mode = is_fine_dirmode();

	/*
	 * Probe for all CPUs - this creates the cpumask and sets up the
	 * mapping tables.  We need to do this as early as possible.
	 */
#ifdef CONFIG_SMP
	cpu_node_probe();
#endif

	init_topology_matrix();
	dump_topology();

	gen_region_mask(&region_mask);

	setup_replication_mask();

	/*
	 * Set all nodes' calias sizes to 8k
	 */
	for_each_online_node(i) {
		nasid_t nasid;

		nasid = COMPACT_TO_NASID_NODEID(i);

		/*
		 * Always have node 0 in the region mask, otherwise
		 * CALIAS accesses get exceptions since the hub
		 * thinks it is a node 0 address.
		 */
		REMOTE_HUB_S(nasid, PI_REGION_PRESENT, (region_mask | 1));
#ifdef CONFIG_REPLICATE_EXHANDLERS
		REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_8K);
#else
		REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_0);
#endif

#ifdef LATER
		/*
		 * Set up all hubs to have a big window pointing at
		 * widget 0. Memory mode, widget 0, offset 0
		 */
		REMOTE_HUB_S(nasid, IIO_ITTE(SWIN0_BIGWIN),
			((HUB_PIO_MAP_TO_MEM << IIO_ITTE_IOSP_SHIFT) |
			(0 << IIO_ITTE_WIDGET_SHIFT)));
#endif
	}
}
402
403 static void __init szmem(void)
404 {
405 pfn_t slot_psize, slot0sz = 0, nodebytes; /* Hack to detect problem configs */
406 int slot, ignore;
407 cnodeid_t node;
408
409 num_physpages = 0;
410
411 for_each_online_node(node) {
412 ignore = nodebytes = 0;
413 for (slot = 0; slot < MAX_MEM_SLOTS; slot++) {
414 slot_psize = slot_psize_compute(node, slot);
415 if (slot == 0)
416 slot0sz = slot_psize;
417 /*
418 * We need to refine the hack when we have replicated
419 * kernel text.
420 */
421 nodebytes += (1LL << SLOT_SHIFT);
422 if ((nodebytes >> PAGE_SHIFT) * (sizeof(struct page)) >
423 (slot0sz << PAGE_SHIFT))
424 ignore = 1;
425 if (ignore && slot_psize) {
426 printk("Ignoring slot %d onwards on node %d\n",
427 slot, node);
428 slot_psize_cache[node][slot] = SLOT_IGNORED;
429 slot = MAX_MEM_SLOTS;
430 continue;
431 }
432 num_physpages += slot_psize;
433 slot_psize_cache[node][slot] =
434 (unsigned short) slot_psize;
435 if (slot_psize)
436 slot_lastfilled_cache[node] = slot;
437 }
438 }
439 }
440
/*
 * Set up the bootmem allocator for one node: place the node data
 * structures at the first free pfn of slot 0, initialise the bootmem
 * map covering slot 0, mark the whole slot free, then re-reserve the
 * head that is already in use (node data plus the bootmem bitmap).
 */
static void __init node_mem_init(cnodeid_t node)
{
	pfn_t slot_firstpfn = slot_getbasepfn(node, 0);
	pfn_t slot_lastpfn = slot_firstpfn + slot_getsize(node, 0);
	pfn_t slot_freepfn = node_getfirstfree(node);
	struct pglist_data *pd;
	unsigned long bootmap_size;

	/*
	 * Allocate the node data structures on the node first.
	 */
	__node_data[node] = __va(slot_freepfn << PAGE_SHIFT);

	pd = NODE_DATA(node);
	pd->bdata = &plat_node_bdata[node];

	cpus_clear(hub_data(node)->h_cpus);

	/* Skip past the pglist_data and hub_data just placed. */
	slot_freepfn += PFN_UP(sizeof(struct pglist_data) +
			       sizeof(struct hub_data));

	bootmap_size = init_bootmem_node(NODE_DATA(node), slot_freepfn,
					slot_firstpfn, slot_lastpfn);
	free_bootmem_node(NODE_DATA(node), slot_firstpfn << PAGE_SHIFT,
			(slot_lastpfn - slot_firstpfn) << PAGE_SHIFT);
	reserve_bootmem_node(NODE_DATA(node), slot_firstpfn << PAGE_SHIFT,
		((slot_freepfn - slot_firstpfn) << PAGE_SHIFT) + bootmap_size);
}
469
/*
 * A node with nothing.  We use it to avoid any special casing in
 * node_to_cpumask
 */
static struct node_data null_node = {
	.hub = {
		.h_cpus = CPU_MASK_NONE		/* no CPUs on an absent node */
	}
};
479
480 /*
481 * Currently, the intranode memory hole support assumes that each slot
482 * contains at least 32 MBytes of memory. We assume all bootmem data
483 * fits on the first slot.
484 */
485 void __init prom_meminit(void)
486 {
487 cnodeid_t node;
488
489 mlreset();
490 szmem();
491
492 for (node = 0; node < MAX_COMPACT_NODES; node++) {
493 if (node_online(node)) {
494 node_mem_init(node);
495 continue;
496 }
497 __node_data[node] = &null_node;
498 }
499 }
500
501 unsigned long __init prom_free_prom_memory(void)
502 {
503 /* We got nothing to free here ... */
504 return 0;
505 }
506
/* NOTE(review): presumably defined in the MIPS mm code - confirm. */
extern void pagetable_init(void);
extern unsigned long setup_zero_pages(void);
509
510 void __init paging_init(void)
511 {
512 unsigned long zones_size[MAX_NR_ZONES] = {0, };
513 unsigned node;
514
515 pagetable_init();
516
517 for_each_online_node(node) {
518 pfn_t start_pfn = slot_getbasepfn(node, 0);
519 pfn_t end_pfn = node_getmaxclick(node) + 1;
520
521 zones_size[ZONE_DMA] = end_pfn - start_pfn;
522 free_area_init_node(node, NODE_DATA(node),
523 zones_size, start_pfn, NULL);
524
525 if (end_pfn > max_low_pfn)
526 max_low_pfn = end_pfn;
527 }
528 }
529
/*
 * Final memory accounting: release the bootmem (slot 0) pages of
 * every node, hand the remaining slots' pages to the page allocator
 * by hand, and print the usual boot-time memory summary.
 */
void __init mem_init(void)
{
	unsigned long codesize, datasize, initsize, tmp;
	unsigned node;

	high_memory = (void *) __va(num_physpages << PAGE_SHIFT);

	for_each_online_node(node) {
		unsigned slot, numslots;
		struct page *end, *p;

		/*
		 * This will free up the bootmem, ie, slot 0 memory.
		 */
		totalram_pages += free_all_bootmem_node(NODE_DATA(node));

		/*
		 * We need to manually do the other slots.
		 */
		numslots = node_getlastslot(node);
		for (slot = 1; slot <= numslots; slot++) {
			/* struct page of the slot's first pfn within this node. */
			p = nid_page_nr(node, slot_getbasepfn(node, slot) -
					slot_getbasepfn(node, 0));

			/*
			 * Free valid memory in current slot.
			 */
			for (end = p + slot_getsize(node, slot); p < end; p++) {
				/* if (!page_is_ram(pgnr)) continue; */
				/* commented out until page_is_ram works */
				ClearPageReserved(p);
				init_page_count(p);
				__free_page(p);
				totalram_pages++;
			}
		}
	}

	totalram_pages -= setup_zero_pages();	/* This comes from node 0 */

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	tmp = nr_free_pages();
	printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
	       "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n",
	       tmp << (PAGE_SHIFT-10),
	       num_physpages << (PAGE_SHIFT-10),
	       codesize >> 10,
	       (num_physpages - tmp) << (PAGE_SHIFT-10),
	       datasize >> 10,
	       initsize >> 10,
	       (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));
}
This page took 0.05102 seconds and 6 git commands to generate.