x86, numa: Add fixed node size option for numa emulation
arch/x86/mm/numa_64.c
1 /*
2 * Generic VM initialization for x86-64 NUMA setups.
3 * Copyright 2002,2003 Andi Kleen, SuSE Labs.
4 */
5 #include <linux/kernel.h>
6 #include <linux/mm.h>
7 #include <linux/string.h>
8 #include <linux/init.h>
9 #include <linux/bootmem.h>
10 #include <linux/mmzone.h>
11 #include <linux/ctype.h>
12 #include <linux/module.h>
13 #include <linux/nodemask.h>
14 #include <linux/sched.h>
15
16 #include <asm/e820.h>
17 #include <asm/proto.h>
18 #include <asm/dma.h>
19 #include <asm/numa.h>
20 #include <asm/acpi.h>
21 #include <asm/k8.h>
22
23 struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
24 EXPORT_SYMBOL(node_data);
25
26 struct memnode memnode;
27
28 s16 apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
29 [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
30 };
31
32 int numa_off __initdata;
33 static unsigned long __initdata nodemap_addr;
34 static unsigned long __initdata nodemap_size;
35
36 DEFINE_PER_CPU(int, node_number) = 0;
37 EXPORT_PER_CPU_SYMBOL(node_number);
38
39 /*
40 * Map cpu index to node index
41 */
42 DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
43 EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
44
45 /*
46 * Given a shift value, try to populate memnodemap[]
47 * Returns:
48 * 1 if OK
49 * 0 if memnodemap[] too small (or shift too small)
50 * -1 if node overlap or lost RAM (shift too big)
51 */
52 static int __init populate_memnodemap(const struct bootnode *nodes,
53 int numnodes, int shift, int *nodeids)
54 {
55 unsigned long addr, end;
56 int i, res = -1;
57
58 memset(memnodemap, 0xff, sizeof(s16)*memnodemapsize);
59 for (i = 0; i < numnodes; i++) {
60 addr = nodes[i].start;
61 end = nodes[i].end;
62 if (addr >= end)
63 continue;
64 if ((end >> shift) >= memnodemapsize)
65 return 0;
66 do {
67 if (memnodemap[addr >> shift] != NUMA_NO_NODE)
68 return -1;
69
70 if (!nodeids)
71 memnodemap[addr >> shift] = i;
72 else
73 memnodemap[addr >> shift] = nodeids[i];
74
75 addr += (1UL << shift);
76 } while (addr < end);
77 res = 1;
78 }
79 return res;
80 }
81
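/*
 * Allocate the physaddr -> nodeid hash map.  Use the map embedded in
 * struct memnode when it is big enough; otherwise carve a cache-line
 * aligned area out of the e820 map and reserve it early.
 */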
82 static int __init allocate_cachealigned_memnodemap(void)
83 {
84 unsigned long addr;
85
86 memnodemap = memnode.embedded_map;
87 if (memnodemapsize <= ARRAY_SIZE(memnode.embedded_map))
88 return 0;
89
90 addr = 0x8000;
91 nodemap_size = roundup(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES);
92 nodemap_addr = find_e820_area(addr, max_pfn<<PAGE_SHIFT,
93 nodemap_size, L1_CACHE_BYTES);
94 if (nodemap_addr == -1UL) {
95 printk(KERN_ERR
96 "NUMA: Unable to allocate Memory to Node hash map\n");
97 nodemap_addr = nodemap_size = 0;
98 return -1;
99 }
100 memnodemap = phys_to_virt(nodemap_addr);
101 reserve_early(nodemap_addr, nodemap_addr + nodemap_size, "MEMNODEMAP");
102
103 printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
104 nodemap_addr, nodemap_addr + nodemap_size);
105 return 0;
106 }
107
108 /*
109 * The LSB of all start and end addresses in the node map is the value of the
110 * maximum possible shift.
111 */
112 static int __init extract_lsb_from_nodes(const struct bootnode *nodes,
113 int numnodes)
114 {
115 int i, nodes_used = 0;
116 unsigned long start, end;
117 unsigned long bitfield = 0, memtop = 0;
118
119 for (i = 0; i < numnodes; i++) {
120 start = nodes[i].start;
121 end = nodes[i].end;
122 if (start >= end)
123 continue;
124 bitfield |= start;
125 nodes_used++;
126 if (end > memtop)
127 memtop = end;
128 }
129 if (nodes_used <= 1)
130 i = 63;
131 else
132 i = find_first_bit(&bitfield, sizeof(unsigned long)*8);
133 memnodemapsize = (memtop >> i)+1;
134 return i;
135 }
136
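/*
 * Compute the shift used by phys_to_nid() and fill in memnodemap[]
 * for the given set of nodes.  Returns the shift on success, or -1 if
 * the map could not be allocated or populated.
 */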
137 int __init compute_hash_shift(struct bootnode *nodes, int numnodes,
138 int *nodeids)
139 {
140 int shift;
141
142 shift = extract_lsb_from_nodes(nodes, numnodes);
143 if (allocate_cachealigned_memnodemap())
144 return -1;
145 printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n",
146 shift);
147
148 if (populate_memnodemap(nodes, numnodes, shift, nodeids) != 1) {
149 printk(KERN_INFO "Your memory is not aligned; you need to "
150 "rebuild your kernel with a bigger NODEMAPSIZE, "
151 "shift=%d\n", shift);
152 return -1;
153 }
154 return shift;
155 }
156
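/* Early pfn -> nid lookup, backed by the memnodemap hash. */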
157 int __meminit __early_pfn_to_nid(unsigned long pfn)
158 {
159 return phys_to_nid(pfn << PAGE_SHIFT);
160 }
161
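/*
 * Allocate `size' bytes for node `nodeid', preferably inside
 * [start, end) via the e820 map, falling back to bootmem.
 * Returns NULL if neither allocation succeeds.
 */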
162 static void * __init early_node_mem(int nodeid, unsigned long start,
163 unsigned long end, unsigned long size,
164 unsigned long align)
165 {
166 unsigned long mem = find_e820_area(start, end, size, align);
167 void *ptr;
168
169 if (mem != -1L)
170 return __va(mem);
171
172 ptr = __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
173 if (ptr == NULL) {
174 printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
175 size, nodeid);
176 return NULL;
177 }
178 return ptr;
179 }
180
181 /* Initialize bootmem allocator for a node */
182 void __init
183 setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
184 {
185 unsigned long start_pfn, last_pfn, bootmap_pages, bootmap_size;
186 const int pgdat_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
187 unsigned long bootmap_start, nodedata_phys;
188 void *bootmap;
189 int nid;
190
191 if (!end)
192 return;
193
194 /*
195 * Don't confuse VM with a node that doesn't have the
196 * minimum amount of memory:
197 */
198 if (end && (end - start) < NODE_MIN_SIZE)
199 return;
200
201 start = roundup(start, ZONE_ALIGN);
202
203 printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid,
204 start, end);
205
206 start_pfn = start >> PAGE_SHIFT;
207 last_pfn = end >> PAGE_SHIFT;
208
209 node_data[nodeid] = early_node_mem(nodeid, start, end, pgdat_size,
210 SMP_CACHE_BYTES);
211 if (node_data[nodeid] == NULL)
212 return;
213 nodedata_phys = __pa(node_data[nodeid]);
214 printk(KERN_INFO " NODE_DATA [%016lx - %016lx]\n", nodedata_phys,
215 nodedata_phys + pgdat_size - 1);
216
217 memset(NODE_DATA(nodeid), 0, sizeof(pg_data_t));
218 NODE_DATA(nodeid)->bdata = &bootmem_node_data[nodeid];
219 NODE_DATA(nodeid)->node_start_pfn = start_pfn;
220 NODE_DATA(nodeid)->node_spanned_pages = last_pfn - start_pfn;
221
222 /*
223 * Find a place for the bootmem map.
224 * nodedata_phys could have ended up on another node via alloc_bootmem,
225 * so make sure bootmap_start is not too low; otherwise
226 * early_node_mem will grab it with find_e820_area instead
227 * of alloc_bootmem, which could clash with the reserved range.
228 */
229 bootmap_pages = bootmem_bootmap_pages(last_pfn - start_pfn);
230 nid = phys_to_nid(nodedata_phys);
231 if (nid == nodeid)
232 bootmap_start = roundup(nodedata_phys + pgdat_size, PAGE_SIZE);
233 else
234 bootmap_start = roundup(start, PAGE_SIZE);
235 /*
236 * SMP_CACHE_BYTES could be enough, but init_bootmem_node likes
237 * the bootmap to be PAGE_SIZE aligned.
238 */
239 bootmap = early_node_mem(nodeid, bootmap_start, end,
240 bootmap_pages<<PAGE_SHIFT, PAGE_SIZE);
241 if (bootmap == NULL) {
242 if (nodedata_phys < start || nodedata_phys >= end) {
243 /*
244 * Only need to free it if it came from another
245 * node's bootmem.
246 */
247 if (nid != nodeid)
248 free_bootmem(nodedata_phys, pgdat_size);
249 }
250 node_data[nodeid] = NULL;
251 return;
252 }
253 bootmap_start = __pa(bootmap);
254
255 bootmap_size = init_bootmem_node(NODE_DATA(nodeid),
256 bootmap_start >> PAGE_SHIFT,
257 start_pfn, last_pfn);
258
259 printk(KERN_INFO " bootmap [%016lx - %016lx] pages %lx\n",
260 bootmap_start, bootmap_start + bootmap_size - 1,
261 bootmap_pages);
262
263 free_bootmem_with_active_regions(nodeid, end);
264
265 /*
266 * Convert early reservations to bootmem reservations now;
267 * otherwise early_node_mem could hand out memory that was
268 * early-reserved on a previous node.
269 */
270 early_res_to_bootmem(start, end);
271
272 /*
273 * In some cases early_node_mem could use alloc_bootmem
274 * to get a range on another node; don't reserve that again.
275 */
276 if (nid != nodeid)
277 printk(KERN_INFO " NODE_DATA(%d) on node %d\n", nodeid, nid);
278 else
279 reserve_bootmem_node(NODE_DATA(nodeid), nodedata_phys,
280 pgdat_size, BOOTMEM_DEFAULT);
281 nid = phys_to_nid(bootmap_start);
282 if (nid != nodeid)
283 printk(KERN_INFO " bootmap(%d) on node %d\n", nodeid, nid);
284 else
285 reserve_bootmem_node(NODE_DATA(nodeid), bootmap_start,
286 bootmap_pages<<PAGE_SHIFT, BOOTMEM_DEFAULT);
287
288 node_set_online(nodeid);
289 }
290
291 /*
292 * There are unfortunately some poorly designed mainboards around that
293 * only connect memory to a single CPU. This breaks the 1:1 cpu->node
294 * mapping. To avoid this, fill in the mapping for all possible CPUs,
295 * as the number of CPUs is not known yet. We round-robin the existing
296 * nodes.
297 */
298 void __init numa_init_array(void)
299 {
300 int rr, i;
301
302 rr = first_node(node_online_map);
303 for (i = 0; i < nr_cpu_ids; i++) {
304 if (early_cpu_to_node(i) != NUMA_NO_NODE)
305 continue;
306 numa_set_node(i, rr);
307 rr = next_node(rr, node_online_map);
308 if (rr == MAX_NUMNODES)
309 rr = first_node(node_online_map);
310 }
311 }
312
313 #ifdef CONFIG_NUMA_EMU
314 /* NUMA emulation */
315 static struct bootnode nodes[MAX_NUMNODES] __initdata;
316 static struct bootnode physnodes[MAX_NUMNODES] __initdata;
317 static char *cmdline __initdata;
318
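/*
 * Build physnodes[] from the ACPI (SRAT) or K8 topology, clamp each
 * entry to [start, end), drop empty nodes and, if nothing usable was
 * found, fake a single node covering the whole range.  Returns the
 * number of physical nodes.
 */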
319 static int __init setup_physnodes(unsigned long start, unsigned long end,
320 int acpi, int k8)
321 {
322 int nr_nodes = 0;
323 int ret = 0;
324 int i;
325
326 #ifdef CONFIG_ACPI_NUMA
327 if (acpi)
328 nr_nodes = acpi_get_nodes(physnodes);
329 #endif
330 #ifdef CONFIG_K8_NUMA
331 if (k8)
332 nr_nodes = k8_get_nodes(physnodes);
333 #endif
334 /*
335 * Basic sanity checking on the physical node map: there may be errors
336 * if the SRAT or K8 incorrectly reported the topology or the mem=
337 * kernel parameter is used.
338 */
339 for (i = 0; i < nr_nodes; i++) {
340 if (physnodes[i].start == physnodes[i].end)
341 continue;
342 if (physnodes[i].start > end) {
343 physnodes[i].end = physnodes[i].start;
344 continue;
345 }
346 if (physnodes[i].end < start) {
347 physnodes[i].start = physnodes[i].end;
348 continue;
349 }
350 if (physnodes[i].start < start)
351 physnodes[i].start = start;
352 if (physnodes[i].end > end)
353 physnodes[i].end = end;
354 }
355
356 /*
357 * Remove all nodes that have no memory or were truncated because of the
358 * limited address range.
359 */
360 for (i = 0; i < nr_nodes; i++) {
361 if (physnodes[i].start == physnodes[i].end)
362 continue;
363 physnodes[ret].start = physnodes[i].start;
364 physnodes[ret].end = physnodes[i].end;
365 ret++;
366 }
367
368 /*
369 * If no physical topology was detected, a single node is faked to cover
370 * the entire address space.
371 */
372 if (!ret) {
373 physnodes[ret].start = start;
374 physnodes[ret].end = end;
375 ret = 1;
376 }
377 return ret;
378 }
379
380 /*
381 * Sets up nid to the range from addr to addr + size. If the end
382 * boundary is greater than max_addr, then max_addr is used instead.
383 * The return value is 0 if there is additional memory left for
384 * allocation past addr and -1 otherwise. addr is adjusted to be at
385 * the end of the node.
386 */
387 static int __init setup_node_range(int nid, u64 *addr, u64 size, u64 max_addr)
388 {
389 int ret = 0;
390 nodes[nid].start = *addr;
391 *addr += size;
392 if (*addr >= max_addr) {
393 *addr = max_addr;
394 ret = -1;
395 }
396 nodes[nid].end = *addr;
397 node_set(nid, node_possible_map);
398 printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid,
399 nodes[nid].start, nodes[nid].end,
400 (nodes[nid].end - nodes[nid].start) >> 20);
401 return ret;
402 }
403
404 /*
405 * Sets up nr_nodes fake nodes interleaved over physical nodes ranging from addr
406 * to max_addr. The return value is the number of nodes allocated.
407 */
408 static int __init split_nodes_interleave(u64 addr, u64 max_addr,
409 int nr_phys_nodes, int nr_nodes)
410 {
411 nodemask_t physnode_mask = NODE_MASK_NONE;
412 u64 size;
413 int big;
414 int ret = 0;
415 int i;
416
417 if (nr_nodes <= 0)
418 return -1;
419 if (nr_nodes > MAX_NUMNODES) {
420 pr_info("numa=fake=%d too large, reducing to %d\n",
421 nr_nodes, MAX_NUMNODES);
422 nr_nodes = MAX_NUMNODES;
423 }
424
425 size = (max_addr - addr - e820_hole_size(addr, max_addr)) / nr_nodes;
426 /*
427 * Calculate the number of big nodes that can be allocated as a result
428 * of consolidating the remainder.
429 */
430 big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * nr_nodes) /
431 FAKE_NODE_MIN_SIZE;
432
433 size &= FAKE_NODE_MIN_HASH_MASK;
434 if (!size) {
435 pr_err("Not enough memory for each node. "
436 "NUMA emulation disabled.\n");
437 return -1;
438 }
439
440 for (i = 0; i < nr_phys_nodes; i++)
441 if (physnodes[i].start != physnodes[i].end)
442 node_set(i, physnode_mask);
443
444 /*
445 * Continue to fill physical nodes with fake nodes until there is no
446 * memory left on any of them.
447 */
448 while (nodes_weight(physnode_mask)) {
449 for_each_node_mask(i, physnode_mask) {
450 u64 end = physnodes[i].start + size;
451 u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN);
452
453 if (ret < big)
454 end += FAKE_NODE_MIN_SIZE;
455
456 /*
457 * Continue to add memory to this fake node if its
458 * non-reserved memory is less than the per-node size.
459 */
460 while (end - physnodes[i].start -
461 e820_hole_size(physnodes[i].start, end) < size) {
462 end += FAKE_NODE_MIN_SIZE;
463 if (end > physnodes[i].end) {
464 end = physnodes[i].end;
465 break;
466 }
467 }
468
469 /*
470 * If there won't be at least FAKE_NODE_MIN_SIZE of
471 * non-reserved memory in ZONE_DMA32 for the next node,
472 * this one must extend to the boundary.
473 */
474 if (end < dma32_end && dma32_end - end -
475 e820_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
476 end = dma32_end;
477
478 /*
479 * If there won't be enough non-reserved memory for the
480 * next node, this one must extend to the end of the
481 * physical node.
482 */
483 if (physnodes[i].end - end -
484 e820_hole_size(end, physnodes[i].end) < size)
485 end = physnodes[i].end;
486
487 /*
488 * Avoid allocating more nodes than requested, which can
489 * happen as a result of rounding down each node's size
490 * to FAKE_NODE_MIN_SIZE.
491 */
492 if (nodes_weight(physnode_mask) + ret >= nr_nodes)
493 end = physnodes[i].end;
494
495 if (setup_node_range(ret++, &physnodes[i].start,
496 end - physnodes[i].start,
497 physnodes[i].end) < 0)
498 node_clear(i, physnode_mask);
499 }
500 }
501 return ret;
502 }
503
504 /*
505 * Returns the end address of a node so that there is at least `size' amount of
506 * non-reserved memory or `max_addr' is reached.
507 */
508 static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size)
509 {
510 u64 end = start + size;
511
512 while (end - start - e820_hole_size(start, end) < size) {
513 end += FAKE_NODE_MIN_SIZE;
514 if (end > max_addr) {
515 end = max_addr;
516 break;
517 }
518 }
519 return end;
520 }
521
522 /*
523 * Sets up fake nodes of `size' interleaved over physical nodes ranging from
524 * `addr' to `max_addr'. The return value is the number of nodes allocated.
525 */
526 static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size)
527 {
528 nodemask_t physnode_mask = NODE_MASK_NONE;
529 u64 min_size;
530 int ret = 0;
531 int i;
532
533 if (!size)
534 return -1;
535 /*
536 * The limit on emulated nodes is MAX_NUMNODES, so the size per node is
537 * increased accordingly if the requested size is too small. This
538 * creates a uniform distribution of node sizes across the entire
539 * machine (but not necessarily over physical nodes).
540 */
541 min_size = (max_addr - addr - e820_hole_size(addr, max_addr)) /
542 MAX_NUMNODES;
543 min_size = max(min_size, FAKE_NODE_MIN_SIZE);
544 if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size)
545 min_size = (min_size + FAKE_NODE_MIN_SIZE) &
546 FAKE_NODE_MIN_HASH_MASK;
547 if (size < min_size) {
548 pr_err("Fake node size %LuMB too small, increasing to %LuMB\n",
549 size >> 20, min_size >> 20);
550 size = min_size;
551 }
552 size &= FAKE_NODE_MIN_HASH_MASK;
553
554 for (i = 0; i < MAX_NUMNODES; i++)
555 if (physnodes[i].start != physnodes[i].end)
556 node_set(i, physnode_mask);
557 /*
558 * Fill physical nodes with fake nodes of size until there is no memory
559 * left on any of them.
560 */
561 while (nodes_weight(physnode_mask)) {
562 for_each_node_mask(i, physnode_mask) {
563 u64 dma32_end = MAX_DMA32_PFN << PAGE_SHIFT;
564 u64 end;
565
566 end = find_end_of_node(physnodes[i].start,
567 physnodes[i].end, size);
568 /*
569 * If there won't be at least FAKE_NODE_MIN_SIZE of
570 * non-reserved memory in ZONE_DMA32 for the next node,
571 * this one must extend to the boundary.
572 */
573 if (end < dma32_end && dma32_end - end -
574 e820_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
575 end = dma32_end;
576
577 /*
578 * If there won't be enough non-reserved memory for the
579 * next node, this one must extend to the end of the
580 * physical node.
581 */
582 if (physnodes[i].end - end -
583 e820_hole_size(end, physnodes[i].end) < size)
584 end = physnodes[i].end;
585
586 /*
587 * Set up the fake node that will be allocated as bootmem
588 * later. If setup_node_range() returns non-zero, there
589 * is no more memory available on this physical node.
590 */
591 if (setup_node_range(ret++, &physnodes[i].start,
592 end - physnodes[i].start,
593 physnodes[i].end) < 0)
594 node_clear(i, physnode_mask);
595 }
596 }
597 return ret;
598 }
599
600 /*
601 * Splits num_nodes nodes up equally starting at node_start. The return value
602 * is the number of nodes split up and addr is adjusted to be at the end of the
603 * last node allocated.
604 */
605 static int __init split_nodes_equally(u64 *addr, u64 max_addr, int node_start,
606 int num_nodes)
607 {
608 unsigned int big;
609 u64 size;
610 int i;
611
612 if (num_nodes <= 0)
613 return -1;
614 if (num_nodes > MAX_NUMNODES)
615 num_nodes = MAX_NUMNODES;
616 size = (max_addr - *addr - e820_hole_size(*addr, max_addr)) /
617 num_nodes;
618 /*
619 * Calculate the number of big nodes that can be allocated as a result
620 * of consolidating the leftovers.
621 */
622 big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * num_nodes) /
623 FAKE_NODE_MIN_SIZE;
624
625 /* Round down to nearest FAKE_NODE_MIN_SIZE. */
626 size &= FAKE_NODE_MIN_HASH_MASK;
627 if (!size) {
628 printk(KERN_ERR "Not enough memory for each node. "
629 "NUMA emulation disabled.\n");
630 return -1;
631 }
632
633 for (i = node_start; i < num_nodes + node_start; i++) {
634 u64 end = *addr + size;
635
636 if (i < big)
637 end += FAKE_NODE_MIN_SIZE;
638 /*
639 * The final node can have the remaining system RAM. Other
640 * nodes receive roughly the same amount of available pages.
641 */
642 if (i == num_nodes + node_start - 1)
643 end = max_addr;
644 else
645 end = find_end_of_node(*addr, max_addr, size);
646 if (setup_node_range(i, addr, end - *addr, max_addr) < 0)
647 break;
648 }
649 return i - node_start + 1;
650 }
651
652 /*
653 * Splits the remaining system RAM into chunks of `size'. The remaining
654 * memory is always assigned to a final node and can be asymmetric.
655 * Returns the number of nodes split.
656 */
657 static int __init split_nodes_by_size(u64 *addr, u64 max_addr, int node_start,
658 u64 size)
659 {
660 int i = node_start;
661 size = (size << 20) & FAKE_NODE_MIN_HASH_MASK;
662 while (!setup_node_range(i++, addr, size, max_addr))
663 ;
664 return i - node_start;
665 }
666
667 /*
668 * Sets up the system RAM area from start_pfn to last_pfn according to the
669 * numa=fake command-line option.
670 */
671 static int __init numa_emulation(unsigned long start_pfn,
672 unsigned long last_pfn, int acpi, int k8)
673 {
674 u64 size, addr = start_pfn << PAGE_SHIFT;
675 u64 max_addr = last_pfn << PAGE_SHIFT;
676 int num_nodes = 0, num = 0, coeff_flag, coeff = -1, i;
677 int num_phys_nodes;
678
679 num_phys_nodes = setup_physnodes(addr, max_addr, acpi, k8);
680 /*
681 * If the numa=fake command-line contains an 'M' or 'G', it represents
682 * the fixed node size.
683 */
684 if (strchr(cmdline, 'M') || strchr(cmdline, 'G')) {
685 size = memparse(cmdline, &cmdline);
686 num_nodes = split_nodes_size_interleave(addr, max_addr, size);
687 if (num_nodes < 0)
688 return num_nodes;
689 goto out;
690 }
691
692 /*
693 * If the numa=fake command-line is just a single number N, split the
694 * system RAM into N fake nodes.
695 */
696 if (!strchr(cmdline, '*') && !strchr(cmdline, ',')) {
697 long n = simple_strtol(cmdline, NULL, 0);
698
699 num_nodes = split_nodes_interleave(addr, max_addr,
700 num_phys_nodes, n);
701 if (num_nodes < 0)
702 return num_nodes;
703 goto out;
704 }
705
706 /* Parse the command line. */
707 for (coeff_flag = 0; ; cmdline++) {
708 if (*cmdline && isdigit(*cmdline)) {
709 num = num * 10 + *cmdline - '0';
710 continue;
711 }
712 if (*cmdline == '*') {
713 if (num > 0)
714 coeff = num;
715 coeff_flag = 1;
716 }
717 if (!*cmdline || *cmdline == ',') {
718 if (!coeff_flag)
719 coeff = 1;
720 /*
721 * Round down to the nearest FAKE_NODE_MIN_SIZE.
722 * Command-line coefficients are in megabytes.
723 */
724 size = ((u64)num << 20) & FAKE_NODE_MIN_HASH_MASK;
725 if (size)
726 for (i = 0; i < coeff; i++, num_nodes++)
727 if (setup_node_range(num_nodes, &addr,
728 size, max_addr) < 0)
729 goto done;
730 if (!*cmdline)
731 break;
732 coeff_flag = 0;
733 coeff = -1;
734 }
735 num = 0;
736 }
737 done:
738 if (!num_nodes)
739 return -1;
740 /* Fill remainder of system RAM, if appropriate. */
741 if (addr < max_addr) {
742 if (coeff_flag && coeff < 0) {
743 /* Split remaining nodes into num-sized chunks */
744 num_nodes += split_nodes_by_size(&addr, max_addr,
745 num_nodes, num);
746 goto out;
747 }
748 switch (*(cmdline - 1)) {
749 case '*':
750 /* Split remaining nodes into coeff chunks */
751 if (coeff <= 0)
752 break;
753 num_nodes += split_nodes_equally(&addr, max_addr,
754 num_nodes, coeff);
755 break;
756 case ',':
757 /* Do not allocate remaining system RAM */
758 break;
759 default:
760 /* Give one final node */
761 setup_node_range(num_nodes, &addr, max_addr - addr,
762 max_addr);
763 num_nodes++;
764 }
765 }
766 out:
767 memnode_shift = compute_hash_shift(nodes, num_nodes, NULL);
768 if (memnode_shift < 0) {
769 memnode_shift = 0;
770 printk(KERN_ERR "No NUMA hash function found. NUMA emulation "
771 "disabled.\n");
772 return -1;
773 }
774
775 /*
776 * We need to vacate all active ranges that may have been registered for
777 * the e820 memory map.
778 */
779 remove_all_active_ranges();
780 for_each_node_mask(i, node_possible_map) {
781 e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
782 nodes[i].end >> PAGE_SHIFT);
783 setup_node_bootmem(i, nodes[i].start, nodes[i].end);
784 }
785 acpi_fake_nodes(nodes, num_nodes);
786 numa_init_array();
787 return 0;
788 }
789 #endif /* CONFIG_NUMA_EMU */
790
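/*
 * Top-level NUMA setup: try numa=fake emulation first, then ACPI/SRAT,
 * then K8 northbridge information, and finally fall back to a single
 * dummy node covering all of memory.
 */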
791 void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn,
792 int acpi, int k8)
793 {
794 int i;
795
796 nodes_clear(node_possible_map);
797 nodes_clear(node_online_map);
798
799 #ifdef CONFIG_NUMA_EMU
800 if (cmdline && !numa_emulation(start_pfn, last_pfn, acpi, k8))
801 return;
802 nodes_clear(node_possible_map);
803 nodes_clear(node_online_map);
804 #endif
805
806 #ifdef CONFIG_ACPI_NUMA
807 if (!numa_off && acpi && !acpi_scan_nodes(start_pfn << PAGE_SHIFT,
808 last_pfn << PAGE_SHIFT))
809 return;
810 nodes_clear(node_possible_map);
811 nodes_clear(node_online_map);
812 #endif
813
814 #ifdef CONFIG_K8_NUMA
815 if (!numa_off && k8 && !k8_scan_nodes())
816 return;
817 nodes_clear(node_possible_map);
818 nodes_clear(node_online_map);
819 #endif
820 printk(KERN_INFO "%s\n",
821 numa_off ? "NUMA turned off" : "No NUMA configuration found");
822
823 printk(KERN_INFO "Faking a node at %016lx-%016lx\n",
824 start_pfn << PAGE_SHIFT,
825 last_pfn << PAGE_SHIFT);
826 /* setup dummy node covering all memory */
827 memnode_shift = 63;
828 memnodemap = memnode.embedded_map;
829 memnodemap[0] = 0;
830 node_set_online(0);
831 node_set(0, node_possible_map);
832 for (i = 0; i < nr_cpu_ids; i++)
833 numa_set_node(i, 0);
834 e820_register_active_regions(0, start_pfn, last_pfn);
835 setup_node_bootmem(0, start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT);
836 }
837
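/* Release bootmem on every online node; returns the number of pages freed. */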
838 unsigned long __init numa_free_all_bootmem(void)
839 {
840 unsigned long pages = 0;
841 int i;
842
843 for_each_online_node(i)
844 pages += free_all_bootmem_node(NODE_DATA(i));
845
846 return pages;
847 }
848
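/*
 * Parse the "numa=" early parameter: "off", "fake=..." (emulation)
 * and "noacpi" are recognized.
 */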
849 static __init int numa_setup(char *opt)
850 {
851 if (!opt)
852 return -EINVAL;
853 if (!strncmp(opt, "off", 3))
854 numa_off = 1;
855 #ifdef CONFIG_NUMA_EMU
856 if (!strncmp(opt, "fake=", 5))
857 cmdline = opt + 5;
858 #endif
859 #ifdef CONFIG_ACPI_NUMA
860 if (!strncmp(opt, "noacpi", 6))
861 acpi_numa = -1;
862 #endif
863 return 0;
864 }
865 early_param("numa", numa_setup);
866
867 #ifdef CONFIG_NUMA
868
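/* Find the online node with the smallest node_distance() to @node. */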
869 static __init int find_near_online_node(int node)
870 {
871 int n, val;
872 int min_val = INT_MAX;
873 int best_node = -1;
874
875 for_each_online_node(n) {
876 val = node_distance(node, n);
877
878 if (val < min_val) {
879 min_val = val;
880 best_node = n;
881 }
882 }
883
884 return best_node;
885 }
886
887 /*
888 * Set up early cpu_to_node.
889 *
890 * Populate cpu_to_node[] only if the x86_cpu_to_apicid[]
891 * and apicid_to_node[] tables have valid entries for a CPU.
892 * This means we skip cpu_to_node[] initialisation for NUMA
893 * emulation and the fake node case (when running a kernel compiled
894 * for NUMA on a non-NUMA box), which is OK as cpu_to_node[]
895 * is already initialized in a round-robin manner by numa_init_array()
896 * prior to this call, and that initialization is good enough
897 * for the fake NUMA cases.
898 *
899 * Called before the per_cpu areas are set up.
900 */
901 void __init init_cpu_to_node(void)
902 {
903 int cpu;
904 u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);
905
906 BUG_ON(cpu_to_apicid == NULL);
907
908 for_each_possible_cpu(cpu) {
909 int node;
910 u16 apicid = cpu_to_apicid[cpu];
911
912 if (apicid == BAD_APICID)
913 continue;
914 node = apicid_to_node[apicid];
915 if (node == NUMA_NO_NODE)
916 continue;
917 if (!node_online(node))
918 node = find_near_online_node(node);
919 numa_set_node(cpu, node);
920 }
921 }
922 #endif
923
924
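/*
 * Bind a CPU to a node, using the early map before the per-cpu areas
 * exist and the per-cpu x86_cpu_to_node_map afterwards.
 */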
925 void __cpuinit numa_set_node(int cpu, int node)
926 {
927 int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
928
929 /* early setting, no percpu area yet */
930 if (cpu_to_node_map) {
931 cpu_to_node_map[cpu] = node;
932 return;
933 }
934
935 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
936 if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
937 printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
938 dump_stack();
939 return;
940 }
941 #endif
942 per_cpu(x86_cpu_to_node_map, cpu) = node;
943
944 if (node != NUMA_NO_NODE)
945 per_cpu(node_number, cpu) = node;
946 }
947
948 void __cpuinit numa_clear_node(int cpu)
949 {
950 numa_set_node(cpu, NUMA_NO_NODE);
951 }
952
953 #ifndef CONFIG_DEBUG_PER_CPU_MAPS
954
955 void __cpuinit numa_add_cpu(int cpu)
956 {
957 cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
958 }
959
960 void __cpuinit numa_remove_cpu(int cpu)
961 {
962 cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
963 }
964
965 #else /* CONFIG_DEBUG_PER_CPU_MAPS */
966
967 /*
968 * --------- debug versions of the numa functions ---------
969 */
970 static void __cpuinit numa_set_cpumask(int cpu, int enable)
971 {
972 int node = early_cpu_to_node(cpu);
973 struct cpumask *mask;
974 char buf[64];
975
976 mask = node_to_cpumask_map[node];
977 if (mask == NULL) {
978 printk(KERN_ERR "node_to_cpumask_map[%i] NULL\n", node);
979 dump_stack();
980 return;
981 }
982
983 if (enable)
984 cpumask_set_cpu(cpu, mask);
985 else
986 cpumask_clear_cpu(cpu, mask);
987
988 cpulist_scnprintf(buf, sizeof(buf), mask);
989 printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
990 enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
991 }
992
993 void __cpuinit numa_add_cpu(int cpu)
994 {
995 numa_set_cpumask(cpu, 1);
996 }
997
998 void __cpuinit numa_remove_cpu(int cpu)
999 {
1000 numa_set_cpumask(cpu, 0);
1001 }
1002
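/*
 * Debug version of cpu_to_node(): warn if it is used before the
 * per-cpu areas have been set up.
 */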
1003 int cpu_to_node(int cpu)
1004 {
1005 if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
1006 printk(KERN_WARNING
1007 "cpu_to_node(%d): usage too early!\n", cpu);
1008 dump_stack();
1009 return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
1010 }
1011 return per_cpu(x86_cpu_to_node_map, cpu);
1012 }
1013 EXPORT_SYMBOL(cpu_to_node);
1014
1015 /*
1016 * Same function as cpu_to_node() but used if called before the
1017 * per_cpu areas are setup.
1018 */
1019 int early_cpu_to_node(int cpu)
1020 {
1021 if (early_per_cpu_ptr(x86_cpu_to_node_map))
1022 return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
1023
1024 if (!cpu_possible(cpu)) {
1025 printk(KERN_WARNING
1026 "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
1027 dump_stack();
1028 return NUMA_NO_NODE;
1029 }
1030 return per_cpu(x86_cpu_to_node_map, cpu);
1031 }
1032
1033 /*
1034 * --------- end of debug versions of the numa functions ---------
1035 */
1036
1037 #endif /* CONFIG_DEBUG_PER_CPU_MAPS */