Commit | Line | Data |
---|---|---|
71ee73e7 | 1 | /* Common code for 32 and 64-bit NUMA */ |
a4106eae TH | 2 | #include <linux/kernel.h> |
3 | #include <linux/mm.h> | |
4 | #include <linux/string.h> | |
5 | #include <linux/init.h> | |
71ee73e7 | 6 | #include <linux/bootmem.h> |
a4106eae TH | 7 | #include <linux/memblock.h> |
8 | #include <linux/mmzone.h> | |
9 | #include <linux/ctype.h> | |
10 | #include <linux/module.h> | |
11 | #include <linux/nodemask.h> | |
12 | #include <linux/sched.h> | |
13 | #include <linux/topology.h> | |
14 | ||
15 | #include <asm/e820.h> | |
16 | #include <asm/proto.h> | |
17 | #include <asm/dma.h> | |
90321602 | 18 | #include <asm/acpi.h> |
a4106eae TH | 19 | #include <asm/amd_nb.h> |
20 | ||
21 | #include "numa_internal.h" | |
90321602 JB | 22 ||
23 | int __initdata numa_off; | |
e6df595b | 24 | nodemask_t numa_nodes_parsed __initdata; |
90321602 | 25 | |
a4106eae TH | 26 | #ifdef CONFIG_X86_64 |
27 | struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; | |
28 | EXPORT_SYMBOL(node_data); | |
29 | ||
30 | static struct numa_meminfo numa_meminfo | |
31 | #ifndef CONFIG_MEMORY_HOTPLUG | |
32 | __initdata | |
33 | #endif | |
34 | ; | |
35 | ||
36 | static int numa_distance_cnt; | |
37 | static u8 *numa_distance; | |
38 | #endif | |
39 | ||
90321602 JB | 40 | static __init int numa_setup(char *opt) |
41 | { | |
42 | if (!opt) | |
43 | return -EINVAL; | |
44 | if (!strncmp(opt, "off", 3)) | |
45 | numa_off = 1; | |
46 | #ifdef CONFIG_NUMA_EMU | |
47 | if (!strncmp(opt, "fake=", 5)) | |
48 | numa_emu_cmdline(opt + 5); | |
49 | #endif | |
50 | #ifdef CONFIG_ACPI_NUMA | |
51 | if (!strncmp(opt, "noacpi", 6)) | |
52 | acpi_numa = -1; | |
53 | #endif | |
54 | return 0; | |
55 | } | |
56 | early_param("numa", numa_setup); | |
71ee73e7 | 57 | |
71ee73e7 | 58 | /* |
bbc9e2f4 | 59 | * apicid, cpu, node mappings |
71ee73e7 | 60 | */ |
bbc9e2f4 TH | 61 | s16 __apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = { |
62 | [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE | |
63 | }; | |
64 | ||
6bd26273 TH | 65 | int __cpuinit numa_cpu_node(int cpu) |
66 | { | |
67 | int apicid = early_per_cpu(x86_cpu_to_apicid, cpu); | |
68 | ||
69 | if (apicid != BAD_APICID) | |
70 | return __apicid_to_node[apicid]; | |
71 | return NUMA_NO_NODE; | |
72 | } | |
73 | ||
c032ef60 | 74 | cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; |
71ee73e7 RR | 75 | EXPORT_SYMBOL(node_to_cpumask_map); |
76 | ||
645a7919 TH | 77 | /* |
78 | * Map cpu index to node index | |
79 | */ | |
645a7919 | 80 | DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE); |
645a7919 TH | 81 | EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map); |
82 | ||
83 | void __cpuinit numa_set_node(int cpu, int node) | |
84 | { | |
85 | int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map); | |
86 | ||
87 | /* early setting, no percpu area yet */ | |
88 | if (cpu_to_node_map) { | |
89 | cpu_to_node_map[cpu] = node; | |
90 | return; | |
91 | } | |
92 | ||
93 | #ifdef CONFIG_DEBUG_PER_CPU_MAPS | |
94 | if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) { | |
95 | printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu); | |
96 | dump_stack(); | |
97 | return; | |
98 | } | |
99 | #endif | |
100 | per_cpu(x86_cpu_to_node_map, cpu) = node; | |
101 | ||
102 | if (node != NUMA_NO_NODE) | |
103 | set_cpu_numa_node(cpu, node); | |
104 | } | |
105 | ||
106 | void __cpuinit numa_clear_node(int cpu) | |
107 | { | |
108 | numa_set_node(cpu, NUMA_NO_NODE); | |
109 | } | |
110 | ||
71ee73e7 RR | 111 | /* |
112 | * Allocate node_to_cpumask_map based on number of available nodes | |
113 | * Requires node_possible_map to be valid. | |
114 | * | |
115 | * Note: cpumask_of_node() is not valid until after this is done. | |
116 | * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.) | |
117 | */ | |
118 | void __init setup_node_to_cpumask_map(void) | |
119 | { | |
120 | unsigned int node, num = 0; | |
71ee73e7 RR | 121 ||
122 | /* setup nr_node_ids if not done yet */ | |
123 | if (nr_node_ids == MAX_NUMNODES) { | |
124 | for_each_node_mask(node, node_possible_map) | |
125 | num = node; | |
126 | nr_node_ids = num + 1; | |
127 | } | |
128 | ||
129 | /* allocate the map */ | |
c032ef60 RR | 130 | for (node = 0; node < nr_node_ids; node++) |
131 | alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]); | |
71ee73e7 | 132 | |
c032ef60 RR | 133 | /* cpumask_of_node() will now work */ |
134 | pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids); | |
71ee73e7 RR | 135 | } |
136 | ||
a4106eae TH | 137 | #ifdef CONFIG_X86_64 |
138 | static int __init numa_add_memblk_to(int nid, u64 start, u64 end, | |
139 | struct numa_meminfo *mi) | |
140 | { | |
141 | /* ignore zero length blks */ | |
142 | if (start == end) | |
143 | return 0; | |
144 | ||
145 | /* whine about and ignore invalid blks */ | |
146 | if (start > end || nid < 0 || nid >= MAX_NUMNODES) { | |
147 | pr_warning("NUMA: Warning: invalid memblk node %d (%Lx-%Lx)\n", | |
148 | nid, start, end); | |
149 | return 0; | |
150 | } | |
151 | ||
152 | if (mi->nr_blks >= NR_NODE_MEMBLKS) { | |
153 | pr_err("NUMA: too many memblk ranges\n"); | |
154 | return -EINVAL; | |
155 | } | |
156 | ||
157 | mi->blk[mi->nr_blks].start = start; | |
158 | mi->blk[mi->nr_blks].end = end; | |
159 | mi->blk[mi->nr_blks].nid = nid; | |
160 | mi->nr_blks++; | |
161 | return 0; | |
162 | } | |
163 | ||
164 | /** | |
165 | * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo | |
166 | * @idx: Index of memblk to remove | |
167 | * @mi: numa_meminfo to remove memblk from | |
168 | * | |
169 | * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and | |
170 | * decrementing @mi->nr_blks. | |
171 | */ | |
172 | void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi) | |
173 | { | |
174 | mi->nr_blks--; | |
175 | memmove(&mi->blk[idx], &mi->blk[idx + 1], | |
176 | (mi->nr_blks - idx) * sizeof(mi->blk[0])); | |
177 | } | |
178 | ||
179 | /** | |
180 | * numa_add_memblk - Add one numa_memblk to numa_meminfo | |
181 | * @nid: NUMA node ID of the new memblk | |
182 | * @start: Start address of the new memblk | |
183 | * @end: End address of the new memblk | |
184 | * | |
185 | * Add a new memblk to the default numa_meminfo. | |
186 | * | |
187 | * RETURNS: | |
188 | * 0 on success, -errno on failure. | |
189 | */ | |
190 | int __init numa_add_memblk(int nid, u64 start, u64 end) | |
191 | { | |
192 | return numa_add_memblk_to(nid, start, end, &numa_meminfo); | |
193 | } | |
194 | ||
195 | /* Initialize bootmem allocator for a node */ | |
196 | static void __init | |
197 | setup_node_bootmem(int nid, unsigned long start, unsigned long end) | |
198 | { | |
199 | const u64 nd_low = (u64)MAX_DMA_PFN << PAGE_SHIFT; | |
200 | const u64 nd_high = (u64)max_pfn_mapped << PAGE_SHIFT; | |
201 | const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE); | |
202 | unsigned long nd_pa; | |
203 | int tnid; | |
204 | ||
205 | /* | |
206 | * Don't confuse VM with a node that doesn't have the | |
207 | * minimum amount of memory: | |
208 | */ | |
209 | if (end && (end - start) < NODE_MIN_SIZE) | |
210 | return; | |
211 | ||
212 | start = roundup(start, ZONE_ALIGN); | |
213 | ||
214 | printk(KERN_INFO "Initmem setup node %d %016lx-%016lx\n", | |
215 | nid, start, end); | |
216 | ||
217 | /* | |
218 | * Try to allocate node data on local node and then fall back to | |
219 | * all nodes. Never allocate in DMA zone. | |
220 | */ | |
221 | nd_pa = memblock_x86_find_in_range_node(nid, nd_low, nd_high, | |
222 | nd_size, SMP_CACHE_BYTES); | |
223 | if (nd_pa == MEMBLOCK_ERROR) | |
224 | nd_pa = memblock_find_in_range(nd_low, nd_high, | |
225 | nd_size, SMP_CACHE_BYTES); | |
226 | if (nd_pa == MEMBLOCK_ERROR) { | |
227 | pr_err("Cannot find %lu bytes in node %d\n", nd_size, nid); | |
228 | return; | |
229 | } | |
230 | memblock_x86_reserve_range(nd_pa, nd_pa + nd_size, "NODE_DATA"); | |
231 | ||
232 | /* report and initialize */ | |
233 | printk(KERN_INFO " NODE_DATA [%016lx - %016lx]\n", | |
234 | nd_pa, nd_pa + nd_size - 1); | |
235 | tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT); | |
236 | if (tnid != nid) | |
237 | printk(KERN_INFO " NODE_DATA(%d) on node %d\n", nid, tnid); | |
238 | ||
239 | node_data[nid] = __va(nd_pa); | |
240 | memset(NODE_DATA(nid), 0, sizeof(pg_data_t)); | |
241 | NODE_DATA(nid)->node_id = nid; | |
242 | NODE_DATA(nid)->node_start_pfn = start >> PAGE_SHIFT; | |
243 | NODE_DATA(nid)->node_spanned_pages = (end - start) >> PAGE_SHIFT; | |
244 | ||
245 | node_set_online(nid); | |
246 | } | |
247 | ||
248 | /** | |
249 | * numa_cleanup_meminfo - Cleanup a numa_meminfo | |
250 | * @mi: numa_meminfo to clean up | |
251 | * | |
252 | * Sanitize @mi by merging and removing unnecessary memblks. Also check for |
253 | * conflicts and clear unused memblks. | |
254 | * | |
255 | * RETURNS: | |
256 | * 0 on success, -errno on failure. | |
257 | */ | |
258 | int __init numa_cleanup_meminfo(struct numa_meminfo *mi) | |
259 | { | |
260 | const u64 low = 0; | |
261 | const u64 high = (u64)max_pfn << PAGE_SHIFT; | |
262 | int i, j, k; | |
263 | ||
264 | for (i = 0; i < mi->nr_blks; i++) { | |
265 | struct numa_memblk *bi = &mi->blk[i]; | |
266 | ||
267 | /* make sure all blocks are inside the limits */ | |
268 | bi->start = max(bi->start, low); | |
269 | bi->end = min(bi->end, high); | |
270 | ||
271 | /* and there's no empty block */ | |
272 | if (bi->start >= bi->end) { | |
273 | numa_remove_memblk_from(i--, mi); | |
274 | continue; | |
275 | } | |
276 | ||
277 | for (j = i + 1; j < mi->nr_blks; j++) { | |
278 | struct numa_memblk *bj = &mi->blk[j]; | |
279 | unsigned long start, end; | |
280 | ||
281 | /* | |
282 | * See whether there are overlapping blocks. Whine | |
283 | * about but allow overlaps of the same nid. They | |
284 | * will be merged below. | |
285 | */ | |
286 | if (bi->end > bj->start && bi->start < bj->end) { | |
287 | if (bi->nid != bj->nid) { | |
288 | pr_err("NUMA: node %d (%Lx-%Lx) overlaps with node %d (%Lx-%Lx)\n", | |
289 | bi->nid, bi->start, bi->end, | |
290 | bj->nid, bj->start, bj->end); | |
291 | return -EINVAL; | |
292 | } | |
293 | pr_warning("NUMA: Warning: node %d (%Lx-%Lx) overlaps with itself (%Lx-%Lx)\n", | |
294 | bi->nid, bi->start, bi->end, | |
295 | bj->start, bj->end); | |
296 | } | |
297 | ||
298 | /* | |
299 | * Join together blocks on the same node, holes | |
300 | * between which don't overlap with memory on other | |
301 | * nodes. | |
302 | */ | |
303 | if (bi->nid != bj->nid) | |
304 | continue; | |
305 | start = max(min(bi->start, bj->start), low); | |
306 | end = min(max(bi->end, bj->end), high); | |
307 | for (k = 0; k < mi->nr_blks; k++) { | |
308 | struct numa_memblk *bk = &mi->blk[k]; | |
309 | ||
310 | if (bi->nid == bk->nid) | |
311 | continue; | |
312 | if (start < bk->end && end > bk->start) | |
313 | break; | |
314 | } | |
315 | if (k < mi->nr_blks) | |
316 | continue; | |
317 | printk(KERN_INFO "NUMA: Node %d [%Lx,%Lx) + [%Lx,%Lx) -> [%lx,%lx)\n", | |
318 | bi->nid, bi->start, bi->end, bj->start, bj->end, | |
319 | start, end); | |
320 | bi->start = start; | |
321 | bi->end = end; | |
322 | numa_remove_memblk_from(j--, mi); | |
323 | } | |
324 | } | |
325 | ||
326 | for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) { | |
327 | mi->blk[i].start = mi->blk[i].end = 0; | |
328 | mi->blk[i].nid = NUMA_NO_NODE; | |
329 | } | |
330 | ||
331 | return 0; | |
332 | } | |
333 | ||
334 | /* | |
335 | * Set nodes, which have memory in @mi, in *@nodemask. | |
336 | */ | |
337 | static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask, | |
338 | const struct numa_meminfo *mi) | |
339 | { | |
340 | int i; | |
341 | ||
342 | for (i = 0; i < ARRAY_SIZE(mi->blk); i++) | |
343 | if (mi->blk[i].start != mi->blk[i].end && | |
344 | mi->blk[i].nid != NUMA_NO_NODE) | |
345 | node_set(mi->blk[i].nid, *nodemask); | |
346 | } | |
347 | ||
348 | /** | |
349 | * numa_reset_distance - Reset NUMA distance table | |
350 | * | |
351 | * The current table is freed. The next numa_set_distance() call will | |
352 | * create a new one. | |
353 | */ | |
354 | void __init numa_reset_distance(void) | |
355 | { | |
356 | size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]); | |
357 | ||
358 | /* numa_distance could be 1LU marking allocation failure, test cnt */ | |
359 | if (numa_distance_cnt) | |
360 | memblock_x86_free_range(__pa(numa_distance), | |
361 | __pa(numa_distance) + size); | |
362 | numa_distance_cnt = 0; | |
363 | numa_distance = NULL; /* enable table creation */ | |
364 | } | |
365 | ||
366 | static int __init numa_alloc_distance(void) | |
367 | { | |
368 | nodemask_t nodes_parsed; | |
369 | size_t size; | |
370 | int i, j, cnt = 0; | |
371 | u64 phys; | |
372 | ||
373 | /* size the new table and allocate it */ | |
374 | nodes_parsed = numa_nodes_parsed; | |
375 | numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo); | |
376 | ||
377 | for_each_node_mask(i, nodes_parsed) | |
378 | cnt = i; | |
379 | cnt++; | |
380 | size = cnt * cnt * sizeof(numa_distance[0]); | |
381 | ||
382 | phys = memblock_find_in_range(0, (u64)max_pfn_mapped << PAGE_SHIFT, | |
383 | size, PAGE_SIZE); | |
384 | if (phys == MEMBLOCK_ERROR) { | |
385 | pr_warning("NUMA: Warning: can't allocate distance table!\n"); | |
386 | /* don't retry until explicitly reset */ | |
387 | numa_distance = (void *)1LU; | |
388 | return -ENOMEM; | |
389 | } | |
390 | memblock_x86_reserve_range(phys, phys + size, "NUMA DIST"); | |
391 | ||
392 | numa_distance = __va(phys); | |
393 | numa_distance_cnt = cnt; | |
394 | ||
395 | /* fill with the default distances */ | |
396 | for (i = 0; i < cnt; i++) | |
397 | for (j = 0; j < cnt; j++) | |
398 | numa_distance[i * cnt + j] = i == j ? | |
399 | LOCAL_DISTANCE : REMOTE_DISTANCE; | |
400 | printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt); | |
401 | ||
402 | return 0; | |
403 | } | |
404 | ||
405 | /** | |
406 | * numa_set_distance - Set NUMA distance from one NUMA node to another |
407 | * @from: the 'from' node to set distance | |
408 | * @to: the 'to' node to set distance | |
409 | * @distance: NUMA distance | |
410 | * | |
411 | * Set the distance from node @from to @to to @distance. If distance table | |
412 | * doesn't exist, one which is large enough to accommodate all the currently | |
413 | * known nodes will be created. | |
414 | * | |
415 | * If such table cannot be allocated, a warning is printed and further | |
416 | * calls are ignored until the distance table is reset with | |
417 | * numa_reset_distance(). | |
418 | * | |
419 | * If @from or @to is higher than the highest known node at the time of | |
420 | * table creation or @distance doesn't make sense, the call is ignored. | |
421 | * This is to allow simplification of specific NUMA config implementations. | |
422 | */ | |
423 | void __init numa_set_distance(int from, int to, int distance) | |
424 | { | |
425 | if (!numa_distance && numa_alloc_distance() < 0) | |
426 | return; | |
427 | ||
428 | if (from >= numa_distance_cnt || to >= numa_distance_cnt) { | |
429 | printk_once(KERN_DEBUG "NUMA: Debug: distance out of bound, from=%d to=%d distance=%d\n", | |
430 | from, to, distance); | |
431 | return; | |
432 | } | |
433 | ||
434 | if ((u8)distance != distance || | |
435 | (from == to && distance != LOCAL_DISTANCE)) { | |
436 | pr_warn_once("NUMA: Warning: invalid distance parameter, from=%d to=%d distance=%d\n", | |
437 | from, to, distance); | |
438 | return; | |
439 | } | |
440 | ||
441 | numa_distance[from * numa_distance_cnt + to] = distance; | |
442 | } | |
443 | ||
444 | int __node_distance(int from, int to) | |
445 | { | |
446 | if (from >= numa_distance_cnt || to >= numa_distance_cnt) | |
447 | return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE; | |
448 | return numa_distance[from * numa_distance_cnt + to]; | |
449 | } | |
450 | EXPORT_SYMBOL(__node_distance); | |
451 | ||
452 | /* | |
453 | * Sanity check to catch more bad NUMA configurations (they are amazingly | |
454 | * common). Make sure the nodes cover all memory. | |
455 | */ | |
456 | static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi) | |
457 | { | |
458 | unsigned long numaram, e820ram; | |
459 | int i; | |
460 | ||
461 | numaram = 0; | |
462 | for (i = 0; i < mi->nr_blks; i++) { | |
463 | unsigned long s = mi->blk[i].start >> PAGE_SHIFT; | |
464 | unsigned long e = mi->blk[i].end >> PAGE_SHIFT; | |
465 | numaram += e - s; | |
466 | numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e); | |
467 | if ((long)numaram < 0) | |
468 | numaram = 0; | |
469 | } | |
470 | ||
471 | e820ram = max_pfn - (memblock_x86_hole_size(0, | |
472 | max_pfn << PAGE_SHIFT) >> PAGE_SHIFT); | |
473 | /* We seem to lose 3 pages somewhere. Allow 1M of slack. */ | |
474 | if ((long)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) { | |
475 | printk(KERN_ERR "NUMA: nodes only cover %luMB of your %luMB e820 RAM. Not used.\n", | |
476 | (numaram << PAGE_SHIFT) >> 20, | |
477 | (e820ram << PAGE_SHIFT) >> 20); | |
478 | return false; | |
479 | } | |
480 | return true; | |
481 | } | |
482 | ||
483 | static int __init numa_register_memblks(struct numa_meminfo *mi) | |
484 | { | |
485 | int i, nid; | |
486 | ||
487 | /* Account for nodes with cpus and no memory */ | |
488 | node_possible_map = numa_nodes_parsed; | |
489 | numa_nodemask_from_meminfo(&node_possible_map, mi); | |
490 | if (WARN_ON(nodes_empty(node_possible_map))) | |
491 | return -EINVAL; | |
492 | ||
493 | for (i = 0; i < mi->nr_blks; i++) | |
494 | memblock_x86_register_active_regions(mi->blk[i].nid, | |
495 | mi->blk[i].start >> PAGE_SHIFT, | |
496 | mi->blk[i].end >> PAGE_SHIFT); | |
497 | ||
498 | /* for out of order entries */ | |
499 | sort_node_map(); | |
500 | if (!numa_meminfo_cover_memory(mi)) | |
501 | return -EINVAL; | |
502 | ||
503 | /* Finally register nodes. */ | |
504 | for_each_node_mask(nid, node_possible_map) { | |
505 | u64 start = (u64)max_pfn << PAGE_SHIFT; | |
506 | u64 end = 0; | |
507 | ||
508 | for (i = 0; i < mi->nr_blks; i++) { | |
509 | if (nid != mi->blk[i].nid) | |
510 | continue; | |
511 | start = min(mi->blk[i].start, start); | |
512 | end = max(mi->blk[i].end, end); | |
513 | } | |
514 | ||
515 | if (start < end) | |
516 | setup_node_bootmem(nid, start, end); | |
517 | } | |
518 | ||
519 | return 0; | |
520 | } | |
521 | #endif | |
522 | ||
8db78cc4 TH | 523 | /* |
524 | * There are unfortunately some poorly designed mainboards around that | |
525 | * only connect memory to a single CPU. This breaks the 1:1 cpu->node | |
526 | * mapping. To avoid this, fill in the mapping for all possible CPUs, |
527 | * as the number of CPUs is not known yet. We round robin the existing | |
528 | * nodes. | |
529 | */ | |
530 | void __init numa_init_array(void) | |
531 | { | |
532 | int rr, i; | |
533 | ||
534 | rr = first_node(node_online_map); | |
535 | for (i = 0; i < nr_cpu_ids; i++) { | |
536 | if (early_cpu_to_node(i) != NUMA_NO_NODE) | |
537 | continue; | |
538 | numa_set_node(i, rr); | |
539 | rr = next_node(rr, node_online_map); | |
540 | if (rr == MAX_NUMNODES) | |
541 | rr = first_node(node_online_map); | |
542 | } | |
543 | } | |
544 | ||
a4106eae TH | 545 | #ifdef CONFIG_X86_64 |
546 | static int __init numa_init(int (*init_func)(void)) | |
547 | { | |
548 | int i; | |
549 | int ret; | |
550 | ||
551 | for (i = 0; i < MAX_LOCAL_APIC; i++) | |
552 | set_apicid_to_node(i, NUMA_NO_NODE); | |
553 | ||
554 | nodes_clear(numa_nodes_parsed); | |
555 | nodes_clear(node_possible_map); | |
556 | nodes_clear(node_online_map); | |
557 | memset(&numa_meminfo, 0, sizeof(numa_meminfo)); | |
558 | remove_all_active_ranges(); | |
559 | numa_reset_distance(); | |
560 | ||
561 | ret = init_func(); | |
562 | if (ret < 0) | |
563 | return ret; | |
564 | ret = numa_cleanup_meminfo(&numa_meminfo); | |
565 | if (ret < 0) | |
566 | return ret; | |
567 | ||
568 | numa_emulation(&numa_meminfo, numa_distance_cnt); | |
569 | ||
570 | ret = numa_register_memblks(&numa_meminfo); | |
571 | if (ret < 0) | |
572 | return ret; | |
573 | ||
574 | for (i = 0; i < nr_cpu_ids; i++) { | |
575 | int nid = early_cpu_to_node(i); | |
576 | ||
577 | if (nid == NUMA_NO_NODE) | |
578 | continue; | |
579 | if (!node_online(nid)) | |
580 | numa_clear_node(i); | |
581 | } | |
582 | numa_init_array(); | |
583 | return 0; | |
584 | } | |
585 | ||
586 | /** | |
587 | * dummy_numa_init - Fallback dummy NUMA init | |
588 | * | |
589 | * Used if there's no underlying NUMA architecture, NUMA initialization | |
590 | * fails, or NUMA is disabled on the command line. | |
591 | * | |
592 | * Must online at least one node and add memory blocks that cover all | |
593 | * allowed memory. This function must not fail. | |
594 | */ | |
595 | static int __init dummy_numa_init(void) | |
596 | { | |
597 | printk(KERN_INFO "%s\n", | |
598 | numa_off ? "NUMA turned off" : "No NUMA configuration found"); | |
599 | printk(KERN_INFO "Faking a node at %016lx-%016lx\n", | |
600 | 0LU, max_pfn << PAGE_SHIFT); | |
601 | ||
602 | node_set(0, numa_nodes_parsed); | |
603 | numa_add_memblk(0, 0, (u64)max_pfn << PAGE_SHIFT); | |
604 | ||
605 | return 0; | |
606 | } | |
607 | ||
608 | /** | |
609 | * x86_numa_init - Initialize NUMA | |
610 | * | |
611 | * Try each configured NUMA initialization method until one succeeds. The | |
612 | * last fallback is a dummy single node config encompassing whole memory and |
613 | * never fails. | |
614 | */ | |
615 | void __init x86_numa_init(void) | |
616 | { | |
617 | if (!numa_off) { | |
618 | #ifdef CONFIG_ACPI_NUMA | |
619 | if (!numa_init(x86_acpi_numa_init)) | |
620 | return; | |
621 | #endif | |
622 | #ifdef CONFIG_AMD_NUMA | |
623 | if (!numa_init(amd_numa_init)) | |
624 | return; | |
625 | #endif | |
626 | } | |
627 | ||
628 | numa_init(dummy_numa_init); | |
629 | } | |
630 | #endif | |
631 | ||
8db78cc4 TH | 632 | static __init int find_near_online_node(int node) |
633 | { | |
634 | int n, val; | |
635 | int min_val = INT_MAX; | |
636 | int best_node = -1; | |
637 | ||
638 | for_each_online_node(n) { | |
639 | val = node_distance(node, n); | |
640 | ||
641 | if (val < min_val) { | |
642 | min_val = val; | |
643 | best_node = n; | |
644 | } | |
645 | } | |
646 | ||
647 | return best_node; | |
648 | } | |
649 | ||
650 | /* | |
651 | * Setup early cpu_to_node. | |
652 | * | |
653 | * Populate cpu_to_node[] only if x86_cpu_to_apicid[], | |
654 | * and apicid_to_node[] tables have valid entries for a CPU. | |
655 | * This means we skip cpu_to_node[] initialisation for NUMA | |
656 | * emulation and faking node case (when running a kernel compiled | |
657 | * for NUMA on a non NUMA box), which is OK as cpu_to_node[] | |
658 | * is already initialized in a round robin manner at numa_init_array, | |
659 | * prior to this call, and this initialization is good enough | |
660 | * for the fake NUMA cases. | |
661 | * | |
662 | * Called before the per_cpu areas are setup. | |
663 | */ | |
664 | void __init init_cpu_to_node(void) | |
665 | { | |
666 | int cpu; | |
667 | u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid); | |
668 | ||
669 | BUG_ON(cpu_to_apicid == NULL); | |
670 | ||
671 | for_each_possible_cpu(cpu) { | |
672 | int node = numa_cpu_node(cpu); | |
673 | ||
674 | if (node == NUMA_NO_NODE) | |
675 | continue; | |
676 | if (!node_online(node)) | |
677 | node = find_near_online_node(node); | |
678 | numa_set_node(cpu, node); | |
679 | } | |
680 | } | |
681 | ||
de2d9445 TH | 682 | #ifndef CONFIG_DEBUG_PER_CPU_MAPS |
683 | ||
684 | # ifndef CONFIG_NUMA_EMU | |
685 | void __cpuinit numa_add_cpu(int cpu) | |
686 | { | |
687 | cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); | |
688 | } | |
689 | ||
690 | void __cpuinit numa_remove_cpu(int cpu) | |
691 | { | |
692 | cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); | |
693 | } | |
694 | # endif /* !CONFIG_NUMA_EMU */ | |
695 | ||
696 | #else /* !CONFIG_DEBUG_PER_CPU_MAPS */ | |
645a7919 TH | 697 ||
698 | int __cpu_to_node(int cpu) | |
699 | { | |
700 | if (early_per_cpu_ptr(x86_cpu_to_node_map)) { | |
701 | printk(KERN_WARNING | |
702 | "cpu_to_node(%d): usage too early!\n", cpu); | |
703 | dump_stack(); | |
704 | return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu]; | |
705 | } | |
706 | return per_cpu(x86_cpu_to_node_map, cpu); | |
707 | } | |
708 | EXPORT_SYMBOL(__cpu_to_node); | |
709 | ||
710 | /* | |
711 | * Same function as cpu_to_node() but used if called before the | |
712 | * per_cpu areas are setup. | |
713 | */ | |
714 | int early_cpu_to_node(int cpu) | |
715 | { | |
716 | if (early_per_cpu_ptr(x86_cpu_to_node_map)) | |
717 | return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu]; | |
718 | ||
719 | if (!cpu_possible(cpu)) { | |
720 | printk(KERN_WARNING | |
721 | "early_cpu_to_node(%d): no per_cpu area!\n", cpu); | |
722 | dump_stack(); | |
723 | return NUMA_NO_NODE; | |
724 | } | |
725 | return per_cpu(x86_cpu_to_node_map, cpu); | |
726 | } | |
727 | ||
7a6c6547 | 728 | void debug_cpumask_set_cpu(int cpu, int node, bool enable) |
de2d9445 | 729 | { |
de2d9445 TH | 730 | struct cpumask *mask; |
731 | char buf[64]; | |
732 | ||
14392fd3 DR | 733 | if (node == NUMA_NO_NODE) { |
734 | /* early_cpu_to_node() already emits a warning and trace */ | |
7a6c6547 | 735 | return; |
14392fd3 | 736 | } |
de2d9445 TH | 737 | mask = node_to_cpumask_map[node]; |
738 | if (!mask) { | |
739 | pr_err("node_to_cpumask_map[%i] NULL\n", node); | |
740 | dump_stack(); | |
7a6c6547 | 741 | return; |
de2d9445 TH | 742 | } |
743 | ||
7a6c6547 DR | 744 | if (enable) |
745 | cpumask_set_cpu(cpu, mask); | |
746 | else | |
747 | cpumask_clear_cpu(cpu, mask); | |
748 | ||
de2d9445 TH | 749 | cpulist_scnprintf(buf, sizeof(buf), mask); |
750 | printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n", | |
751 | enable ? "numa_add_cpu" : "numa_remove_cpu", | |
752 | cpu, node, buf); | |
7a6c6547 | 753 | return; |
de2d9445 TH | 754 | } |
755 | ||
756 | # ifndef CONFIG_NUMA_EMU | |
7a6c6547 | 757 | static void __cpuinit numa_set_cpumask(int cpu, bool enable) |
de2d9445 | 758 | { |
7a6c6547 | 759 | debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable); |
de2d9445 TH | 760 | } |
761 | ||
762 | void __cpuinit numa_add_cpu(int cpu) | |
763 | { | |
7a6c6547 | 764 | numa_set_cpumask(cpu, true); |
de2d9445 TH | 765 | } |
766 | ||
767 | void __cpuinit numa_remove_cpu(int cpu) | |
768 | { | |
7a6c6547 | 769 | numa_set_cpumask(cpu, false); |
de2d9445 TH | 770 | } |
771 | # endif /* !CONFIG_NUMA_EMU */ | |
772 | ||
71ee73e7 RR | 773 | /* |
774 | * Returns a pointer to the bitmask of CPUs on Node 'node'. | |
775 | */ | |
73e907de | 776 | const struct cpumask *cpumask_of_node(int node) |
71ee73e7 | 777 | { |
71ee73e7 RR | 778 | if (node >= nr_node_ids) { |
779 | printk(KERN_WARNING | |
780 | "cpumask_of_node(%d): node > nr_node_ids(%d)\n", | |
781 | node, nr_node_ids); | |
782 | dump_stack(); | |
783 | return cpu_none_mask; | |
784 | } | |
c032ef60 RR | 785 | if (node_to_cpumask_map[node] == NULL) { |
786 | printk(KERN_WARNING | |
787 | "cpumask_of_node(%d): no node_to_cpumask_map!\n", | |
788 | node); | |
789 | dump_stack(); | |
790 | return cpu_online_mask; | |
791 | } | |
0b966252 | 792 | return node_to_cpumask_map[node]; |
71ee73e7 RR | 793 | } |
794 | EXPORT_SYMBOL(cpumask_of_node); | |
645a7919 | 795 | |
de2d9445 | 796 | #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ |
a4106eae TH | 797 ||
798 | #if defined(CONFIG_X86_64) && defined(CONFIG_MEMORY_HOTPLUG) | |
799 | int memory_add_physaddr_to_nid(u64 start) | |
800 | { | |
801 | struct numa_meminfo *mi = &numa_meminfo; | |
802 | int nid = mi->blk[0].nid; | |
803 | int i; | |
804 | ||
805 | for (i = 0; i < mi->nr_blks; i++) | |
806 | if (mi->blk[i].start <= start && mi->blk[i].end > start) | |
807 | nid = mi->blk[i].nid; | |
808 | return nid; | |
809 | } | |
810 | EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); | |
811 | #endif |