/* Common code for 32 and 64-bit NUMA */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/sched.h>
#include <linux/topology.h>

#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/acpi.h>
#include <asm/amd_nb.h>

#include "numa_internal.h"

int __initdata numa_off;
nodemask_t numa_nodes_parsed __initdata;

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

static struct numa_meminfo numa_meminfo
#ifndef CONFIG_MEMORY_HOTPLUG
__initdata
#endif
;

static int numa_distance_cnt;
static u8 *numa_distance;

static __init int numa_setup(char *opt)
{
        if (!opt)
                return -EINVAL;
        if (!strncmp(opt, "off", 3))
                numa_off = 1;
#ifdef CONFIG_NUMA_EMU
        if (!strncmp(opt, "fake=", 5))
                numa_emu_cmdline(opt + 5);
#endif
#ifdef CONFIG_ACPI_NUMA
        if (!strncmp(opt, "noacpi", 6))
                acpi_numa = -1;
#endif
        return 0;
}
early_param("numa", numa_setup);
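
/*
 * Illustrative kernel command-line usage for the handler above (a
 * documentation aid matching the strncmp() checks, not code):
 *
 *      numa=off        disable NUMA handling entirely
 *      numa=fake=4     emulate 4 NUMA nodes (needs CONFIG_NUMA_EMU)
 *      numa=noacpi     ignore the ACPI SRAT table (CONFIG_ACPI_NUMA)
 */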

/*
 * apicid, cpu, node mappings
 */
s16 __apicid_to_node[MAX_LOCAL_APIC] = {
        [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};

int numa_cpu_node(int cpu)
{
        int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);

        if (apicid != BAD_APICID)
                return __apicid_to_node[apicid];
        return NUMA_NO_NODE;
}

cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

void numa_set_node(int cpu, int node)
{
        int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

        /* early setting, no percpu area yet */
        if (cpu_to_node_map) {
                cpu_to_node_map[cpu] = node;
                return;
        }

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
        if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
                printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
                dump_stack();
                return;
        }
#endif
        per_cpu(x86_cpu_to_node_map, cpu) = node;

        set_cpu_numa_node(cpu, node);
}

void numa_clear_node(int cpu)
{
        numa_set_node(cpu, NUMA_NO_NODE);
}

/*
 * Allocate node_to_cpumask_map based on number of available nodes.
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
void __init setup_node_to_cpumask_map(void)
{
        unsigned int node;

        /* setup nr_node_ids if not done yet */
        if (nr_node_ids == MAX_NUMNODES)
                setup_nr_node_ids();

        /* allocate the map */
        for (node = 0; node < nr_node_ids; node++)
                alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

        /* cpumask_of_node() will now work */
        pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);
}

static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
                                     struct numa_meminfo *mi)
{
        /* ignore zero length blks */
        if (start == end)
                return 0;

        /* whine about and ignore invalid blks */
        if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
                pr_warning("NUMA: Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
                           nid, start, end - 1);
                return 0;
        }

        if (mi->nr_blks >= NR_NODE_MEMBLKS) {
                pr_err("NUMA: too many memblk ranges\n");
                return -EINVAL;
        }

        mi->blk[mi->nr_blks].start = start;
        mi->blk[mi->nr_blks].end = end;
        mi->blk[mi->nr_blks].nid = nid;
        mi->nr_blks++;
        return 0;
}

/**
 * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo
 * @idx: Index of memblk to remove
 * @mi: numa_meminfo to remove memblk from
 *
 * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and
 * decrementing @mi->nr_blks.
 */
void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
{
        mi->nr_blks--;
        memmove(&mi->blk[idx], &mi->blk[idx + 1],
                (mi->nr_blks - idx) * sizeof(mi->blk[0]));
}

/**
 * numa_add_memblk - Add one numa_memblk to numa_meminfo
 * @nid: NUMA node ID of the new memblk
 * @start: Start address of the new memblk
 * @end: End address of the new memblk
 *
 * Add a new memblk to the default numa_meminfo.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_add_memblk(int nid, u64 start, u64 end)
{
        return numa_add_memblk_to(nid, start, end, &numa_meminfo);
}
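
/*
 * Usage sketch (an illustration, not a call site in this file): platform
 * parsers such as the ACPI SRAT or AMD northbridge code register each
 * discovered range roughly like
 *
 *      numa_add_memblk(nid, base, base + size);
 *
 * Note that @end is exclusive, as the zero-length check above implies.
 */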

/* Allocate NODE_DATA for a node on the local memory */
static void __init alloc_node_data(int nid)
{
        const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
        u64 nd_pa;
        void *nd;
        int tnid;

        /*
         * Allocate node data.  Try node-local memory and then any node.
         * Never allocate in DMA zone.
         */
        nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
        if (!nd_pa) {
                nd_pa = __memblock_alloc_base(nd_size, SMP_CACHE_BYTES,
                                              MEMBLOCK_ALLOC_ACCESSIBLE);
                if (!nd_pa) {
                        pr_err("Cannot find %zu bytes in node %d\n",
                               nd_size, nid);
                        return;
                }
        }
        nd = __va(nd_pa);

        /* report and initialize */
        printk(KERN_INFO "NODE_DATA(%d) allocated [mem %#010Lx-%#010Lx]\n", nid,
               nd_pa, nd_pa + nd_size - 1);
        tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
        if (tnid != nid)
                printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nid, tnid);

        node_data[nid] = nd;
        memset(NODE_DATA(nid), 0, sizeof(pg_data_t));

        node_set_online(nid);
}

/**
 * numa_cleanup_meminfo - Cleanup a numa_meminfo
 * @mi: numa_meminfo to clean up
 *
 * Sanitize @mi by merging and removing unnecessary memblks.  Also check for
 * conflicts and clear unused memblks.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
{
        const u64 low = 0;
        const u64 high = PFN_PHYS(max_pfn);
        int i, j, k;

        /* first, trim all entries */
        for (i = 0; i < mi->nr_blks; i++) {
                struct numa_memblk *bi = &mi->blk[i];

                /* make sure all blocks are inside the limits */
                bi->start = max(bi->start, low);
                bi->end = min(bi->end, high);

                /* and there's no empty or non-existent block */
                if (bi->start >= bi->end ||
                    !memblock_overlaps_region(&memblock.memory,
                        bi->start, bi->end - bi->start))
                        numa_remove_memblk_from(i--, mi);
        }

        /* merge neighboring / overlapping entries */
        for (i = 0; i < mi->nr_blks; i++) {
                struct numa_memblk *bi = &mi->blk[i];

                for (j = i + 1; j < mi->nr_blks; j++) {
                        struct numa_memblk *bj = &mi->blk[j];
                        u64 start, end;

                        /*
                         * See whether there are overlapping blocks.  Whine
                         * about but allow overlaps of the same nid.  They
                         * will be merged below.
                         */
                        if (bi->end > bj->start && bi->start < bj->end) {
                                if (bi->nid != bj->nid) {
                                        pr_err("NUMA: node %d [mem %#010Lx-%#010Lx] overlaps with node %d [mem %#010Lx-%#010Lx]\n",
                                               bi->nid, bi->start, bi->end - 1,
                                               bj->nid, bj->start, bj->end - 1);
                                        return -EINVAL;
                                }
                                pr_warning("NUMA: Warning: node %d [mem %#010Lx-%#010Lx] overlaps with itself [mem %#010Lx-%#010Lx]\n",
                                           bi->nid, bi->start, bi->end - 1,
                                           bj->start, bj->end - 1);
                        }

                        /*
                         * Join together blocks on the same node, holes
                         * between which don't overlap with memory on other
                         * nodes.
                         */
                        if (bi->nid != bj->nid)
                                continue;
                        start = min(bi->start, bj->start);
                        end = max(bi->end, bj->end);
                        for (k = 0; k < mi->nr_blks; k++) {
                                struct numa_memblk *bk = &mi->blk[k];

                                if (bi->nid == bk->nid)
                                        continue;
                                if (start < bk->end && end > bk->start)
                                        break;
                        }
                        if (k < mi->nr_blks)
                                continue;
                        printk(KERN_INFO "NUMA: Node %d [mem %#010Lx-%#010Lx] + [mem %#010Lx-%#010Lx] -> [mem %#010Lx-%#010Lx]\n",
                               bi->nid, bi->start, bi->end - 1, bj->start,
                               bj->end - 1, start, end - 1);
                        bi->start = start;
                        bi->end = end;
                        numa_remove_memblk_from(j--, mi);
                }
        }

        /* clear unused ones */
        for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
                mi->blk[i].start = mi->blk[i].end = 0;
                mi->blk[i].nid = NUMA_NO_NODE;
        }

        return 0;
}
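
/*
 * Worked example for the merge pass above, with hypothetical addresses:
 * two same-node entries { nid 0: 0x0-0x20000000 } and
 * { nid 0: 0x20000000-0x40000000 } become { nid 0: 0x0-0x40000000 },
 * provided no other node's memory lies inside the combined range.
 */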

/*
 * Set the nodes that have memory in @mi in *@nodemask.
 */
static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask,
                                              const struct numa_meminfo *mi)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(mi->blk); i++)
                if (mi->blk[i].start != mi->blk[i].end &&
                    mi->blk[i].nid != NUMA_NO_NODE)
                        node_set(mi->blk[i].nid, *nodemask);
}

/**
 * numa_reset_distance - Reset NUMA distance table
 *
 * The current table is freed.  The next numa_set_distance() call will
 * create a new one.
 */
void __init numa_reset_distance(void)
{
        size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]);

        /* numa_distance could be 1LU marking allocation failure, test cnt */
        if (numa_distance_cnt)
                memblock_free(__pa(numa_distance), size);
        numa_distance_cnt = 0;
        numa_distance = NULL;   /* enable table creation */
}

static int __init numa_alloc_distance(void)
{
        nodemask_t nodes_parsed;
        size_t size;
        int i, j, cnt = 0;
        u64 phys;

        /* size the new table and allocate it */
        nodes_parsed = numa_nodes_parsed;
        numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo);

        for_each_node_mask(i, nodes_parsed)
                cnt = i;
        cnt++;
        size = cnt * cnt * sizeof(numa_distance[0]);

        phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
                                      size, PAGE_SIZE);
        if (!phys) {
                pr_warning("NUMA: Warning: can't allocate distance table!\n");
                /* don't retry until explicitly reset */
                numa_distance = (void *)1LU;
                return -ENOMEM;
        }
        memblock_reserve(phys, size);

        numa_distance = __va(phys);
        numa_distance_cnt = cnt;

        /* fill with the default distances */
        for (i = 0; i < cnt; i++)
                for (j = 0; j < cnt; j++)
                        numa_distance[i * cnt + j] = i == j ?
                                LOCAL_DISTANCE : REMOTE_DISTANCE;
        printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt);

        return 0;
}
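
/*
 * Layout note: numa_distance is a flat cnt x cnt array of u8, indexed as
 * numa_distance[from * cnt + to].  For a hypothetical two-node machine the
 * default table filled in above is
 *
 *      to:       0   1
 *      from 0:  10  20
 *      from 1:  20  10
 *
 * assuming the usual SLIT convention of LOCAL_DISTANCE = 10 and
 * REMOTE_DISTANCE = 20.
 */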

/**
 * numa_set_distance - Set NUMA distance from one NUMA node to another
 * @from: the 'from' node to set distance
 * @to: the 'to' node to set distance
 * @distance: NUMA distance
 *
 * Set the distance from node @from to @to to @distance.  If the distance
 * table doesn't exist, one large enough to accommodate all the currently
 * known nodes will be created.
 *
 * If such a table cannot be allocated, a warning is printed and further
 * calls are ignored until the distance table is reset with
 * numa_reset_distance().
 *
 * If @from or @to is higher than the highest known node or lower than zero
 * at the time of table creation, or @distance doesn't make sense, the call
 * is ignored.  This is to allow simplification of specific NUMA config
 * implementations.
 */
void __init numa_set_distance(int from, int to, int distance)
{
        if (!numa_distance && numa_alloc_distance() < 0)
                return;

        if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
            from < 0 || to < 0) {
                pr_warn_once("NUMA: Warning: node ids are out of bound, from=%d to=%d distance=%d\n",
                             from, to, distance);
                return;
        }

        if ((u8)distance != distance ||
            (from == to && distance != LOCAL_DISTANCE)) {
                pr_warn_once("NUMA: Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
                             from, to, distance);
                return;
        }

        numa_distance[from * numa_distance_cnt + to] = distance;
}

int __node_distance(int from, int to)
{
        if (from >= numa_distance_cnt || to >= numa_distance_cnt)
                return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
        return numa_distance[from * numa_distance_cnt + to];
}
EXPORT_SYMBOL(__node_distance);

/*
 * Sanity check to catch more bad NUMA configurations (they are amazingly
 * common).  Make sure the nodes cover all memory.
 */
static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
{
        u64 numaram, e820ram;
        int i;

        numaram = 0;
        for (i = 0; i < mi->nr_blks; i++) {
                u64 s = mi->blk[i].start >> PAGE_SHIFT;
                u64 e = mi->blk[i].end >> PAGE_SHIFT;
                numaram += e - s;
                numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
                if ((s64)numaram < 0)
                        numaram = 0;
        }

        e820ram = max_pfn - absent_pages_in_range(0, max_pfn);

        /* We seem to lose 3 pages somewhere. Allow 1M of slack. */
        if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
                printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n",
                       (numaram << PAGE_SHIFT) >> 20,
                       (e820ram << PAGE_SHIFT) >> 20);
                return false;
        }
        return true;
}
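
/*
 * Arithmetic note on the slack above: with the common 4K page size,
 * PAGE_SHIFT is 12, so 1 << (20 - 12) = 256 pages, i.e. exactly 1MB.
 */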

/*
 * Mark all currently memblock-reserved physical memory (which covers the
 * kernel's own memory ranges) as hot-unpluggable.
 */
static void __init numa_clear_kernel_node_hotplug(void)
{
        nodemask_t reserved_nodemask = NODE_MASK_NONE;
        struct memblock_region *mb_region;
        int i;

        /*
         * We have to do some preprocessing of memblock regions, to
         * make them suitable for reservation.
         *
         * At this time, all memory regions reserved by memblock are
         * used by the kernel, but those regions are not split up
         * along node boundaries yet, and don't necessarily have their
         * node ID set yet either.
         *
         * So iterate over all memory known to the x86 architecture,
         * and use those ranges to set the nid in memblock.reserved.
         * This will split up the memblock regions along node
         * boundaries and will set the node IDs as well.
         */
        for (i = 0; i < numa_meminfo.nr_blks; i++) {
                struct numa_memblk *mb = numa_meminfo.blk + i;
                int ret;

                ret = memblock_set_node(mb->start, mb->end - mb->start, &memblock.reserved, mb->nid);
                WARN_ON_ONCE(ret);
        }

        /*
         * Now go over all reserved memblock regions, to construct a
         * node mask of all kernel reserved memory areas.
         *
         * [ Note, when booting with mem=nn[kMG] or in a kdump kernel,
         *   numa_meminfo might not include all memblock.reserved
         *   memory ranges, because quirks such as trim_snb_memory()
         *   reserve specific pages for Sandy Bridge graphics. ]
         */
        for_each_memblock(reserved, mb_region) {
                if (mb_region->nid != MAX_NUMNODES)
                        node_set(mb_region->nid, reserved_nodemask);
        }

        /*
         * Finally, clear the MEMBLOCK_HOTPLUG flag for all memory
         * belonging to the reserved node mask.
         *
         * Note that this will include memory regions that reside
         * on nodes that contain kernel memory - entire nodes
         * become hot-unpluggable:
         */
        for (i = 0; i < numa_meminfo.nr_blks; i++) {
                struct numa_memblk *mb = numa_meminfo.blk + i;

                if (!node_isset(mb->nid, reserved_nodemask))
                        continue;

                memblock_clear_hotplug(mb->start, mb->end - mb->start);
        }
}

static int __init numa_register_memblks(struct numa_meminfo *mi)
{
        unsigned long uninitialized_var(pfn_align);
        int i, nid;

        /* Account for nodes with cpus and no memory */
        node_possible_map = numa_nodes_parsed;
        numa_nodemask_from_meminfo(&node_possible_map, mi);
        if (WARN_ON(nodes_empty(node_possible_map)))
                return -EINVAL;

        for (i = 0; i < mi->nr_blks; i++) {
                struct numa_memblk *mb = &mi->blk[i];
                memblock_set_node(mb->start, mb->end - mb->start,
                                  &memblock.memory, mb->nid);
        }

        /*
         * Very early during boot the kernel has to use some memory, e.g.
         * to load the kernel image.  We cannot prevent this anyway, so
         * any node the kernel resides on should be un-hotpluggable.
         *
         * And by the time we get here, allocating node data won't fail.
         */
        numa_clear_kernel_node_hotplug();

        /*
         * If the sections array is going to be used for pfn -> nid
         * mapping, check whether its granularity is fine enough.
         */
#ifdef NODE_NOT_IN_PAGE_FLAGS
        pfn_align = node_map_pfn_alignment();
        if (pfn_align && pfn_align < PAGES_PER_SECTION) {
                printk(KERN_WARNING "Node alignment %LuMB < min %LuMB, rejecting NUMA config\n",
                       PFN_PHYS(pfn_align) >> 20,
                       PFN_PHYS(PAGES_PER_SECTION) >> 20);
                return -EINVAL;
        }
#endif
        if (!numa_meminfo_cover_memory(mi))
                return -EINVAL;

        /* Finally register nodes. */
        for_each_node_mask(nid, node_possible_map) {
                u64 start = PFN_PHYS(max_pfn);
                u64 end = 0;

                for (i = 0; i < mi->nr_blks; i++) {
                        if (nid != mi->blk[i].nid)
                                continue;
                        start = min(mi->blk[i].start, start);
                        end = max(mi->blk[i].end, end);
                }

                if (start >= end)
                        continue;

                /*
                 * Don't confuse VM with a node that doesn't have the
                 * minimum amount of memory:
                 */
                if (end && (end - start) < NODE_MIN_SIZE)
                        continue;

                alloc_node_data(nid);
        }

        /* Dump memblock with node info and return. */
        memblock_dump_all();
        return 0;
}

/*
 * There are unfortunately some poorly designed mainboards around that
 * only connect memory to a single CPU.  This breaks the 1:1 cpu->node
 * mapping.  To avoid this, fill in the mapping for all possible CPUs,
 * as the number of CPUs is not known yet.  We round-robin the existing
 * nodes.
 */
static void __init numa_init_array(void)
{
        int rr, i;

        rr = first_node(node_online_map);
        for (i = 0; i < nr_cpu_ids; i++) {
                if (early_cpu_to_node(i) != NUMA_NO_NODE)
                        continue;
                numa_set_node(i, rr);
                rr = next_node_in(rr, node_online_map);
        }
}

static int __init numa_init(int (*init_func)(void))
{
        int i;
        int ret;

        for (i = 0; i < MAX_LOCAL_APIC; i++)
                set_apicid_to_node(i, NUMA_NO_NODE);

        nodes_clear(numa_nodes_parsed);
        nodes_clear(node_possible_map);
        nodes_clear(node_online_map);
        memset(&numa_meminfo, 0, sizeof(numa_meminfo));
        WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.memory,
                                  MAX_NUMNODES));
        WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.reserved,
                                  MAX_NUMNODES));
        /* In case parsing SRAT failed. */
        WARN_ON(memblock_clear_hotplug(0, ULLONG_MAX));
        numa_reset_distance();

        ret = init_func();
        if (ret < 0)
                return ret;

        /*
         * We reset memblock back to the top-down direction here because,
         * if ACPI_NUMA is configured, we have parsed SRAT in init_func().
         * It is OK to do the reset here even if ACPI_NUMA isn't
         * configured, or if the ACPI NUMA init fails and falls back to
         * the dummy NUMA init.
         */
        memblock_set_bottom_up(false);

        ret = numa_cleanup_meminfo(&numa_meminfo);
        if (ret < 0)
                return ret;

        numa_emulation(&numa_meminfo, numa_distance_cnt);

        ret = numa_register_memblks(&numa_meminfo);
        if (ret < 0)
                return ret;

        for (i = 0; i < nr_cpu_ids; i++) {
                int nid = early_cpu_to_node(i);

                if (nid == NUMA_NO_NODE)
                        continue;
                if (!node_online(nid))
                        numa_clear_node(i);
        }
        numa_init_array();

        return 0;
}
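
/*
 * Summary of the initialization sequence implemented above:
 *
 *   1. Wipe all apicid/node, memblock and distance-table NUMA state.
 *   2. init_func()              - platform parser fills numa_nodes_parsed
 *                                 and numa_meminfo.
 *   3. numa_cleanup_meminfo()   - trim, validate and merge parsed ranges.
 *   4. numa_emulation()         - optionally rewrite the layout (numa=fake=).
 *   5. numa_register_memblks()  - allocate NODE_DATA and online the nodes.
 *   6. Clear cpu->node mappings that point at offline nodes, then
 *      round-robin the still-unmapped CPUs via numa_init_array().
 */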

/**
 * dummy_numa_init - Fallback dummy NUMA init
 *
 * Used if there's no underlying NUMA architecture, NUMA initialization
 * fails, or NUMA is disabled on the command line.
 *
 * Must online at least one node and add memory blocks that cover all
 * allowed memory.  This function must not fail.
 */
static int __init dummy_numa_init(void)
{
        printk(KERN_INFO "%s\n",
               numa_off ? "NUMA turned off" : "No NUMA configuration found");
        printk(KERN_INFO "Faking a node at [mem %#018Lx-%#018Lx]\n",
               0LLU, PFN_PHYS(max_pfn) - 1);

        node_set(0, numa_nodes_parsed);
        numa_add_memblk(0, 0, PFN_PHYS(max_pfn));

        return 0;
}

/**
 * x86_numa_init - Initialize NUMA
 *
 * Try each configured NUMA initialization method until one succeeds.  The
 * last fallback is the dummy single-node config encompassing all memory,
 * which never fails.
 */
void __init x86_numa_init(void)
{
        if (!numa_off) {
#ifdef CONFIG_ACPI_NUMA
                if (!numa_init(x86_acpi_numa_init))
                        return;
#endif
#ifdef CONFIG_AMD_NUMA
                if (!numa_init(amd_numa_init))
                        return;
#endif
        }

        numa_init(dummy_numa_init);
}

static __init int find_near_online_node(int node)
{
        int n, val;
        int min_val = INT_MAX;
        int best_node = -1;

        for_each_online_node(n) {
                val = node_distance(node, n);

                if (val < min_val) {
                        min_val = val;
                        best_node = n;
                }
        }

        return best_node;
}

/*
 * Setup early cpu_to_node.
 *
 * Populate cpu_to_node[] only if the x86_cpu_to_apicid[] and
 * apicid_to_node[] tables have valid entries for a CPU.  This means we
 * skip cpu_to_node[] initialisation for NUMA emulation and the fake-node
 * case (when running a kernel compiled for NUMA on a non-NUMA box),
 * which is OK because cpu_to_node[] was already initialized in a
 * round-robin manner by numa_init_array() prior to this call, and that
 * initialization is good enough for the fake NUMA cases.
 *
 * Called before the per_cpu areas are set up.
 */
void __init init_cpu_to_node(void)
{
        int cpu;
        u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);

        BUG_ON(cpu_to_apicid == NULL);

        for_each_possible_cpu(cpu) {
                int node = numa_cpu_node(cpu);

                if (node == NUMA_NO_NODE)
                        continue;
                if (!node_online(node))
                        node = find_near_online_node(node);
                numa_set_node(cpu, node);
        }
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

# ifndef CONFIG_NUMA_EMU
void numa_add_cpu(int cpu)
{
        cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void numa_remove_cpu(int cpu)
{
        cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
# endif /* !CONFIG_NUMA_EMU */

#else /* !CONFIG_DEBUG_PER_CPU_MAPS */

int __cpu_to_node(int cpu)
{
        if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
                printk(KERN_WARNING
                       "cpu_to_node(%d): usage too early!\n", cpu);
                dump_stack();
                return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
        }
        return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(__cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are set up.
 */
int early_cpu_to_node(int cpu)
{
        if (early_per_cpu_ptr(x86_cpu_to_node_map))
                return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

        if (!cpu_possible(cpu)) {
                printk(KERN_WARNING
                       "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
                dump_stack();
                return NUMA_NO_NODE;
        }
        return per_cpu(x86_cpu_to_node_map, cpu);
}

void debug_cpumask_set_cpu(int cpu, int node, bool enable)
{
        struct cpumask *mask;

        if (node == NUMA_NO_NODE) {
                /* early_cpu_to_node() already emits a warning and trace */
                return;
        }
        mask = node_to_cpumask_map[node];
        if (!mask) {
                pr_err("node_to_cpumask_map[%i] NULL\n", node);
                dump_stack();
                return;
        }

        if (enable)
                cpumask_set_cpu(cpu, mask);
        else
                cpumask_clear_cpu(cpu, mask);

        printk(KERN_DEBUG "%s cpu %d node %d: mask now %*pbl\n",
               enable ? "numa_add_cpu" : "numa_remove_cpu",
               cpu, node, cpumask_pr_args(mask));
}

# ifndef CONFIG_NUMA_EMU
static void numa_set_cpumask(int cpu, bool enable)
{
        debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
}

void numa_add_cpu(int cpu)
{
        numa_set_cpumask(cpu, true);
}

void numa_remove_cpu(int cpu)
{
        numa_set_cpumask(cpu, false);
}
# endif /* !CONFIG_NUMA_EMU */

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const struct cpumask *cpumask_of_node(int node)
{
        if (node >= nr_node_ids) {
                printk(KERN_WARNING
                       "cpumask_of_node(%d): node > nr_node_ids(%d)\n",
                       node, nr_node_ids);
                dump_stack();
                return cpu_none_mask;
        }
        if (node_to_cpumask_map[node] == NULL) {
                printk(KERN_WARNING
                       "cpumask_of_node(%d): no node_to_cpumask_map!\n",
                       node);
                dump_stack();
                return cpu_online_mask;
        }
        return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);

#endif  /* !CONFIG_DEBUG_PER_CPU_MAPS */

#ifdef CONFIG_MEMORY_HOTPLUG
int memory_add_physaddr_to_nid(u64 start)
{
        struct numa_meminfo *mi = &numa_meminfo;
        int nid = mi->blk[0].nid;
        int i;

        for (i = 0; i < mi->nr_blks; i++)
                if (mi->blk[i].start <= start && mi->blk[i].end > start)
                        nid = mi->blk[i].nid;
        return nid;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
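
/*
 * Usage note (an assumption about callers outside this file): memory
 * hotplug paths use this helper to decide which node a newly added
 * physical range belongs to, roughly
 *
 *      nid = memory_add_physaddr_to_nid(start);
 *      add_memory(nid, start, size);
 */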
#endif