/*
 * arch/x86/mm/srat_64.c
 * (as of commit "x86-64, numa: Put pgtable to local node memory")
 */
1 /*
2 * ACPI 3.0 based NUMA setup
3 * Copyright 2004 Andi Kleen, SuSE Labs.
4 *
5 * Reads the ACPI SRAT table to figure out what memory belongs to which CPUs.
6 *
7 * Called from acpi_numa_init while reading the SRAT and SLIT tables.
8 * Assumes all memory regions belonging to a single proximity domain
9 * are in one chunk. Holes between them will be included in the node.
10 */
11
12 #include <linux/kernel.h>
13 #include <linux/acpi.h>
14 #include <linux/mmzone.h>
15 #include <linux/bitmap.h>
16 #include <linux/module.h>
17 #include <linux/topology.h>
18 #include <linux/bootmem.h>
19 #include <linux/memblock.h>
20 #include <linux/mm.h>
21 #include <asm/proto.h>
22 #include <asm/numa.h>
23 #include <asm/e820.h>
24 #include <asm/apic.h>
25 #include <asm/uv/uv.h>
26
27 int acpi_numa __initdata;
28
29 static struct acpi_table_slit *acpi_slit;
30
31 static nodemask_t nodes_parsed __initdata;
32 static nodemask_t cpu_nodes_parsed __initdata;
33 static struct bootnode nodes[MAX_NUMNODES] __initdata;
34 static struct bootnode nodes_add[MAX_NUMNODES];
35
36 static int num_node_memblks __initdata;
37 static struct bootnode node_memblk_range[NR_NODE_MEMBLKS] __initdata;
38 static int memblk_nodeid[NR_NODE_MEMBLKS] __initdata;
39
/* Map an ACPI proximity domain id (PXM) to a logical NUMA node id. */
static __init int setup_node(int pxm)
{
	return acpi_map_pxm_to_node(pxm);
}
44
45 static __init int conflicting_memblks(unsigned long start, unsigned long end)
46 {
47 int i;
48 for (i = 0; i < num_node_memblks; i++) {
49 struct bootnode *nd = &node_memblk_range[i];
50 if (nd->start == nd->end)
51 continue;
52 if (nd->end > start && nd->start < end)
53 return memblk_nodeid[i];
54 if (nd->end == end && nd->start == start)
55 return memblk_nodeid[i];
56 }
57 return -1;
58 }
59
60 static __init void cutoff_node(int i, unsigned long start, unsigned long end)
61 {
62 struct bootnode *nd = &nodes[i];
63
64 if (nd->start < start) {
65 nd->start = start;
66 if (nd->end < nd->start)
67 nd->start = nd->end;
68 }
69 if (nd->end > end) {
70 nd->end = end;
71 if (nd->start > nd->end)
72 nd->start = nd->end;
73 }
74 }
75
76 static __init void bad_srat(void)
77 {
78 int i;
79 printk(KERN_ERR "SRAT: SRAT not used.\n");
80 acpi_numa = -1;
81 for (i = 0; i < MAX_LOCAL_APIC; i++)
82 apicid_to_node[i] = NUMA_NO_NODE;
83 for (i = 0; i < MAX_NUMNODES; i++) {
84 nodes[i].start = nodes[i].end = 0;
85 nodes_add[i].start = nodes_add[i].end = 0;
86 }
87 remove_all_active_ranges();
88 }
89
90 static __init inline int srat_disabled(void)
91 {
92 return numa_off || acpi_numa < 0;
93 }
94
/*
 * Callback for SLIT parsing: keep a permanent copy of the SLIT
 * (node distance table) so __node_distance() can use it after the
 * temporary ACPI table mapping goes away.
 */
void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
{
	unsigned length;
	unsigned long phys;

	length = slit->header.length;
	/* Find free space below the already direct-mapped range. */
	phys = memblock_find_in_range(0, max_pfn_mapped<<PAGE_SHIFT, length,
		 PAGE_SIZE);

	/* Boot cannot proceed sanely without node distances saved. */
	if (phys == MEMBLOCK_ERROR)
		panic(" Can not save slit!\n");

	acpi_slit = __va(phys);
	memcpy(acpi_slit, slit, length);
	memblock_x86_reserve_range(phys, phys + length, "ACPI SLIT");
}
112
113 /* Callback for Proximity Domain -> x2APIC mapping */
114 void __init
115 acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
116 {
117 int pxm, node;
118 int apic_id;
119
120 if (srat_disabled())
121 return;
122 if (pa->header.length < sizeof(struct acpi_srat_x2apic_cpu_affinity)) {
123 bad_srat();
124 return;
125 }
126 if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
127 return;
128 pxm = pa->proximity_domain;
129 node = setup_node(pxm);
130 if (node < 0) {
131 printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
132 bad_srat();
133 return;
134 }
135
136 apic_id = pa->apic_id;
137 apicid_to_node[apic_id] = node;
138 node_set(node, cpu_nodes_parsed);
139 acpi_numa = 1;
140 printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u\n",
141 pxm, apic_id, node);
142 }
143
144 /* Callback for Proximity Domain -> LAPIC mapping */
145 void __init
146 acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
147 {
148 int pxm, node;
149 int apic_id;
150
151 if (srat_disabled())
152 return;
153 if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
154 bad_srat();
155 return;
156 }
157 if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
158 return;
159 pxm = pa->proximity_domain_lo;
160 node = setup_node(pxm);
161 if (node < 0) {
162 printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
163 bad_srat();
164 return;
165 }
166
167 if (get_uv_system_type() >= UV_X2APIC)
168 apic_id = (pa->apic_id << 8) | pa->local_sapic_eid;
169 else
170 apic_id = pa->apic_id;
171 apicid_to_node[apic_id] = node;
172 node_set(node, cpu_nodes_parsed);
173 acpi_numa = 1;
174 printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%02x -> Node %u\n",
175 pxm, apic_id, node);
176 }
177
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
/* Hot-pluggable SRAT ranges are recorded (in nodes_add[]). */
static inline int save_add_info(void) {return 1;}
#else
/* No sparse memory hotplug: hot-pluggable SRAT entries are ignored. */
static inline int save_add_info(void) {return 0;}
#endif
/*
 * Update nodes_add[]
 * This code supports one contiguous hot add area per node
 */
static void __init
update_nodes_add(int node, unsigned long start, unsigned long end)
{
	unsigned long s_pfn = start >> PAGE_SHIFT;
	unsigned long e_pfn = end >> PAGE_SHIFT;
	int changed = 0;
	struct bootnode *nd = &nodes_add[node];

	/* I had some trouble with strange memory hotadd regions breaking
	   the boot. Be very strict here and reject anything unexpected.
	   If you want working memory hotadd write correct SRATs.

	   The node size check is a basic sanity check to guard against
	   mistakes */
	if ((signed long)(end - start) < NODE_MIN_SIZE) {
		printk(KERN_ERR "SRAT: Hotplug area too small\n");
		return;
	}

	/* This check might be a bit too strict, but I'm keeping it for now. */
	/* The hot-add range must be entirely absent from the current
	   memory map: it describes memory that is not present yet. */
	if (absent_pages_in_range(s_pfn, e_pfn) != e_pfn - s_pfn) {
		printk(KERN_ERR
			"SRAT: Hotplug area %lu -> %lu has existing memory\n",
			s_pfn, e_pfn);
		return;
	}

	/* Looks good */

	if (nd->start == nd->end) {
		/* First hot-add area seen for this node. */
		nd->start = start;
		nd->end = end;
		changed = 1;
	} else {
		/* Only grow the single existing area; the new range must
		   be directly adjacent on either side. */
		if (nd->start == end) {
			nd->start = start;
			changed = 1;
		}
		if (nd->end == start) {
			nd->end = end;
			changed = 1;
		}
		if (!changed)
			printk(KERN_ERR "SRAT: Hotplug zone not continuous. Partly ignored\n");
	}

	if (changed) {
		/* Count the node as present even though it has no
		   usable memory yet. */
		node_set(node, cpu_nodes_parsed);
		printk(KERN_INFO "SRAT: hot plug zone found %Lx - %Lx\n",
				 nd->start, nd->end);
	}
}
239
240 /* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
241 void __init
242 acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
243 {
244 struct bootnode *nd, oldnode;
245 unsigned long start, end;
246 int node, pxm;
247 int i;
248
249 if (srat_disabled())
250 return;
251 if (ma->header.length != sizeof(struct acpi_srat_mem_affinity)) {
252 bad_srat();
253 return;
254 }
255 if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
256 return;
257
258 if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info())
259 return;
260 start = ma->base_address;
261 end = start + ma->length;
262 pxm = ma->proximity_domain;
263 node = setup_node(pxm);
264 if (node < 0) {
265 printk(KERN_ERR "SRAT: Too many proximity domains.\n");
266 bad_srat();
267 return;
268 }
269 i = conflicting_memblks(start, end);
270 if (i == node) {
271 printk(KERN_WARNING
272 "SRAT: Warning: PXM %d (%lx-%lx) overlaps with itself (%Lx-%Lx)\n",
273 pxm, start, end, nodes[i].start, nodes[i].end);
274 } else if (i >= 0) {
275 printk(KERN_ERR
276 "SRAT: PXM %d (%lx-%lx) overlaps with PXM %d (%Lx-%Lx)\n",
277 pxm, start, end, node_to_pxm(i),
278 nodes[i].start, nodes[i].end);
279 bad_srat();
280 return;
281 }
282 nd = &nodes[node];
283 oldnode = *nd;
284 if (!node_test_and_set(node, nodes_parsed)) {
285 nd->start = start;
286 nd->end = end;
287 } else {
288 if (start < nd->start)
289 nd->start = start;
290 if (nd->end < end)
291 nd->end = end;
292 }
293
294 printk(KERN_INFO "SRAT: Node %u PXM %u %lx-%lx\n", node, pxm,
295 start, end);
296
297 if (ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) {
298 update_nodes_add(node, start, end);
299 /* restore nodes[node] */
300 *nd = oldnode;
301 if ((nd->start | nd->end) == 0)
302 node_clear(node, nodes_parsed);
303 }
304
305 node_memblk_range[num_node_memblks].start = start;
306 node_memblk_range[num_node_memblks].end = end;
307 memblk_nodeid[num_node_memblks] = node;
308 num_node_memblks++;
309 }
310
/* Sanity check to catch more bad SRATs (they are amazingly common).
   Make sure the PXMs cover all memory. */
static int __init nodes_cover_memory(const struct bootnode *nodes)
{
	int i;
	unsigned long pxmram, e820ram;

	/* Sum the present pages claimed by all parsed nodes. */
	pxmram = 0;
	for_each_node_mask(i, nodes_parsed) {
		unsigned long s = nodes[i].start >> PAGE_SHIFT;
		unsigned long e = nodes[i].end >> PAGE_SHIFT;
		pxmram += e - s;
		pxmram -= __absent_pages_in_range(i, s, e);
		/* Clamp in case absent pages exceeded the span. */
		if ((long)pxmram < 0)
			pxmram = 0;
	}

	/* Present pages according to the e820 map. */
	e820ram = max_pfn - (memblock_x86_hole_size(0, max_pfn<<PAGE_SHIFT)>>PAGE_SHIFT);
	/* We seem to lose 3 pages somewhere. Allow 1M of slack. */
	if ((long)(e820ram - pxmram) >= (1<<(20 - PAGE_SHIFT))) {
		printk(KERN_ERR
"SRAT: PXMs only cover %luMB of your %luMB e820 RAM. Not used.\n",
			(pxmram << PAGE_SHIFT) >> 20,
			(e820ram << PAGE_SHIFT) >> 20);
		return 0;
	}
	return 1;
}
339
/* Nothing to fix up after SRAT parsing on x86-64. */
void __init acpi_numa_arch_fixup(void) {}
341
342 int __init acpi_get_nodes(struct bootnode *physnodes)
343 {
344 int i;
345 int ret = 0;
346
347 for_each_node_mask(i, nodes_parsed) {
348 physnodes[ret].start = nodes[i].start;
349 physnodes[ret].end = nodes[i].end;
350 ret++;
351 }
352 return ret;
353 }
354
/* Use the information discovered above to actually set up the nodes. */
int __init acpi_scan_nodes(unsigned long start, unsigned long end)
{
	int i;

	/* SRAT parsing failed or found nothing. */
	if (acpi_numa <= 0)
		return -1;

	/* First clean up the node list */
	for (i = 0; i < MAX_NUMNODES; i++)
		cutoff_node(i, start, end);

	/*
	 * Join together blocks on the same node, holes between
	 * which don't overlap with memory on other nodes.
	 */
	for (i = 0; i < num_node_memblks; ++i) {
		int j, k;

		for (j = i + 1; j < num_node_memblks; ++j) {
			unsigned long start, end;

			if (memblk_nodeid[i] != memblk_nodeid[j])
				continue;
			/*
			 * [start, end) is here the hole between blocks i
			 * and j (empty or inverted if they touch/overlap).
			 */
			start = min(node_memblk_range[i].end,
				    node_memblk_range[j].end);
			end = max(node_memblk_range[i].start,
				  node_memblk_range[j].start);
			/* The hole may only be swallowed if no other
			   node's memory lies inside it. */
			for (k = 0; k < num_node_memblks; ++k) {
				if (memblk_nodeid[i] == memblk_nodeid[k])
					continue;
				if (start < node_memblk_range[k].end &&
				    end > node_memblk_range[k].start)
					break;
			}
			if (k < num_node_memblks)
				continue;
			/* Merge j into i, covering both plus the hole. */
			start = min(node_memblk_range[i].start,
				    node_memblk_range[j].start);
			end = max(node_memblk_range[i].end,
				  node_memblk_range[j].end);
			printk(KERN_INFO "SRAT: Node %d "
			       "[%Lx,%Lx) + [%Lx,%Lx) -> [%lx,%lx)\n",
			       memblk_nodeid[i],
			       node_memblk_range[i].start,
			       node_memblk_range[i].end,
			       node_memblk_range[j].start,
			       node_memblk_range[j].end,
			       start, end);
			node_memblk_range[i].start = start;
			node_memblk_range[i].end = end;
			/* Drop entry j by sliding the tail down one slot,
			   then revisit slot j on the next iteration. */
			k = --num_node_memblks - j;
			memmove(memblk_nodeid + j, memblk_nodeid + j+1,
				k * sizeof(*memblk_nodeid));
			memmove(node_memblk_range + j, node_memblk_range + j+1,
				k * sizeof(*node_memblk_range));
			--j;
		}
	}

	memnode_shift = compute_hash_shift(node_memblk_range, num_node_memblks,
					   memblk_nodeid);
	if (memnode_shift < 0) {
		printk(KERN_ERR
		     "SRAT: No NUMA node hash function found. Contact maintainer\n");
		bad_srat();
		return -1;
	}

	for (i = 0; i < num_node_memblks; i++)
		memblock_x86_register_active_regions(memblk_nodeid[i],
				node_memblk_range[i].start >> PAGE_SHIFT,
				node_memblk_range[i].end >> PAGE_SHIFT);

	/* for out of order entries in SRAT */
	sort_node_map();
	if (!nodes_cover_memory(nodes)) {
		bad_srat();
		return -1;
	}

	/* Map high memory now that the node layout is final, so page
	   table pages are allocated on their local node. */
	init_memory_mapping_high();

	/* Account for nodes with cpus and no memory */
	nodes_or(node_possible_map, nodes_parsed, cpu_nodes_parsed);

	/* Finally register nodes */
	for_each_node_mask(i, node_possible_map)
		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
	/* Try again in case setup_node_bootmem missed one due
	   to missing bootmem */
	for_each_node_mask(i, node_possible_map)
		if (!node_online(i))
			setup_node_bootmem(i, nodes[i].start, nodes[i].end);

	for (i = 0; i < nr_cpu_ids; i++) {
		int node = early_cpu_to_node(i);

		if (node == NUMA_NO_NODE)
			continue;
		/* CPU's node never came online: unbind the CPU. */
		if (!node_online(node))
			numa_clear_node(i);
	}
	numa_init_array();
	return 0;
}
461
462 #ifdef CONFIG_NUMA_EMU
/* Emulated node id -> real proximity domain it was carved from. */
static int fake_node_to_pxm_map[MAX_NUMNODES] __initdata = {
	[0 ... MAX_NUMNODES-1] = PXM_INVAL
};
/* apicid -> emulated node id (staging copy of apicid_to_node). */
static s16 fake_apicid_to_node[MAX_LOCAL_APIC] __initdata = {
	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};
469 static int __init find_node_by_addr(unsigned long addr)
470 {
471 int ret = NUMA_NO_NODE;
472 int i;
473
474 for_each_node_mask(i, nodes_parsed) {
475 /*
476 * Find the real node that this emulated node appears on. For
477 * the sake of simplicity, we only use a real node's starting
478 * address to determine which emulated node it appears on.
479 */
480 if (addr >= nodes[i].start && addr < nodes[i].end) {
481 ret = i;
482 break;
483 }
484 }
485 return ret;
486 }
487
/*
 * In NUMA emulation, we need to setup proximity domain (_PXM) to node ID
 * mappings that respect the real ACPI topology but reflect our emulated
 * environment. For each emulated node, we find which real node it appears on
 * and create PXM to NID mappings for those fake nodes which mirror that
 * locality. SLIT will now represent the correct distances between emulated
 * nodes as a result of the real topology.
 */
void __init acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes)
{
	int i, j;

	printk(KERN_INFO "Faking PXM affinity for fake nodes on real "
			 "topology.\n");
	for (i = 0; i < num_nodes; i++) {
		int nid, pxm;

		/* Which real node does this fake node start in? */
		nid = find_node_by_addr(fake_nodes[i].start);
		if (nid == NUMA_NO_NODE)
			continue;
		pxm = node_to_pxm(nid);
		if (pxm == PXM_INVAL)
			continue;
		fake_node_to_pxm_map[i] = pxm;
		/*
		 * For each apicid_to_node mapping that exists for this real
		 * node, it must now point to the fake node ID.
		 */
		for (j = 0; j < MAX_LOCAL_APIC; j++)
			if (apicid_to_node[j] == nid &&
			    fake_apicid_to_node[j] == NUMA_NO_NODE)
				fake_apicid_to_node[j] = i;
	}
	/* Install the fake PXM<->node and apicid->node mappings. */
	for (i = 0; i < num_nodes; i++)
		__acpi_map_pxm_to_node(fake_node_to_pxm_map[i], i);
	memcpy(apicid_to_node, fake_apicid_to_node, sizeof(apicid_to_node));

	/* From here on, nodes_parsed describes the fake topology. */
	nodes_clear(nodes_parsed);
	for (i = 0; i < num_nodes; i++)
		if (fake_nodes[i].start != fake_nodes[i].end)
			node_set(i, nodes_parsed);
}
530
/*
 * With NUMA emulation, fake nodes carved from the same real node share
 * a PXM and therefore count as "local" to each other.
 */
static int null_slit_node_compare(int a, int b)
{
	return node_to_pxm(a) == node_to_pxm(b);
}
#else
/* Without emulation a node is only local to itself. */
static int null_slit_node_compare(int a, int b)
{
	return a == b;
}
#endif /* CONFIG_NUMA_EMU */
541
542 int __node_distance(int a, int b)
543 {
544 int index;
545
546 if (!acpi_slit)
547 return null_slit_node_compare(a, b) ? LOCAL_DISTANCE :
548 REMOTE_DISTANCE;
549 index = acpi_slit->locality_count * node_to_pxm(a);
550 return acpi_slit->entry[index + node_to_pxm(b)];
551 }
552
553 EXPORT_SYMBOL(__node_distance);
554
555 #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) || defined(CONFIG_ACPI_HOTPLUG_MEMORY)
556 int memory_add_physaddr_to_nid(u64 start)
557 {
558 int i, ret = 0;
559
560 for_each_node(i)
561 if (nodes_add[i].start <= start && nodes_add[i].end > start)
562 ret = i;
563
564 return ret;
565 }
566 EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
567 #endif
/* (gitweb page-generation footer removed) */