[PATCH] hot-add-mem x86_64: Kconfig changes
[deliverable/linux.git] / arch / x86_64 / mm / srat.c
CommitLineData
1da177e4
LT
1/*
2 * ACPI 3.0 based NUMA setup
3 * Copyright 2004 Andi Kleen, SuSE Labs.
4 *
5 * Reads the ACPI SRAT table to figure out what memory belongs to which CPUs.
6 *
7 * Called from acpi_numa_init while reading the SRAT and SLIT tables.
8 * Assumes all memory regions belonging to a single proximity domain
9 * are in one chunk. Holes between them will be included in the node.
10 */
11
12#include <linux/kernel.h>
13#include <linux/acpi.h>
14#include <linux/mmzone.h>
15#include <linux/bitmap.h>
16#include <linux/module.h>
17#include <linux/topology.h>
68a3a7fe
AK
18#include <linux/bootmem.h>
19#include <linux/mm.h>
1da177e4
LT
20#include <asm/proto.h>
21#include <asm/numa.h>
8a6fdd3e 22#include <asm/e820.h>
1da177e4 23
/* Set >0 once SRAT parsing succeeds, <0 once it is rejected or disabled. */
int acpi_numa __initdata;

/*
 * Reserve hot-add areas at boot only when ACPI memory hotplug is built
 * but the generic MEMORY_HOTPLUG machinery (which handles this itself)
 * is not enabled.
 */
#if (defined(CONFIG_ACPI_HOTPLUG_MEMORY) || \
	defined(CONFIG_ACPI_HOTPLUG_MEMORY_MODULE)) \
	&& !defined(CONFIG_MEMORY_HOTPLUG)
#define RESERVE_HOTADD 1
#endif

/* SLIT table accepted by acpi_numa_slit_init(), NULL if none/invalid. */
static struct acpi_table_slit *acpi_slit;

static nodemask_t nodes_parsed __initdata;		/* nodes seen in the SRAT */
static struct bootnode nodes[MAX_NUMNODES] __initdata;	/* memory range per node */
static struct bootnode nodes_add[MAX_NUMNODES] __initdata; /* hot-add range per node */
static int found_add_area __initdata;			/* any hot-add area accepted? */
/* Percentage of memory allowed for hot-add reservations (boot parameter). */
int hotadd_percent __initdata = 0;
#ifndef RESERVE_HOTADD
#define hotadd_percent 0	/* Ignore all settings */
#endif

/* Too small nodes confuse the VM badly. Usually they result
   from BIOS bugs. */
#define NODE_MIN_SIZE (4*1024*1024)
/* Map an ACPI proximity domain id (PXM) to a logical node id. */
static __init int setup_node(int pxm)
{
	return acpi_map_pxm_to_node(pxm);
}
51
52static __init int conflicting_nodes(unsigned long start, unsigned long end)
53{
54 int i;
4b6a455c 55 for_each_node_mask(i, nodes_parsed) {
abe059e7 56 struct bootnode *nd = &nodes[i];
1da177e4
LT
57 if (nd->start == nd->end)
58 continue;
59 if (nd->end > start && nd->start < end)
05d1fa4b 60 return i;
1da177e4 61 if (nd->end == end && nd->start == start)
05d1fa4b 62 return i;
1da177e4
LT
63 }
64 return -1;
65}
66
67static __init void cutoff_node(int i, unsigned long start, unsigned long end)
68{
abe059e7 69 struct bootnode *nd = &nodes[i];
68a3a7fe
AK
70
71 if (found_add_area)
72 return;
73
1da177e4
LT
74 if (nd->start < start) {
75 nd->start = start;
76 if (nd->end < nd->start)
77 nd->start = nd->end;
78 }
79 if (nd->end > end) {
1da177e4
LT
80 nd->end = end;
81 if (nd->start > nd->end)
82 nd->start = nd->end;
83 }
84}
85
86static __init void bad_srat(void)
87{
2bce2b54 88 int i;
1da177e4
LT
89 printk(KERN_ERR "SRAT: SRAT not used.\n");
90 acpi_numa = -1;
fad7906d 91 found_add_area = 0;
2bce2b54
AK
92 for (i = 0; i < MAX_LOCAL_APIC; i++)
93 apicid_to_node[i] = NUMA_NO_NODE;
68a3a7fe
AK
94 for (i = 0; i < MAX_NUMNODES; i++)
95 nodes_add[i].start = nodes[i].end = 0;
5cb248ab 96 remove_all_active_ranges();
1da177e4
LT
97}
98
99static __init inline int srat_disabled(void)
100{
101 return numa_off || acpi_numa < 0;
102}
103
1584b89c
AK
104/*
105 * A lot of BIOS fill in 10 (= no distance) everywhere. This messes
106 * up the NUMA heuristics which wants the local node to have a smaller
107 * distance than the others.
108 * Do some quick checks here and only use the SLIT if it passes.
109 */
110static __init int slit_valid(struct acpi_table_slit *slit)
111{
112 int i, j;
113 int d = slit->localities;
114 for (i = 0; i < d; i++) {
115 for (j = 0; j < d; j++) {
116 u8 val = slit->entry[d*i + j];
117 if (i == j) {
118 if (val != 10)
119 return 0;
120 } else if (val <= 10)
121 return 0;
122 }
123 }
124 return 1;
125}
126
1da177e4
LT
127/* Callback for SLIT parsing */
128void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
129{
1584b89c
AK
130 if (!slit_valid(slit)) {
131 printk(KERN_INFO "ACPI: SLIT table looks invalid. Not used.\n");
132 return;
133 }
1da177e4
LT
134 acpi_slit = slit;
135}
136
/* Callback for Proximity Domain -> LAPIC mapping */
void __init
acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
{
	int pxm, node;
	if (srat_disabled())
		return;
	/* A wrong entry size means the whole table is suspect. */
	if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) {
		bad_srat();
		return;
	}
	/* Disabled entries are simply skipped, not an error. */
	if (pa->flags.enabled == 0)
		return;
	pxm = pa->proximity_domain;
	node = setup_node(pxm);
	if (node < 0) {
		printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
		bad_srat();
		return;
	}
	/* Record the APIC id -> node mapping for CPU bring-up. */
	apicid_to_node[pa->apic_id] = node;
	acpi_numa = 1;
	printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> Node %u\n",
	       pxm, pa->apic_id, node);
}
162
#ifdef RESERVE_HOTADD
/*
 * Protect against too large hotadd areas that would fill up memory.
 * Keeps a running total (in the static 'allocated') of mem_map bytes
 * committed to hot-add areas and caps it at hotadd_percent of RAM,
 * shrinking nd->end when the cap would be exceeded.
 * Returns 1 if the (possibly trimmed) area is acceptable, 0 otherwise.
 */
static int hotadd_enough_memory(struct bootnode *nd)
{
	static unsigned long allocated;		/* mem_map bytes committed so far */
	static unsigned long last_area_end;	/* search start for e820 probe */
	unsigned long pages = (nd->end - nd->start) >> PAGE_SHIFT;
	long mem = pages * sizeof(struct page);	/* mem_map cost of this area */
	unsigned long addr;
	unsigned long allowed;
	unsigned long oldpages = pages;

	/* Signed overflow of the cost computation means the area is absurd. */
	if (mem < 0)
		return 0;
	/* Budget: hotadd_percent of the actually-present RAM. */
	allowed = (end_pfn - absent_pages_in_range(0, end_pfn)) * PAGE_SIZE;
	allowed = (allowed / 100) * hotadd_percent;
	if (allocated + mem > allowed) {
		unsigned long range;
		/* Give them at least part of their hotadd memory upto hotadd_percent
		   It would be better to spread the limit out
		   over multiple hotplug areas, but that is too complicated
		   right now */
		if (allocated >= allowed)
			return 0;
		range = allowed - allocated;
		pages = (range / PAGE_SIZE);
		mem = pages * sizeof(struct page);
		nd->end = nd->start + range;
	}
	/* Not completely fool proof, but a good sanity check */
	addr = find_e820_area(last_area_end, end_pfn<<PAGE_SHIFT, mem);
	if (addr == -1UL)
		return 0;
	if (pages != oldpages)
		printk(KERN_NOTICE "SRAT: Hotadd area limited to %lu bytes\n",
			pages << PAGE_SHIFT);
	last_area_end = addr + mem;
	allocated += mem;
	return 1;
}
205
/*
 * It is fine to add this area to the nodes data it will be used later
 * This code supports one contiguous hot add area per node.
 * Returns 0 on success, -1 if the area was rejected (too small, already
 * populated, over budget, or not adjacent to the existing hot-add range).
 */
static int reserve_hotadd(int node, unsigned long start, unsigned long end)
{
	unsigned long s_pfn = start >> PAGE_SHIFT;
	unsigned long e_pfn = end >> PAGE_SHIFT;
	int changed = 0;
	struct bootnode *nd = &nodes_add[node];

	/* I had some trouble with strange memory hotadd regions breaking
	   the boot. Be very strict here and reject anything unexpected.
	   If you want working memory hotadd write correct SRATs.

	   The node size check is a basic sanity check to guard against
	   mistakes */
	if ((signed long)(end - start) < NODE_MIN_SIZE) {
		printk(KERN_ERR "SRAT: Hotplug area too small\n");
		return -1;
	}

	/* This check might be a bit too strict, but I'm keeping it for now. */
	/* A true hot-add area must be entirely absent from the e820 map. */
	if (absent_pages_in_range(s_pfn, e_pfn) != e_pfn - s_pfn) {
		printk(KERN_ERR
			"SRAT: Hotplug area %lu -> %lu has existing memory\n",
			s_pfn, e_pfn);
		return -1;
	}

	/* May trim nodes_add[node] to fit the hotadd_percent budget. */
	if (!hotadd_enough_memory(&nodes_add[node])) {
		printk(KERN_ERR "SRAT: Hotplug area too large\n");
		return -1;
	}

	/* Looks good */

	found_add_area = 1;
	/* Either start a new range or extend the existing one at an edge. */
	if (nd->start == nd->end) {
		nd->start = start;
		nd->end = end;
		changed = 1;
	} else {
		if (nd->start == end) {
			nd->start = start;
			changed = 1;
		}
		if (nd->end == start) {
			nd->end = end;
			changed = 1;
		}
		if (!changed)
			printk(KERN_ERR "SRAT: Hotplug zone not continuous. Partly ignored\n");
	}

	/* Grow end_pfn so the hot-add range is covered by the mem map. */
	if ((nd->end >> PAGE_SHIFT) > end_pfn)
		end_pfn = nd->end >> PAGE_SHIFT;

	if (changed)
		printk(KERN_INFO "SRAT: hot plug zone found %Lx - %Lx\n", nd->start, nd->end);
	return 0;
}
#endif
269
/* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
void __init
acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
{
	struct bootnode *nd, oldnode;
	unsigned long start, end;
	int node, pxm;
	int i;

	if (srat_disabled())
		return;
	/* A wrong entry size means the whole table is suspect. */
	if (ma->header.length != sizeof(struct acpi_table_memory_affinity)) {
		bad_srat();
		return;
	}
	if (ma->flags.enabled == 0)
		return;
	/* Ignore hot-pluggable ranges unless a hot-add budget was set. */
	if (ma->flags.hot_pluggable && hotadd_percent == 0)
		return;
	/* Assemble the 64-bit range from the split lo/hi table fields. */
	start = ma->base_addr_lo | ((u64)ma->base_addr_hi << 32);
	end = start + (ma->length_lo | ((u64)ma->length_hi << 32));
	pxm = ma->proximity_domain;
	node = setup_node(pxm);
	if (node < 0) {
		printk(KERN_ERR "SRAT: Too many proximity domains.\n");
		bad_srat();
		return;
	}
	i = conflicting_nodes(start, end);
	/* Self-overlap is only warned about; cross-node overlap is fatal. */
	if (i == node) {
		printk(KERN_WARNING
		"SRAT: Warning: PXM %d (%lx-%lx) overlaps with itself (%Lx-%Lx)\n",
			pxm, start, end, nodes[i].start, nodes[i].end);
	} else if (i >= 0) {
		printk(KERN_ERR
		       "SRAT: PXM %d (%lx-%lx) overlaps with PXM %d (%Lx-%Lx)\n",
		       pxm, start, end, node_to_pxm(i),
			nodes[i].start, nodes[i].end);
		bad_srat();
		return;
	}
	nd = &nodes[node];
	/* Saved so the update below can be undone if hot-add fails. */
	oldnode = *nd;
	/* First range for this node sets it; later ranges extend it. */
	if (!node_test_and_set(node, nodes_parsed)) {
		nd->start = start;
		nd->end = end;
	} else {
		if (start < nd->start)
			nd->start = start;
		if (nd->end < end)
			nd->end = end;
	}

	printk(KERN_INFO "SRAT: Node %u PXM %u %Lx-%Lx\n", node, pxm,
		nd->start, nd->end);
	e820_register_active_regions(node, nd->start >> PAGE_SHIFT,
						nd->end >> PAGE_SHIFT);
	push_node_boundaries(node, nd->start >> PAGE_SHIFT,
				nd->end >> PAGE_SHIFT);

#ifdef RESERVE_HOTADD
	if (ma->flags.hot_pluggable && reserve_hotadd(node, start, end) < 0) {
		/* Ignore hotadd region. Undo damage */
		printk(KERN_NOTICE "SRAT: Hotplug region ignored\n");
		*nd = oldnode;
		/* If this was the node's only range, unregister the node. */
		if ((nd->start | nd->end) == 0)
			node_clear(node, nodes_parsed);
	}
#endif
}
340
/* Sanity check to catch more bad SRATs (they are amazingly common).
   Make sure the PXMs cover all memory.
   Returns 1 when the parsed nodes account for (almost) all e820 RAM,
   0 when too much memory is left uncovered. */
static int nodes_cover_memory(void)
{
	int i;
	unsigned long pxmram, e820ram;	/* page counts */

	pxmram = 0;
	for_each_node_mask(i, nodes_parsed) {
		unsigned long s = nodes[i].start >> PAGE_SHIFT;
		unsigned long e = nodes[i].end >> PAGE_SHIFT;
		pxmram += e - s;
		/* Count only pages actually present in this range... */
		pxmram -= absent_pages_in_range(s, e);
		/* ...and exclude the not-yet-present hot-add area. */
		pxmram -= nodes_add[i].end - nodes_add[i].start;
		/* Clamp signed underflow from the subtractions above. */
		if ((long)pxmram < 0)
			pxmram = 0;
	}

	e820ram = end_pfn - absent_pages_in_range(0, end_pfn);
	/* We seem to lose 3 pages somewhere. Allow a bit of slack. */
	if ((long)(e820ram - pxmram) >= 1*1024*1024) {
		printk(KERN_ERR
"SRAT: PXMs only cover %luMB of your %luMB e820 RAM. Not used.\n",
			(pxmram << PAGE_SHIFT) >> 20,
			(e820ram << PAGE_SHIFT) >> 20);
		return 0;
	}
	return 1;
}
370
9391a3f9
AK
371static void unparse_node(int node)
372{
373 int i;
374 node_clear(node, nodes_parsed);
375 for (i = 0; i < MAX_LOCAL_APIC; i++) {
376 if (apicid_to_node[i] == node)
377 apicid_to_node[i] = NUMA_NO_NODE;
378 }
379}
380
/* Arch hook called after SRAT/SLIT parsing; nothing to fix up on x86-64. */
void __init acpi_numa_arch_fixup(void) {}
/* Use the information discovered above to actually set up the nodes.
   [start, end) bounds the usable memory range.
   Returns 0 on success, -1 when SRAT data could not be used. */
int __init acpi_scan_nodes(unsigned long start, unsigned long end)
{
	int i;

	/* First clean up the node list */
	for (i = 0; i < MAX_NUMNODES; i++) {
		cutoff_node(i, start, end);
		/* Drop nodes that became too small to be useful. */
		if ((nodes[i].end - nodes[i].start) < NODE_MIN_SIZE) {
			unparse_node(i);
			node_set_offline(i);
		}
	}

	/* Bail out if no SRAT entry was successfully parsed. */
	if (acpi_numa <= 0)
		return -1;

	if (!nodes_cover_memory()) {
		bad_srat();
		return -1;
	}

	/* Build the address -> node hash used by phys_to_nid(). */
	memnode_shift = compute_hash_shift(nodes, MAX_NUMNODES);
	if (memnode_shift < 0) {
		printk(KERN_ERR
		     "SRAT: No NUMA node hash function found. Contact maintainer\n");
		bad_srat();
		return -1;
	}

	/* Finally register nodes */
	for_each_node_mask(i, nodes_parsed)
		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
	/* Try again in case setup_node_bootmem missed one due
	   to missing bootmem */
	for_each_node_mask(i, nodes_parsed)
		if (!node_online(i))
			setup_node_bootmem(i, nodes[i].start, nodes[i].end);

	/* Detach CPUs whose node did not survive parsing. */
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_to_node[i] == NUMA_NO_NODE)
			continue;
		if (!node_isset(cpu_to_node[i], nodes_parsed))
			numa_set_node(i, NUMA_NO_NODE);
	}
	numa_init_array();
	return 0;
}
431
/* Reserve the bootmem backing node 'nodeid's hot-add area (if any),
   so the range stays free until memory is actually hot-added. */
void __init srat_reserve_add_area(int nodeid)
{
	if (found_add_area && nodes_add[nodeid].end) {
		u64 total_mb;

		printk(KERN_INFO "SRAT: Reserving hot-add memory space "
				"for node %d at %Lx-%Lx\n",
			nodeid, nodes_add[nodeid].start, nodes_add[nodeid].end);
		/* pages -> mem_map bytes -> MB, reported for the admin. */
		total_mb = (nodes_add[nodeid].end - nodes_add[nodeid].start)
					>> PAGE_SHIFT;
		total_mb *= sizeof(struct page);
		total_mb >>= 20;
		printk(KERN_INFO "SRAT: This will cost you %Lu MB of "
			"pre-allocated memory.\n", (unsigned long long)total_mb);
		reserve_bootmem_node(NODE_DATA(nodeid), nodes_add[nodeid].start,
			       nodes_add[nodeid].end - nodes_add[nodeid].start);
	}
}
450
1da177e4
LT
451int __node_distance(int a, int b)
452{
453 int index;
454
455 if (!acpi_slit)
456 return a == b ? 10 : 20;
457 index = acpi_slit->localities * node_to_pxm(a);
458 return acpi_slit->entry[index + node_to_pxm(b)];
459}
460
461EXPORT_SYMBOL(__node_distance);
This page took 0.183399 seconds and 5 git commands to generate.