/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <asm/sparsemem.h>
#include <asm/lmb.h>
#include <asm/system.h>
#include <asm/smp.h>

static int numa_enabled = 1;

static int numa_debug;
#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }

int numa_cpu_lookup_table[NR_CPUS];
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
EXPORT_SYMBOL(node_data);

static bootmem_data_t __initdata plat_node_bdata[MAX_NUMNODES];
static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;

/*
 * We need somewhere to store start/end/node for each region until we have
 * allocated the real node_data structures.
 */
#define MAX_REGIONS (MAX_LMB_REGIONS*2)
static struct {
	unsigned long start_pfn;
	unsigned long end_pfn;
	int nid;
} init_node_data[MAX_REGIONS] __initdata;

int __init early_pfn_to_nid(unsigned long pfn)
{
	unsigned int i;

	for (i = 0; init_node_data[i].end_pfn; i++) {
		unsigned long start_pfn = init_node_data[i].start_pfn;
		unsigned long end_pfn = init_node_data[i].end_pfn;

		if ((start_pfn <= pfn) && (pfn < end_pfn))
			return init_node_data[i].nid;
	}

	return -1;
}
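
/*
 * Editor's note (illustrative sketch, not part of the original file): with
 * init_node_data[] = { {0x0, 0x1000, 0}, {0x1000, 0x2000, 1}, {0, 0, 0} },
 * early_pfn_to_nid(0x1800) walks the table and returns node 1, while a pfn
 * outside every region (e.g. 0x3000) falls through and returns -1.
 */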

void __init add_region(unsigned int nid, unsigned long start_pfn,
		       unsigned long pages)
{
	unsigned int i;

	dbg("add_region nid %d start_pfn 0x%lx pages 0x%lx\n",
		nid, start_pfn, pages);

	for (i = 0; init_node_data[i].end_pfn; i++) {
		if (init_node_data[i].nid != nid)
			continue;
		if (init_node_data[i].end_pfn == start_pfn) {
			init_node_data[i].end_pfn += pages;
			return;
		}
		if (init_node_data[i].start_pfn == (start_pfn + pages)) {
			init_node_data[i].start_pfn -= pages;
			return;
		}
	}

	/*
	 * Leave the last entry NULL so we don't iterate off the end (we use
	 * entry.end_pfn to terminate the walk).
	 */
	if (i >= (MAX_REGIONS - 1)) {
		printk(KERN_ERR "WARNING: too many memory regions in "
				"numa code, truncating\n");
		return;
	}

	init_node_data[i].start_pfn = start_pfn;
	init_node_data[i].end_pfn = start_pfn + pages;
	init_node_data[i].nid = nid;
}
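
/*
 * Editor's note (illustrative sketch, not part of the original file):
 * add_region() coalesces physically adjacent ranges of the same node.
 * Calling add_region(0, 0x0, 0x1000) and then add_region(0, 0x1000, 0x1000)
 * leaves a single entry { start_pfn = 0x0, end_pfn = 0x2000, nid = 0 };
 * a later add_region(1, 0x2000, 0x1000) cannot merge (different nid) and
 * consumes a new slot instead.
 */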

/* We assume init_node_data has no overlapping regions */
void __init get_region(unsigned int nid, unsigned long *start_pfn,
		       unsigned long *end_pfn, unsigned long *pages_present)
{
	unsigned int i;

	*start_pfn = -1UL;
	*end_pfn = *pages_present = 0;

	for (i = 0; init_node_data[i].end_pfn; i++) {
		if (init_node_data[i].nid != nid)
			continue;

		*pages_present += init_node_data[i].end_pfn -
			init_node_data[i].start_pfn;

		if (init_node_data[i].start_pfn < *start_pfn)
			*start_pfn = init_node_data[i].start_pfn;

		if (init_node_data[i].end_pfn > *end_pfn)
			*end_pfn = init_node_data[i].end_pfn;
	}

	/* We didn't find a matching region, return start/end as 0 */
	if (*start_pfn == -1UL)
		*start_pfn = 0;
}
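
/*
 * Editor's note (illustrative sketch, not part of the original file): if
 * node 0 owns pfns [0x0, 0x1000) and [0x3000, 0x4000), get_region() returns
 * *start_pfn = 0x0, *end_pfn = 0x4000 and *pages_present = 0x2000, i.e. the
 * spanned range includes the hole but the present count does not.
 */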

static inline void map_cpu_to_node(int cpu, int node)
{
	numa_cpu_lookup_table[cpu] = node;

	if (!(cpu_isset(cpu, numa_cpumask_lookup_table[node])))
		cpu_set(cpu, numa_cpumask_lookup_table[node]);
}

#ifdef CONFIG_HOTPLUG_CPU
static void unmap_cpu_from_node(unsigned long cpu)
{
	int node = numa_cpu_lookup_table[cpu];

	dbg("removing cpu %lu from node %d\n", cpu, node);

	if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
		cpu_clear(cpu, numa_cpumask_lookup_table[node]);
	} else {
		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
		       cpu, node);
	}
}
#endif /* CONFIG_HOTPLUG_CPU */

static struct device_node *find_cpu_node(unsigned int cpu)
{
	unsigned int hw_cpuid = get_hard_smp_processor_id(cpu);
	struct device_node *cpu_node = NULL;
	unsigned int *interrupt_server, *reg;
	int len;

	while ((cpu_node = of_find_node_by_type(cpu_node, "cpu")) != NULL) {
		/* Try interrupt server first */
		interrupt_server = (unsigned int *)get_property(cpu_node,
					"ibm,ppc-interrupt-server#s", &len);

		len = len / sizeof(u32);

		if (interrupt_server && (len > 0)) {
			while (len--) {
				if (interrupt_server[len] == hw_cpuid)
					return cpu_node;
			}
		} else {
			reg = (unsigned int *)get_property(cpu_node,
							   "reg", &len);
			if (reg && (len > 0) && (reg[0] == hw_cpuid))
				return cpu_node;
		}
	}

	return NULL;
}
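
/*
 * Editor's note (illustrative sketch, not part of the original file): on an
 * SMT machine a cpu node may carry something like
 *	ibm,ppc-interrupt-server#s = <0x20 0x21>;
 *	reg = <0x20>;
 * so matching against every interrupt-server entry lets both hardware
 * threads resolve to the same device node, while the "reg" fallback only
 * matches the first thread.
 */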

/* must hold reference to node during call */
static int *of_get_associativity(struct device_node *dev)
{
	return (unsigned int *)get_property(dev, "ibm,associativity", NULL);
}

static int of_node_numa_domain(struct device_node *device)
{
	int numa_domain;
	unsigned int *tmp;

	if (min_common_depth == -1)
		return 0;

	tmp = of_get_associativity(device);
	if (tmp && (tmp[0] >= min_common_depth)) {
		numa_domain = tmp[min_common_depth];
	} else {
		dbg("WARNING: no NUMA information for %s\n",
		    device->full_name);
		numa_domain = 0;
	}
	return numa_domain;
}
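
/*
 * Editor's note (illustrative sketch, not part of the original file): an
 * "ibm,associativity" property of <4 0 0 0 1> describes a list of length 4
 * (tmp[0]); with min_common_depth == 4 the node's domain is tmp[4] == 1.
 * The guard tmp[0] >= min_common_depth keeps us from indexing past the end
 * of a shorter list.
 */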

/*
 * In theory, the "ibm,associativity" property may contain multiple
 * associativity lists because a resource may be multiply connected
 * into the machine. This resource then has different associativity
 * characteristics relative to its multiple connections. We ignore
 * this for now. We also assume that all cpu and memory sets have
 * their distances represented at a common level. This won't be
 * true for hierarchical NUMA.
 *
 * In any case the ibm,associativity-reference-points should give
 * the correct depth for a normal NUMA system.
 *
 * - Dave Hansen <haveblue@us.ibm.com>
 */
static int __init find_min_common_depth(void)
{
	int depth;
	unsigned int *ref_points;
	struct device_node *rtas_root;
	unsigned int len;

	rtas_root = of_find_node_by_path("/rtas");

	if (!rtas_root)
		return -1;

	/*
	 * This property is two 32-bit integers, each representing a level of
	 * depth in the associativity nodes. The first is for an SMP
	 * configuration (should be all 0's) and the second is for a normal
	 * NUMA configuration.
	 */
	ref_points = (unsigned int *)get_property(rtas_root,
			"ibm,associativity-reference-points", &len);

	if (ref_points && (len >= 2 * sizeof(unsigned int))) {
		depth = ref_points[1];
	} else {
		dbg("WARNING: could not find NUMA "
		    "associativity reference point\n");
		depth = -1;
	}
	of_node_put(rtas_root);

	return depth;
}
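
/*
 * Editor's note (illustrative sketch, not part of the original file): a
 * pSeries /rtas node might carry
 *	ibm,associativity-reference-points = <0x4 0x4>;
 * in which case depth = ref_points[1] = 4 and of_node_numa_domain() reads
 * the fifth cell of each "ibm,associativity" list.
 */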

static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
	struct device_node *memory = NULL;

	memory = of_find_node_by_type(memory, "memory");
	if (!memory)
		panic("numa.c: No memory nodes found!");

	*n_addr_cells = prom_n_addr_cells(memory);
	*n_size_cells = prom_n_size_cells(memory);
	of_node_put(memory);
}

static unsigned long __devinit read_n_cells(int n, unsigned int **buf)
{
	unsigned long result = 0;

	while (n--) {
		result = (result << 32) | **buf;
		(*buf)++;
	}
	return result;
}
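
/*
 * Editor's note (illustrative sketch, not part of the original file): with
 * n_mem_addr_cells == 2 and a buffer holding the cells
 * { 0x00000001, 0x00000000, 0x00000000, 0x10000000 }, the first
 * read_n_cells(2, &buf) returns 0x100000000 (a 4GB start address) and the
 * second returns 0x10000000 (a 256MB size); each call advances *buf past
 * the cells it consumed.
 */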

/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int numa_setup_cpu(unsigned long lcpu)
{
	int numa_domain = 0;
	struct device_node *cpu = find_cpu_node(lcpu);

	if (!cpu) {
		WARN_ON(1);
		goto out;
	}

	numa_domain = of_node_numa_domain(cpu);

	if (numa_domain >= num_online_nodes()) {
		/*
		 * POWER4 LPAR uses 0xffff as an invalid node,
		 * don't warn in this case.
		 */
		if (numa_domain != 0xffff)
			printk(KERN_ERR "WARNING: cpu %ld "
			       "maps to invalid NUMA node %d\n",
			       lcpu, numa_domain);
		numa_domain = 0;
	}
out:
	node_set_online(numa_domain);

	map_cpu_to_node(lcpu, numa_domain);

	of_node_put(cpu);

	return numa_domain;
}

static int cpu_numa_callback(struct notifier_block *nfb,
			     unsigned long action,
			     void *hcpu)
{
	unsigned long lcpu = (unsigned long)hcpu;
	int ret = NOTIFY_DONE;

	switch (action) {
	case CPU_UP_PREPARE:
		if (min_common_depth == -1 || !numa_enabled)
			map_cpu_to_node(lcpu, 0);
		else
			numa_setup_cpu(lcpu);
		ret = NOTIFY_OK;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		unmap_cpu_from_node(lcpu);
		ret = NOTIFY_OK;
		break;
#endif
	}
	return ret;
}

/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
						      unsigned long size)
{
	/*
	 * We use lmb_end_of_DRAM() in here instead of memory_limit because
	 * we've already adjusted it for the limit and it takes care of
	 * having memory holes below the limit.
	 */

	if (!memory_limit)
		return size;

	if (start + size <= lmb_end_of_DRAM())
		return size;

	if (start >= lmb_end_of_DRAM())
		return 0;

	return lmb_end_of_DRAM() - start;
}
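
/*
 * Editor's note (illustrative sketch, not part of the original file): with a
 * mem= limit that leaves lmb_end_of_DRAM() at 0x40000000, a region starting
 * at 0x30000000 with size 0x20000000 is trimmed to 0x10000000, and a region
 * starting at or above 0x40000000 is dropped (size 0).
 */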

static int __init parse_numa_properties(void)
{
	struct device_node *cpu = NULL;
	struct device_node *memory = NULL;
	int max_domain;
	unsigned long i;

	if (numa_enabled == 0) {
		printk(KERN_WARNING "NUMA disabled by user\n");
		return -1;
	}

	min_common_depth = find_min_common_depth();

	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);
	if (min_common_depth < 0)
		return min_common_depth;

	max_domain = numa_setup_cpu(boot_cpuid);

	/*
	 * Even though we connect cpus to numa domains later in SMP init,
	 * we need to know the maximum node id now. This is because each
	 * node id must have NODE_DATA etc backing it.
	 * As a result of hotplug we could still have cpus appear later on
	 * with larger node ids. In that case we force the cpu into node 0.
	 */
	for_each_cpu(i) {
		int numa_domain;

		cpu = find_cpu_node(i);

		if (cpu) {
			numa_domain = of_node_numa_domain(cpu);
			of_node_put(cpu);

			if (numa_domain < MAX_NUMNODES &&
			    max_domain < numa_domain)
				max_domain = numa_domain;
		}
	}

	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);
	memory = NULL;
	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
		unsigned long start;
		unsigned long size;
		int numa_domain;
		int ranges;
		unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = (unsigned int *)get_property(memory,
			"linux,usable-memory", &len);
		if (!memcell_buf || len <= 0)
			memcell_buf =
				(unsigned int *)get_property(memory, "reg",
					&len);
		if (!memcell_buf || len <= 0)
			continue;

		ranges = memory->n_addrs;
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);

		numa_domain = of_node_numa_domain(memory);

		if (numa_domain >= MAX_NUMNODES) {
			if (numa_domain != 0xffff)
				printk(KERN_ERR "WARNING: memory at %lx maps "
				       "to invalid NUMA node %d\n", start,
				       numa_domain);
			numa_domain = 0;
		}

		if (max_domain < numa_domain)
			max_domain = numa_domain;

		if (!(size = numa_enforce_memory_limit(start, size))) {
			if (--ranges)
				goto new_range;
			else
				continue;
		}

		add_region(numa_domain, start >> PAGE_SHIFT,
			   size >> PAGE_SHIFT);

		if (--ranges)
			goto new_range;
	}

	for (i = 0; i <= max_domain; i++)
		node_set_online(i);

	return 0;
}
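
/*
 * Editor's note (illustrative sketch, not part of the original file): a
 * memory node processed by the loop above might look roughly like
 *	memory@0 {
 *		device_type = "memory";
 *		reg = <0x0 0x0 0x0 0x20000000>;
 *		ibm,associativity = <0x4 0x0 0x0 0x0 0x1>;
 *	};
 * which, with two address cells and two size cells, registers pfns
 * [0, 0x20000000 >> PAGE_SHIFT) on domain 1 via add_region().
 */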

static void __init setup_nonnuma(void)
{
	unsigned long top_of_ram = lmb_end_of_DRAM();
	unsigned long total_ram = lmb_phys_mem_size();
	unsigned int i;

	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_INFO "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	map_cpu_to_node(boot_cpuid, 0);
	for (i = 0; i < lmb.memory.cnt; ++i)
		add_region(0, lmb.memory.region[i].base >> PAGE_SHIFT,
			   lmb_size_pages(&lmb.memory, i));
	node_set_online(0);
}

void __init dump_numa_cpu_topology(void)
{
	unsigned int node;
	unsigned int cpu, count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		printk(KERN_INFO "Node %d CPUs:", node);

		count = 0;
		/*
		 * If we used a CPU iterator here we would miss printing
		 * the holes in the cpumap.
		 */
		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
				if (count == 0)
					printk(" %u", cpu);
				++count;
			} else {
				if (count > 1)
					printk("-%u", cpu - 1);
				count = 0;
			}
		}

		if (count > 1)
			printk("-%u", NR_CPUS - 1);
		printk("\n");
	}
}

static void __init dump_numa_memory_topology(void)
{
	unsigned int node;
	unsigned int count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		unsigned long i;

		printk(KERN_INFO "Node %d Memory:", node);

		count = 0;

		for (i = 0; i < lmb_end_of_DRAM();
		     i += (1 << SECTION_SIZE_BITS)) {
			if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
				if (count == 0)
					printk(" 0x%lx", i);
				++count;
			} else {
				if (count > 0)
					printk("-0x%lx", i);
				count = 0;
			}
		}

		if (count > 0)
			printk("-0x%lx", i);
		printk("\n");
	}
}

/*
 * Allocate some memory, using the lmb or bootmem allocator as required.
 * nid is the preferred node and end_pfn bounds the allocation: memory is
 * taken from below end_pfn << PAGE_SHIFT where possible.
 *
 * Returns the physical address of the memory.
 */
static void __init *careful_allocation(int nid, unsigned long size,
				       unsigned long align,
				       unsigned long end_pfn)
{
	int new_nid;
	unsigned long ret = lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);

	/* retry over all memory */
	if (!ret)
		ret = lmb_alloc_base(size, align, lmb_end_of_DRAM());

	if (!ret)
		panic("numa.c: cannot allocate %lu bytes on node %d",
		      size, nid);

	/*
	 * If the memory came from a previously allocated node, we must
	 * retry with the bootmem allocator.
	 */
	new_nid = early_pfn_to_nid(ret >> PAGE_SHIFT);
	if (new_nid < nid) {
		ret = (unsigned long)__alloc_bootmem_node(NODE_DATA(new_nid),
				size, align, 0);

		if (!ret)
			panic("numa.c: cannot allocate %lu bytes on node %d",
			      size, new_nid);

		ret = __pa(ret);

		dbg("alloc_bootmem %lx %lx\n", ret, size);
	}

	return (void *)ret;
}
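
/*
 * Editor's note (illustrative sketch, not part of the original file): nodes
 * are initialised in ascending order, so if lmb_alloc_base() falls back to
 * memory that early_pfn_to_nid() attributes to an already-initialised node
 * (new_nid < nid), that memory is already under bootmem's control and must
 * be re-allocated through __alloc_bootmem_node() rather than handed out
 * twice.
 */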

void __init do_init_bootmem(void)
{
	int nid;
	unsigned int i;
	static struct notifier_block ppc64_numa_nb = {
		.notifier_call = cpu_numa_callback,
		.priority = 1 /* Must run before sched domains notifier. */
	};

	min_low_pfn = 0;
	max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn;

	if (parse_numa_properties())
		setup_nonnuma();
	else
		dump_numa_memory_topology();

	register_cpu_notifier(&ppc64_numa_nb);

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn, pages_present;
		unsigned long bootmem_paddr;
		unsigned long bootmap_pages;

		get_region(nid, &start_pfn, &end_pfn, &pages_present);

		/* Allocate the node structure node local if possible */
		NODE_DATA(nid) = careful_allocation(nid,
					sizeof(struct pglist_data),
					SMP_CACHE_BYTES, end_pfn);
		NODE_DATA(nid) = __va(NODE_DATA(nid));
		memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

		dbg("node %d\n", nid);
		dbg("NODE_DATA() = %p\n", NODE_DATA(nid));

		NODE_DATA(nid)->bdata = &plat_node_bdata[nid];
		NODE_DATA(nid)->node_start_pfn = start_pfn;
		NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

		if (NODE_DATA(nid)->node_spanned_pages == 0)
			continue;

		dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
		dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);

		bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
		bootmem_paddr = (unsigned long)careful_allocation(nid,
					bootmap_pages << PAGE_SHIFT,
					PAGE_SIZE, end_pfn);
		memset(__va(bootmem_paddr), 0, bootmap_pages << PAGE_SHIFT);

		dbg("bootmap_paddr = %lx\n", bootmem_paddr);

		init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
				  start_pfn, end_pfn);

		/* Add free regions on this node */
		for (i = 0; init_node_data[i].end_pfn; i++) {
			unsigned long start, end;

			if (init_node_data[i].nid != nid)
				continue;

			start = init_node_data[i].start_pfn << PAGE_SHIFT;
			end = init_node_data[i].end_pfn << PAGE_SHIFT;

			dbg("free_bootmem %lx %lx\n", start, end - start);
			free_bootmem_node(NODE_DATA(nid), start, end - start);
		}

		/* Mark reserved regions on this node */
		for (i = 0; i < lmb.reserved.cnt; i++) {
			unsigned long physbase = lmb.reserved.region[i].base;
			unsigned long size = lmb.reserved.region[i].size;
			unsigned long start_paddr = start_pfn << PAGE_SHIFT;
			unsigned long end_paddr = end_pfn << PAGE_SHIFT;

			if (early_pfn_to_nid(physbase >> PAGE_SHIFT) != nid &&
			    early_pfn_to_nid((physbase+size-1) >> PAGE_SHIFT) != nid)
				continue;

			if (physbase < end_paddr &&
			    (physbase+size) > start_paddr) {
				/* overlaps */
				if (physbase < start_paddr) {
					size -= start_paddr - physbase;
					physbase = start_paddr;
				}

				if (size > end_paddr - physbase)
					size = end_paddr - physbase;

				dbg("reserve_bootmem %lx %lx\n", physbase,
				    size);
				reserve_bootmem_node(NODE_DATA(nid), physbase,
						     size);
			}
		}

		/* Add regions into sparsemem */
		for (i = 0; init_node_data[i].end_pfn; i++) {
			unsigned long start, end;

			if (init_node_data[i].nid != nid)
				continue;

			start = init_node_data[i].start_pfn;
			end = init_node_data[i].end_pfn;

			memory_present(nid, start, end);
		}
	}
}

void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES];
	unsigned long zholes_size[MAX_NR_ZONES];
	int nid;

	memset(zones_size, 0, sizeof(zones_size));
	memset(zholes_size, 0, sizeof(zholes_size));

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn, pages_present;

		get_region(nid, &start_pfn, &end_pfn, &pages_present);

		zones_size[ZONE_DMA] = end_pfn - start_pfn;
		zholes_size[ZONE_DMA] = zones_size[ZONE_DMA] - pages_present;

		dbg("free_area_init node %d %lx %lx (hole: %lx)\n", nid,
		    zones_size[ZONE_DMA], start_pfn, zholes_size[ZONE_DMA]);

		free_area_init_node(nid, NODE_DATA(nid), zones_size, start_pfn,
				    zholes_size);
	}
}

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	return 0;
}
early_param("numa", early_numa);
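
/*
 * Editor's note (illustrative sketch, not part of the original file): booting
 * with "numa=off" forces setup_nonnuma(), while "numa=debug" leaves NUMA
 * enabled but turns the dbg() tracing above into KERN_INFO output. Since the
 * parser uses strstr(), the two can be combined, e.g. "numa=off,debug".
 */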

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section. Section
 * corresponds to a SPARSEMEM section, not an LMB. It is assumed that
 * sections are fully contained within a single LMB.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory = NULL;

	if (!numa_enabled || (min_common_depth < 0))
		return 0;

	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
		unsigned long start, size;
		int numa_domain, ranges;
		unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = (unsigned int *)get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		ranges = memory->n_addrs;	/* ranges in cell */
ha_new_range:
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);
		numa_domain = of_node_numa_domain(memory);

		/* Domains not present at boot default to 0 */
		if (!node_online(numa_domain))
			numa_domain = any_online_node(NODE_MASK_ALL);

		if ((scn_addr >= start) && (scn_addr < (start + size))) {
			of_node_put(memory);
			return numa_domain;
		}

		if (--ranges)		/* process all ranges in cell */
			goto ha_new_range;
	}

	BUG();	/* section address should be found above */
	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG */