/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) "numa: " fmt

#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <linux/cpuset.h>
#include <linux/node.h>
#include <linux/stop_machine.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <asm/cputhreads.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/topology.h>
#include <asm/firmware.h>
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/setup.h>
#include <asm/vdso.h>
static int numa_enabled = 1;

static char *cmdline __initdata;

static int numa_debug;
#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }

int numa_cpu_lookup_table[NR_CPUS];
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data);

static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;
static int form1_affinity;

#define MAX_DISTANCE_REF_POINTS 4
static int distance_ref_points_depth;
static const __be32 *distance_ref_points;
static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];
/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
        unsigned int node;

        /* setup nr_node_ids if not done yet */
        if (nr_node_ids == MAX_NUMNODES)
                setup_nr_node_ids();

        /* allocate the map */
        for (node = 0; node < nr_node_ids; node++)
                alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

        /* cpumask_of_node() will now work */
        dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
}
static int __init fake_numa_create_new_node(unsigned long end_pfn,
                                            unsigned int *nid)
{
        unsigned long long mem;
        char *p = cmdline;
        static unsigned int fake_nid;
        static unsigned long long curr_boundary;

        /*
         * Modify node id, iff we started creating NUMA nodes
         * We want to continue from where we left off the last time
         */
        if (fake_nid)
                *nid = fake_nid;
        /*
         * In case there are no more arguments to parse, the
         * node_id should be the same as the last fake node id
         * (we've handled this above).
         */
        if (!p)
                return 0;

        mem = memparse(p, &p);
        if (!mem)
                return 0;

        if (mem < curr_boundary)
                return 0;

        curr_boundary = mem;

        if ((end_pfn << PAGE_SHIFT) > mem) {
                /*
                 * Skip commas and spaces
                 */
                while (*p == ',' || *p == ' ' || *p == '\t')
                        p++;

                cmdline = p;
                fake_nid++;
                *nid = fake_nid;
                dbg("created new fake_node with id %d\n", fake_nid);
                return 1;
        }
        return 0;
}
static void reset_numa_cpu_lookup_table(void)
{
        unsigned int cpu;

        for_each_possible_cpu(cpu)
                numa_cpu_lookup_table[cpu] = -1;
}

static void update_numa_cpu_lookup_table(unsigned int cpu, int node)
{
        numa_cpu_lookup_table[cpu] = node;
}

static void map_cpu_to_node(int cpu, int node)
{
        update_numa_cpu_lookup_table(cpu, node);

        dbg("adding cpu %d to node %d\n", cpu, node);

        if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
                cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
}
#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
static void unmap_cpu_from_node(unsigned long cpu)
{
        int node = numa_cpu_lookup_table[cpu];

        dbg("removing cpu %lu from node %d\n", cpu, node);

        if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
                cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
        } else {
                printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
                       cpu, node);
        }
}
#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */
/* must hold reference to node during call */
static const __be32 *of_get_associativity(struct device_node *dev)
{
        return of_get_property(dev, "ibm,associativity", NULL);
}

/*
 * Returns the property linux,drconf-usable-memory if
 * it exists (the property exists only in kexec/kdump kernels,
 * added by kexec-tools)
 */
static const __be32 *of_get_usable_memory(struct device_node *memory)
{
        const __be32 *prop;
        u32 len;

        prop = of_get_property(memory, "linux,drconf-usable-memory", &len);
        if (!prop || len < sizeof(unsigned int))
                return NULL;
        return prop;
}
int __node_distance(int a, int b)
{
        int i;
        int distance = LOCAL_DISTANCE;

        if (!form1_affinity)
                return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);

        for (i = 0; i < distance_ref_points_depth; i++) {
                if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
                        break;

                /* Double the distance for each NUMA level */
                distance *= 2;
        }

        return distance;
}
EXPORT_SYMBOL(__node_distance);
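
/*
 * Worked example, with hypothetical values and assuming the usual
 * LOCAL_DISTANCE of 10: with distance_ref_points_depth == 2, two nodes
 * whose lookup-table entries match at the first (most significant)
 * level report LOCAL_DISTANCE (10).  If they differ at the first level
 * but match at the second, the distance is 10 * 2 = 20; if they differ
 * at both levels, it is 10 * 2 * 2 = 40.
 */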
static void initialize_distance_lookup_table(int nid,
                const const __be32 *associativity)
{
        int i;

        if (!form1_affinity)
                return;

        for (i = 0; i < distance_ref_points_depth; i++) {
                const __be32 *entry;

                entry = &associativity[be32_to_cpu(distance_ref_points[i]) - 1];
                distance_lookup_table[nid][i] = of_read_number(entry, 1);
        }
}
/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
 * info is found.
 */
static int associativity_to_nid(const __be32 *associativity)
{
        int nid = -1;

        if (min_common_depth == -1)
                goto out;

        if (of_read_number(associativity, 1) >= min_common_depth)
                nid = of_read_number(&associativity[min_common_depth], 1);

        /* POWER4 LPAR uses 0xffff as invalid node */
        if (nid == 0xffff || nid >= MAX_NUMNODES)
                nid = -1;

        if (nid > 0 &&
            of_read_number(associativity, 1) >= distance_ref_points_depth) {
                /*
                 * Skip the length field and send start of associativity array
                 */
                initialize_distance_lookup_table(nid, associativity + 1);
        }

out:
        return nid;
}
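
/*
 * Illustration with hypothetical device-tree values: an
 * ibm,associativity property of <4 0 0 0 2> encodes a length of 4
 * followed by four domain IDs.  With min_common_depth == 4, the nid is
 * read from element 4 of the array, i.e. node 2.
 */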
/* Returns the nid associated with the given device tree node,
 * or -1 if not found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
        int nid = -1;
        const __be32 *tmp;

        tmp = of_get_associativity(device);
        if (tmp)
                nid = associativity_to_nid(tmp);
        return nid;
}

/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
        struct device_node *tmp;
        int nid = -1;

        of_node_get(device);
        while (device) {
                nid = of_node_to_nid_single(device);
                if (nid != -1)
                        break;

                tmp = device;
                device = of_get_parent(tmp);
                of_node_put(tmp);
        }
        of_node_put(device);

        return nid;
}
EXPORT_SYMBOL_GPL(of_node_to_nid);
static int __init find_min_common_depth(void)
{
        int depth;
        struct device_node *root;

        if (firmware_has_feature(FW_FEATURE_OPAL))
                root = of_find_node_by_path("/ibm,opal");
        else
                root = of_find_node_by_path("/rtas");
        if (!root)
                root = of_find_node_by_path("/");

        /*
         * This property is a set of 32-bit integers, each representing
         * an index into the ibm,associativity nodes.
         *
         * With form 0 affinity the first integer is for an SMP configuration
         * (should be all 0's) and the second is for a normal NUMA
         * configuration. We have only one level of NUMA.
         *
         * With form 1 affinity the first integer is the most significant
         * NUMA boundary and the following are progressively less significant
         * boundaries. There can be more than one level of NUMA.
         */
        distance_ref_points = of_get_property(root,
                                        "ibm,associativity-reference-points",
                                        &distance_ref_points_depth);

        if (!distance_ref_points) {
                dbg("NUMA: ibm,associativity-reference-points not found.\n");
                goto err;
        }

        distance_ref_points_depth /= sizeof(int);

        if (firmware_has_feature(FW_FEATURE_OPAL) ||
            firmware_has_feature(FW_FEATURE_TYPE1_AFFINITY)) {
                dbg("Using form 1 affinity\n");
                form1_affinity = 1;
        }

        if (form1_affinity) {
                depth = of_read_number(distance_ref_points, 1);
        } else {
                if (distance_ref_points_depth < 2) {
                        printk(KERN_WARNING "NUMA: "
                                "short ibm,associativity-reference-points\n");
                        goto err;
                }

                depth = of_read_number(&distance_ref_points[1], 1);
        }

        /*
         * Warn and cap if the hardware supports more than
         * MAX_DISTANCE_REF_POINTS domains.
         */
        if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
                printk(KERN_WARNING "NUMA: distance array capped at "
                        "%d entries\n", MAX_DISTANCE_REF_POINTS);
                distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
        }

        of_node_put(root);
        return depth;

err:
        of_node_put(root);
        return -1;
}
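
/*
 * For illustration (hypothetical values): a form 1 platform might
 * expose ibm,associativity-reference-points = <0x4 0x2>.  The property
 * is two 32-bit cells, so distance_ref_points_depth becomes 2, and the
 * first cell (4) is used as the NUMA boundary depth.  Under form 0,
 * the second cell would have been used instead.
 */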
static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
        struct device_node *memory = NULL;

        memory = of_find_node_by_type(memory, "memory");
        if (!memory)
                panic("numa.c: No memory nodes found!");

        *n_addr_cells = of_n_addr_cells(memory);
        *n_size_cells = of_n_size_cells(memory);
        of_node_put(memory);
}
static unsigned long read_n_cells(int n, const __be32 **buf)
{
        unsigned long result = 0;

        while (n--) {
                result = (result << 32) | of_read_number(*buf, 1);
                (*buf)++;
        }
        return result;
}
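
/*
 * Worked example (hypothetical cells): with n == 2 and *buf pointing at
 * the big-endian cells <0x1 0x20000000>, the first iteration yields
 * 0x1 and the second shifts it up to produce 0x120000000; *buf ends up
 * advanced past both cells.
 */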
/*
 * Read the next memblock list entry from the ibm,dynamic-memory property
 * and return the information in the provided of_drconf_cell structure.
 */
static void read_drconf_cell(struct of_drconf_cell *drmem, const __be32 **cellp)
{
        const __be32 *cp;

        drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp);

        cp = *cellp;
        drmem->drc_index = of_read_number(cp, 1);
        drmem->reserved = of_read_number(&cp[1], 1);
        drmem->aa_index = of_read_number(&cp[2], 1);
        drmem->flags = of_read_number(&cp[3], 1);

        *cellp = cp + 4;
}
/*
 * Retrieve and validate the ibm,dynamic-memory property of the device tree.
 *
 * The layout of the ibm,dynamic-memory property is a count N of memblock
 * list entries followed by the N entries themselves.  Each memblock list
 * entry contains information as laid out in the of_drconf_cell struct above.
 */
static int of_get_drconf_memory(struct device_node *memory, const __be32 **dm)
{
        const __be32 *prop;
        u32 len, entries;

        prop = of_get_property(memory, "ibm,dynamic-memory", &len);
        if (!prop || len < sizeof(unsigned int))
                return 0;

        entries = of_read_number(prop++, 1);

        /* Now that we know the number of entries, revalidate the size
         * of the property read in to ensure we have everything
         */
        if (len < (entries * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int))
                return 0;

        *dm = prop;
        return entries;
}
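
/*
 * Layout sketch (assuming n_mem_addr_cells == 2): the property begins
 * with one cell holding N, followed by N six-cell entries of
 * <base_addr:2 drc_index:1 reserved:1 aa_index:1 flags:1>, which is
 * why the length check above requires (N * (n_mem_addr_cells + 4) + 1)
 * cells.
 */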
/*
 * Retrieve and validate the ibm,lmb-size property for drconf memory
 * from the device tree.
 */
static u64 of_get_lmb_size(struct device_node *memory)
{
        const __be32 *prop;
        u32 len;

        prop = of_get_property(memory, "ibm,lmb-size", &len);
        if (!prop || len < sizeof(unsigned int))
                return 0;

        return read_n_cells(n_mem_size_cells, &prop);
}
struct assoc_arrays {
        u32 n_arrays;
        u32 array_sz;
        const __be32 *arrays;
};

/*
 * Retrieve and validate the list of associativity arrays for drconf
 * memory from the ibm,associativity-lookup-arrays property of the
 * device tree.
 *
 * The layout of the ibm,associativity-lookup-arrays property is a number N
 * indicating the number of associativity arrays, followed by a number M
 * indicating the size of each associativity array, followed by a list
 * of N associativity arrays.
 */
static int of_get_assoc_arrays(struct device_node *memory,
                               struct assoc_arrays *aa)
{
        const __be32 *prop;
        u32 len;

        prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
        if (!prop || len < 2 * sizeof(unsigned int))
                return -1;

        aa->n_arrays = of_read_number(prop++, 1);
        aa->array_sz = of_read_number(prop++, 1);

        /* Now that we know the number of arrays and size of each array,
         * revalidate the size of the property read in.
         */
        if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
                return -1;

        aa->arrays = prop;
        return 0;
}
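
/*
 * Layout sketch (hypothetical values): a property of
 * <2 4  0 0 0 1  0 0 1 2> describes N = 2 arrays of M = 4 entries
 * each, so the length check above requires 2 + 2 * 4 = 10 cells in
 * total.
 */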
/*
 * This is like of_node_to_nid_single() for memory represented in the
 * ibm,dynamic-reconfiguration-memory node.
 */
static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
                                   struct assoc_arrays *aa)
{
        int default_nid = 0;
        int nid = default_nid;
        int index;

        if (min_common_depth > 0 && min_common_depth <= aa->array_sz &&
            !(drmem->flags & DRCONF_MEM_AI_INVALID) &&
            drmem->aa_index < aa->n_arrays) {
                index = drmem->aa_index * aa->array_sz + min_common_depth - 1;
                nid = of_read_number(&aa->arrays[index], 1);

                if (nid == 0xffff || nid >= MAX_NUMNODES)
                        nid = default_nid;

                if (nid > 0) {
                        index = drmem->aa_index * aa->array_sz;
                        initialize_distance_lookup_table(nid,
                                                        &aa->arrays[index]);
                }
        }

        return nid;
}
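
/*
 * Index arithmetic, illustrated with hypothetical values: for
 * aa->array_sz == 4, min_common_depth == 4 and drmem->aa_index == 1,
 * the nid is read from aa->arrays[1 * 4 + 4 - 1] = aa->arrays[7], the
 * last cell of the second associativity array.
 */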
/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int numa_setup_cpu(unsigned long lcpu)
{
        int nid = -1;
        struct device_node *cpu;

        /*
         * If a valid cpu-to-node mapping is already available, use it
         * directly instead of querying the firmware, since it represents
         * the most recent mapping notified to us by the platform (eg: VPHN).
         */
        if ((nid = numa_cpu_lookup_table[lcpu]) >= 0) {
                map_cpu_to_node(lcpu, nid);
                return nid;
        }

        cpu = of_get_cpu_node(lcpu, NULL);

        if (!cpu) {
                WARN_ON(1);
                if (cpu_present(lcpu))
                        goto out_present;
                else
                        goto out;
        }

        nid = of_node_to_nid_single(cpu);

out_present:
        if (nid < 0 || !node_online(nid))
                nid = first_online_node;

        map_cpu_to_node(lcpu, nid);
        of_node_put(cpu);
out:
        return nid;
}
static void verify_cpu_node_mapping(int cpu, int node)
{
        int base, sibling, i;

        /* Verify that all the threads in the core belong to the same node */
        base = cpu_first_thread_sibling(cpu);

        for (i = 0; i < threads_per_core; i++) {
                sibling = base + i;

                if (sibling == cpu || cpu_is_offline(sibling))
                        continue;

                if (cpu_to_node(sibling) != node) {
                        WARN(1, "CPU thread siblings %d and %d don't belong"
                                " to the same node!\n", cpu, sibling);
                        break;
                }
        }
}
static int cpu_numa_callback(struct notifier_block *nfb, unsigned long action,
                             void *hcpu)
{
        unsigned long lcpu = (unsigned long)hcpu;
        int ret = NOTIFY_DONE, nid;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                nid = numa_setup_cpu(lcpu);
                verify_cpu_node_mapping((int)lcpu, nid);
                ret = NOTIFY_OK;
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
                unmap_cpu_from_node(lcpu);
                ret = NOTIFY_OK;
                break;
#endif
        }
        return ret;
}
/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
                                                      unsigned long size)
{
        /*
         * We use memblock_end_of_DRAM() in here instead of memory_limit because
         * we've already adjusted it for the limit and it takes care of
         * having memory holes below the limit. Also, in the case of
         * iommu_is_off, memory_limit is not set but is implicitly enforced.
         */

        if (start + size <= memblock_end_of_DRAM())
                return size;

        if (start >= memblock_end_of_DRAM())
                return 0;

        return memblock_end_of_DRAM() - start;
}
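
/*
 * Example with hypothetical addresses: if memblock_end_of_DRAM() is
 * 0x40000000, a region at start 0x30000000 with size 0x20000000 is
 * truncated to 0x10000000, while one starting at 0x50000000 yields 0
 * and should be discarded.
 */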
/*
 * Reads the counter for a given entry in
 * linux,drconf-usable-memory property
 */
static inline int __init read_usm_ranges(const __be32 **usm)
{
        /*
         * For each lmb in ibm,dynamic-memory a corresponding
         * entry in linux,drconf-usable-memory property contains
         * a counter followed by that many (base, size) pairs.
         * read the counter from linux,drconf-usable-memory
         */
        return read_n_cells(n_mem_size_cells, usm);
}
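
/*
 * Sketch of one usable-memory entry (hypothetical): a counter of 2
 * followed by two (base, size) pairs restricts the LMB to those two
 * sub-ranges; a counter of 0 marks the LMB as unusable in this kernel,
 * and the caller skips it entirely.
 */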
/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node. This assumes n_mem_{addr,size}_cells have been set.
 */
static void __init parse_drconf_memory(struct device_node *memory)
{
        const __be32 *uninitialized_var(dm), *usm;
        unsigned int n, rc, ranges, is_kexec_kdump = 0;
        unsigned long lmb_size, base, size, sz;
        int nid;
        struct assoc_arrays aa = { .arrays = NULL };

        n = of_get_drconf_memory(memory, &dm);
        if (!n)
                return;

        lmb_size = of_get_lmb_size(memory);
        if (!lmb_size)
                return;

        rc = of_get_assoc_arrays(memory, &aa);
        if (rc)
                return;

        /* check if this is a kexec/kdump kernel */
        usm = of_get_usable_memory(memory);
        if (usm != NULL)
                is_kexec_kdump = 1;

        for (; n != 0; --n) {
                struct of_drconf_cell drmem;

                read_drconf_cell(&drmem, &dm);

                /* skip this block if the reserved bit is set in flags (0x80)
                   or if the block is not assigned to this partition (0x8) */
                if ((drmem.flags & DRCONF_MEM_RESERVED)
                    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
                        continue;

                base = drmem.base_addr;
                size = lmb_size;
                ranges = 1;

                if (is_kexec_kdump) {
                        ranges = read_usm_ranges(&usm);
                        if (!ranges) /* there are no (base, size) pairs */
                                continue;
                }
                do {
                        if (is_kexec_kdump) {
                                base = read_n_cells(n_mem_addr_cells, &usm);
                                size = read_n_cells(n_mem_size_cells, &usm);
                        }
                        nid = of_drconf_to_nid_single(&drmem, &aa);
                        fake_numa_create_new_node(
                                                ((base + size) >> PAGE_SHIFT),
                                                &nid);
                        node_set_online(nid);
                        sz = numa_enforce_memory_limit(base, size);
                        if (sz)
                                memblock_set_node(base, sz,
                                                  &memblock.memory, nid);
                } while (--ranges);
        }
}
static int __init parse_numa_properties(void)
{
        struct device_node *memory;
        int default_nid = 0;
        unsigned long i;

        if (numa_enabled == 0) {
                printk(KERN_WARNING "NUMA disabled by user\n");
                return -1;
        }

        min_common_depth = find_min_common_depth();

        if (min_common_depth < 0)
                return min_common_depth;

        dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);

        /*
         * Even though we connect cpus to numa domains later in SMP
         * init, we need to know the node ids now. This is because
         * each node to be onlined must have NODE_DATA etc backing it.
         */
        for_each_present_cpu(i) {
                struct device_node *cpu;
                int nid;

                cpu = of_get_cpu_node(i, NULL);
                BUG_ON(!cpu);
                nid = of_node_to_nid_single(cpu);
                of_node_put(cpu);

                /*
                 * Don't fall back to default_nid yet -- we will plug
                 * cpus into nodes once the memory scan has discovered
                 * the topology.
                 */
                if (nid < 0)
                        continue;
                node_set_online(nid);
        }

        get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);

        for_each_node_by_type(memory, "memory") {
                unsigned long start;
                unsigned long size;
                int nid;
                int ranges;
                const __be32 *memcell_buf;
                unsigned int len;

                memcell_buf = of_get_property(memory,
                        "linux,usable-memory", &len);
                if (!memcell_buf || len <= 0)
                        memcell_buf = of_get_property(memory, "reg", &len);
                if (!memcell_buf || len <= 0)
                        continue;

                /* ranges in cell */
                ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
                /* these are order-sensitive, and modify the buffer pointer */
                start = read_n_cells(n_mem_addr_cells, &memcell_buf);
                size = read_n_cells(n_mem_size_cells, &memcell_buf);

                /*
                 * Assumption: either all memory nodes or none will
                 * have associativity properties.  If none, then
                 * everything goes to default_nid.
                 */
                nid = of_node_to_nid_single(memory);
                if (nid < 0)
                        nid = default_nid;

                fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
                node_set_online(nid);

                if (!(size = numa_enforce_memory_limit(start, size))) {
                        if (--ranges)
                                goto new_range;
                        else
                                continue;
                }

                memblock_set_node(start, size, &memblock.memory, nid);

                if (--ranges)
                        goto new_range;
        }

        /*
         * Now do the same thing for each MEMBLOCK listed in the
         * ibm,dynamic-memory property in the
         * ibm,dynamic-reconfiguration-memory node.
         */
        memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (memory) {
                parse_drconf_memory(memory);
                of_node_put(memory);
        }

        return 0;
}
static void __init setup_nonnuma(void)
{
        unsigned long top_of_ram = memblock_end_of_DRAM();
        unsigned long total_ram = memblock_phys_mem_size();
        unsigned long start_pfn, end_pfn;
        unsigned int nid = 0;
        struct memblock_region *reg;

        printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
               top_of_ram, total_ram);
        printk(KERN_DEBUG "Memory hole size: %ldMB\n",
               (top_of_ram - total_ram) >> 20);

        for_each_memblock(memory, reg) {
                start_pfn = memblock_region_memory_base_pfn(reg);
                end_pfn = memblock_region_memory_end_pfn(reg);

                fake_numa_create_new_node(end_pfn, &nid);
                memblock_set_node(PFN_PHYS(start_pfn),
                                  PFN_PHYS(end_pfn - start_pfn),
                                  &memblock.memory, nid);
                node_set_online(nid);
        }
}
void __init dump_numa_cpu_topology(void)
{
        unsigned int node;
        unsigned int cpu, count;

        if (min_common_depth == -1 || !numa_enabled)
                return;

        for_each_online_node(node) {
                printk(KERN_DEBUG "Node %d CPUs:", node);

                count = 0;
                /*
                 * If we used a CPU iterator here we would miss printing
                 * the holes in the cpumap.
                 */
                for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
                        if (cpumask_test_cpu(cpu,
                                        node_to_cpumask_map[node])) {
                                if (count == 0)
                                        printk(" %u", cpu);
                                ++count;
                        } else {
                                if (count > 1)
                                        printk("-%u", cpu - 1);
                                count = 0;
                        }
                }

                if (count > 1)
                        printk("-%u", nr_cpu_ids - 1);
                printk("\n");
        }
}
static void __init dump_numa_memory_topology(void)
{
        unsigned int node;
        unsigned int count;

        if (min_common_depth == -1 || !numa_enabled)
                return;

        for_each_online_node(node) {
                unsigned long i;

                printk(KERN_DEBUG "Node %d Memory:", node);

                count = 0;

                for (i = 0; i < memblock_end_of_DRAM();
                     i += (1 << SECTION_SIZE_BITS)) {
                        if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
                                if (count == 0)
                                        printk(" 0x%lx", i);
                                ++count;
                        } else {
                                if (count > 0)
                                        printk("-0x%lx", i);
                                count = 0;
                        }
                }

                if (count > 0)
                        printk("-0x%lx", i);
                printk("\n");
        }
}
static struct notifier_block ppc64_numa_nb = {
        .notifier_call = cpu_numa_callback,
        .priority = 1 /* Must run before sched domains notifier. */
};
/* Initialize NODE_DATA for a node on the local memory */
static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
{
        u64 spanned_pages = end_pfn - start_pfn;
        const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
        u64 nd_pa;
        void *nd;
        int tnid;

        if (spanned_pages)
                pr_info("Initmem setup node %d [mem %#010Lx-%#010Lx]\n",
                        nid, start_pfn << PAGE_SHIFT,
                        (end_pfn << PAGE_SHIFT) - 1);
        else
                pr_info("Initmem setup node %d\n", nid);

        nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
        nd = __va(nd_pa);

        /* report and initialize */
        pr_info(" NODE_DATA [mem %#010Lx-%#010Lx]\n",
                nd_pa, nd_pa + nd_size - 1);
        tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
        if (tnid != nid)
                pr_info(" NODE_DATA(%d) on node %d\n", nid, tnid);

        node_data[nid] = nd;
        memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
        NODE_DATA(nid)->node_id = nid;
        NODE_DATA(nid)->node_start_pfn = start_pfn;
        NODE_DATA(nid)->node_spanned_pages = spanned_pages;
}
void __init initmem_init(void)
{
        int nid, cpu;

        max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
        max_pfn = max_low_pfn;

        if (parse_numa_properties())
                setup_nonnuma();
        else
                dump_numa_memory_topology();

        memblock_dump_all();

        /*
         * Reduce the possible NUMA nodes to the online NUMA nodes,
         * since we do not support node hotplug. This ensures that we
         * lower the maximum NUMA node ID to what is actually present.
         */
        nodes_and(node_possible_map, node_possible_map, node_online_map);

        for_each_online_node(nid) {
                unsigned long start_pfn, end_pfn;

                get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
                setup_node_data(nid, start_pfn, end_pfn);
                sparse_memory_present_with_active_regions(nid);
        }

        sparse_init();

        setup_node_to_cpumask_map();

        reset_numa_cpu_lookup_table();
        register_cpu_notifier(&ppc64_numa_nb);
        /*
         * We need the numa_cpu_lookup_table to be accurate for all CPUs,
         * even before we online them, so that we can use cpu_to_{node,mem}
         * early in boot, cf. smp_prepare_cpus().
         */
        for_each_present_cpu(cpu) {
                numa_setup_cpu((unsigned long)cpu);
        }
}
static int __init early_numa(char *p)
{
        if (!p)
                return 0;

        if (strstr(p, "off"))
                numa_enabled = 0;

        if (strstr(p, "debug"))
                numa_debug = 1;

        p = strstr(p, "fake=");
        if (p)
                cmdline = p + strlen("fake=");

        return 0;
}
early_param("numa", early_numa);
static bool topology_updates_enabled = true;

static int __init early_topology_updates(char *p)
{
        if (!p)
                return 0;

        if (!strcmp(p, "off")) {
                pr_info("Disabling topology updates\n");
                topology_updates_enabled = false;
        }

        return 0;
}
early_param("topology_updates", early_topology_updates);
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section for
 * memory represented in the device tree by the property
 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
 */
static int hot_add_drconf_scn_to_nid(struct device_node *memory,
                                     unsigned long scn_addr)
{
        const __be32 *dm;
        unsigned int drconf_cell_cnt, rc;
        unsigned long lmb_size;
        struct assoc_arrays aa;
        int nid = -1;

        drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
        if (!drconf_cell_cnt)
                return -1;

        lmb_size = of_get_lmb_size(memory);
        if (!lmb_size)
                return -1;

        rc = of_get_assoc_arrays(memory, &aa);
        if (rc)
                return -1;

        for (; drconf_cell_cnt != 0; --drconf_cell_cnt) {
                struct of_drconf_cell drmem;

                read_drconf_cell(&drmem, &dm);

                /* skip this block if it is reserved or not assigned to
                 * this partition */
                if ((drmem.flags & DRCONF_MEM_RESERVED)
                    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
                        continue;

                if ((scn_addr < drmem.base_addr)
                    || (scn_addr >= (drmem.base_addr + lmb_size)))
                        continue;

                nid = of_drconf_to_nid_single(&drmem, &aa);
                break;
        }

        return nid;
}
/*
 * Find the node associated with a hot added memory section for memory
 * represented in the device tree as a node (i.e. memory@XXXX) for
 * each memblock.
 */
static int hot_add_node_scn_to_nid(unsigned long scn_addr)
{
        struct device_node *memory;
        int nid = -1;

        for_each_node_by_type(memory, "memory") {
                unsigned long start, size;
                int ranges;
                const __be32 *memcell_buf;
                unsigned int len;

                memcell_buf = of_get_property(memory, "reg", &len);
                if (!memcell_buf || len <= 0)
                        continue;

                /* ranges in cell */
                ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);

                while (ranges--) {
                        start = read_n_cells(n_mem_addr_cells, &memcell_buf);
                        size = read_n_cells(n_mem_size_cells, &memcell_buf);

                        if ((scn_addr < start) || (scn_addr >= (start + size)))
                                continue;

                        nid = of_node_to_nid_single(memory);
                        break;
                }

                if (nid >= 0)
                        break;
        }

        of_node_put(memory);

        return nid;
}
/*
 * Find the node associated with a hot added memory section.  A section
 * corresponds to a SPARSEMEM section, not a MEMBLOCK.  It is assumed that
 * sections are fully contained within a single MEMBLOCK.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
        struct device_node *memory = NULL;
        int nid;

        if (!numa_enabled || (min_common_depth < 0))
                return first_online_node;

        memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (memory) {
                nid = hot_add_drconf_scn_to_nid(memory, scn_addr);
                of_node_put(memory);
        } else {
                nid = hot_add_node_scn_to_nid(scn_addr);
        }

        if (nid < 0 || !node_online(nid))
                nid = first_online_node;

        if (NODE_DATA(nid)->node_spanned_pages)
                return nid;

        for_each_online_node(nid) {
                if (NODE_DATA(nid)->node_spanned_pages) {
                        break;
                }
        }

        return nid;
}
static u64 hot_add_drconf_memory_max(void)
{
        struct device_node *memory = NULL;
        unsigned int drconf_cell_cnt = 0;
        u64 lmb_size = 0;
        const __be32 *dm = NULL;

        memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (memory) {
                drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
                lmb_size = of_get_lmb_size(memory);
                of_node_put(memory);
        }
        return lmb_size * drconf_cell_cnt;
}

/*
 * memory_hotplug_max - return max address of memory that may be added
 *
 * This is currently only used on systems that support drconfig memory
 * hotplug.
 */
u64 memory_hotplug_max(void)
{
        return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
}
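
/*
 * Rough arithmetic example (hypothetical configuration): 1024 drconf
 * LMBs of 256MB each give a hot-add ceiling of 256GB, unless the
 * memblock end of DRAM is already higher.
 */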
#endif /* CONFIG_MEMORY_HOTPLUG */
/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR
struct topology_update_data {
        struct topology_update_data *next;
        unsigned int cpu;
        int old_nid;
        int new_nid;
};

static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
static cpumask_t cpu_associativity_changes_mask;
static int vphn_enabled;
static int prrn_enabled;
static void reset_topology_timer(void);
/*
 * Store the current values of the associativity change counters
 * provided by the hypervisor.
 */
static void setup_cpu_associativity_change_counters(void)
{
        int cpu;

        /* The VPHN feature supports a maximum of 8 reference points */
        BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);

        for_each_possible_cpu(cpu) {
                int i;
                u8 *counts = vphn_cpu_change_counts[cpu];
                volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

                for (i = 0; i < distance_ref_points_depth; i++)
                        counts[i] = hypervisor_counts[i];
        }
}
/*
 * The hypervisor maintains a set of 8 associativity change counters in
 * the VPA of each cpu that correspond to the associativity levels in the
 * ibm,associativity-reference-points property. When an associativity
 * level changes, the corresponding counter is incremented.
 *
 * Set a bit in cpu_associativity_changes_mask for each cpu whose home
 * node associativity levels have changed.
 *
 * Returns the number of cpus with unhandled associativity changes.
 */
static int update_cpu_associativity_changes_mask(void)
{
        int cpu;
        cpumask_t *changes = &cpu_associativity_changes_mask;

        for_each_possible_cpu(cpu) {
                int i, changed = 0;
                u8 *counts = vphn_cpu_change_counts[cpu];
                volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

                for (i = 0; i < distance_ref_points_depth; i++) {
                        if (hypervisor_counts[i] != counts[i]) {
                                counts[i] = hypervisor_counts[i];
                                changed = 1;
                        }
                }
                if (changed) {
                        cpumask_or(changes, changes, cpu_sibling_mask(cpu));
                        cpu = cpu_last_thread_sibling(cpu);
                }
        }

        return cpumask_weight(changes);
}
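
/*
 * Example of the detection step (hypothetical counters): if the VPA
 * counter for reference level 1 of cpu 4 advances from 3 to 4 while
 * the cached value is still 3, the snapshot is refreshed and every
 * thread sibling of cpu 4 is flagged in
 * cpu_associativity_changes_mask.
 */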
/*
 * Retrieve the new associativity information for a virtual processor's
 * home node.
 */
static long hcall_vphn(unsigned long cpu, __be32 *associativity)
{
        long rc;
        long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
        u64 flags = 1;
        int hwcpu = get_hard_smp_processor_id(cpu);

        rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
        vphn_unpack_associativity(retbuf, associativity);

        return rc;
}
static long vphn_get_associativity(unsigned long cpu,
                                   __be32 *associativity)
{
        long rc;

        rc = hcall_vphn(cpu, associativity);

        switch (rc) {
        case H_FUNCTION:
                printk(KERN_INFO
                        "VPHN is not supported. Disabling polling...\n");
                stop_topology_update();
                break;
        case H_HARDWARE:
                printk(KERN_ERR
                        "hcall_vphn() experienced a hardware fault "
                        "preventing VPHN. Disabling polling...\n");
                stop_topology_update();
                break;
        }

        return rc;
}
/*
 * Update the CPU maps and sysfs entries for a single CPU when its NUMA
 * characteristics change. This function doesn't perform any locking and is
 * only safe to call from stop_machine().
 */
static int update_cpu_topology(void *data)
{
        struct topology_update_data *update;
        unsigned long cpu;

        if (!data)
                return -EINVAL;

        cpu = smp_processor_id();

        for (update = data; update; update = update->next) {
                int new_nid = update->new_nid;
                if (cpu != update->cpu)
                        continue;

                unmap_cpu_from_node(cpu);
                map_cpu_to_node(cpu, new_nid);
                set_cpu_numa_node(cpu, new_nid);
                set_cpu_numa_mem(cpu, local_memory_node(new_nid));
                vdso_getcpu_init();
        }

        return 0;
}
static int update_lookup_table(void *data)
{
        struct topology_update_data *update;

        if (!data)
                return -EINVAL;

        /*
         * Upon topology update, the numa-cpu lookup table needs to be updated
         * for all threads in the core, including offline CPUs, to ensure that
         * future hotplug operations respect the cpu-to-node associativity
         * properly.
         */
        for (update = data; update; update = update->next) {
                int nid, base, j;

                nid = update->new_nid;
                base = cpu_first_thread_sibling(update->cpu);

                for (j = 0; j < threads_per_core; j++) {
                        update_numa_cpu_lookup_table(base + j, nid);
                }
        }

        return 0;
}
/*
 * Update the node maps and sysfs entries for each cpu whose home node
 * has changed. Returns 1 when the topology has changed, and 0 otherwise.
 */
int arch_update_cpu_topology(void)
{
        unsigned int cpu, sibling, changed = 0;
        struct topology_update_data *updates, *ud;
        __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
        cpumask_t updated_cpus;
        struct device *dev;
        int weight, new_nid, i = 0;

        if (!prrn_enabled && !vphn_enabled)
                return 0;

        weight = cpumask_weight(&cpu_associativity_changes_mask);
        if (!weight)
                return 0;

        updates = kzalloc(weight * (sizeof(*updates)), GFP_KERNEL);
        if (!updates)
                return 0;

        cpumask_clear(&updated_cpus);

        for_each_cpu(cpu, &cpu_associativity_changes_mask) {
                /*
                 * If siblings aren't flagged for changes, updates list
                 * will be too short. Skip on this update and set for next
                 * update.
                 */
                if (!cpumask_subset(cpu_sibling_mask(cpu),
                                        &cpu_associativity_changes_mask)) {
                        pr_info("Sibling bits not set for associativity "
                                        "change, cpu%d\n", cpu);
                        cpumask_or(&cpu_associativity_changes_mask,
                                        &cpu_associativity_changes_mask,
                                        cpu_sibling_mask(cpu));
                        cpu = cpu_last_thread_sibling(cpu);
                        continue;
                }

                /* Use associativity from first thread for all siblings */
                vphn_get_associativity(cpu, associativity);
                new_nid = associativity_to_nid(associativity);
                if (new_nid < 0 || !node_online(new_nid))
                        new_nid = first_online_node;

                if (new_nid == numa_cpu_lookup_table[cpu]) {
                        cpumask_andnot(&cpu_associativity_changes_mask,
                                        &cpu_associativity_changes_mask,
                                        cpu_sibling_mask(cpu));
                        cpu = cpu_last_thread_sibling(cpu);
                        continue;
                }

                for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
                        ud = &updates[i++];
                        ud->cpu = sibling;
                        ud->new_nid = new_nid;
                        ud->old_nid = numa_cpu_lookup_table[sibling];
                        cpumask_set_cpu(sibling, &updated_cpus);
                        if (i < weight)
                                ud->next = &updates[i];
                }
                cpu = cpu_last_thread_sibling(cpu);
        }

        pr_debug("Topology update for the following CPUs:\n");
        if (cpumask_weight(&updated_cpus)) {
                for (ud = &updates[0]; ud; ud = ud->next) {
                        pr_debug("cpu %d moving from node %d "
                                 "to %d\n", ud->cpu,
                                 ud->old_nid, ud->new_nid);
                }
        }

        /*
         * In cases where we have nothing to update (because the updates list
         * is too short or because the new topology is same as the old one),
         * skip invoking update_cpu_topology() via stop-machine(). This is
         * necessary (and not just a fast-path optimization) since stop-machine
         * can end up electing a random CPU to run update_cpu_topology(), and
         * thus trick us into setting up incorrect cpu-node mappings (since
         * 'updates' is kzalloc()'ed).
         *
         * And for the similar reason, we will skip all the following updating.
         */
        if (!cpumask_weight(&updated_cpus))
                goto out;

        stop_machine(update_cpu_topology, &updates[0], &updated_cpus);

        /*
         * Update the numa-cpu lookup table with the new mappings, even for
         * offline CPUs. It is best to perform this update from the stop-
         * machine context.
         */
        stop_machine(update_lookup_table, &updates[0],
                     cpumask_of(raw_smp_processor_id()));

        for (ud = &updates[0]; ud; ud = ud->next) {
                unregister_cpu_under_node(ud->cpu, ud->old_nid);
                register_cpu_under_node(ud->cpu, ud->new_nid);

                dev = get_cpu_device(ud->cpu);
                if (dev)
                        kobject_uevent(&dev->kobj, KOBJ_CHANGE);
                cpumask_clear_cpu(ud->cpu, &cpu_associativity_changes_mask);
                changed = 1;
        }

out:
        kfree(updates);
        return changed;
}
static void topology_work_fn(struct work_struct *work)
{
        rebuild_sched_domains();
}
static DECLARE_WORK(topology_work, topology_work_fn);

static void topology_schedule_update(void)
{
        schedule_work(&topology_work);
}
static void topology_timer_fn(unsigned long ignored)
{
        if (prrn_enabled && cpumask_weight(&cpu_associativity_changes_mask))
                topology_schedule_update();
        else if (vphn_enabled) {
                if (update_cpu_associativity_changes_mask() > 0)
                        topology_schedule_update();
                reset_topology_timer();
        }
}
static struct timer_list topology_timer =
        TIMER_INITIALIZER(topology_timer_fn, 0, 0);

static void reset_topology_timer(void)
{
        topology_timer.data = 0;
        topology_timer.expires = jiffies + 60 * HZ;
        mod_timer(&topology_timer, topology_timer.expires);
}
#ifdef CONFIG_SMP

static void stage_topology_update(int core_id)
{
        cpumask_or(&cpu_associativity_changes_mask,
                &cpu_associativity_changes_mask, cpu_sibling_mask(core_id));
        reset_topology_timer();
}
static int dt_update_callback(struct notifier_block *nb,
                                unsigned long action, void *data)
{
        struct of_reconfig_data *update = data;
        int rc = NOTIFY_DONE;

        switch (action) {
        case OF_RECONFIG_UPDATE_PROPERTY:
                if (!of_prop_cmp(update->dn->type, "cpu") &&
                    !of_prop_cmp(update->prop->name, "ibm,associativity")) {
                        u32 core_id;
                        of_property_read_u32(update->dn, "reg", &core_id);
                        stage_topology_update(core_id);
                        rc = NOTIFY_OK;
                }
                break;
        }

        return rc;
}

static struct notifier_block dt_update_nb = {
        .notifier_call = dt_update_callback,
};

#endif
/*
 * Start polling for associativity changes.
 */
int start_topology_update(void)
{
        int rc = 0;

        if (firmware_has_feature(FW_FEATURE_PRRN)) {
                if (!prrn_enabled) {
                        prrn_enabled = 1;
                        vphn_enabled = 0;
#ifdef CONFIG_SMP
                        rc = of_reconfig_notifier_register(&dt_update_nb);
#endif
                }
        } else if (firmware_has_feature(FW_FEATURE_VPHN) &&
                   lppaca_shared_proc(get_lppaca())) {
                if (!vphn_enabled) {
                        prrn_enabled = 0;
                        vphn_enabled = 1;
                        setup_cpu_associativity_change_counters();
                        init_timer_deferrable(&topology_timer);
                        reset_topology_timer();
                }
        }

        return rc;
}
/*
 * Disable polling for VPHN associativity changes.
 */
int stop_topology_update(void)
{
        int rc = 0;

        if (prrn_enabled) {
                prrn_enabled = 0;
#ifdef CONFIG_SMP
                rc = of_reconfig_notifier_unregister(&dt_update_nb);
#endif
        } else if (vphn_enabled) {
                vphn_enabled = 0;
                rc = del_timer_sync(&topology_timer);
        }

        return rc;
}

int prrn_is_enabled(void)
{
        return prrn_enabled;
}
static int topology_read(struct seq_file *file, void *v)
{
        if (vphn_enabled || prrn_enabled)
                seq_puts(file, "on\n");
        else
                seq_puts(file, "off\n");

        return 0;
}

static int topology_open(struct inode *inode, struct file *file)
{
        return single_open(file, topology_read, NULL);
}
static ssize_t topology_write(struct file *file, const char __user *buf,
                              size_t count, loff_t *off)
{
        char kbuf[4]; /* "on" or "off" plus null. */
        int read_len;

        read_len = count < 3 ? count : 3;
        if (copy_from_user(kbuf, buf, read_len))
                return -EINVAL;

        kbuf[read_len] = '\0';

        if (!strncmp(kbuf, "on", 2))
                start_topology_update();
        else if (!strncmp(kbuf, "off", 3))
                stop_topology_update();
        else
                return -EINVAL;

        return count;
}
static const struct file_operations topology_ops = {
        .read = seq_read,
        .write = topology_write,
        .open = topology_open,
        .release = single_release
};

static int topology_update_init(void)
{
        /* Do not poll for changes if disabled at boot */
        if (topology_updates_enabled)
                start_topology_update();

        if (!proc_create("powerpc/topology_updates", 0644, NULL, &topology_ops))
                return -ENOMEM;

        return 0;
}
device_initcall(topology_update_init);
#endif /* CONFIG_PPC_SPLPAR */