/* arch/x86/mm/numa.c */
/* Common code for 32 and 64-bit NUMA */
#include <linux/topology.h>
#include <linux/module.h>
#include <linux/bootmem.h>
#include <asm/numa.h>
#include <asm/acpi.h>

int __initdata numa_off;

static __init int numa_setup(char *opt)
{
	if (!opt)
		return -EINVAL;
	if (!strncmp(opt, "off", 3))
		numa_off = 1;
#ifdef CONFIG_NUMA_EMU
	if (!strncmp(opt, "fake=", 5))
		numa_emu_cmdline(opt + 5);
#endif
#ifdef CONFIG_ACPI_NUMA
	if (!strncmp(opt, "noacpi", 6))
		acpi_numa = -1;
#endif
	return 0;
}
early_param("numa", numa_setup);
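
/*
 * Illustrative examples of the "numa=" boot options parsed above
 * (a sketch; see Documentation/x86/x86_64/boot-options.txt):
 *
 *	numa=off	disable NUMA, treat the machine as one node
 *	numa=fake=4	split the machine into 4 emulated nodes
 *	numa=noacpi	ignore the ACPI (SRAT) NUMA information
 */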

/*
 * apicid, cpu, node mappings
 */
s16 __apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};

cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

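/*
 * Record a cpu->node mapping: into the early boot-time array while the
 * per-cpu areas are not yet set up, into the per-cpu variable afterwards.
 */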
void __cpuinit numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	/* early setting, no percpu area yet */
	if (cpu_to_node_map) {
		cpu_to_node_map[cpu] = node;
		return;
	}

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
		dump_stack();
		return;
	}
#endif
	per_cpu(x86_cpu_to_node_map, cpu) = node;

	if (node != NUMA_NO_NODE)
		set_cpu_numa_node(cpu, node);
}

void __cpuinit numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}

/*
 * Allocate node_to_cpumask_map based on the number of available nodes.
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
void __init setup_node_to_cpumask_map(void)
{
	unsigned int node, num = 0;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES) {
		for_each_node_mask(node, node_possible_map)
			num = node;
		nr_node_ids = num + 1;
	}

	/* allocate the map */
	for (node = 0; node < nr_node_ids; node++)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);
}
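
/*
 * Worked example (illustrative): with node_possible_map = { 0, 1 },
 * the loop above leaves num == 1, so nr_node_ids becomes 2 and two
 * node cpumasks are allocated.
 */
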
/*
 * There are unfortunately some poorly designed mainboards around that
 * only connect memory to a single CPU.  This breaks the 1:1 cpu->node
 * mapping.  To avoid this, fill in the mapping for all possible CPUs,
 * as the number of CPUs is not known yet.  We round-robin the existing
 * nodes.
 */
void __init numa_init_array(void)
{
	int rr, i;

	rr = first_node(node_online_map);
	for (i = 0; i < nr_cpu_ids; i++) {
		if (early_cpu_to_node(i) != NUMA_NO_NODE)
			continue;
		numa_set_node(i, rr);
		rr = next_node(rr, node_online_map);
		if (rr == MAX_NUMNODES)
			rr = first_node(node_online_map);
	}
}
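
/*
 * Illustrative example: with nodes 0 and 2 online and CPUs 0-3 lacking
 * a firmware-provided node, the loop above maps cpu0->node0, cpu1->node2,
 * then wraps around: cpu2->node0, cpu3->node2.
 */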
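
/*
 * Pick the online node closest to @node, as given by node_distance()
 * (on x86 typically derived from the ACPI SLIT table).  Used below when
 * a CPU's home node is not online, e.g. a memoryless node.
 */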
static __init int find_near_online_node(int node)
{
	int n, val;
	int min_val = INT_MAX;
	int best_node = -1;

	for_each_online_node(n) {
		val = node_distance(node, n);

		if (val < min_val) {
			min_val = val;
			best_node = n;
		}
	}

	return best_node;
}

/*
 * Setup early cpu_to_node.
 *
 * Populate cpu_to_node[] only if the x86_cpu_to_apicid[] and
 * apicid_to_node[] tables have valid entries for a CPU.
 * This means we skip cpu_to_node[] initialisation for the NUMA
 * emulation and fake-node cases (when running a kernel compiled
 * for NUMA on a non-NUMA box), which is OK: cpu_to_node[] is
 * already initialised in a round-robin manner by numa_init_array()
 * prior to this call, and that initialisation is good enough for
 * the fake NUMA cases.
 *
 * Called before the per_cpu areas are set up.
 */
void __init init_cpu_to_node(void)
{
	int cpu;
	u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);

	BUG_ON(cpu_to_apicid == NULL);

	for_each_possible_cpu(cpu) {
		int node = numa_cpu_node(cpu);

		if (node == NUMA_NO_NODE)
			continue;
		if (!node_online(node))
			node = find_near_online_node(node);
		numa_set_node(cpu, node);
	}
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

# ifndef CONFIG_NUMA_EMU
void __cpuinit numa_add_cpu(int cpu)
{
	cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
# endif	/* !CONFIG_NUMA_EMU */

#else	/* !CONFIG_DEBUG_PER_CPU_MAPS */
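
/*
 * Debug versions: out-of-line variants of the cpu_to_node() family that
 * sanity-check their arguments and warn when used too early in boot.
 */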
int __cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
			"cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(__cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are set up.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!cpu_possible(cpu)) {
		printk(KERN_WARNING
			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}

struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable)
{
	int node = early_cpu_to_node(cpu);
	struct cpumask *mask;
	char buf[64];

	mask = node_to_cpumask_map[node];
	if (!mask) {
		pr_err("node_to_cpumask_map[%i] NULL\n", node);
		dump_stack();
		return NULL;
	}

	cpulist_scnprintf(buf, sizeof(buf), mask);
	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
		enable ? "numa_add_cpu" : "numa_remove_cpu",
		cpu, node, buf);
	return mask;
}

# ifndef CONFIG_NUMA_EMU
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
	struct cpumask *mask;

	mask = debug_cpumask_set_cpu(cpu, enable);
	if (!mask)
		return;

	if (enable)
		cpumask_set_cpu(cpu, mask);
	else
		cpumask_clear_cpu(cpu, mask);
}

void __cpuinit numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, 0);
}
# endif	/* !CONFIG_NUMA_EMU */

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const struct cpumask *cpumask_of_node(int node)
{
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): node >= nr_node_ids(%d)\n",
			node, nr_node_ids);
		dump_stack();
		return cpu_none_mask;
	}
	if (node_to_cpumask_map[node] == NULL) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): no node_to_cpumask_map!\n",
			node);
		dump_stack();
		return cpu_online_mask;
	}
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);
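
/*
 * Typical use (sketch, assuming a node id "nid" obtained elsewhere):
 *
 *	int cpu;
 *	for_each_cpu(cpu, cpumask_of_node(nid))
 *		pr_info("node %d has cpu %d\n", nid, cpu);
 */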

#endif	/* !CONFIG_DEBUG_PER_CPU_MAPS */