/* Common code for 32 and 64-bit NUMA */
#include <linux/topology.h>
#include <linux/module.h>
#include <linux/bootmem.h>
#include <asm/numa.h>
#include <asm/acpi.h>

int __initdata numa_off;
nodemask_t numa_nodes_parsed __initdata;

static __init int numa_setup(char *opt)
{
        if (!opt)
                return -EINVAL;
        if (!strncmp(opt, "off", 3))
                numa_off = 1;
#ifdef CONFIG_NUMA_EMU
        if (!strncmp(opt, "fake=", 5))
                numa_emu_cmdline(opt + 5);
#endif
#ifdef CONFIG_ACPI_NUMA
        if (!strncmp(opt, "noacpi", 6))
                acpi_numa = -1;
#endif
        return 0;
}
early_param("numa", numa_setup);
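
/*
 * Illustrative boot-command-line usage of the "numa=" parameter handled
 * above (the exact argument forms accepted by "fake=" depend on the
 * CONFIG_NUMA_EMU parser):
 *
 *	numa=off	disable NUMA setup, use a single node
 *	numa=fake=4	emulate 4 NUMA nodes (CONFIG_NUMA_EMU only)
 *	numa=noacpi	ignore ACPI (SRAT) NUMA information (CONFIG_ACPI_NUMA only)
 */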

/*
 * apicid, cpu, node mappings
 */
s16 __apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
        [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};

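/*
 * Map a CPU to its NUMA node via the CPU's local APIC ID recorded in
 * x86_cpu_to_apicid; returns NUMA_NO_NODE if the APIC ID is unknown.
 */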
int __cpuinit numa_cpu_node(int cpu)
{
        int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);

        if (apicid != BAD_APICID)
                return __apicid_to_node[apicid];
        return NUMA_NO_NODE;
}

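/* Which logical CPUs are on which nodes */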
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

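/*
 * Record the node for @cpu in the early map while it still exists, or in
 * the per-cpu x86_cpu_to_node_map once the per-cpu areas are set up.
 */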
void __cpuinit numa_set_node(int cpu, int node)
{
        int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

        /* early setting, no percpu area yet */
        if (cpu_to_node_map) {
                cpu_to_node_map[cpu] = node;
                return;
        }

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
        if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
                printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
                dump_stack();
                return;
        }
#endif
        per_cpu(x86_cpu_to_node_map, cpu) = node;

        if (node != NUMA_NO_NODE)
                set_cpu_numa_node(cpu, node);
}

void __cpuinit numa_clear_node(int cpu)
{
        numa_set_node(cpu, NUMA_NO_NODE);
}

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
void __init setup_node_to_cpumask_map(void)
{
        unsigned int node, num = 0;

        /* setup nr_node_ids if not done yet */
        if (nr_node_ids == MAX_NUMNODES) {
                for_each_node_mask(node, node_possible_map)
                        num = node;
                nr_node_ids = num + 1;
        }

        /* allocate the map */
        for (node = 0; node < nr_node_ids; node++)
                alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

        /* cpumask_of_node() will now work */
        pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);
}

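/*
 * Illustrative example (not a definition in this file): once
 * setup_node_to_cpumask_map() has run, the per-node cpumasks can be
 * walked with the generic helpers, e.g.
 *
 *	int cpu;
 *
 *	for_each_cpu(cpu, cpumask_of_node(0))
 *		pr_info("CPU %d is on node 0\n", cpu);
 */
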
/*
 * There are unfortunately some poorly designed mainboards around that
 * only connect memory to a single CPU. This breaks the 1:1 cpu->node
 * mapping. To avoid this fill in the mapping for all possible CPUs,
 * as the number of CPUs is not known yet. We round robin the existing
 * nodes.
 */
void __init numa_init_array(void)
{
        int rr, i;

        rr = first_node(node_online_map);
        for (i = 0; i < nr_cpu_ids; i++) {
                if (early_cpu_to_node(i) != NUMA_NO_NODE)
                        continue;
                numa_set_node(i, rr);
                rr = next_node(rr, node_online_map);
                if (rr == MAX_NUMNODES)
                        rr = first_node(node_online_map);
        }
}

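/*
 * Pick the online node closest to @node according to node_distance();
 * used by init_cpu_to_node() when a CPU's parsed node is not online.
 */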
static __init int find_near_online_node(int node)
{
        int n, val;
        int min_val = INT_MAX;
        int best_node = -1;

        for_each_online_node(n) {
                val = node_distance(node, n);

                if (val < min_val) {
                        min_val = val;
                        best_node = n;
                }
        }

        return best_node;
}

/*
 * Set up early cpu_to_node.
 *
 * Populate cpu_to_node[] only if the x86_cpu_to_apicid[] and
 * apicid_to_node[] tables have valid entries for a CPU.
 * This means we skip cpu_to_node[] initialization for NUMA
 * emulation and the fake node case (when running a kernel compiled
 * for NUMA on a non-NUMA box), which is OK as cpu_to_node[]
 * is already initialized in a round-robin manner by numa_init_array()
 * prior to this call, and that initialization is good enough
 * for the fake NUMA cases.
 *
 * Called before the per_cpu areas are set up.
 */
void __init init_cpu_to_node(void)
{
        int cpu;
        u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);

        BUG_ON(cpu_to_apicid == NULL);

        for_each_possible_cpu(cpu) {
                int node = numa_cpu_node(cpu);

                if (node == NUMA_NO_NODE)
                        continue;
                if (!node_online(node))
                        node = find_near_online_node(node);
                numa_set_node(cpu, node);
        }
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

# ifndef CONFIG_NUMA_EMU
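/*
 * Fast-path versions used when per-cpu map debugging is disabled: just
 * set or clear the CPU's bit in its node's cpumask.
 */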
void __cpuinit numa_add_cpu(int cpu)
{
        cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
        cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
# endif /* !CONFIG_NUMA_EMU */

#else /* !CONFIG_DEBUG_PER_CPU_MAPS */

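/*
 * Debug version of cpu_to_node(): warns and dumps a stack trace if it is
 * used before the per_cpu areas are set up, then falls back to the early
 * map.
 */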
int __cpu_to_node(int cpu)
{
        if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
                printk(KERN_WARNING
                        "cpu_to_node(%d): usage too early!\n", cpu);
                dump_stack();
                return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
        }
        return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(__cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are set up.
 */
int early_cpu_to_node(int cpu)
{
        if (early_per_cpu_ptr(x86_cpu_to_node_map))
                return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

        if (!cpu_possible(cpu)) {
                printk(KERN_WARNING
                        "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
                dump_stack();
                return NUMA_NO_NODE;
        }
        return per_cpu(x86_cpu_to_node_map, cpu);
}

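/*
 * Set or clear @cpu in @node's cpumask and log the resulting mask; shared
 * by the debug versions of numa_add_cpu() and numa_remove_cpu() below.
 */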
void debug_cpumask_set_cpu(int cpu, int node, bool enable)
{
        struct cpumask *mask;
        char buf[64];

        if (node == NUMA_NO_NODE) {
                /* early_cpu_to_node() already emits a warning and trace */
                return;
        }
        mask = node_to_cpumask_map[node];
        if (!mask) {
                pr_err("node_to_cpumask_map[%i] NULL\n", node);
                dump_stack();
                return;
        }

        if (enable)
                cpumask_set_cpu(cpu, mask);
        else
                cpumask_clear_cpu(cpu, mask);

        cpulist_scnprintf(buf, sizeof(buf), mask);
        printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
                enable ? "numa_add_cpu" : "numa_remove_cpu",
                cpu, node, buf);
        return;
}

# ifndef CONFIG_NUMA_EMU
static void __cpuinit numa_set_cpumask(int cpu, bool enable)
{
        debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
}

void __cpuinit numa_add_cpu(int cpu)
{
        numa_set_cpumask(cpu, true);
}

void __cpuinit numa_remove_cpu(int cpu)
{
        numa_set_cpumask(cpu, false);
}
# endif /* !CONFIG_NUMA_EMU */

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const struct cpumask *cpumask_of_node(int node)
{
        if (node >= nr_node_ids) {
                printk(KERN_WARNING
                        "cpumask_of_node(%d): node > nr_node_ids(%d)\n",
                        node, nr_node_ids);
                dump_stack();
                return cpu_none_mask;
        }
        if (node_to_cpumask_map[node] == NULL) {
                printk(KERN_WARNING
                        "cpumask_of_node(%d): no node_to_cpumask_map!\n",
                        node);
                dump_stack();
                return cpu_online_mask;
        }
        return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);

#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */