/* Common code for 32 and 64-bit NUMA */
#include <linux/topology.h>
#include <linux/module.h>
#include <linux/bootmem.h>
#include <asm/numa.h>
#include <asm/acpi.h>

int __initdata numa_off;

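/*
 * Parse the "numa=" early boot parameter: "off" disables NUMA,
 * "fake=..." is handed to the NUMA emulation code, and "noacpi"
 * disables use of ACPI-provided NUMA information.
 */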
static __init int numa_setup(char *opt)
{
	if (!opt)
		return -EINVAL;
	if (!strncmp(opt, "off", 3))
		numa_off = 1;
#ifdef CONFIG_NUMA_EMU
	if (!strncmp(opt, "fake=", 5))
		numa_emu_cmdline(opt + 5);
#endif
#ifdef CONFIG_ACPI_NUMA
	if (!strncmp(opt, "noacpi", 6))
		acpi_numa = -1;
#endif
	return 0;
}
early_param("numa", numa_setup);

/*
 * apicid, cpu, node mappings
 */
s16 __apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};

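/*
 * Look up the node for a CPU via its local APIC ID; returns
 * NUMA_NO_NODE if the CPU's APIC ID is not yet known.
 */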
int __cpuinit numa_cpu_node(int cpu)
{
	int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);

	if (apicid != BAD_APICID)
		return __apicid_to_node[apicid];
	return NUMA_NO_NODE;
}

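/* Which CPUs belong to which node; allocated in setup_node_to_cpumask_map(). */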
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

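/*
 * Record the cpu -> node mapping, using the early (pre-percpu) table
 * while the per_cpu areas are not set up yet.
 */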
void __cpuinit numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	/* early setting, no percpu area yet */
	if (cpu_to_node_map) {
		cpu_to_node_map[cpu] = node;
		return;
	}

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
		dump_stack();
		return;
	}
#endif
	per_cpu(x86_cpu_to_node_map, cpu) = node;

	if (node != NUMA_NO_NODE)
		set_cpu_numa_node(cpu, node);
}

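/* Forget a CPU's node association. */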
void __cpuinit numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}

/*
 * Allocate node_to_cpumask_map based on the number of available nodes.
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
void __init setup_node_to_cpumask_map(void)
{
	unsigned int node, num = 0;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES) {
		for_each_node_mask(node, node_possible_map)
			num = node;
		nr_node_ids = num + 1;
	}

	/* allocate the map */
	for (node = 0; node < nr_node_ids; node++)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);
}

/*
 * There are unfortunately some poorly designed mainboards around that
 * only connect memory to a single CPU. This breaks the 1:1 cpu->node
 * mapping. To avoid this, fill in the mapping for all possible CPUs,
 * as the number of CPUs is not known yet. We round robin the existing
 * nodes.
 */
void __init numa_init_array(void)
{
	int rr, i;

	rr = first_node(node_online_map);
	for (i = 0; i < nr_cpu_ids; i++) {
		if (early_cpu_to_node(i) != NUMA_NO_NODE)
			continue;
		numa_set_node(i, rr);
		rr = next_node(rr, node_online_map);
		if (rr == MAX_NUMNODES)
			rr = first_node(node_online_map);
	}
}

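/* Pick the online node with the smallest node_distance() to @node. */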
static __init int find_near_online_node(int node)
{
	int n, val;
	int min_val = INT_MAX;
	int best_node = -1;

	for_each_online_node(n) {
		val = node_distance(node, n);

		if (val < min_val) {
			min_val = val;
			best_node = n;
		}
	}

	return best_node;
}

/*
 * Setup early cpu_to_node.
 *
 * Populate cpu_to_node[] only if the x86_cpu_to_apicid[] and
 * apicid_to_node[] tables have valid entries for a CPU.
 * This means we skip cpu_to_node[] initialisation for NUMA
 * emulation and the fake-node case (when running a kernel compiled
 * for NUMA on a non-NUMA box), which is OK as cpu_to_node[]
 * is already initialized in a round robin manner in numa_init_array(),
 * prior to this call, and that initialization is good enough
 * for the fake NUMA cases.
 *
 * Called before the per_cpu areas are set up.
 */
void __init init_cpu_to_node(void)
{
	int cpu;
	u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);

	BUG_ON(cpu_to_apicid == NULL);

	for_each_possible_cpu(cpu) {
		int node = numa_cpu_node(cpu);

		if (node == NUMA_NO_NODE)
			continue;
		if (!node_online(node))
			node = find_near_online_node(node);
		numa_set_node(cpu, node);
	}
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

# ifndef CONFIG_NUMA_EMU
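/* Plain variants: just update the node's cpumask, no validation or logging. */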
void __cpuinit numa_add_cpu(int cpu)
{
	cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
# endif	/* !CONFIG_NUMA_EMU */

#else /* !CONFIG_DEBUG_PER_CPU_MAPS */

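/*
 * Debug version of cpu_to_node(): warns (with a backtrace) when it is
 * used before the per_cpu areas are set up.
 */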
int __cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
			"cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(__cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are set up.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!cpu_possible(cpu)) {
		printk(KERN_WARNING
			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}

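/*
 * Set or clear @cpu in @node's cpumask and log the resulting mask;
 * used by the debug versions of numa_add_cpu()/numa_remove_cpu().
 */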
void debug_cpumask_set_cpu(int cpu, int node, bool enable)
{
	struct cpumask *mask;
	char buf[64];

	if (node == NUMA_NO_NODE) {
		/* early_cpu_to_node() already emits a warning and trace */
		return;
	}
	mask = node_to_cpumask_map[node];
	if (!mask) {
		pr_err("node_to_cpumask_map[%i] NULL\n", node);
		dump_stack();
		return;
	}

	if (enable)
		cpumask_set_cpu(cpu, mask);
	else
		cpumask_clear_cpu(cpu, mask);

	cpulist_scnprintf(buf, sizeof(buf), mask);
	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
		enable ? "numa_add_cpu" : "numa_remove_cpu",
		cpu, node, buf);
}

# ifndef CONFIG_NUMA_EMU
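/* Debug variants route through debug_cpumask_set_cpu() for validation and logging. */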
static void __cpuinit numa_set_cpumask(int cpu, bool enable)
{
	debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
}

void __cpuinit numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, true);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, false);
}
# endif	/* !CONFIG_NUMA_EMU */

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const struct cpumask *cpumask_of_node(int node)
{
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): node > nr_node_ids(%d)\n",
			node, nr_node_ids);
		dump_stack();
		return cpu_none_mask;
	}
	if (node_to_cpumask_map[node] == NULL) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): no node_to_cpumask_map!\n",
			node);
		dump_stack();
		return cpu_online_mask;
	}
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);

#endif	/* !CONFIG_DEBUG_PER_CPU_MAPS */