#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(x...) printk(KERN_DEBUG x)
#else
# define DBG(x...)
#endif

/*
 * Could be inside CONFIG_HAVE_SETUP_PER_CPU_AREA with other stuff but
 * voyager wants cpu_number too.
 */
#ifdef CONFIG_SMP
DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);
#endif

#ifdef CONFIG_X86_LOCAL_APIC
unsigned int num_processors;
unsigned disabled_cpus __cpuinitdata;
/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
EXPORT_SYMBOL(boot_cpu_physical_apicid);
unsigned int max_physical_apicid;

/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;
#endif

/*
 * Map cpu index to physical APIC ID
 */
DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);

#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
#define X86_64_NUMA	1	/* (used later) */
DEFINE_PER_CPU(int, node_number) = 0;
EXPORT_PER_CPU_SYMBOL(node_number);

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

/*
 * Which logical CPUs are on which nodes
 */
cpumask_t *node_to_cpumask_map;
EXPORT_SYMBOL(node_to_cpumask_map);

/*
 * Setup node_to_cpumask_map
 */
static void __init setup_node_to_cpumask_map(void);

#else
static inline void setup_node_to_cpumask_map(void) { }
#endif

#ifdef CONFIG_X86_64

/* correctly size the local cpu masks */
static void setup_cpu_local_masks(void)
{
        alloc_bootmem_cpumask_var(&cpu_initialized_mask);
        alloc_bootmem_cpumask_var(&cpu_callin_mask);
        alloc_bootmem_cpumask_var(&cpu_callout_mask);
        alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}

#else /* CONFIG_X86_32 */

static inline void setup_cpu_local_masks(void)
{
}

#endif /* CONFIG_X86_32 */

#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA

#ifdef CONFIG_X86_64
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
        [0] = (unsigned long)__per_cpu_load,
};
#else
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
#endif
EXPORT_SYMBOL(__per_cpu_offset);

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
        ssize_t size, old_size;
        char *ptr;
        int cpu;
        unsigned long align = 1;

        /* Copy section for each CPU (we discard the original) */
        old_size = PERCPU_ENOUGH_ROOM;
        align = max_t(unsigned long, PAGE_SIZE, align);
        size = roundup(old_size, align);

        pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
                NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

        pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size);

        for_each_possible_cpu(cpu) {
#ifndef CONFIG_NEED_MULTIPLE_NODES
                ptr = __alloc_bootmem(size, align,
                                      __pa(MAX_DMA_ADDRESS));
#else
                int node = early_cpu_to_node(cpu);
                if (!node_online(node) || !NODE_DATA(node)) {
                        ptr = __alloc_bootmem(size, align,
                                              __pa(MAX_DMA_ADDRESS));
                        pr_info("cpu %d has no node %d or node-local memory\n",
                                cpu, node);
                        pr_debug("per cpu data for cpu%d at %016lx\n",
                                 cpu, __pa(ptr));
                } else {
                        ptr = __alloc_bootmem_node(NODE_DATA(node), size, align,
                                                   __pa(MAX_DMA_ADDRESS));
                        pr_debug("per cpu data for cpu%d on node%d at %016lx\n",
                                 cpu, node, __pa(ptr));
                }
#endif

                memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start);
                per_cpu_offset(cpu) = ptr - __per_cpu_start;
                per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
                per_cpu(cpu_number, cpu) = cpu;
                /*
                 * Copy data used in early init routines from the initial arrays to the
                 * per cpu data areas.  These arrays then become expendable and the
                 * *_early_ptr's are zeroed indicating that the static arrays are gone.
                 */
                per_cpu(x86_cpu_to_apicid, cpu) =
                        early_per_cpu_map(x86_cpu_to_apicid, cpu);
                per_cpu(x86_bios_cpu_apicid, cpu) =
                        early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#ifdef X86_64_NUMA
                per_cpu(x86_cpu_to_node_map, cpu) =
                        early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
#ifdef CONFIG_X86_64
                per_cpu(irq_stack_ptr, cpu) =
                        per_cpu(irq_stack_union.irq_stack, cpu) + IRQ_STACK_SIZE - 64;
                /*
                 * Up to this point, CPU0 has been using .data.init
                 * area.  Reload %gs offset for CPU0.
                 */
                if (cpu == 0)
                        load_gs_base(cpu);
#endif

                DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
        }

        /* indicate the early static arrays will soon be gone */
        early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
        early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#ifdef X86_64_NUMA
        early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

        /* Setup node to cpumask map */
        setup_node_to_cpumask_map();

        /* Setup cpu initialized, callin, callout masks */
        setup_cpu_local_masks();
}

#endif

#ifdef X86_64_NUMA

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: node_to_cpumask() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
static void __init setup_node_to_cpumask_map(void)
{
        unsigned int node, num = 0;
        cpumask_t *map;

        /* setup nr_node_ids if not done yet */
        if (nr_node_ids == MAX_NUMNODES) {
                for_each_node_mask(node, node_possible_map)
                        num = node;
                nr_node_ids = num + 1;
        }

        /* allocate the map */
        map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));
        DBG("node_to_cpumask_map at %p for %d nodes\n", map, nr_node_ids);

        pr_debug("Node to cpumask map at %p for %d nodes\n",
                 map, nr_node_ids);

        /* node_to_cpumask() will now work */
        node_to_cpumask_map = map;
}

void __cpuinit numa_set_node(int cpu, int node)
{
        int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

        /* early setting, no percpu area yet */
        if (cpu_to_node_map) {
                cpu_to_node_map[cpu] = node;
                return;
        }

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
        if (cpu >= nr_cpu_ids || !per_cpu_offset(cpu)) {
                printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
                dump_stack();
                return;
        }
#endif
        per_cpu(x86_cpu_to_node_map, cpu) = node;

        if (node != NUMA_NO_NODE)
                per_cpu(node_number, cpu) = node;
}

void __cpuinit numa_clear_node(int cpu)
{
        numa_set_node(cpu, NUMA_NO_NODE);
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

void __cpuinit numa_add_cpu(int cpu)
{
        cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
        cpu_clear(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

#else /* CONFIG_DEBUG_PER_CPU_MAPS */

/*
 * --------- debug versions of the numa functions ---------
 */
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
        int node = early_cpu_to_node(cpu);
        cpumask_t *mask;
        char buf[64];

        if (node_to_cpumask_map == NULL) {
                printk(KERN_ERR "node_to_cpumask_map NULL\n");
                dump_stack();
                return;
        }

        mask = &node_to_cpumask_map[node];
        if (enable)
                cpu_set(cpu, *mask);
        else
                cpu_clear(cpu, *mask);

        cpulist_scnprintf(buf, sizeof(buf), mask);
        printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
                enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
}

void __cpuinit numa_add_cpu(int cpu)
{
        numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
        numa_set_cpumask(cpu, 0);
}

int cpu_to_node(int cpu)
{
        if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
                printk(KERN_WARNING
                        "cpu_to_node(%d): usage too early!\n", cpu);
                dump_stack();
                return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
        }
        return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are setup.
 */
int early_cpu_to_node(int cpu)
{
        if (early_per_cpu_ptr(x86_cpu_to_node_map))
                return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

        if (!per_cpu_offset(cpu)) {
                printk(KERN_WARNING
                        "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
                dump_stack();
                return NUMA_NO_NODE;
        }
        return per_cpu(x86_cpu_to_node_map, cpu);
}

/* empty cpumask */
static const cpumask_t cpu_mask_none;

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const cpumask_t *cpumask_of_node(int node)
{
        if (node_to_cpumask_map == NULL) {
                printk(KERN_WARNING
                        "cpumask_of_node(%d): no node_to_cpumask_map!\n",
                        node);
                dump_stack();
                return (const cpumask_t *)&cpu_online_map;
        }
        if (node >= nr_node_ids) {
                printk(KERN_WARNING
                        "cpumask_of_node(%d): node > nr_node_ids(%d)\n",
                        node, nr_node_ids);
                dump_stack();
                return &cpu_mask_none;
        }
        return &node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);

/*
 * Returns a bitmask of CPUs on Node 'node'.
 *
 * Side note: this function creates the returned cpumask on the stack
 * so with a high NR_CPUS count, excessive stack space is used.  The
 * node_to_cpumask_ptr function should be used whenever possible.
 */
cpumask_t node_to_cpumask(int node)
{
        if (node_to_cpumask_map == NULL) {
                printk(KERN_WARNING
                        "node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
                dump_stack();
                return cpu_online_map;
        }
        if (node >= nr_node_ids) {
                printk(KERN_WARNING
                        "node_to_cpumask(%d): node > nr_node_ids(%d)\n",
                        node, nr_node_ids);
                dump_stack();
                return cpu_mask_none;
        }
        return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(node_to_cpumask);

/*
 * --------- end of debug versions of the numa functions ---------
 */

#endif /* CONFIG_DEBUG_PER_CPU_MAPS */

#endif /* X86_64_NUMA */