Merge branch 'core/percpu' into stackprotector
[deliverable/linux.git] / arch / x86 / kernel / setup_percpu.c
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(x...) printk(KERN_DEBUG x)
#else
# define DBG(x...)
#endif
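/* DBG() compiles to nothing unless CONFIG_DEBUG_PER_CPU_MAPS is enabled */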

/*
 * This could live inside CONFIG_HAVE_SETUP_PER_CPU_AREA with the other
 * per-cpu setup code, but Voyager wants cpu_number too.
 */
#ifdef CONFIG_SMP
DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);
#endif

#ifdef CONFIG_X86_LOCAL_APIC
unsigned int num_processors;
unsigned disabled_cpus __cpuinitdata;
/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
EXPORT_SYMBOL(boot_cpu_physical_apicid);
unsigned int max_physical_apicid;

/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;
#endif

/*
 * Map cpu index to physical APIC ID
 */
DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
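/*
 * Illustrative note: early_per_cpu(x86_cpu_to_apicid, cpu) transparently
 * reads the static early map above until setup_per_cpu_maps() copies it
 * into the real per-cpu area and NULLs the early pointer.
 */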

#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
#define X86_64_NUMA 1 /* (used later) */
DEFINE_PER_CPU(int, node_number) = 0;
EXPORT_PER_CPU_SYMBOL(node_number);

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

/*
 * Which logical CPUs are on which nodes
 */
cpumask_t *node_to_cpumask_map;
EXPORT_SYMBOL(node_to_cpumask_map);

/*
 * Setup node_to_cpumask_map
 */
static void __init setup_node_to_cpumask_map(void);

#else
static inline void setup_node_to_cpumask_map(void) { }
#endif

/*
 * Define load_pda_offset() and per-cpu __pda for x86_64.
 * load_pda_offset() is responsible for loading the offset of the pda
 * into %gs.
 *
 * On SMP, the pda offset also serves as the percpu base address, so it
 * must sit at the start of the per-cpu area. To achieve this, it is
 * preallocated in vmlinux_64.lds.S directly instead of using
 * DEFINE_PER_CPU().
 */
#ifdef CONFIG_X86_64
void __cpuinit load_pda_offset(int cpu)
{
	/* Memory clobbers used to order pda/percpu accesses */
	mb();
	wrmsrl(MSR_GS_BASE, cpu_pda(cpu));
	mb();
}
#ifndef CONFIG_SMP
DEFINE_PER_CPU(struct x8664_pda, __pda);
#endif
EXPORT_PER_CPU_SYMBOL(__pda);
#endif /* CONFIG_X86_64 */

#ifdef CONFIG_X86_64

/* correctly size the local cpu masks */
static void setup_cpu_local_masks(void)
{
	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
	alloc_bootmem_cpumask_var(&cpu_callin_mask);
	alloc_bootmem_cpumask_var(&cpu_callout_mask);
	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}

#else /* CONFIG_X86_32 */

static inline void setup_cpu_local_masks(void)
{
}

#endif /* CONFIG_X86_32 */

#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
/*
 * Copy data used in early init routines from the initial arrays to the
 * per cpu data areas. These arrays then become expendable and the
 * *_early_ptr's are zeroed indicating that the static arrays are gone.
 */
static void __init setup_per_cpu_maps(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(x86_cpu_to_apicid, cpu) =
				early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
				early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#ifdef X86_64_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
				early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
	}

	/* indicate the early static arrays will soon be gone */
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#ifdef X86_64_NUMA
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif
}
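/*
 * Per-cpu base offsets, indexed by cpu number. On 64-bit, CPU0's entry
 * starts out pointing at the initial percpu image so percpu variables
 * work before setup_per_cpu_areas() has run.
 */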
#ifdef CONFIG_X86_64
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
	[0] = (unsigned long)__per_cpu_load,
};
#else
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
#endif
EXPORT_SYMBOL(__per_cpu_offset);

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
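/*
 * Allocate a per-cpu area for each possible CPU, copy the initial
 * .data.percpu section into it and record each CPU's offset in
 * __per_cpu_offset[] so that per_cpu() references resolve to it.
 */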
void __init setup_per_cpu_areas(void)
{
	ssize_t size, old_size;
	char *ptr;
	int cpu;
	unsigned long align = 1;

	/* Copy section for each CPU (we discard the original) */
	old_size = PERCPU_ENOUGH_ROOM;
	align = max_t(unsigned long, PAGE_SIZE, align);
	size = roundup(old_size, align);

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size);

	for_each_possible_cpu(cpu) {
#ifndef CONFIG_NEED_MULTIPLE_NODES
		ptr = __alloc_bootmem(size, align,
					__pa(MAX_DMA_ADDRESS));
#else
		int node = early_cpu_to_node(cpu);
		if (!node_online(node) || !NODE_DATA(node)) {
			ptr = __alloc_bootmem(size, align,
						__pa(MAX_DMA_ADDRESS));
			pr_info("cpu %d has no node %d or node-local memory\n",
				cpu, node);
			pr_debug("per cpu data for cpu%d at %016lx\n",
				 cpu, __pa(ptr));
		} else {
			ptr = __alloc_bootmem_node(NODE_DATA(node), size, align,
						__pa(MAX_DMA_ADDRESS));
			pr_debug("per cpu data for cpu%d on node%d at %016lx\n",
				 cpu, node, __pa(ptr));
		}
#endif

		memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start);
		per_cpu_offset(cpu) = ptr - __per_cpu_start;
		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
		per_cpu(cpu_number, cpu) = cpu;
#ifdef CONFIG_X86_64
		per_cpu(irq_stack_ptr, cpu) =
			(char *)per_cpu(irq_stack, cpu) + IRQ_STACK_SIZE - 64;
		/*
		 * CPU0 modified pda in the init data area, reload pda
		 * offset for CPU0 and clear the area for others.
		 */
		if (cpu == 0)
			load_pda_offset(0);
		else
			memset(cpu_pda(cpu), 0, sizeof(*cpu_pda(cpu)));
#endif

		DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
	}

	/* Setup percpu data maps */
	setup_per_cpu_maps();

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();
}

#endif

#ifdef X86_64_NUMA

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: node_to_cpumask() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node, num = 0;
	cpumask_t *map;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES) {
		for_each_node_mask(node, node_possible_map)
			num = node;
		nr_node_ids = num + 1;
	}

	/* allocate the map */
	map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));
	DBG("node_to_cpumask_map at %p for %d nodes\n", map, nr_node_ids);

	pr_debug("Node to cpumask map at %p for %d nodes\n",
		 map, nr_node_ids);

	/* node_to_cpumask() will now work */
	node_to_cpumask_map = map;
}

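/*
 * Bind cpu to node: via the early map while it still exists, otherwise
 * via the per-cpu map (plus node_number for fast this-node lookups).
 */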
void __cpuinit numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	/* early setting, no percpu area yet */
	if (cpu_to_node_map) {
		cpu_to_node_map[cpu] = node;
		return;
	}

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (cpu >= nr_cpu_ids || !per_cpu_offset(cpu)) {
		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
		dump_stack();
		return;
	}
#endif
	per_cpu(x86_cpu_to_node_map, cpu) = node;

	if (node != NUMA_NO_NODE)
		per_cpu(node_number, cpu) = node;
}

void __cpuinit numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

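/* Fast versions: callers must pass a valid cpu and an initialized map */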
void __cpuinit numa_add_cpu(int cpu)
{
	cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	cpu_clear(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

#else /* CONFIG_DEBUG_PER_CPU_MAPS */

/*
 * --------- debug versions of the numa functions ---------
 */
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
	int node = early_cpu_to_node(cpu);
	cpumask_t *mask;
	char buf[64];

	if (node_to_cpumask_map == NULL) {
		printk(KERN_ERR "node_to_cpumask_map NULL\n");
		dump_stack();
		return;
	}

	mask = &node_to_cpumask_map[node];
	if (enable)
		cpu_set(cpu, *mask);
	else
		cpu_clear(cpu, *mask);

	cpulist_scnprintf(buf, sizeof(buf), mask);
	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
	       enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
}

void __cpuinit numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, 0);
}

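/*
 * Debug version of cpu_to_node(): warns when called before the per-cpu
 * map has been populated, then falls back to the early map.
 */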
int cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
			"cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are set up.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!per_cpu_offset(cpu)) {
		printk(KERN_WARNING
			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}

/* empty cpumask */
static const cpumask_t cpu_mask_none;

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const cpumask_t *cpumask_of_node(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): no node_to_cpumask_map!\n",
			node);
		dump_stack();
		return (const cpumask_t *)&cpu_online_map;
	}
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): node >= nr_node_ids(%d)\n",
			node, nr_node_ids);
		dump_stack();
		return &cpu_mask_none;
	}
	return &node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);
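/*
 * Illustrative use:
 *	for_each_cpu_mask_nr(cpu, *cpumask_of_node(node))
 *		pr_debug("cpu %d is on node %d\n", cpu, node);
 */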

/*
 * Returns a bitmask of CPUs on Node 'node'.
 *
 * Side note: this function creates the returned cpumask on the stack,
 * so with a high NR_CPUS count, excessive stack space is used. The
 * node_to_cpumask_ptr function should be used whenever possible.
 */
cpumask_t node_to_cpumask(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
			"node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
		dump_stack();
		return cpu_online_map;
	}
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
			"node_to_cpumask(%d): node >= nr_node_ids(%d)\n",
			node, nr_node_ids);
		dump_stack();
		return cpu_mask_none;
	}
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(node_to_cpumask);

/*
 * --------- end of debug versions of the numa functions ---------
 */

#endif /* CONFIG_DEBUG_PER_CPU_MAPS */

#endif /* X86_64_NUMA */