percpu: allow non-linear / sparse cpu -> unit mapping
[deliverable/linux.git] / arch/x86/kernel/setup_percpu.c
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/pfn.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#include <asm/cpu.h>
#include <asm/stackprotector.h>

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(x...) printk(KERN_DEBUG x)
#else
# define DBG(x...)
#endif

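/* each CPU's copy holds its own index; set in setup_per_cpu_areas() below */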
DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

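/*
 * Boot-time percpu base (a note, not from the original file): x86_64
 * links percpu symbols zero-based, so the initial offset is the load
 * address of the initial percpu image (__per_cpu_load); x86_32 links
 * them normally and starts at offset 0.
 */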
#ifdef CONFIG_X86_64
#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
#else
#define BOOT_PERCPU_OFFSET 0
#endif

DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
EXPORT_PER_CPU_SYMBOL(this_cpu_off);

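/*
 * __per_cpu_offset[] is what per_cpu(var, cpu) adds to a percpu
 * symbol's address; until setup_per_cpu_areas() computes the real
 * offsets, every slot points at the boot percpu area.
 */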
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
        [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
};
EXPORT_SYMBOL(__per_cpu_offset);

/*
 * On x86_64 symbols referenced from code should be reachable using
 * 32bit relocations.  Reserve space for static percpu variables in
 * modules so that they are always served from the first chunk which
 * is located at the percpu segment base.  On x86_32, anything can
 * address anywhere.  No need to reserve space in the first chunk.
 */
#ifdef CONFIG_X86_64
#define PERCPU_FIRST_CHUNK_RESERVE PERCPU_MODULE_RESERVE
#else
#define PERCPU_FIRST_CHUNK_RESERVE 0
#endif

/**
 * pcpu_need_numa - determine percpu allocation needs to consider NUMA
 *
 * If NUMA is not configured or there is only one NUMA node available,
 * there is no reason to consider NUMA.  This function determines
 * whether percpu allocation should consider NUMA or not.
 *
 * RETURNS:
 * true if NUMA should be considered; otherwise, false.
 */
static bool __init pcpu_need_numa(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
        pg_data_t *last = NULL;
        unsigned int cpu;

        for_each_possible_cpu(cpu) {
                int node = early_cpu_to_node(cpu);

                if (node_online(node) && NODE_DATA(node) &&
                    last && last != NODE_DATA(node))
                        return true;

                last = NODE_DATA(node);
        }
#endif
        return false;
}

/**
 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: allocation size in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
                                        unsigned long align)
{
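        /*
         * Note: bootmem tries to allocate at or above the goal, so
         * preferring memory above MAX_DMA_ADDRESS keeps percpu areas
         * from eating into the small DMA zone.
         */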
        const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
        int node = early_cpu_to_node(cpu);
        void *ptr;

        if (!node_online(node) || !NODE_DATA(node)) {
                ptr = __alloc_bootmem_nopanic(size, align, goal);
                pr_info("cpu %d has no node %d or node-local memory\n",
                        cpu, node);
                pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
                         cpu, size, __pa(ptr));
        } else {
                ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
                                                   size, align, goal);
                pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
                         "%016lx\n", cpu, size, node, __pa(ptr));
        }
        return ptr;
#else
        return __alloc_bootmem_nopanic(size, align, goal);
#endif
}

/*
 * Helpers for first chunk memory allocation
 */
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size)
{
        return pcpu_alloc_bootmem(cpu, size, size);
}

static void __init pcpu_fc_free(void *ptr, size_t size)
{
        free_bootmem(__pa(ptr), size);
}

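/*
 * Three first chunk allocators follow: lpage (large page remap),
 * embed (inside the linear mapping) and 4k (page-by-page fallback);
 * setup_per_cpu_areas() tries them in that order.
 */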
/*
 * Large page remapping allocator
 */
#ifdef CONFIG_NEED_MULTIPLE_NODES
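/*
 * Map the PMD-sized block backing @ptr at virtual address @addr with
 * a single large-page entry (PAGE_KERNEL_LARGE), one PMD per unit.
 */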
static void __init pcpul_map(void *ptr, size_t size, void *addr)
{
        pmd_t *pmd, pmd_v;

        pmd = populate_extra_pmd((unsigned long)addr);
        pmd_v = pfn_pmd(page_to_pfn(virt_to_page(ptr)), PAGE_KERNEL_LARGE);
        set_pmd(pmd, pmd_v);
}

static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
{
        size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;

        if (!chosen) {
                size_t vm_size = VMALLOC_END - VMALLOC_START;
                size_t tot_size = num_possible_cpus() * PMD_SIZE;

                /* on non-NUMA, embedding is better */
                if (!pcpu_need_numa())
                        return -EINVAL;

                /* don't consume more than 20% of vmalloc area */
                if (tot_size > vm_size / 5) {
                        pr_info("PERCPU: too large chunk size %zuMB for "
                                "large page remap\n", tot_size >> 20);
                        return -EINVAL;
                }
        }

        /* need PSE */
        if (!cpu_has_pse) {
                pr_warning("PERCPU: lpage allocator requires PSE\n");
                return -EINVAL;
        }

        return pcpu_lpage_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
                                      reserve - PERCPU_FIRST_CHUNK_RESERVE,
                                      PMD_SIZE,
                                      pcpu_fc_alloc, pcpu_fc_free, pcpul_map);
}
#else
static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
{
        return -EINVAL;
}
#endif

/*
 * Embedding allocator
 *
 * The first chunk is sized to just contain the static area plus
 * module and dynamic reserves and embedded into linear physical
 * mapping so that it can use PMD mapping without additional TLB
 * pressure.
 */
static ssize_t __init setup_pcpu_embed(size_t static_size, bool chosen)
{
        size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;

        /*
         * If large page isn't supported, there's no benefit in doing
         * this.  Also, embedding allocation doesn't play well with
         * NUMA.
         */
        if (!chosen && (!cpu_has_pse || pcpu_need_numa()))
                return -EINVAL;

        return pcpu_embed_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
                                      reserve - PERCPU_FIRST_CHUNK_RESERVE);
}

/*
 * 4k allocator
 *
 * Boring fallback 4k allocator.  This allocator puts more pressure on
 * PTE TLBs but other than that behaves nicely on both UMA and NUMA.
 */
static void __init pcpu4k_populate_pte(unsigned long addr)
{
        populate_extra_pte(addr);
}

static ssize_t __init setup_pcpu_4k(size_t static_size)
{
        return pcpu_4k_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
                                   pcpu_fc_alloc, pcpu_fc_free,
                                   pcpu4k_populate_pte);
}

/* for explicit first chunk allocator selection */
static char pcpu_chosen_alloc[16] __initdata;

static int __init percpu_alloc_setup(char *str)
{
        strncpy(pcpu_chosen_alloc, str, sizeof(pcpu_chosen_alloc) - 1);
        return 0;
}
early_param("percpu_alloc", percpu_alloc_setup);

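/*
 * On 32-bit, percpu variables are addressed through the %fs segment,
 * so each CPU needs a GDT entry whose base is its percpu offset;
 * 64-bit reaches percpu data through the %gs base MSR instead, making
 * this a no-op there.
 */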
static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
        struct desc_struct gdt;

        pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
                        0x2 | DESCTYPE_S, 0x8);
        gdt.s = 1;
        write_gdt_entry(get_cpu_gdt_table(cpu),
                        GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
#endif
}

void __init setup_per_cpu_areas(void)
{
        size_t static_size = __per_cpu_end - __per_cpu_start;
        unsigned int cpu;
        unsigned long delta;
        size_t pcpu_unit_size;
        ssize_t ret;

        pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
                NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

        /*
         * Allocate percpu area.  If PSE is supported, try to make use
         * of large page mappings.  Please read comments on top of
         * each allocator for details.
         */
        ret = -EINVAL;
        if (strlen(pcpu_chosen_alloc)) {
                if (strcmp(pcpu_chosen_alloc, "4k")) {
                        if (!strcmp(pcpu_chosen_alloc, "lpage"))
                                ret = setup_pcpu_lpage(static_size, true);
                        else if (!strcmp(pcpu_chosen_alloc, "embed"))
                                ret = setup_pcpu_embed(static_size, true);
                        else
                                pr_warning("PERCPU: unknown allocator %s "
                                           "specified\n", pcpu_chosen_alloc);
                        if (ret < 0)
                                pr_warning("PERCPU: %s allocator failed (%zd), "
                                           "falling back to 4k\n",
                                           pcpu_chosen_alloc, ret);
                }
        } else {
                ret = setup_pcpu_lpage(static_size, false);
                if (ret < 0)
                        ret = setup_pcpu_embed(static_size, false);
        }
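        /* an explicit "4k" choice or any failure above falls through here */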
        if (ret < 0)
                ret = setup_pcpu_4k(static_size);
        if (ret < 0)
                panic("cannot allocate static percpu area (%zu bytes, err=%zd)",
                      static_size, ret);

        pcpu_unit_size = ret;

        /* alrighty, percpu areas up and running */
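        /*
         * Static percpu symbols are linked at __per_cpu_start but now
         * live in the first chunk at pcpu_base_addr; each CPU's offset
         * is delta plus its position in the linearly laid out units.
         */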
        delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
        for_each_possible_cpu(cpu) {
                per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;
                per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
                per_cpu(cpu_number, cpu) = cpu;
                setup_percpu_segment(cpu);
                setup_stack_canary_segment(cpu);
                /*
                 * Copy data used in early init routines from the
                 * initial arrays to the per cpu data areas.  These
                 * arrays then become expendable and the *_early_ptr's
                 * are zeroed indicating that the static arrays are
                 * gone.
                 */
#ifdef CONFIG_X86_LOCAL_APIC
                per_cpu(x86_cpu_to_apicid, cpu) =
                        early_per_cpu_map(x86_cpu_to_apicid, cpu);
                per_cpu(x86_bios_cpu_apicid, cpu) =
                        early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#endif
#ifdef CONFIG_X86_64
                per_cpu(irq_stack_ptr, cpu) =
                        per_cpu(irq_stack_union.irq_stack, cpu) +
                        IRQ_STACK_SIZE - 64;
#ifdef CONFIG_NUMA
                per_cpu(x86_cpu_to_node_map, cpu) =
                        early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
#endif
                /*
                 * Up to this point, the boot CPU has been using .data.init
                 * area.  Reload any changed state for the boot CPU.
                 */
                if (cpu == boot_cpu_id)
                        switch_to_new_gdt(cpu);
        }

        /* indicate the early static arrays will soon be gone */
#ifdef CONFIG_X86_LOCAL_APIC
        early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
        early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#endif
#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
        early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
        /*
         * Make sure the boot cpu's node_number is correct even when
         * the boot cpu sits on a node without memory installed.
         */
        per_cpu(node_number, boot_cpu_id) = cpu_to_node(boot_cpu_id);
#endif

        /* Setup node to cpumask map */
        setup_node_to_cpumask_map();

        /* Setup cpu initialized, callin, callout masks */
        setup_cpu_local_masks();
}