percpu: introduce pcpu_alloc_info and pcpu_group_info
arch/x86/kernel/setup_percpu.c
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/pfn.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#include <asm/cpu.h>
#include <asm/stackprotector.h>

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(x...) printk(KERN_DEBUG x)
#else
# define DBG(x...)
#endif

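/* each CPU's logical id; backs raw_smp_processor_id() on x86 */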
DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

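/*
 * Until the real percpu areas are set up, every CPU's offset points at
 * the initial static percpu copy: __per_cpu_load on x86_64, offset 0 on
 * x86_32.
 */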
#ifdef CONFIG_X86_64
#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
#else
#define BOOT_PERCPU_OFFSET 0
#endif

DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
EXPORT_PER_CPU_SYMBOL(this_cpu_off);

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
};
EXPORT_SYMBOL(__per_cpu_offset);

/*
 * On x86_64, symbols referenced from code must be reachable with 32-bit
 * relocations.  Reserve space for static percpu variables in modules so
 * that they are always served from the first chunk, which is located at
 * the percpu segment base.  On x86_32, anything can address anywhere;
 * there is no need to reserve space in the first chunk.
 */
#ifdef CONFIG_X86_64
#define PERCPU_FIRST_CHUNK_RESERVE PERCPU_MODULE_RESERVE
#else
#define PERCPU_FIRST_CHUNK_RESERVE 0
#endif

/**
 * pcpu_need_numa - determine whether percpu allocation should consider NUMA
 *
 * If NUMA is not configured or there is only one NUMA node available,
 * there is no reason to consider NUMA.  This function determines
 * whether percpu allocation should consider NUMA or not.
 *
 * RETURNS:
 * true if NUMA should be considered; otherwise, false.
 */
static bool __init pcpu_need_numa(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	pg_data_t *last = NULL;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		int node = early_cpu_to_node(cpu);

		if (node_online(node) && NODE_DATA(node) &&
		    last && last != NODE_DATA(node))
			return true;

		last = NODE_DATA(node);
	}
#endif
	return false;
}

/**
 * pcpu_alloc_bootmem - NUMA-friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size of the allocation in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
					unsigned long align)
{
	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int node = early_cpu_to_node(cpu);
	void *ptr;

	if (!node_online(node) || !NODE_DATA(node)) {
		ptr = __alloc_bootmem_nopanic(size, align, goal);
		pr_info("cpu %d has no node %d or node-local memory\n",
			cpu, node);
		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
			 cpu, size, __pa(ptr));
	} else {
		ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
						   size, align, goal);
		pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
			 "%016lx\n", cpu, size, node, __pa(ptr));
	}
	return ptr;
#else
	return __alloc_bootmem_nopanic(size, align, goal);
#endif
}

/*
 * Helpers for first chunk memory allocation
 */
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
	return pcpu_alloc_bootmem(cpu, size, align);
}

static void __init pcpu_fc_free(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}

/*
 * Large page remapping allocator
 */
#ifdef CONFIG_NEED_MULTIPLE_NODES
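/*
 * Map the unit allocated at @ptr to the vmalloc-space address @addr
 * with a single large (PMD) page; @size should be PMD_SIZE here.
 */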
static void __init pcpul_map(void *ptr, size_t size, void *addr)
{
	pmd_t *pmd, pmd_v;

	pmd = populate_extra_pmd((unsigned long)addr);
	pmd_v = pfn_pmd(page_to_pfn(virt_to_page(ptr)), PAGE_KERNEL_LARGE);
	set_pmd(pmd, pmd_v);
}

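/*
 * CPUs in the same NUMA node are considered close, everything else
 * remote; pcpu_build_alloc_info() uses this to group units by node.
 */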
static int pcpu_lpage_cpu_distance(unsigned int from, unsigned int to)
{
	if (early_cpu_to_node(from) == early_cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}

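/*
 * Set up the first chunk with the large page allocator.  @chosen is
 * true when the user explicitly selected this allocator, e.g. via the
 * percpu_alloc= boot parameter.
 */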
static ssize_t __init setup_pcpu_lpage(bool chosen)
{
	size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
	size_t dyn_size = reserve - PERCPU_FIRST_CHUNK_RESERVE;
	struct pcpu_alloc_info *ai;
	ssize_t ret;

	/* on non-NUMA, embedding is better */
	if (!chosen && !pcpu_need_numa())
		return -EINVAL;

	/* need PSE */
	if (!cpu_has_pse) {
		pr_warning("PERCPU: lpage allocator requires PSE\n");
		return -EINVAL;
	}

	/* allocate and build unit_map */
	ai = pcpu_build_alloc_info(PERCPU_FIRST_CHUNK_RESERVE, dyn_size,
				   PMD_SIZE, pcpu_lpage_cpu_distance);
	if (IS_ERR(ai)) {
		pr_warning("PERCPU: failed to build unit_map (%ld)\n",
			   PTR_ERR(ai));
		return PTR_ERR(ai);
	}

	/* do the parameters look okay? */
	if (!chosen) {
		size_t vm_size = VMALLOC_END - VMALLOC_START;
		size_t tot_size = 0;
		int group;

		for (group = 0; group < ai->nr_groups; group++)
			tot_size += ai->unit_size * ai->groups[group].nr_units;

		/* don't consume more than 20% of vmalloc area */
		if (tot_size > vm_size / 5) {
			pr_info("PERCPU: too large chunk size %zuMB for "
				"large page remap\n", tot_size >> 20);
			ret = -EINVAL;
			goto out_free;
		}
	}

	ret = pcpu_lpage_first_chunk(ai, pcpu_fc_alloc, pcpu_fc_free,
				     pcpul_map);
out_free:
	pcpu_free_alloc_info(ai);
	return ret;
}
#else
static ssize_t __init setup_pcpu_lpage(bool chosen)
{
	return -EINVAL;
}
#endif

/*
 * Embedding allocator
 *
 * The first chunk is sized to just contain the static area plus module
 * and dynamic reserves and is embedded into the linear physical mapping
 * so that it can use PMD mappings without additional TLB pressure.
 */
static ssize_t __init setup_pcpu_embed(bool chosen)
{
	size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;

	/*
	 * If large page isn't supported, there's no benefit in doing
	 * this.  Also, embedding allocation doesn't play well with
	 * NUMA.
	 */
	if (!chosen && (!cpu_has_pse || pcpu_need_numa()))
		return -EINVAL;

	return pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
				      reserve - PERCPU_FIRST_CHUNK_RESERVE);
}

/*
 * Page allocator
 *
 * Boring fallback 4k page allocator.  This allocator puts more pressure
 * on PTE TLBs but other than that behaves nicely on both UMA and NUMA.
 */
static void __init pcpup_populate_pte(unsigned long addr)
{
	populate_extra_pte(addr);
}

static ssize_t __init setup_pcpu_page(void)
{
	return pcpu_page_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
				     pcpu_fc_alloc, pcpu_fc_free,
				     pcpup_populate_pte);
}

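/*
 * On 32-bit, percpu data is addressed through a dedicated GDT segment
 * (%fs); point that segment's base at this CPU's percpu offset.  64-bit
 * addresses percpu data through %gs, whose base is loaded from an MSR,
 * so there is nothing to do here.
 */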
static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	struct desc_struct gdt;

	pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
			0x2 | DESCTYPE_S, 0x8);
	gdt.s = 1;
	write_gdt_entry(get_cpu_gdt_table(cpu),
			GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
#endif
}

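/*
 * Allocate the real percpu areas for all possible CPUs with one of the
 * first chunk allocators above and migrate early boot-time state into
 * them.
 */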
void __init setup_per_cpu_areas(void)
{
	unsigned int cpu;
	unsigned long delta;
	size_t pcpu_unit_size;
	ssize_t ret;

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	/*
	 * Allocate percpu area.  If PSE is supported, try to make use
	 * of large page mappings.  Please read comments on top of
	 * each allocator for details.
	 */
	ret = -EINVAL;
	if (pcpu_chosen_fc != PCPU_FC_AUTO) {
		if (pcpu_chosen_fc != PCPU_FC_PAGE) {
			if (pcpu_chosen_fc == PCPU_FC_LPAGE)
				ret = setup_pcpu_lpage(true);
			else
				ret = setup_pcpu_embed(true);

			if (ret < 0)
				pr_warning("PERCPU: %s allocator failed (%zd), "
					   "falling back to page size\n",
					   pcpu_fc_names[pcpu_chosen_fc], ret);
		}
	} else {
		ret = setup_pcpu_lpage(false);
		if (ret < 0)
			ret = setup_pcpu_embed(false);
	}
	if (ret < 0)
		ret = setup_pcpu_page();
	if (ret < 0)
		panic("cannot initialize percpu area (err=%zd)", ret);

	pcpu_unit_size = ret;

	/* alrighty, percpu areas up and running */
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
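		/*
		 * pcpu_unit_map[] gives each CPU's unit index within the
		 * first chunk; a CPU's offset is the distance from the
		 * static percpu section to its unit.
		 */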
		per_cpu_offset(cpu) =
			delta + pcpu_unit_map[cpu] * pcpu_unit_size;
		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
		per_cpu(cpu_number, cpu) = cpu;
		setup_percpu_segment(cpu);
		setup_stack_canary_segment(cpu);
		/*
		 * Copy data used in early init routines from the
		 * initial arrays to the per cpu data areas.  These
		 * arrays then become expendable and the *_early_ptr's
		 * are zeroed indicating that the static arrays are
		 * gone.
		 */
#ifdef CONFIG_X86_LOCAL_APIC
		per_cpu(x86_cpu_to_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
			early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#endif
#ifdef CONFIG_X86_64
		per_cpu(irq_stack_ptr, cpu) =
			per_cpu(irq_stack_union.irq_stack, cpu) +
			IRQ_STACK_SIZE - 64;
#ifdef CONFIG_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
			early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
#endif
		/*
		 * Up to this point, the boot CPU has been using .data.init
		 * area.  Reload any changed state for the boot CPU.
		 */
		if (cpu == boot_cpu_id)
			switch_to_new_gdt(cpu);
	}

	/* indicate the early static arrays will soon be gone */
#ifdef CONFIG_X86_LOCAL_APIC
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#endif
#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
	/*
	 * Make sure the boot cpu's node_number is right even when the
	 * boot cpu is on a node that has no memory installed.
	 */
	per_cpu(node_number, boot_cpu_id) = cpu_to_node(boot_cpu_id);
#endif

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();
}