1 #include <linux/kernel.h>
2 #include <linux/module.h>
3 #include <linux/init.h>
4 #include <linux/bootmem.h>
5 #include <linux/percpu.h>
7 #include <asm/percpu.h>
8 #include <asm/sections.h>
9 #include <asm/processor.h>
10 #include <asm/setup.h>
11 #include <asm/topology.h>
13 #ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
15 * Copy data used in early init routines from the initial arrays to the
16 * per cpu data areas. These arrays then become expendable and the
17 * *_early_ptr's are zeroed indicating that the static arrays are gone.
19 static void __init
setup_per_cpu_maps(void)
23 for_each_possible_cpu(cpu
) {
25 if (per_cpu_offset(cpu
)) {
27 per_cpu(x86_cpu_to_apicid
, cpu
) =
28 x86_cpu_to_apicid_init
[cpu
];
29 per_cpu(x86_bios_cpu_apicid
, cpu
) =
30 x86_bios_cpu_apicid_init
[cpu
];
32 per_cpu(x86_cpu_to_node_map
, cpu
) =
33 x86_cpu_to_node_map_init
[cpu
];
37 printk(KERN_NOTICE
"per_cpu_offset zero for cpu %d\n",
42 /* indicate the early static arrays will soon be gone */
43 x86_cpu_to_apicid_early_ptr
= NULL
;
44 x86_bios_cpu_apicid_early_ptr
= NULL
;
46 x86_cpu_to_node_map_early_ptr
= NULL
;
/* NOTE(review): the CONFIG_X86_32 guard matches upstream; on 64-bit the
 * per-cpu offset is kept in the PDA (cpu_pda(i)->data_offset) instead —
 * confirm against the rest of this file before dropping the guard.
 */
#ifdef CONFIG_X86_32
/*
 * Great future not-so-futuristic plan: make i386 and x86_64 do it
 * the same way
 */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
#endif
61 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
62 * Always point %gs to its beginning
64 void __init
setup_per_cpu_areas(void)
69 #ifdef CONFIG_HOTPLUG_CPU
70 prefill_possible_map();
73 /* Copy section for each CPU (we discard the original) */
74 size
= PERCPU_ENOUGH_ROOM
;
76 printk(KERN_INFO
"PERCPU: Allocating %lu bytes of per cpu data\n",
78 for_each_cpu_mask(i
, cpu_possible_map
) {
80 #ifndef CONFIG_NEED_MULTIPLE_NODES
81 ptr
= alloc_bootmem_pages(size
);
83 int node
= early_cpu_to_node(i
);
84 if (!node_online(node
) || !NODE_DATA(node
))
85 ptr
= alloc_bootmem_pages(size
);
87 ptr
= alloc_bootmem_pages_node(NODE_DATA(node
), size
);
90 panic("Cannot allocate cpu data for CPU %d\n", i
);
92 cpu_pda(i
)->data_offset
= ptr
- __per_cpu_start
;
94 __per_cpu_offset
[i
] = ptr
- __per_cpu_start
;
96 memcpy(ptr
, __per_cpu_start
, __per_cpu_end
- __per_cpu_start
);
99 /* setup percpu data maps early */
100 setup_per_cpu_maps();