#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <asm/smp.h>
#include <asm/percpu.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/topology.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>

#ifdef CONFIG_X86_LOCAL_APIC
unsigned int num_processors;
unsigned disabled_cpus __cpuinitdata;
/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
EXPORT_SYMBOL(boot_cpu_physical_apicid);

/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;
#endif

/* map cpu index to physical APIC ID */
DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);

#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
#define X86_64_NUMA 1

DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
#endif
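
/*
 * For reference: a simplified sketch of what the DEFINE_EARLY_PER_CPU
 * machinery above provides (the real macros live in <asm/percpu.h>; the
 * _SKETCH/_sketch names below are illustrative, not the kernel's exact
 * expansion).  Each variable gets two backing stores: a static boot-time
 * array indexed by CPU number, usable before the per cpu areas exist, and
 * a real per cpu variable for use afterwards.  A companion pointer selects
 * between them and is NULLed by setup_per_cpu_maps() below once the copy
 * is done.
 */
#if 0	/* illustrative sketch only */
#define DEFINE_EARLY_PER_CPU_SKETCH(_type, _name, _initvalue)		\
	DEFINE_PER_CPU(_type, _name) = _initvalue;			\
	_type _name##_early_map[NR_CPUS] __initdata =			\
				{ [0 ... NR_CPUS-1] = _initvalue };	\
	_type *_name##_early_ptr = _name##_early_map

/* readers use the early array for as long as its pointer is non-NULL */
#define early_per_cpu_sketch(_name, _cpu)				\
	(_name##_early_ptr ? _name##_early_ptr[_cpu]			\
			   : per_cpu(_name, _cpu))
#endif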

#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_X86_SMP)
/*
 * Copy data used in early init routines from the initial arrays to the
 * per cpu data areas.  These arrays then become expendable and the
 * *_early_ptr's are zeroed indicating that the static arrays are gone.
 */
static void __init setup_per_cpu_maps(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(x86_cpu_to_apicid, cpu) =
				early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
				early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#ifdef X86_64_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
				early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
	}

	/* indicate the early static arrays will soon be gone */
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#ifdef X86_64_NUMA
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif
}

#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
cpumask_t *cpumask_of_cpu_map __read_mostly;
EXPORT_SYMBOL(cpumask_of_cpu_map);

/* requires nr_cpu_ids to be initialized */
static void __init setup_cpumask_of_cpu(void)
{
	int i;

	/* alloc_bootmem zeroes memory */
	cpumask_of_cpu_map = alloc_bootmem_low(sizeof(cpumask_t) * nr_cpu_ids);
	for (i = 0; i < nr_cpu_ids; i++)
		cpu_set(i, cpumask_of_cpu_map[i]);
}
#else
static inline void setup_cpumask_of_cpu(void) { }
#endif
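
/*
 * For reference: when CONFIG_HAVE_CPUMASK_OF_CPU_MAP is set, the generic
 * cpumask_of_cpu(cpu) helper can resolve to a lookup in the table built
 * above rather than constructing a one-bit mask on the stack.  A hedged
 * usage sketch (example_pin_to_cpu is hypothetical, not a kernel API):
 */
#if 0	/* illustrative sketch only */
static void example_pin_to_cpu(struct task_struct *p, int cpu)
{
	/* cpumask_of_cpu_map[cpu] has exactly bit 'cpu' set */
	set_cpus_allowed(p, cpumask_of_cpu_map[cpu]);
}
#endif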

#ifdef CONFIG_X86_32
/*
 * Great future not-so-futuristic plan: make i386 and x86_64 do it
 * the same way
 */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
#endif

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack, tss, pgd) as per cpu data.
 * Always point %gs to its beginning.
 */
void __init setup_per_cpu_areas(void)
{
	int i, highest_cpu = 0;
	unsigned long size;

#ifdef CONFIG_HOTPLUG_CPU
	prefill_possible_map();
#endif

	/* Copy section for each CPU (we discard the original) */
	size = PERCPU_ENOUGH_ROOM;
	printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n",
	       size);

	for_each_possible_cpu(i) {
		char *ptr;
#ifndef CONFIG_NEED_MULTIPLE_NODES
		ptr = alloc_bootmem_pages(size);
#else
		int node = early_cpu_to_node(i);
		if (!node_online(node) || !NODE_DATA(node)) {
			ptr = alloc_bootmem_pages(size);
			printk(KERN_INFO
			       "cpu %d has no node %d or node-local memory\n",
			       i, node);
		} else
			ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
#endif
		if (!ptr)
			panic("Cannot allocate cpu data for CPU %d\n", i);
#ifdef CONFIG_X86_64
		cpu_pda(i)->data_offset = ptr - __per_cpu_start;
#else
		__per_cpu_offset[i] = ptr - __per_cpu_start;
#endif
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);

		highest_cpu = i;
	}

	nr_cpu_ids = highest_cpu + 1;
	printk(KERN_DEBUG "NR_CPUS: %d, nr_cpu_ids: %d\n", NR_CPUS, nr_cpu_ids);

	/* Setup percpu data maps */
	setup_per_cpu_maps();

	/* Setup cpumask_of_cpu map */
	setup_cpumask_of_cpu();
}

#endif
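
/*
 * For reference: the offsets recorded above (cpu_pda(i)->data_offset on
 * x86_64, __per_cpu_offset[i] on x86_32) are what later per_cpu() accesses
 * add to a variable's link-time address in the per cpu section.  Roughly
 * (a sketch, not the exact macro expansion):
 */
#if 0	/* illustrative sketch only */
#define per_cpu_sketch(var, cpu)					\
	(*(__typeof__(per_cpu__##var) *)((char *)&per_cpu__##var	\
					 + per_cpu_offset(cpu)))
#endif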

#ifdef X86_64_NUMA
void __cpuinit numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	if (cpu_to_node_map)
		cpu_to_node_map[cpu] = node;
	else if (per_cpu_offset(cpu))
		per_cpu(x86_cpu_to_node_map, cpu) = node;
	else
		Dprintk(KERN_INFO "Setting node for non-present cpu %d\n", cpu);
}

void __cpuinit numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}

void __cpuinit numa_add_cpu(int cpu)
{
	cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	cpu_clear(cpu, node_to_cpumask_map[cpu_to_node(cpu)]);
}
#endif /* X86_64_NUMA */
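
/*
 * For reference: a hedged sketch of how the helpers above are used during
 * CPU bring-up (example_bringup_cpu is hypothetical; real callers live in
 * the ACPI/NUMA detection and smpboot paths):
 */
#if 0	/* illustrative sketch only */
static void __cpuinit example_bringup_cpu(int cpu, int node)
{
	numa_set_node(cpu, node);	/* record the cpu -> node mapping */
	numa_add_cpu(cpu);		/* set cpu's bit in its node's cpumask */
}
#endif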

#if defined(CONFIG_DEBUG_PER_CPU_MAPS) && defined(CONFIG_X86_64)

int cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
		       "cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(cpu_to_node);

int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!per_cpu_offset(cpu)) {
		printk(KERN_WARNING
		       "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
#endif
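
/*
 * For reference: code that can run before setup_per_cpu_areas() must use
 * early_cpu_to_node(), since cpu_to_node() reads the per cpu variable and
 * warns (above) while the early map is still live.  A hedged sketch of a
 * hypothetical early caller, mirroring the node-local allocation pattern
 * of setup_per_cpu_areas() itself:
 */
#if 0	/* illustrative sketch only */
static void * __init example_node_local_alloc(int cpu, unsigned long size)
{
	int node = early_cpu_to_node(cpu);	/* safe before per cpu init */

	if (node == NUMA_NO_NODE || !node_online(node))
		return alloc_bootmem_pages(size);
	return alloc_bootmem_pages_node(NODE_DATA(node), size);
}
#endif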