Commit | Line | Data |
---|---|---|
f0fc4aff | 1 | #include <linux/bootmem.h> |
9766cdbc | 2 | #include <linux/linkage.h> |
f0fc4aff | 3 | #include <linux/bitops.h> |
9766cdbc | 4 | #include <linux/kernel.h> |
f0fc4aff | 5 | #include <linux/module.h> |
9766cdbc | 6 | #include <linux/percpu.h> |
9766cdbc | 7 | #include <linux/string.h> |
1da177e4 | 8 | #include <linux/delay.h> |
9766cdbc | 9 | #include <linux/sched.h> |
9766cdbc | 10 | #include <linux/init.h> |
0f46efeb | 11 | #include <linux/kprobes.h> |
9766cdbc | 12 | #include <linux/kgdb.h> |
1da177e4 | 13 | #include <linux/smp.h> |
9766cdbc | 14 | #include <linux/io.h> |
9766cdbc | 15 | |
9766cdbc | 16 | #include <asm/stackprotector.h> |
cdd6c482 | 17 | #include <asm/perf_event.h> |
1da177e4 | 18 | #include <asm/mmu_context.h> |
49d859d7 | 19 | #include <asm/archrandom.h> |
9766cdbc | 20 | #include <asm/hypervisor.h> |
9766cdbc | 21 | #include <asm/processor.h> |
f649e938 | 22 | #include <asm/debugreg.h> |
9766cdbc | 23 | #include <asm/sections.h> |
f40c3300 | 24 | #include <asm/vsyscall.h> |
8bdbd962 | 25 | #include <linux/topology.h> |
8bdbd962 | 26 | #include <linux/cpumask.h> |
9766cdbc | 27 | #include <asm/pgtable.h> |
60063497 | 28 | #include <linux/atomic.h> |
9766cdbc | 29 | #include <asm/proto.h> |
9766cdbc | 30 | #include <asm/setup.h> |
9766cdbc | 31 | #include <asm/apic.h> |
9766cdbc | 32 | #include <asm/desc.h> |
9766cdbc | 33 | #include <asm/i387.h> |
1361b83a | 34 | #include <asm/fpu-internal.h> |
27b07da7 | 35 | #include <asm/mtrr.h> |
8bdbd962 | 36 | #include <linux/numa.h> |
9766cdbc | 37 | #include <asm/asm.h> |
9766cdbc | 38 | #include <asm/cpu.h> |
a03a3e28 | 39 | #include <asm/mce.h> |
9766cdbc | 40 | #include <asm/msr.h> |
8d4a4300 | 41 | #include <asm/pat.h> |
d288e1cf | 42 | #include <asm/microcode.h> |
d288e1cf | 43 | #include <asm/microcode_intel.h> |
e641f5f5 | 44 | |
e641f5f5 | 45 | #ifdef CONFIG_X86_LOCAL_APIC |
bdbcdd48 | 46 | #include <asm/uv/uv.h> |
1da177e4 | 47 | #endif |
1da177e4 | 48 | |
1da177e4 | 49 | #include "cpu.h" |
1da177e4 | 50 | |
c2d1cec1 | 51 | /* all of these masks are initialized in setup_cpu_local_masks() */ |
c2d1cec1 | 52 | cpumask_var_t cpu_initialized_mask; |
9766cdbc | 53 | cpumask_var_t cpu_callout_mask; |
9766cdbc | 54 | cpumask_var_t cpu_callin_mask; |
c2d1cec1 | 55 | |
c2d1cec1 | 56 | /* representing cpus for which sibling maps can be computed */ |
c2d1cec1 | 57 | cpumask_var_t cpu_sibling_setup_mask; |
c2d1cec1 | 58 | |
2f2f52ba | 59 | /* correctly size the local cpu masks */ |
4369f1fb | 60 | void __init setup_cpu_local_masks(void) |
2f2f52ba | 61 | { |
2f2f52ba | 62 | alloc_bootmem_cpumask_var(&cpu_initialized_mask); |
2f2f52ba | 63 | alloc_bootmem_cpumask_var(&cpu_callin_mask); |
2f2f52ba | 64 | alloc_bootmem_cpumask_var(&cpu_callout_mask); |
2f2f52ba | 65 | alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask); |
2f2f52ba | 66 | } |
2f2f52ba | 67 | |
148f9bb8 | 68 | static void default_init(struct cpuinfo_x86 *c) |
e8055139 | 69 | { |
e8055139 | 70 | #ifdef CONFIG_X86_64 |
27c13ece | 71 | cpu_detect_cache_sizes(c); |
e8055139 | 72 | #else |
e8055139 | 73 | /* Not much we can do here... */ |
e8055139 | 74 | /* Check if at least it has cpuid */ |
e8055139 | 75 | if (c->cpuid_level == -1) { |
e8055139 | 76 | /* No cpuid. It must be an ancient CPU */ |
e8055139 | 77 | if (c->x86 == 4) |
e8055139 | 78 | strcpy(c->x86_model_id, "486"); |
e8055139 | 79 | else if (c->x86 == 3) |
e8055139 | 80 | strcpy(c->x86_model_id, "386"); |
e8055139 | 81 | } |
e8055139 | 82 | #endif |
e8055139 | 83 | } |
e8055139 | 84 | |
148f9bb8 | 85 | static const struct cpu_dev default_cpu = { |
e8055139 | 86 | .c_init = default_init, |
e8055139 | 87 | .c_vendor = "Unknown", |
e8055139 | 88 | .c_x86_vendor = X86_VENDOR_UNKNOWN, |
e8055139 | 89 | }; |
e8055139 | 90 | |
148f9bb8 | 91 | static const struct cpu_dev *this_cpu = &default_cpu; |
0a488a53 | 92 | |
06deef89 | 93 | DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { |
950ad7ff | 94 | #ifdef CONFIG_X86_64 |
06deef89 | 95 | /* |
06deef89 | 96 | * We need valid kernel segments for data and code in long mode too |
06deef89 | 97 | * IRET will check the segment types kkeil 2000/10/28 |
06deef89 | 98 | * Also sysret mandates a special GDT layout |
06deef89 | 99 | * |
9766cdbc | 100 | * TLS descriptors are currently at a different place compared to i386. |
06deef89 | 101 | * Hopefully nobody expects them at a fixed place (Wine?) |
06deef89 | 102 | */ |
1e5de182 | 103 | [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff), |
1e5de182 | 104 | [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff), |
1e5de182 | 105 | [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff), |
1e5de182 | 106 | [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff), |
1e5de182 | 107 | [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff), |
1e5de182 | 108 | [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff), |
950ad7ff | 109 | #else |
1e5de182 | 110 | [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff), |
1e5de182 | 111 | [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), |
1e5de182 | 112 | [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff), |
1e5de182 | 113 | [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff), |
bf504672 | 114 | /* |
bf504672 | 115 | * Segments used for calling PnP BIOS have byte granularity. |
bf504672 | 116 | * The code segments and data segments have fixed 64k limits, |
bf504672 | 117 | * the transfer segment sizes are set at run time. |
bf504672 | 118 | */ |
6842ef0e | 119 | /* 32-bit code */ |
1e5de182 | 120 | [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff), |
6842ef0e | 121 | /* 16-bit code */ |
1e5de182 | 122 | [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff), |
6842ef0e | 123 | /* 16-bit data */ |
1e5de182 | 124 | [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff), |
6842ef0e | 125 | /* 16-bit data */ |
1e5de182 | 126 | [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0), |
6842ef0e | 127 | /* 16-bit data */ |
1e5de182 | 128 | [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0), |
bf504672 | 129 | /* |
bf504672 | 130 | * The APM segments have byte granularity and their bases |
bf504672 | 131 | * are set at run time. All have 64k limits. |
bf504672 | 132 | */ |
6842ef0e | 133 | /* 32-bit code */ |
1e5de182 | 134 | [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff), |
bf504672 | 135 | /* 16-bit code */ |
1e5de182 | 136 | [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff), |
6842ef0e | 137 | /* data */ |
72c4d853 | 138 | [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff), |
bf504672 | 139 | |
1e5de182 | 140 | [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), |
1e5de182 | 141 | [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), |
60a5317f | 142 | GDT_STACK_CANARY_INIT |
950ad7ff | 143 | #endif |
06deef89 | 144 | } }; |
7a61d35d | 145 | EXPORT_PER_CPU_SYMBOL_GPL(gdt_page); |
ae1ee11b | 146 | |
0c752a93 | 147 | static int __init x86_xsave_setup(char *s) |
0c752a93 | 148 | { |
0c752a93 | 149 | setup_clear_cpu_cap(X86_FEATURE_XSAVE); |
6bad06b7 | 150 | setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT); |
b6f42a4a | 151 | setup_clear_cpu_cap(X86_FEATURE_XSAVES); |
c6fd893d | 152 | setup_clear_cpu_cap(X86_FEATURE_AVX); |
c6fd893d | 153 | setup_clear_cpu_cap(X86_FEATURE_AVX2); |
0c752a93 | 154 | return 1; |
0c752a93 | 155 | } |
0c752a93 | 156 | __setup("noxsave", x86_xsave_setup); |
0c752a93 | 157 | |
6bad06b7 | 158 | static int __init x86_xsaveopt_setup(char *s) |
6bad06b7 | 159 | { |
6bad06b7 | 160 | setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT); |
6bad06b7 | 161 | return 1; |
6bad06b7 | 162 | } |
6bad06b7 | 163 | __setup("noxsaveopt", x86_xsaveopt_setup); |
6bad06b7 | 164 | |
b6f42a4a | 165 | static int __init x86_xsaves_setup(char *s) |
b6f42a4a | 166 | { |
b6f42a4a | 167 | setup_clear_cpu_cap(X86_FEATURE_XSAVES); |
b6f42a4a | 168 | return 1; |
b6f42a4a | 169 | } |
b6f42a4a | 170 | __setup("noxsaves", x86_xsaves_setup); |
b6f42a4a | 171 | |
ba51dced | 172 | #ifdef CONFIG_X86_32 |
148f9bb8 | 173 | static int cachesize_override = -1; |
148f9bb8 | 174 | static int disable_x86_serial_nr = 1; |
1da177e4 | 175 | |
0a488a53 | 176 | static int __init cachesize_setup(char *str) |
0a488a53 | 177 | { |
0a488a53 | 178 | get_option(&str, &cachesize_override); |
0a488a53 | 179 | return 1; |
0a488a53 | 180 | } |
0a488a53 | 181 | __setup("cachesize=", cachesize_setup); |
0a488a53 | 182 | |
0a488a53 | 183 | static int __init x86_fxsr_setup(char *s) |
0a488a53 | 184 | { |
0a488a53 | 185 | setup_clear_cpu_cap(X86_FEATURE_FXSR); |
0a488a53 | 186 | setup_clear_cpu_cap(X86_FEATURE_XMM); |
0a488a53 | 187 | return 1; |
0a488a53 | 188 | } |
0a488a53 | 189 | __setup("nofxsr", x86_fxsr_setup); |
0a488a53 | 190 | |
0a488a53 | 191 | static int __init x86_sep_setup(char *s) |
0a488a53 | 192 | { |
0a488a53 | 193 | setup_clear_cpu_cap(X86_FEATURE_SEP); |
0a488a53 | 194 | return 1; |
0a488a53 | 195 | } |
0a488a53 | 196 | __setup("nosep", x86_sep_setup); |
0a488a53 | 197 | |
0a488a53 | 198 | /* Standard macro to see if a specific flag is changeable */ |
0a488a53 | 199 | static inline int flag_is_changeable_p(u32 flag) |
0a488a53 | 200 | { |
0a488a53 | 201 | u32 f1, f2; |
0a488a53 | 202 | |
94f6bac1 | 203 | /* |
94f6bac1 | 204 | * Cyrix and IDT cpus allow disabling of CPUID |
94f6bac1 | 205 | * so the code below may return different results |
94f6bac1 | 206 | * when it is executed before and after enabling |
94f6bac1 | 207 | * the CPUID. Add "volatile" to not allow gcc to |
94f6bac1 | 208 | * optimize the subsequent calls to this function. |
94f6bac1 | 209 | */ |
0f3fa48a | 210 | asm volatile ("pushfl \n\t" |
0f3fa48a | 211 | "pushfl \n\t" |
0f3fa48a | 212 | "popl %0 \n\t" |
0f3fa48a | 213 | "movl %0, %1 \n\t" |
0f3fa48a | 214 | "xorl %2, %0 \n\t" |
0f3fa48a | 215 | "pushl %0 \n\t" |
0f3fa48a | 216 | "popfl \n\t" |
0f3fa48a | 217 | "pushfl \n\t" |
0f3fa48a | 218 | "popl %0 \n\t" |
0f3fa48a | 219 | "popfl \n\t" |
0f3fa48a | 220 | |
94f6bac1 | 221 | : "=&r" (f1), "=&r" (f2) |
94f6bac1 | 222 | : "ir" (flag)); |
0a488a53 | 223 | |
0a488a53 | 224 | return ((f1^f2) & flag) != 0; |
0a488a53 | 225 | } |
0a488a53 | 226 | |
227 | /* Probe for the CPUID instruction */ | |
148f9bb8 | 228 | int have_cpuid_p(void) |
0a488a53 | 229 | { |
0a488a53 | 230 | return flag_is_changeable_p(X86_EFLAGS_ID); |
0a488a53 | 231 | } |
0a488a53 | 232 | |
148f9bb8 | 233 | static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c) |
0a488a53 | 234 | { |
0f3fa48a | 235 | unsigned long lo, hi; |
0f3fa48a | 236 | |
0f3fa48a | 237 | if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr) |
0f3fa48a | 238 | return; |
0f3fa48a | 239 | |
0f3fa48a | 240 | /* Disable processor serial number: */ |
0f3fa48a | 241 | |
0f3fa48a | 242 | rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi); |
0f3fa48a | 243 | lo |= 0x200000; |
0f3fa48a | 244 | wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi); |
0f3fa48a | 245 | |
0f3fa48a | 246 | printk(KERN_NOTICE "CPU serial number disabled.\n"); |
0f3fa48a | 247 | clear_cpu_cap(c, X86_FEATURE_PN); |
0f3fa48a | 248 | |
0f3fa48a | 249 | /* Disabling the serial number may affect the cpuid level */ |
0f3fa48a | 250 | c->cpuid_level = cpuid_eax(0); |
0a488a53 | 251 | } |
0a488a53 | 252 | |
0a488a53 | 253 | static int __init x86_serial_nr_setup(char *s) |
0a488a53 | 254 | { |
0a488a53 | 255 | disable_x86_serial_nr = 0; |
0a488a53 | 256 | return 1; |
0a488a53 | 257 | } |
0a488a53 | 258 | __setup("serialnumber", x86_serial_nr_setup); |
ba51dced | 259 | #else |
102bbe3a | 260 | static inline int flag_is_changeable_p(u32 flag) |
102bbe3a | 261 | { |
102bbe3a | 262 | return 1; |
102bbe3a | 263 | } |
102bbe3a | 264 | static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c) |
102bbe3a | 265 | { |
102bbe3a | 266 | } |
ba51dced | 267 | #endif |
0a488a53 | 268 | |
de5397ad FY |
269 | static __init int setup_disable_smep(char *arg) |
270 | { | |
b2cc2a07 | 271 | setup_clear_cpu_cap(X86_FEATURE_SMEP); |
de5397ad FY |
272 | return 1; |
273 | } | |
274 | __setup("nosmep", setup_disable_smep); | |
275 | ||
b2cc2a07 | 276 | static __always_inline void setup_smep(struct cpuinfo_x86 *c) |
de5397ad | 277 | { |
b2cc2a07 PA |
278 | if (cpu_has(c, X86_FEATURE_SMEP)) |
279 | set_in_cr4(X86_CR4_SMEP); | |
de5397ad FY |
280 | } |
281 | ||
52b6179a PA |
282 | static __init int setup_disable_smap(char *arg) |
283 | { | |
b2cc2a07 | 284 | setup_clear_cpu_cap(X86_FEATURE_SMAP); |
52b6179a PA |
285 | return 1; |
286 | } | |
287 | __setup("nosmap", setup_disable_smap); | |
288 | ||
b2cc2a07 PA |
289 | static __always_inline void setup_smap(struct cpuinfo_x86 *c) |
290 | { | |
291 | unsigned long eflags; | |
292 | ||
293 | /* This should have been cleared long ago */ | |
294 | raw_local_save_flags(eflags); | |
295 | BUG_ON(eflags & X86_EFLAGS_AC); | |
296 | ||
03bbd596 PA |
297 | if (cpu_has(c, X86_FEATURE_SMAP)) { |
298 | #ifdef CONFIG_X86_SMAP | |
b2cc2a07 | 299 | set_in_cr4(X86_CR4_SMAP); |
03bbd596 PA |
300 | #else |
301 | clear_in_cr4(X86_CR4_SMAP); | |
302 | #endif | |
303 | } | |
de5397ad FY |
304 | } |
305 | ||
b38b0665 PA |
306 | /* |
307 | * Some CPU features depend on higher CPUID levels, which may not always | |
308 | * be available due to CPUID level capping or broken virtualization | |
309 | * software. Add those features to this table to auto-disable them. | |
310 | */ | |
311 | struct cpuid_dependent_feature { | |
312 | u32 feature; | |
313 | u32 level; | |
314 | }; | |
0f3fa48a | 315 | |
148f9bb8 | 316 | static const struct cpuid_dependent_feature |
b38b0665 PA |
317 | cpuid_dependent_features[] = { |
318 | { X86_FEATURE_MWAIT, 0x00000005 }, | |
319 | { X86_FEATURE_DCA, 0x00000009 }, | |
320 | { X86_FEATURE_XSAVE, 0x0000000d }, | |
321 | { 0, 0 } | |
322 | }; | |
323 | ||
148f9bb8 | 324 | static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn) |
b38b0665 PA |
325 | { |
326 | const struct cpuid_dependent_feature *df; | |
9766cdbc | 327 | |
b38b0665 | 328 | for (df = cpuid_dependent_features; df->feature; df++) { |
0f3fa48a IM |
329 | |
330 | if (!cpu_has(c, df->feature)) | |
331 | continue; | |
b38b0665 PA |
332 | /* |
333 | * Note: cpuid_level is set to -1 if unavailable, but | |
334 | * extended_extended_level is set to 0 if unavailable | |
335 | * and the legitimate extended levels are all negative | |
336 | * when signed; hence the weird messing around with | |
337 | * signs here... | |
338 | */ | |
0f3fa48a | 339 | if (!((s32)df->level < 0 ? |
f6db44df | 340 | (u32)df->level > (u32)c->extended_cpuid_level : |
0f3fa48a IM |
341 | (s32)df->level > (s32)c->cpuid_level)) |
342 | continue; | |
343 | ||
344 | clear_cpu_cap(c, df->feature); | |
345 | if (!warn) | |
346 | continue; | |
347 | ||
348 | printk(KERN_WARNING | |
349 | "CPU: CPU feature %s disabled, no CPUID level 0x%x\n", | |
350 | x86_cap_flags[df->feature], df->level); | |
b38b0665 | 351 | } |
f6db44df | 352 | } |
b38b0665 | 353 | |
102bbe3a | 354 | /* |
102bbe3a | 355 | * Naming convention should be: <Name> [(<Codename>)] |
102bbe3a | 356 | * This table only is used unless init_<vendor>() below doesn't set it; |
0f3fa48a | 357 | * in particular, if CPUID levels 0x80000002..4 are supported, this |
0f3fa48a | 358 | * isn't used |
102bbe3a | 359 | */ |
102bbe3a | 360 | |
361 | /* Look up CPU names by table lookup. */ | |
148f9bb8 | 362 | static const char *table_lookup_model(struct cpuinfo_x86 *c) |
102bbe3a | 363 | { |
09dc68d9 JB |
364 | #ifdef CONFIG_X86_32 |
365 | const struct legacy_cpu_model_info *info; | |
102bbe3a YL |
366 | |
367 | if (c->x86_model >= 16) | |
368 | return NULL; /* Range check */ | |
369 | ||
370 | if (!this_cpu) | |
371 | return NULL; | |
372 | ||
09dc68d9 | 373 | info = this_cpu->legacy_models; |
102bbe3a | 374 | |
09dc68d9 | 375 | while (info->family) { |
102bbe3a YL |
376 | if (info->family == c->x86) |
377 | return info->model_names[c->x86_model]; | |
378 | info++; | |
379 | } | |
09dc68d9 | 380 | #endif |
102bbe3a YL |
381 | return NULL; /* Not found */ |
382 | } | |
383 | ||
148f9bb8 PG |
384 | __u32 cpu_caps_cleared[NCAPINTS]; |
385 | __u32 cpu_caps_set[NCAPINTS]; | |
7d851c8d | 386 | |
11e3a840 JF |
387 | void load_percpu_segment(int cpu) |
388 | { | |
389 | #ifdef CONFIG_X86_32 | |
390 | loadsegment(fs, __KERNEL_PERCPU); | |
391 | #else | |
392 | loadsegment(gs, 0); | |
393 | wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu)); | |
394 | #endif | |
60a5317f | 395 | load_stack_canary_segment(); |
11e3a840 JF |
396 | } |
397 | ||
0f3fa48a IM |
398 | /* |
399 | * Current gdt points %fs at the "master" per-cpu area: after this, | |
400 | * it's on the real one. | |
401 | */ | |
552be871 | 402 | void switch_to_new_gdt(int cpu) |
9d31d35b YL |
403 | { |
404 | struct desc_ptr gdt_descr; | |
405 | ||
2697fbd5 | 406 | gdt_descr.address = (long)get_cpu_gdt_table(cpu); |
9d31d35b YL |
407 | gdt_descr.size = GDT_SIZE - 1; |
408 | load_gdt(&gdt_descr); | |
2697fbd5 | 409 | /* Reload the per-cpu base */ |
11e3a840 JF |
410 | |
411 | load_percpu_segment(cpu); | |
9d31d35b YL |
412 | } |
413 | ||
148f9bb8 | 414 | static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {}; |
1da177e4 | 415 | |
148f9bb8 | 416 | static void get_model_name(struct cpuinfo_x86 *c) |
1da177e4 LT |
417 | { |
418 | unsigned int *v; | |
419 | char *p, *q; | |
420 | ||
3da99c97 | 421 | if (c->extended_cpuid_level < 0x80000004) |
1b05d60d | 422 | return; |
1da177e4 | 423 | |
0f3fa48a | 424 | v = (unsigned int *)c->x86_model_id; |
1da177e4 LT |
425 | cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]); |
426 | cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]); | |
427 | cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]); | |
428 | c->x86_model_id[48] = 0; | |
429 | ||
0f3fa48a IM |
430 | /* |
431 | * Intel chips right-justify this string for some dumb reason; | |
432 | * undo that brain damage: | |
433 | */ | |
1da177e4 | 434 | p = q = &c->x86_model_id[0]; |
34048c9e | 435 | while (*p == ' ') |
9766cdbc | 436 | p++; |
34048c9e | 437 | if (p != q) { |
9766cdbc JSR |
438 | while (*p) |
439 | *q++ = *p++; | |
440 | while (q <= &c->x86_model_id[48]) | |
441 | *q++ = '\0'; /* Zero-pad the rest */ | |
1da177e4 | 442 | } |
1da177e4 LT |
443 | } |
444 | ||
148f9bb8 | 445 | void cpu_detect_cache_sizes(struct cpuinfo_x86 *c) |
1da177e4 | 446 | { |
9d31d35b | 447 | unsigned int n, dummy, ebx, ecx, edx, l2size; |
1da177e4 | 448 | |
3da99c97 | 449 | n = c->extended_cpuid_level; |
1da177e4 LT |
450 | |
451 | if (n >= 0x80000005) { | |
9d31d35b | 452 | cpuid(0x80000005, &dummy, &ebx, &ecx, &edx); |
9d31d35b | 453 | c->x86_cache_size = (ecx>>24) + (edx>>24); |
140fc727 YL |
454 | #ifdef CONFIG_X86_64 |
455 | /* On K8 L1 TLB is inclusive, so don't count it */ | |
456 | c->x86_tlbsize = 0; | |
457 | #endif | |
1da177e4 LT |
458 | } |
459 | ||
460 | if (n < 0x80000006) /* Some chips just has a large L1. */ | |
461 | return; | |
462 | ||
0a488a53 | 463 | cpuid(0x80000006, &dummy, &ebx, &ecx, &edx); |
1da177e4 | 464 | l2size = ecx >> 16; |
34048c9e | 465 | |
140fc727 YL |
466 | #ifdef CONFIG_X86_64 |
467 | c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff); | |
468 | #else | |
1da177e4 | 469 | /* do processor-specific cache resizing */ |
09dc68d9 JB |
470 | if (this_cpu->legacy_cache_size) |
471 | l2size = this_cpu->legacy_cache_size(c, l2size); | |
1da177e4 LT |
472 | |
473 | /* Allow user to override all this if necessary. */ | |
474 | if (cachesize_override != -1) | |
475 | l2size = cachesize_override; | |
476 | ||
34048c9e | 477 | if (l2size == 0) |
1da177e4 | 478 | return; /* Again, no L2 cache is possible */ |
140fc727 | 479 | #endif |
1da177e4 LT |
480 | |
481 | c->x86_cache_size = l2size; | |
1da177e4 LT |
482 | } |
483 | ||
e0ba94f1 AS |
484 | u16 __read_mostly tlb_lli_4k[NR_INFO]; |
485 | u16 __read_mostly tlb_lli_2m[NR_INFO]; | |
486 | u16 __read_mostly tlb_lli_4m[NR_INFO]; | |
487 | u16 __read_mostly tlb_lld_4k[NR_INFO]; | |
488 | u16 __read_mostly tlb_lld_2m[NR_INFO]; | |
489 | u16 __read_mostly tlb_lld_4m[NR_INFO]; | |
dd360393 | 490 | u16 __read_mostly tlb_lld_1g[NR_INFO]; |
e0ba94f1 | 491 | |
148f9bb8 | 492 | void cpu_detect_tlb(struct cpuinfo_x86 *c) |
e0ba94f1 AS |
493 | { |
494 | if (this_cpu->c_detect_tlb) | |
495 | this_cpu->c_detect_tlb(c); | |
496 | ||
dd360393 | 497 | printk(KERN_INFO "Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n" |
e9f4e0a9 | 498 | "Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n", |
e0ba94f1 AS |
499 | tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES], |
500 | tlb_lli_4m[ENTRIES], tlb_lld_4k[ENTRIES], | |
c4211f42 | 501 | tlb_lld_2m[ENTRIES], tlb_lld_4m[ENTRIES], |
e9f4e0a9 | 502 | tlb_lld_1g[ENTRIES]); |
e0ba94f1 AS |
503 | } |
504 | ||
148f9bb8 | 505 | void detect_ht(struct cpuinfo_x86 *c) |
1da177e4 | 506 | { |
97e4db7c | 507 | #ifdef CONFIG_X86_HT |
0a488a53 YL |
508 | u32 eax, ebx, ecx, edx; |
509 | int index_msb, core_bits; | |
2eaad1fd | 510 | static bool printed; |
1da177e4 | 511 | |
0a488a53 | 512 | if (!cpu_has(c, X86_FEATURE_HT)) |
9d31d35b | 513 | return; |
1da177e4 | 514 | |
0a488a53 YL |
515 | if (cpu_has(c, X86_FEATURE_CMP_LEGACY)) |
516 | goto out; | |
1da177e4 | 517 | |
1cd78776 YL |
518 | if (cpu_has(c, X86_FEATURE_XTOPOLOGY)) |
519 | return; | |
1da177e4 | 520 | |
0a488a53 | 521 | cpuid(1, &eax, &ebx, &ecx, &edx); |
1da177e4 | 522 | |
9d31d35b YL |
523 | smp_num_siblings = (ebx & 0xff0000) >> 16; |
524 | ||
525 | if (smp_num_siblings == 1) { | |
2eaad1fd | 526 | printk_once(KERN_INFO "CPU0: Hyper-Threading is disabled\n"); |
0f3fa48a IM |
527 | goto out; |
528 | } | |
9d31d35b | 529 | |
0f3fa48a IM |
530 | if (smp_num_siblings <= 1) |
531 | goto out; | |
9d31d35b | 532 | |
0f3fa48a IM |
533 | index_msb = get_count_order(smp_num_siblings); |
534 | c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb); | |
9d31d35b | 535 | |
0f3fa48a | 536 | smp_num_siblings = smp_num_siblings / c->x86_max_cores; |
9d31d35b | 537 | |
0f3fa48a | 538 | index_msb = get_count_order(smp_num_siblings); |
9d31d35b | 539 | |
0f3fa48a | 540 | core_bits = get_count_order(c->x86_max_cores); |
9d31d35b | 541 | |
0f3fa48a IM |
542 | c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) & |
543 | ((1 << core_bits) - 1); | |
1da177e4 | 544 | |
0a488a53 | 545 | out: |
2eaad1fd | 546 | if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) { |
0a488a53 YL |
547 | printk(KERN_INFO "CPU: Physical Processor ID: %d\n", |
548 | c->phys_proc_id); | |
549 | printk(KERN_INFO "CPU: Processor Core ID: %d\n", | |
550 | c->cpu_core_id); | |
2eaad1fd | 551 | printed = 1; |
9d31d35b | 552 | } |
9d31d35b | 553 | #endif |
97e4db7c | 554 | } |
1da177e4 | 555 | |
148f9bb8 | 556 | static void get_cpu_vendor(struct cpuinfo_x86 *c) |
1da177e4 LT |
557 | { |
558 | char *v = c->x86_vendor_id; | |
0f3fa48a | 559 | int i; |
1da177e4 LT |
560 | |
561 | for (i = 0; i < X86_VENDOR_NUM; i++) { | |
10a434fc YL |
562 | if (!cpu_devs[i]) |
563 | break; | |
564 | ||
565 | if (!strcmp(v, cpu_devs[i]->c_ident[0]) || | |
566 | (cpu_devs[i]->c_ident[1] && | |
567 | !strcmp(v, cpu_devs[i]->c_ident[1]))) { | |
0f3fa48a | 568 | |
10a434fc YL |
569 | this_cpu = cpu_devs[i]; |
570 | c->x86_vendor = this_cpu->c_x86_vendor; | |
571 | return; | |
1da177e4 LT |
572 | } |
573 | } | |
10a434fc | 574 | |
a9c56953 MK |
575 | printk_once(KERN_ERR |
576 | "CPU: vendor_id '%s' unknown, using generic init.\n" \ | |
577 | "CPU: Your system may be unstable.\n", v); | |
10a434fc | 578 | |
fe38d855 CE |
579 | c->x86_vendor = X86_VENDOR_UNKNOWN; |
580 | this_cpu = &default_cpu; | |
1da177e4 LT |
581 | } |
582 | ||
148f9bb8 | 583 | void cpu_detect(struct cpuinfo_x86 *c) |
1da177e4 | 584 | { |
1da177e4 | 585 | /* Get vendor name */ |
4a148513 HH |
586 | cpuid(0x00000000, (unsigned int *)&c->cpuid_level, |
587 | (unsigned int *)&c->x86_vendor_id[0], | |
588 | (unsigned int *)&c->x86_vendor_id[8], | |
589 | (unsigned int *)&c->x86_vendor_id[4]); | |
1da177e4 | 590 | |
1da177e4 | 591 | c->x86 = 4; |
9d31d35b | 592 | /* Intel-defined flags: level 0x00000001 */ |
1da177e4 LT |
593 | if (c->cpuid_level >= 0x00000001) { |
594 | u32 junk, tfms, cap0, misc; | |
0f3fa48a | 595 | |
1da177e4 | 596 | cpuid(0x00000001, &tfms, &misc, &junk, &cap0); |
9d31d35b YL |
597 | c->x86 = (tfms >> 8) & 0xf; |
598 | c->x86_model = (tfms >> 4) & 0xf; | |
599 | c->x86_mask = tfms & 0xf; | |
0f3fa48a | 600 | |
f5f786d0 | 601 | if (c->x86 == 0xf) |
1da177e4 | 602 | c->x86 += (tfms >> 20) & 0xff; |
f5f786d0 | 603 | if (c->x86 >= 0x6) |
9d31d35b | 604 | c->x86_model += ((tfms >> 16) & 0xf) << 4; |
0f3fa48a | 605 | |
d4387bd3 | 606 | if (cap0 & (1<<19)) { |
d4387bd3 | 607 | c->x86_clflush_size = ((misc >> 8) & 0xff) * 8; |
9d31d35b | 608 | c->x86_cache_alignment = c->x86_clflush_size; |
d4387bd3 | 609 | } |
1da177e4 | 610 | } |
1da177e4 | 611 | } |
3da99c97 | 612 | |
148f9bb8 | 613 | void get_cpu_cap(struct cpuinfo_x86 *c) |
093af8d7 YL |
614 | { |
615 | u32 tfms, xlvl; | |
3da99c97 | 616 | u32 ebx; |
093af8d7 | 617 | |
3da99c97 YL |
618 | /* Intel-defined flags: level 0x00000001 */ |
619 | if (c->cpuid_level >= 0x00000001) { | |
620 | u32 capability, excap; | |
0f3fa48a | 621 | |
3da99c97 YL |
622 | cpuid(0x00000001, &tfms, &ebx, &excap, &capability); |
623 | c->x86_capability[0] = capability; | |
624 | c->x86_capability[4] = excap; | |
625 | } | |
093af8d7 | 626 | |
bdc802dc PA |
627 | /* Additional Intel-defined flags: level 0x00000007 */ |
628 | if (c->cpuid_level >= 0x00000007) { | |
629 | u32 eax, ebx, ecx, edx; | |
630 | ||
631 | cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx); | |
632 | ||
2494b030 | 633 | c->x86_capability[9] = ebx; |
bdc802dc PA |
634 | } |
635 | ||
6229ad27 FY |
636 | /* Extended state features: level 0x0000000d */ |
637 | if (c->cpuid_level >= 0x0000000d) { | |
638 | u32 eax, ebx, ecx, edx; | |
639 | ||
640 | cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx); | |
641 | ||
642 | c->x86_capability[10] = eax; | |
643 | } | |
644 | ||
3da99c97 YL |
645 | /* AMD-defined flags: level 0x80000001 */ |
646 | xlvl = cpuid_eax(0x80000000); | |
647 | c->extended_cpuid_level = xlvl; | |
0f3fa48a | 648 | |
3da99c97 YL |
649 | if ((xlvl & 0xffff0000) == 0x80000000) { |
650 | if (xlvl >= 0x80000001) { | |
651 | c->x86_capability[1] = cpuid_edx(0x80000001); | |
652 | c->x86_capability[6] = cpuid_ecx(0x80000001); | |
093af8d7 | 653 | } |
093af8d7 | 654 | } |
093af8d7 | 655 | |
5122c890 YL |
656 | if (c->extended_cpuid_level >= 0x80000008) { |
657 | u32 eax = cpuid_eax(0x80000008); | |
658 | ||
659 | c->x86_virt_bits = (eax >> 8) & 0xff; | |
660 | c->x86_phys_bits = eax & 0xff; | |
093af8d7 | 661 | } |
13c6c532 JB |
662 | #ifdef CONFIG_X86_32 |
663 | else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36)) | |
664 | c->x86_phys_bits = 36; | |
5122c890 | 665 | #endif |
e3224234 YL |
666 | |
667 | if (c->extended_cpuid_level >= 0x80000007) | |
668 | c->x86_power = cpuid_edx(0x80000007); | |
093af8d7 | 669 | |
1dedefd1 | 670 | init_scattered_cpuid_features(c); |
093af8d7 | 671 | } |
1da177e4 | 672 | |
148f9bb8 | 673 | static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c) |
aef93c8b YL |
674 | { |
675 | #ifdef CONFIG_X86_32 | |
676 | int i; | |
677 | ||
678 | /* | |
679 | * First of all, decide if this is a 486 or higher | |
680 | * It's a 486 if we can modify the AC flag | |
681 | */ | |
682 | if (flag_is_changeable_p(X86_EFLAGS_AC)) | |
683 | c->x86 = 4; | |
684 | else | |
685 | c->x86 = 3; | |
686 | ||
687 | for (i = 0; i < X86_VENDOR_NUM; i++) | |
688 | if (cpu_devs[i] && cpu_devs[i]->c_identify) { | |
689 | c->x86_vendor_id[0] = 0; | |
690 | cpu_devs[i]->c_identify(c); | |
691 | if (c->x86_vendor_id[0]) { | |
692 | get_cpu_vendor(c); | |
693 | break; | |
694 | } | |
695 | } | |
696 | #endif | |
697 | } | |
698 | ||
34048c9e PC |
699 | /* |
700 | * Do minimum CPU detection early. | |
701 | * Fields really needed: vendor, cpuid_level, family, model, mask, | |
702 | * cache alignment. | |
703 | * The others are not touched to avoid unwanted side effects. | |
704 | * | |
705 | * WARNING: this function is only called on the BP. Don't add code here | |
706 | * that is supposed to run on all CPUs. | |
707 | */ | |
3da99c97 | 708 | static void __init early_identify_cpu(struct cpuinfo_x86 *c) |
d7cd5611 | 709 | { |
6627d242 YL |
710 | #ifdef CONFIG_X86_64 |
711 | c->x86_clflush_size = 64; | |
13c6c532 JB |
712 | c->x86_phys_bits = 36; |
713 | c->x86_virt_bits = 48; | |
6627d242 | 714 | #else |
d4387bd3 | 715 | c->x86_clflush_size = 32; |
13c6c532 JB |
716 | c->x86_phys_bits = 32; |
717 | c->x86_virt_bits = 32; | |
6627d242 | 718 | #endif |
0a488a53 | 719 | c->x86_cache_alignment = c->x86_clflush_size; |
d7cd5611 | 720 | |
3da99c97 | 721 | memset(&c->x86_capability, 0, sizeof c->x86_capability); |
0a488a53 | 722 | c->extended_cpuid_level = 0; |
d7cd5611 | 723 | |
aef93c8b YL |
724 | if (!have_cpuid_p()) |
725 | identify_cpu_without_cpuid(c); | |
726 | ||
727 | /* cyrix could have cpuid enabled via c_identify()*/ | |
d7cd5611 RR |
728 | if (!have_cpuid_p()) |
729 | return; | |
730 | ||
731 | cpu_detect(c); | |
3da99c97 | 732 | get_cpu_vendor(c); |
3da99c97 | 733 | get_cpu_cap(c); |
60e019eb | 734 | fpu_detect(c); |
12cf105c | 735 | |
10a434fc YL |
736 | if (this_cpu->c_early_init) |
737 | this_cpu->c_early_init(c); | |
093af8d7 | 738 | |
f6e9456c | 739 | c->cpu_index = 0; |
b38b0665 | 740 | filter_cpuid_features(c, false); |
de5397ad | 741 | |
a110b5ec BP |
742 | if (this_cpu->c_bsp_init) |
743 | this_cpu->c_bsp_init(c); | |
c3b83598 BP |
744 | |
745 | setup_force_cpu_cap(X86_FEATURE_ALWAYS); | |
d7cd5611 RR |
746 | } |
747 | ||
9d31d35b YL |
748 | void __init early_cpu_init(void) |
749 | { | |
02dde8b4 | 750 | const struct cpu_dev *const *cdev; |
10a434fc YL |
751 | int count = 0; |
752 | ||
ac23f253 | 753 | #ifdef CONFIG_PROCESSOR_SELECT |
9766cdbc | 754 | printk(KERN_INFO "KERNEL supported cpus:\n"); |
31c997ca IM |
755 | #endif |
756 | ||
10a434fc | 757 | for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) { |
02dde8b4 | 758 | const struct cpu_dev *cpudev = *cdev; |
9d31d35b | 759 | |
10a434fc YL |
760 | if (count >= X86_VENDOR_NUM) |
761 | break; | |
762 | cpu_devs[count] = cpudev; | |
763 | count++; | |
764 | ||
ac23f253 | 765 | #ifdef CONFIG_PROCESSOR_SELECT |
31c997ca IM |
766 | { |
767 | unsigned int j; | |
768 | ||
769 | for (j = 0; j < 2; j++) { | |
770 | if (!cpudev->c_ident[j]) | |
771 | continue; | |
772 | printk(KERN_INFO " %s %s\n", cpudev->c_vendor, | |
773 | cpudev->c_ident[j]); | |
774 | } | |
10a434fc | 775 | } |
0388423d | 776 | #endif |
10a434fc | 777 | } |
9d31d35b | 778 | early_identify_cpu(&boot_cpu_data); |
d7cd5611 | 779 | } |
093af8d7 | 780 | |
b6734c35 | 781 | /* |
366d4a43 BP |
782 | * The NOPL instruction is supposed to exist on all CPUs of family >= 6; |
783 | * unfortunately, that's not true in practice because of early VIA | |
784 | * chips and (more importantly) broken virtualizers that are not easy | |
785 | * to detect. In the latter case it doesn't even *fail* reliably, so | |
786 | * probing for it doesn't even work. Disable it completely on 32-bit | |
ba0593bf | 787 | * unless we can find a reliable way to detect all the broken cases. |
366d4a43 | 788 | * Enable it explicitly on 64-bit for non-constant inputs of cpu_has(). |
b6734c35 | 789 | */ |
148f9bb8 | 790 | static void detect_nopl(struct cpuinfo_x86 *c) |
b6734c35 | 791 | { |
366d4a43 | 792 | #ifdef CONFIG_X86_32 |
b6734c35 | 793 | clear_cpu_cap(c, X86_FEATURE_NOPL); |
366d4a43 BP |
794 | #else |
795 | set_cpu_cap(c, X86_FEATURE_NOPL); | |
796 | #endif | |
d7cd5611 RR |
797 | } |
798 | ||
148f9bb8 | 799 | static void generic_identify(struct cpuinfo_x86 *c) |
1da177e4 | 800 | { |
aef93c8b | 801 | c->extended_cpuid_level = 0; |
1da177e4 | 802 | |
3da99c97 | 803 | if (!have_cpuid_p()) |
aef93c8b | 804 | identify_cpu_without_cpuid(c); |
1d67953f | 805 | |
aef93c8b | 806 | /* cyrix could have cpuid enabled via c_identify()*/ |
a9853dd6 | 807 | if (!have_cpuid_p()) |
aef93c8b | 808 | return; |
1da177e4 | 809 | |
3da99c97 | 810 | cpu_detect(c); |
1da177e4 | 811 | |
3da99c97 | 812 | get_cpu_vendor(c); |
1da177e4 | 813 | |
3da99c97 | 814 | get_cpu_cap(c); |
1da177e4 | 815 | |
3da99c97 YL |
816 | if (c->cpuid_level >= 0x00000001) { |
817 | c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF; | |
b89d3b3e YL |
818 | #ifdef CONFIG_X86_32 |
819 | # ifdef CONFIG_X86_HT | |
cb8cc442 | 820 | c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); |
b89d3b3e | 821 | # else |
3da99c97 | 822 | c->apicid = c->initial_apicid; |
b89d3b3e YL |
823 | # endif |
824 | #endif | |
b89d3b3e | 825 | c->phys_proc_id = c->initial_apicid; |
3da99c97 | 826 | } |
1da177e4 | 827 | |
1b05d60d | 828 | get_model_name(c); /* Default name */ |
1da177e4 | 829 | |
3da99c97 | 830 | detect_nopl(c); |
1da177e4 | 831 | } |
1da177e4 LT |
832 | |
833 | /* | |
834 | * This does the hard work of actually picking apart the CPU stuff... | |
835 | */ | |
148f9bb8 | 836 | static void identify_cpu(struct cpuinfo_x86 *c) |
1da177e4 LT |
837 | { |
838 | int i; | |
839 | ||
840 | c->loops_per_jiffy = loops_per_jiffy; | |
841 | c->x86_cache_size = -1; | |
842 | c->x86_vendor = X86_VENDOR_UNKNOWN; | |
1da177e4 LT |
843 | c->x86_model = c->x86_mask = 0; /* So far unknown... */ |
844 | c->x86_vendor_id[0] = '\0'; /* Unset */ | |
845 | c->x86_model_id[0] = '\0'; /* Unset */ | |
94605eff | 846 | c->x86_max_cores = 1; |
102bbe3a | 847 | c->x86_coreid_bits = 0; |
11fdd252 | 848 | #ifdef CONFIG_X86_64 |
102bbe3a | 849 | c->x86_clflush_size = 64; |
13c6c532 JB |
850 | c->x86_phys_bits = 36; |
851 | c->x86_virt_bits = 48; | |
102bbe3a YL |
852 | #else |
853 | c->cpuid_level = -1; /* CPUID not detected */ | |
770d132f | 854 | c->x86_clflush_size = 32; |
13c6c532 JB |
855 | c->x86_phys_bits = 32; |
856 | c->x86_virt_bits = 32; | |
102bbe3a YL |
857 | #endif |
858 | c->x86_cache_alignment = c->x86_clflush_size; | |
1da177e4 LT |
859 | memset(&c->x86_capability, 0, sizeof c->x86_capability); |
860 | ||
1da177e4 LT |
861 | generic_identify(c); |
862 | ||
3898534d | 863 | if (this_cpu->c_identify) |
1da177e4 LT |
864 | this_cpu->c_identify(c); |
865 | ||
2759c328 YL |
866 | /* Clear/Set all flags overridden by options, after probe */ |
867 | for (i = 0; i < NCAPINTS; i++) { | |
868 | c->x86_capability[i] &= ~cpu_caps_cleared[i]; | |
869 | c->x86_capability[i] |= cpu_caps_set[i]; | |
870 | } | |
871 | ||
102bbe3a | 872 | #ifdef CONFIG_X86_64 |
cb8cc442 | 873 | c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); |
102bbe3a YL |
874 | #endif |
875 | ||
1da177e4 LT |
876 | /* |
877 | * Vendor-specific initialization. In this section we | |
878 | * canonicalize the feature flags, meaning if there are | |
879 | * features a certain CPU supports which CPUID doesn't | |
880 | * tell us, CPUID claiming incorrect flags, or other bugs, | |
881 | * we handle them here. | |
882 | * | |
883 | * At the end of this section, c->x86_capability better | |
884 | * indicate the features this CPU genuinely supports! | |
885 | */ | |
886 | if (this_cpu->c_init) | |
887 | this_cpu->c_init(c); | |
888 | ||
889 | /* Disable the PN if appropriate */ | |
890 | squash_the_stupid_serial_number(c); | |
891 | ||
b2cc2a07 PA |
892 | /* Set up SMEP/SMAP */ |
893 | setup_smep(c); | |
894 | setup_smap(c); | |
895 | ||
1da177e4 | 896 | /* |
0f3fa48a IM |
897 | * The vendor-specific functions might have changed features. |
898 | * Now we do "generic changes." | |
1da177e4 LT |
899 | */ |
900 | ||
b38b0665 PA |
901 | /* Filter out anything that depends on CPUID levels we don't have */ |
902 | filter_cpuid_features(c, true); | |
903 | ||
1da177e4 | 904 | /* If the model name is still unset, do table lookup. */ |
34048c9e | 905 | if (!c->x86_model_id[0]) { |
02dde8b4 | 906 | const char *p; |
1da177e4 | 907 | p = table_lookup_model(c); |
34048c9e | 908 | if (p) |
1da177e4 LT |
909 | strcpy(c->x86_model_id, p); |
910 | else | |
911 | /* Last resort... */ | |
912 | sprintf(c->x86_model_id, "%02x/%02x", | |
54a20f8c | 913 | c->x86, c->x86_model); |
1da177e4 LT |
914 | } |
915 | ||
102bbe3a YL |
916 | #ifdef CONFIG_X86_64 |
917 | detect_ht(c); | |
918 | #endif | |
919 | ||
88b094fb | 920 | init_hypervisor(c); |
49d859d7 | 921 | x86_init_rdrand(c); |
3e0c3737 YL |
922 | |
923 | /* | |
924 | * Clear/Set all flags overridden by options, need do it |
925 | * before following smp all cpus cap AND. | |
926 | */ | |
927 | for (i = 0; i < NCAPINTS; i++) { | |
928 | c->x86_capability[i] &= ~cpu_caps_cleared[i]; | |
929 | c->x86_capability[i] |= cpu_caps_set[i]; | |
930 | } | |
931 | ||
1da177e4 LT |
932 | /* |
933 | * On SMP, boot_cpu_data holds the common feature set between | |
934 | * all CPUs; so make sure that we indicate which features are | |
935 | * common between the CPUs. The first time this routine gets | |
936 | * executed, c == &boot_cpu_data. | |
937 | */ | |
34048c9e | 938 | if (c != &boot_cpu_data) { |
1da177e4 | 939 | /* AND the already accumulated flags with these */ |
9d31d35b | 940 | for (i = 0; i < NCAPINTS; i++) |
1da177e4 | 941 | boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; |
65fc985b BP |
942 | |
943 | /* OR, i.e. replicate the bug flags */ | |
944 | for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++) | |
945 | c->x86_capability[i] |= boot_cpu_data.x86_capability[i]; | |
1da177e4 LT |
946 | } |
947 | ||
948 | /* Init Machine Check Exception if available. */ | |
5e09954a | 949 | mcheck_cpu_init(c); |
30d432df AK |
950 | |
951 | select_idle_routine(c); | |
102bbe3a | 952 | |
de2d9445 | 953 | #ifdef CONFIG_NUMA |
102bbe3a YL |
954 | numa_add_cpu(smp_processor_id()); |
955 | #endif | |
a6c4e076 | 956 | } |
31ab269a | 957 | |
e04d645f GC |
958 | #ifdef CONFIG_X86_64 |
959 | static void vgetcpu_set_mode(void) | |
960 | { | |
961 | if (cpu_has(&boot_cpu_data, X86_FEATURE_RDTSCP)) | |
962 | vgetcpu_mode = VGETCPU_RDTSCP; | |
963 | else | |
964 | vgetcpu_mode = VGETCPU_LSL; | |
965 | } | |
cfda7bb9 AL |
966 | |
967 | /* May not be __init: called during resume */ | |
968 | static void syscall32_cpu_init(void) | |
969 | { | |
970 | /* Load these always in case some future AMD CPU supports | |
971 | SYSENTER from compat mode too. */ | |
972 | wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS); | |
973 | wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL); | |
974 | wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target); | |
975 | ||
976 | wrmsrl(MSR_CSTAR, ia32_cstar_target); | |
977 | } | |
978 | #endif | |
979 | ||
980 | #ifdef CONFIG_X86_32 | |
981 | void enable_sep_cpu(void) | |
982 | { | |
983 | int cpu = get_cpu(); | |
984 | struct tss_struct *tss = &per_cpu(init_tss, cpu); | |
985 | ||
986 | if (!boot_cpu_has(X86_FEATURE_SEP)) { | |
987 | put_cpu(); | |
988 | return; | |
989 | } | |
990 | ||
991 | tss->x86_tss.ss1 = __KERNEL_CS; | |
992 | tss->x86_tss.sp1 = sizeof(struct tss_struct) + (unsigned long) tss; | |
993 | wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0); | |
994 | wrmsr(MSR_IA32_SYSENTER_ESP, tss->x86_tss.sp1, 0); | |
995 | wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) ia32_sysenter_target, 0); | |
996 | put_cpu(); | |
997 | } | |
e04d645f GC |
998 | #endif |
999 | ||
a6c4e076 JF |
1000 | void __init identify_boot_cpu(void) |
1001 | { | |
1002 | identify_cpu(&boot_cpu_data); | |
02c68a02 | 1003 | init_amd_e400_c1e_mask(); |
102bbe3a | 1004 | #ifdef CONFIG_X86_32 |
a6c4e076 | 1005 | sysenter_setup(); |
6fe940d6 | 1006 | enable_sep_cpu(); |
e04d645f GC |
1007 | #else |
1008 | vgetcpu_set_mode(); | |
102bbe3a | 1009 | #endif |
5b556332 | 1010 | cpu_detect_tlb(&boot_cpu_data); |
a6c4e076 | 1011 | } |
3b520b23 | 1012 | |
148f9bb8 | 1013 | void identify_secondary_cpu(struct cpuinfo_x86 *c) |
a6c4e076 JF |
1014 | { |
1015 | BUG_ON(c == &boot_cpu_data); | |
1016 | identify_cpu(c); | |
102bbe3a | 1017 | #ifdef CONFIG_X86_32 |
a6c4e076 | 1018 | enable_sep_cpu(); |
102bbe3a | 1019 | #endif |
a6c4e076 | 1020 | mtrr_ap_init(); |
1da177e4 LT |
1021 | } |
1022 | ||
a0854a46 | 1023 | struct msr_range { |
0f3fa48a IM |
1024 | unsigned min; |
1025 | unsigned max; | |
a0854a46 | 1026 | }; |
1da177e4 | 1027 | |
148f9bb8 | 1028 | static const struct msr_range msr_range_array[] = { |
a0854a46 YL |
1029 | { 0x00000000, 0x00000418}, |
1030 | { 0xc0000000, 0xc000040b}, | |
1031 | { 0xc0010000, 0xc0010142}, | |
1032 | { 0xc0011000, 0xc001103b}, | |
1033 | }; | |
1da177e4 | 1034 | |
148f9bb8 | 1035 | static void __print_cpu_msr(void) |
a0854a46 | 1036 | { |
0f3fa48a | 1037 | unsigned index_min, index_max; |
a0854a46 YL |
1038 | unsigned index; |
1039 | u64 val; | |
1040 | int i; | |
a0854a46 YL |
1041 | |
1042 | for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) { | |
1043 | index_min = msr_range_array[i].min; | |
1044 | index_max = msr_range_array[i].max; | |
0f3fa48a | 1045 | |
a0854a46 | 1046 | for (index = index_min; index < index_max; index++) { |
ecd431d9 | 1047 | if (rdmsrl_safe(index, &val)) |
a0854a46 YL |
1048 | continue; |
1049 | printk(KERN_INFO " MSR%08x: %016llx\n", index, val); | |
1da177e4 | 1050 | } |
a0854a46 YL |
1051 | } |
1052 | } | |
94605eff | 1053 | |
148f9bb8 | 1054 | static int show_msr; |
0f3fa48a | 1055 | |
a0854a46 YL |
1056 | static __init int setup_show_msr(char *arg) |
1057 | { | |
1058 | int num; | |
3dd9d514 | 1059 | |
a0854a46 | 1060 | get_option(&arg, &num); |
3dd9d514 | 1061 | |
a0854a46 YL |
1062 | if (num > 0) |
1063 | show_msr = num; | |
1064 | return 1; | |
1da177e4 | 1065 | } |
a0854a46 | 1066 | __setup("show_msr=", setup_show_msr); |
1da177e4 | 1067 | |
191679fd AK |
1068 | static __init int setup_noclflush(char *arg) |
1069 | { | |
840d2830 | 1070 | setup_clear_cpu_cap(X86_FEATURE_CLFLUSH); |
da4aaa7d | 1071 | setup_clear_cpu_cap(X86_FEATURE_CLFLUSHOPT); |
191679fd AK |
1072 | return 1; |
1073 | } | |
1074 | __setup("noclflush", setup_noclflush); | |
1075 | ||
148f9bb8 | 1076 | void print_cpu_info(struct cpuinfo_x86 *c) |
1da177e4 | 1077 | { |
02dde8b4 | 1078 | const char *vendor = NULL; |
1da177e4 | 1079 | |
0f3fa48a | 1080 | if (c->x86_vendor < X86_VENDOR_NUM) { |
1da177e4 | 1081 | vendor = this_cpu->c_vendor; |
0f3fa48a IM |
1082 | } else { |
1083 | if (c->cpuid_level >= 0) | |
1084 | vendor = c->x86_vendor_id; | |
1085 | } | |
1da177e4 | 1086 | |
bd32a8cf | 1087 | if (vendor && !strstr(c->x86_model_id, vendor)) |
9d31d35b | 1088 | printk(KERN_CONT "%s ", vendor); |
1da177e4 | 1089 | |
9d31d35b | 1090 | if (c->x86_model_id[0]) |
924e101a | 1091 | printk(KERN_CONT "%s", strim(c->x86_model_id)); |
1da177e4 | 1092 | else |
9d31d35b | 1093 | printk(KERN_CONT "%d86", c->x86); |
1da177e4 | 1094 | |
924e101a BP |
1095 | printk(KERN_CONT " (fam: %02x, model: %02x", c->x86, c->x86_model); |
1096 | ||
34048c9e | 1097 | if (c->x86_mask || c->cpuid_level >= 0) |
924e101a | 1098 | printk(KERN_CONT ", stepping: %02x)\n", c->x86_mask); |
1da177e4 | 1099 | else |
924e101a | 1100 | printk(KERN_CONT ")\n"); |
a0854a46 | 1101 | |
0b8b8078 | 1102 | print_cpu_msr(c); |
21c3fcf3 YL |
1103 | } |
1104 | ||
148f9bb8 | 1105 | void print_cpu_msr(struct cpuinfo_x86 *c) |
21c3fcf3 | 1106 | { |
a0854a46 | 1107 | if (c->cpu_index < show_msr) |
21c3fcf3 | 1108 | __print_cpu_msr(); |
1da177e4 LT |
1109 | } |
1110 | ||
ac72e788 AK |
1111 | static __init int setup_disablecpuid(char *arg) |
1112 | { | |
1113 | int bit; | |
0f3fa48a | 1114 | |
ac72e788 AK |
1115 | if (get_option(&arg, &bit) && bit < NCAPINTS*32) |
1116 | setup_clear_cpu_cap(bit); | |
1117 | else | |
1118 | return 0; | |
0f3fa48a | 1119 | |
ac72e788 AK |
1120 | return 1; |
1121 | } | |
1122 | __setup("clearcpuid=", setup_disablecpuid); | |
1123 | ||
198d208d SR |
1124 | DEFINE_PER_CPU(unsigned long, kernel_stack) = |
1125 | (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE; | |
1126 | EXPORT_PER_CPU_SYMBOL(kernel_stack); | |
1127 | ||
d5494d4f | 1128 | #ifdef CONFIG_X86_64 |
9ff80942 | 1129 | struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table }; |
629f4f9d SA |
1130 | struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1, |
1131 | (unsigned long) debug_idt_table }; | |
d5494d4f | 1132 | |
947e76cd | 1133 | DEFINE_PER_CPU_FIRST(union irq_stack_union, |
277d5b40 | 1134 | irq_stack_union) __aligned(PAGE_SIZE) __visible; |
0f3fa48a | 1135 | |
bdf977b3 TH |
1136 | /* |
1137 | * The following four percpu variables are hot. Align current_task to | |
1138 | * cacheline size such that all four fall in the same cacheline. | |
1139 | */ | |
1140 | DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned = | |
1141 | &init_task; | |
1142 | EXPORT_PER_CPU_SYMBOL(current_task); | |
d5494d4f | 1143 | |
bdf977b3 TH |
1144 | DEFINE_PER_CPU(char *, irq_stack_ptr) = |
1145 | init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64; | |
1146 | ||
277d5b40 | 1147 | DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1; |
d5494d4f | 1148 | |
c2daa3be PZ |
1149 | DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT; |
1150 | EXPORT_PER_CPU_SYMBOL(__preempt_count); | |
1151 | ||
7e16838d LT |
1152 | DEFINE_PER_CPU(struct task_struct *, fpu_owner_task); |
1153 | ||
0f3fa48a IM |
1154 | /* |
1155 | * Special IST stacks which the CPU switches to when it calls | |
1156 | * an IST-marked descriptor entry. Up to 7 stacks (hardware | |
1157 | * limit), all of them are 4K, except the debug stack which | |
1158 | * is 8K. | |
1159 | */ | |
1160 | static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = { | |
1161 | [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ, | |
1162 | [DEBUG_STACK - 1] = DEBUG_STKSZ | |
1163 | }; | |
1164 | ||
92d65b23 | 1165 | static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks |
3e352aa8 | 1166 | [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]); |
d5494d4f | 1167 | |
d5494d4f YL |
1168 | /* May not be marked __init: used by software suspend */ |
1169 | void syscall_init(void) | |
1da177e4 | 1170 | { |
d5494d4f YL |
1171 | /* |
1172 | * LSTAR and STAR live in a bit strange symbiosis. | |
1173 | * They both write to the same internal register. STAR allows to | |
1174 | * set CS/DS but only a 32bit target. LSTAR sets the 64bit rip. | |
1175 | */ | |
1176 | wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32); | |
1177 | wrmsrl(MSR_LSTAR, system_call); | |
1178 | wrmsrl(MSR_CSTAR, ignore_sysret); | |
03ae5768 | 1179 | |
d5494d4f YL |
1180 | #ifdef CONFIG_IA32_EMULATION |
1181 | syscall32_cpu_init(); | |
1182 | #endif | |
03ae5768 | 1183 | |
d5494d4f YL |
1184 | /* Flags to clear on syscall */ |
1185 | wrmsrl(MSR_SYSCALL_MASK, | |
63bcff2a PA |
1186 | X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF| |
1187 | X86_EFLAGS_IOPL|X86_EFLAGS_AC); | |
1da177e4 | 1188 | } |
62111195 | 1189 | |
d5494d4f YL |
1190 | /* |
1191 | * Copies of the original ist values from the tss are only accessed during | |
1192 | * debugging, no special alignment required. | |
1193 | */ | |
1194 | DEFINE_PER_CPU(struct orig_ist, orig_ist); | |
1195 | ||
228bdaa9 | 1196 | static DEFINE_PER_CPU(unsigned long, debug_stack_addr); |
42181186 | 1197 | DEFINE_PER_CPU(int, debug_stack_usage); |
228bdaa9 SR |
1198 | |
1199 | int is_debug_stack(unsigned long addr) | |
1200 | { | |
42181186 SR |
1201 | return __get_cpu_var(debug_stack_usage) || |
1202 | (addr <= __get_cpu_var(debug_stack_addr) && | |
1203 | addr > (__get_cpu_var(debug_stack_addr) - DEBUG_STKSZ)); | |
228bdaa9 | 1204 | } |
0f46efeb | 1205 | NOKPROBE_SYMBOL(is_debug_stack); |
228bdaa9 | 1206 | |
629f4f9d | 1207 | DEFINE_PER_CPU(u32, debug_idt_ctr); |
f8988175 | 1208 | |
228bdaa9 SR |
1209 | void debug_stack_set_zero(void) |
1210 | { | |
629f4f9d SA |
1211 | this_cpu_inc(debug_idt_ctr); |
1212 | load_current_idt(); | |
228bdaa9 | 1213 | } |
0f46efeb | 1214 | NOKPROBE_SYMBOL(debug_stack_set_zero); |
228bdaa9 SR |
1215 | |
1216 | void debug_stack_reset(void) | |
1217 | { | |
629f4f9d | 1218 | if (WARN_ON(!this_cpu_read(debug_idt_ctr))) |
f8988175 | 1219 | return; |
629f4f9d SA |
1220 | if (this_cpu_dec_return(debug_idt_ctr) == 0) |
1221 | load_current_idt(); | |
228bdaa9 | 1222 | } |
0f46efeb | 1223 | NOKPROBE_SYMBOL(debug_stack_reset); |
228bdaa9 | 1224 | |
0f3fa48a | 1225 | #else /* CONFIG_X86_64 */ |
d5494d4f | 1226 | |
bdf977b3 TH |
1227 | DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task; |
1228 | EXPORT_PER_CPU_SYMBOL(current_task); | |
c2daa3be PZ |
1229 | DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT; |
1230 | EXPORT_PER_CPU_SYMBOL(__preempt_count); | |
27e74da9 | 1231 | DEFINE_PER_CPU(struct task_struct *, fpu_owner_task); |
bdf977b3 | 1232 | |
60a5317f | 1233 | #ifdef CONFIG_CC_STACKPROTECTOR |
53f82452 | 1234 | DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary); |
60a5317f | 1235 | #endif |
d5494d4f | 1236 | |
0f3fa48a | 1237 | #endif /* CONFIG_X86_64 */ |
c5413fbe | 1238 | |
9766cdbc | 1239 | /* |
9766cdbc | 1240 | * Clear all 6 debug registers: |
9766cdbc | 1241 | */ |
9766cdbc | 1242 | static void clear_all_debug_regs(void) |
9766cdbc | 1243 | { |
9766cdbc | 1244 | int i; |
9766cdbc | 1245 | |
9766cdbc | 1246 | for (i = 0; i < 8; i++) { |
9766cdbc | 1247 | /* Ignore db4, db5 */ |
9766cdbc | 1248 | if ((i == 4) || (i == 5)) |
9766cdbc | 1249 | continue; |
9766cdbc | 1250 | |
9766cdbc | 1251 | set_debugreg(0, i); |
9766cdbc | 1252 | } |
9766cdbc | 1253 | } |
c5413fbe | 1254 | |
0bb9fef9 | 1255 | #ifdef CONFIG_KGDB |
0bb9fef9 | 1256 | /* |
0bb9fef9 | 1257 | * Restore debug regs if using kgdbwait and you have a kernel debugger |
0bb9fef9 | 1258 | * connection established. |
0bb9fef9 | 1259 | */ |
0bb9fef9 | 1260 | static void dbg_restore_debug_regs(void) |
0bb9fef9 | 1261 | { |
0bb9fef9 | 1262 | if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break)) |
0bb9fef9 | 1263 | arch_kgdb_ops.correct_hw_break(); |
0bb9fef9 | 1264 | } |
0bb9fef9 | 1265 | #else /* ! CONFIG_KGDB */ |
0bb9fef9 | 1266 | #define dbg_restore_debug_regs() |
0bb9fef9 | 1267 | #endif /* ! CONFIG_KGDB */ |
0bb9fef9 | 1268 | |
d2cbcc49 RR |
1269 | /* |
1270 | * cpu_init() initializes state that is per-CPU. Some data is already | |
1271 | * initialized (naturally) in the bootstrap process, such as the GDT | |
1272 | * and IDT. We reload them nevertheless, this function acts as a | |
1273 | * 'CPU state barrier', nothing should get across. | |
1ba76586 | 1274 | * A lot of state is already set up in PDA init for 64 bit |
d2cbcc49 | 1275 | */ |
1ba76586 | 1276 | #ifdef CONFIG_X86_64 |
0f3fa48a | 1277 | |
148f9bb8 | 1278 | void cpu_init(void) |
1ba76586 | 1279 | { |
0fe1e009 | 1280 | struct orig_ist *oist; |
1ba76586 | 1281 | struct task_struct *me; |
0f3fa48a IM |
1282 | struct tss_struct *t; |
1283 | unsigned long v; | |
1284 | int cpu; | |
1ba76586 YL |
1285 | int i; |
1286 | ||
e6ebf5de FY |
1287 | /* |
1288 | * Load microcode on this cpu if a valid microcode is available. | |
1289 | * This is early microcode loading procedure. | |
1290 | */ | |
1291 | load_ucode_ap(); | |
1292 | ||
0f3fa48a IM |
1293 | cpu = stack_smp_processor_id(); |
1294 | t = &per_cpu(init_tss, cpu); | |
0fe1e009 | 1295 | oist = &per_cpu(orig_ist, cpu); |
0f3fa48a | 1296 | |
e7a22c1e | 1297 | #ifdef CONFIG_NUMA |
27fd185f | 1298 | if (this_cpu_read(numa_node) == 0 && |
e534c7c5 LS |
1299 | early_cpu_to_node(cpu) != NUMA_NO_NODE) |
1300 | set_numa_node(early_cpu_to_node(cpu)); | |
e7a22c1e | 1301 | #endif |
1ba76586 YL |
1302 | |
1303 | me = current; | |
1304 | ||
c2d1cec1 | 1305 | if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) |
1ba76586 YL |
1306 | panic("CPU#%d already initialized!\n", cpu); |
1307 | ||
2eaad1fd | 1308 | pr_debug("Initializing CPU#%d\n", cpu); |
1ba76586 YL |
1309 | |
1310 | clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); | |
1311 | ||
1312 | /* | |
1313 | * Initialize the per-CPU GDT with the boot GDT, | |
1314 | * and set up the GDT descriptor: | |
1315 | */ | |
1316 | ||
552be871 | 1317 | switch_to_new_gdt(cpu); |
2697fbd5 BG |
1318 | loadsegment(fs, 0); |
1319 | ||
cf910e83 | 1320 | load_current_idt(); |
1ba76586 YL |
1321 | |
1322 | memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8); | |
1323 | syscall_init(); | |
1324 | ||
1325 | wrmsrl(MSR_FS_BASE, 0); | |
1326 | wrmsrl(MSR_KERNEL_GS_BASE, 0); | |
1327 | barrier(); | |
1328 | ||
4763ed4d | 1329 | x86_configure_nx(); |
27fd185f | 1330 | enable_x2apic(); |
1ba76586 YL |
1331 | |
1332 | /* | |
1333 | * set up and load the per-CPU TSS | |
1334 | */ | |
0fe1e009 | 1335 | if (!oist->ist[0]) { |
92d65b23 | 1336 | char *estacks = per_cpu(exception_stacks, cpu); |
0f3fa48a | 1337 | |
1ba76586 | 1338 | for (v = 0; v < N_EXCEPTION_STACKS; v++) { |
0f3fa48a | 1339 | estacks += exception_stack_sizes[v]; |
0fe1e009 | 1340 | oist->ist[v] = t->x86_tss.ist[v] = |
1ba76586 | 1341 | (unsigned long)estacks; |
228bdaa9 SR |
1342 | if (v == DEBUG_STACK-1) |
1343 | per_cpu(debug_stack_addr, cpu) = (unsigned long)estacks; | |
1ba76586 YL |
1344 | } |
1345 | } | |
1346 | ||
1347 | t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); | |
0f3fa48a | 1348 | |
1ba76586 YL |
1349 | /* |
1350 | * <= is required because the CPU will access up to | |
1351 | * 8 bits beyond the end of the IO permission bitmap. | |
1352 | */ | |
1353 | for (i = 0; i <= IO_BITMAP_LONGS; i++) | |
1354 | t->io_bitmap[i] = ~0UL; | |
1355 | ||
1356 | atomic_inc(&init_mm.mm_count); | |
1357 | me->active_mm = &init_mm; | |
8c5dfd25 | 1358 | BUG_ON(me->mm); |
1ba76586 YL |
1359 | enter_lazy_tlb(&init_mm, me); |
1360 | ||
1361 | load_sp0(t, ¤t->thread); | |
1362 | set_tss_desc(cpu, t); | |
1363 | load_TR_desc(); | |
1364 | load_LDT(&init_mm.context); | |
1365 | ||
0bb9fef9 JW |
1366 | clear_all_debug_regs(); |
1367 | dbg_restore_debug_regs(); | |
1ba76586 YL |
1368 | |
1369 | fpu_init(); | |
1370 | ||
1ba76586 YL |
1371 | if (is_uv_system()) |
1372 | uv_cpu_init(); | |
1373 | } | |
1374 | ||
1375 | #else | |
1376 | ||
148f9bb8 | 1377 | void cpu_init(void) |
9ee79a3d | 1378 | { |
d2cbcc49 RR |
1379 | int cpu = smp_processor_id(); |
1380 | struct task_struct *curr = current; | |
34048c9e | 1381 | struct tss_struct *t = &per_cpu(init_tss, cpu); |
9ee79a3d | 1382 | struct thread_struct *thread = &curr->thread; |
62111195 | 1383 | |
e6ebf5de FY |
1384 | show_ucode_info_early(); |
1385 | ||
c2d1cec1 | 1386 | if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) { |
62111195 | 1387 | printk(KERN_WARNING "CPU#%d already initialized!\n", cpu); |
9766cdbc JSR |
1388 | for (;;) |
1389 | local_irq_enable(); | |
62111195 JF |
1390 | } |
1391 | ||
1392 | printk(KERN_INFO "Initializing CPU#%d\n", cpu); | |
1393 | ||
1394 | if (cpu_has_vme || cpu_has_tsc || cpu_has_de) | |
1395 | clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); | |
62111195 | 1396 | |
cf910e83 | 1397 | load_current_idt(); |
552be871 | 1398 | switch_to_new_gdt(cpu); |
1da177e4 | 1399 | |
1da177e4 LT |
1400 | /* |
1401 | * Set up and load the per-CPU TSS and LDT | |
1402 | */ | |
1403 | atomic_inc(&init_mm.mm_count); | |
62111195 | 1404 | curr->active_mm = &init_mm; |
8c5dfd25 | 1405 | BUG_ON(curr->mm); |
62111195 | 1406 | enter_lazy_tlb(&init_mm, curr); |
1da177e4 | 1407 | |
faca6227 | 1408 | load_sp0(t, thread); |
34048c9e | 1409 | set_tss_desc(cpu, t); |
1da177e4 LT |
1410 | load_TR_desc(); |
1411 | load_LDT(&init_mm.context); | |
1412 | ||
f9a196b8 TG |
1413 | t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); |
1414 | ||
22c4e308 | 1415 | #ifdef CONFIG_DOUBLEFAULT |
1da177e4 LT |
1416 | /* Set up doublefault TSS pointer in the GDT */ |
1417 | __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss); | |
22c4e308 | 1418 | #endif |
1da177e4 | 1419 | |
9766cdbc | 1420 | clear_all_debug_regs(); |
0bb9fef9 | 1421 | dbg_restore_debug_regs(); |
1da177e4 | 1422 | |
0e49bf66 | 1423 | fpu_init(); |
1da177e4 | 1424 | } |
1ba76586 | 1425 | #endif |
5700f743 | 1426 | |
5700f743 | 1427 | #ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS |
5700f743 | 1428 | void warn_pre_alternatives(void) |
5700f743 | 1429 | { |
5700f743 | 1430 | WARN(1, "You're using static_cpu_has before alternatives have run!\n"); |
5700f743 | 1431 | } |
5700f743 | 1432 | EXPORT_SYMBOL_GPL(warn_pre_alternatives); |
5700f743 | 1433 | #endif |
4a90a99c | 1434 | |
4a90a99c | 1435 | inline bool __static_cpu_has_safe(u16 bit) |
4a90a99c | 1436 | { |
4a90a99c | 1437 | return boot_cpu_has(bit); |
4a90a99c | 1438 | } |
4a90a99c | 1439 | EXPORT_SYMBOL_GPL(__static_cpu_has_safe); |