x86: cpu/common*.c, merge detect_ht()
arch/x86/kernel/cpu/common_64.c
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/kgdb.h>
#include <linux/topology.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <asm/i387.h>
#include <asm/msr.h>
#include <asm/io.h>
#include <asm/linkage.h>
#include <asm/mmu_context.h>
#include <asm/mtrr.h>
#include <asm/mce.h>
#include <asm/pat.h>
#include <asm/asm.h>
#include <asm/numa.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <mach_apic.h>
#include <asm/genapic.h>
#endif
#include <asm/pda.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/desc.h>
#include <asm/atomic.h>
#include <asm/proto.h>
#include <asm/sections.h>
#include <asm/setup.h>

#include "cpu.h"

static struct cpu_dev *this_cpu __cpuinitdata;

#ifdef CONFIG_X86_64
/*
 * We need valid kernel segments for data and code in long mode too;
 * IRET will check the segment types.  kkeil 2000/10/28
 * Also, sysret mandates a special GDT layout.
 */
/* The TLS descriptors are currently at a different place compared to i386.
   Hopefully nobody expects them at a fixed place (Wine?) */
DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
        [GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } },
        [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } },
        [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } },
        [GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } },
        [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } },
        [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } },
} };
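/*
 * A rough decoding of the literals above (following the standard x86
 * descriptor layout): each entry is the two 32-bit words of an 8-byte
 * segment descriptor.  The first word holds limit[15:0] and base[15:0];
 * the second holds base[23:16], the access byte (0x9b/0x93 = present,
 * DPL 0 code/data; 0xfb/0xf3 = DPL 3), limit[19:16], the flags nibble
 * (0xc = 4K granularity + 32-bit, 0xa = 4K granularity + long mode)
 * and base[31:24].
 */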
#else
DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
        [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
        [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
        [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } },
        [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff200 } } },
        /*
         * Segments used for calling PnP BIOS have byte granularity.
         * The code and data segments have fixed 64k limits;
         * the transfer segment sizes are set at run time.
         */
        /* 32-bit code */
        [GDT_ENTRY_PNPBIOS_CS32] = { { { 0x0000ffff, 0x00409a00 } } },
        /* 16-bit code */
        [GDT_ENTRY_PNPBIOS_CS16] = { { { 0x0000ffff, 0x00009a00 } } },
        /* 16-bit data */
        [GDT_ENTRY_PNPBIOS_DS] = { { { 0x0000ffff, 0x00009200 } } },
        /* 16-bit data */
        [GDT_ENTRY_PNPBIOS_TS1] = { { { 0x00000000, 0x00009200 } } },
        /* 16-bit data */
        [GDT_ENTRY_PNPBIOS_TS2] = { { { 0x00000000, 0x00009200 } } },
        /*
         * The APM segments have byte granularity and their bases
         * are set at run time.  All have 64k limits.
         */
        /* 32-bit code */
        [GDT_ENTRY_APMBIOS_BASE] = { { { 0x0000ffff, 0x00409a00 } } },
        /* 16-bit code */
        [GDT_ENTRY_APMBIOS_BASE+1] = { { { 0x0000ffff, 0x00009a00 } } },
        /* data */
        [GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } },

        [GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } },
        [GDT_ENTRY_PERCPU] = { { { 0x00000000, 0x00000000 } } },
} };
#endif
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);

#ifdef CONFIG_X86_32
static int cachesize_override __cpuinitdata = -1;
static int disable_x86_serial_nr __cpuinitdata = 1;

static int __init cachesize_setup(char *str)
{
        get_option(&str, &cachesize_override);
        return 1;
}
__setup("cachesize=", cachesize_setup);

/*
 * Naming convention should be: <Name> [(<Codename>)]
 * This table is only used if init_<vendor>() below doesn't set the
 * model name itself; in particular, it isn't used if CPUID levels
 * 0x80000002..4 are supported.
 */

/* Look up CPU names by table lookup. */
static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
{
        struct cpu_model_info *info;

        if (c->x86_model >= 16)
                return NULL;    /* Range check */

        if (!this_cpu)
                return NULL;

        info = this_cpu->c_models;

        while (info && info->family) {
                if (info->family == c->x86)
                        return info->model_names[c->x86_model];
                info++;
        }
        return NULL;            /* Not found */
}

static int __init x86_fxsr_setup(char *s)
{
        setup_clear_cpu_cap(X86_FEATURE_FXSR);
        setup_clear_cpu_cap(X86_FEATURE_XMM);
        return 1;
}
__setup("nofxsr", x86_fxsr_setup);

static int __init x86_sep_setup(char *s)
{
        setup_clear_cpu_cap(X86_FEATURE_SEP);
        return 1;
}
__setup("nosep", x86_sep_setup);

/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
        u32 f1, f2;

        asm("pushfl\n\t"
            "pushfl\n\t"
            "popl %0\n\t"
            "movl %0,%1\n\t"
            "xorl %2,%0\n\t"
            "pushl %0\n\t"
            "popfl\n\t"
            "pushfl\n\t"
            "popl %0\n\t"
            "popfl\n\t"
            : "=&r" (f1), "=&r" (f2)
            : "ir" (flag));

        return ((f1^f2) & flag) != 0;
}
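
/*
 * The asm above saves EFLAGS, toggles the requested bit, writes the
 * result back with popfl and re-reads EFLAGS; if the bit stuck, the
 * CPU lets software change it.  have_cpuid_p() below relies on this:
 * the ID flag (bit 21) is only changeable on CPUs that implement the
 * CPUID instruction.
 */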

/* Probe for the CPUID instruction */
static int __cpuinit have_cpuid_p(void)
{
        return flag_is_changeable_p(X86_EFLAGS_ID);
}

static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
        if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
                /* Disable processor serial number */
                unsigned long lo, hi;
                rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
                lo |= 0x200000;
                wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
                printk(KERN_NOTICE "CPU serial number disabled.\n");
                clear_cpu_cap(c, X86_FEATURE_PN);

                /* Disabling the serial number may affect the cpuid level */
                c->cpuid_level = cpuid_eax(0);
        }
}

static int __init x86_serial_nr_setup(char *s)
{
        disable_x86_serial_nr = 0;
        return 1;
}
__setup("serialnumber", x86_serial_nr_setup);
#else
/* Probe for the CPUID instruction */
static inline int have_cpuid_p(void)
{
        return 1;
}
#endif

__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;

/* Current gdt points %fs at the "master" per-cpu area: after this,
 * it's on the real one. */
void switch_to_new_gdt(void)
{
        struct desc_ptr gdt_descr;

        gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
        gdt_descr.size = GDT_SIZE - 1;
        load_gdt(&gdt_descr);
#ifdef CONFIG_X86_32
        asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
#endif
}

static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};

static void __cpuinit default_init(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
        display_cacheinfo(c);
#else
        /* Not much we can do here... */
        /* Check if at least it has cpuid */
        if (c->cpuid_level == -1) {
                /* No cpuid. It must be an ancient CPU */
                if (c->x86 == 4)
                        strcpy(c->x86_model_id, "486");
                else if (c->x86 == 3)
                        strcpy(c->x86_model_id, "386");
        }
#endif
}

static struct cpu_dev __cpuinitdata default_cpu = {
        .c_init = default_init,
        .c_vendor = "Unknown",
        .c_x86_vendor = X86_VENDOR_UNKNOWN,
};

int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
        unsigned int *v;
        char *p, *q;

        if (c->extended_cpuid_level < 0x80000004)
                return 0;

        v = (unsigned int *) c->x86_model_id;
        cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
        cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
        cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
        c->x86_model_id[48] = 0;
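        /*
         * CPUID leaves 0x80000002..0x80000004 each return 16 bytes of the
         * processor brand string in EAX..EDX, 48 bytes in total; the
         * explicit terminator above covers strings that use all 48 bytes.
         */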

        /* Intel chips right-justify this string for some dumb reason;
           undo that brain damage */
        p = q = &c->x86_model_id[0];
        while (*p == ' ')
                p++;
        if (p != q) {
                while (*p)
                        *q++ = *p++;
                while (q <= &c->x86_model_id[48])
                        *q++ = '\0';    /* Zero-pad the rest */
        }

        return 1;
}


void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
        unsigned int n, dummy, ebx, ecx, edx, l2size;

        n = c->extended_cpuid_level;

        if (n >= 0x80000005) {
                cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
                printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
                                edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
                c->x86_cache_size = (ecx>>24) + (edx>>24);
#ifdef CONFIG_X86_64
                /* On K8 the L1 TLB is inclusive, so don't count it */
                c->x86_tlbsize = 0;
#endif
        }

        if (n < 0x80000006)     /* Some chips just have a large L1. */
                return;

        cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
        l2size = ecx >> 16;
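        /*
         * From CPUID leaf 0x80000006 above: ECX[31:16] is the L2 size in KB
         * and ECX[7:0] the L2 line size; EBX holds the L2 TLB entry counts
         * added into x86_tlbsize below.
         */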

#ifdef CONFIG_X86_64
        c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
#else

        /* do processor-specific cache resizing */
        if (this_cpu->c_size_cache)
                l2size = this_cpu->c_size_cache(c, l2size);

        /* Allow user to override all this if necessary. */
        if (cachesize_override != -1)
                l2size = cachesize_override;

        if (l2size == 0)
                return;         /* Again, no L2 cache is possible */
#endif

        c->x86_cache_size = l2size;

        printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
                        l2size, ecx & 0xFF);
}

void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
        u32 eax, ebx, ecx, edx;
        int index_msb, core_bits;

        if (!cpu_has(c, X86_FEATURE_HT))
                return;
        if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
                goto out;

        if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
                return;

        cpuid(1, &eax, &ebx, &ecx, &edx);

        smp_num_siblings = (ebx & 0xff0000) >> 16;
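        /*
         * EBX[23:16] of CPUID leaf 1 is the (maximum) number of logical
         * processor IDs per physical package; it is only meaningful when
         * the HT feature flag checked above is set.
         */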

        if (smp_num_siblings == 1) {
                printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
        } else if (smp_num_siblings > 1) {

                if (smp_num_siblings > NR_CPUS) {
                        printk(KERN_WARNING "CPU: Unsupported number of siblings %d\n",
                                        smp_num_siblings);
                        smp_num_siblings = 1;
                        return;
                }

                index_msb = get_count_order(smp_num_siblings);
#ifdef CONFIG_X86_64
                c->phys_proc_id = phys_pkg_id(index_msb);
#else
                c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb);
#endif

                smp_num_siblings = smp_num_siblings / c->x86_max_cores;

                index_msb = get_count_order(smp_num_siblings);

                core_bits = get_count_order(c->x86_max_cores);

#ifdef CONFIG_X86_64
                c->cpu_core_id = phys_pkg_id(index_msb) &
                                               ((1 << core_bits) - 1);
#else
                c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
                                               ((1 << core_bits) - 1);
#endif
        }

out:
        if ((c->x86_max_cores * smp_num_siblings) > 1) {
                printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
                       c->phys_proc_id);
                printk(KERN_INFO "CPU: Processor Core ID: %d\n",
                       c->cpu_core_id);
        }
#endif
}

static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
        char *v = c->x86_vendor_id;
        int i;
        static int printed;

        for (i = 0; i < X86_VENDOR_NUM; i++) {
                if (!cpu_devs[i])
                        break;

                if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
                    (cpu_devs[i]->c_ident[1] &&
                     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
                        this_cpu = cpu_devs[i];
                        c->x86_vendor = this_cpu->c_x86_vendor;
                        return;
                }
        }

        if (!printed) {
                printed++;
                printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
                printk(KERN_ERR "CPU: Your system may be unstable.\n");
        }

        c->x86_vendor = X86_VENDOR_UNKNOWN;
        this_cpu = &default_cpu;
}

void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
{
        /* Get vendor name */
        cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
              (unsigned int *)&c->x86_vendor_id[0],
              (unsigned int *)&c->x86_vendor_id[8],
              (unsigned int *)&c->x86_vendor_id[4]);

        c->x86 = 4;
        /* Intel-defined flags: level 0x00000001 */
        if (c->cpuid_level >= 0x00000001) {
                u32 junk, tfms, cap0, misc;
                cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
                c->x86 = (tfms >> 8) & 0xf;
                c->x86_model = (tfms >> 4) & 0xf;
                c->x86_mask = tfms & 0xf;
                if (c->x86 == 0xf)
                        c->x86 += (tfms >> 20) & 0xff;
                if (c->x86 >= 0x6)
                        c->x86_model += ((tfms >> 16) & 0xf) << 4;
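                /*
                 * Leaf 1 EAX layout: stepping in bits 3:0, model in 7:4,
                 * family in 11:8, extended model in 19:16 and extended
                 * family in 27:20; the extended fields are folded in above
                 * for family 0xf (and family >= 6 for the model).
                 */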
                if (cap0 & (1<<19)) {
                        c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
                        c->x86_cache_alignment = c->x86_clflush_size;
                }
        }
}


static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
{
        u32 tfms, xlvl;
        u32 ebx;

        /* Intel-defined flags: level 0x00000001 */
        if (c->cpuid_level >= 0x00000001) {
                u32 capability, excap;

                cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
                c->x86_capability[0] = capability;
                c->x86_capability[4] = excap;
        }

        /* AMD-defined flags: level 0x80000001 */
        xlvl = cpuid_eax(0x80000000);
        c->extended_cpuid_level = xlvl;
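        /*
         * cpuid_eax(0x80000000) returns the highest supported extended leaf;
         * the 0xffff0000 check below guards against CPUs that do not
         * implement the extended range and return unrelated data instead.
         */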
        if ((xlvl & 0xffff0000) == 0x80000000) {
                if (xlvl >= 0x80000001) {
                        c->x86_capability[1] = cpuid_edx(0x80000001);
                        c->x86_capability[6] = cpuid_ecx(0x80000001);
                }
        }

        /* Transmeta-defined flags: level 0x80860001 */
        xlvl = cpuid_eax(0x80860000);
        if ((xlvl & 0xffff0000) == 0x80860000) {
                /* Don't set x86_cpuid_level here for now, to avoid confusion. */
                if (xlvl >= 0x80860001)
                        c->x86_capability[2] = cpuid_edx(0x80860001);
        }

        if (c->extended_cpuid_level >= 0x80000007)
                c->x86_power = cpuid_edx(0x80000007);

        if (c->extended_cpuid_level >= 0x80000008) {
                u32 eax = cpuid_eax(0x80000008);

                c->x86_virt_bits = (eax >> 8) & 0xff;
                c->x86_phys_bits = eax & 0xff;
        }
}

/*
 * Do some early cpuid on the boot CPU to get the parameters that are
 * needed before check_bugs.  Everything advanced is in identify_cpu()
 * below.
 */
static void __init early_identify_cpu(struct cpuinfo_x86 *c)
{
        c->x86_clflush_size = 64;
        c->x86_cache_alignment = c->x86_clflush_size;

        memset(&c->x86_capability, 0, sizeof c->x86_capability);

        c->extended_cpuid_level = 0;

        cpu_detect(c);

        get_cpu_vendor(c);

        get_cpu_cap(c);

        if (this_cpu->c_early_init)
                this_cpu->c_early_init(c);

        validate_pat_support(c);
}

void __init early_cpu_init(void)
{
        struct cpu_dev **cdev;
        int count = 0;

        printk("KERNEL supported cpus:\n");
        for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
                struct cpu_dev *cpudev = *cdev;
                unsigned int j;

                if (count >= X86_VENDOR_NUM)
                        break;
                cpu_devs[count] = cpudev;
                count++;

                for (j = 0; j < 2; j++) {
                        if (!cpudev->c_ident[j])
                                continue;
                        printk(" %s %s\n", cpudev->c_vendor,
                                cpudev->c_ident[j]);
                }
        }

        early_identify_cpu(&boot_cpu_data);
}

/*
 * The NOPL instruction is supposed to exist on all CPUs with
 * family >= 6; unfortunately, that's not true in practice because
 * of early VIA chips and (more importantly) broken virtualizers that
 * are not easy to detect.  Hence, probe for it based on first
 * principles.
 *
 * Note: no 64-bit chip is known to lack these, but put the code here
 * for consistency with 32 bits, and to make it utterly trivial to
 * diagnose the problem should it ever surface.
 */
static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
{
        const u32 nopl_signature = 0x888c53b1; /* Random number */
        u32 has_nopl = nopl_signature;

        clear_cpu_cap(c, X86_FEATURE_NOPL);
        if (c->x86 >= 6) {
                asm volatile("\n"
                             "1: .byte 0x0f,0x1f,0xc0\n" /* nopl %eax */
                             "2:\n"
                             "   .section .fixup,\"ax\"\n"
                             "3: xor %0,%0\n"
                             "   jmp 2b\n"
                             "   .previous\n"
                             _ASM_EXTABLE(1b,3b)
                             : "+a" (has_nopl));
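                /*
                 * If the nopl opcode faults with #UD, the exception-table
                 * fixup branch zeroes the output register, so has_nopl no
                 * longer matches the signature and the feature stays clear.
                 */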

                if (has_nopl == nopl_signature)
                        set_cpu_cap(c, X86_FEATURE_NOPL);
        }
}

static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
{
        c->extended_cpuid_level = 0;

        cpu_detect(c);

        get_cpu_vendor(c);

        get_cpu_cap(c);

        c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xff;
#ifdef CONFIG_SMP
        c->phys_proc_id = c->initial_apicid;
#endif

        if (c->extended_cpuid_level >= 0x80000004)
                get_model_name(c); /* Default name */

        init_scattered_cpuid_features(c);
        detect_nopl(c);
}

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
        int i;

        c->loops_per_jiffy = loops_per_jiffy;
        c->x86_cache_size = -1;
        c->x86_vendor = X86_VENDOR_UNKNOWN;
        c->x86_model = c->x86_mask = 0; /* So far unknown... */
        c->x86_vendor_id[0] = '\0';     /* Unset */
        c->x86_model_id[0] = '\0';      /* Unset */
        c->x86_max_cores = 1;
        c->x86_coreid_bits = 0;
        c->x86_clflush_size = 64;
        c->x86_cache_alignment = c->x86_clflush_size;
        memset(&c->x86_capability, 0, sizeof c->x86_capability);

        generic_identify(c);

        c->apicid = phys_pkg_id(0);

        /*
         * Vendor-specific initialization.  In this section we
         * canonicalize the feature flags, meaning if there are
         * features a certain CPU supports which CPUID doesn't
         * tell us, CPUID claiming incorrect flags, or other bugs,
         * we handle them here.
         *
         * At the end of this section, c->x86_capability better
         * indicate the features this CPU genuinely supports!
         */
        if (this_cpu->c_init)
                this_cpu->c_init(c);

        detect_ht(c);

        /*
         * On SMP, boot_cpu_data holds the common feature set between
         * all CPUs; so make sure that we indicate which features are
         * common between the CPUs.  The first time this routine gets
         * executed, c == &boot_cpu_data.
         */
        if (c != &boot_cpu_data) {
                /* AND the already accumulated flags with these */
                for (i = 0; i < NCAPINTS; i++)
                        boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
        }

        /* Clear all flags overridden by options */
        for (i = 0; i < NCAPINTS; i++)
                c->x86_capability[i] &= ~cleared_cpu_caps[i];

#ifdef CONFIG_X86_MCE
        mcheck_init(c);
#endif
        select_idle_routine(c);

#ifdef CONFIG_NUMA
        numa_add_cpu(smp_processor_id());
#endif

}

void __init identify_boot_cpu(void)
{
        identify_cpu(&boot_cpu_data);
}

void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
{
        BUG_ON(c == &boot_cpu_data);
        identify_cpu(c);
        mtrr_ap_init();
}

struct msr_range {
        unsigned min;
        unsigned max;
};

static struct msr_range msr_range_array[] __cpuinitdata = {
        { 0x00000000, 0x00000418 },
        { 0xc0000000, 0xc000040b },
        { 0xc0010000, 0xc0010142 },
        { 0xc0011000, 0xc001103b },
};
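
/*
 * The ranges above span the architectural MSR space (0x00000000-based)
 * plus the AMD extended (0xc0000xxx) and family-specific (0xc001xxxx)
 * MSR spaces; the print loop below simply skips any index that faults.
 */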

static void __cpuinit print_cpu_msr(void)
{
        unsigned index;
        u64 val;
        int i;
        unsigned index_min, index_max;

        for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
                index_min = msr_range_array[i].min;
                index_max = msr_range_array[i].max;
                for (index = index_min; index < index_max; index++) {
                        if (rdmsrl_amd_safe(index, &val))
                                continue;
                        printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
                }
        }
}

static int show_msr __cpuinitdata;
static __init int setup_show_msr(char *arg)
{
        int num;

        get_option(&arg, &num);

        if (num > 0)
                show_msr = num;
        return 1;
}
__setup("show_msr=", setup_show_msr);

static __init int setup_noclflush(char *arg)
{
        setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
        return 1;
}
__setup("noclflush", setup_noclflush);

void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
        if (c->x86_model_id[0])
                printk(KERN_CONT "%s", c->x86_model_id);

        if (c->x86_mask || c->cpuid_level >= 0)
                printk(KERN_CONT " stepping %02x\n", c->x86_mask);
        else
                printk(KERN_CONT "\n");

#ifdef CONFIG_SMP
        if (c->cpu_index < show_msr)
                print_cpu_msr();
#else
        if (show_msr)
                print_cpu_msr();
#endif
}

static __init int setup_disablecpuid(char *arg)
{
        int bit;
        if (get_option(&arg, &bit) && bit < NCAPINTS*32)
                setup_clear_cpu_cap(bit);
        else
                return 0;
        return 1;
}
__setup("clearcpuid=", setup_disablecpuid);

cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;

#ifdef CONFIG_X86_64
struct x8664_pda **_cpu_pda __read_mostly;
EXPORT_SYMBOL(_cpu_pda);

struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };

char boot_cpu_stack[IRQSTACKSIZE] __page_aligned_bss;

unsigned long __supported_pte_mask __read_mostly = ~0UL;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

static int do_not_nx __cpuinitdata;

/*
 * noexec=on|off
 * Control non-executable mappings for 64-bit processes.
 *
 * on   Enable (default)
 * off  Disable
 */
static int __init nonx_setup(char *str)
{
        if (!str)
                return -EINVAL;
        if (!strncmp(str, "on", 2)) {
                __supported_pte_mask |= _PAGE_NX;
                do_not_nx = 0;
        } else if (!strncmp(str, "off", 3)) {
                do_not_nx = 1;
                __supported_pte_mask &= ~_PAGE_NX;
        }
        return 0;
}
early_param("noexec", nonx_setup);

int force_personality32;

/*
 * noexec32=on|off
 * Control non-executable heap for 32-bit processes.
 * To control the stack too, use noexec=off.
 *
 * on   PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
 * off  PROT_READ implies PROT_EXEC
 */
static int __init nonx32_setup(char *str)
{
        if (!strcmp(str, "on"))
                force_personality32 &= ~READ_IMPLIES_EXEC;
        else if (!strcmp(str, "off"))
                force_personality32 |= READ_IMPLIES_EXEC;
        return 1;
}
__setup("noexec32=", nonx32_setup);

void pda_init(int cpu)
{
        struct x8664_pda *pda = cpu_pda(cpu);

        /* Set up data that may be needed in __get_free_pages early */
        loadsegment(fs, 0);
        loadsegment(gs, 0);
        /* Memory clobbers used to order PDA accesses */
        mb();
        wrmsrl(MSR_GS_BASE, pda);
        mb();
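
        /*
         * From this point on the PDA can be reached via %gs-relative
         * accesses (the pda_* accessors), since MSR_GS_BASE now points
         * at this CPU's struct x8664_pda.
         */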

        pda->cpunumber = cpu;
        pda->irqcount = -1;
        pda->kernelstack = (unsigned long)stack_thread_info() -
                                 PDA_STACKOFFSET + THREAD_SIZE;
        pda->active_mm = &init_mm;
        pda->mmu_state = 0;

        if (cpu == 0) {
                /* others are initialized in smpboot.c */
                pda->pcurrent = &init_task;
                pda->irqstackptr = boot_cpu_stack;
                pda->irqstackptr += IRQSTACKSIZE - 64;
        } else {
                if (!pda->irqstackptr) {
                        pda->irqstackptr = (char *)
                                __get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
                        if (!pda->irqstackptr)
                                panic("cannot allocate irqstack for cpu %d",
                                      cpu);
                        pda->irqstackptr += IRQSTACKSIZE - 64;
                }

                if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE)
                        pda->nodenumber = cpu_to_node(cpu);
        }
}

char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ +
                           DEBUG_STKSZ] __page_aligned_bss;

extern asmlinkage void ignore_sysret(void);

/* May not be marked __init: used by software suspend */
void syscall_init(void)
{
        /*
         * LSTAR and STAR live in a somewhat strange symbiosis.
         * They both write to the same internal register.  STAR allows us to
         * set CS/DS but only to a 32-bit target; LSTAR sets the 64-bit rip.
         */
        wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32);
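        /*
         * STAR[47:32] selects the kernel CS (and SS = CS + 8) loaded on
         * syscall entry; STAR[63:48] is the base from which sysret derives
         * the user CS and SS.
         */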
        wrmsrl(MSR_LSTAR, system_call);
        wrmsrl(MSR_CSTAR, ignore_sysret);

#ifdef CONFIG_IA32_EMULATION
        syscall32_cpu_init();
#endif

        /* Flags to clear on syscall */
        wrmsrl(MSR_SYSCALL_MASK,
               X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_IOPL);
}

void __cpuinit check_efer(void)
{
        unsigned long efer;

        rdmsrl(MSR_EFER, efer);
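        /*
         * EFER.NX tells us whether no-execute support is actually enabled;
         * if it isn't (or "noexec=off" was given), _PAGE_NX must not leak
         * into page table entries, so mask it out of __supported_pte_mask.
         */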
        if (!(efer & EFER_NX) || do_not_nx)
                __supported_pte_mask &= ~_PAGE_NX;
}

unsigned long kernel_eflags;

/*
 * Copies of the original ist values from the tss are only accessed during
 * debugging, no special alignment required.
 */
DEFINE_PER_CPU(struct orig_ist, orig_ist);

#else

/* Make sure %fs is initialized properly in idle threads */
struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
{
        memset(regs, 0, sizeof(struct pt_regs));
        regs->fs = __KERNEL_PERCPU;
        return regs;
}
#endif

/*
 * cpu_init() initializes state that is per-CPU.  Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT.  We reload them nevertheless; this function acts as a
 * 'CPU state barrier': nothing should get across.
 * For 64-bit, a lot of state is already set up in pda_init().
 */
#ifdef CONFIG_X86_64
void __cpuinit cpu_init(void)
{
        int cpu = stack_smp_processor_id();
        struct tss_struct *t = &per_cpu(init_tss, cpu);
        struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
        unsigned long v;
        char *estacks = NULL;
        struct task_struct *me;
        int i;

        /* CPU 0 is initialised in head64.c */
        if (cpu != 0)
                pda_init(cpu);
        else
                estacks = boot_exception_stacks;

        me = current;

        if (cpu_test_and_set(cpu, cpu_initialized))
                panic("CPU#%d already initialized!\n", cpu);

        printk(KERN_INFO "Initializing CPU#%d\n", cpu);

        clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

        /*
         * Initialize the per-CPU GDT with the boot GDT,
         * and set up the GDT descriptor:
         */

        switch_to_new_gdt();
        load_idt((const struct desc_ptr *)&idt_descr);

        memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
        syscall_init();

        wrmsrl(MSR_FS_BASE, 0);
        wrmsrl(MSR_KERNEL_GS_BASE, 0);
        barrier();

        check_efer();
        if (cpu != 0 && x2apic)
                enable_x2apic();

        /*
         * set up and load the per-CPU TSS
         */
        if (!orig_ist->ist[0]) {
                static const unsigned int order[N_EXCEPTION_STACKS] = {
                        [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
                        [DEBUG_STACK - 1] = DEBUG_STACK_ORDER
                };
                for (v = 0; v < N_EXCEPTION_STACKS; v++) {
                        if (cpu) {
                                estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
                                if (!estacks)
                                        panic("Cannot allocate exception "
                                              "stack %ld %d\n", v, cpu);
                        }
                        estacks += PAGE_SIZE << order[v];
                        orig_ist->ist[v] = t->x86_tss.ist[v] =
                                        (unsigned long)estacks;
                }
        }

        t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
        /*
         * <= is required because the CPU will access up to
         * 8 bits beyond the end of the IO permission bitmap.
         */
        for (i = 0; i <= IO_BITMAP_LONGS; i++)
                t->io_bitmap[i] = ~0UL;

        atomic_inc(&init_mm.mm_count);
        me->active_mm = &init_mm;
        if (me->mm)
                BUG();
        enter_lazy_tlb(&init_mm, me);

        load_sp0(t, &current->thread);
        set_tss_desc(cpu, t);
        load_TR_desc();
        load_LDT(&init_mm.context);

#ifdef CONFIG_KGDB
        /*
         * If kgdb is connected, no debug registers should be altered.  This
         * is only applicable when KGDB and a KGDB I/O module are built
         * into the kernel and you are using early debugging with
         * kgdbwait.  KGDB will control the kernel HW breakpoint registers.
         */
        if (kgdb_connected && arch_kgdb_ops.correct_hw_break)
                arch_kgdb_ops.correct_hw_break();
        else {
#endif
        /*
         * Clear all 6 debug registers:
         */

        set_debugreg(0UL, 0);
        set_debugreg(0UL, 1);
        set_debugreg(0UL, 2);
        set_debugreg(0UL, 3);
        set_debugreg(0UL, 6);
        set_debugreg(0UL, 7);
#ifdef CONFIG_KGDB
        /* If kgdb is connected, no debug registers should be altered. */
        }
#endif

        fpu_init();

        raw_local_save_flags(kernel_eflags);

        if (is_uv_system())
                uv_cpu_init();
}

#else

void __cpuinit cpu_init(void)
{
        int cpu = smp_processor_id();
        struct task_struct *curr = current;
        struct tss_struct *t = &per_cpu(init_tss, cpu);
        struct thread_struct *thread = &curr->thread;

        if (cpu_test_and_set(cpu, cpu_initialized)) {
                printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
                for (;;) local_irq_enable();
        }

        printk(KERN_INFO "Initializing CPU#%d\n", cpu);

        if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
                clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

        load_idt(&idt_descr);
        switch_to_new_gdt();

        /*
         * Set up and load the per-CPU TSS and LDT
         */
        atomic_inc(&init_mm.mm_count);
        curr->active_mm = &init_mm;
        if (curr->mm)
                BUG();
        enter_lazy_tlb(&init_mm, curr);

        load_sp0(t, thread);
        set_tss_desc(cpu, t);
        load_TR_desc();
        load_LDT(&init_mm.context);

#ifdef CONFIG_DOUBLEFAULT
        /* Set up the doublefault TSS pointer in the GDT */
        __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
#endif

        /* Clear %gs. */
        asm volatile ("mov %0, %%gs" : : "r" (0));

        /* Clear all 6 debug registers: */
        set_debugreg(0, 0);
        set_debugreg(0, 1);
        set_debugreg(0, 2);
        set_debugreg(0, 3);
        set_debugreg(0, 6);
        set_debugreg(0, 7);

        /*
         * Force FPU initialization:
         */
        if (cpu_has_xsave)
                current_thread_info()->status = TS_XSAVE;
        else
                current_thread_info()->status = 0;
        clear_used_math();
        mxcsr_feature_mask_init();

        /*
         * The boot processor sets up the FP and extended state context info.
         */
        if (!smp_processor_id())
                init_thread_xstate();

        xsave_init();
}

#ifdef CONFIG_HOTPLUG_CPU
void __cpuinit cpu_uninit(void)
{
        int cpu = raw_smp_processor_id();
        cpu_clear(cpu, cpu_initialized);

        /* lazy TLB state */
        per_cpu(cpu_tlbstate, cpu).state = 0;
        per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
}
#endif

#endif