x86: make get_model_name of 64bit the same as 32bit
arch/x86/kernel/cpu/common_64.c ([deliverable/linux.git])
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/kgdb.h>
#include <linux/topology.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <asm/i387.h>
#include <asm/msr.h>
#include <asm/io.h>
#include <asm/linkage.h>
#include <asm/mmu_context.h>
#include <asm/mtrr.h>
#include <asm/mce.h>
#include <asm/pat.h>
#include <asm/asm.h>
#include <asm/numa.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <mach_apic.h>
#endif
#include <asm/pda.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/desc.h>
#include <asm/atomic.h>
#include <asm/proto.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/genapic.h>

#include "cpu.h"

/* We need valid kernel segments for data and code in long mode too
 * IRET will check the segment types  kkeil 2000/10/28
 * Also sysret mandates a special GDT layout
 */
/* The TLS descriptors are currently at a different place compared to i386.
   Hopefully nobody expects them at a fixed place (Wine?) */
DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
	[GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } },
	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } },
	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } },
	[GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } },
	[GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } },
	[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } },
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);

__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;

/* Current gdt points %fs at the "master" per-cpu area: after this,
 * it's on the real one. */
void switch_to_new_gdt(void)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
}

static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};

static void __cpuinit default_init(struct cpuinfo_x86 *c)
{
	display_cacheinfo(c);
}

static struct cpu_dev __cpuinitdata default_cpu = {
	.c_init = default_init,
	.c_vendor = "Unknown",
	.c_x86_vendor = X86_VENDOR_UNKNOWN,
};
static struct cpu_dev *this_cpu __cpuinitdata;

int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q;

	if (c->extended_cpuid_level < 0x80000004)
		return 0;

	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;

	/* Intel chips right-justify this string for some dumb reason;
	   undo that brain damage */
	p = q = &c->x86_model_id[0];
	while (*p == ' ')
		p++;
	if (p != q) {
		while (*p)
			*q++ = *p++;
		while (q <= &c->x86_model_id[48])
			*q++ = '\0';	/* Zero-pad the rest */
	}

	return 1;
}

void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ebx, ecx, edx;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
		       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		c->x86_cache_size = (ecx>>24) + (edx>>24);
		/* On K8 L1 TLB is inclusive, so don't count it */
		c->x86_tlbsize = 0;
	}

	if (n >= 0x80000006) {
		cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
		ecx = cpuid_ecx(0x80000006);
		c->x86_cache_size = ecx >> 16;
		c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);

		printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
		       c->x86_cache_size, ecx & 0xFF);
	}
}

void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;

	cpuid(1, &eax, &ebx, &ecx, &edx);

	if (!cpu_has(c, X86_FEATURE_HT))
		return;
	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
		goto out;

	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
	} else if (smp_num_siblings > 1) {

		if (smp_num_siblings > NR_CPUS) {
			printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
			       smp_num_siblings);
			smp_num_siblings = 1;
			return;
		}

		index_msb = get_count_order(smp_num_siblings);
		c->phys_proc_id = phys_pkg_id(index_msb);

		smp_num_siblings = smp_num_siblings / c->x86_max_cores;

		index_msb = get_count_order(smp_num_siblings);

		core_bits = get_count_order(c->x86_max_cores);

		c->cpu_core_id = phys_pkg_id(index_msb) &
					     ((1 << core_bits) - 1);
	}
out:
	if ((c->x86_max_cores * smp_num_siblings) > 1) {
		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
		       c->phys_proc_id);
		printk(KERN_INFO "CPU: Processor Core ID: %d\n",
		       c->cpu_core_id);
	}

#endif
}

static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *v = c->x86_vendor_id;
	int i;
	static int printed;

	for (i = 0; i < X86_VENDOR_NUM; i++) {
		if (!cpu_devs[i])
			break;

		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
		    (cpu_devs[i]->c_ident[1] &&
		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
			this_cpu = cpu_devs[i];
			c->x86_vendor = this_cpu->c_x86_vendor;
			return;
		}
	}

	if (!printed) {
		printed++;
		printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
		printk(KERN_ERR "CPU: Your system may be unstable.\n");
	}

	c->x86_vendor = X86_VENDOR_UNKNOWN;
	this_cpu = &default_cpu;
}

void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
{
	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	c->x86 = 4;
	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 junk, tfms, cap0, misc;
		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
		c->x86 = (tfms >> 8) & 0xf;
		c->x86_model = (tfms >> 4) & 0xf;
		c->x86_mask = tfms & 0xf;
		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
		if (c->x86 >= 0x6)
			c->x86_model += ((tfms >> 16) & 0xf) << 4;
		if (cap0 & (1<<19)) {
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
			c->x86_cache_alignment = c->x86_clflush_size;
		}
	}
}

static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
{
	u32 tfms, xlvl;
	u32 ebx;

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 capability, excap;

		cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
		c->x86_capability[0] = capability;
		c->x86_capability[4] = excap;
	}

	/* AMD-defined flags: level 0x80000001 */
	xlvl = cpuid_eax(0x80000000);
	c->extended_cpuid_level = xlvl;
	if ((xlvl & 0xffff0000) == 0x80000000) {
		if (xlvl >= 0x80000001) {
			c->x86_capability[1] = cpuid_edx(0x80000001);
			c->x86_capability[6] = cpuid_ecx(0x80000001);
		}
	}

	/* Transmeta-defined flags: level 0x80860001 */
	xlvl = cpuid_eax(0x80860000);
	if ((xlvl & 0xffff0000) == 0x80860000) {
		/* Don't set x86_cpuid_level here for now to not confuse. */
		if (xlvl >= 0x80860001)
			c->x86_capability[2] = cpuid_edx(0x80860001);
	}

	if (c->extended_cpuid_level >= 0x80000007)
		c->x86_power = cpuid_edx(0x80000007);

	if (c->extended_cpuid_level >= 0x80000008) {
		u32 eax = cpuid_eax(0x80000008);

		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
	}
}

/* Do some early cpuid on the boot CPU to get some parameters that are
   needed before check_bugs. Everything advanced is in identify_cpu
   below. */
static void __init early_identify_cpu(struct cpuinfo_x86 *c)
{
	c->x86_clflush_size = 64;
	c->x86_cache_alignment = c->x86_clflush_size;

	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	c->extended_cpuid_level = 0;

	cpu_detect(c);

	get_cpu_vendor(c);

	get_cpu_cap(c);

	if (this_cpu->c_early_init)
		this_cpu->c_early_init(c);

	validate_pat_support(c);
}

void __init early_cpu_init(void)
{
	struct cpu_dev **cdev;
	int count = 0;

	printk("KERNEL supported cpus:\n");
	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
		struct cpu_dev *cpudev = *cdev;
		unsigned int j;

		if (count >= X86_VENDOR_NUM)
			break;
		cpu_devs[count] = cpudev;
		count++;

		for (j = 0; j < 2; j++) {
			if (!cpudev->c_ident[j])
				continue;
			printk(" %s %s\n", cpudev->c_vendor,
				cpudev->c_ident[j]);
		}
	}

	early_identify_cpu(&boot_cpu_data);
}

/*
 * The NOPL instruction is supposed to exist on all CPUs with
 * family >= 6; unfortunately, that's not true in practice because
 * of early VIA chips and (more importantly) broken virtualizers that
 * are not easy to detect. Hence, probe for it based on first
 * principles.
 *
 * Note: no 64-bit chip is known to lack these, but put the code here
 * for consistency with 32 bits, and to make it utterly trivial to
 * diagnose the problem should it ever surface.
 */
static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
{
	const u32 nopl_signature = 0x888c53b1; /* Random number */
	u32 has_nopl = nopl_signature;

	clear_cpu_cap(c, X86_FEATURE_NOPL);
	if (c->x86 >= 6) {
		asm volatile("\n"
			     "1: .byte 0x0f,0x1f,0xc0\n" /* nopl %eax */
			     "2:\n"
			     "   .section .fixup,\"ax\"\n"
			     "3: xor %0,%0\n"
			     "   jmp 2b\n"
			     "   .previous\n"
			     _ASM_EXTABLE(1b,3b)
			     : "+a" (has_nopl));

		if (has_nopl == nopl_signature)
			set_cpu_cap(c, X86_FEATURE_NOPL);
	}
}

static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
{
	c->extended_cpuid_level = 0;

	cpu_detect(c);

	get_cpu_vendor(c);

	get_cpu_cap(c);

	c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xff;
#ifdef CONFIG_SMP
	c->phys_proc_id = c->initial_apicid;
#endif

	if (c->extended_cpuid_level >= 0x80000004)
		get_model_name(c); /* Default name */

	init_scattered_cpuid_features(c);
	detect_nopl(c);
}

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_clflush_size = 64;
	c->x86_cache_alignment = c->x86_clflush_size;
	c->x86_max_cores = 1;
	c->x86_coreid_bits = 0;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	generic_identify(c);

	c->apicid = phys_pkg_id(0);

	/*
	 * Vendor-specific initialization. In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	if (this_cpu->c_init)
		this_cpu->c_init(c);

	detect_ht(c);

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs. The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0; i < NCAPINTS; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

	/* Clear all flags overridden by options */
	for (i = 0; i < NCAPINTS; i++)
		c->x86_capability[i] &= ~cleared_cpu_caps[i];

#ifdef CONFIG_X86_MCE
	mcheck_init(c);
#endif
	select_idle_routine(c);

#ifdef CONFIG_NUMA
	numa_add_cpu(smp_processor_id());
#endif

}

void __init identify_boot_cpu(void)
{
	identify_cpu(&boot_cpu_data);
}

void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
{
	BUG_ON(c == &boot_cpu_data);
	identify_cpu(c);
	mtrr_ap_init();
}

struct msr_range {
	unsigned min;
	unsigned max;
};

static struct msr_range msr_range_array[] __cpuinitdata = {
	{ 0x00000000, 0x00000418},
	{ 0xc0000000, 0xc000040b},
	{ 0xc0010000, 0xc0010142},
	{ 0xc0011000, 0xc001103b},
};

static void __cpuinit print_cpu_msr(void)
{
	unsigned index;
	u64 val;
	int i;
	unsigned index_min, index_max;

	for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
		index_min = msr_range_array[i].min;
		index_max = msr_range_array[i].max;
		for (index = index_min; index < index_max; index++) {
			if (rdmsrl_amd_safe(index, &val))
				continue;
			printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
		}
	}
}

static int show_msr __cpuinitdata;
static __init int setup_show_msr(char *arg)
{
	int num;

	get_option(&arg, &num);

	if (num > 0)
		show_msr = num;
	return 1;
}
__setup("show_msr=", setup_show_msr);

static __init int setup_noclflush(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
	return 1;
}
__setup("noclflush", setup_noclflush);

void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
	if (c->x86_model_id[0])
		printk(KERN_CONT "%s", c->x86_model_id);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(KERN_CONT " stepping %02x\n", c->x86_mask);
	else
		printk(KERN_CONT "\n");

#ifdef CONFIG_SMP
	if (c->cpu_index < show_msr)
		print_cpu_msr();
#else
	if (show_msr)
		print_cpu_msr();
#endif
}

static __init int setup_disablecpuid(char *arg)
{
	int bit;
	if (get_option(&arg, &bit) && bit < NCAPINTS*32)
		setup_clear_cpu_cap(bit);
	else
		return 0;
	return 1;
}
__setup("clearcpuid=", setup_disablecpuid);

cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;

struct x8664_pda **_cpu_pda __read_mostly;
EXPORT_SYMBOL(_cpu_pda);

struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };

char boot_cpu_stack[IRQSTACKSIZE] __page_aligned_bss;

unsigned long __supported_pte_mask __read_mostly = ~0UL;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

static int do_not_nx __cpuinitdata;

/* noexec=on|off
Control non executable mappings for 64bit processes.

on	Enable(default)
off	Disable
*/
static int __init nonx_setup(char *str)
{
	if (!str)
		return -EINVAL;
	if (!strncmp(str, "on", 2)) {
		__supported_pte_mask |= _PAGE_NX;
		do_not_nx = 0;
	} else if (!strncmp(str, "off", 3)) {
		do_not_nx = 1;
		__supported_pte_mask &= ~_PAGE_NX;
	}
	return 0;
}
early_param("noexec", nonx_setup);

int force_personality32;

/* noexec32=on|off
Control non executable heap for 32bit processes.
To control the stack too use noexec=off

on	PROT_READ does not imply PROT_EXEC for 32bit processes (default)
off	PROT_READ implies PROT_EXEC
*/
static int __init nonx32_setup(char *str)
{
	if (!strcmp(str, "on"))
		force_personality32 &= ~READ_IMPLIES_EXEC;
	else if (!strcmp(str, "off"))
		force_personality32 |= READ_IMPLIES_EXEC;
	return 1;
}
__setup("noexec32=", nonx32_setup);

void pda_init(int cpu)
{
	struct x8664_pda *pda = cpu_pda(cpu);

	/* Set up data that may be needed in __get_free_pages early */
	loadsegment(fs, 0);
	loadsegment(gs, 0);
	/* Memory clobbers used to order PDA accesses */
	mb();
	wrmsrl(MSR_GS_BASE, pda);
	mb();

	pda->cpunumber = cpu;
	pda->irqcount = -1;
	pda->kernelstack = (unsigned long)stack_thread_info() -
				 PDA_STACKOFFSET + THREAD_SIZE;
	pda->active_mm = &init_mm;
	pda->mmu_state = 0;

	if (cpu == 0) {
		/* others are initialized in smpboot.c */
		pda->pcurrent = &init_task;
		pda->irqstackptr = boot_cpu_stack;
	} else {
		pda->irqstackptr = (char *)
			__get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
		if (!pda->irqstackptr)
			panic("cannot allocate irqstack for cpu %d", cpu);

		if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE)
			pda->nodenumber = cpu_to_node(cpu);
	}

	pda->irqstackptr += IRQSTACKSIZE-64;
}

char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ +
			   DEBUG_STKSZ] __page_aligned_bss;

extern asmlinkage void ignore_sysret(void);

/* May not be marked __init: used by software suspend */
void syscall_init(void)
{
	/*
	 * LSTAR and STAR live in a bit of a strange symbiosis.
	 * They both write to the same internal register. STAR allows
	 * setting CS/DS but only a 32bit target; LSTAR sets the 64bit rip.
	 */
	wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32);
	wrmsrl(MSR_LSTAR, system_call);
	wrmsrl(MSR_CSTAR, ignore_sysret);

#ifdef CONFIG_IA32_EMULATION
	syscall32_cpu_init();
#endif

	/* Flags to clear on syscall */
	wrmsrl(MSR_SYSCALL_MASK,
	       X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_IOPL);
}

void __cpuinit check_efer(void)
{
	unsigned long efer;

	rdmsrl(MSR_EFER, efer);
	if (!(efer & EFER_NX) || do_not_nx)
		__supported_pte_mask &= ~_PAGE_NX;
}

unsigned long kernel_eflags;

/*
 * Copies of the original ist values from the tss are only accessed during
 * debugging, no special alignment required.
 */
DEFINE_PER_CPU(struct orig_ist, orig_ist);

/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT. We reload them nevertheless; this function acts as a
 * 'CPU state barrier', and nothing should get across it.
 * A lot of state is already set up in PDA init.
 */
void __cpuinit cpu_init(void)
{
	int cpu = stack_smp_processor_id();
	struct tss_struct *t = &per_cpu(init_tss, cpu);
	struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
	unsigned long v;
	char *estacks = NULL;
	struct task_struct *me;
	int i;

	/* CPU 0 is initialised in head64.c */
	if (cpu != 0)
		pda_init(cpu);
	else
		estacks = boot_exception_stacks;

	me = current;

	if (cpu_test_and_set(cpu, cpu_initialized))
		panic("CPU#%d already initialized!\n", cpu);

	printk(KERN_INFO "Initializing CPU#%d\n", cpu);

	clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	/*
	 * Initialize the per-CPU GDT with the boot GDT,
	 * and set up the GDT descriptor:
	 */

	switch_to_new_gdt();
	load_idt((const struct desc_ptr *)&idt_descr);

	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
	syscall_init();

	wrmsrl(MSR_FS_BASE, 0);
	wrmsrl(MSR_KERNEL_GS_BASE, 0);
	barrier();

	check_efer();

	/*
	 * set up and load the per-CPU TSS
	 */
	for (v = 0; v < N_EXCEPTION_STACKS; v++) {
		static const unsigned int order[N_EXCEPTION_STACKS] = {
			[0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
			[DEBUG_STACK - 1] = DEBUG_STACK_ORDER
		};
		if (cpu) {
			estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
			if (!estacks)
				panic("Cannot allocate exception stack %ld %d\n",
				      v, cpu);
		}
		estacks += PAGE_SIZE << order[v];
		orig_ist->ist[v] = t->x86_tss.ist[v] = (unsigned long)estacks;
	}

	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
	/*
	 * <= is required because the CPU will access up to
	 * 8 bits beyond the end of the IO permission bitmap.
	 */
	for (i = 0; i <= IO_BITMAP_LONGS; i++)
		t->io_bitmap[i] = ~0UL;

	atomic_inc(&init_mm.mm_count);
	me->active_mm = &init_mm;
	if (me->mm)
		BUG();
	enter_lazy_tlb(&init_mm, me);

	load_sp0(t, &current->thread);
	set_tss_desc(cpu, t);
	load_TR_desc();
	load_LDT(&init_mm.context);

#ifdef CONFIG_KGDB
	/*
	 * If the kgdb is connected no debug regs should be altered. This
	 * is only applicable when KGDB and a KGDB I/O module are built
	 * into the kernel and you are using early debugging with
	 * kgdbwait. KGDB will control the kernel HW breakpoint registers.
	 */
	if (kgdb_connected && arch_kgdb_ops.correct_hw_break)
		arch_kgdb_ops.correct_hw_break();
	else {
#endif
	/*
	 * Clear all 6 debug registers:
	 */

	set_debugreg(0UL, 0);
	set_debugreg(0UL, 1);
	set_debugreg(0UL, 2);
	set_debugreg(0UL, 3);
	set_debugreg(0UL, 6);
	set_debugreg(0UL, 7);
#ifdef CONFIG_KGDB
	/* If the kgdb is connected no debug regs should be altered. */
	}
#endif

	fpu_init();

	raw_local_save_flags(kernel_eflags);

	if (is_uv_system())
		uv_cpu_init();
}