#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/kgdb.h>
#include <linux/topology.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/msr.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/mtrr.h>
#include <asm/mce.h>
#include <asm/pat.h>
#include <asm/numa.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <mach_apic.h>
#endif
#include <asm/pda.h>
#include <asm/pgtable.h>
#include <asm/desc.h>
#include <asm/atomic.h>
#include <asm/proto.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/genapic.h>

#include "cpu.h"

/* We need valid kernel segments for data and code in long mode too
 * IRET will check the segment types  kkeil 2000/10/28
 * Also sysret mandates a special GDT layout
 */
/* The TLS descriptors are currently at a different place compared to i386.
   Hopefully nobody expects them at a fixed place (Wine?) */
DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
	[GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } },
	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } },
	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } },
	[GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } },
	[GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } },
	[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } },
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);

__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;

/* Current gdt points %fs at the "master" per-cpu area: after this,
 * it's on the real one. */
void switch_to_new_gdt(void)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
}

struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};

static void __cpuinit default_init(struct cpuinfo_x86 *c)
{
	display_cacheinfo(c);
}

static struct cpu_dev __cpuinitdata default_cpu = {
	.c_init = default_init,
	.c_vendor = "Unknown",
};
static struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;

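/*
 * Fetch the 48-character brand string from extended CPUID leaves
 * 0x80000002..0x80000004 into c->x86_model_id.
 */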
int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;

	if (c->extended_cpuid_level < 0x80000004)
		return 0;

	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;
	return 1;
}

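/*
 * Report L1/L2 cache and TLB sizes from the AMD-style extended
 * CPUID leaves 0x80000005 and 0x80000006.
 */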
void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ebx, ecx, edx;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), "
		       "D cache %dK (%d bytes/line)\n",
		       edx >> 24, edx & 0xFF, ecx >> 24, ecx & 0xFF);
		c->x86_cache_size = (ecx >> 24) + (edx >> 24);
		/* On K8 L1 TLB is inclusive, so don't count it */
		c->x86_tlbsize = 0;
	}

	if (n >= 0x80000006) {
		cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
		c->x86_cache_size = ecx >> 16;
		c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);

		printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
		       c->x86_cache_size, ecx & 0xFF);
	}
}

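/*
 * Detect hyper-threading: derive the physical package and core IDs
 * by splitting the initial APIC ID according to the sibling count
 * reported in CPUID leaf 1.
 */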
void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;

	cpuid(1, &eax, &ebx, &ecx, &edx);

	if (!cpu_has(c, X86_FEATURE_HT))
		return;
	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
		goto out;

	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
	} else if (smp_num_siblings > 1) {
		if (smp_num_siblings > NR_CPUS) {
			printk(KERN_WARNING "CPU: Unsupported number of "
			       "siblings %d\n", smp_num_siblings);
			smp_num_siblings = 1;
			return;
		}

		index_msb = get_count_order(smp_num_siblings);
		c->phys_proc_id = phys_pkg_id(index_msb);

		smp_num_siblings = smp_num_siblings / c->x86_max_cores;

		index_msb = get_count_order(smp_num_siblings);

		core_bits = get_count_order(c->x86_max_cores);

		c->cpu_core_id = phys_pkg_id(index_msb) &
				 ((1 << core_bits) - 1);
	}
out:
	if ((c->x86_max_cores * smp_num_siblings) > 1) {
		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
		       c->phys_proc_id);
		printk(KERN_INFO "CPU: Processor Core ID: %d\n",
		       c->cpu_core_id);
	}
#endif
}

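/*
 * Match the CPUID vendor string against the c_ident strings of the
 * registered cpu_devs[] entries and select that vendor's cpu_dev.
 */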
static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *v = c->x86_vendor_id;
	int i;
	static int printed;

	for (i = 0; i < X86_VENDOR_NUM; i++) {
		if (cpu_devs[i]) {
			if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
			    (cpu_devs[i]->c_ident[1] &&
			     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
				c->x86_vendor = i;
				this_cpu = cpu_devs[i];
				return;
			}
		}
	}
	if (!printed) {
		printed++;
		printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
		printk(KERN_ERR "CPU: Your system may be unstable.\n");
	}
	c->x86_vendor = X86_VENDOR_UNKNOWN;
}

static void __init early_cpu_support_print(void)
{
	int i, j;
	struct cpu_dev *cpu_devx;

	printk("KERNEL supported cpus:\n");
	for (i = 0; i < X86_VENDOR_NUM; i++) {
		cpu_devx = cpu_devs[i];
		if (!cpu_devx)
			continue;
		for (j = 0; j < 2; j++) {
			if (!cpu_devx->c_ident[j])
				continue;
			printk(" %s %s\n", cpu_devx->c_vendor,
			       cpu_devx->c_ident[j]);
		}
	}
}

static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);

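/*
 * Populate cpu_devs[] from the vendor table the linker gathers
 * between __x86cpuvendor_start and __x86cpuvendor_end, then run the
 * early identification pass on the boot CPU.
 */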
void __init early_cpu_init(void)
{
	struct cpu_vendor_dev *cvdev;

	for (cvdev = __x86cpuvendor_start;
	     cvdev < __x86cpuvendor_end;
	     cvdev++)
		cpu_devs[cvdev->vendor] = cvdev->cpu_dev;
	early_cpu_support_print();
	early_identify_cpu(&boot_cpu_data);
}

/* Do some early cpuid on the boot CPU to get some parameters that are
   needed before check_bugs. Everything advanced is in identify_cpu
   below. */
static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
{
	u32 tfms, xlvl;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0';	/* Unset */
	c->x86_model_id[0] = '\0';	/* Unset */
	c->x86_clflush_size = 64;
	c->x86_cache_alignment = c->x86_clflush_size;
	c->x86_max_cores = 1;
	c->x86_coreid_bits = 0;
	c->extended_cpuid_level = 0;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	get_cpu_vendor(c);

	/* Initialize the standard set of capabilities */
	/* Note that the vendor-specific code below might override */

	/* Intel-defined flags: level 0x00000001 */
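	/*
	 * tfms layout: stepping in bits 3:0, model in 7:4, family in
	 * 11:8, extended model in 19:16, extended family in 27:20.
	 * The extended fields are folded in below for family 0xf
	 * (and extended model for family >= 6).
	 */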
	if (c->cpuid_level >= 0x00000001) {
		__u32 misc;
		cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
		      &c->x86_capability[0]);
		c->x86 = (tfms >> 8) & 0xf;
		c->x86_model = (tfms >> 4) & 0xf;
		c->x86_mask = tfms & 0xf;
		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
		if (c->x86 >= 0x6)
			c->x86_model += ((tfms >> 16) & 0xf) << 4;
		if (test_cpu_cap(c, X86_FEATURE_CLFLSH))
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
	} else {
		/* Have CPUID level 0 only - unheard of */
		c->x86 = 4;
	}

	c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xff;
#ifdef CONFIG_SMP
	c->phys_proc_id = c->initial_apicid;
#endif
	/* AMD-defined flags: level 0x80000001 */
	xlvl = cpuid_eax(0x80000000);
	c->extended_cpuid_level = xlvl;
	if ((xlvl & 0xffff0000) == 0x80000000) {
		if (xlvl >= 0x80000001) {
			c->x86_capability[1] = cpuid_edx(0x80000001);
			c->x86_capability[6] = cpuid_ecx(0x80000001);
		}
		if (xlvl >= 0x80000004)
			get_model_name(c); /* Default name */
	}

	/* Transmeta-defined flags: level 0x80860001 */
	xlvl = cpuid_eax(0x80860000);
	if ((xlvl & 0xffff0000) == 0x80860000) {
		/* Don't set x86_cpuid_level here for now to not confuse. */
		if (xlvl >= 0x80860001)
			c->x86_capability[2] = cpuid_edx(0x80860001);
	}

	if (c->extended_cpuid_level >= 0x80000007)
		c->x86_power = cpuid_edx(0x80000007);

	if (c->extended_cpuid_level >= 0x80000008) {
		u32 eax = cpuid_eax(0x80000008);

		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
	}

	/* Assume all 64-bit CPUs support 32-bit syscall */
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);

	if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
	    cpu_devs[c->x86_vendor]->c_early_init)
		cpu_devs[c->x86_vendor]->c_early_init(c);

	validate_pat_support(c);

	/* An early_param may have cleared this, but the CPUID read
	   above set it again, so clear it once more here */
	if (disable_apic)
		clear_cpu_cap(c, X86_FEATURE_APIC);
}

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	early_identify_cpu(c);

	init_scattered_cpuid_features(c);

	c->apicid = phys_pkg_id(0);

	/*
	 * Vendor-specific initialization. In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	if (this_cpu->c_init)
		this_cpu->c_init(c);

	detect_ht(c);

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs. The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0; i < NCAPINTS; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

	/* Clear all flags overridden by options */
	for (i = 0; i < NCAPINTS; i++)
		c->x86_capability[i] &= ~cleared_cpu_caps[i];

#ifdef CONFIG_X86_MCE
	mcheck_init(c);
#endif
	select_idle_routine(c);

#ifdef CONFIG_NUMA
	numa_add_cpu(smp_processor_id());
#endif
}

void __cpuinit identify_boot_cpu(void)
{
	identify_cpu(&boot_cpu_data);
}

void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
{
	BUG_ON(c == &boot_cpu_data);
	identify_cpu(c);
	mtrr_ap_init();
}

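/*
 * "noclflush" boot option: record X86_FEATURE_CLFLSH in
 * cleared_cpu_caps so identify_cpu() masks it out on every CPU.
 */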
static __init int setup_noclflush(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
	return 1;
}
__setup("noclflush", setup_noclflush);

void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
	if (c->x86_model_id[0])
		printk(KERN_CONT "%s", c->x86_model_id);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(KERN_CONT " stepping %02x\n", c->x86_mask);
	else
		printk(KERN_CONT "\n");
}

static __init int setup_disablecpuid(char *arg)
{
	int bit;
	if (get_option(&arg, &bit) && bit < NCAPINTS*32)
		setup_clear_cpu_cap(bit);
	else
		return 0;
	return 1;
}
__setup("clearcpuid=", setup_disablecpuid);

cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;

struct x8664_pda **_cpu_pda __read_mostly;
EXPORT_SYMBOL(_cpu_pda);

struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };

char boot_cpu_stack[IRQSTACKSIZE] __page_aligned_bss;

unsigned long __supported_pte_mask __read_mostly = ~0UL;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

static int do_not_nx __cpuinitdata;

/* noexec=on|off
Control non executable mappings for 64bit processes.

on	Enable (default)
off	Disable
*/
static int __init nonx_setup(char *str)
{
	if (!str)
		return -EINVAL;
	if (!strncmp(str, "on", 2)) {
		__supported_pte_mask |= _PAGE_NX;
		do_not_nx = 0;
	} else if (!strncmp(str, "off", 3)) {
		do_not_nx = 1;
		__supported_pte_mask &= ~_PAGE_NX;
	}
	return 0;
}
early_param("noexec", nonx_setup);

int force_personality32;

/* noexec32=on|off
Control non executable heap for 32bit processes.
To control the stack too use noexec=off

on	PROT_READ does not imply PROT_EXEC for 32bit processes (default)
off	PROT_READ implies PROT_EXEC
*/
static int __init nonx32_setup(char *str)
{
	if (!strcmp(str, "on"))
		force_personality32 &= ~READ_IMPLIES_EXEC;
	else if (!strcmp(str, "off"))
		force_personality32 |= READ_IMPLIES_EXEC;
	return 1;
}
__setup("noexec32=", nonx32_setup);

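/*
 * Set up this CPU's PDA: MSR_GS_BASE is pointed at it so %gs-relative
 * accesses reach the per-CPU data, and its IRQ stack is installed
 * (allocated here for secondary CPUs, static for the boot CPU).
 */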
void pda_init(int cpu)
{
	struct x8664_pda *pda = cpu_pda(cpu);

	/* Set up data that may be needed in __get_free_pages early */
	loadsegment(fs, 0);
	loadsegment(gs, 0);
	/* Memory clobbers used to order PDA accesses */
	mb();
	wrmsrl(MSR_GS_BASE, pda);
	mb();

	pda->cpunumber = cpu;
	pda->irqcount = -1;
	pda->kernelstack = (unsigned long)stack_thread_info() -
				PDA_STACKOFFSET + THREAD_SIZE;
	pda->active_mm = &init_mm;
	pda->mmu_state = 0;

	if (cpu == 0) {
		/* others are initialized in smpboot.c */
		pda->pcurrent = &init_task;
		pda->irqstackptr = boot_cpu_stack;
	} else {
		pda->irqstackptr = (char *)
			__get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
		if (!pda->irqstackptr)
			panic("cannot allocate irqstack for cpu %d", cpu);

		if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE)
			pda->nodenumber = cpu_to_node(cpu);
	}

	pda->irqstackptr += IRQSTACKSIZE - 64;
}

char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ +
			   DEBUG_STKSZ]
__attribute__((section(".bss.page_aligned")));

extern asmlinkage void ignore_sysret(void);

/* May not be marked __init: used by software suspend */
void syscall_init(void)
{
	/*
	 * LSTAR and STAR live in a somewhat strange symbiosis.
	 * They both write to the same internal register. STAR allows
	 * setting CS/DS, but only for a 32-bit target. LSTAR sets the
	 * 64-bit rip.
	 */
	wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32);
	wrmsrl(MSR_LSTAR, system_call);
	wrmsrl(MSR_CSTAR, ignore_sysret);

#ifdef CONFIG_IA32_EMULATION
	syscall32_cpu_init();
#endif

	/* Flags to clear on syscall */
	wrmsrl(MSR_SYSCALL_MASK,
	       X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_IOPL);
}

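/*
 * Drop _PAGE_NX from the supported PTE mask if the CPU does not
 * report EFER.NX, or if "noexec=off" was given on the command line.
 */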
void __cpuinit check_efer(void)
{
	unsigned long efer;

	rdmsrl(MSR_EFER, efer);
	if (!(efer & EFER_NX) || do_not_nx)
		__supported_pte_mask &= ~_PAGE_NX;
}

unsigned long kernel_eflags;

/*
 * Copies of the original ist values from the tss are only accessed during
 * debugging, no special alignment required.
 */
DEFINE_PER_CPU(struct orig_ist, orig_ist);

/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT. We reload them nevertheless, this function acts as a
 * 'CPU state barrier', nothing should get across.
 * A lot of state is already set up in PDA init.
 */
void __cpuinit cpu_init(void)
{
	int cpu = stack_smp_processor_id();
	struct tss_struct *t = &per_cpu(init_tss, cpu);
	struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
	unsigned long v;
	char *estacks = NULL;
	struct task_struct *me;
	int i;

	/* CPU 0 is initialised in head64.c */
	if (cpu != 0)
		pda_init(cpu);
	else
		estacks = boot_exception_stacks;

	me = current;

	if (cpu_test_and_set(cpu, cpu_initialized))
		panic("CPU#%d already initialized!\n", cpu);

	printk(KERN_INFO "Initializing CPU#%d\n", cpu);

	clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	/*
	 * Initialize the per-CPU GDT with the boot GDT,
	 * and set up the GDT descriptor:
	 */

	switch_to_new_gdt();
	load_idt((const struct desc_ptr *)&idt_descr);

	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
	syscall_init();

	wrmsrl(MSR_FS_BASE, 0);
	wrmsrl(MSR_KERNEL_GS_BASE, 0);
	barrier();

	check_efer();

	/*
	 * set up and load the per-CPU TSS
	 */
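	/* Each IST slot in the TSS points at the top of one exception
	   stack; the CPU switches to that stack when the corresponding
	   IDT gate requests it. */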
	for (v = 0; v < N_EXCEPTION_STACKS; v++) {
		static const unsigned int order[N_EXCEPTION_STACKS] = {
			[0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
			[DEBUG_STACK - 1] = DEBUG_STACK_ORDER
		};
		if (cpu) {
			estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
			if (!estacks)
				panic("Cannot allocate exception stack %ld %d\n",
				      v, cpu);
		}
		estacks += PAGE_SIZE << order[v];
		orig_ist->ist[v] = t->x86_tss.ist[v] = (unsigned long)estacks;
	}

	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
	/*
	 * <= is required because the CPU will access up to
	 * 8 bits beyond the end of the IO permission bitmap.
	 */
	for (i = 0; i <= IO_BITMAP_LONGS; i++)
		t->io_bitmap[i] = ~0UL;

	atomic_inc(&init_mm.mm_count);
	me->active_mm = &init_mm;
	if (me->mm)
		BUG();
	enter_lazy_tlb(&init_mm, me);

	load_sp0(t, &current->thread);
	set_tss_desc(cpu, t);
	load_TR_desc();
	load_LDT(&init_mm.context);

#ifdef CONFIG_KGDB
	/*
	 * If the kgdb is connected no debug regs should be altered. This
	 * is only applicable when KGDB and a KGDB I/O module are built
	 * into the kernel and you are using early debugging with
	 * kgdbwait. KGDB will control the kernel HW breakpoint registers.
	 */
	if (kgdb_connected && arch_kgdb_ops.correct_hw_break)
		arch_kgdb_ops.correct_hw_break();
	else {
#endif
	/*
	 * Clear all 6 debug registers:
	 */

	set_debugreg(0UL, 0);
	set_debugreg(0UL, 1);
	set_debugreg(0UL, 2);
	set_debugreg(0UL, 3);
	set_debugreg(0UL, 6);
	set_debugreg(0UL, 7);
#ifdef CONFIG_KGDB
	/* If the kgdb is connected no debug regs should be altered. */
	}
#endif

	fpu_init();

	raw_local_save_flags(kernel_eflags);

	if (is_uv_system())
		uv_cpu_init();
}