#include <linux/bootmem.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kgdb.h>
#include <linux/smp.h>
#include <linux/io.h>

#include <asm/stackprotector.h>
#include <asm/perf_event.h>
#include <asm/mmu_context.h>
#include <asm/archrandom.h>
#include <asm/hypervisor.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <asm/sections.h>
#include <linux/topology.h>
#include <linux/cpumask.h>
#include <asm/pgtable.h>
#include <linux/atomic.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/mtrr.h>
#include <linux/numa.h>
#include <asm/asm.h>
#include <asm/cpu.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/microcode.h>
#include <asm/microcode_intel.h>

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/uv/uv.h>
#endif

#include "cpu.h"

/* all of these masks are initialized in setup_cpu_local_masks() */
cpumask_var_t cpu_initialized_mask;
cpumask_var_t cpu_callout_mask;
cpumask_var_t cpu_callin_mask;

/* representing cpus for which sibling maps can be computed */
cpumask_var_t cpu_sibling_setup_mask;

/* correctly size the local cpu masks */
void __init setup_cpu_local_masks(void)
{
	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
	alloc_bootmem_cpumask_var(&cpu_callin_mask);
	alloc_bootmem_cpumask_var(&cpu_callout_mask);
	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}

static void __cpuinit default_init(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	cpu_detect_cache_sizes(c);
#else
	/* Not much we can do here... */
	/* Check if at least it has cpuid */
	if (c->cpuid_level == -1) {
		/* No cpuid. It must be an ancient CPU */
		if (c->x86 == 4)
			strcpy(c->x86_model_id, "486");
		else if (c->x86 == 3)
			strcpy(c->x86_model_id, "386");
	}
#endif
}

static const struct cpu_dev __cpuinitconst default_cpu = {
	.c_init		= default_init,
	.c_vendor	= "Unknown",
	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
};

static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;

DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
#ifdef CONFIG_X86_64
	/*
	 * We need valid kernel segments for data and code in long mode too
	 * IRET will check the segment types  kkeil 2000/10/28
	 * Also sysret mandates a special GDT layout
	 *
	 * TLS descriptors are currently at a different place compared to i386.
	 * Hopefully nobody expects them at a fixed place (Wine?)
	 */
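	/*
	 * GDT_ENTRY_INIT(flags, base, limit): the low byte of flags is the
	 * access byte (0x9b = present, ring 0, executable, readable; 0x93 =
	 * present, ring 0, writable data; 0xfb/0xf3 = the ring 3 variants),
	 * the high nibble carries granularity/size bits (0xc0 = 4K
	 * granularity + 32-bit, 0xa0 = 4K granularity + long mode).
	 */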
	[GDT_ENTRY_KERNEL32_CS]		= GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER32_CS]	= GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
#else
	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
	/*
	 * Segments used for calling PnP BIOS have byte granularity.
	 * The code segments and data segments have fixed 64k limits,
	 * the transfer segment sizes are set at run time.
	 */
	/* 32-bit code */
	[GDT_ENTRY_PNPBIOS_CS32]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
	/* 16-bit code */
	[GDT_ENTRY_PNPBIOS_CS16]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_DS]		= GDT_ENTRY_INIT(0x0092, 0, 0xffff),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS1]		= GDT_ENTRY_INIT(0x0092, 0, 0),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS2]		= GDT_ENTRY_INIT(0x0092, 0, 0),
	/*
	 * The APM segments have byte granularity and their bases
	 * are set at run time.  All have 64k limits.
	 */
	/* 32-bit code */
	[GDT_ENTRY_APMBIOS_BASE]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
	/* 16-bit code */
	[GDT_ENTRY_APMBIOS_BASE+1]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
	/* data */
	[GDT_ENTRY_APMBIOS_BASE+2]	= GDT_ENTRY_INIT(0x4092, 0, 0xffff),

	[GDT_ENTRY_ESPFIX_SS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	[GDT_ENTRY_PERCPU]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	GDT_STACK_CANARY_INIT
#endif
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);

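/*
 * "noxsave" also clears the features that are unusable without XSAVE
 * state management: XSAVEOPT, AVX and AVX2.
 */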
static int __init x86_xsave_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_XSAVE);
	setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
	setup_clear_cpu_cap(X86_FEATURE_AVX);
	setup_clear_cpu_cap(X86_FEATURE_AVX2);
	return 1;
}
__setup("noxsave", x86_xsave_setup);

static int __init x86_xsaveopt_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
	return 1;
}
__setup("noxsaveopt", x86_xsaveopt_setup);

#ifdef CONFIG_X86_32
static int cachesize_override __cpuinitdata = -1;
static int disable_x86_serial_nr __cpuinitdata = 1;

static int __init cachesize_setup(char *str)
{
	get_option(&str, &cachesize_override);
	return 1;
}
__setup("cachesize=", cachesize_setup);

static int __init x86_fxsr_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_FXSR);
	setup_clear_cpu_cap(X86_FEATURE_XMM);
	return 1;
}
__setup("nofxsr", x86_fxsr_setup);

static int __init x86_sep_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_SEP);
	return 1;
}
__setup("nosep", x86_sep_setup);

/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
	u32 f1, f2;

	/*
	 * Cyrix and IDT cpus allow disabling of CPUID
	 * so the code below may return different results
	 * when it is executed before and after enabling
	 * the CPUID. Add "volatile" to not allow gcc to
	 * optimize the subsequent calls to this function.
	 */
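	/*
	 * Save EFLAGS, toggle the requested bit, write the result back
	 * and re-read EFLAGS: the flag is changeable iff the toggled
	 * bit survives the round trip.
	 */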
	asm volatile ("pushfl		\n\t"
		      "pushfl		\n\t"
		      "popl %0		\n\t"
		      "movl %0, %1	\n\t"
		      "xorl %2, %0	\n\t"
		      "pushl %0		\n\t"
		      "popfl		\n\t"
		      "pushfl		\n\t"
		      "popl %0		\n\t"
		      "popfl		\n\t"

		      : "=&r" (f1), "=&r" (f2)
		      : "ir" (flag));

	return ((f1^f2) & flag) != 0;
}

/* Probe for the CPUID instruction */
int __cpuinit have_cpuid_p(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}

static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
	unsigned long lo, hi;

	if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
		return;

	/* Disable processor serial number: */

	rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
	lo |= 0x200000;
	wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);

	printk(KERN_NOTICE "CPU serial number disabled.\n");
	clear_cpu_cap(c, X86_FEATURE_PN);

	/* Disabling the serial number may affect the cpuid level */
	c->cpuid_level = cpuid_eax(0);
}

static int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);
#else
static inline int flag_is_changeable_p(u32 flag)
{
	return 1;
}
static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
}
#endif

static __init int setup_disable_smep(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_SMEP);
	return 1;
}
__setup("nosmep", setup_disable_smep);

static __always_inline void setup_smep(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_SMEP))
		set_in_cr4(X86_CR4_SMEP);
}

static __init int setup_disable_smap(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_SMAP);
	return 1;
}
__setup("nosmap", setup_disable_smap);

static __always_inline void setup_smap(struct cpuinfo_x86 *c)
{
	unsigned long eflags;

	/* This should have been cleared long ago */
	raw_local_save_flags(eflags);
	BUG_ON(eflags & X86_EFLAGS_AC);

	if (cpu_has(c, X86_FEATURE_SMAP))
		set_in_cr4(X86_CR4_SMAP);
}

/*
 * Some CPU features depend on higher CPUID levels, which may not always
 * be available due to CPUID level capping or broken virtualization
 * software.  Add those features to this table to auto-disable them.
 */
struct cpuid_dependent_feature {
	u32 feature;
	u32 level;
};

static const struct cpuid_dependent_feature __cpuinitconst
cpuid_dependent_features[] = {
	{ X86_FEATURE_MWAIT,		0x00000005 },
	{ X86_FEATURE_DCA,		0x00000009 },
	{ X86_FEATURE_XSAVE,		0x0000000d },
	{ 0, 0 }
};

static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
{
	const struct cpuid_dependent_feature *df;

	for (df = cpuid_dependent_features; df->feature; df++) {

		if (!cpu_has(c, df->feature))
			continue;
		/*
		 * Note: cpuid_level is set to -1 if unavailable, but
		 * extended_cpuid_level is set to 0 if unavailable
		 * and the legitimate extended levels are all negative
		 * when signed; hence the weird messing around with
		 * signs here...
		 */
		if (!((s32)df->level < 0 ?
		     (u32)df->level > (u32)c->extended_cpuid_level :
		     (s32)df->level > (s32)c->cpuid_level))
			continue;

		clear_cpu_cap(c, df->feature);
		if (!warn)
			continue;

		printk(KERN_WARNING
		       "CPU: CPU feature %s disabled, no CPUID level 0x%x\n",
		       x86_cap_flags[df->feature], df->level);
	}
}

/*
 * Naming convention should be: <Name> [(<Codename>)]
 * This table is only used if init_<vendor>() below doesn't set the
 * model name; in particular, if CPUID levels 0x80000002..4 are
 * supported, this table isn't used.
 */

/* Look up CPU names by table lookup. */
static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c)
{
	const struct cpu_model_info *info;

	if (c->x86_model >= 16)
		return NULL;	/* Range check */

	if (!this_cpu)
		return NULL;

	info = this_cpu->c_models;

	while (info && info->family) {
		if (info->family == c->x86)
			return info->model_names[c->x86_model];
		info++;
	}
	return NULL;		/* Not found */
}

__u32 cpu_caps_cleared[NCAPINTS] __cpuinitdata;
__u32 cpu_caps_set[NCAPINTS] __cpuinitdata;

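/*
 * On 32-bit the per-cpu area is addressed through %fs; on 64-bit it is
 * addressed through %gs, whose base is set via MSR_GS_BASE.
 */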
void load_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	loadsegment(fs, __KERNEL_PERCPU);
#else
	loadsegment(gs, 0);
	wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
#endif
	load_stack_canary_segment();
}

/*
 * Current gdt points %fs at the "master" per-cpu area: after this,
 * it's on the real one.
 */
void switch_to_new_gdt(int cpu)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_table(cpu);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
	/* Reload the per-cpu base */

	load_percpu_segment(cpu);
}

static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {};

static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q;

	if (c->extended_cpuid_level < 0x80000004)
		return;

	v = (unsigned int *)c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;

	/*
	 * Intel chips right-justify this string for some dumb reason;
	 * undo that brain damage:
	 */
	p = q = &c->x86_model_id[0];
	while (*p == ' ')
		p++;
	if (p != q) {
		while (*p)
			*q++ = *p++;
		while (q <= &c->x86_model_id[48])
			*q++ = '\0';	/* Zero-pad the rest */
	}
}

void __cpuinit cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ebx, ecx, edx, l2size;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		c->x86_cache_size = (ecx>>24) + (edx>>24);
#ifdef CONFIG_X86_64
		/* On K8 L1 TLB is inclusive, so don't count it */
		c->x86_tlbsize = 0;
#endif
	}

	if (n < 0x80000006)	/* Some chips just have a large L1. */
		return;

	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
	l2size = ecx >> 16;

#ifdef CONFIG_X86_64
	c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
#else
	/* do processor-specific cache resizing */
	if (this_cpu->c_size_cache)
		l2size = this_cpu->c_size_cache(c, l2size);

	/* Allow user to override all this if necessary. */
	if (cachesize_override != -1)
		l2size = cachesize_override;

	if (l2size == 0)
		return;		/* Again, no L2 cache is possible */
#endif

	c->x86_cache_size = l2size;
}

u16 __read_mostly tlb_lli_4k[NR_INFO];
u16 __read_mostly tlb_lli_2m[NR_INFO];
u16 __read_mostly tlb_lli_4m[NR_INFO];
u16 __read_mostly tlb_lld_4k[NR_INFO];
u16 __read_mostly tlb_lld_2m[NR_INFO];
u16 __read_mostly tlb_lld_4m[NR_INFO];

/*
 * tlb_flushall_shift shows the balance point in replacing cr3 write
 * with multiple 'invlpg'.  It will do this replacement when
 *   flush_tlb_lines <= active_lines/2^tlb_flushall_shift.
 * If tlb_flushall_shift is -1, the replacement is disabled.
 */
s8 __read_mostly tlb_flushall_shift = -1;

void __cpuinit cpu_detect_tlb(struct cpuinfo_x86 *c)
{
	if (this_cpu->c_detect_tlb)
		this_cpu->c_detect_tlb(c);

	printk(KERN_INFO "Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n"
	       "Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d\n"
	       "tlb_flushall_shift: %d\n",
	       tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES],
	       tlb_lli_4m[ENTRIES], tlb_lld_4k[ENTRIES],
	       tlb_lld_2m[ENTRIES], tlb_lld_4m[ENTRIES],
	       tlb_flushall_shift);
}

void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;
	static bool printed;

	if (!cpu_has(c, X86_FEATURE_HT))
		return;

	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
		goto out;

	if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
		return;

	cpuid(1, &eax, &ebx, &ecx, &edx);

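	/* CPUID.1:EBX[23:16] = number of logical processors per package */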
	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk_once(KERN_INFO "CPU0: Hyper-Threading is disabled\n");
		goto out;
	}

	if (smp_num_siblings <= 1)
		goto out;

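	/*
	 * Split the initial APIC ID: the topmost bits (above the
	 * sibling-count width) identify the package, the core_bits
	 * just below them identify the core within the package.
	 */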
	index_msb = get_count_order(smp_num_siblings);
	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);

	smp_num_siblings = smp_num_siblings / c->x86_max_cores;

	index_msb = get_count_order(smp_num_siblings);

	core_bits = get_count_order(c->x86_max_cores);

	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
				       ((1 << core_bits) - 1);

out:
	if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) {
		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
		       c->phys_proc_id);
		printk(KERN_INFO "CPU: Processor Core ID: %d\n",
		       c->cpu_core_id);
		printed = 1;
	}
#endif
}

static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *v = c->x86_vendor_id;
	int i;

	for (i = 0; i < X86_VENDOR_NUM; i++) {
		if (!cpu_devs[i])
			break;

		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
		    (cpu_devs[i]->c_ident[1] &&
		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {

			this_cpu = cpu_devs[i];
			c->x86_vendor = this_cpu->c_x86_vendor;
			return;
		}
	}

	printk_once(KERN_ERR
			"CPU: vendor_id '%s' unknown, using generic init.\n"
			"CPU: Your system may be unstable.\n", v);

	c->x86_vendor = X86_VENDOR_UNKNOWN;
	this_cpu = &default_cpu;
}

void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
{
	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	c->x86 = 4;
	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 junk, tfms, cap0, misc;

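		/*
		 * CPUID.1:EAX layout: stepping [3:0], model [7:4],
		 * family [11:8], extended model [19:16], extended
		 * family [27:20]; the extended family only applies to
		 * family 0xf, the extended model to family >= 6.
		 */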
		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
		c->x86 = (tfms >> 8) & 0xf;
		c->x86_model = (tfms >> 4) & 0xf;
		c->x86_mask = tfms & 0xf;

		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
		if (c->x86 >= 0x6)
			c->x86_model += ((tfms >> 16) & 0xf) << 4;

		if (cap0 & (1<<19)) {
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
			c->x86_cache_alignment = c->x86_clflush_size;
		}
	}
}

void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
{
	u32 tfms, xlvl;
	u32 ebx;

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 capability, excap;

		cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
		c->x86_capability[0] = capability;
		c->x86_capability[4] = excap;
	}

	/* Additional Intel-defined flags: level 0x00000007 */
	if (c->cpuid_level >= 0x00000007) {
		u32 eax, ebx, ecx, edx;

		cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);

		c->x86_capability[9] = ebx;
	}

	/* AMD-defined flags: level 0x80000001 */
	xlvl = cpuid_eax(0x80000000);
	c->extended_cpuid_level = xlvl;

	if ((xlvl & 0xffff0000) == 0x80000000) {
		if (xlvl >= 0x80000001) {
			c->x86_capability[1] = cpuid_edx(0x80000001);
			c->x86_capability[6] = cpuid_ecx(0x80000001);
		}
	}

	if (c->extended_cpuid_level >= 0x80000008) {
		u32 eax = cpuid_eax(0x80000008);

		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
	}
#ifdef CONFIG_X86_32
	else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
		c->x86_phys_bits = 36;
#endif

	if (c->extended_cpuid_level >= 0x80000007)
		c->x86_power = cpuid_edx(0x80000007);

	init_scattered_cpuid_features(c);
}

static void __cpuinit identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	int i;

	/*
	 * First of all, decide if this is a 486 or higher
	 * It's a 486 if we can modify the AC flag
	 */
	if (flag_is_changeable_p(X86_EFLAGS_AC))
		c->x86 = 4;
	else
		c->x86 = 3;

	for (i = 0; i < X86_VENDOR_NUM; i++)
		if (cpu_devs[i] && cpu_devs[i]->c_identify) {
			c->x86_vendor_id[0] = 0;
			cpu_devs[i]->c_identify(c);
			if (c->x86_vendor_id[0]) {
				get_cpu_vendor(c);
				break;
			}
		}
#endif
}

/*
 * Do minimum CPU detection early.
 * Fields really needed: vendor, cpuid_level, family, model, mask,
 * cache alignment.
 * The others are not touched to avoid unwanted side effects.
 *
 * WARNING: this function is only called on the BP.  Don't add code
 * here that is supposed to run on all CPUs.
 */
static void __init early_identify_cpu(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	c->x86_clflush_size = 64;
	c->x86_phys_bits = 36;
	c->x86_virt_bits = 48;
#else
	c->x86_clflush_size = 32;
	c->x86_phys_bits = 32;
	c->x86_virt_bits = 32;
#endif
	c->x86_cache_alignment = c->x86_clflush_size;

	memset(&c->x86_capability, 0, sizeof(c->x86_capability));
	c->extended_cpuid_level = 0;

	if (!have_cpuid_p())
		identify_cpu_without_cpuid(c);

	/* cyrix could have cpuid enabled via c_identify() */
	if (!have_cpuid_p())
		return;

	cpu_detect(c);

	get_cpu_vendor(c);

	get_cpu_cap(c);

	if (this_cpu->c_early_init)
		this_cpu->c_early_init(c);

	c->cpu_index = 0;
	filter_cpuid_features(c, false);

	if (this_cpu->c_bsp_init)
		this_cpu->c_bsp_init(c);
}

void __init early_cpu_init(void)
{
	const struct cpu_dev *const *cdev;
	int count = 0;

#ifdef CONFIG_PROCESSOR_SELECT
	printk(KERN_INFO "KERNEL supported cpus:\n");
#endif

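	/*
	 * __x86_cpu_dev_start/__x86_cpu_dev_end are linker symbols
	 * bracketing the cpu_dev pointers that the vendor files
	 * register via cpu_dev_register().
	 */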
	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
		const struct cpu_dev *cpudev = *cdev;

		if (count >= X86_VENDOR_NUM)
			break;
		cpu_devs[count] = cpudev;
		count++;

#ifdef CONFIG_PROCESSOR_SELECT
		{
			unsigned int j;

			for (j = 0; j < 2; j++) {
				if (!cpudev->c_ident[j])
					continue;
				printk(KERN_INFO "  %s %s\n", cpudev->c_vendor,
					cpudev->c_ident[j]);
			}
		}
#endif
	}
	early_identify_cpu(&boot_cpu_data);
}

/*
 * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
 * unfortunately, that's not true in practice because of early VIA
 * chips and (more importantly) broken virtualizers that are not easy
 * to detect.  In the latter case it doesn't even *fail* reliably, so
 * probing for it doesn't even work.  Disable it completely on 32-bit
 * unless we can find a reliable way to detect all the broken cases.
 * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
 */
static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	clear_cpu_cap(c, X86_FEATURE_NOPL);
#else
	set_cpu_cap(c, X86_FEATURE_NOPL);
#endif
}

static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
{
	c->extended_cpuid_level = 0;

	if (!have_cpuid_p())
		identify_cpu_without_cpuid(c);

	/* cyrix could have cpuid enabled via c_identify() */
	if (!have_cpuid_p())
		return;

	cpu_detect(c);

	get_cpu_vendor(c);

	get_cpu_cap(c);

	if (c->cpuid_level >= 0x00000001) {
		c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
#ifdef CONFIG_X86_32
# ifdef CONFIG_X86_HT
		c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
# else
		c->apicid = c->initial_apicid;
# endif
#endif
		c->phys_proc_id = c->initial_apicid;
	}

	get_model_name(c); /* Default name */

	detect_nopl(c);
}

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_max_cores = 1;
	c->x86_coreid_bits = 0;
#ifdef CONFIG_X86_64
	c->x86_clflush_size = 64;
	c->x86_phys_bits = 36;
	c->x86_virt_bits = 48;
#else
	c->cpuid_level = -1;	/* CPUID not detected */
	c->x86_clflush_size = 32;
	c->x86_phys_bits = 32;
	c->x86_virt_bits = 32;
#endif
	c->x86_cache_alignment = c->x86_clflush_size;
	memset(&c->x86_capability, 0, sizeof(c->x86_capability));

	generic_identify(c);

	if (this_cpu->c_identify)
		this_cpu->c_identify(c);

	/* Clear/Set all flags overridden by options, after probe */
	for (i = 0; i < NCAPINTS; i++) {
		c->x86_capability[i] &= ~cpu_caps_cleared[i];
		c->x86_capability[i] |= cpu_caps_set[i];
	}

#ifdef CONFIG_X86_64
	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
#endif

	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	if (this_cpu->c_init)
		this_cpu->c_init(c);

	/* Disable the PN if appropriate */
	squash_the_stupid_serial_number(c);

	/* Set up SMEP/SMAP */
	setup_smep(c);
	setup_smap(c);

	/*
	 * The vendor-specific functions might have changed features.
	 * Now we do "generic changes."
	 */

	/* Filter out anything that depends on CPUID levels we don't have */
	filter_cpuid_features(c, true);

	/* If the model name is still unset, do table lookup. */
	if (!c->x86_model_id[0]) {
		const char *p;
		p = table_lookup_model(c);
		if (p)
			strcpy(c->x86_model_id, p);
		else
			/* Last resort... */
			sprintf(c->x86_model_id, "%02x/%02x",
				c->x86, c->x86_model);
	}

#ifdef CONFIG_X86_64
	detect_ht(c);
#endif

	init_hypervisor(c);
	x86_init_rdrand(c);

	/*
	 * Clear/Set all flags overridden by options; this must be done
	 * before the SMP all-CPUs capability AND below.
	 */
	for (i = 0; i < NCAPINTS; i++) {
		c->x86_capability[i] &= ~cpu_caps_cleared[i];
		c->x86_capability[i] |= cpu_caps_set[i];
	}

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0; i < NCAPINTS; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];

		/* OR, i.e. replicate the bug flags */
		for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++)
			c->x86_capability[i] |= boot_cpu_data.x86_capability[i];
	}

	/* Init Machine Check Exception if available. */
	mcheck_cpu_init(c);

	select_idle_routine(c);

#ifdef CONFIG_NUMA
	numa_add_cpu(smp_processor_id());
#endif
}

#ifdef CONFIG_X86_64
static void vgetcpu_set_mode(void)
{
	if (cpu_has(&boot_cpu_data, X86_FEATURE_RDTSCP))
		vgetcpu_mode = VGETCPU_RDTSCP;
	else
		vgetcpu_mode = VGETCPU_LSL;
}
#endif

void __init identify_boot_cpu(void)
{
	identify_cpu(&boot_cpu_data);
	init_amd_e400_c1e_mask();
#ifdef CONFIG_X86_32
	sysenter_setup();
	enable_sep_cpu();
#else
	vgetcpu_set_mode();
#endif
	cpu_detect_tlb(&boot_cpu_data);
}

void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
{
	BUG_ON(c == &boot_cpu_data);
	identify_cpu(c);
#ifdef CONFIG_X86_32
	enable_sep_cpu();
#endif
	mtrr_ap_init();
}

struct msr_range {
	unsigned	min;
	unsigned	max;
};

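/*
 * MSR ranges dumped by "show_msr=": 0x0000xxxx covers the architectural
 * MSR space, the 0xc000xxxx/0xc001xxxx ranges cover the extended
 * (historically AMD-defined) MSR space.
 */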
static const struct msr_range msr_range_array[] __cpuinitconst = {
	{ 0x00000000, 0x00000418},
	{ 0xc0000000, 0xc000040b},
	{ 0xc0010000, 0xc0010142},
	{ 0xc0011000, 0xc001103b},
};

static void __cpuinit __print_cpu_msr(void)
{
	unsigned index_min, index_max;
	unsigned index;
	u64 val;
	int i;

	for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
		index_min = msr_range_array[i].min;
		index_max = msr_range_array[i].max;

		for (index = index_min; index < index_max; index++) {
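			/* rdmsrl_safe() fails on unimplemented MSRs: skip those */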
			if (rdmsrl_safe(index, &val))
				continue;
			printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
		}
	}
}

static int show_msr __cpuinitdata;

static __init int setup_show_msr(char *arg)
{
	int num;

	get_option(&arg, &num);

	if (num > 0)
		show_msr = num;
	return 1;
}
__setup("show_msr=", setup_show_msr);

static __init int setup_noclflush(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
	return 1;
}
__setup("noclflush", setup_noclflush);

void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
	const char *vendor = NULL;

	if (c->x86_vendor < X86_VENDOR_NUM) {
		vendor = this_cpu->c_vendor;
	} else {
		if (c->cpuid_level >= 0)
			vendor = c->x86_vendor_id;
	}

	if (vendor && !strstr(c->x86_model_id, vendor))
		printk(KERN_CONT "%s ", vendor);

	if (c->x86_model_id[0])
		printk(KERN_CONT "%s", strim(c->x86_model_id));
	else
		printk(KERN_CONT "%d86", c->x86);

	printk(KERN_CONT " (fam: %02x, model: %02x", c->x86, c->x86_model);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(KERN_CONT ", stepping: %02x)\n", c->x86_mask);
	else
		printk(KERN_CONT ")\n");

	print_cpu_msr(c);
}

void __cpuinit print_cpu_msr(struct cpuinfo_x86 *c)
{
	if (c->cpu_index < show_msr)
		__print_cpu_msr();
}

static __init int setup_disablecpuid(char *arg)
{
	int bit;

	if (get_option(&arg, &bit) && bit < NCAPINTS * 32)
		setup_clear_cpu_cap(bit);
	else
		return 0;

	return 1;
}
__setup("clearcpuid=", setup_disablecpuid);

#ifdef CONFIG_X86_64
struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
				    (unsigned long) debug_idt_table };

DEFINE_PER_CPU_FIRST(union irq_stack_union,
		     irq_stack_union) __aligned(PAGE_SIZE);

/*
 * The following four percpu variables are hot.  Align current_task to
 * cacheline size such that all four fall in the same cacheline.
 */
DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
	&init_task;
EXPORT_PER_CPU_SYMBOL(current_task);

DEFINE_PER_CPU(unsigned long, kernel_stack) =
	(unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
EXPORT_PER_CPU_SYMBOL(kernel_stack);

DEFINE_PER_CPU(char *, irq_stack_ptr) =
	init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;

DEFINE_PER_CPU(unsigned int, irq_count) = -1;

DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);

/*
 * Special IST stacks which the CPU switches to when it calls
 * an IST-marked descriptor entry.  Up to 7 stacks (hardware
 * limit), all of them are 4K, except the debug stack which
 * is 8K.
 */
static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
	  [0 ... N_EXCEPTION_STACKS - 1]	= EXCEPTION_STKSZ,
	  [DEBUG_STACK - 1]			= DEBUG_STKSZ
};

static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);

/* May not be marked __init: used by software suspend */
void syscall_init(void)
{
	/*
	 * LSTAR and STAR live in a bit strange symbiosis.
	 * They both write to the same internal register.  STAR allows
	 * setting CS/DS but only a 32-bit target; LSTAR sets the 64-bit rip.
	 */
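	/* STAR[47:32] = SYSCALL CS/SS base, STAR[63:48] = SYSRET CS/SS base */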
	wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32);
	wrmsrl(MSR_LSTAR, system_call);
	wrmsrl(MSR_CSTAR, ignore_sysret);

#ifdef CONFIG_IA32_EMULATION
	syscall32_cpu_init();
#endif

	/* Flags to clear on syscall */
	wrmsrl(MSR_SYSCALL_MASK,
	       X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|
	       X86_EFLAGS_IOPL|X86_EFLAGS_AC);
}

/*
 * Copies of the original ist values from the tss are only accessed during
 * debugging, no special alignment required.
 */
DEFINE_PER_CPU(struct orig_ist, orig_ist);

static DEFINE_PER_CPU(unsigned long, debug_stack_addr);
DEFINE_PER_CPU(int, debug_stack_usage);

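/*
 * True while we are inside a debug-stack window: either the usage
 * counter is set, or @addr falls within this CPU's debug stack.
 */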
int is_debug_stack(unsigned long addr)
{
	return __get_cpu_var(debug_stack_usage) ||
		(addr <= __get_cpu_var(debug_stack_addr) &&
		 addr > (__get_cpu_var(debug_stack_addr) - DEBUG_STKSZ));
}

DEFINE_PER_CPU(u32, debug_idt_ctr);

void debug_stack_set_zero(void)
{
	this_cpu_inc(debug_idt_ctr);
	load_current_idt();
}

void debug_stack_reset(void)
{
	if (WARN_ON(!this_cpu_read(debug_idt_ctr)))
		return;
	if (this_cpu_dec_return(debug_idt_ctr) == 0)
		load_current_idt();
}

#else	/* CONFIG_X86_64 */

DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
EXPORT_PER_CPU_SYMBOL(current_task);
DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);

#ifdef CONFIG_CC_STACKPROTECTOR
DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif

#endif	/* CONFIG_X86_64 */

/*
 * Clear all 6 debug registers:
 */
static void clear_all_debug_regs(void)
{
	int i;

	for (i = 0; i < 8; i++) {
		/* Ignore db4, db5 */
		if ((i == 4) || (i == 5))
			continue;

		set_debugreg(0, i);
	}
}

#ifdef CONFIG_KGDB
/*
 * Restore debug regs if using kgdbwait and you have a kernel debugger
 * connection established.
 */
static void dbg_restore_debug_regs(void)
{
	if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break))
		arch_kgdb_ops.correct_hw_break();
}
#else	/* ! CONFIG_KGDB */
#define dbg_restore_debug_regs()
#endif	/* ! CONFIG_KGDB */

/*
 * cpu_init() initializes state that is per-CPU.  Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT.  We reload them nevertheless, this function acts as a
 * 'CPU state barrier', nothing should get across.
 * A lot of state is already set up in PDA init for 64-bit.
 */
#ifdef CONFIG_X86_64

void __cpuinit cpu_init(void)
{
	struct orig_ist *oist;
	struct task_struct *me;
	struct tss_struct *t;
	unsigned long v;
	int cpu;
	int i;

	/*
	 * Load microcode on this cpu if a valid microcode is available.
	 * This is early microcode loading procedure.
	 */
	load_ucode_ap();

	cpu = stack_smp_processor_id();
	t = &per_cpu(init_tss, cpu);
	oist = &per_cpu(orig_ist, cpu);

#ifdef CONFIG_NUMA
	if (this_cpu_read(numa_node) == 0 &&
	    early_cpu_to_node(cpu) != NUMA_NO_NODE)
		set_numa_node(early_cpu_to_node(cpu));
#endif

	me = current;

	if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask))
		panic("CPU#%d already initialized!\n", cpu);

	pr_debug("Initializing CPU#%d\n", cpu);

	clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	/*
	 * Initialize the per-CPU GDT with the boot GDT,
	 * and set up the GDT descriptor:
	 */

	switch_to_new_gdt(cpu);
	loadsegment(fs, 0);

	load_idt((const struct desc_ptr *)&idt_descr);

	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
	syscall_init();

	wrmsrl(MSR_FS_BASE, 0);
	wrmsrl(MSR_KERNEL_GS_BASE, 0);
	barrier();

	x86_configure_nx();
	enable_x2apic();

	/*
	 * set up and load the per-CPU TSS
	 */
	if (!oist->ist[0]) {
		char *estacks = per_cpu(exception_stacks, cpu);

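		/*
		 * IST entries point to the top of each stack, so advance
		 * past each stack's size before recording its address.
		 */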
		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
			estacks += exception_stack_sizes[v];
			oist->ist[v] = t->x86_tss.ist[v] =
					(unsigned long)estacks;
			if (v == DEBUG_STACK-1)
				per_cpu(debug_stack_addr, cpu) = (unsigned long)estacks;
		}
	}

	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);

	/*
	 * <= is required because the CPU will access up to
	 * 8 bits beyond the end of the IO permission bitmap.
	 */
	for (i = 0; i <= IO_BITMAP_LONGS; i++)
		t->io_bitmap[i] = ~0UL;

	atomic_inc(&init_mm.mm_count);
	me->active_mm = &init_mm;
	BUG_ON(me->mm);
	enter_lazy_tlb(&init_mm, me);

	load_sp0(t, &current->thread);
	set_tss_desc(cpu, t);
	load_TR_desc();
	load_LDT(&init_mm.context);

	clear_all_debug_regs();
	dbg_restore_debug_regs();

	fpu_init();

	if (is_uv_system())
		uv_cpu_init();
}

#else

void __cpuinit cpu_init(void)
{
	int cpu = smp_processor_id();
	struct task_struct *curr = current;
	struct tss_struct *t = &per_cpu(init_tss, cpu);
	struct thread_struct *thread = &curr->thread;

	show_ucode_info_early();

	if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
		for (;;)
			local_irq_enable();
	}

	printk(KERN_INFO "Initializing CPU#%d\n", cpu);

	if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	load_idt(&idt_descr);
	switch_to_new_gdt(cpu);

	/*
	 * Set up and load the per-CPU TSS and LDT
	 */
	atomic_inc(&init_mm.mm_count);
	curr->active_mm = &init_mm;
	BUG_ON(curr->mm);
	enter_lazy_tlb(&init_mm, curr);

	load_sp0(t, thread);
	set_tss_desc(cpu, t);
	load_TR_desc();
	load_LDT(&init_mm.context);

	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);

#ifdef CONFIG_DOUBLEFAULT
	/* Set up doublefault TSS pointer in the GDT */
	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
#endif

	clear_all_debug_regs();
	dbg_restore_debug_regs();

	fpu_init();
}
#endif