#include <linux/init.h>
#include <linux/kernel.h>

#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/msr.h>
#include <asm/bugs.h>
#include <asm/cpu.h>

#ifdef CONFIG_X86_64
#include <linux/topology.h>
#endif

#include "cpu.h"

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#endif

static void early_init_intel(struct cpuinfo_x86 *c)
{
	u64 misc_enable;

	/* Unmask CPUID levels if masked: */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);

		if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) {
			misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
			wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
			c->cpuid_level = cpuid_eax(0);
			get_cpu_cap(c);
		}
	}
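
	/*
	 * IA32_MISC_ENABLE bit 22 ("Limit CPUID Maxval") clamps the
	 * maximum reported CPUID leaf to 3 while set, which is why
	 * c->cpuid_level and the capability bits have to be re-read
	 * once the bit is cleared.
	 */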

	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
		(c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

	if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64)) {
		unsigned lower_word;

		wrmsr(MSR_IA32_UCODE_REV, 0, 0);
		/* Required by the SDM */
		sync_core();
		rdmsr(MSR_IA32_UCODE_REV, lower_word, c->microcode);
	}
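
	/*
	 * Per the SDM, the microcode revision is obtained by writing 0
	 * to MSR_IA32_UCODE_REV, serializing with CPUID (sync_core()),
	 * and then reading the MSR back; the revision is returned in
	 * the high 32 bits, which is what c->microcode captures here.
	 */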

	/*
	 * Atom erratum AAE44/AAF40/AAG38/AAH41:
	 *
	 * A race condition between speculative fetches and invalidating
	 * a large page.  This is worked around in microcode, but we
	 * need the microcode to have already been loaded... so if it is
	 * not, recommend a BIOS update and disable large pages.
	 */
	if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 &&
	    c->microcode < 0x20e) {
		printk(KERN_WARNING "Atom PSE erratum detected, BIOS microcode update recommended\n");
		clear_cpu_cap(c, X86_FEATURE_PSE);
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#else
	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
	if (c->x86 == 15 && c->x86_cache_alignment == 64)
		c->x86_cache_alignment = 128;
#endif

	/* CPUID workaround for 0F33/0F34 CPU */
	if (c->x86 == 0xF && c->x86_model == 0x3
	    && (c->x86_mask == 0x3 || c->x86_mask == 0x4))
		c->x86_phys_bits = 36;

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states.
	 *
	 * It is also reliable across cores and sockets. (but not across
	 * cabinets - we turn it off in that case explicitly.)
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
		if (!check_tsc_unstable())
			sched_clock_stable = 1;
	}

	/* Penwell and Cloverview have the TSC which doesn't sleep on S3 */
	if (c->x86 == 6) {
		switch (c->x86_model) {
		case 0x27:	/* Penwell */
		case 0x35:	/* Cloverview */
			set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
			break;
		default:
			break;
		}
	}

	/*
	 * There is a known erratum on Pentium III and Core Solo
	 * and Core Duo CPUs.
	 * " Page with PAT set to WC while associated MTRR is UC
	 *   may consolidate to UC "
	 * Because of this erratum, it is better to stick with
	 * setting WC in MTRR rather than using PAT on these CPUs.
	 *
	 * Enable PAT WC only on P4, Core 2 or later CPUs.
	 */
	if (c->x86 == 6 && c->x86_model < 15)
		clear_cpu_cap(c, X86_FEATURE_PAT);

#ifdef CONFIG_KMEMCHECK
	/*
	 * P4s have a "fast strings" feature which causes single-
	 * stepping REP instructions to only generate a #DB on
	 * cache-line boundaries.
	 *
	 * Ingo Molnar reported a Pentium D (model 6) and a Xeon
	 * (model 2) with the same problem.
	 */
	if (c->x86 == 15) {
		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);

		if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) {
			printk(KERN_INFO "kmemcheck: Disabling fast string operations\n");

			misc_enable &= ~MSR_IA32_MISC_ENABLE_FAST_STRING;
			wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
		}
	}
#endif

	/*
	 * If fast string is not enabled in IA32_MISC_ENABLE for any reason,
	 * clear the fast string and enhanced fast string CPU capabilities.
	 */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
		if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
			printk(KERN_INFO "Disabled fast string operations\n");
			setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
			setup_clear_cpu_cap(X86_FEATURE_ERMS);
		}
	}
}

#ifdef CONFIG_X86_32
/*
 *	Early probe support logic for ppro memory erratum #50
 *
 *	This is called before we do cpu ident work
 */

int ppro_with_ram_bug(void)
{
	/* Uses data from early_cpu_detect now */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask < 8) {
		printk(KERN_INFO "Pentium Pro with Errata#50 detected. Taking evasive action.\n");
		return 1;
	}
	return 0;
}

static void intel_smp_check(struct cpuinfo_x86 *c)
{
	/* calling is from identify_secondary_cpu() ? */
	if (!c->cpu_index)
		return;

	/*
	 * Mask B, Pentium, but not Pentium MMX
	 */
	if (c->x86 == 5 &&
	    c->x86_mask >= 1 && c->x86_mask <= 4 &&
	    c->x86_model <= 3) {
		/*
		 * Remember we have B step Pentia with bugs
		 */
		WARN_ONCE(1, "WARNING: SMP operation may be unreliable "
				    "with B stepping processors.\n");
	}
}

static void intel_workarounds(struct cpuinfo_x86 *c)
{
	unsigned long lo, hi;

#ifdef CONFIG_X86_F00F_BUG
	/*
	 * All current models of Pentium and Pentium with MMX technology CPUs
	 * have the F0 0F bug, which lets nonprivileged users lock up the
	 * system. Announce that the fault handler will be checking for it.
	 */
	clear_cpu_bug(c, X86_BUG_F00F);
	if (!paravirt_enabled() && c->x86 == 5) {
		static int f00f_workaround_enabled;

		set_cpu_bug(c, X86_BUG_F00F);
		if (!f00f_workaround_enabled) {
			printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif

	/*
	 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
	 * model 3 mask 3
	 */
	if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
		clear_cpu_cap(c, X86_FEATURE_SEP);

	/*
	 * P4 Xeon errata 037 workaround.
	 * Hardware prefetcher may cause stale data to be loaded into the cache.
	 */
	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
		rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
		if ((lo & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE) == 0) {
			printk(KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
			printk(KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
			lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE;
			wrmsr(MSR_IA32_MISC_ENABLE, lo, hi);
		}
	}

	/*
	 * See if we have a good local APIC by checking for buggy Pentia,
	 * i.e. all B steppings and the C2 stepping of P54C when using their
	 * integrated APIC (see 11AP erratum in "Pentium Processor
	 * Specification Update").
	 */
	if (cpu_has_apic && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
	    (c->x86_mask < 0x6 || c->x86_mask == 0xb))
		set_cpu_cap(c, X86_FEATURE_11AP);

#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * Set up the preferred alignment for movsl bulk memory moves
	 */
	switch (c->x86) {
	case 4:		/* 486: untested */
		break;
	case 5:		/* Old Pentia: untested */
		break;
	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	case 15:	/* P4 is OK down to 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	}
#endif

#ifdef CONFIG_X86_NUMAQ
	numaq_tsc_disable();
#endif

	intel_smp_check(c);
}
#else
static void intel_workarounds(struct cpuinfo_x86 *c)
{
}
#endif

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();

	/* Don't do the funky fallback heuristics the AMD version employs
	   for now. */
	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE || !node_online(node)) {
		/* reuse the value from init_cpu_to_node() */
		node = cpu_to_node(cpu);
	}
	numa_set_node(cpu, node);
#endif
}

/*
 * find out the number of processor cores on the die
 */
static int intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, ebx, ecx, edx;

	if (c->cpuid_level < 4)
		return 1;

	/* Intel has a non-standard dependency on %ecx for this CPUID level. */
	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
	if (eax & 0x1f)
		return (eax >> 26) + 1;
	else
		return 1;
}
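
/*
 * CPUID.(EAX=4, ECX=0):EAX[31:26] encodes the maximum number of core IDs
 * addressable in the package minus one, so the value returned here is an
 * upper bound on the core count rather than the number of enabled cores.
 */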

static void detect_vmx_virtcap(struct cpuinfo_x86 *c)
{
	/* Intel VMX MSR indicated features */
#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW	0x00200000
#define X86_VMX_FEATURE_PROC_CTLS_VNMI		0x00400000
#define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS	0x80000000
#define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC	0x00000001
#define X86_VMX_FEATURE_PROC_CTLS2_EPT		0x00000002
#define X86_VMX_FEATURE_PROC_CTLS2_VPID		0x00000020

	u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;

	clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
	clear_cpu_cap(c, X86_FEATURE_VNMI);
	clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
	clear_cpu_cap(c, X86_FEATURE_EPT);
	clear_cpu_cap(c, X86_FEATURE_VPID);

	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
	msr_ctl = vmx_msr_high | vmx_msr_low;
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)
		set_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI)
		set_cpu_cap(c, X86_FEATURE_VNMI);
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) {
		rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
		      vmx_msr_low, vmx_msr_high);
		msr_ctl2 = vmx_msr_high | vmx_msr_low;
		if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) &&
		    (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW))
			set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT)
			set_cpu_cap(c, X86_FEATURE_EPT);
		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
			set_cpu_cap(c, X86_FEATURE_VPID);
	}
}
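
/*
 * The VMX capability MSRs read above report the allowed 0-settings of
 * each control in bits 31:0 and the allowed 1-settings in bits 63:32
 * (see the Intel SDM); OR-ing the two halves gives the controls that
 * can be enabled, which is what the feature flags derived here record.
 */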

static void init_intel(struct cpuinfo_x86 *c)
{
	unsigned int l2 = 0;

	early_init_intel(c);

	intel_workarounds(c);

	/*
	 * Detect the extended topology information if available. This
	 * will reinitialise the initial_apicid which will be used
	 * in init_intel_cacheinfo()
	 */
	detect_extended_topology(c);

	l2 = init_intel_cacheinfo(c);
	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);
		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}

	if (cpu_has_xmm2)
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
	if (cpu_has_ds) {
		unsigned int l1;
		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & (1<<11)))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(l1 & (1<<12)))
			set_cpu_cap(c, X86_FEATURE_PEBS);
	}

	if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush)
		set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR);

#ifdef CONFIG_X86_64
	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
#else
	/*
	 * Names for the Pentium II/Celeron processors
	 * detectable only by also checking the cache size.
	 * Dixon is NOT a Celeron.
	 */
	if (c->x86 == 6) {
		char *p = NULL;

		switch (c->x86_model) {
		case 5:
			if (l2 == 0)
				p = "Celeron (Covington)";
			else if (l2 == 256)
				p = "Mobile Pentium II (Dixon)";
			break;

		case 6:
			if (l2 == 128)
				p = "Celeron (Mendocino)";
			else if (c->x86_mask == 0 || c->x86_mask == 5)
				p = "Celeron-A";
			break;

		case 8:
			if (l2 == 128)
				p = "Celeron (Coppermine)";
			break;
		}

		if (p)
			strcpy(c->x86_model_id, p);
	}

	if (c->x86 == 15)
		set_cpu_cap(c, X86_FEATURE_P4);
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_P3);
#endif

	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
		/*
		 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
		 * detection.
		 */
		c->x86_max_cores = intel_num_cpu_cores(c);
#ifdef CONFIG_X86_32
		detect_ht(c);
#endif
	}

	/* Work around errata */
	srat_detect_node(c);

	if (cpu_has(c, X86_FEATURE_VMX))
		detect_vmx_virtcap(c);

	/*
	 * Initialize MSR_IA32_ENERGY_PERF_BIAS if BIOS did not.
	 * x86_energy_perf_policy(8) is available to change it at run-time
	 */
	if (cpu_has(c, X86_FEATURE_EPB)) {
		u64 epb;

		rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
		if ((epb & 0xF) == ENERGY_PERF_BIAS_PERFORMANCE) {
			printk_once(KERN_WARNING "ENERGY_PERF_BIAS:"
				" Set to 'normal', was 'performance'\n"
				"ENERGY_PERF_BIAS: View and update with"
				" x86_energy_perf_policy(8)\n");
			epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL;
			wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
		}
	}
}
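
/*
 * MSR_IA32_ENERGY_PERF_BIAS uses its low four bits as a hardware
 * energy/performance hint, 0 (performance) through 15 (powersave);
 * ENERGY_PERF_BIAS_NORMAL (6) is the balanced default that init_intel()
 * restores when firmware left the field at the reset value of 0.
 */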

#ifdef CONFIG_X86_32
static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/*
	 * Intel PIII Tualatin. This comes in two flavours.
	 * One has 256kb of cache, the other 512. We have no way
	 * to determine which, so we use a boottime override
	 * for the 512kb model, and assume 256 otherwise.
	 */
	if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
		size = 256;
	return size;
}
#endif

#define TLB_INST_4K	0x01
#define TLB_INST_4M	0x02
#define TLB_INST_2M_4M	0x03

#define TLB_INST_ALL	0x05
#define TLB_INST_1G	0x06

#define TLB_DATA_4K	0x11
#define TLB_DATA_4M	0x12
#define TLB_DATA_2M_4M	0x13
#define TLB_DATA_4K_4M	0x14

#define TLB_DATA_1G	0x16

#define TLB_DATA0_4K	0x21
#define TLB_DATA0_4M	0x22
#define TLB_DATA0_2M_4M	0x23

#define STLB_4K		0x41

static const struct _tlb_table intel_tlb_table[] = {
	{ 0x01, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0x02, TLB_INST_4M,		2,	" TLB_INST 4 MByte pages, full associative" },
	{ 0x03, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way set associative" },
	{ 0x04, TLB_DATA_4M,		8,	" TLB_DATA 4 MByte pages, 4-way set associative" },
	{ 0x05, TLB_DATA_4M,		32,	" TLB_DATA 4 MByte pages, 4-way set associative" },
	{ 0x0b, TLB_INST_4M,		4,	" TLB_INST 4 MByte pages, 4-way set associative" },
	{ 0x4f, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages" },
	{ 0x50, TLB_INST_ALL,		64,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x51, TLB_INST_ALL,		128,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x52, TLB_INST_ALL,		256,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x55, TLB_INST_2M_4M,		7,	" TLB_INST 2-MByte or 4-MByte pages, fully associative" },
	{ 0x56, TLB_DATA0_4M,		16,	" TLB_DATA0 4 MByte pages, 4-way set associative" },
	{ 0x57, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, 4-way associative" },
	{ 0x59, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, fully associative" },
	{ 0x5a, TLB_DATA0_2M_4M,	32,	" TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative" },
	{ 0x5b, TLB_DATA_4K_4M,		64,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x5c, TLB_DATA_4K_4M,		128,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x5d, TLB_DATA_4K_4M,		256,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0xb0, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0xb1, TLB_INST_2M_4M,		4,	" TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
	{ 0xb2, TLB_INST_4K,		64,	" TLB_INST 4KByte pages, 4-way set associative" },
	{ 0xb3, TLB_DATA_4K,		128,	" TLB_DATA 4 KByte pages, 4-way set associative" },
	{ 0xb4, TLB_DATA_4K,		256,	" TLB_DATA 4 KByte pages, 4-way associative" },
	{ 0xba, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way associative" },
	{ 0xc0, TLB_DATA_4K_4M,		8,	" TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
	{ 0xca, STLB_4K,		512,	" STLB 4 KByte pages, 4-way associative" },
	{ 0x00, 0, 0 }
};

static void intel_tlb_lookup(const unsigned char desc)
{
	unsigned char k;
	if (desc == 0)
		return;

	/* look up this descriptor in the table */
	for (k = 0; intel_tlb_table[k].descriptor != desc &&
			intel_tlb_table[k].descriptor != 0; k++)
		;

	if (intel_tlb_table[k].tlb_type == 0)
		return;

	switch (intel_tlb_table[k].tlb_type) {
	case STLB_4K:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_ALL:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_4K:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_4M:
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_2M_4M:
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4K:
	case TLB_DATA0_4K:
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4M:
	case TLB_DATA0_4M:
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_2M_4M:
	case TLB_DATA0_2M_4M:
		if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4K_4M:
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	}
}

static void intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c)
{
	switch ((c->x86 << 8) + c->x86_model) {
	case 0x60f: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
	case 0x616: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
	case 0x617: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
	case 0x61d: /* six-core 45 nm xeon "Dunnington" */
		tlb_flushall_shift = -1;
		break;
	case 0x61a: /* 45 nm nehalem, "Bloomfield" */
	case 0x61e: /* 45 nm nehalem, "Lynnfield" */
	case 0x625: /* 32 nm nehalem, "Clarkdale" */
	case 0x62c: /* 32 nm nehalem, "Gulftown" */
	case 0x62e: /* 45 nm nehalem-ex, "Beckton" */
	case 0x62f: /* 32 nm Xeon E7 */
		tlb_flushall_shift = 6;
		break;
	case 0x62a: /* SandyBridge */
	case 0x62d: /* SandyBridge, "Romley-EP" */
		tlb_flushall_shift = 5;
		break;
	case 0x63a: /* Ivybridge */
		tlb_flushall_shift = 1;
		break;
	default:
		tlb_flushall_shift = 6;
	}
}
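
/*
 * tlb_flushall_shift tunes flush_tlb_mm_range() in arch/x86/mm/tlb.c:
 * roughly, ranged (per-page) flushes are preferred until the page count
 * exceeds the TLB capacity shifted right by this value, so a smaller
 * shift favours ranged flushing, while -1 opts the CPU out of ranged
 * flushing altogether; the exact heuristic lives in the mm code.
 */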

static void intel_detect_tlb(struct cpuinfo_x86 *c)
{
	int i, j, n;
	unsigned int regs[4];
	unsigned char *desc = (unsigned char *)regs;

	if (c->cpuid_level < 2)
		return;

	/* Number of times to iterate */
	n = cpuid_eax(2) & 0xFF;

	for (i = 0 ; i < n ; i++) {
		cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

		/* If bit 31 is set, this is an unknown format */
		for (j = 0 ; j < 3 ; j++)
			if (regs[j] & (1 << 31))
				regs[j] = 0;

		/* Byte 0 is level count, not a descriptor */
		for (j = 1 ; j < 16 ; j++)
			intel_tlb_lookup(desc[j]);
	}
	intel_tlb_flushall_shift_set(c);
}
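
/*
 * CPUID leaf 2 packs one-byte cache/TLB descriptors into EAX..EDX; the
 * low byte of EAX says how many times the leaf must be executed, and a
 * register with bit 31 set carries no valid descriptors. The loop above
 * feeds every descriptor byte to intel_tlb_lookup(), which ignores the
 * non-TLB ones via the table's zero terminator.
 */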

static const struct cpu_dev intel_cpu_dev = {
	.c_vendor	= "Intel",
	.c_ident	= { "GenuineIntel" },
#ifdef CONFIG_X86_32
	.c_models = {
		{ .vendor = X86_VENDOR_INTEL, .family = 4, .model_names =
		  {
			  [0] = "486 DX-25/33",
			  [1] = "486 DX-50",
			  [2] = "486 SX",
			  [3] = "486 DX/2",
			  [4] = "486 SL",
			  [5] = "486 SX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB"
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 5, .model_names =
		  {
			  [0] = "Pentium 60/66 A-step",
			  [1] = "Pentium 60/66",
			  [2] = "Pentium 75 - 200",
			  [3] = "OverDrive PODP5V83",
			  [4] = "Pentium MMX",
			  [7] = "Mobile Pentium 75 - 200",
			  [8] = "Mobile Pentium MMX"
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 6, .model_names =
		  {
			  [0] = "Pentium Pro A-step",
			  [1] = "Pentium Pro",
			  [3] = "Pentium II (Klamath)",
			  [4] = "Pentium II (Deschutes)",
			  [5] = "Pentium II (Deschutes)",
			  [6] = "Mobile Pentium II",
			  [7] = "Pentium III (Katmai)",
			  [8] = "Pentium III (Coppermine)",
			  [10] = "Pentium III (Cascades)",
			  [11] = "Pentium III (Tualatin)",
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 15, .model_names =
		  {
			  [0] = "Pentium 4 (Unknown)",
			  [1] = "Pentium 4 (Willamette)",
			  [2] = "Pentium 4 (Northwood)",
			  [4] = "Pentium 4 (Foster)",
			  [5] = "Pentium 4 (Foster)",
		  }
		},
	},
	.c_size_cache	= intel_size_cache,
#endif
	.c_detect_tlb	= intel_detect_tlb,
	.c_early_init	= early_init_intel,
	.c_init		= init_intel,
	.c_x86_vendor	= X86_VENDOR_INTEL,
};

cpu_dev_register(intel_cpu_dev);