Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | #include <linux/init.h> |
2 | #include <linux/kernel.h> | |
3 | ||
4 | #include <linux/string.h> | |
5 | #include <linux/bitops.h> | |
6 | #include <linux/smp.h> | |
7 | #include <linux/thread_info.h> | |
53e86b91 | 8 | #include <linux/module.h> |
1da177e4 LT |
9 | |
10 | #include <asm/processor.h> | |
d72b1b4f | 11 | #include <asm/pgtable.h> |
1da177e4 LT |
12 | #include <asm/msr.h> |
13 | #include <asm/uaccess.h> | |
eee3af4a MM |
14 | #include <asm/ptrace.h> |
15 | #include <asm/ds.h> | |
1da177e4 LT |
16 | |
17 | #include "cpu.h" | |
18 | ||
19 | #ifdef CONFIG_X86_LOCAL_APIC | |
20 | #include <asm/mpspec.h> | |
21 | #include <asm/apic.h> | |
22 | #include <mach_apic.h> | |
23 | #endif | |
24 | ||
#ifdef CONFIG_X86_INTEL_USERCOPY
/*
 * Alignment at which movsl is preferred for bulk memory copies.
 * Written once during CPU init (see init_intel()); read on every
 * user-copy, hence __read_mostly.
 */
struct movsl_mask movsl_mask __read_mostly;
#endif
31 | ||
3bc9b76b | 32 | void __cpuinit early_intel_workaround(struct cpuinfo_x86 *c) |
1da177e4 LT |
33 | { |
34 | if (c->x86_vendor != X86_VENDOR_INTEL) | |
35 | return; | |
36 | /* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */ | |
37 | if (c->x86 == 15 && c->x86_cache_alignment == 64) | |
38 | c->x86_cache_alignment = 128; | |
39 | } | |
40 | ||
41 | /* | |
42 | * Early probe support logic for ppro memory erratum #50 | |
43 | * | |
44 | * This is called before we do cpu ident work | |
45 | */ | |
46 | ||
3bc9b76b | 47 | int __cpuinit ppro_with_ram_bug(void) |
1da177e4 LT |
48 | { |
49 | /* Uses data from early_cpu_detect now */ | |
50 | if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && | |
51 | boot_cpu_data.x86 == 6 && | |
52 | boot_cpu_data.x86_model == 1 && | |
53 | boot_cpu_data.x86_mask < 8) { | |
54 | printk(KERN_INFO "Pentium Pro with Errata#50 detected. Taking evasive action.\n"); | |
55 | return 1; | |
56 | } | |
57 | return 0; | |
58 | } | |
59 | ||
60 | ||
61 | /* | |
62 | * P4 Xeon errata 037 workaround. | |
63 | * Hardware prefetcher may cause stale data to be loaded into the cache. | |
64 | */ | |
3bc9b76b | 65 | static void __cpuinit Intel_errata_workarounds(struct cpuinfo_x86 *c) |
1da177e4 LT |
66 | { |
67 | unsigned long lo, hi; | |
68 | ||
69 | if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) { | |
70 | rdmsr (MSR_IA32_MISC_ENABLE, lo, hi); | |
71 | if ((lo & (1<<9)) == 0) { | |
72 | printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n"); | |
73 | printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n"); | |
74 | lo |= (1<<9); /* Disable hw prefetching */ | |
75 | wrmsr (MSR_IA32_MISC_ENABLE, lo, hi); | |
76 | } | |
77 | } | |
78 | } | |
79 | ||
80 | ||
3dd9d514 AK |
81 | /* |
82 | * find out the number of processor cores on the die | |
83 | */ | |
3bc9b76b | 84 | static int __cpuinit num_cpu_cores(struct cpuinfo_x86 *c) |
3dd9d514 | 85 | { |
f2ab4461 | 86 | unsigned int eax, ebx, ecx, edx; |
3dd9d514 AK |
87 | |
88 | if (c->cpuid_level < 4) | |
89 | return 1; | |
90 | ||
f2ab4461 ZA |
91 | /* Intel has a non-standard dependency on %ecx for this CPUID level. */ |
92 | cpuid_count(4, 0, &eax, &ebx, &ecx, &edx); | |
3dd9d514 AK |
93 | if (eax & 0x1f) |
94 | return ((eax >> 26) + 1); | |
95 | else | |
96 | return 1; | |
97 | } | |
98 | ||
#ifdef CONFIG_X86_F00F_BUG
/*
 * Work around the Pentium F0 0F bug by mapping the IDT read-only at a
 * fixmap address and pointing the IDT register at that alias, so the
 * lockup-triggering instruction faults instead of hanging the CPU.
 * Called once from init_intel() the first time an affected CPU is seen.
 */
static void __cpuinit trap_init_f00f_bug(void)
{
	__set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);

	/*
	 * Update the IDT descriptor and reload the IDT so that
	 * it uses the read-only mapped virtual address.
	 */
	idt_descr.address = fix_to_virt(FIX_F00F_IDT);
	load_idt(&idt_descr);
}
#endif
112 | ||
/*
 * Per-CPU setup for Intel processors: apply errata workarounds, probe
 * cache/core topology, set model names for CPUs that need cache-size
 * disambiguation, and fill in synthetic feature bits.
 * Installed as the vendor .c_init hook via intel_cpu_dev below.
 */
static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
	unsigned int l2 = 0;	/* L2 cache size in KB, from cacheinfo probe */
	char *p = NULL;		/* model-name override, if any */

#ifdef CONFIG_X86_F00F_BUG
	/*
	 * All current models of Pentium and Pentium with MMX technology CPUs
	 * have the F0 0F bug, which lets nonprivileged users lock up the system.
	 * Note that the workaround only should be initialized once...
	 */
	c->f00f_bug = 0;
	if (!paravirt_enabled() && c->x86 == 5) {
		static int f00f_workaround_enabled = 0;

		c->f00f_bug = 1;
		if ( !f00f_workaround_enabled ) {
			trap_init_f00f_bug();
			printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif

	select_idle_routine(c);
	l2 = init_intel_cacheinfo(c);
	/* CPUID leaf 10 (0xA) advertises architectural perfmon. */
	if (c->cpuid_level > 9 ) {
		unsigned eax = cpuid_eax(10);
		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_bit(X86_FEATURE_ARCH_PERFMON, c->x86_capability);
	}

	/* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until model 3 mask 3 */
	if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
		clear_bit(X86_FEATURE_SEP, c->x86_capability);

	/* Names for the Pentium II/Celeron processors
	   detectable only by also checking the cache size.
	   Dixon is NOT a Celeron. */
	if (c->x86 == 6) {
		switch (c->x86_model) {
		case 5:
			if (c->x86_mask == 0) {
				if (l2 == 0)
					p = "Celeron (Covington)";
				else if (l2 == 256)
					p = "Mobile Pentium II (Dixon)";
			}
			break;

		case 6:
			if (l2 == 128)
				p = "Celeron (Mendocino)";
			else if (c->x86_mask == 0 || c->x86_mask == 5)
				p = "Celeron-A";
			break;

		case 8:
			if (l2 == 128)
				p = "Celeron (Coppermine)";
			break;
		}
	}

	if ( p )
		strcpy(c->x86_model_id, p);

	c->x86_max_cores = num_cpu_cores(c);

	detect_ht(c);

	/* Work around errata */
	Intel_errata_workarounds(c);

#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * Set up the preferred alignment for movsl bulk memory moves
	 */
	switch (c->x86) {
	case 4: /* 486: untested */
		break;
	case 5: /* Old Pentia: untested */
		break;
	case 6: /* PII/PIII only like movsl with 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	case 15: /* P4 is OK down to 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	}
#endif

	/* Synthetic family feature bits used elsewhere in the kernel. */
	if (c->x86 == 15) {
		set_bit(X86_FEATURE_P4, c->x86_capability);
		set_bit(X86_FEATURE_SYNC_RDTSC, c->x86_capability);
	}
	if (c->x86 == 6)
		set_bit(X86_FEATURE_P3, c->x86_capability);
	/* TSC runs at constant rate on P4 >= model 3 and Core >= model 0xe. */
	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
		(c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);

	/*
	 * Debug Store: MISC_ENABLE bits 11/12 are "unavailable" flags, so a
	 * clear bit means the feature (BTS / PEBS) is present.
	 * NOTE: the rdmsr clobbers l2, which is no longer needed here.
	 */
	if (cpu_has_ds) {
		unsigned int l1;
		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & (1<<11)))
			set_bit(X86_FEATURE_BTS, c->x86_capability);
		if (!(l1 & (1<<12)))
			set_bit(X86_FEATURE_PEBS, c->x86_capability);
	}

	if (cpu_has_bts)
		ds_init_intel(c);
}
1da177e4 | 228 | |
e9dff0ee | 229 | static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 * c, unsigned int size) |
1da177e4 LT |
230 | { |
231 | /* Intel PIII Tualatin. This comes in two flavours. | |
232 | * One has 256kb of cache, the other 512. We have no way | |
233 | * to determine which, so we use a boottime override | |
234 | * for the 512kb model, and assume 256 otherwise. | |
235 | */ | |
236 | if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0)) | |
237 | size = 256; | |
238 | return size; | |
239 | } | |
240 | ||
/*
 * Vendor descriptor for GenuineIntel CPUs: model-name tables per family
 * plus the init and cache-size hooks. Registered in cpu_devs[] by
 * intel_cpu_init() below.
 */
static struct cpu_dev intel_cpu_dev __cpuinitdata = {
	.c_vendor	= "Intel",
	.c_ident 	= { "GenuineIntel" },
	.c_models = {
		{ .vendor = X86_VENDOR_INTEL, .family = 4, .model_names =
		  {
			  [0] = "486 DX-25/33",
			  [1] = "486 DX-50",
			  [2] = "486 SX",
			  [3] = "486 DX/2",
			  [4] = "486 SL",
			  [5] = "486 SX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB"
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 5, .model_names =
		  {
			  [0] = "Pentium 60/66 A-step",
			  [1] = "Pentium 60/66",
			  [2] = "Pentium 75 - 200",
			  [3] = "OverDrive PODP5V83",
			  [4] = "Pentium MMX",
			  [7] = "Mobile Pentium 75 - 200",
			  [8] = "Mobile Pentium MMX"
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 6, .model_names =
		  {
			  [0] = "Pentium Pro A-step",
			  [1] = "Pentium Pro",
			  [3] = "Pentium II (Klamath)",
			  [4] = "Pentium II (Deschutes)",
			  [5] = "Pentium II (Deschutes)",
			  [6] = "Mobile Pentium II",
			  [7] = "Pentium III (Katmai)",
			  [8] = "Pentium III (Coppermine)",
			  [10] = "Pentium III (Cascades)",
			  [11] = "Pentium III (Tualatin)",
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 15, .model_names =
		  {
			  [0] = "Pentium 4 (Unknown)",
			  [1] = "Pentium 4 (Willamette)",
			  [2] = "Pentium 4 (Northwood)",
			  [4] = "Pentium 4 (Foster)",
			  [5] = "Pentium 4 (Foster)",
		  }
		},
	},
	.c_init		= init_intel,
	.c_size_cache	= intel_size_cache,
};
296 | ||
297 | __init int intel_cpu_init(void) | |
298 | { | |
299 | cpu_devs[X86_VENDOR_INTEL] = &intel_cpu_dev; | |
300 | return 0; | |
301 | } | |
302 | ||
53e86b91 NP |
303 | #ifndef CONFIG_X86_CMPXCHG |
304 | unsigned long cmpxchg_386_u8(volatile void *ptr, u8 old, u8 new) | |
305 | { | |
306 | u8 prev; | |
307 | unsigned long flags; | |
308 | ||
309 | /* Poor man's cmpxchg for 386. Unsuitable for SMP */ | |
310 | local_irq_save(flags); | |
311 | prev = *(u8 *)ptr; | |
312 | if (prev == old) | |
313 | *(u8 *)ptr = new; | |
314 | local_irq_restore(flags); | |
315 | return prev; | |
316 | } | |
317 | EXPORT_SYMBOL(cmpxchg_386_u8); | |
318 | ||
319 | unsigned long cmpxchg_386_u16(volatile void *ptr, u16 old, u16 new) | |
320 | { | |
321 | u16 prev; | |
322 | unsigned long flags; | |
323 | ||
324 | /* Poor man's cmpxchg for 386. Unsuitable for SMP */ | |
325 | local_irq_save(flags); | |
326 | prev = *(u16 *)ptr; | |
327 | if (prev == old) | |
328 | *(u16 *)ptr = new; | |
329 | local_irq_restore(flags); | |
330 | return prev; | |
331 | } | |
332 | EXPORT_SYMBOL(cmpxchg_386_u16); | |
333 | ||
334 | unsigned long cmpxchg_386_u32(volatile void *ptr, u32 old, u32 new) | |
335 | { | |
336 | u32 prev; | |
337 | unsigned long flags; | |
338 | ||
339 | /* Poor man's cmpxchg for 386. Unsuitable for SMP */ | |
340 | local_irq_save(flags); | |
341 | prev = *(u32 *)ptr; | |
342 | if (prev == old) | |
343 | *(u32 *)ptr = new; | |
344 | local_irq_restore(flags); | |
345 | return prev; | |
346 | } | |
347 | EXPORT_SYMBOL(cmpxchg_386_u32); | |
348 | #endif | |
349 | ||
2c0b8a75 MD |
350 | #ifndef CONFIG_X86_CMPXCHG64 |
351 | unsigned long long cmpxchg_486_u64(volatile void *ptr, u64 old, u64 new) | |
352 | { | |
353 | u64 prev; | |
354 | unsigned long flags; | |
355 | ||
356 | /* Poor man's cmpxchg8b for 386 and 486. Unsuitable for SMP */ | |
357 | local_irq_save(flags); | |
358 | prev = *(u64 *)ptr; | |
359 | if (prev == old) | |
360 | *(u64 *)ptr = new; | |
361 | local_irq_restore(flags); | |
362 | return prev; | |
363 | } | |
364 | EXPORT_SYMBOL(cmpxchg_486_u64); | |
365 | #endif | |
366 | ||
1da177e4 LT |
367 | // arch_initcall(intel_cpu_init); |
368 |