x86: separate funcs from setup_64 to cpu/common_64.c
arch/x86/kernel/cpu/common_64.c
#include <linux/init.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/bootmem.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/msr.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/mtrr.h>
#include <asm/mce.h>
#include <asm/pat.h>
#include <asm/numa.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <mach_apic.h>
#endif

#include "cpu.h"

/*
 * We need valid kernel segments for data and code in long mode too.
 * IRET will check the segment types  kkeil 2000/10/28
 * Also sysret mandates a special GDT layout.
 */
/* The TLS descriptors are currently at a different place compared to i386.
   Hopefully nobody expects them at a fixed place (Wine?) */
DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
        [GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } },
        [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } },
        [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } },
        [GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } },
        [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } },
        [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } },
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
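
/*
 * Decoding the descriptor words above (standard x86 descriptor layout):
 * each entry holds the low and high 32-bit halves of a GDT descriptor.
 * In the high half, type byte 0x9b is a present DPL-0 code segment and
 * 0x93 a DPL-0 data segment; 0xfb/0xf3 are the DPL-3 user equivalents.
 * The flags nibble 0xa (G=1, L=1, D=0) marks a 64-bit code segment,
 * while 0xc (G=1, L=0, D=1) marks a 32-bit segment.
 */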

__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;

/* Current gdt points %fs at the "master" per-cpu area: after this,
 * it's on the real one. */
void switch_to_new_gdt(void)
{
        struct desc_ptr gdt_descr;

        gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
        gdt_descr.size = GDT_SIZE - 1;
        load_gdt(&gdt_descr);
}
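
/*
 * Note that the GDT loaded above lives in the per-cpu gdt_page, so every
 * processor ends up with its own private copy; per-CPU descriptors such
 * as the TLS slots can then be rewritten without affecting other CPUs.
 */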

struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};

static void __cpuinit default_init(struct cpuinfo_x86 *c)
{
        display_cacheinfo(c);
}

static struct cpu_dev __cpuinitdata default_cpu = {
        .c_init = default_init,
        .c_vendor = "Unknown",
};
static struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;

int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
        unsigned int *v;

        if (c->extended_cpuid_level < 0x80000004)
                return 0;

        v = (unsigned int *) c->x86_model_id;
        cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
        cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
        cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
        c->x86_model_id[48] = 0;
        return 1;
}
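
/*
 * CPUID leaves 0x80000002-0x80000004 each return 16 bytes of the
 * processor brand string in EAX:EBX:ECX:EDX, 48 bytes in total.  The
 * terminating NUL at byte 48 is written explicitly in case the brand
 * string uses all 48 characters.
 */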

void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
        unsigned int n, dummy, eax, ebx, ecx, edx;

        n = c->extended_cpuid_level;

        if (n >= 0x80000005) {
                cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
                printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), "
                       "D cache %dK (%d bytes/line)\n",
                       edx >> 24, edx & 0xFF, ecx >> 24, ecx & 0xFF);
                c->x86_cache_size = (ecx >> 24) + (edx >> 24);
                /* On K8 L1 TLB is inclusive, so don't count it */
                c->x86_tlbsize = 0;
        }

        if (n >= 0x80000006) {
                cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
                ecx = cpuid_ecx(0x80000006);
                c->x86_cache_size = ecx >> 16;
                c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);

                printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
                       c->x86_cache_size, ecx & 0xFF);
        }
        if (n >= 0x80000008) {
                cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
                c->x86_virt_bits = (eax >> 8) & 0xff;
                c->x86_phys_bits = eax & 0xff;
        }
}
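
/*
 * The extended leaves decoded above follow AMD's layout: 0x80000005
 * reports the L1 caches (size in KB in bits 31:24, line size in bits
 * 7:0 of ECX/EDX), 0x80000006 reports the L2 cache and L2 TLB, and
 * 0x80000008 gives the physical and virtual address widths the CPU
 * supports.
 */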

void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        u32 eax, ebx, ecx, edx;
        int index_msb, core_bits;

        cpuid(1, &eax, &ebx, &ecx, &edx);

        if (!cpu_has(c, X86_FEATURE_HT))
                return;
        if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
                goto out;

        smp_num_siblings = (ebx & 0xff0000) >> 16;

        if (smp_num_siblings == 1) {
                printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
        } else if (smp_num_siblings > 1) {

                if (smp_num_siblings > NR_CPUS) {
                        printk(KERN_WARNING "CPU: Unsupported number of "
                               "siblings %d\n", smp_num_siblings);
                        smp_num_siblings = 1;
                        return;
                }

                index_msb = get_count_order(smp_num_siblings);
                c->phys_proc_id = phys_pkg_id(index_msb);

                smp_num_siblings = smp_num_siblings / c->x86_max_cores;

                index_msb = get_count_order(smp_num_siblings);

                core_bits = get_count_order(c->x86_max_cores);

                c->cpu_core_id = phys_pkg_id(index_msb) &
                                 ((1 << core_bits) - 1);
        }
out:
        if ((c->x86_max_cores * smp_num_siblings) > 1) {
                printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
                       c->phys_proc_id);
                printk(KERN_INFO "CPU: Processor Core ID: %d\n",
                       c->cpu_core_id);
        }

#endif
}
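
/*
 * detect_ht() derives the topology from the initial APIC ID: CPUID
 * leaf 1 EBX[23:16] is the number of logical processors per package,
 * the APIC ID bits above index_msb identify the physical package, and
 * the bits between the thread and package fields select the core.
 * Worked example: 4 logical CPUs per package as 2 cores of 2 threads
 * gives index_msb = 2, so APIC ID 5 (0b101) is package 1 (5 >> 2),
 * core 0 ((5 >> 1) & 1).
 */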

static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
        char *v = c->x86_vendor_id;
        int i;
        static int printed;

        for (i = 0; i < X86_VENDOR_NUM; i++) {
                if (cpu_devs[i]) {
                        if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
                            (cpu_devs[i]->c_ident[1] &&
                             !strcmp(v, cpu_devs[i]->c_ident[1]))) {
                                c->x86_vendor = i;
                                this_cpu = cpu_devs[i];
                                return;
                        }
                }
        }
        if (!printed) {
                printed++;
                printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
                printk(KERN_ERR "CPU: Your system may be unstable.\n");
        }
        c->x86_vendor = X86_VENDOR_UNKNOWN;
}
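
/*
 * Each cpu_dev carries up to two c_ident strings to match against the
 * 12-byte vendor ID that CPUID leaf 0 returns ("GenuineIntel",
 * "AuthenticAMD", ...); some vendors have shipped more than one
 * identification string, hence the second slot.
 */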

static void __init early_cpu_support_print(void)
{
        int i, j;
        struct cpu_dev *cpu_devx;

        printk(KERN_INFO "KERNEL supported cpus:\n");
        for (i = 0; i < X86_VENDOR_NUM; i++) {
                cpu_devx = cpu_devs[i];
                if (!cpu_devx)
                        continue;
                for (j = 0; j < 2; j++) {
                        if (!cpu_devx->c_ident[j])
                                continue;
                        printk(KERN_INFO " %s %s\n", cpu_devx->c_vendor,
                               cpu_devx->c_ident[j]);
                }
        }
}

static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);

void __init early_cpu_init(void)
{
        struct cpu_vendor_dev *cvdev;

        for (cvdev = __x86cpuvendor_start; cvdev < __x86cpuvendor_end;
             cvdev++)
                cpu_devs[cvdev->vendor] = cvdev->cpu_dev;
        early_cpu_support_print();
        early_identify_cpu(&boot_cpu_data);
}
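
/*
 * __x86cpuvendor_start/__x86cpuvendor_end bound a linker section into
 * which each vendor file drops a cpu_vendor_dev entry at compile time,
 * so early_cpu_init() can fill cpu_devs[] without any explicit
 * registration calls at runtime.
 */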

/*
 * Do some early cpuid on the boot CPU to get some parameters that are
 * needed before check_bugs.  Everything advanced is in identify_cpu()
 * below.
 */
static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
{
        u32 tfms, xlvl;

        c->loops_per_jiffy = loops_per_jiffy;
        c->x86_cache_size = -1;
        c->x86_vendor = X86_VENDOR_UNKNOWN;
        c->x86_model = c->x86_mask = 0; /* So far unknown... */
        c->x86_vendor_id[0] = '\0'; /* Unset */
        c->x86_model_id[0] = '\0'; /* Unset */
        c->x86_clflush_size = 64;
        c->x86_cache_alignment = c->x86_clflush_size;
        c->x86_max_cores = 1;
        c->x86_coreid_bits = 0;
        c->extended_cpuid_level = 0;
        memset(&c->x86_capability, 0, sizeof(c->x86_capability));

        /* Get vendor name */
        cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
              (unsigned int *)&c->x86_vendor_id[0],
              (unsigned int *)&c->x86_vendor_id[8],
              (unsigned int *)&c->x86_vendor_id[4]);

        get_cpu_vendor(c);

        /* Initialize the standard set of capabilities */
        /* Note that the vendor-specific code below might override */

        /* Intel-defined flags: level 0x00000001 */
        if (c->cpuid_level >= 0x00000001) {
                __u32 misc;
                cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
                      &c->x86_capability[0]);
                c->x86 = (tfms >> 8) & 0xf;
                c->x86_model = (tfms >> 4) & 0xf;
                c->x86_mask = tfms & 0xf;
                if (c->x86 == 0xf)
                        c->x86 += (tfms >> 20) & 0xff;
                if (c->x86 >= 0x6)
                        c->x86_model += ((tfms >> 16) & 0xF) << 4;
                if (test_cpu_cap(c, X86_FEATURE_CLFLSH))
                        c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
        } else {
                /* Have CPUID level 0 only - unheard of */
                c->x86 = 4;
        }

        c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xff;
#ifdef CONFIG_SMP
        c->phys_proc_id = c->initial_apicid;
#endif
        /* AMD-defined flags: level 0x80000001 */
        xlvl = cpuid_eax(0x80000000);
        c->extended_cpuid_level = xlvl;
        if ((xlvl & 0xffff0000) == 0x80000000) {
                if (xlvl >= 0x80000001) {
                        c->x86_capability[1] = cpuid_edx(0x80000001);
                        c->x86_capability[6] = cpuid_ecx(0x80000001);
                }
                if (xlvl >= 0x80000004)
                        get_model_name(c); /* Default name */
        }

        /* Transmeta-defined flags: level 0x80860001 */
        xlvl = cpuid_eax(0x80860000);
        if ((xlvl & 0xffff0000) == 0x80860000) {
                /* Don't set extended_cpuid_level here to avoid confusion. */
                if (xlvl >= 0x80860001)
                        c->x86_capability[2] = cpuid_edx(0x80860001);
        }

        c->extended_cpuid_level = cpuid_eax(0x80000000);
        if (c->extended_cpuid_level >= 0x80000007)
                c->x86_power = cpuid_edx(0x80000007);

        if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
            cpu_devs[c->x86_vendor]->c_early_init)
                cpu_devs[c->x86_vendor]->c_early_init(c);

        validate_pat_support(c);

        /* An early_param may have cleared the APIC flag, but the CPUID
           re-read above sets it again, so clear it once more here. */
        if (disable_apic)
                clear_cpu_cap(c, X86_FEATURE_APIC);
}
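
/*
 * Signature decoding above, worked example: CPUID leaf 1 EAX =
 * 0x00010676 (an Intel Penryn part) gives base family 6, base model 7,
 * stepping 6; since the family is >= 6 the extended model nibble (1)
 * is prepended, yielding model 0x17 (23).  The family is only extended
 * when the base family is 0xf, as on NetBurst and K8-class CPUs.
 */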

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
        int i;

        early_identify_cpu(c);

        init_scattered_cpuid_features(c);

        c->apicid = phys_pkg_id(0);

        /*
         * Vendor-specific initialization.  In this section we
         * canonicalize the feature flags: if a CPU supports features
         * that CPUID doesn't report, if CPUID claims flags the CPU
         * doesn't actually have, or there are other bugs, we handle
         * them here.
         *
         * At the end of this section, c->x86_capability had better
         * indicate the features this CPU genuinely supports!
         */
        if (this_cpu->c_init)
                this_cpu->c_init(c);

        detect_ht(c);

        /*
         * On SMP, boot_cpu_data holds the common feature set between
         * all CPUs; so make sure that we indicate which features are
         * common between the CPUs.  The first time this routine gets
         * executed, c == &boot_cpu_data.
         */
        if (c != &boot_cpu_data) {
                /* AND the already accumulated flags with these */
                for (i = 0; i < NCAPINTS; i++)
                        boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
        }

        /* Clear all flags overridden by options */
        for (i = 0; i < NCAPINTS; i++)
                c->x86_capability[i] &= ~cleared_cpu_caps[i];

#ifdef CONFIG_X86_MCE
        mcheck_init(c);
#endif
        select_idle_routine(c);

#ifdef CONFIG_NUMA
        numa_add_cpu(smp_processor_id());
#endif
}

void __cpuinit identify_boot_cpu(void)
{
        identify_cpu(&boot_cpu_data);
}
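
/*
 * Note the call ordering: the boot CPU runs early_identify_cpu() once
 * from early_cpu_init() and then again via identify_cpu() here, while
 * secondary CPUs only go through identify_cpu() below.
 */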

void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
{
        BUG_ON(c == &boot_cpu_data);
        identify_cpu(c);
        mtrr_ap_init();
}
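
/*
 * mtrr_ap_init() replays the MTRR state saved by the boot CPU onto the
 * freshly booted AP, so all processors end up with an identical memory
 * type map.
 */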

static __init int setup_noclflush(char *arg)
{
        setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
        return 1;
}
__setup("noclflush", setup_noclflush);
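
/*
 * Usage: booting with "noclflush" on the kernel command line records
 * X86_FEATURE_CLFLSH in cleared_cpu_caps, which identify_cpu() above
 * then masks out for every processor.
 */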

void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
        if (c->x86_model_id[0])
                printk(KERN_CONT "%s", c->x86_model_id);

        if (c->x86_mask || c->cpuid_level >= 0)
                printk(KERN_CONT " stepping %02x\n", c->x86_mask);
        else
                printk(KERN_CONT "\n");
}

static __init int setup_disablecpuid(char *arg)
{
        int bit;

        if (get_option(&arg, &bit) && bit < NCAPINTS * 32)
                setup_clear_cpu_cap(bit);
        else
                return 0;
        return 1;
}
__setup("clearcpuid=", setup_disablecpuid);
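
/*
 * Usage: "clearcpuid=<bit>" on the command line clears the given
 * x86_capability bit on all CPUs, which is handy for testing how the
 * kernel behaves without a particular feature.
 */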