x86: don't special-case pmd allocations as much
[deliverable/linux.git] / arch / x86 / kernel / setup_64.c
CommitLineData
1da177e4 1/*
1da177e4 2 * Copyright (C) 1995 Linus Torvalds
1da177e4
LT
3 */
4
5/*
6 * This file handles the architecture-dependent parts of initialization
7 */
8
9#include <linux/errno.h>
10#include <linux/sched.h>
11#include <linux/kernel.h>
12#include <linux/mm.h>
13#include <linux/stddef.h>
14#include <linux/unistd.h>
15#include <linux/ptrace.h>
16#include <linux/slab.h>
17#include <linux/user.h>
18#include <linux/a.out.h>
894673ee 19#include <linux/screen_info.h>
1da177e4
LT
20#include <linux/ioport.h>
21#include <linux/delay.h>
1da177e4
LT
22#include <linux/init.h>
23#include <linux/initrd.h>
24#include <linux/highmem.h>
25#include <linux/bootmem.h>
26#include <linux/module.h>
27#include <asm/processor.h>
28#include <linux/console.h>
29#include <linux/seq_file.h>
aac04b32 30#include <linux/crash_dump.h>
1da177e4
LT
31#include <linux/root_dev.h>
32#include <linux/pci.h>
5b83683f 33#include <linux/efi.h>
1da177e4
LT
34#include <linux/acpi.h>
35#include <linux/kallsyms.h>
36#include <linux/edd.h>
bbfceef4 37#include <linux/mmzone.h>
5f5609df 38#include <linux/kexec.h>
95235ca2 39#include <linux/cpufreq.h>
e9928674 40#include <linux/dmi.h>
17a941d8 41#include <linux/dma-mapping.h>
681558fd 42#include <linux/ctype.h>
746ef0cd 43#include <linux/uaccess.h>
bbfceef4 44
1da177e4
LT
45#include <asm/mtrr.h>
46#include <asm/uaccess.h>
47#include <asm/system.h>
e4026440 48#include <asm/vsyscall.h>
1da177e4
LT
49#include <asm/io.h>
50#include <asm/smp.h>
51#include <asm/msr.h>
52#include <asm/desc.h>
53#include <video/edid.h>
54#include <asm/e820.h>
55#include <asm/dma.h>
aaf23042 56#include <asm/gart.h>
1da177e4
LT
57#include <asm/mpspec.h>
58#include <asm/mmu_context.h>
1da177e4
LT
59#include <asm/proto.h>
60#include <asm/setup.h>
61#include <asm/mach_apic.h>
62#include <asm/numa.h>
2bc0414e 63#include <asm/sections.h>
f2d3efed 64#include <asm/dmi.h>
00bf4098 65#include <asm/cacheflush.h>
af7a78e9 66#include <asm/mce.h>
eee3af4a 67#include <asm/ds.h>
df3825c5 68#include <asm/topology.h>
1da177e4 69
746ef0cd
GOC
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
/* Native hardware: no paravirt hook needed at the top of setup_arch(). */
#define ARCH_SETUP
#endif
75
1da177e4
LT
/*
 * Machine setup..
 */

/* Feature/ID data for the boot CPU; the common baseline all APs are ANDed into. */
struct cpuinfo_x86 boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);

/* Capability bits force-cleared via "clearcpuid="/"noclflush" boot options. */
__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;

unsigned long mmu_cr4_features;

/* Boot loader ID as an integer, for the benefit of proc_dointvec */
int bootloader_type;

/* Video mode the bootloader left us in (boot_params.hdr.vid_mode). */
unsigned long saved_video_mode;

/* Set by the "mwait" early parameter; forces use of MWAIT for idle. */
int force_mwait __cpuinitdata;

/*
 * Early DMI memory
 */
int dmi_alloc_index;
char dmi_alloc_data[DMI_MAX_DATA];

/*
 * Setup options
 */
struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);
struct sys_desc_table_struct {
	unsigned short length;
	unsigned char table[0];	/* trailing variable-length table data */
};

struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);

extern int root_mountflags;

/* Writable copy of the boot command line handed to the generic code. */
char __initdata command_line[COMMAND_LINE_SIZE];
1da177e4
LT
116
/* Legacy PC I/O port ranges claimed unconditionally on every x86 box. */
struct resource standard_io_resources[] = {
	{ .name = "dma1", .start = 0x00, .end = 0x1f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic1", .start = 0x20, .end = 0x21,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer0", .start = 0x40, .end = 0x43,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer1", .start = 0x50, .end = 0x53,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x60, .end = 0x6f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic2", .start = 0xa0, .end = 0xa1,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma2", .start = 0xc0, .end = 0xdf,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "fpu", .start = 0xf0, .end = 0xff,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO }
};

#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)

/* Kernel image section resources; start/end filled in by setup_arch(). */
static struct resource data_resource = {
	.name = "Kernel data",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_RAM,
};
static struct resource code_resource = {
	.name = "Kernel code",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_RAM,
};
static struct resource bss_resource = {
	.name = "Kernel bss",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_RAM,
};
1da177e4 158
8c61b900
TG
159static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);
160
2c8c0e6b
AK
#ifdef CONFIG_PROC_VMCORE
/* elfcorehdr= specifies the location of elf core header
 * stored by the crashed kernel. This option will be passed
 * by kexec loader to the capture kernel.
 */
static int __init setup_elfcorehdr(char *arg)
{
	char *end;
	if (!arg)
		return -EINVAL;
	/* memparse handles K/M/G suffixes; end tracks how far it consumed. */
	elfcorehdr_addr = memparse(arg, &end);
	/* Succeed only if at least one character was actually parsed. */
	return end > arg ? 0 : -EINVAL;
}
early_param("elfcorehdr", setup_elfcorehdr);
#endif
176
#ifndef CONFIG_NUMA
/*
 * Set up the bootmem allocator for a flat (non-NUMA) memory layout:
 * place the bootmem bitmap in a free e820 area, register all RAM in
 * [start_pfn, end_pfn) as active, then reserve the bitmap itself.
 */
static void __init
contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long bootmap_size, bootmap;

	bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
	bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
	if (bootmap == -1L)
		panic("Cannot find bootmem map of size %ld\n", bootmap_size);
	bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
	e820_register_active_regions(0, start_pfn, end_pfn);
	free_bootmem_with_active_regions(0, end_pfn);
	/* Don't let the allocator hand out the pages holding its own bitmap. */
	reserve_bootmem(bootmap, bootmap_size);
}
#endif
193
1da177e4
LT
#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
 * copy_edd() - Copy the BIOS EDD information
 * from boot_params into a safe place.
 *
 */
static inline void copy_edd(void)
{
	memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
	       sizeof(edd.mbr_signature));
	memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
	edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
	edd.edd_info_nr = boot_params.eddbuf_entries;
}
#else
/* EDD disabled: keep a no-op so setup_arch() can call it unconditionally. */
static inline void copy_edd(void)
{
}
#endif
217
5c3391f9
BW
#ifdef CONFIG_KEXEC
/*
 * Parse "crashkernel=size@base" from the boot command line and, when a
 * base address is given, reserve that range in bootmem for the kdump
 * capture kernel (recorded in crashk_res).
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long free_mem;
	unsigned long long crash_size, crash_base;
	int ret;

	free_mem =
		((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;

	ret = parse_crashkernel(boot_command_line, free_mem,
			&crash_size, &crash_base);
	if (ret == 0 && crash_size) {
		if (crash_base > 0) {
			printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
					"for crashkernel (System RAM: %ldMB)\n",
					(unsigned long)(crash_size >> 20),
					(unsigned long)(crash_base >> 20),
					(unsigned long)(free_mem >> 20));
			crashk_res.start = crash_base;
			crashk_res.end   = crash_base + crash_size - 1;
			reserve_bootmem(crash_base, crash_size);
		} else
			/* No automatic placement on 64-bit at this point. */
			printk(KERN_INFO "crashkernel reservation failed - "
					"you have to specify a base address\n");
	}
}
#else
static inline void __init reserve_crashkernel(void)
{}
#endif
249
/* Overridden in paravirt.c if CONFIG_PARAVIRT */
/* Weak default: obtain the e820 memory map from the BIOS/boot_params. */
void __attribute__((weak)) __init memory_setup(void)
{
	machine_specific_memory_setup();
}
255
1da177e4
LT
/*
 * setup_arch - architecture-specific boot-time initialization for x86-64.
 *
 * Runs once on the boot CPU.  The ordering below is significant: boot_params
 * is consumed first, then the e820 map is finalized, memory is mapped and
 * the bootmem allocator brought up, and only then are firmware tables
 * (ACPI/SMP/EFI) parsed and resources registered.
 *
 * @cmdline_p: out-parameter; receives the writable copy of the command line.
 */
void __init setup_arch(char **cmdline_p)
{
	unsigned i;

	printk(KERN_INFO "Command line: %s\n", boot_command_line);

	/* Pull everything we need out of the boot_params handed over by
	   the real-mode/boot loader code. */
	ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
	screen_info = boot_params.screen_info;
	edid_info = boot_params.edid_info;
	saved_video_mode = boot_params.hdr.vid_mode;
	bootloader_type = boot_params.hdr.type_of_loader;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
#endif
#ifdef CONFIG_EFI
	/* "EL64" signature means we were booted by a 64-bit EFI loader. */
	if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
		     "EL64", 4))
		efi_enabled = 1;
#endif

	ARCH_SETUP

	memory_setup();
	copy_edd();

	if (!boot_params.hdr.root_flags)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

	code_resource.start = virt_to_phys(&_text);
	code_resource.end = virt_to_phys(&_etext)-1;
	data_resource.start = virt_to_phys(&_etext);
	data_resource.end = virt_to_phys(&_edata)-1;
	bss_resource.start = virt_to_phys(&__bss_start);
	bss_resource.end = virt_to_phys(&__bss_stop)-1;

	early_identify_cpu(&boot_cpu_data);

	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = command_line;

	parse_early_param();

	finish_e820_parsing();

	early_gart_iommu_check();

	e820_register_active_regions(0, 0, -1UL);
	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	end_pfn = e820_end_of_ram();
	/* update e820 for memory not covered by WB MTRRs */
	mtrr_bp_init();
	if (mtrr_trim_uncached_memory(end_pfn)) {
		/* The map shrank; redo active-region registration. */
		e820_register_active_regions(0, 0, -1UL);
		end_pfn = e820_end_of_ram();
	}

	num_physpages = end_pfn;

	check_efer();

	init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
	if (efi_enabled)
		efi_init();

	dmi_scan_machine();

	io_delay_init();

#ifdef CONFIG_SMP
	/* setup to use the early static init tables during kernel startup */
	x86_cpu_to_apicid_early_ptr = (void *)x86_cpu_to_apicid_init;
	x86_bios_cpu_apicid_early_ptr = (void *)x86_bios_cpu_apicid_init;
#ifdef CONFIG_NUMA
	x86_cpu_to_node_map_early_ptr = (void *)x86_cpu_to_node_map_init;
#endif
#endif

#ifdef CONFIG_ACPI
	/*
	 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
	 * Call this early for SRAT node setup.
	 */
	acpi_boot_table_init();
#endif

	/* How many end-of-memory variables you have, grandma! */
	max_low_pfn = end_pfn;
	max_pfn = end_pfn;
	high_memory = (void *)__va(end_pfn * PAGE_SIZE - 1) + 1;

	/* Remove active ranges so rediscovery with NUMA-awareness happens */
	remove_all_active_ranges();

#ifdef CONFIG_ACPI_NUMA
	/*
	 * Parse SRAT to discover nodes.
	 */
	acpi_numa_init();
#endif

#ifdef CONFIG_NUMA
	numa_initmem_init(0, end_pfn);
#else
	contig_initmem_init(0, end_pfn);
#endif

	early_res_to_bootmem();

#ifdef CONFIG_ACPI_SLEEP
	/*
	 * Reserve low memory region for sleep support.
	 */
	acpi_reserve_bootmem();
#endif

	if (efi_enabled)
		efi_reserve_bootmem();

	/*
	 * Find and reserve possible boot-time SMP configuration:
	 */
	find_smp_config();
#ifdef CONFIG_BLK_DEV_INITRD
	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
		unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
		unsigned long ramdisk_size = boot_params.hdr.ramdisk_size;
		unsigned long ramdisk_end = ramdisk_image + ramdisk_size;
		unsigned long end_of_mem = end_pfn << PAGE_SHIFT;

		if (ramdisk_end <= end_of_mem) {
			reserve_bootmem_generic(ramdisk_image, ramdisk_size);
			initrd_start = ramdisk_image + PAGE_OFFSET;
			initrd_end = initrd_start+ramdisk_size;
		} else {
			/* Assumes everything on node 0 */
			free_bootmem(ramdisk_image, ramdisk_size);
			printk(KERN_ERR "initrd extends beyond end of memory "
			       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
			       ramdisk_end, end_of_mem);
			initrd_start = 0;
		}
	}
#endif
	reserve_crashkernel();
	paging_init();
	map_vsyscall();

	early_quirks();

#ifdef CONFIG_ACPI
	/*
	 * Read APIC and some other early information from ACPI tables.
	 */
	acpi_boot_init();
#endif

	init_cpu_to_node();

	/*
	 * get boot-time SMP configuration:
	 */
	if (smp_found_config)
		get_smp_config();
	init_apic_mappings();
	ioapic_init_mappings();

	/*
	 * We trust e820 completely. No explicit ROM probing in memory.
	 */
	e820_reserve_resources(&code_resource, &data_resource, &bss_resource);
	e820_mark_nosave_regions();

	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
		request_resource(&ioport_resource, &standard_io_resources[i]);

	e820_setup_gap();

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	/* Only use the VGA console if EFI didn't claim the legacy frame
	   buffer region as conventional memory. */
	if (!efi_enabled || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
		conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
}
453
e6982c67 454static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
1da177e4
LT
455{
456 unsigned int *v;
457
ebfcaa96 458 if (c->extended_cpuid_level < 0x80000004)
1da177e4
LT
459 return 0;
460
461 v = (unsigned int *) c->x86_model_id;
462 cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
463 cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
464 cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
465 c->x86_model_id[48] = 0;
466 return 1;
467}
468
469
e6982c67 470static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
1da177e4
LT
471{
472 unsigned int n, dummy, eax, ebx, ecx, edx;
473
ebfcaa96 474 n = c->extended_cpuid_level;
1da177e4
LT
475
476 if (n >= 0x80000005) {
477 cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
04e1ba85
TG
478 printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), "
479 "D cache %dK (%d bytes/line)\n",
480 edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
481 c->x86_cache_size = (ecx>>24) + (edx>>24);
1da177e4
LT
482 /* On K8 L1 TLB is inclusive, so don't count it */
483 c->x86_tlbsize = 0;
484 }
485
486 if (n >= 0x80000006) {
487 cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
488 ecx = cpuid_ecx(0x80000006);
489 c->x86_cache_size = ecx >> 16;
490 c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
491
492 printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
493 c->x86_cache_size, ecx & 0xFF);
494 }
1da177e4 495 if (n >= 0x80000008) {
04e1ba85 496 cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
1da177e4
LT
497 c->x86_virt_bits = (eax >> 8) & 0xff;
498 c->x86_phys_bits = eax & 0xff;
499 }
500}
501
3f098c26
AK
#ifdef CONFIG_NUMA
/*
 * Find an online NUMA node "close" to the given APIC id by scanning
 * neighbouring APIC ids downwards, then upwards, in apicid_to_node[].
 * Falls back to the first online node if nothing is found.
 */
static int nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif
520
63518644
AK
/*
 * On a AMD dual core setup the lower bits of the APIC id distinguish the cores.
 * Assumes number of cores is a power of two.
 * Derives cpu_core_id/phys_proc_id from the APIC id and, on NUMA,
 * binds this CPU to a node via numa_set_node().
 */
static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned bits;
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node = 0;
	unsigned apicid = hard_smp_processor_id();
#endif
	/* Number of low APIC-id bits that encode the core; computed earlier
	   by early_init_amd_mc(). */
	bits = c->x86_coreid_bits;

	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
	/* Convert the APIC ID into the socket ID */
	c->phys_proc_id = phys_pkg_id(bits);

#ifdef CONFIG_NUMA
	node = c->phys_proc_id;
	if (apicid_to_node[apicid] != NUMA_NO_NODE)
		node = apicid_to_node[apicid];
	if (!node_online(node)) {
		/* Two possibilities here:
		   - The CPU is missing memory and no node was created.
		   In that case try picking one from a nearby CPU
		   - The APIC IDs differ from the HyperTransport node IDs
		   which the K8 northbridge parsing fills in.
		   Assume they are all increased by a constant offset,
		   but in the same order as the HT nodeids.
		   If that doesn't result in a usable node fall back to the
		   path for the previous case. */

		int ht_nodeid = apicid - (cpu_data(0).phys_proc_id << bits);

		if (ht_nodeid >= 0 &&
		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
#endif
}
1da177e4 571
/*
 * Determine the AMD multi-core topology: number of cores per package
 * (c->x86_max_cores) and how many low APIC-id bits identify the core
 * (c->x86_coreid_bits), from extended CPUID leaf 0x80000008.
 */
static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		/* Smallest power of two covering x86_max_cores. */
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;

#endif
}
598
fb79d22e
TG
#define ENABLE_C1E_MASK 0x18000000
#define CPUID_PROCESSOR_SIGNATURE 1
#define CPUID_XFAM 0x0ff00000
#define CPUID_XFAM_K8 0x00000000
#define CPUID_XFAM_10H 0x00100000
#define CPUID_XFAM_11H 0x00200000
#define CPUID_XMOD 0x000f0000
#define CPUID_XMOD_REV_F 0x00040000

/* AMD systems with C1E don't have a working lAPIC timer. Check for that. */
static __cpuinit int amd_apic_timer_broken(void)
{
	u32 lo, hi, eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);

	switch (eax & CPUID_XFAM) {
	case CPUID_XFAM_K8:
		/* Pre-rev-F K8 has no C1E: timer is fine. */
		if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
			break;
		/* fall through: rev F+ K8 must be checked like 10h/11h */
	case CPUID_XFAM_10H:
	case CPUID_XFAM_11H:
		rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
		if (lo & ENABLE_C1E_MASK)
			return 1;
		break;
	default:
		/* err on the side of caution */
		return 1;
	}
	return 0;
}
629
2b16a235
AK
/* Early AMD setup: topology bits plus the constant-TSC capability flag. */
static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
{
	early_init_amd_mc(c);

	/* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
	if (c->x86_power & (1<<8))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
}
638
/*
 * Full AMD-specific CPU setup: errata workarounds, capability fixups,
 * model-name/cache detection, and multi-core topology.
 */
static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
	unsigned level;

#ifdef CONFIG_SMP
	unsigned long value;

	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	if (c->x86 == 15) {
		rdmsrl(MSR_K8_HWCR, value);
		value |= 1 << 6;
		wrmsrl(MSR_K8_HWCR, value);
	}
#endif

	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
	clear_bit(0*32+31, (unsigned long *)&c->x86_capability);

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) ||
			     level >= 0x0f58))
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
	if (c->x86 == 0x10 || c->x86 == 0x11)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* Enable workaround for FXSAVE leak */
	if (c->x86 >= 6)
		set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);

	level = get_model_name(c);
	if (!level) {
		switch (c->x86) {
		case 15:
			/* Should distinguish Models here, but this is only
			   a fallback anyways. */
			strcpy(c->x86_model_id, "Hammer");
			break;
		}
	}
	display_cacheinfo(c);

	/* Multi core CPU? */
	if (c->extended_cpuid_level >= 0x80000008)
		amd_detect_cmp(c);

	/* L3 present (leaf 0x80000006 EDX size field)? Then 4 cache leaves. */
	if (c->extended_cpuid_level >= 0x80000006 &&
		(cpuid_edx(0x80000006) & 0xf000))
		num_cache_leaves = 4;
	else
		num_cache_leaves = 3;

	if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x11)
		set_cpu_cap(c, X86_FEATURE_K8);

	/* MFENCE stops RDTSC speculation */
	set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);

	/* C1E machines lack a usable local APIC timer; fall back. */
	if (amd_apic_timer_broken())
		disable_apic_timer = 1;
}
707
/*
 * Detect Hyper-Threading siblings from CPUID leaf 1 and derive
 * phys_proc_id/cpu_core_id for this CPU.  Skipped entirely on AMD
 * CMP-legacy parts, whose topology was handled in amd_detect_cmp().
 */
void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;

	cpuid(1, &eax, &ebx, &ecx, &edx);


	if (!cpu_has(c, X86_FEATURE_HT))
		return;
	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
		goto out;

	/* EBX[23:16] = logical processors per physical package. */
	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
	} else if (smp_num_siblings > 1) {

		if (smp_num_siblings > NR_CPUS) {
			/* NOTE(review): message lacks a trailing '\n' — confirm intended */
			printk(KERN_WARNING "CPU: Unsupported number of "
			       "siblings %d", smp_num_siblings);
			smp_num_siblings = 1;
			return;
		}

		index_msb = get_count_order(smp_num_siblings);
		c->phys_proc_id = phys_pkg_id(index_msb);

		/* Threads per core = total siblings / cores per package. */
		smp_num_siblings = smp_num_siblings / c->x86_max_cores;

		index_msb = get_count_order(smp_num_siblings);

		core_bits = get_count_order(c->x86_max_cores);

		c->cpu_core_id = phys_pkg_id(index_msb) &
					       ((1 << core_bits) - 1);
	}
out:
	if ((c->x86_max_cores * smp_num_siblings) > 1) {
		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
		       c->phys_proc_id);
		printk(KERN_INFO "CPU: Processor Core ID: %d\n",
		       c->cpu_core_id);
	}

#endif
}
757
3dd9d514
AK
758/*
759 * find out the number of processor cores on the die
760 */
e6982c67 761static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
3dd9d514 762{
2bbc419f 763 unsigned int eax, t;
3dd9d514
AK
764
765 if (c->cpuid_level < 4)
766 return 1;
767
2bbc419f 768 cpuid_count(4, 0, &eax, &t, &t, &t);
3dd9d514
AK
769
770 if (eax & 0x1f)
771 return ((eax >> 26) + 1);
772 else
773 return 1;
774}
775
df0cc26b
AK
/*
 * Bind the current CPU to the NUMA node recorded for its APIC id
 * (from SRAT parsing); fall back to the first online node.
 */
static void srat_detect_node(void)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();
	int apicid = hard_smp_processor_id();

	/* Don't do the funky fallback heuristics the AMD version employs
	   for now. */
	node = apicid_to_node[apicid];
	if (node == NUMA_NO_NODE)
		node = first_node(node_online_map);
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
}
793
2b16a235
AK
794static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
795{
796 if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
797 (c->x86 == 0x6 && c->x86_model >= 0x0e))
798 set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
799}
800
/*
 * Full Intel-specific CPU setup: cache info, perfmon/BTS/PEBS features,
 * address widths, capability fixups, core count, and NUMA node binding.
 */
static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
	/* Cache sizes */
	unsigned n;

	init_intel_cacheinfo(c);
	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);
		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}

	if (cpu_has_ds) {
		unsigned int l1, l2;
		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		/* MISC_ENABLE bits 11/12 *disable* BTS/PEBS when set. */
		if (!(l1 & (1<<11)))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(l1 & (1<<12)))
			set_cpu_cap(c, X86_FEATURE_PEBS);
	}


	if (cpu_has_bts)
		ds_init_intel(c);

	n = c->extended_cpuid_level;
	if (n >= 0x80000008) {
		unsigned eax = cpuid_eax(0x80000008);
		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
		/* CPUID workaround for Intel 0F34 CPU */
		if (c->x86_vendor == X86_VENDOR_INTEL &&
		    c->x86 == 0xF && c->x86_model == 0x3 &&
		    c->x86_mask == 0x4)
			c->x86_phys_bits = 36;
	}

	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
	set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
	c->x86_max_cores = intel_num_cpu_cores(c);

	srat_detect_node();
}
851
672289e9 852static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
1da177e4
LT
853{
854 char *v = c->x86_vendor_id;
855
856 if (!strcmp(v, "AuthenticAMD"))
857 c->x86_vendor = X86_VENDOR_AMD;
858 else if (!strcmp(v, "GenuineIntel"))
859 c->x86_vendor = X86_VENDOR_INTEL;
860 else
861 c->x86_vendor = X86_VENDOR_UNKNOWN;
862}
863
1da177e4
LT
/* Do some early cpuid on the boot CPU to get some parameter that are
   needed before check_bugs. Everything advanced is in identify_cpu
   below. */
static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
{
	u32 tfms, xlvl;

	/* Reset everything to a known "unknown" baseline first. */
	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_clflush_size = 64;
	c->x86_cache_alignment = c->x86_clflush_size;
	c->x86_max_cores = 1;
	c->x86_coreid_bits = 0;
	c->extended_cpuid_level = 0;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	get_cpu_vendor(c);

	/* Initialize the standard set of capabilities */
	/* Note that the vendor-specific code below might override */

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		__u32 misc;
		cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
		      &c->x86_capability[0]);
		c->x86 = (tfms >> 8) & 0xf;
		c->x86_model = (tfms >> 4) & 0xf;
		c->x86_mask = tfms & 0xf;
		/* Family 0xf encodes the real family/model in the
		   extended fields of the signature. */
		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
		if (c->x86 >= 0x6)
			c->x86_model += ((tfms >> 16) & 0xF) << 4;
		/* Bit 19 = CLFLUSH present; misc[15:8] = line size in qwords. */
		if (c->x86_capability[0] & (1<<19))
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
	} else {
		/* Have CPUID level 0 only - unheard of */
		c->x86 = 4;
	}

#ifdef CONFIG_SMP
	c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
#endif
	/* AMD-defined flags: level 0x80000001 */
	xlvl = cpuid_eax(0x80000000);
	c->extended_cpuid_level = xlvl;
	if ((xlvl & 0xffff0000) == 0x80000000) {
		if (xlvl >= 0x80000001) {
			c->x86_capability[1] = cpuid_edx(0x80000001);
			c->x86_capability[6] = cpuid_ecx(0x80000001);
		}
		if (xlvl >= 0x80000004)
			get_model_name(c); /* Default name */
	}

	/* Transmeta-defined flags: level 0x80860001 */
	xlvl = cpuid_eax(0x80860000);
	if ((xlvl & 0xffff0000) == 0x80860000) {
		/* Don't set x86_cpuid_level here for now to not confuse. */
		if (xlvl >= 0x80860001)
			c->x86_capability[2] = cpuid_edx(0x80860001);
	}

	c->extended_cpuid_level = cpuid_eax(0x80000000);
	if (c->extended_cpuid_level >= 0x80000007)
		c->x86_power = cpuid_edx(0x80000007);

	/* Vendor-specific early fixups. */
	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		early_init_amd(c);
		break;
	case X86_VENDOR_INTEL:
		early_init_intel(c);
		break;
	}

}
951
952/*
953 * This does the hard work of actually picking apart the CPU stuff...
954 */
955void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
956{
957 int i;
958
959 early_identify_cpu(c);
960
1d67953f
VP
961 init_scattered_cpuid_features(c);
962
1e9f28fa
SS
963 c->apicid = phys_pkg_id(0);
964
1da177e4
LT
965 /*
966 * Vendor-specific initialization. In this section we
967 * canonicalize the feature flags, meaning if there are
968 * features a certain CPU supports which CPUID doesn't
969 * tell us, CPUID claiming incorrect flags, or other bugs,
970 * we handle them here.
971 *
972 * At the end of this section, c->x86_capability better
973 * indicate the features this CPU genuinely supports!
974 */
975 switch (c->x86_vendor) {
976 case X86_VENDOR_AMD:
977 init_amd(c);
978 break;
979
980 case X86_VENDOR_INTEL:
981 init_intel(c);
982 break;
983
984 case X86_VENDOR_UNKNOWN:
985 default:
986 display_cacheinfo(c);
987 break;
988 }
989
04e1ba85 990 detect_ht(c);
1da177e4
LT
991
992 /*
993 * On SMP, boot_cpu_data holds the common feature set between
994 * all CPUs; so make sure that we indicate which features are
995 * common between the CPUs. The first time this routine gets
996 * executed, c == &boot_cpu_data.
997 */
998 if (c != &boot_cpu_data) {
999 /* AND the already accumulated flags with these */
04e1ba85 1000 for (i = 0; i < NCAPINTS; i++)
1da177e4
LT
1001 boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
1002 }
1003
7d851c8d
AK
1004 /* Clear all flags overriden by options */
1005 for (i = 0; i < NCAPINTS; i++)
1006 c->x86_capability[i] ^= cleared_cpu_caps[i];
1007
1da177e4
LT
1008#ifdef CONFIG_X86_MCE
1009 mcheck_init(c);
1010#endif
74ff305b
HS
1011 select_idle_routine(c);
1012
8bd99481 1013 if (c != &boot_cpu_data)
3b520b23 1014 mtrr_ap_init();
1da177e4 1015#ifdef CONFIG_NUMA
3019e8eb 1016 numa_add_cpu(smp_processor_id());
1da177e4 1017#endif
2b16a235 1018
1da177e4 1019}
1da177e4 1020
/*
 * "noclflush" boot option: mark CLFLUSH as unsupported so the rest of
 * the kernel avoids the instruction (presumably a chicken bit for CPUs
 * or platforms where CLFLUSH misbehaves -- NOTE(review): confirm intent
 * against the boot-parameters documentation).
 */
static __init int setup_noclflush(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
	return 1;	/* option consumed */
}
__setup("noclflush", setup_noclflush);
1027
e6982c67 1028void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
1da177e4
LT
1029{
1030 if (c->x86_model_id[0])
04e1ba85 1031 printk(KERN_INFO "%s", c->x86_model_id);
1da177e4 1032
04e1ba85
TG
1033 if (c->x86_mask || c->cpuid_level >= 0)
1034 printk(KERN_CONT " stepping %02x\n", c->x86_mask);
1da177e4 1035 else
04e1ba85 1036 printk(KERN_CONT "\n");
1da177e4
LT
1037}
1038
ac72e788
AK
1039static __init int setup_disablecpuid(char *arg)
1040{
1041 int bit;
1042 if (get_option(&arg, &bit) && bit < NCAPINTS*32)
1043 setup_clear_cpu_cap(bit);
1044 else
1045 return 0;
1046 return 1;
1047}
1048__setup("clearcpuid=", setup_disablecpuid);
1049
/*
 * Get CPU information for use by the procfs.
 */

/*
 * seq_file ->show() callback for /proc/cpuinfo: emit one record for the
 * cpuinfo_x86 passed in @v.  The output layout is parsed by userspace,
 * so field names and ordering must not change.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	struct cpuinfo_x86 *c = v;
	int cpu = 0, i;

	/*
	 * These flag bits must match the definitions in <asm/cpufeature.h>.
	 * NULL means this bit is undefined or reserved; either way it doesn't
	 * have meaning as far as Linux is concerned. Note that it's important
	 * to realize there is a difference between this table and CPUID -- if
	 * applications want to get the raw CPUID data, they should access
	 * /dev/cpu/<cpu_nr>/cpuid instead.
	 *
	 * The table is strictly positional: entry [i] names capability bit i,
	 * 32 entries per capability word.  Do not reorder.
	 */
	static const char *const x86_cap_flags[] = {
		/* Intel-defined */
		"fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
		"cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
		"pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
		"fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe",

		/* AMD-defined */
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
		NULL, "fxsr_opt", "pdpe1gb", "rdtscp", NULL, "lm",
		"3dnowext", "3dnow",

		/* Transmeta-defined */
		"recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Other (Linux-defined) */
		"cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr",
		NULL, NULL, NULL, NULL,
		"constant_tsc", "up", NULL, "arch_perfmon",
		"pebs", "bts", NULL, "sync_rdtsc",
		"rep_good", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Intel-defined (#2) */
		"pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
		"tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
		NULL, NULL, "dca", "sse4_1", "sse4_2", NULL, NULL, "popcnt",
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* VIA/Cyrix/Centaur-defined */
		NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
		"ace2", "ace2_en", "phe", "phe_en", "pmm", "pmm_en", NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* AMD-defined (#2) */
		"lahf_lm", "cmp_legacy", "svm", "extapic",
		"cr8_legacy", "abm", "sse4a", "misalignsse",
		"3dnowprefetch", "osvw", "ibs", "sse5",
		"skinit", "wdt", NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Auxiliary (Linux-defined) */
		"ida", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	};
	/* Positional names for bits of c->x86_power (CPUID 0x80000007 EDX) */
	static const char *const x86_power_flags[] = {
		"ts",	/* temperature sensor */
		"fid",	/* frequency id control */
		"vid",	/* voltage id control */
		"ttp",	/* thermal trip */
		"tm",
		"stc",
		"100mhzsteps",
		"hwpstate",
		"",	/* tsc invariant mapped to constant_tsc */
		/* nothing */
	};


#ifdef CONFIG_SMP
	cpu = c->cpu_index;
#endif

	seq_printf(m, "processor\t: %u\n"
		   "vendor_id\t: %s\n"
		   "cpu family\t: %d\n"
		   "model\t\t: %d\n"
		   "model name\t: %s\n",
		   (unsigned)cpu,
		   c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
		   c->x86,
		   (int)c->x86_model,
		   c->x86_model_id[0] ? c->x86_model_id : "unknown");

	if (c->x86_mask || c->cpuid_level >= 0)
		seq_printf(m, "stepping\t: %d\n", c->x86_mask);
	else
		seq_printf(m, "stepping\t: unknown\n");

	if (cpu_has(c, X86_FEATURE_TSC)) {
		unsigned int freq = cpufreq_quick_get((unsigned)cpu);

		/* cpufreq returns 0 when it has no data; fall back to cpu_khz */
		if (!freq)
			freq = cpu_khz;
		seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
			   freq / 1000, (freq % 1000));
	}

	/* Cache size */
	if (c->x86_cache_size >= 0)
		seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);

#ifdef CONFIG_SMP
	/* Topology lines only make sense with HT siblings or multiple cores */
	if (smp_num_siblings * c->x86_max_cores > 1) {
		seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
		seq_printf(m, "siblings\t: %d\n",
			   cpus_weight(per_cpu(cpu_core_map, cpu)));
		seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
		seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
	}
#endif

	/* fpu/fpu_exception/wp are unconditionally "yes" on x86-64 */
	seq_printf(m,
		   "fpu\t\t: yes\n"
		   "fpu_exception\t: yes\n"
		   "cpuid level\t: %d\n"
		   "wp\t\t: yes\n"
		   "flags\t\t:",
		   c->cpuid_level);

	for (i = 0; i < 32*NCAPINTS; i++)
		if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
			seq_printf(m, " %s", x86_cap_flags[i]);

	seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
		   c->loops_per_jiffy/(500000/HZ),
		   (c->loops_per_jiffy/(5000/HZ)) % 100);

	if (c->x86_tlbsize > 0)
		seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
	seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
	seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);

	seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
		   c->x86_phys_bits, c->x86_virt_bits);

	seq_printf(m, "power management:");
	for (i = 0; i < 32; i++) {
		if (c->x86_power & (1 << i)) {
			/* Unnamed power bits are printed as "[bit]" */
			if (i < ARRAY_SIZE(x86_power_flags) &&
			    x86_power_flags[i])
				seq_printf(m, "%s%s",
					   x86_power_flags[i][0]?" ":"",
					   x86_power_flags[i]);
			else
				seq_printf(m, " [%d]", i);
		}
	}

	seq_printf(m, "\n\n");

	return 0;
}
1219
1220static void *c_start(struct seq_file *m, loff_t *pos)
1221{
92cb7612 1222 if (*pos == 0) /* just in case, cpu 0 is not the first */
c0c52d28
AH
1223 *pos = first_cpu(cpu_online_map);
1224 if ((*pos) < NR_CPUS && cpu_online(*pos))
92cb7612
MT
1225 return &cpu_data(*pos);
1226 return NULL;
1da177e4
LT
1227}
1228
/*
 * seq_file ->next(): advance *pos to the next online CPU and delegate
 * to c_start() for validation and record lookup.
 */
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	*pos = next_cpu(*pos, cpu_online_map);
	return c_start(m, pos);
}
1234
/*
 * seq_file ->stop(): nothing to release -- iteration state lives
 * entirely in *pos, so this is intentionally empty.
 */
static void c_stop(struct seq_file *m, void *v)
{
}
1238
/* seq_file operations backing /proc/cpuinfo */
const struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = show_cpuinfo,
};
This page took 0.437104 seconds and 5 git commands to generate.