x86: change NR_CPUS arrays in numa_64
[deliverable/linux.git] / arch / x86 / kernel / setup_64.c
CommitLineData
1da177e4 1/*
1da177e4 2 * Copyright (C) 1995 Linus Torvalds
1da177e4
LT
3 */
4
5/*
6 * This file handles the architecture-dependent parts of initialization
7 */
8
9#include <linux/errno.h>
10#include <linux/sched.h>
11#include <linux/kernel.h>
12#include <linux/mm.h>
13#include <linux/stddef.h>
14#include <linux/unistd.h>
15#include <linux/ptrace.h>
16#include <linux/slab.h>
17#include <linux/user.h>
18#include <linux/a.out.h>
894673ee 19#include <linux/screen_info.h>
1da177e4
LT
20#include <linux/ioport.h>
21#include <linux/delay.h>
1da177e4
LT
22#include <linux/init.h>
23#include <linux/initrd.h>
24#include <linux/highmem.h>
25#include <linux/bootmem.h>
26#include <linux/module.h>
27#include <asm/processor.h>
28#include <linux/console.h>
29#include <linux/seq_file.h>
aac04b32 30#include <linux/crash_dump.h>
1da177e4
LT
31#include <linux/root_dev.h>
32#include <linux/pci.h>
5b83683f 33#include <linux/efi.h>
1da177e4
LT
34#include <linux/acpi.h>
35#include <linux/kallsyms.h>
36#include <linux/edd.h>
bbfceef4 37#include <linux/mmzone.h>
5f5609df 38#include <linux/kexec.h>
95235ca2 39#include <linux/cpufreq.h>
e9928674 40#include <linux/dmi.h>
17a941d8 41#include <linux/dma-mapping.h>
681558fd 42#include <linux/ctype.h>
746ef0cd 43#include <linux/uaccess.h>
bbfceef4 44
1da177e4
LT
45#include <asm/mtrr.h>
46#include <asm/uaccess.h>
47#include <asm/system.h>
e4026440 48#include <asm/vsyscall.h>
1da177e4
LT
49#include <asm/io.h>
50#include <asm/smp.h>
51#include <asm/msr.h>
52#include <asm/desc.h>
53#include <video/edid.h>
54#include <asm/e820.h>
55#include <asm/dma.h>
aaf23042 56#include <asm/gart.h>
1da177e4
LT
57#include <asm/mpspec.h>
58#include <asm/mmu_context.h>
1da177e4
LT
59#include <asm/proto.h>
60#include <asm/setup.h>
61#include <asm/mach_apic.h>
62#include <asm/numa.h>
2bc0414e 63#include <asm/sections.h>
f2d3efed 64#include <asm/dmi.h>
00bf4098 65#include <asm/cacheflush.h>
af7a78e9 66#include <asm/mce.h>
eee3af4a 67#include <asm/ds.h>
df3825c5 68#include <asm/topology.h>
1da177e4 69
746ef0cd
GOC
70#ifdef CONFIG_PARAVIRT
71#include <asm/paravirt.h>
72#else
73#define ARCH_SETUP
74#endif
75
1da177e4
LT
76/*
77 * Machine setup..
78 */
79
6c231b7b 80struct cpuinfo_x86 boot_cpu_data __read_mostly;
2ee60e17 81EXPORT_SYMBOL(boot_cpu_data);
1da177e4
LT
82
83unsigned long mmu_cr4_features;
84
1da177e4
LT
85/* Boot loader ID as an integer, for the benefit of proc_dointvec */
86int bootloader_type;
87
88unsigned long saved_video_mode;
89
f039b754
AK
90int force_mwait __cpuinitdata;
91
04e1ba85 92/*
f2d3efed
AK
93 * Early DMI memory
94 */
95int dmi_alloc_index;
96char dmi_alloc_data[DMI_MAX_DATA];
97
1da177e4
LT
98/*
99 * Setup options
100 */
1da177e4 101struct screen_info screen_info;
2ee60e17 102EXPORT_SYMBOL(screen_info);
1da177e4
LT
103struct sys_desc_table_struct {
104 unsigned short length;
105 unsigned char table[0];
106};
107
108struct edid_info edid_info;
ba70710e 109EXPORT_SYMBOL_GPL(edid_info);
1da177e4
LT
110
111extern int root_mountflags;
1da177e4 112
adf48856 113char __initdata command_line[COMMAND_LINE_SIZE];
1da177e4
LT
114
115struct resource standard_io_resources[] = {
116 { .name = "dma1", .start = 0x00, .end = 0x1f,
117 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
118 { .name = "pic1", .start = 0x20, .end = 0x21,
119 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
120 { .name = "timer0", .start = 0x40, .end = 0x43,
121 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
122 { .name = "timer1", .start = 0x50, .end = 0x53,
123 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
124 { .name = "keyboard", .start = 0x60, .end = 0x6f,
125 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
126 { .name = "dma page reg", .start = 0x80, .end = 0x8f,
127 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
128 { .name = "pic2", .start = 0xa0, .end = 0xa1,
129 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
130 { .name = "dma2", .start = 0xc0, .end = 0xdf,
131 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
132 { .name = "fpu", .start = 0xf0, .end = 0xff,
133 .flags = IORESOURCE_BUSY | IORESOURCE_IO }
134};
135
1da177e4
LT
136#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
137
c9cce83d 138static struct resource data_resource = {
1da177e4
LT
139 .name = "Kernel data",
140 .start = 0,
141 .end = 0,
142 .flags = IORESOURCE_RAM,
143};
c9cce83d 144static struct resource code_resource = {
1da177e4
LT
145 .name = "Kernel code",
146 .start = 0,
147 .end = 0,
148 .flags = IORESOURCE_RAM,
149};
c9cce83d 150static struct resource bss_resource = {
00bf4098
BW
151 .name = "Kernel bss",
152 .start = 0,
153 .end = 0,
154 .flags = IORESOURCE_RAM,
155};
1da177e4 156
8c61b900
TG
157static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);
158
2c8c0e6b
AK
159#ifdef CONFIG_PROC_VMCORE
160/* elfcorehdr= specifies the location of elf core header
161 * stored by the crashed kernel. This option will be passed
162 * by kexec loader to the capture kernel.
163 */
164static int __init setup_elfcorehdr(char *arg)
681558fd 165{
2c8c0e6b
AK
166 char *end;
167 if (!arg)
168 return -EINVAL;
169 elfcorehdr_addr = memparse(arg, &end);
170 return end > arg ? 0 : -EINVAL;
681558fd 171}
2c8c0e6b 172early_param("elfcorehdr", setup_elfcorehdr);
e2c03888
AK
173#endif
174
2b97690f 175#ifndef CONFIG_NUMA
bbfceef4
MT
176static void __init
177contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
1da177e4 178{
bbfceef4
MT
179 unsigned long bootmap_size, bootmap;
180
bbfceef4
MT
181 bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
182 bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
183 if (bootmap == -1L)
04e1ba85 184 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
bbfceef4 185 bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
5cb248ab
MG
186 e820_register_active_regions(0, start_pfn, end_pfn);
187 free_bootmem_with_active_regions(0, end_pfn);
bbfceef4 188 reserve_bootmem(bootmap, bootmap_size);
04e1ba85 189}
1da177e4
LT
190#endif
191
1da177e4
LT
192#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
193struct edd edd;
194#ifdef CONFIG_EDD_MODULE
195EXPORT_SYMBOL(edd);
196#endif
197/**
198 * copy_edd() - Copy the BIOS EDD information
199 * from boot_params into a safe place.
200 *
201 */
202static inline void copy_edd(void)
203{
30c82645
PA
204 memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
205 sizeof(edd.mbr_signature));
206 memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
207 edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
208 edd.edd_info_nr = boot_params.eddbuf_entries;
1da177e4
LT
209}
210#else
/* EDD support not configured: nothing to copy. */
static inline void copy_edd(void)
{
}
214#endif
215
5c3391f9
BW
216#ifdef CONFIG_KEXEC
217static void __init reserve_crashkernel(void)
218{
219 unsigned long long free_mem;
220 unsigned long long crash_size, crash_base;
221 int ret;
222
04e1ba85
TG
223 free_mem =
224 ((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;
5c3391f9
BW
225
226 ret = parse_crashkernel(boot_command_line, free_mem,
227 &crash_size, &crash_base);
228 if (ret == 0 && crash_size) {
229 if (crash_base > 0) {
230 printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
231 "for crashkernel (System RAM: %ldMB)\n",
232 (unsigned long)(crash_size >> 20),
233 (unsigned long)(crash_base >> 20),
234 (unsigned long)(free_mem >> 20));
235 crashk_res.start = crash_base;
236 crashk_res.end = crash_base + crash_size - 1;
237 reserve_bootmem(crash_base, crash_size);
238 } else
239 printk(KERN_INFO "crashkernel reservation failed - "
240 "you have to specify a base address\n");
241 }
242}
243#else
244static inline void __init reserve_crashkernel(void)
245{}
246#endif
247
1da177e4 248#define EBDA_ADDR_POINTER 0x40E
ac71d12c
AK
249
250unsigned __initdata ebda_addr;
251unsigned __initdata ebda_size;
252
d504e39e 253static void __init discover_ebda(void)
1da177e4 254{
ac71d12c 255 /*
04e1ba85 256 * there is a real-mode segmented pointer pointing to the
1da177e4
LT
257 * 4K EBDA area at 0x40E
258 */
bdb96a66 259 ebda_addr = *(unsigned short *)__va(EBDA_ADDR_POINTER);
746ef0cd
GOC
260 /*
261 * There can be some situations, like paravirtualized guests,
262 * in which there is no available ebda information. In such
263 * case, just skip it
264 */
265 if (!ebda_addr) {
266 ebda_size = 0;
267 return;
268 }
269
ac71d12c
AK
270 ebda_addr <<= 4;
271
bdb96a66 272 ebda_size = *(unsigned short *)__va(ebda_addr);
ac71d12c
AK
273
274 /* Round EBDA up to pages */
275 if (ebda_size == 0)
276 ebda_size = 1;
277 ebda_size <<= 10;
278 ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
279 if (ebda_size > 64*1024)
280 ebda_size = 64*1024;
1da177e4
LT
281}
282
746ef0cd 283/* Overridden in paravirt.c if CONFIG_PARAVIRT */
e3cfac84 284void __attribute__((weak)) __init memory_setup(void)
746ef0cd
GOC
285{
286 machine_specific_memory_setup();
287}
288
1da177e4
LT
289void __init setup_arch(char **cmdline_p)
290{
04e1ba85
TG
291 unsigned i;
292
adf48856 293 printk(KERN_INFO "Command line: %s\n", boot_command_line);
43c85c9c 294
30c82645
PA
295 ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
296 screen_info = boot_params.screen_info;
297 edid_info = boot_params.edid_info;
298 saved_video_mode = boot_params.hdr.vid_mode;
299 bootloader_type = boot_params.hdr.type_of_loader;
1da177e4
LT
300
301#ifdef CONFIG_BLK_DEV_RAM
30c82645
PA
302 rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
303 rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0);
304 rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
1da177e4 305#endif
5b83683f
HY
306#ifdef CONFIG_EFI
307 if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
308 "EL64", 4))
309 efi_enabled = 1;
310#endif
746ef0cd
GOC
311
312 ARCH_SETUP
313
314 memory_setup();
1da177e4
LT
315 copy_edd();
316
30c82645 317 if (!boot_params.hdr.root_flags)
1da177e4
LT
318 root_mountflags &= ~MS_RDONLY;
319 init_mm.start_code = (unsigned long) &_text;
320 init_mm.end_code = (unsigned long) &_etext;
321 init_mm.end_data = (unsigned long) &_edata;
322 init_mm.brk = (unsigned long) &_end;
323
e3ebadd9
LT
324 code_resource.start = virt_to_phys(&_text);
325 code_resource.end = virt_to_phys(&_etext)-1;
326 data_resource.start = virt_to_phys(&_etext);
327 data_resource.end = virt_to_phys(&_edata)-1;
00bf4098
BW
328 bss_resource.start = virt_to_phys(&__bss_start);
329 bss_resource.end = virt_to_phys(&__bss_stop)-1;
1da177e4 330
1da177e4
LT
331 early_identify_cpu(&boot_cpu_data);
332
adf48856 333 strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
2c8c0e6b
AK
334 *cmdline_p = command_line;
335
336 parse_early_param();
337
338 finish_e820_parsing();
9ca33eb6 339
aaf23042
YL
340 early_gart_iommu_check();
341
5cb248ab 342 e820_register_active_regions(0, 0, -1UL);
1da177e4
LT
343 /*
344 * partially used pages are not usable - thus
345 * we are rounding upwards:
346 */
347 end_pfn = e820_end_of_ram();
caff0710 348 num_physpages = end_pfn;
1da177e4
LT
349
350 check_efer();
351
ac71d12c
AK
352 discover_ebda();
353
1da177e4 354 init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
5b83683f
HY
355 if (efi_enabled)
356 efi_init();
1da177e4 357
f2d3efed
AK
358 dmi_scan_machine();
359
b02aae9c
RH
360 io_delay_init();
361
71fff5e6 362#ifdef CONFIG_SMP
df3825c5 363 /* setup to use the early static init tables during kernel startup */
3b419089 364 x86_cpu_to_apicid_early_ptr = (void *)&x86_cpu_to_apicid_init;
df3825c5 365 x86_cpu_to_node_map_early_ptr = (void *)&x86_cpu_to_node_map_init;
71fff5e6
MT
366#endif
367
888ba6c6 368#ifdef CONFIG_ACPI
1da177e4
LT
369 /*
370 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
371 * Call this early for SRAT node setup.
372 */
373 acpi_boot_table_init();
374#endif
375
caff0710
JB
376 /* How many end-of-memory variables you have, grandma! */
377 max_low_pfn = end_pfn;
378 max_pfn = end_pfn;
379 high_memory = (void *)__va(end_pfn * PAGE_SIZE - 1) + 1;
380
5cb248ab
MG
381 /* Remove active ranges so rediscovery with NUMA-awareness happens */
382 remove_all_active_ranges();
383
1da177e4
LT
384#ifdef CONFIG_ACPI_NUMA
385 /*
386 * Parse SRAT to discover nodes.
387 */
388 acpi_numa_init();
389#endif
390
2b97690f 391#ifdef CONFIG_NUMA
04e1ba85 392 numa_initmem_init(0, end_pfn);
1da177e4 393#else
bbfceef4 394 contig_initmem_init(0, end_pfn);
1da177e4
LT
395#endif
396
397 /* Reserve direct mapping */
04e1ba85 398 reserve_bootmem_generic(table_start << PAGE_SHIFT,
1da177e4
LT
399 (table_end - table_start) << PAGE_SHIFT);
400
401 /* reserve kernel */
ceee8822
AK
402 reserve_bootmem_generic(__pa_symbol(&_text),
403 __pa_symbol(&_end) - __pa_symbol(&_text));
1da177e4
LT
404
405 /*
406 * reserve physical page 0 - it's a special BIOS page on many boxes,
407 * enabling clean reboots, SMP operation, laptop functions.
408 */
409 reserve_bootmem_generic(0, PAGE_SIZE);
410
411 /* reserve ebda region */
ac71d12c
AK
412 if (ebda_addr)
413 reserve_bootmem_generic(ebda_addr, ebda_size);
076422d2
AS
414#ifdef CONFIG_NUMA
415 /* reserve nodemap region */
416 if (nodemap_addr)
417 reserve_bootmem_generic(nodemap_addr, nodemap_size);
418#endif
1da177e4
LT
419
420#ifdef CONFIG_SMP
1da177e4 421 /* Reserve SMP trampoline */
90b1c208 422 reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, 2*PAGE_SIZE);
1da177e4
LT
423#endif
424
673d5b43 425#ifdef CONFIG_ACPI_SLEEP
1da177e4 426 /*
04e1ba85 427 * Reserve low memory region for sleep support.
1da177e4 428 */
04e1ba85
TG
429 acpi_reserve_bootmem();
430#endif
5b83683f
HY
431
432 if (efi_enabled) {
433 efi_map_memmap();
434 efi_reserve_bootmem();
435 }
436
04e1ba85
TG
437 /*
438 * Find and reserve possible boot-time SMP configuration:
439 */
1da177e4 440 find_smp_config();
1da177e4 441#ifdef CONFIG_BLK_DEV_INITRD
30c82645
PA
442 if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
443 unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
444 unsigned long ramdisk_size = boot_params.hdr.ramdisk_size;
445 unsigned long ramdisk_end = ramdisk_image + ramdisk_size;
446 unsigned long end_of_mem = end_pfn << PAGE_SHIFT;
447
448 if (ramdisk_end <= end_of_mem) {
449 reserve_bootmem_generic(ramdisk_image, ramdisk_size);
450 initrd_start = ramdisk_image + PAGE_OFFSET;
451 initrd_end = initrd_start+ramdisk_size;
452 } else {
1da177e4 453 printk(KERN_ERR "initrd extends beyond end of memory "
30c82645
PA
454 "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
455 ramdisk_end, end_of_mem);
1da177e4
LT
456 initrd_start = 0;
457 }
458 }
459#endif
5c3391f9 460 reserve_crashkernel();
1da177e4 461 paging_init();
e4026440 462 map_vsyscall();
1da177e4 463
dfa4698c 464 early_quirks();
1da177e4 465
51f62e18
AR
466 /*
467 * set this early, so we dont allocate cpu0
468 * if MADT list doesnt list BSP first
469 * mpparse.c/MP_processor_info() allocates logical cpu numbers.
470 */
471 cpu_set(0, cpu_present_map);
888ba6c6 472#ifdef CONFIG_ACPI
1da177e4
LT
473 /*
474 * Read APIC and some other early information from ACPI tables.
475 */
476 acpi_boot_init();
477#endif
478
05b3cbd8
RT
479 init_cpu_to_node();
480
1da177e4
LT
481 /*
482 * get boot-time SMP configuration:
483 */
484 if (smp_found_config)
485 get_smp_config();
486 init_apic_mappings();
3e35a0e5 487 ioapic_init_mappings();
1da177e4
LT
488
489 /*
fc986db4 490 * We trust e820 completely. No explicit ROM probing in memory.
04e1ba85 491 */
c9cce83d 492 e820_reserve_resources(&code_resource, &data_resource, &bss_resource);
e8eff5ac 493 e820_mark_nosave_regions();
1da177e4 494
1da177e4 495 /* request I/O space for devices used on all i[345]86 PCs */
9d0ef4fd 496 for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
1da177e4 497 request_resource(&ioport_resource, &standard_io_resources[i]);
1da177e4 498
a1e97782 499 e820_setup_gap();
1da177e4 500
1da177e4
LT
501#ifdef CONFIG_VT
502#if defined(CONFIG_VGA_CONSOLE)
5b83683f
HY
503 if (!efi_enabled || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
504 conswitchp = &vga_con;
1da177e4
LT
505#elif defined(CONFIG_DUMMY_CONSOLE)
506 conswitchp = &dummy_con;
507#endif
508#endif
509}
510
e6982c67 511static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
1da177e4
LT
512{
513 unsigned int *v;
514
ebfcaa96 515 if (c->extended_cpuid_level < 0x80000004)
1da177e4
LT
516 return 0;
517
518 v = (unsigned int *) c->x86_model_id;
519 cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
520 cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
521 cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
522 c->x86_model_id[48] = 0;
523 return 1;
524}
525
526
e6982c67 527static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
1da177e4
LT
528{
529 unsigned int n, dummy, eax, ebx, ecx, edx;
530
ebfcaa96 531 n = c->extended_cpuid_level;
1da177e4
LT
532
533 if (n >= 0x80000005) {
534 cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
04e1ba85
TG
535 printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), "
536 "D cache %dK (%d bytes/line)\n",
537 edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
538 c->x86_cache_size = (ecx>>24) + (edx>>24);
1da177e4
LT
539 /* On K8 L1 TLB is inclusive, so don't count it */
540 c->x86_tlbsize = 0;
541 }
542
543 if (n >= 0x80000006) {
544 cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
545 ecx = cpuid_ecx(0x80000006);
546 c->x86_cache_size = ecx >> 16;
547 c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
548
549 printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
550 c->x86_cache_size, ecx & 0xFF);
551 }
1da177e4 552 if (n >= 0x80000008) {
04e1ba85 553 cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
1da177e4
LT
554 c->x86_virt_bits = (eax >> 8) & 0xff;
555 c->x86_phys_bits = eax & 0xff;
556 }
557}
558
3f098c26
AK
#ifdef CONFIG_NUMA
/*
 * Find an online NUMA node for a CPU whose own APIC id has no node
 * mapping, by scanning neighbouring APIC ids downwards then upwards.
 */
static int nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif
577
63518644
AK
578/*
579 * On a AMD dual core setup the lower bits of the APIC id distingush the cores.
580 * Assumes number of cores is a power of two.
581 */
582static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
583{
584#ifdef CONFIG_SMP
b41e2939 585 unsigned bits;
3f098c26 586#ifdef CONFIG_NUMA
f3fa8ebc 587 int cpu = smp_processor_id();
3f098c26 588 int node = 0;
60c1bc82 589 unsigned apicid = hard_smp_processor_id();
3f098c26 590#endif
a860b63c 591 bits = c->x86_coreid_bits;
b41e2939
AK
592
593 /* Low order bits define the core id (index of core in socket) */
f3fa8ebc 594 c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
b41e2939 595 /* Convert the APIC ID into the socket ID */
f3fa8ebc 596 c->phys_proc_id = phys_pkg_id(bits);
63518644
AK
597
598#ifdef CONFIG_NUMA
04e1ba85
TG
599 node = c->phys_proc_id;
600 if (apicid_to_node[apicid] != NUMA_NO_NODE)
601 node = apicid_to_node[apicid];
602 if (!node_online(node)) {
603 /* Two possibilities here:
604 - The CPU is missing memory and no node was created.
605 In that case try picking one from a nearby CPU
606 - The APIC IDs differ from the HyperTransport node IDs
607 which the K8 northbridge parsing fills in.
608 Assume they are all increased by a constant offset,
609 but in the same order as the HT nodeids.
610 If that doesn't result in a usable node fall back to the
611 path for the previous case. */
612
92cb7612 613 int ht_nodeid = apicid - (cpu_data(0).phys_proc_id << bits);
04e1ba85
TG
614
615 if (ht_nodeid >= 0 &&
616 apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
617 node = apicid_to_node[ht_nodeid];
618 /* Pick a nearby node */
619 if (!node_online(node))
620 node = nearby_node(apicid);
621 }
69d81fcd 622 numa_set_node(cpu, node);
3f098c26 623
e42f9437 624 printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
63518644 625#endif
63518644
AK
626#endif
627}
1da177e4 628
2b16a235 629static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
a860b63c
YL
630{
631#ifdef CONFIG_SMP
632 unsigned bits, ecx;
633
634 /* Multi core CPU? */
635 if (c->extended_cpuid_level < 0x80000008)
636 return;
637
638 ecx = cpuid_ecx(0x80000008);
639
640 c->x86_max_cores = (ecx & 0xff) + 1;
641
642 /* CPU telling us the core id bits shift? */
643 bits = (ecx >> 12) & 0xF;
644
645 /* Otherwise recompute */
646 if (bits == 0) {
647 while ((1 << bits) < c->x86_max_cores)
648 bits++;
649 }
650
651 c->x86_coreid_bits = bits;
652
653#endif
654}
655
fb79d22e
TG
656#define ENABLE_C1E_MASK 0x18000000
657#define CPUID_PROCESSOR_SIGNATURE 1
658#define CPUID_XFAM 0x0ff00000
659#define CPUID_XFAM_K8 0x00000000
660#define CPUID_XFAM_10H 0x00100000
661#define CPUID_XFAM_11H 0x00200000
662#define CPUID_XMOD 0x000f0000
663#define CPUID_XMOD_REV_F 0x00040000
664
665/* AMD systems with C1E don't have a working lAPIC timer. Check for that. */
666static __cpuinit int amd_apic_timer_broken(void)
667{
04e1ba85
TG
668 u32 lo, hi, eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
669
fb79d22e
TG
670 switch (eax & CPUID_XFAM) {
671 case CPUID_XFAM_K8:
672 if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
673 break;
674 case CPUID_XFAM_10H:
675 case CPUID_XFAM_11H:
676 rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
677 if (lo & ENABLE_C1E_MASK)
678 return 1;
679 break;
680 default:
681 /* err on the side of caution */
682 return 1;
683 }
684 return 0;
685}
686
2b16a235
AK
687static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
688{
689 early_init_amd_mc(c);
690
691 /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
692 if (c->x86_power & (1<<8))
693 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
694}
695
ed77504b 696static void __cpuinit init_amd(struct cpuinfo_x86 *c)
1da177e4 697{
7bcd3f34 698 unsigned level;
1da177e4 699
bc5e8fdf
LT
700#ifdef CONFIG_SMP
701 unsigned long value;
702
7d318d77
AK
703 /*
704 * Disable TLB flush filter by setting HWCR.FFDIS on K8
705 * bit 6 of msr C001_0015
04e1ba85 706 *
7d318d77
AK
707 * Errata 63 for SH-B3 steppings
708 * Errata 122 for all steppings (F+ have it disabled by default)
709 */
710 if (c->x86 == 15) {
711 rdmsrl(MSR_K8_HWCR, value);
712 value |= 1 << 6;
713 wrmsrl(MSR_K8_HWCR, value);
714 }
bc5e8fdf
LT
715#endif
716
1da177e4
LT
717 /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
718 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
5548fecd 719 clear_bit(0*32+31, (unsigned long *)&c->x86_capability);
04e1ba85 720
7bcd3f34
AK
721 /* On C+ stepping K8 rep microcode works well for copy/memset */
722 level = cpuid_eax(1);
04e1ba85
TG
723 if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) ||
724 level >= 0x0f58))
53756d37 725 set_cpu_cap(c, X86_FEATURE_REP_GOOD);
99741faa 726 if (c->x86 == 0x10 || c->x86 == 0x11)
53756d37 727 set_cpu_cap(c, X86_FEATURE_REP_GOOD);
7bcd3f34 728
18bd057b
AK
729 /* Enable workaround for FXSAVE leak */
730 if (c->x86 >= 6)
53756d37 731 set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);
18bd057b 732
e42f9437
RS
733 level = get_model_name(c);
734 if (!level) {
04e1ba85 735 switch (c->x86) {
1da177e4
LT
736 case 15:
737 /* Should distinguish Models here, but this is only
738 a fallback anyways. */
739 strcpy(c->x86_model_id, "Hammer");
04e1ba85
TG
740 break;
741 }
742 }
1da177e4
LT
743 display_cacheinfo(c);
744
faee9a5d
AK
745 /* Multi core CPU? */
746 if (c->extended_cpuid_level >= 0x80000008)
63518644 747 amd_detect_cmp(c);
1da177e4 748
67cddd94
AK
749 if (c->extended_cpuid_level >= 0x80000006 &&
750 (cpuid_edx(0x80000006) & 0xf000))
751 num_cache_leaves = 4;
752 else
753 num_cache_leaves = 3;
2049336f 754
0bd8acd1 755 if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x11)
53756d37 756 set_cpu_cap(c, X86_FEATURE_K8);
0bd8acd1 757
de421863
AK
758 /* MFENCE stops RDTSC speculation */
759 set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
f039b754
AK
760
761 /* Family 10 doesn't support C states in MWAIT so don't use it */
762 if (c->x86 == 0x10 && !force_mwait)
53756d37 763 clear_cpu_cap(c, X86_FEATURE_MWAIT);
fb79d22e
TG
764
765 if (amd_apic_timer_broken())
766 disable_apic_timer = 1;
1da177e4
LT
767}
768
1a53905a 769void __cpuinit detect_ht(struct cpuinfo_x86 *c)
1da177e4
LT
770{
771#ifdef CONFIG_SMP
04e1ba85
TG
772 u32 eax, ebx, ecx, edx;
773 int index_msb, core_bits;
94605eff
SS
774
775 cpuid(1, &eax, &ebx, &ecx, &edx);
776
94605eff 777
e42f9437 778 if (!cpu_has(c, X86_FEATURE_HT))
1da177e4 779 return;
04e1ba85 780 if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
e42f9437 781 goto out;
1da177e4 782
1da177e4 783 smp_num_siblings = (ebx & 0xff0000) >> 16;
94605eff 784
1da177e4
LT
785 if (smp_num_siblings == 1) {
786 printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
04e1ba85 787 } else if (smp_num_siblings > 1) {
94605eff 788
1da177e4 789 if (smp_num_siblings > NR_CPUS) {
04e1ba85
TG
790 printk(KERN_WARNING "CPU: Unsupported number of "
791 "siblings %d", smp_num_siblings);
1da177e4
LT
792 smp_num_siblings = 1;
793 return;
794 }
94605eff
SS
795
796 index_msb = get_count_order(smp_num_siblings);
f3fa8ebc 797 c->phys_proc_id = phys_pkg_id(index_msb);
3dd9d514 798
94605eff 799 smp_num_siblings = smp_num_siblings / c->x86_max_cores;
3dd9d514 800
04e1ba85 801 index_msb = get_count_order(smp_num_siblings);
94605eff
SS
802
803 core_bits = get_count_order(c->x86_max_cores);
3dd9d514 804
f3fa8ebc 805 c->cpu_core_id = phys_pkg_id(index_msb) &
94605eff 806 ((1 << core_bits) - 1);
1da177e4 807 }
e42f9437
RS
808out:
809 if ((c->x86_max_cores * smp_num_siblings) > 1) {
04e1ba85
TG
810 printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
811 c->phys_proc_id);
812 printk(KERN_INFO "CPU: Processor Core ID: %d\n",
813 c->cpu_core_id);
e42f9437
RS
814 }
815
1da177e4
LT
816#endif
817}
818
3dd9d514
AK
819/*
820 * find out the number of processor cores on the die
821 */
e6982c67 822static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
3dd9d514 823{
2bbc419f 824 unsigned int eax, t;
3dd9d514
AK
825
826 if (c->cpuid_level < 4)
827 return 1;
828
2bbc419f 829 cpuid_count(4, 0, &eax, &t, &t, &t);
3dd9d514
AK
830
831 if (eax & 0x1f)
832 return ((eax >> 26) + 1);
833 else
834 return 1;
835}
836
df0cc26b
AK
/* Bind the current CPU to the NUMA node SRAT mapped its APIC id to. */
static void srat_detect_node(void)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();
	int apicid = hard_smp_processor_id();

	/* Don't do the funky fallback heuristics the AMD version employs
	   for now. */
	node = apicid_to_node[apicid];
	if (node == NUMA_NO_NODE)
		node = first_node(node_online_map);
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
}
854
2b16a235
AK
855static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
856{
857 if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
858 (c->x86 == 0x6 && c->x86_model >= 0x0e))
859 set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
860}
861
e6982c67 862static void __cpuinit init_intel(struct cpuinfo_x86 *c)
1da177e4
LT
863{
864 /* Cache sizes */
865 unsigned n;
866
867 init_intel_cacheinfo(c);
04e1ba85 868 if (c->cpuid_level > 9) {
0080e667
VP
869 unsigned eax = cpuid_eax(10);
870 /* Check for version and the number of counters */
871 if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
53756d37 872 set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
0080e667
VP
873 }
874
36b2a8d5
SE
875 if (cpu_has_ds) {
876 unsigned int l1, l2;
877 rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
ee58fad5 878 if (!(l1 & (1<<11)))
53756d37 879 set_cpu_cap(c, X86_FEATURE_BTS);
36b2a8d5 880 if (!(l1 & (1<<12)))
53756d37 881 set_cpu_cap(c, X86_FEATURE_PEBS);
36b2a8d5
SE
882 }
883
eee3af4a
MM
884
885 if (cpu_has_bts)
886 ds_init_intel(c);
887
ebfcaa96 888 n = c->extended_cpuid_level;
1da177e4
LT
889 if (n >= 0x80000008) {
890 unsigned eax = cpuid_eax(0x80000008);
891 c->x86_virt_bits = (eax >> 8) & 0xff;
892 c->x86_phys_bits = eax & 0xff;
af9c142d
SL
893 /* CPUID workaround for Intel 0F34 CPU */
894 if (c->x86_vendor == X86_VENDOR_INTEL &&
895 c->x86 == 0xF && c->x86_model == 0x3 &&
896 c->x86_mask == 0x4)
897 c->x86_phys_bits = 36;
1da177e4
LT
898 }
899
900 if (c->x86 == 15)
901 c->x86_cache_alignment = c->x86_clflush_size * 2;
39b3a791
AK
902 if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
903 (c->x86 == 0x6 && c->x86_model >= 0x0e))
53756d37 904 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
27fbe5b2 905 if (c->x86 == 6)
53756d37 906 set_cpu_cap(c, X86_FEATURE_REP_GOOD);
707fa8ed 907 set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
04e1ba85 908 c->x86_max_cores = intel_num_cpu_cores(c);
df0cc26b
AK
909
910 srat_detect_node();
1da177e4
LT
911}
912
672289e9 913static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
1da177e4
LT
914{
915 char *v = c->x86_vendor_id;
916
917 if (!strcmp(v, "AuthenticAMD"))
918 c->x86_vendor = X86_VENDOR_AMD;
919 else if (!strcmp(v, "GenuineIntel"))
920 c->x86_vendor = X86_VENDOR_INTEL;
921 else
922 c->x86_vendor = X86_VENDOR_UNKNOWN;
923}
924
925struct cpu_model_info {
926 int vendor;
927 int family;
928 char *model_names[16];
929};
930
931/* Do some early cpuid on the boot CPU to get some parameter that are
932 needed before check_bugs. Everything advanced is in identify_cpu
933 below. */
8c61b900 934static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
1da177e4 935{
a860b63c 936 u32 tfms, xlvl;
1da177e4
LT
937
938 c->loops_per_jiffy = loops_per_jiffy;
939 c->x86_cache_size = -1;
940 c->x86_vendor = X86_VENDOR_UNKNOWN;
941 c->x86_model = c->x86_mask = 0; /* So far unknown... */
942 c->x86_vendor_id[0] = '\0'; /* Unset */
943 c->x86_model_id[0] = '\0'; /* Unset */
944 c->x86_clflush_size = 64;
945 c->x86_cache_alignment = c->x86_clflush_size;
94605eff 946 c->x86_max_cores = 1;
a860b63c 947 c->x86_coreid_bits = 0;
ebfcaa96 948 c->extended_cpuid_level = 0;
1da177e4
LT
949 memset(&c->x86_capability, 0, sizeof c->x86_capability);
950
951 /* Get vendor name */
952 cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
953 (unsigned int *)&c->x86_vendor_id[0],
954 (unsigned int *)&c->x86_vendor_id[8],
955 (unsigned int *)&c->x86_vendor_id[4]);
04e1ba85 956
1da177e4
LT
957 get_cpu_vendor(c);
958
959 /* Initialize the standard set of capabilities */
960 /* Note that the vendor-specific code below might override */
961
962 /* Intel-defined flags: level 0x00000001 */
963 if (c->cpuid_level >= 0x00000001) {
964 __u32 misc;
965 cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
966 &c->x86_capability[0]);
967 c->x86 = (tfms >> 8) & 0xf;
968 c->x86_model = (tfms >> 4) & 0xf;
969 c->x86_mask = tfms & 0xf;
f5f786d0 970 if (c->x86 == 0xf)
1da177e4 971 c->x86 += (tfms >> 20) & 0xff;
f5f786d0 972 if (c->x86 >= 0x6)
1da177e4 973 c->x86_model += ((tfms >> 16) & 0xF) << 4;
04e1ba85 974 if (c->x86_capability[0] & (1<<19))
1da177e4 975 c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
1da177e4
LT
976 } else {
977 /* Have CPUID level 0 only - unheard of */
978 c->x86 = 4;
979 }
a158608b
AK
980
981#ifdef CONFIG_SMP
f3fa8ebc 982 c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
a158608b 983#endif
1da177e4
LT
984 /* AMD-defined flags: level 0x80000001 */
985 xlvl = cpuid_eax(0x80000000);
ebfcaa96 986 c->extended_cpuid_level = xlvl;
1da177e4
LT
987 if ((xlvl & 0xffff0000) == 0x80000000) {
988 if (xlvl >= 0x80000001) {
989 c->x86_capability[1] = cpuid_edx(0x80000001);
5b7abc6f 990 c->x86_capability[6] = cpuid_ecx(0x80000001);
1da177e4
LT
991 }
992 if (xlvl >= 0x80000004)
993 get_model_name(c); /* Default name */
994 }
995
996 /* Transmeta-defined flags: level 0x80860001 */
997 xlvl = cpuid_eax(0x80860000);
998 if ((xlvl & 0xffff0000) == 0x80860000) {
999 /* Don't set x86_cpuid_level here for now to not confuse. */
1000 if (xlvl >= 0x80860001)
1001 c->x86_capability[2] = cpuid_edx(0x80860001);
1002 }
1003
9566e91d
AH
1004 c->extended_cpuid_level = cpuid_eax(0x80000000);
1005 if (c->extended_cpuid_level >= 0x80000007)
1006 c->x86_power = cpuid_edx(0x80000007);
1007
a860b63c
YL
1008 switch (c->x86_vendor) {
1009 case X86_VENDOR_AMD:
1010 early_init_amd(c);
1011 break;
1012 }
1013
1014}
1015
/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	/* Basic detection: vendor string, family/model/stepping, CPUID flags. */
	early_identify_cpu(c);

	/* Pick up features scattered across miscellaneous CPUID leaves. */
	init_scattered_cpuid_features(c);

	c->apicid = phys_pkg_id(0);

	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		init_amd(c);
		break;

	case X86_VENDOR_INTEL:
		init_intel(c);
		break;

	case X86_VENDOR_UNKNOWN:
	default:
		display_cacheinfo(c);
		break;
	}

	/* Must run after the vendor hooks so the final flag set is visible. */
	select_idle_routine(c);
	detect_ht(c);

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0; i < NCAPINTS; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

#ifdef CONFIG_X86_MCE
	mcheck_init(c);
#endif
	/* APs copy the BSP's MTRR setup; the BSP programmed its own earlier. */
	if (c != &boot_cpu_data)
		mtrr_ap_init();
#ifdef CONFIG_NUMA
	numa_add_cpu(smp_processor_id());
#endif

	/*
	 * NOTE(review): early_identify_cpu() above already calls
	 * early_init_amd() for AMD parts, so the AMD case here looks
	 * redundant; the Intel case is only reached from here.  Verify
	 * against the early_init_* implementations before touching this.
	 */
	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		early_init_amd(c);
		break;
	case X86_VENDOR_INTEL:
		early_init_intel(c);
		break;
	}
}
1da177e4 1087
e6982c67 1088void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
1da177e4
LT
1089{
1090 if (c->x86_model_id[0])
04e1ba85 1091 printk(KERN_INFO "%s", c->x86_model_id);
1da177e4 1092
04e1ba85
TG
1093 if (c->x86_mask || c->cpuid_level >= 0)
1094 printk(KERN_CONT " stepping %02x\n", c->x86_mask);
1da177e4 1095 else
04e1ba85 1096 printk(KERN_CONT "\n");
1da177e4
LT
1097}
1098
1099/*
1100 * Get CPU information for use by the procfs.
1101 */
1102
1103static int show_cpuinfo(struct seq_file *m, void *v)
1104{
1105 struct cpuinfo_x86 *c = v;
04e1ba85 1106 int cpu = 0, i;
1da177e4 1107
04e1ba85 1108 /*
1da177e4
LT
1109 * These flag bits must match the definitions in <asm/cpufeature.h>.
1110 * NULL means this bit is undefined or reserved; either way it doesn't
1111 * have meaning as far as Linux is concerned. Note that it's important
1112 * to realize there is a difference between this table and CPUID -- if
1113 * applications want to get the raw CPUID data, they should access
1114 * /dev/cpu/<cpu_nr>/cpuid instead.
1115 */
121d7bf5 1116 static const char *const x86_cap_flags[] = {
1da177e4 1117 /* Intel-defined */
04e1ba85
TG
1118 "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
1119 "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
1120 "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
1121 "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe",
1da177e4
LT
1122
1123 /* AMD-defined */
3c3b73b6 1124 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1da177e4
LT
1125 NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
1126 NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
f790cd30
AK
1127 NULL, "fxsr_opt", "pdpe1gb", "rdtscp", NULL, "lm",
1128 "3dnowext", "3dnow",
1da177e4
LT
1129
1130 /* Transmeta-defined */
1131 "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
1132 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1133 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1134 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1135
1136 /* Other (Linux-defined) */
ec481536
PA
1137 "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr",
1138 NULL, NULL, NULL, NULL,
1139 "constant_tsc", "up", NULL, "arch_perfmon",
1140 "pebs", "bts", NULL, "sync_rdtsc",
1141 "rep_good", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1da177e4
LT
1142 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1143
1144 /* Intel-defined (#2) */
9d95dd84 1145 "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
dcf10307 1146 "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
e1054b39 1147 NULL, NULL, "dca", "sse4_1", "sse4_2", NULL, NULL, "popcnt",
1da177e4
LT
1148 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1149
5b7abc6f
PA
1150 /* VIA/Cyrix/Centaur-defined */
1151 NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
ec481536 1152 "ace2", "ace2_en", "phe", "phe_en", "pmm", "pmm_en", NULL, NULL,
5b7abc6f
PA
1153 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1154 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1155
1da177e4 1156 /* AMD-defined (#2) */
e1054b39
PA
1157 "lahf_lm", "cmp_legacy", "svm", "extapic",
1158 "cr8_legacy", "abm", "sse4a", "misalignsse",
1159 "3dnowprefetch", "osvw", "ibs", "sse5",
1160 "skinit", "wdt", NULL, NULL,
1da177e4 1161 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
5b7abc6f 1162 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1d67953f
VP
1163
1164 /* Auxiliary (Linux-defined) */
1165 "ida", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1166 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1167 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1168 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1da177e4 1169 };
121d7bf5 1170 static const char *const x86_power_flags[] = {
1da177e4
LT
1171 "ts", /* temperature sensor */
1172 "fid", /* frequency id control */
1173 "vid", /* voltage id control */
1174 "ttp", /* thermal trip */
1175 "tm",
3f98bc49 1176 "stc",
f790cd30
AK
1177 "100mhzsteps",
1178 "hwpstate",
d824395c
JR
1179 "", /* tsc invariant mapped to constant_tsc */
1180 /* nothing */
1da177e4
LT
1181 };
1182
1183
1184#ifdef CONFIG_SMP
92cb7612 1185 cpu = c->cpu_index;
1da177e4
LT
1186#endif
1187
04e1ba85
TG
1188 seq_printf(m, "processor\t: %u\n"
1189 "vendor_id\t: %s\n"
1190 "cpu family\t: %d\n"
1191 "model\t\t: %d\n"
1192 "model name\t: %s\n",
1193 (unsigned)cpu,
1194 c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
1195 c->x86,
1196 (int)c->x86_model,
1197 c->x86_model_id[0] ? c->x86_model_id : "unknown");
1198
1da177e4
LT
1199 if (c->x86_mask || c->cpuid_level >= 0)
1200 seq_printf(m, "stepping\t: %d\n", c->x86_mask);
1201 else
1202 seq_printf(m, "stepping\t: unknown\n");
04e1ba85
TG
1203
1204 if (cpu_has(c, X86_FEATURE_TSC)) {
92cb7612 1205 unsigned int freq = cpufreq_quick_get((unsigned)cpu);
04e1ba85 1206
95235ca2
VP
1207 if (!freq)
1208 freq = cpu_khz;
1da177e4 1209 seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
04e1ba85 1210 freq / 1000, (freq % 1000));
1da177e4
LT
1211 }
1212
1213 /* Cache size */
04e1ba85 1214 if (c->x86_cache_size >= 0)
1da177e4 1215 seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
04e1ba85 1216
1da177e4 1217#ifdef CONFIG_SMP
94605eff 1218 if (smp_num_siblings * c->x86_max_cores > 1) {
f3fa8ebc 1219 seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
08357611
MT
1220 seq_printf(m, "siblings\t: %d\n",
1221 cpus_weight(per_cpu(cpu_core_map, cpu)));
f3fa8ebc 1222 seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
94605eff 1223 seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
db468681 1224 }
04e1ba85 1225#endif
1da177e4
LT
1226
1227 seq_printf(m,
04e1ba85
TG
1228 "fpu\t\t: yes\n"
1229 "fpu_exception\t: yes\n"
1230 "cpuid level\t: %d\n"
1231 "wp\t\t: yes\n"
1232 "flags\t\t:",
1da177e4
LT
1233 c->cpuid_level);
1234
04e1ba85
TG
1235 for (i = 0; i < 32*NCAPINTS; i++)
1236 if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
1237 seq_printf(m, " %s", x86_cap_flags[i]);
1238
1da177e4
LT
1239 seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
1240 c->loops_per_jiffy/(500000/HZ),
1241 (c->loops_per_jiffy/(5000/HZ)) % 100);
1242
04e1ba85 1243 if (c->x86_tlbsize > 0)
1da177e4
LT
1244 seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
1245 seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
1246 seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
1247
04e1ba85 1248 seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
1da177e4
LT
1249 c->x86_phys_bits, c->x86_virt_bits);
1250
1251 seq_printf(m, "power management:");
04e1ba85
TG
1252 for (i = 0; i < 32; i++) {
1253 if (c->x86_power & (1 << i)) {
1254 if (i < ARRAY_SIZE(x86_power_flags) &&
1255 x86_power_flags[i])
1256 seq_printf(m, "%s%s",
1257 x86_power_flags[i][0]?" ":"",
1258 x86_power_flags[i]);
1259 else
1260 seq_printf(m, " [%d]", i);
1261 }
1da177e4 1262 }
1da177e4 1263
d31ddaa1 1264 seq_printf(m, "\n\n");
1da177e4
LT
1265
1266 return 0;
1267}
1268
1269static void *c_start(struct seq_file *m, loff_t *pos)
1270{
92cb7612 1271 if (*pos == 0) /* just in case, cpu 0 is not the first */
c0c52d28
AH
1272 *pos = first_cpu(cpu_online_map);
1273 if ((*pos) < NR_CPUS && cpu_online(*pos))
92cb7612
MT
1274 return &cpu_data(*pos);
1275 return NULL;
1da177e4
LT
1276}
1277
1278static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1279{
c0c52d28 1280 *pos = next_cpu(*pos, cpu_online_map);
1da177e4
LT
1281 return c_start(m, pos);
1282}
1283
/* seq_file ->stop: nothing to release — c_start() takes no locks. */
static void c_stop(struct seq_file *m, void *v)
{
}
1287
1288struct seq_operations cpuinfo_op = {
04e1ba85 1289 .start = c_start,
1da177e4
LT
1290 .next = c_next,
1291 .stop = c_stop,
1292 .show = show_cpuinfo,
1293};
This page took 0.441624 seconds and 5 git commands to generate.