/*
 * linux/arch/x86-64/kernel/setup.c
 *
 * Copyright (C) 1995 Linus Torvalds
 *
 * Nov 2001 Dave Jones <davej@suse.de>
 * Forked from i386 setup code.
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <asm/processor.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/kallsyms.h>
#include <linux/edd.h>
#include <linux/mmzone.h>
#include <linux/kexec.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>

#include <asm/mtrr.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <video/edid.h>
#include <asm/e820.h>
#include <asm/dma.h>
#include <asm/mpspec.h>
#include <asm/mmu_context.h>
#include <asm/bootsetup.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/mach_apic.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/dmi.h>

/*
 * Machine setup..
 */

struct cpuinfo_x86 boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);

unsigned long mmu_cr4_features;

/* Boot loader ID as an integer, for the benefit of proc_dointvec */
int bootloader_type;

unsigned long saved_video_mode;

/*
 * Early DMI memory
 */
int dmi_alloc_index;
char dmi_alloc_data[DMI_MAX_DATA];

/*
 * Setup options
 */
struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);
struct sys_desc_table_struct {
        unsigned short length;
        unsigned char table[0];
};

struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);

extern int root_mountflags;

char __initdata command_line[COMMAND_LINE_SIZE];

struct resource standard_io_resources[] = {
        { .name = "dma1", .start = 0x00, .end = 0x1f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "pic1", .start = 0x20, .end = 0x21,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "timer0", .start = 0x40, .end = 0x43,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "timer1", .start = 0x50, .end = 0x53,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "keyboard", .start = 0x60, .end = 0x6f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "dma page reg", .start = 0x80, .end = 0x8f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "pic2", .start = 0xa0, .end = 0xa1,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "dma2", .start = 0xc0, .end = 0xdf,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "fpu", .start = 0xf0, .end = 0xff,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO }
};

#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)

struct resource data_resource = {
        .name = "Kernel data",
        .start = 0,
        .end = 0,
        .flags = IORESOURCE_RAM,
};
struct resource code_resource = {
        .name = "Kernel code",
        .start = 0,
        .end = 0,
        .flags = IORESOURCE_RAM,
};

#ifdef CONFIG_PROC_VMCORE
/* elfcorehdr= specifies the location of elf core header
 * stored by the crashed kernel. This option will be passed
 * by kexec loader to the capture kernel.
 */
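/*
 * Note on the parsing below: memparse() also accepts an optional
 * K/M/G suffix on the value, and the "end > arg" check only verifies
 * that at least one character was actually consumed.
 */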
static int __init setup_elfcorehdr(char *arg)
{
        char *end;
        if (!arg)
                return -EINVAL;
        elfcorehdr_addr = memparse(arg, &end);
        return end > arg ? 0 : -EINVAL;
}
early_param("elfcorehdr", setup_elfcorehdr);
#endif

#ifndef CONFIG_NUMA
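/*
 * In the non-NUMA case all of memory is handed to one bootmem
 * allocator: bootmem_bootmap_pages() returns how many pages the
 * bootmem bitmap itself needs (one bit per page frame up to end_pfn),
 * the bitmap is placed in a free e820 area, and it is reserved again
 * at the end so it is never handed out as free memory.
 */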
static void __init
contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long bootmap_size, bootmap;

        bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
        bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
        if (bootmap == -1L)
                panic("Cannot find bootmem map of size %ld\n", bootmap_size);
        bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
        e820_register_active_regions(0, start_pfn, end_pfn);
        free_bootmem_with_active_regions(0, end_pfn);
        reserve_bootmem(bootmap, bootmap_size);
}
#endif

#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
 * copy_edd() - Copy the BIOS EDD information
 *              from boot_params into a safe place.
 *
 */
static inline void copy_edd(void)
{
        memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
        memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
        edd.mbr_signature_nr = EDD_MBR_SIG_NR;
        edd.edd_info_nr = EDD_NR;
}
#else
static inline void copy_edd(void)
{
}
#endif

#define EBDA_ADDR_POINTER 0x40E

unsigned __initdata ebda_addr;
unsigned __initdata ebda_size;

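/*
 * Worked example for the code below: if the BIOS word at 0x40E holds
 * 0x9fc0, the EBDA starts at 0x9fc0 << 4 = 0x9fc00.  The first byte of
 * the EBDA conventionally holds its size in KiB, so the value read
 * there is shifted left by 10, rounded up to whole pages and capped at
 * 64KB as a sanity limit.
 */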
static void discover_ebda(void)
{
        /*
         * there is a real-mode segmented pointer pointing to the
         * 4K EBDA area at 0x40E
         */
        ebda_addr = *(unsigned short *)EBDA_ADDR_POINTER;
        ebda_addr <<= 4;

        ebda_size = *(unsigned short *)(unsigned long)ebda_addr;

        /* Round EBDA up to pages */
        if (ebda_size == 0)
                ebda_size = 1;
        ebda_size <<= 10;
        ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
        if (ebda_size > 64*1024)
                ebda_size = 64*1024;
}

void __init setup_arch(char **cmdline_p)
{
        printk(KERN_INFO "Command line: %s\n", boot_command_line);

        ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
        screen_info = SCREEN_INFO;
        edid_info = EDID_INFO;
        saved_video_mode = SAVED_VIDEO_MODE;
        bootloader_type = LOADER_TYPE;

#ifdef CONFIG_BLK_DEV_RAM
        rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
        rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
        rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif
        setup_memory_region();
        copy_edd();

        if (!MOUNT_ROOT_RDONLY)
                root_mountflags &= ~MS_RDONLY;
        init_mm.start_code = (unsigned long) &_text;
        init_mm.end_code = (unsigned long) &_etext;
        init_mm.end_data = (unsigned long) &_edata;
        init_mm.brk = (unsigned long) &_end;

        code_resource.start = virt_to_phys(&_text);
        code_resource.end = virt_to_phys(&_etext)-1;
        data_resource.start = virt_to_phys(&_etext);
        data_resource.end = virt_to_phys(&_edata)-1;

        early_identify_cpu(&boot_cpu_data);

        strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
        *cmdline_p = command_line;

        parse_early_param();

        finish_e820_parsing();

        e820_register_active_regions(0, 0, -1UL);
        /*
         * partially used pages are not usable - thus
         * we are rounding upwards:
         */
        end_pfn = e820_end_of_ram();
        num_physpages = end_pfn;

        check_efer();

        discover_ebda();

        init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));

        dmi_scan_machine();

        zap_low_mappings(0);

#ifdef CONFIG_ACPI
        /*
         * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
         * Call this early for SRAT node setup.
         */
        acpi_boot_table_init();
#endif

        /* How many end-of-memory variables you have, grandma! */
        max_low_pfn = end_pfn;
        max_pfn = end_pfn;
        high_memory = (void *)__va(end_pfn * PAGE_SIZE - 1) + 1;

        /* Remove active ranges so rediscovery with NUMA-awareness happens */
        remove_all_active_ranges();

#ifdef CONFIG_ACPI_NUMA
        /*
         * Parse SRAT to discover nodes.
         */
        acpi_numa_init();
#endif

#ifdef CONFIG_NUMA
        numa_initmem_init(0, end_pfn);
#else
        contig_initmem_init(0, end_pfn);
#endif

        /* Reserve direct mapping */
        reserve_bootmem_generic(table_start << PAGE_SHIFT,
                                (table_end - table_start) << PAGE_SHIFT);

        /* reserve kernel */
        reserve_bootmem_generic(__pa_symbol(&_text),
                                __pa_symbol(&_end) - __pa_symbol(&_text));

        /*
         * reserve physical page 0 - it's a special BIOS page on many boxes,
         * enabling clean reboots, SMP operation, laptop functions.
         */
        reserve_bootmem_generic(0, PAGE_SIZE);

        /* reserve ebda region */
        if (ebda_addr)
                reserve_bootmem_generic(ebda_addr, ebda_size);
#ifdef CONFIG_NUMA
        /* reserve nodemap region */
        if (nodemap_addr)
                reserve_bootmem_generic(nodemap_addr, nodemap_size);
#endif

#ifdef CONFIG_SMP
        /*
         * But first pinch a few for the stack/trampoline stuff
         * FIXME: Don't need the extra page at 4K, but need to fix
         * trampoline before removing it. (see the GDT stuff)
         */
        reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);

        /* Reserve SMP trampoline */
        reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);
#endif

#ifdef CONFIG_ACPI_SLEEP
        /*
         * Reserve low memory region for sleep support.
         */
        acpi_reserve_bootmem();
#endif
        /*
         * Find and reserve possible boot-time SMP configuration:
         */
        find_smp_config();
#ifdef CONFIG_BLK_DEV_INITRD
        if (LOADER_TYPE && INITRD_START) {
                if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
                        reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
                        initrd_start = INITRD_START + PAGE_OFFSET;
                        initrd_end = initrd_start+INITRD_SIZE;
                }
                else {
                        printk(KERN_ERR "initrd extends beyond end of memory "
                               "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
                               (unsigned long)(INITRD_START + INITRD_SIZE),
                               (unsigned long)(end_pfn << PAGE_SHIFT));
                        initrd_start = 0;
                }
        }
#endif
#ifdef CONFIG_KEXEC
        if (crashk_res.start != crashk_res.end) {
                reserve_bootmem_generic(crashk_res.start,
                        crashk_res.end - crashk_res.start + 1);
        }
#endif

        paging_init();

#ifdef CONFIG_PCI
        early_quirks();
#endif

        /*
         * set this early, so we don't allocate cpu0
         * if the MADT list doesn't list the BSP first
         * mpparse.c/MP_processor_info() allocates logical cpu numbers.
         */
        cpu_set(0, cpu_present_map);
#ifdef CONFIG_ACPI
        /*
         * Read APIC and some other early information from ACPI tables.
         */
        acpi_boot_init();
#endif

        init_cpu_to_node();

        /*
         * get boot-time SMP configuration:
         */
        if (smp_found_config)
                get_smp_config();
        init_apic_mappings();

        /*
         * We trust e820 completely. No explicit ROM probing in memory.
         */
        e820_reserve_resources();
        e820_mark_nosave_regions();

        {
                unsigned i;
                /* request I/O space for devices used on all i[345]86 PCs */
                for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
                        request_resource(&ioport_resource, &standard_io_resources[i]);
        }

        e820_setup_gap();

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif
}

static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
        unsigned int *v;

        if (c->extended_cpuid_level < 0x80000004)
                return 0;

        v = (unsigned int *) c->x86_model_id;
        cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
        cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
        cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
        c->x86_model_id[48] = 0;
        return 1;
}


static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
        unsigned int n, dummy, eax, ebx, ecx, edx;

        n = c->extended_cpuid_level;

        if (n >= 0x80000005) {
                cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
                printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
                       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
                c->x86_cache_size = (ecx>>24) + (edx>>24);
                /* On K8 L1 TLB is inclusive, so don't count it */
                c->x86_tlbsize = 0;
        }

        if (n >= 0x80000006) {
                cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
                ecx = cpuid_ecx(0x80000006);
                c->x86_cache_size = ecx >> 16;
                c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);

                printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
                       c->x86_cache_size, ecx & 0xFF);
        }

        if (n >= 0x80000007)
                cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
        if (n >= 0x80000008) {
                cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
                c->x86_virt_bits = (eax >> 8) & 0xff;
                c->x86_phys_bits = eax & 0xff;
        }
}

#ifdef CONFIG_NUMA
static int nearby_node(int apicid)
{
        int i;
        for (i = apicid - 1; i >= 0; i--) {
                int node = apicid_to_node[i];
                if (node != NUMA_NO_NODE && node_online(node))
                        return node;
        }
        for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
                int node = apicid_to_node[i];
                if (node != NUMA_NO_NODE && node_online(node))
                        return node;
        }
        return first_node(node_online_map); /* Shouldn't happen */
}
#endif

/*
 * On an AMD dual core setup the lower bits of the APIC id distinguish the
 * cores.  Assumes the number of cores is a power of two.
 */
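/*
 * Illustrative example: on a dual core K8, cpuid_ecx(0x80000008) & 0xff
 * is 1 (two cores) and the core-id shift field is typically 0, so
 * "bits" is recomputed as 1; cpu_core_id then becomes the low bit of
 * the initial APIC id and phys_proc_id the APIC id shifted right by one.
 */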
static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        unsigned bits;
#ifdef CONFIG_NUMA
        int cpu = smp_processor_id();
        int node = 0;
        unsigned apicid = hard_smp_processor_id();
#endif
        unsigned ecx = cpuid_ecx(0x80000008);

        c->x86_max_cores = (ecx & 0xff) + 1;

        /* CPU telling us the core id bits shift? */
        bits = (ecx >> 12) & 0xF;

        /* Otherwise recompute */
        if (bits == 0) {
                while ((1 << bits) < c->x86_max_cores)
                        bits++;
        }

        /* Low order bits define the core id (index of core in socket) */
        c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
        /* Convert the APIC ID into the socket ID */
        c->phys_proc_id = phys_pkg_id(bits);

#ifdef CONFIG_NUMA
        node = c->phys_proc_id;
        if (apicid_to_node[apicid] != NUMA_NO_NODE)
                node = apicid_to_node[apicid];
        if (!node_online(node)) {
                /* Two possibilities here:
                   - The CPU is missing memory and no node was created.
                     In that case try picking one from a nearby CPU
                   - The APIC IDs differ from the HyperTransport node IDs
                     which the K8 northbridge parsing fills in.
                     Assume they are all increased by a constant offset,
                     but in the same order as the HT nodeids.
                     If that doesn't result in a usable node fall back to the
                     path for the previous case. */
                int ht_nodeid = apicid - (cpu_data[0].phys_proc_id << bits);
                if (ht_nodeid >= 0 &&
                    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
                        node = apicid_to_node[ht_nodeid];
                /* Pick a nearby node */
                if (!node_online(node))
                        node = nearby_node(apicid);
        }
        numa_set_node(cpu, node);

        printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
#endif
}

static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
        unsigned level;

#ifdef CONFIG_SMP
        unsigned long value;

        /*
         * Disable TLB flush filter by setting HWCR.FFDIS on K8
         * bit 6 of msr C001_0015
         *
         * Errata 63 for SH-B3 steppings
         * Errata 122 for all steppings (F+ have it disabled by default)
         */
        if (c->x86 == 15) {
                rdmsrl(MSR_K8_HWCR, value);
                value |= 1 << 6;
                wrmsrl(MSR_K8_HWCR, value);
        }
#endif

        /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
           3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
        clear_bit(0*32+31, &c->x86_capability);

        /* On C+ stepping K8 rep microcode works well for copy/memset */
        level = cpuid_eax(1);
        if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
                set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);

        /* Enable workaround for FXSAVE leak */
        if (c->x86 >= 6)
                set_bit(X86_FEATURE_FXSAVE_LEAK, &c->x86_capability);

        level = get_model_name(c);
        if (!level) {
                switch (c->x86) {
                case 15:
                        /* Should distinguish Models here, but this is only
                           a fallback anyways. */
                        strcpy(c->x86_model_id, "Hammer");
                        break;
                }
        }
        display_cacheinfo(c);

        /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
        if (c->x86_power & (1<<8))
                set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);

        /* Multi core CPU? */
        if (c->extended_cpuid_level >= 0x80000008)
                amd_detect_cmp(c);

        /* Fix cpuid4 emulation for more */
        num_cache_leaves = 3;

        /* RDTSC can be speculated around */
        clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
}

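/*
 * detect_ht() decodes the topology reported via CPUID leaf 1: the
 * number of logical processors per package (EBX bits 23:16) is split
 * with get_count_order() into core and thread fields, yielding
 * phys_proc_id (package id), cpu_core_id (core within the package) and
 * smp_num_siblings (threads per core).
 */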
static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        u32 eax, ebx, ecx, edx;
        int index_msb, core_bits;

        cpuid(1, &eax, &ebx, &ecx, &edx);

        if (!cpu_has(c, X86_FEATURE_HT))
                return;
        if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
                goto out;

        smp_num_siblings = (ebx & 0xff0000) >> 16;

        if (smp_num_siblings == 1) {
                printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
        } else if (smp_num_siblings > 1) {

                if (smp_num_siblings > NR_CPUS) {
                        printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
                        smp_num_siblings = 1;
                        return;
                }

                index_msb = get_count_order(smp_num_siblings);
                c->phys_proc_id = phys_pkg_id(index_msb);

                smp_num_siblings = smp_num_siblings / c->x86_max_cores;

                index_msb = get_count_order(smp_num_siblings);

                core_bits = get_count_order(c->x86_max_cores);

                c->cpu_core_id = phys_pkg_id(index_msb) &
                                 ((1 << core_bits) - 1);
        }
out:
        if ((c->x86_max_cores * smp_num_siblings) > 1) {
                printk(KERN_INFO "CPU: Physical Processor ID: %d\n", c->phys_proc_id);
                printk(KERN_INFO "CPU: Processor Core ID: %d\n", c->cpu_core_id);
        }

#endif
}

/*
 * find out the number of processor cores on the die
 */
static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
        unsigned int eax, t;

        if (c->cpuid_level < 4)
                return 1;

        cpuid_count(4, 0, &eax, &t, &t, &t);

        if (eax & 0x1f)
                return ((eax >> 26) + 1);
        else
                return 1;
}

static void srat_detect_node(void)
{
#ifdef CONFIG_NUMA
        unsigned node;
        int cpu = smp_processor_id();
        int apicid = hard_smp_processor_id();

        /* Don't do the funky fallback heuristics the AMD version employs
           for now. */
        node = apicid_to_node[apicid];
        if (node == NUMA_NO_NODE)
                node = first_node(node_online_map);
        numa_set_node(cpu, node);

        printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
}

static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
        /* Cache sizes */
        unsigned n;

        init_intel_cacheinfo(c);
        if (c->cpuid_level > 9) {
                unsigned eax = cpuid_eax(10);
                /* Check for version and the number of counters */
                if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
                        set_bit(X86_FEATURE_ARCH_PERFMON, &c->x86_capability);
        }

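        /*
         * Debug Store related checks: in IA32_MISC_ENABLE, bit 11 set
         * means BTS (branch trace store) is unavailable and bit 12 set
         * means PEBS is unavailable, so the features are only
         * advertised when the corresponding bit is clear.
         */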
        if (cpu_has_ds) {
                unsigned int l1, l2;
                rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
                if (!(l1 & (1<<11)))
                        set_bit(X86_FEATURE_BTS, c->x86_capability);
                if (!(l1 & (1<<12)))
                        set_bit(X86_FEATURE_PEBS, c->x86_capability);
        }

        n = c->extended_cpuid_level;
        if (n >= 0x80000008) {
                unsigned eax = cpuid_eax(0x80000008);
                c->x86_virt_bits = (eax >> 8) & 0xff;
                c->x86_phys_bits = eax & 0xff;
                /* CPUID workaround for Intel 0F34 CPU */
                if (c->x86_vendor == X86_VENDOR_INTEL &&
                    c->x86 == 0xF && c->x86_model == 0x3 &&
                    c->x86_mask == 0x4)
                        c->x86_phys_bits = 36;
        }

        if (c->x86 == 15)
                c->x86_cache_alignment = c->x86_clflush_size * 2;
        if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
            (c->x86 == 0x6 && c->x86_model >= 0x0e))
                set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
        if (c->x86 == 6)
                set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
        if (c->x86 == 15)
                set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
        else
                clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
        c->x86_max_cores = intel_num_cpu_cores(c);

        srat_detect_node();
}

static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
        char *v = c->x86_vendor_id;

        if (!strcmp(v, "AuthenticAMD"))
                c->x86_vendor = X86_VENDOR_AMD;
        else if (!strcmp(v, "GenuineIntel"))
                c->x86_vendor = X86_VENDOR_INTEL;
        else
                c->x86_vendor = X86_VENDOR_UNKNOWN;
}

struct cpu_model_info {
        int vendor;
        int family;
        char *model_names[16];
};

/* Do some early cpuid on the boot CPU to get some parameters that are
   needed before check_bugs. Everything advanced is in identify_cpu
   below. */
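/*
 * Example of the signature decoding done below: for tfms = 0x00000f48
 * the base family (bits 11:8) is 0xf, so the extended family
 * (bits 27:20, here 0) is added and the extended model (bits 19:16) is
 * prepended to the base model, giving x86 = 15, x86_model = 0x4 and
 * x86_mask (stepping) = 0x8.
 */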
void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
{
        u32 tfms;

        c->loops_per_jiffy = loops_per_jiffy;
        c->x86_cache_size = -1;
        c->x86_vendor = X86_VENDOR_UNKNOWN;
        c->x86_model = c->x86_mask = 0; /* So far unknown... */
        c->x86_vendor_id[0] = '\0'; /* Unset */
        c->x86_model_id[0] = '\0';  /* Unset */
        c->x86_clflush_size = 64;
        c->x86_cache_alignment = c->x86_clflush_size;
        c->x86_max_cores = 1;
        c->extended_cpuid_level = 0;
        memset(&c->x86_capability, 0, sizeof c->x86_capability);

        /* Get vendor name */
        cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
              (unsigned int *)&c->x86_vendor_id[0],
              (unsigned int *)&c->x86_vendor_id[8],
              (unsigned int *)&c->x86_vendor_id[4]);

        get_cpu_vendor(c);

        /* Initialize the standard set of capabilities */
        /* Note that the vendor-specific code below might override */

        /* Intel-defined flags: level 0x00000001 */
        if (c->cpuid_level >= 0x00000001) {
                __u32 misc;
                cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
                      &c->x86_capability[0]);
                c->x86 = (tfms >> 8) & 0xf;
                c->x86_model = (tfms >> 4) & 0xf;
                c->x86_mask = tfms & 0xf;
                if (c->x86 == 0xf)
                        c->x86 += (tfms >> 20) & 0xff;
                if (c->x86 >= 0x6)
                        c->x86_model += ((tfms >> 16) & 0xF) << 4;
                if (c->x86_capability[0] & (1<<19))
                        c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
        } else {
                /* Have CPUID level 0 only - unheard of */
                c->x86 = 4;
        }

#ifdef CONFIG_SMP
        c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
#endif
}

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
        int i;
        u32 xlvl;

        early_identify_cpu(c);

        /* AMD-defined flags: level 0x80000001 */
        xlvl = cpuid_eax(0x80000000);
        c->extended_cpuid_level = xlvl;
        if ((xlvl & 0xffff0000) == 0x80000000) {
                if (xlvl >= 0x80000001) {
                        c->x86_capability[1] = cpuid_edx(0x80000001);
                        c->x86_capability[6] = cpuid_ecx(0x80000001);
                }
                if (xlvl >= 0x80000004)
                        get_model_name(c); /* Default name */
        }

        /* Transmeta-defined flags: level 0x80860001 */
        xlvl = cpuid_eax(0x80860000);
        if ((xlvl & 0xffff0000) == 0x80860000) {
                /* Don't set x86_cpuid_level here for now to not confuse. */
                if (xlvl >= 0x80860001)
                        c->x86_capability[2] = cpuid_edx(0x80860001);
        }

        c->apicid = phys_pkg_id(0);

        /*
         * Vendor-specific initialization. In this section we
         * canonicalize the feature flags, meaning if there are
         * features a certain CPU supports which CPUID doesn't
         * tell us, CPUID claiming incorrect flags, or other bugs,
         * we handle them here.
         *
         * At the end of this section, c->x86_capability better
         * indicate the features this CPU genuinely supports!
         */
        switch (c->x86_vendor) {
        case X86_VENDOR_AMD:
                init_amd(c);
                break;

        case X86_VENDOR_INTEL:
                init_intel(c);
                break;

        case X86_VENDOR_UNKNOWN:
        default:
                display_cacheinfo(c);
                break;
        }

        select_idle_routine(c);
        detect_ht(c);

        /*
         * On SMP, boot_cpu_data holds the common feature set between
         * all CPUs; so make sure that we indicate which features are
         * common between the CPUs. The first time this routine gets
         * executed, c == &boot_cpu_data.
         */
        if (c != &boot_cpu_data) {
                /* AND the already accumulated flags with these */
                for (i = 0; i < NCAPINTS; i++)
                        boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
        }

#ifdef CONFIG_X86_MCE
        mcheck_init(c);
#endif
        if (c == &boot_cpu_data)
                mtrr_bp_init();
        else
                mtrr_ap_init();
#ifdef CONFIG_NUMA
        numa_add_cpu(smp_processor_id());
#endif
}

void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
        if (c->x86_model_id[0])
                printk("%s", c->x86_model_id);

        if (c->x86_mask || c->cpuid_level >= 0)
                printk(" stepping %02x\n", c->x86_mask);
        else
                printk("\n");
}

/*
 * Get CPU information for use by the procfs.
 */

static int show_cpuinfo(struct seq_file *m, void *v)
{
        struct cpuinfo_x86 *c = v;

        /*
         * These flag bits must match the definitions in <asm/cpufeature.h>.
         * NULL means this bit is undefined or reserved; either way it doesn't
         * have meaning as far as Linux is concerned. Note that it's important
         * to realize there is a difference between this table and CPUID -- if
         * applications want to get the raw CPUID data, they should access
         * /dev/cpu/<cpu_nr>/cpuid instead.
         */
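        /*
         * Indexing convention: entry i names capability word i/32,
         * bit i%32, which is exactly what cpu_has(c, i) tests in the
         * flags printing loop further down.
         */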
        static char *x86_cap_flags[] = {
                /* Intel-defined */
                "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
                "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
                "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
                "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", NULL,

                /* AMD-defined */
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
                NULL, "fxsr_opt", NULL, "rdtscp", NULL, "lm", "3dnowext", "3dnow",

                /* Transmeta-defined */
                "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

                /* Other (Linux-defined) */
                "cxmmx", NULL, "cyrix_arr", "centaur_mcr", NULL,
                "constant_tsc", NULL, NULL,
                "up", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

                /* Intel-defined (#2) */
                "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
                "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
                NULL, NULL, "dca", NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

                /* VIA/Cyrix/Centaur-defined */
                NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

                /* AMD-defined (#2) */
                "lahf_lm", "cmp_legacy", "svm", NULL, "cr8_legacy", NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        };
        static char *x86_power_flags[] = {
                "ts",   /* temperature sensor */
                "fid",  /* frequency id control */
                "vid",  /* voltage id control */
                "ttp",  /* thermal trip */
                "tm",
                "stc",
                NULL,
                /* nothing */   /* constant_tsc - moved to flags */
        };


#ifdef CONFIG_SMP
        if (!cpu_online(c-cpu_data))
                return 0;
#endif

        seq_printf(m, "processor\t: %u\n"
                   "vendor_id\t: %s\n"
                   "cpu family\t: %d\n"
                   "model\t\t: %d\n"
                   "model name\t: %s\n",
                   (unsigned)(c-cpu_data),
                   c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
                   c->x86,
                   (int)c->x86_model,
                   c->x86_model_id[0] ? c->x86_model_id : "unknown");

        if (c->x86_mask || c->cpuid_level >= 0)
                seq_printf(m, "stepping\t: %d\n", c->x86_mask);
        else
                seq_printf(m, "stepping\t: unknown\n");

        if (cpu_has(c, X86_FEATURE_TSC)) {
                unsigned int freq = cpufreq_quick_get((unsigned)(c-cpu_data));
                if (!freq)
                        freq = cpu_khz;
                seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
                           freq / 1000, (freq % 1000));
        }

        /* Cache size */
        if (c->x86_cache_size >= 0)
                seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);

#ifdef CONFIG_SMP
        if (smp_num_siblings * c->x86_max_cores > 1) {
                int cpu = c - cpu_data;
                seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
                seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu]));
                seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
                seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
        }
#endif

        seq_printf(m,
                   "fpu\t\t: yes\n"
                   "fpu_exception\t: yes\n"
                   "cpuid level\t: %d\n"
                   "wp\t\t: yes\n"
                   "flags\t\t:",
                   c->cpuid_level);

        {
                int i;
                for (i = 0; i < 32*NCAPINTS; i++)
                        if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
                                seq_printf(m, " %s", x86_cap_flags[i]);
        }

        seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
                   c->loops_per_jiffy/(500000/HZ),
                   (c->loops_per_jiffy/(5000/HZ)) % 100);

        if (c->x86_tlbsize > 0)
                seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
        seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
        seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);

        seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
                   c->x86_phys_bits, c->x86_virt_bits);

        seq_printf(m, "power management:");
        {
                unsigned i;
                for (i = 0; i < 32; i++)
                        if (c->x86_power & (1 << i)) {
                                if (i < ARRAY_SIZE(x86_power_flags) &&
                                    x86_power_flags[i])
                                        seq_printf(m, "%s%s",
                                                   x86_power_flags[i][0] ? " " : "",
                                                   x86_power_flags[i]);
                                else
                                        seq_printf(m, " [%d]", i);
                        }
        }

        seq_printf(m, "\n\n");

        return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
        return *pos < NR_CPUS ? cpu_data + *pos : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
        .start = c_start,
        .next  = c_next,
        .stop  = c_stop,
        .show  = show_cpuinfo,
};

#if defined(CONFIG_INPUT_PCSPKR) || defined(CONFIG_INPUT_PCSPKR_MODULE)
#include <linux/platform_device.h>
static __init int add_pcspkr(void)
{
        struct platform_device *pd;
        int ret;

        pd = platform_device_alloc("pcspkr", -1);
        if (!pd)
                return -ENOMEM;

        ret = platform_device_add(pd);
        if (ret)
                platform_device_put(pd);

        return ret;
}
device_initcall(add_pcspkr);
#endif