/*
 *  Copyright (C) 1995  Linus Torvalds
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <asm/processor.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/pci.h>
#include <linux/efi.h>
#include <linux/acpi.h>
#include <linux/kallsyms.h>
#include <linux/edd.h>
#include <linux/iscsi_ibft.h>
#include <linux/mmzone.h>
#include <linux/kexec.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>
#include <linux/init_ohci1394_dma.h>
#include <asm/mtrr.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/vsyscall.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <video/edid.h>
#include <asm/e820.h>
#include <asm/dma.h>
#include <asm/gart.h>
#include <asm/mpspec.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/dmi.h>
#include <asm/cacheflush.h>
#include <asm/mce.h>
#include <asm/ds.h>
#include <asm/topology.h>
#include <asm/trampoline.h>
#include <asm/pat.h>

#include <mach_apic.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define ARCH_SETUP
#endif
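/*
 * Machine setup: the globals below capture boot-time parameters (CPU
 * data, video mode, DMI scratch space, kernel command line) so the rest
 * of the kernel reads them from here rather than from boot_params
 * directly.
 */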
struct cpuinfo_x86 boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);

__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;

unsigned long mmu_cr4_features;
/* Boot loader ID as an integer, for the benefit of proc_dointvec */
int bootloader_type;

unsigned long saved_video_mode;
int force_mwait __cpuinitdata;
/*
 * Early DMI memory
 */
int dmi_alloc_index;
char dmi_alloc_data[DMI_MAX_DATA];
struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);
struct sys_desc_table_struct {
        unsigned short length;
        unsigned char table[0];
};
struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);
extern int root_mountflags;

char __initdata command_line[COMMAND_LINE_SIZE];
static struct resource standard_io_resources[] = {
        { .name = "dma1", .start = 0x00, .end = 0x1f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "pic1", .start = 0x20, .end = 0x21,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "timer0", .start = 0x40, .end = 0x43,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "timer1", .start = 0x50, .end = 0x53,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "keyboard", .start = 0x60, .end = 0x6f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "dma page reg", .start = 0x80, .end = 0x8f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "pic2", .start = 0xa0, .end = 0xa1,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "dma2", .start = 0xc0, .end = 0xdf,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "fpu", .start = 0xf0, .end = 0xff,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO }
};
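/*
 * The resources above cover the fixed ISA-era I/O ports (DMA and
 * interrupt controllers, PIT timers, keyboard controller, FPU error
 * ports). setup_arch() claims them as IORESOURCE_BUSY so that drivers
 * cannot accidentally request these regions.
 */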
#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
static struct resource data_resource = {
        .name = "Kernel data",
        .start = 0,
        .end = 0,
        .flags = IORESOURCE_RAM,
};
static struct resource code_resource = {
        .name = "Kernel code",
        .start = 0,
        .end = 0,
        .flags = IORESOURCE_RAM,
};
static struct resource bss_resource = {
        .name = "Kernel bss",
        .start = 0,
        .end = 0,
        .flags = IORESOURCE_RAM,
};
static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);
#ifdef CONFIG_PROC_VMCORE
/* elfcorehdr= specifies the location of elf core header
 * stored by the crashed kernel. This option will be passed
 * by kexec loader to the capture kernel.
 */
static int __init setup_elfcorehdr(char *arg)
{
        char *end;

        if (!arg)
                return -EINVAL;
        elfcorehdr_addr = memparse(arg, &end);
        return end > arg ? 0 : -EINVAL;
}
early_param("elfcorehdr", setup_elfcorehdr);
#endif
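/*
 * The flat (non-NUMA) bootmem path below works in page frames: it sizes
 * the bootmem bitmap (one bit per pfn), finds a free spot for it via the
 * e820 map, marks all e820-usable ranges free, re-reserves anything
 * handed over from the early allocator, and finally reserves the bitmap
 * itself.
 */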
#ifndef CONFIG_NUMA
static void __init
contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long bootmap_size, bootmap;

        bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
        bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size,
                                 PAGE_SIZE);
        if (bootmap == -1L)
                panic("Cannot find bootmem map of size %ld\n", bootmap_size);
        bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
        e820_register_active_regions(0, start_pfn, end_pfn);
        free_bootmem_with_active_regions(0, end_pfn);
        early_res_to_bootmem(0, end_pfn<<PAGE_SHIFT);
        reserve_bootmem(bootmap, bootmap_size, BOOTMEM_DEFAULT);
}
#endif
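/*
 * EDD (BIOS Enhanced Disk Drive) data arrives in boot_params; copy_edd()
 * below moves it into a kernel global early, before anything can reuse
 * the boot parameter pages.
 */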
#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
 * copy_edd() - Copy the BIOS EDD information
 *              from boot_params into a safe place.
 *
 */
static inline void copy_edd(void)
{
        memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
               sizeof(edd.mbr_signature));
        memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
        edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
        edd.edd_info_nr = boot_params.eddbuf_entries;
}
#else
static inline void copy_edd(void)
{
}
#endif
#ifdef CONFIG_KEXEC
static void __init reserve_crashkernel(void)
{
        unsigned long long total_mem;
        unsigned long long crash_size, crash_base;
        int ret;

        total_mem = ((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;

        ret = parse_crashkernel(boot_command_line, total_mem,
                                &crash_size, &crash_base);
        if (ret == 0 && crash_size) {
                if (crash_base <= 0) {
                        printk(KERN_INFO "crashkernel reservation failed - "
                               "you have to specify a base address\n");
                        return;
                }

                if (reserve_bootmem(crash_base, crash_size,
                                    BOOTMEM_EXCLUSIVE) < 0) {
                        printk(KERN_INFO "crashkernel reservation failed - "
                               "memory is in use\n");
                        return;
                }

                printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
                       "for crashkernel (System RAM: %ldMB)\n",
                       (unsigned long)(crash_size >> 20),
                       (unsigned long)(crash_base >> 20),
                       (unsigned long)(total_mem >> 20));
                crashk_res.start = crash_base;
                crashk_res.end   = crash_base + crash_size - 1;
                insert_resource(&iomem_resource, &crashk_res);
        }
}
#else
static inline void __init reserve_crashkernel(void)
{}
#endif
/* Overridden in paravirt.c if CONFIG_PARAVIRT */
void __attribute__((weak)) __init memory_setup(void)
{
        machine_specific_memory_setup();
}
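/*
 * Boot protocol 2.09+ can pass extra data as a singly linked list of
 * physically addressed struct setup_data blobs (hdr.setup_data points at
 * the first node, data->next at the following one). parse_setup_data()
 * walks that list through a temporary early_ioremap() window.
 */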
static void __init parse_setup_data(void)
{
        struct setup_data *data;
        unsigned long pa_data;

        if (boot_params.hdr.version < 0x0209)
                return;
        pa_data = boot_params.hdr.setup_data;
        while (pa_data) {
                data = early_ioremap(pa_data, PAGE_SIZE);
                switch (data->type) {
                default:
                        break;
                }
#ifndef CONFIG_DEBUG_BOOT_PARAMS
                free_early(pa_data, pa_data+sizeof(*data)+data->len);
#endif
                pa_data = data->next;
                early_iounmap(data, PAGE_SIZE);
        }
}
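/*
 * Rough ordering inside setup_arch(): capture boot_params fields, parse
 * the command line, finalize the e820 memory map, set up the direct
 * mapping and bootmem, discover NUMA topology, reserve special regions
 * (DMA32, ACPI, EFI, initrd, crashkernel, iBFT), read ACPI/SMP tables,
 * register e820 and legacy I/O resources, and pick a boot console.
 */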
/*
 * setup_arch - architecture-specific boot-time initializations
 *
 * Note: On x86_64, fixmaps are ready for use even before this is called.
 */
void __init setup_arch(char **cmdline_p)
{
        unsigned i;

        printk(KERN_INFO "Command line: %s\n", boot_command_line);

        ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
        screen_info = boot_params.screen_info;
        edid_info = boot_params.edid_info;
        saved_video_mode = boot_params.hdr.vid_mode;
        bootloader_type = boot_params.hdr.type_of_loader;
#ifdef CONFIG_BLK_DEV_RAM
        rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
        rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0);
        rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
#endif
#ifdef CONFIG_EFI
        if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
                     "EL64", 4))
                efi_enabled = 1;
#endif

        ARCH_SETUP

        memory_setup();
        copy_edd();

        if (!boot_params.hdr.root_flags)
                root_mountflags &= ~MS_RDONLY;
        init_mm.start_code = (unsigned long) &_text;
        init_mm.end_code = (unsigned long) &_etext;
        init_mm.end_data = (unsigned long) &_edata;
        init_mm.brk = (unsigned long) &_end;

        code_resource.start = virt_to_phys(&_text);
        code_resource.end = virt_to_phys(&_etext)-1;
        data_resource.start = virt_to_phys(&_etext);
        data_resource.end = virt_to_phys(&_edata)-1;
        bss_resource.start = virt_to_phys(&__bss_start);
        bss_resource.end = virt_to_phys(&__bss_stop)-1;

        early_identify_cpu(&boot_cpu_data);
        strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
        *cmdline_p = command_line;

        parse_setup_data();

        parse_early_param();
#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
        if (init_ohci1394_dma_early)
                init_ohci1394_dma_on_all_controllers();
#endif

        finish_e820_parsing();
        /* after parse_early_param, so could debug it */
        insert_resource(&iomem_resource, &code_resource);
        insert_resource(&iomem_resource, &data_resource);
        insert_resource(&iomem_resource, &bss_resource);

        early_gart_iommu_check();
        e820_register_active_regions(0, 0, -1UL);
        /*
         * partially used pages are not usable - thus
         * we are rounding upwards:
         */
        end_pfn = e820_end_of_ram();
        /* update e820 for memory not covered by WB MTRRs */
        mtrr_bp_init();
        if (mtrr_trim_uncached_memory(end_pfn)) {
                e820_register_active_regions(0, 0, -1UL);
                end_pfn = e820_end_of_ram();
        }

        num_physpages = end_pfn;

        check_efer();

        max_pfn_mapped = init_memory_mapping(0, (max_pfn_mapped << PAGE_SHIFT));
        if (efi_enabled)
                efi_init();

        dmi_scan_machine();
#ifdef CONFIG_SMP
        /* setup to use the early static init tables during kernel startup */
        x86_cpu_to_apicid_early_ptr = (void *)x86_cpu_to_apicid_init;
        x86_bios_cpu_apicid_early_ptr = (void *)x86_bios_cpu_apicid_init;
#ifdef CONFIG_NUMA
        x86_cpu_to_node_map_early_ptr = (void *)x86_cpu_to_node_map_init;
#endif
#endif
#ifdef CONFIG_ACPI
        /*
         * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
         * Call this early for SRAT node setup.
         */
        acpi_boot_table_init();
#endif
        /* How many end-of-memory variables you have, grandma! */
        max_low_pfn = end_pfn;
        max_pfn = end_pfn;
        high_memory = (void *)__va(end_pfn * PAGE_SIZE - 1) + 1;

        /* Remove active ranges so rediscovery with NUMA-awareness happens */
        remove_all_active_ranges();
#ifdef CONFIG_ACPI_NUMA
        /*
         * Parse SRAT to discover nodes.
         */
        acpi_numa_init();
#endif

#ifdef CONFIG_NUMA
        numa_initmem_init(0, end_pfn);
#else
        contig_initmem_init(0, end_pfn);
#endif
        dma32_reserve_bootmem();

#ifdef CONFIG_ACPI_SLEEP
        /*
         * Reserve low memory region for sleep support.
         */
        acpi_reserve_bootmem();
#endif

        if (efi_enabled)
                efi_reserve_bootmem();

        /*
         * Find and reserve possible boot-time SMP configuration:
         */
        find_smp_config();
#ifdef CONFIG_BLK_DEV_INITRD
        if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
                unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
                unsigned long ramdisk_size  = boot_params.hdr.ramdisk_size;
                unsigned long ramdisk_end   = ramdisk_image + ramdisk_size;
                unsigned long end_of_mem    = end_pfn << PAGE_SHIFT;

                if (ramdisk_end <= end_of_mem) {
                        /*
                         * No need to reserve again: this range was already
                         * reserved early in x86_64_start_kernel, and
                         * early_res_to_bootmem converted it to reserved
                         * in bootmem.
                         */
                        initrd_start = ramdisk_image + PAGE_OFFSET;
                        initrd_end = initrd_start + ramdisk_size;
                } else {
                        free_bootmem(ramdisk_image, ramdisk_size);
                        printk(KERN_ERR "initrd extends beyond end of memory "
                               "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
                               ramdisk_end, end_of_mem);
                        initrd_start = 0;
                }
        }
#endif
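        /*
         * Note: initrd_start above is a virtual address. On x86_64 all of
         * physical memory is direct-mapped at PAGE_OFFSET, so adding
         * PAGE_OFFSET to the ramdisk's physical load address yields its
         * kernel-virtual location, provided it landed inside mapped RAM.
         */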
        reserve_crashkernel();

        reserve_ibft_region();

        paging_init();
        map_vsyscall();

        early_quirks();

#ifdef CONFIG_ACPI
        /*
         * Read APIC and some other early information from ACPI tables.
         */
        acpi_boot_init();
#endif

        /*
         * get boot-time SMP configuration:
         */
        if (smp_found_config)
                get_smp_config();
        init_apic_mappings();
        ioapic_init_mappings();
        /*
         * We trust e820 completely. No explicit ROM probing in memory.
         */
        e820_reserve_resources();
        e820_mark_nosave_regions();

        /* request I/O space for devices used on all i[345]86 PCs */
        for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
                request_resource(&ioport_resource, &standard_io_resources[i]);
        e820_setup_gap();

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
        if (!efi_enabled || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
                conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif
}
static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
        unsigned int *v;

        if (c->extended_cpuid_level < 0x80000004)
                return 0;

        v = (unsigned int *) c->x86_model_id;
        cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
        cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
        cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
        c->x86_model_id[48] = 0;
        return 1;
}
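/*
 * CPUID leaves 0x80000002..0x80000004 each return 16 bytes of the brand
 * string in EAX..EDX, 48 bytes total, which is why get_model_name()
 * fills twelve 32-bit words and then NUL-terminates at byte 48.
 */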
static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
        unsigned int n, dummy, eax, ebx, ecx, edx;

        n = c->extended_cpuid_level;

        if (n >= 0x80000005) {
                cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
                printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), "
                       "D cache %dK (%d bytes/line)\n",
                       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
                c->x86_cache_size = (ecx>>24) + (edx>>24);
                /* On K8 L1 TLB is inclusive, so don't count it */
                c->x86_tlbsize = 0;
        }

        if (n >= 0x80000006) {
                cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
                ecx = cpuid_ecx(0x80000006);
                c->x86_cache_size = ecx >> 16;
                c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);

                printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
                       c->x86_cache_size, ecx & 0xFF);
        }
        if (n >= 0x80000008) {
                cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
                c->x86_virt_bits = (eax >> 8) & 0xff;
                c->x86_phys_bits = eax & 0xff;
        }
}
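/*
 * The decoding above follows AMD's extended leaves: 0x80000005 reports
 * L1 (ECX = D-cache, EDX = I-cache; size in the top byte, line size in
 * the low byte), 0x80000006 reports L2 (size in KB in ECX[31:16]), and
 * 0x80000008 reports physical/virtual address widths in EAX.
 */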
#ifdef CONFIG_NUMA
static int __cpuinit nearby_node(int apicid)
{
        int i, node;

        for (i = apicid - 1; i >= 0; i--) {
                node = apicid_to_node[i];
                if (node != NUMA_NO_NODE && node_online(node))
                        return node;
        }
        for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
                node = apicid_to_node[i];
                if (node != NUMA_NO_NODE && node_online(node))
                        return node;
        }
        return first_node(node_online_map); /* Shouldn't happen */
}
#endif
/*
 * On an AMD dual core setup the lower bits of the APIC id distinguish
 * the cores. Assumes number of cores is a power of two.
 */
static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        unsigned bits;
#ifdef CONFIG_NUMA
        int cpu = smp_processor_id();
        int node = 0;
        unsigned apicid = hard_smp_processor_id();
#endif
        bits = c->x86_coreid_bits;

        /* Low order bits define the core id (index of core in socket) */
        c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
        /* Convert the initial APIC ID into the socket ID */
        c->phys_proc_id = c->initial_apicid >> bits;

#ifdef CONFIG_NUMA
        node = c->phys_proc_id;
        if (apicid_to_node[apicid] != NUMA_NO_NODE)
                node = apicid_to_node[apicid];
        if (!node_online(node)) {
                /*
                 * Two possibilities here:
                 * - The CPU is missing memory and no node was created.
                 *   In that case try picking one from a nearby CPU.
                 * - The APIC IDs differ from the HyperTransport node IDs
                 *   which the K8 northbridge parsing fills in.
                 *   Assume they are all increased by a constant offset,
                 *   but in the same order as the HT nodeids.
                 *   If that doesn't result in a usable node fall back to
                 *   the path for the previous case.
                 */
                int ht_nodeid = c->initial_apicid;

                if (ht_nodeid >= 0 &&
                    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
                        node = apicid_to_node[ht_nodeid];
                /* Pick a nearby node */
                if (!node_online(node))
                        node = nearby_node(apicid);
        }
        numa_set_node(cpu, node);

        printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
#endif
}
static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        unsigned bits, ecx;

        /* Multi core CPU? */
        if (c->extended_cpuid_level < 0x80000008)
                return;

        ecx = cpuid_ecx(0x80000008);

        c->x86_max_cores = (ecx & 0xff) + 1;

        /* CPU telling us the core id bits shift? */
        bits = (ecx >> 12) & 0xF;

        /* Otherwise recompute */
        if (bits == 0) {
                while ((1 << bits) < c->x86_max_cores)
                        bits++;
        }

        c->x86_coreid_bits = bits;
#endif
}
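/*
 * Illustrative example (not from the original source): a quad-core part
 * reports NC = 3 in CPUID 0x80000008 ECX[7:0], so x86_max_cores = 4. If
 * ECX[15:12] (the core id bits field) is zero, the loop above derives
 * bits = 2, i.e. the low two APIC ID bits select the core in the socket.
 */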
#define ENABLE_C1E_MASK			0x18000000
#define CPUID_PROCESSOR_SIGNATURE	1
#define CPUID_XFAM			0x0ff00000
#define CPUID_XFAM_K8			0x00000000
#define CPUID_XFAM_10H			0x00100000
#define CPUID_XFAM_11H			0x00200000
#define CPUID_XMOD			0x000f0000
#define CPUID_XMOD_REV_F		0x00040000
/* AMD systems with C1E don't have a working lAPIC timer. Check for that. */
static __cpuinit int amd_apic_timer_broken(void)
{
        u32 lo, hi, eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);

        switch (eax & CPUID_XFAM) {
        case CPUID_XFAM_K8:
                if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
                        break;
        case CPUID_XFAM_10H:
        case CPUID_XFAM_11H:
                rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
                if (lo & ENABLE_C1E_MASK)
                        return 1;
                break;
        default:
                /* err on the side of caution */
                return 1;
        }
        return 0;
}
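/*
 * Background: on the affected families the local APIC timer stops
 * ticking while the CPU sits in C1E, so init_amd() below sets
 * disable_apic_timer and the APIC setup code avoids relying on the
 * lAPIC timer as a clock event source.
 */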
static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
{
        early_init_amd_mc(c);

        /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
        if (c->x86_power & (1<<8))
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
}
static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
        unsigned level;

#ifdef CONFIG_SMP
        unsigned long value;

        /*
         * Disable TLB flush filter by setting HWCR.FFDIS on K8
         * bit 6 of msr C001_0015
         *
         * Errata 63 for SH-B3 steppings
         * Errata 122 for all steppings (F+ have it disabled by default)
         */
        if (c->x86 == 15) {
                rdmsrl(MSR_K8_HWCR, value);
                value |= 1 << 6;
                wrmsrl(MSR_K8_HWCR, value);
        }
#endif

        /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
           3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
        clear_cpu_cap(c, 0*32+31);

        /* On C+ stepping K8 rep microcode works well for copy/memset */
        level = cpuid_eax(1);
        if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) ||
                             level >= 0x0f58))
                set_cpu_cap(c, X86_FEATURE_REP_GOOD);
        if (c->x86 == 0x10 || c->x86 == 0x11)
                set_cpu_cap(c, X86_FEATURE_REP_GOOD);

        /* Enable workaround for FXSAVE leak */
        if (c->x86 >= 6)
                set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);

        level = get_model_name(c);
        if (!level) {
                switch (c->x86) {
                case 15:
                        /* Should distinguish Models here, but this is only
                           a fallback anyways. */
                        strcpy(c->x86_model_id, "Hammer");
                        break;
                }
        }
        display_cacheinfo(c);

        /* Multi core CPU? */
        if (c->extended_cpuid_level >= 0x80000008)
                amd_detect_cmp(c);

        if (c->extended_cpuid_level >= 0x80000006 &&
            (cpuid_edx(0x80000006) & 0xf000))
                num_cache_leaves = 4;
        else
                num_cache_leaves = 3;

        if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x11)
                set_cpu_cap(c, X86_FEATURE_K8);

        /* MFENCE stops RDTSC speculation */
        set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);

        if (amd_apic_timer_broken())
                disable_apic_timer = 1;

        if (c == &boot_cpu_data && c->x86 >= 0xf && c->x86 <= 0x11) {
                unsigned long long tseg;

                /*
                 * Split up direct mapping around the TSEG SMM area.
                 * Don't do it for gbpages because there seems very little
                 * benefit in doing so.
                 */
                if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg) &&
                    (tseg >> PMD_SHIFT) < (max_pfn_mapped >> (PMD_SHIFT-PAGE_SHIFT)))
                        set_memory_4k((unsigned long)__va(tseg), 1);
        }
}
void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        u32 eax, ebx, ecx, edx;
        int index_msb, core_bits;

        cpuid(1, &eax, &ebx, &ecx, &edx);

        if (!cpu_has(c, X86_FEATURE_HT))
                return;
        if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
                goto out;

        smp_num_siblings = (ebx & 0xff0000) >> 16;

        if (smp_num_siblings == 1) {
                printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
        } else if (smp_num_siblings > 1) {

                if (smp_num_siblings > NR_CPUS) {
                        printk(KERN_WARNING "CPU: Unsupported number of "
                               "siblings %d", smp_num_siblings);
                        smp_num_siblings = 1;
                        return;
                }

                index_msb = get_count_order(smp_num_siblings);
                c->phys_proc_id = phys_pkg_id(index_msb);

                smp_num_siblings = smp_num_siblings / c->x86_max_cores;

                index_msb = get_count_order(smp_num_siblings);

                core_bits = get_count_order(c->x86_max_cores);

                c->cpu_core_id = phys_pkg_id(index_msb) &
                                 ((1 << core_bits) - 1);
        }
out:
        if ((c->x86_max_cores * smp_num_siblings) > 1) {
                printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
                       c->phys_proc_id);
                printk(KERN_INFO "CPU: Processor Core ID: %d\n",
                       c->cpu_core_id);
        }
#endif
}
/*
 * find out the number of processor cores on the die
 */
static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
        unsigned int eax, t;

        if (c->cpuid_level < 4)
                return 1;

        cpuid_count(4, 0, &eax, &t, &t, &t);

        if (eax & 0x1f)
                return ((eax >> 26) + 1);
        else
                return 1;
}
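/*
 * CPUID leaf 4 (deterministic cache parameters) encodes "maximum core
 * IDs per package minus one" in EAX[31:26], hence the (eax >> 26) + 1
 * above; CPUs without leaf 4 are treated as single core.
 */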
static void __cpuinit srat_detect_node(void)
{
#ifdef CONFIG_NUMA
        unsigned node;
        int cpu = smp_processor_id();
        int apicid = hard_smp_processor_id();

        /* Don't do the funky fallback heuristics the AMD version employs
           for now. */
        node = apicid_to_node[apicid];
        if (node == NUMA_NO_NODE || !node_online(node))
                node = first_node(node_online_map);
        numa_set_node(cpu, node);

        printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
}
static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
{
        if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
            (c->x86 == 0x6 && c->x86_model >= 0x0e))
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
}
static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
        /* Cache sizes */
        unsigned n;

        init_intel_cacheinfo(c);
        if (c->cpuid_level > 9) {
                unsigned eax = cpuid_eax(10);
                /* Check for version and the number of counters */
                if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
                        set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
        }

        if (cpu_has_ds) {
                unsigned int l1, l2;
                rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
                if (!(l1 & (1<<11)))
                        set_cpu_cap(c, X86_FEATURE_BTS);
                if (!(l1 & (1<<12)))
                        set_cpu_cap(c, X86_FEATURE_PEBS);
        }

        if (cpu_has_bts)
                ds_init_intel(c);

        n = c->extended_cpuid_level;
        if (n >= 0x80000008) {
                unsigned eax = cpuid_eax(0x80000008);
                c->x86_virt_bits = (eax >> 8) & 0xff;
                c->x86_phys_bits = eax & 0xff;
                /* CPUID workaround for Intel 0F34 CPU */
                if (c->x86_vendor == X86_VENDOR_INTEL &&
                    c->x86 == 0xF && c->x86_model == 0x3 &&
                    c->x86_mask == 0x4)
                        c->x86_phys_bits = 36;
        }

        if (c->x86 == 15)
                c->x86_cache_alignment = c->x86_clflush_size * 2;
        if (c->x86 == 6)
                set_cpu_cap(c, X86_FEATURE_REP_GOOD);
        set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
        c->x86_max_cores = intel_num_cpu_cores(c);

        srat_detect_node();
}
static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c)
{
        if (c->x86 == 0x6 && c->x86_model >= 0xf)
                set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
}
static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
{
        /* Cache sizes */
        unsigned n;

        n = c->extended_cpuid_level;
        if (n >= 0x80000008) {
                unsigned eax = cpuid_eax(0x80000008);
                c->x86_virt_bits = (eax >> 8) & 0xff;
                c->x86_phys_bits = eax & 0xff;
        }

        if (c->x86 == 0x6 && c->x86_model >= 0xf) {
                c->x86_cache_alignment = c->x86_clflush_size * 2;
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
                set_cpu_cap(c, X86_FEATURE_REP_GOOD);
        }
        set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
}
static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
        char *v = c->x86_vendor_id;

        if (!strcmp(v, "AuthenticAMD"))
                c->x86_vendor = X86_VENDOR_AMD;
        else if (!strcmp(v, "GenuineIntel"))
                c->x86_vendor = X86_VENDOR_INTEL;
        else if (!strcmp(v, "CentaurHauls"))
                c->x86_vendor = X86_VENDOR_CENTAUR;
        else
                c->x86_vendor = X86_VENDOR_UNKNOWN;
}
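/*
 * The vendor string is assembled in early_identify_cpu() from CPUID.0's
 * EBX, EDX, ECX, in that order, which is why the three registers land at
 * x86_vendor_id offsets 0, 4 and 8: "Genu" + "ineI" + "ntel".
 */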
/* Do some early cpuid on the boot CPU to get some parameters that are
   needed before check_bugs. Everything advanced is in identify_cpu
   below. */
static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
{
        u32 tfms, xlvl;

        c->loops_per_jiffy = loops_per_jiffy;
        c->x86_cache_size = -1;
        c->x86_vendor = X86_VENDOR_UNKNOWN;
        c->x86_model = c->x86_mask = 0; /* So far unknown... */
        c->x86_vendor_id[0] = '\0'; /* Unset */
        c->x86_model_id[0] = '\0';  /* Unset */
        c->x86_clflush_size = 64;
        c->x86_cache_alignment = c->x86_clflush_size;
        c->x86_max_cores = 1;
        c->x86_coreid_bits = 0;
        c->extended_cpuid_level = 0;
        memset(&c->x86_capability, 0, sizeof c->x86_capability);

        /* Get vendor name */
        cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
              (unsigned int *)&c->x86_vendor_id[0],
              (unsigned int *)&c->x86_vendor_id[8],
              (unsigned int *)&c->x86_vendor_id[4]);

        get_cpu_vendor(c);
        /* Initialize the standard set of capabilities */
        /* Note that the vendor-specific code below might override */

        /* Intel-defined flags: level 0x00000001 */
        if (c->cpuid_level >= 0x00000001) {
                __u32 misc;
                cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
                      &c->x86_capability[0]);
                c->x86 = (tfms >> 8) & 0xf;
                c->x86_model = (tfms >> 4) & 0xf;
                c->x86_mask = tfms & 0xf;
                if (c->x86 == 0xf)
                        c->x86 += (tfms >> 20) & 0xff;
                if (c->x86 >= 0x6)
                        c->x86_model += ((tfms >> 16) & 0xF) << 4;
                if (test_cpu_cap(c, X86_FEATURE_CLFLSH))
                        c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
        } else {
                /* Have CPUID level 0 only - unheard of */
                c->x86 = 4;
        }
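        /*
         * Worked example (illustrative): tfms = 0x000006fb decodes to
         * family 0x6, model 0xf, stepping 0xb. Since the base family is
         * not 0xf the extended family byte is ignored, but the extended
         * model nibble still applies for family >= 6; it is zero here,
         * so the model stays 0xf.
         */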
        c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xff;
#ifdef CONFIG_SMP
        c->phys_proc_id = c->initial_apicid;
#endif
        /* AMD-defined flags: level 0x80000001 */
        xlvl = cpuid_eax(0x80000000);
        c->extended_cpuid_level = xlvl;
        if ((xlvl & 0xffff0000) == 0x80000000) {
                if (xlvl >= 0x80000001) {
                        c->x86_capability[1] = cpuid_edx(0x80000001);
                        c->x86_capability[6] = cpuid_ecx(0x80000001);
                }
                if (xlvl >= 0x80000004)
                        get_model_name(c); /* Default name */
        }
        /* Transmeta-defined flags: level 0x80860001 */
        xlvl = cpuid_eax(0x80860000);
        if ((xlvl & 0xffff0000) == 0x80860000) {
                /* Don't set x86_cpuid_level here for now to not confuse. */
                if (xlvl >= 0x80860001)
                        c->x86_capability[2] = cpuid_edx(0x80860001);
        }
        c->extended_cpuid_level = cpuid_eax(0x80000000);
        if (c->extended_cpuid_level >= 0x80000007)
                c->x86_power = cpuid_edx(0x80000007);

        clear_cpu_cap(c, X86_FEATURE_PAT);
        switch (c->x86_vendor) {
        case X86_VENDOR_AMD:
                early_init_amd(c);
                if (c->x86 >= 0xf && c->x86 <= 0x11)
                        set_cpu_cap(c, X86_FEATURE_PAT);
                break;
        case X86_VENDOR_INTEL:
                early_init_intel(c);
                if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15))
                        set_cpu_cap(c, X86_FEATURE_PAT);
                break;
        case X86_VENDOR_CENTAUR:
                early_init_centaur(c);
                break;
        }
}
/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
        int i;

        early_identify_cpu(c);

        init_scattered_cpuid_features(c);

        c->apicid = phys_pkg_id(0);
        /*
         * Vendor-specific initialization. In this section we
         * canonicalize the feature flags, meaning if there are
         * features a certain CPU supports which CPUID doesn't
         * tell us, CPUID claiming incorrect flags, or other bugs,
         * we handle them here.
         *
         * At the end of this section, c->x86_capability better
         * indicate the features this CPU genuinely supports!
         */
        switch (c->x86_vendor) {
        case X86_VENDOR_AMD:
                init_amd(c);
                break;

        case X86_VENDOR_INTEL:
                init_intel(c);
                break;

        case X86_VENDOR_CENTAUR:
                init_centaur(c);
                break;

        case X86_VENDOR_UNKNOWN:
        default:
                display_cacheinfo(c);
                break;
        }

        detect_ht(c);
        /*
         * On SMP, boot_cpu_data holds the common feature set between
         * all CPUs; so make sure that we indicate which features are
         * common between the CPUs. The first time this routine gets
         * executed, c == &boot_cpu_data.
         */
        if (c != &boot_cpu_data) {
                /* AND the already accumulated flags with these */
                for (i = 0; i < NCAPINTS; i++)
                        boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
        }

        /* Clear all flags overridden by options */
        for (i = 0; i < NCAPINTS; i++)
                c->x86_capability[i] &= ~cleared_cpu_caps[i];
#ifdef CONFIG_X86_MCE
        mcheck_init(c);
#endif
        select_idle_routine(c);

#ifdef CONFIG_NUMA
        numa_add_cpu(smp_processor_id());
#endif
}
void __cpuinit identify_boot_cpu(void)
{
        identify_cpu(&boot_cpu_data);
}
void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
{
        BUG_ON(c == &boot_cpu_data);
        identify_cpu(c);
        mtrr_ap_init();
}
static __init int setup_noclflush(char *arg)
{
        setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
        return 1;
}
__setup("noclflush", setup_noclflush);
void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
        if (c->x86_model_id[0])
                printk(KERN_CONT "%s", c->x86_model_id);

        if (c->x86_mask || c->cpuid_level >= 0)
                printk(KERN_CONT " stepping %02x\n", c->x86_mask);
        else
                printk(KERN_CONT "\n");
}
static __init int setup_disablecpuid(char *arg)
{
        int bit;

        if (get_option(&arg, &bit) && bit < NCAPINTS*32)
                setup_clear_cpu_cap(bit);
        else
                return 0;
        return 1;
}
__setup("clearcpuid=", setup_disablecpuid);
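/*
 * Usage note: "clearcpuid=N" masks feature bit N on the command line
 * (N = 32 * capability word + bit, following the layout in
 * cpufeature.h), and "noclflush" is the dedicated shorthand for masking
 * CLFLUSH. Both record the bit in cleared_cpu_caps[], which
 * identify_cpu() applies to every CPU.
 */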