4b4ec3d7991024c073a90c0e492bffdc0dbbc473
[deliverable/linux.git] / include / asm-x86 / processor_64.h
1 /*
2 * Copyright (C) 1994 Linus Torvalds
3 */
4
5 #ifndef __ASM_X86_64_PROCESSOR_H
6 #define __ASM_X86_64_PROCESSOR_H
7
8 #include <asm/segment.h>
9 #include <asm/page.h>
10 #include <asm/types.h>
11 #include <asm/sigcontext.h>
12 #include <asm/cpufeature.h>
13 #include <linux/threads.h>
14 #include <asm/msr.h>
15 #include <asm/current.h>
16 #include <asm/system.h>
17 #include <asm/mmsegment.h>
18 #include <asm/percpu.h>
19 #include <linux/personality.h>
20 #include <linux/cpumask.h>
21 #include <asm/desc_defs.h>
22
23 /*
24 * CPU type and hardware bug flags. Kept separately for each CPU.
25 */
26
/*
 * CPU type and hardware bug flags, kept separately for each CPU and
 * filled in by identify_cpu() at boot.  Access via cpu_data(cpu) /
 * current_cpu_data below.
 */
struct cpuinfo_x86 {
	__u8	x86;		/* CPU family */
	__u8	x86_vendor;	/* CPU vendor (X86_VENDOR_* below) */
	__u8	x86_model;	/* vendor-specific model number */
	__u8	x86_mask;	/* stepping */
	int	cpuid_level;	/* Maximum supported CPUID level, -1=no CPUID */
	__u32	x86_capability[NCAPINTS];	/* feature flag bitmap words */
	char	x86_vendor_id[16];	/* vendor identification string */
	char	x86_model_id[64];	/* human-readable model/brand string */
	int	x86_cache_size;		/* in KB */
	int	x86_clflush_size;	/* CLFLUSH granularity in bytes */
	int	x86_cache_alignment;	/* cache alignment used for padding/prefetch */
	int	x86_tlbsize;	/* number of 4K pages in DTLB/ITLB combined(in pages)*/
	__u8	x86_virt_bits, x86_phys_bits;	/* virtual/physical address widths */
	__u8	x86_max_cores;	/* cpuid returned max cores value */
	__u8	x86_coreid_bits;	/* cpuid returned core id bits */
	__u32	x86_power;	/* advanced power management feature bits */
	__u32	extended_cpuid_level;	/* Max extended CPUID function supported */
	unsigned long loops_per_jiffy;	/* delay-loop calibration result */
#ifdef CONFIG_SMP
	cpumask_t llc_shared_map;	/* cpus sharing the last level cache */
#endif
	__u8	apicid;		/* local APIC id */
#ifdef CONFIG_SMP
	__u8	booted_cores;	/* number of cores as seen by OS */
	__u8	phys_proc_id;	/* Physical Processor id. */
	__u8	cpu_core_id;	/* Core id. */
	__u8	cpu_index;	/* index into per_cpu list */
#endif
} ____cacheline_aligned;	/* cacheline-aligned to avoid false sharing between CPUs */
57
/*
 * Values for cpuinfo_x86.x86_vendor.  Note that value 6 is unused in
 * this table; X86_VENDOR_UNKNOWN is an out-of-range sentinel, not an
 * index below X86_VENDOR_NUM.
 */
#define X86_VENDOR_INTEL	0
#define X86_VENDOR_CYRIX	1
#define X86_VENDOR_AMD		2
#define X86_VENDOR_UMC		3
#define X86_VENDOR_NEXGEN	4
#define X86_VENDOR_CENTAUR	5
#define X86_VENDOR_TRANSMETA	7
#define X86_VENDOR_NUM		8
#define X86_VENDOR_UNKNOWN	0xff
67
/*
 * Accessors for per-CPU cpuinfo.  On SMP each CPU has its own
 * cpu_info instance; on UP builds both macros alias boot_cpu_data.
 */
#ifdef CONFIG_SMP
DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)		per_cpu(cpu_info, cpu)
#define current_cpu_data	cpu_data(smp_processor_id())
#else
#define cpu_data(cpu)		boot_cpu_data
#define current_cpu_data	boot_cpu_data
#endif
76
extern char ignore_irq13;

/* Probe and record the CPU's type/features into *c. */
extern void identify_cpu(struct cpuinfo_x86 *);

/*
 * User space process size. 47bits minus one guard page.
 */
#define TASK_SIZE64	(0x800000000000UL - 4096)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 * For IA32 (compat) tasks: 3GB if the ADDR_LIMIT_3GB personality flag
 * is set, otherwise almost the full 4GB (0xFFFFe000 = 4GB - 8K).
 */
#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? 0xc0000000 : 0xFFFFe000)

/* Effective user address-space limit: 32-bit limit for IA32 compat
 * tasks (TIF_IA32), the full 47-bit range otherwise. */
#define TASK_SIZE		(test_thread_flag(TIF_IA32) ? IA32_PAGE_OFFSET : TASK_SIZE64)
#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_IA32)) ? IA32_PAGE_OFFSET : TASK_SIZE64)
94
/*
 * FXSAVE/FXRSTOR save area.  Layout is fixed by the hardware: the
 * padding brings the structure to the architectural 512 bytes, and
 * the instructions require 16-byte alignment.
 */
struct i387_fxsave_struct {
	u16	cwd;		/* FPU control word */
	u16	swd;		/* FPU status word */
	u16	twd;		/* FPU tag word */
	u16	fop;		/* last FPU opcode */
	u64	rip;		/* last FPU instruction pointer */
	u64	rdp;		/* last FPU data (operand) pointer */
	u32	mxcsr;		/* SSE control/status register */
	u32	mxcsr_mask;	/* mask of valid MXCSR bits */
	u32	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
	u32	xmm_space[64];	/* 16*16 bytes for each XMM-reg = 256 bytes */
	u32	padding[24];	/* pad out to 512 bytes total */
} __attribute__ ((aligned (16)));
108
/*
 * Top-level FPU state container.  On x86-64 only the fxsave format is
 * used (FXSR support is architecturally guaranteed), hence the single
 * member.
 */
union i387_union {
	struct i387_fxsave_struct	fxsave;
};
112
/* CPU data of the boot processor; also the UP fallback for cpu_data(). */
extern struct cpuinfo_x86 boot_cpu_data;

/* Save the original ist values for checking stack pointers during debugging */
struct orig_ist {
	unsigned long	ist[7];		/* one entry per TSS IST slot */
};
DECLARE_PER_CPU(struct orig_ist, orig_ist);
119
/*
 * Minimum alignment for task_struct and mm_struct allocations.  vSMP
 * systems align to the internode cacheline size — presumably to avoid
 * false sharing across nodes (NOTE(review): confirm against vSMP docs).
 */
#ifdef CONFIG_X86_VSMP
#define ARCH_MIN_TASKALIGN	(1 << INTERNODE_CACHE_SHIFT)
#define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
#else
#define ARCH_MIN_TASKALIGN	16
#define ARCH_MIN_MMSTRUCT_ALIGN	0
#endif
127
/*
 * Static initializers for the init task: kernel stack pointer (sp0)
 * starts at the top of init_stack.
 */
#define INIT_THREAD  { \
	.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

#define INIT_TSS  { \
	.x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

/* Initial vm_area for the init mm (positional initializer). */
#define INIT_MMAP \
{ &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }
138
/*
 * Set up the register state for a freshly exec'ed 64-bit user task:
 * zero the data segment registers and gs, point ip/sp at the new
 * image, select the 64-bit user code/stack segments, and enter user
 * mode with interrupts enabled (flags = 0x200 is EFLAGS.IF).
 */
#define start_thread(regs,new_rip,new_rsp) do { \
	asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0));	\
	load_gs_index(0);							\
	(regs)->ip = (new_rip);							\
	(regs)->sp = (new_rsp);							\
	write_pda(oldrsp, (new_rsp));						\
	(regs)->cs = __USER_CS;							\
	(regs)->ss = __USER_DS;							\
	(regs)->flags = 0x200;							\
	set_fs(USER_DS);							\
} while(0)
150
/*
 * Return saved PC of a blocked thread.
 * What is this good for? it will be always the scheduler or ret_from_fork.
 */
#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.sp - 8))

/* User-mode pt_regs sit at the top of the kernel stack, just below sp0. */
#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1)
#define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */
159
160
/*
 * Select the NOP sequences used for padding/alternatives: P6-style
 * multi-byte NOPs when building for Intel targets (MPSC/Core2),
 * K8-style prefixed NOPs otherwise.
 */
#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2)
#define ASM_NOP1 P6_NOP1
#define ASM_NOP2 P6_NOP2
#define ASM_NOP3 P6_NOP3
#define ASM_NOP4 P6_NOP4
#define ASM_NOP5 P6_NOP5
#define ASM_NOP6 P6_NOP6
#define ASM_NOP7 P6_NOP7
#define ASM_NOP8 P6_NOP8
#else
#define ASM_NOP1 K8_NOP1
#define ASM_NOP2 K8_NOP2
#define ASM_NOP3 K8_NOP3
#define ASM_NOP4 K8_NOP4
#define ASM_NOP5 K8_NOP5
#define ASM_NOP6 K8_NOP6
#define ASM_NOP7 K8_NOP7
#define ASM_NOP8 K8_NOP8
#endif
180
/* Opteron nops: 0x90 with 0x66 operand-size prefixes; longer NOPs are
 * built by concatenating shorter ones. */
#define K8_NOP1 ".byte 0x90\n"
#define K8_NOP2	".byte 0x66,0x90\n"
#define K8_NOP3	".byte 0x66,0x66,0x90\n"
#define K8_NOP4	".byte 0x66,0x66,0x66,0x90\n"
#define K8_NOP5	K8_NOP3 K8_NOP2
#define K8_NOP6	K8_NOP3 K8_NOP3
#define K8_NOP7	K8_NOP4 K8_NOP3
#define K8_NOP8	K8_NOP4 K8_NOP4

/* P6 nops: single multi-byte 0x0f,0x1f instructions */
/* uses eax dependencies (Intel-recommended choice) */
#define P6_NOP1	".byte 0x90\n"
#define P6_NOP2	".byte 0x66,0x90\n"
#define P6_NOP3	".byte 0x0f,0x1f,0x00\n"
#define P6_NOP4	".byte 0x0f,0x1f,0x40,0\n"
#define P6_NOP5	".byte 0x0f,0x1f,0x44,0x00,0\n"
#define P6_NOP6	".byte 0x66,0x0f,0x1f,0x44,0x00,0\n"
#define P6_NOP7	".byte 0x0f,0x1f,0x80,0,0,0,0\n"
#define P6_NOP8	".byte 0x0f,0x1f,0x84,0x00,0,0,0,0\n"

/* Length in bytes of the longest NOP sequence above. */
#define ASM_NOP_MAX 8
203
204 static inline void prefetchw(void *x)
205 {
206 alternative_input("prefetcht0 (%1)",
207 "prefetchw (%1)",
208 X86_FEATURE_3DNOW,
209 "r" (x));
210 }
211
212
/*
 * Derive the current task from the stack pointer: thread_info lives
 * at the base of the kernel stack, so masking %rsp with CURRENT_MASK
 * yields its address, whose ->task is the current task_struct.
 */
#define stack_current() \
({								\
	struct thread_info *ti;					\
	asm("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK));	\
	ti->task;						\
})
219
220
221 #endif /* __ASM_X86_64_PROCESSOR_H */
This page took 0.035276 seconds and 4 git commands to generate.