#ifndef _ASM_X86_PROCESSOR_H
#define _ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;

#include <asm/vm86.h>
#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <asm/special_insns.h>

#include <linux/personality.h>
#include <linux/cpumask.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/math64.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/irqflags.h>

/*
 * We handle most unaligned accesses in hardware. On the other hand
 * unaligned DMA can be quite expensive on some Nehalem processors.
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN	0

#define HBP_NUM 4
/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
static inline void *current_text_addr(void)
{
	void *pc;

	asm volatile("mov $1f, %0; 1:":"=r" (pc));

	return pc;
}

#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN		(1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
#else
# define ARCH_MIN_TASKALIGN		16
# define ARCH_MIN_MMSTRUCT_ALIGN	0
#endif

/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 * Members of this structure are referenced in head.S, so think twice
 * before touching them. [mj]
 */

struct cpuinfo_x86 {
	__u8			x86;		/* CPU family */
	__u8			x86_vendor;	/* CPU vendor */
	__u8			x86_model;
	__u8			x86_mask;
#ifdef CONFIG_X86_32
	char			wp_works_ok;	/* It doesn't on 386's */

	/* Problems on some 486Dx4's and old 386's: */
	char			hlt_works_ok;
	char			hard_math;
	char			rfu;
	char			fdiv_bug;
	char			f00f_bug;
	char			coma_bug;
	char			pad0;
#else
	/* Number of 4K pages in DTLB/ITLB combined (in pages): */
	int			x86_tlbsize;
#endif
	__u8			x86_virt_bits;
	__u8			x86_phys_bits;
	/* CPUID returned core id bits: */
	__u8			x86_coreid_bits;
	/* Max extended CPUID function supported: */
	__u32			extended_cpuid_level;
	/* Maximum supported CPUID level, -1=no CPUID: */
	int			cpuid_level;
	__u32			x86_capability[NCAPINTS];
	char			x86_vendor_id[16];
	char			x86_model_id[64];
	/* in KB - valid for CPUs which support this call: */
	int			x86_cache_size;
	int			x86_cache_alignment;	/* In bytes */
	int			x86_power;
	unsigned long		loops_per_jiffy;
	/* cpuid returned max cores value: */
	u16			x86_max_cores;
	u16			apicid;
	u16			initial_apicid;
	u16			x86_clflush_size;
	/* number of cores as seen by the OS: */
	u16			booted_cores;
	/* Physical processor id: */
	u16			phys_proc_id;
	/* Core id: */
	u16			cpu_core_id;
	/* Compute unit id */
	u8			compute_unit_id;
	/* Index into per_cpu list: */
	u16			cpu_index;
	u32			microcode;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));

#define X86_VENDOR_INTEL	0
#define X86_VENDOR_CYRIX	1
#define X86_VENDOR_AMD		2
#define X86_VENDOR_UMC		3
#define X86_VENDOR_CENTAUR	5
#define X86_VENDOR_TRANSMETA	7
#define X86_VENDOR_NSC		8
#define X86_VENDOR_NUM		9

#define X86_VENDOR_UNKNOWN	0xff

/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86	boot_cpu_data;
extern struct cpuinfo_x86	new_cpu_data;

extern struct tss_struct	doublefault_tss;
extern __u32			cpu_caps_cleared[NCAPINTS];
extern __u32			cpu_caps_set[NCAPINTS];

#ifdef CONFIG_SMP
DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)		per_cpu(cpu_info, cpu)
#else
#define cpu_info		boot_cpu_data
#define cpu_data(cpu)		boot_cpu_data
#endif

extern const struct seq_operations cpuinfo_op;

static inline int hlt_works(int cpu)
{
#ifdef CONFIG_X86_32
	return cpu_data(cpu).hlt_works_ok;
#else
	return 1;
#endif
}

#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)

extern void cpu_detect(struct cpuinfo_x86 *c);

extern struct pt_regs *idle_regs(struct pt_regs *);

extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
void print_cpu_msr(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;

extern void detect_extended_topology(struct cpuinfo_x86 *c);
extern void detect_ht(struct cpuinfo_x86 *c);

static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	asm volatile("cpuid"
	    : "=a" (*eax),
	      "=b" (*ebx),
	      "=c" (*ecx),
	      "=d" (*edx)
	    : "0" (*eax), "2" (*ecx)
	    : "memory");
}

static inline void load_cr3(pgd_t *pgdir)
{
	write_cr3(__pa(pgdir));
}

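/*
 * Illustrative usage (not part of the original header): the CPU init
 * code reloads the initial kernel page tables roughly like
 *
 *	load_cr3(swapper_pg_dir);
 *
 * i.e. CR3 wants a physical address, which __pa() supplies.
 */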

#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
	unsigned short		back_link, __blh;
	unsigned long		sp0;
	unsigned short		ss0, __ss0h;
	unsigned long		sp1;
	/* ss1 caches MSR_IA32_SYSENTER_CS: */
	unsigned short		ss1, __ss1h;
	unsigned long		sp2;
	unsigned short		ss2, __ss2h;
	unsigned long		__cr3;
	unsigned long		ip;
	unsigned long		flags;
	unsigned long		ax;
	unsigned long		cx;
	unsigned long		dx;
	unsigned long		bx;
	unsigned long		sp;
	unsigned long		bp;
	unsigned long		si;
	unsigned long		di;
	unsigned short		es, __esh;
	unsigned short		cs, __csh;
	unsigned short		ss, __ssh;
	unsigned short		ds, __dsh;
	unsigned short		fs, __fsh;
	unsigned short		gs, __gsh;
	unsigned short		ldt, __ldth;
	unsigned short		trace;
	unsigned short		io_bitmap_base;

} __attribute__((packed));
#else
struct x86_hw_tss {
	u32			reserved1;
	u64			sp0;
	u64			sp1;
	u64			sp2;
	u64			reserved2;
	u64			ist[7];
	u32			reserved3;
	u32			reserved4;
	u16			reserved5;
	u16			io_bitmap_base;

} __attribute__((packed)) ____cacheline_aligned;
#endif

/*
 * IO-bitmap sizes:
 */
#define IO_BITMAP_BITS			65536
#define IO_BITMAP_BYTES			(IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS			(IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET		offsetof(struct tss_struct, io_bitmap)
#define INVALID_IO_BITMAP_OFFSET	0x8000

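/*
 * Worked out (illustrative, not in the original header): 65536 bits
 * cover the whole 16-bit I/O port space, so IO_BITMAP_BYTES is
 * 65536/8 = 8192 and IO_BITMAP_LONGS is 8192/8 = 1024 on 64-bit
 * (8192/4 = 2048 on 32-bit). INVALID_IO_BITMAP_OFFSET points past
 * the TSS limit, so any user-space port access faults.
 */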

struct tss_struct {
	/*
	 * The hardware state:
	 */
	struct x86_hw_tss	x86_tss;

	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long		io_bitmap[IO_BITMAP_LONGS + 1];

	/*
	 * .. and then another 0x100 bytes for the emergency kernel stack:
	 */
	unsigned long		stack[64];

} ____cacheline_aligned;

DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);

/*
 * Save the original ist values for checking stack pointers during debugging
 */
struct orig_ist {
	unsigned long		ist[7];
};

#define MXCSR_DEFAULT		0x1f80

struct i387_fsave_struct {
	u32			cwd;	/* FPU Control Word		*/
	u32			swd;	/* FPU Status Word		*/
	u32			twd;	/* FPU Tag Word			*/
	u32			fip;	/* FPU IP Offset		*/
	u32			fcs;	/* FPU IP Selector		*/
	u32			foo;	/* FPU Operand Pointer Offset	*/
	u32			fos;	/* FPU Operand Pointer Selector	*/

	/* 8*10 bytes for each FP-reg = 80 bytes: */
	u32			st_space[20];

	/* Software status information [not touched by FSAVE]: */
	u32			status;
};

struct i387_fxsave_struct {
	u16			cwd;	/* Control Word			*/
	u16			swd;	/* Status Word			*/
	u16			twd;	/* Tag Word			*/
	u16			fop;	/* Last Instruction Opcode	*/
	union {
		struct {
			u64	rip;	/* Instruction Pointer		*/
			u64	rdp;	/* Data Pointer			*/
		};
		struct {
			u32	fip;	/* FPU IP Offset		*/
			u32	fcs;	/* FPU IP Selector		*/
			u32	foo;	/* FPU Operand Offset		*/
			u32	fos;	/* FPU Operand Selector		*/
		};
	};
	u32			mxcsr;		/* MXCSR Register State */
	u32			mxcsr_mask;	/* MXCSR Mask		*/

	/* 8*16 bytes for each FP-reg = 128 bytes: */
	u32			st_space[32];

	/* 16*16 bytes for each XMM-reg = 256 bytes: */
	u32			xmm_space[64];

	u32			padding[12];

	union {
		u32		padding1[12];
		u32		sw_reserved[12];
	};

} __attribute__((aligned(16)));

struct i387_soft_struct {
	u32			cwd;
	u32			swd;
	u32			twd;
	u32			fip;
	u32			fcs;
	u32			foo;
	u32			fos;
	/* 8*10 bytes for each FP-reg = 80 bytes: */
	u32			st_space[20];
	u8			ftop;
	u8			changed;
	u8			lookahead;
	u8			no_update;
	u8			rm;
	u8			alimit;
	struct math_emu_info	*info;
	u32			entry_eip;
};

struct ymmh_struct {
	/* 16 * 16 bytes for each YMMH-reg = 256 bytes */
	u32 ymmh_space[64];
};

struct xsave_hdr_struct {
	u64 xstate_bv;
	u64 reserved1[2];
	u64 reserved2[5];
} __attribute__((packed));

struct xsave_struct {
	struct i387_fxsave_struct i387;
	struct xsave_hdr_struct xsave_hdr;
	struct ymmh_struct ymmh;
	/* new processor state extensions will go here */
} __attribute__ ((packed, aligned (64)));

union thread_xstate {
	struct i387_fsave_struct	fsave;
	struct i387_fxsave_struct	fxsave;
	struct i387_soft_struct		soft;
	struct xsave_struct		xsave;
};

struct fpu {
	unsigned int last_cpu;
	unsigned int has_fpu;
	union thread_xstate *state;
};

#ifdef CONFIG_X86_64
DECLARE_PER_CPU(struct orig_ist, orig_ist);

union irq_stack_union {
	char irq_stack[IRQ_STACK_SIZE];
	/*
	 * GCC hardcodes the stack canary as %gs:40. Since the
	 * irq_stack is the object at %gs:0, we reserve the bottom
	 * 48 bytes of the irq stack for the canary.
	 */
	struct {
		char gs_base[40];
		unsigned long stack_canary;
	};
};

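/*
 * Illustrative sanity check (not in the original header): gs_base[40]
 * occupies bytes 0..39, so the layout above places the canary exactly
 * where GCC expects it:
 *
 *	BUILD_BUG_ON(offsetof(union irq_stack_union, stack_canary) != 40);
 */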

DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union);
DECLARE_INIT_PER_CPU(irq_stack_union);

DECLARE_PER_CPU(char *, irq_stack_ptr);
DECLARE_PER_CPU(unsigned int, irq_count);
extern unsigned long kernel_eflags;
extern asmlinkage void ignore_sysret(void);
#else	/* X86_64 */
#ifdef CONFIG_CC_STACKPROTECTOR
/*
 * Make sure stack canary segment base is cache-aligned:
 *   "For Intel Atom processors, avoid non zero segment base address
 *    that is not aligned to cache line boundary at all cost."
 * (Optim Ref Manual Assembly/Compiler Coding Rule 15.)
 */
struct stack_canary {
	char __pad[20];		/* canary at %gs:20 */
	unsigned long canary;
};
DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif
#endif	/* X86_64 */

extern unsigned int xstate_size;
extern void free_thread_xstate(struct task_struct *);
extern struct kmem_cache *task_xstate_cachep;

struct perf_event;

struct thread_struct {
	/* Cached TLS descriptors: */
	struct desc_struct	tls_array[GDT_ENTRY_TLS_ENTRIES];
	unsigned long		sp0;
	unsigned long		sp;
#ifdef CONFIG_X86_32
	unsigned long		sysenter_cs;
#else
	unsigned long		usersp;	/* Copy from PDA */
	unsigned short		es;
	unsigned short		ds;
	unsigned short		fsindex;
	unsigned short		gsindex;
#endif
#ifdef CONFIG_X86_32
	unsigned long		ip;
#endif
#ifdef CONFIG_X86_64
	unsigned long		fs;
#endif
	unsigned long		gs;
	/* Save middle states of ptrace breakpoints */
	struct perf_event	*ptrace_bps[HBP_NUM];
	/* Debug status used for traps, single steps, etc... */
	unsigned long		debugreg6;
	/* Keep track of the exact dr7 value set by the user */
	unsigned long		ptrace_dr7;
	/* Fault info: */
	unsigned long		cr2;
	unsigned long		trap_nr;
	unsigned long		error_code;
	/* floating point and extended processor state */
	struct fpu		fpu;
#ifdef CONFIG_X86_32
	/* Virtual 86 mode info */
	struct vm86_struct __user *vm86_info;
	unsigned long		screen_bitmap;
	unsigned long		v86flags;
	unsigned long		v86mask;
	unsigned long		saved_sp0;
	unsigned int		saved_fs;
	unsigned int		saved_gs;
#endif
	/* IO permissions: */
	unsigned long		*io_bitmap_ptr;
	unsigned long		iopl;
	/* Max allowed port in the bitmap, in bytes: */
	unsigned		io_bitmap_max;
};

/*
 * Set IOPL bits in EFLAGS from given mask
 */
static inline void native_set_iopl_mask(unsigned mask)
{
#ifdef CONFIG_X86_32
	unsigned int reg;

	asm volatile ("pushfl;"
		      "popl %0;"
		      "andl %1, %0;"
		      "orl %2, %0;"
		      "pushl %0;"
		      "popfl"
		      : "=&r" (reg)
		      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
#endif
}

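/*
 * Illustrative (not in the original header): X86_EFLAGS_IOPL is the
 * two-bit field at bits 12-13 of EFLAGS, so e.g.
 *
 *	native_set_iopl_mask(3 << 12);
 *
 * raises IOPL to 3, permitting ring-3 in/out instructions; callers
 * normally go through the set_iopl_mask() wrapper defined below.
 */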

static inline void
native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
{
	tss->x86_tss.sp0 = thread->sp0;
#ifdef CONFIG_X86_32
	/* Only happens when SEP is enabled, no need to test "SEP"arately: */
	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
		tss->x86_tss.ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
#endif
}

static inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
	asm volatile("swapgs" ::: "memory");
#endif
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __cpuid			native_cpuid
#define paravirt_enabled()	0

static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	native_load_sp0(tss, thread);
}

#define set_iopl_mask native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */

/*
 * Save the cr4 feature set we're using (ie
 * Pentium 4MB enable and PPro Global page
 * enable), so that any CPUs that boot up
 * after us can get the correct flags.
 */
extern unsigned long mmu_cr4_features;

static inline void set_in_cr4(unsigned long mask)
{
	unsigned long cr4;

	mmu_cr4_features |= mask;
	cr4 = read_cr4();
	cr4 |= mask;
	write_cr4(cr4);
}

static inline void clear_in_cr4(unsigned long mask)
{
	unsigned long cr4;

	mmu_cr4_features &= ~mask;
	cr4 = read_cr4();
	cr4 &= ~mask;
	write_cr4(cr4);
}

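/*
 * Illustrative usage (not in the original header): feature setup code
 * enables a CR4 bit on the boot CPU and records it in mmu_cr4_features
 * so later-booting CPUs pick it up too, e.g. when large pages are
 * supported:
 *
 *	set_in_cr4(X86_CR4_PSE);
 *
 * clear_in_cr4() undoes both the CR4 bit and the recorded flag.
 */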

typedef struct {
	unsigned long		seg;
} mm_segment_t;


/*
 * create a kernel thread without removing it from tasklists
 */
extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy state */
extern void prepare_to_copy(struct task_struct *tsk);

unsigned long get_wchan(struct task_struct *p);

/*
 * Generic CPUID function
 * clear %ecx since some CPUs (Cyrix MII) do not set or clear %ecx,
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op,
			 unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}

/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return edx;
}

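/*
 * Illustrative usage (not in the original header; variable names are
 * hypothetical): reading the vendor string from leaf 0 and querying a
 * subleaf with cpuid_count():
 *
 *	unsigned int eax, ebx, ecx, edx;
 *	char vendor[13];
 *
 *	cpuid(0, &eax, &ebx, &ecx, &edx);
 *	memcpy(vendor + 0, &ebx, 4);
 *	memcpy(vendor + 4, &edx, 4);
 *	memcpy(vendor + 8, &ecx, 4);
 *	vendor[12] = 0;				// e.g. "GenuineIntel"
 *
 *	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);	// first cache leaf
 *	unsigned int max_ext = cpuid_eax(0x80000000);	// max extended leaf
 */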

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	asm volatile("rep; nop" ::: "memory");
}

static inline void cpu_relax(void)
{
	rep_nop();
}

/* Stop speculative execution and prefetching of modified code. */
static inline void sync_core(void)
{
	int tmp;

#if defined(CONFIG_M386) || defined(CONFIG_M486)
	if (boot_cpu_data.x86 < 5)
		/* There is no speculative execution.
		 * jmp is a barrier to prefetching. */
		asm volatile("jmp 1f\n1:\n" ::: "memory");
	else
#endif
		/* cpuid is a barrier to speculative execution.
		 * Prefetched instructions are automatically
		 * invalidated when modified. */
		asm volatile("cpuid" : "=a" (tmp) : "0" (1)
			     : "ebx", "ecx", "edx", "memory");
}

static inline void __monitor(const void *eax, unsigned long ecx,
			     unsigned long edx)
{
	/* "monitor %eax, %ecx, %edx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc8;"
		     :: "a" (eax), "c" (ecx), "d"(edx));
}

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax, %ecx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}

static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
	trace_hardirqs_on();
	/* "mwait %eax, %ecx;" */
	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}

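/*
 * Illustrative idle sketch (not in the original header): mwait-based
 * idle arms the monitor on the thread flags word and sleeps until that
 * cache line is written (e.g. a wakeup setting TIF_NEED_RESCHED) or an
 * interrupt arrives:
 *
 *	__monitor(&current_thread_info()->flags, 0, 0);
 *	if (!need_resched())
 *		__mwait(0, 0);
 */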

extern void select_idle_routine(const struct cpuinfo_x86 *c);
extern void init_amd_e400_c1e_mask(void);

extern unsigned long		boot_option_idle_override;
extern bool			amd_e400_c1e_detected;

enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
			 IDLE_POLL, IDLE_FORCE_MWAIT};

extern void enable_sep_cpu(void);
extern int sysenter_setup(void);

extern void early_trap_init(void);

/* Defined in head.S */
extern struct desc_ptr		early_gdt_descr;

extern void cpu_set_gdt(int);
extern void switch_to_new_gdt(int);
extern void load_percpu_segment(int);
extern void cpu_init(void);

static inline unsigned long get_debugctlmsr(void)
{
	unsigned long debugctlmsr = 0;

#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return 0;
#endif
	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);

	return debugctlmsr;
}

static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return;
#endif
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}

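/*
 * Illustrative read-modify-write (not in the original header): ptrace
 * block-stepping sets the branch-trap flag roughly like
 *
 *	update_debugctlmsr(get_debugctlmsr() | DEBUGCTLMSR_BTF);
 *
 * The family checks above turn both helpers into no-ops on pre-P6
 * CPUs, which lack the DEBUGCTL MSR.
 */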

/*
 * from system description table in BIOS. Mostly for MCA use, but
 * others may find it useful:
 */
extern unsigned int		machine_id;
extern unsigned int		machine_submodel_id;
extern unsigned int		BIOS_revision;

/* Boot loader type from the setup header: */
extern int			bootloader_type;
extern int			bootloader_version;

extern char			ignore_fpu_irq;

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#ifdef CONFIG_X86_32
# define BASE_PREFETCH		ASM_NOP4
# define ARCH_HAS_PREFETCH
#else
# define BASE_PREFETCH		"prefetcht0 (%1)"
#endif

/*
 * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
 *
 * It's not worth caring about 3dnow prefetches for the K6
 * because they are microcoded there and very slow.
 */
static inline void prefetch(const void *x)
{
	alternative_input(BASE_PREFETCH,
			  "prefetchnta (%1)",
			  X86_FEATURE_XMM,
			  "r" (x));
}

/*
 * 3dnow prefetch to get an exclusive cache line.
 * Useful for spinlocks to avoid one state transition in the
 * cache coherency protocol:
 */
static inline void prefetchw(const void *x)
{
	alternative_input(BASE_PREFETCH,
			  "prefetchw (%1)",
			  X86_FEATURE_3DNOW,
			  "r" (x));
}

static inline void spin_lock_prefetch(const void *x)
{
	prefetchw(x);
}

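/*
 * Illustrative usage (not in the original header; p and process() are
 * hypothetical): prefetching the next node while the current one is
 * processed hides the cache-miss latency of a list walk:
 *
 *	for (p = head; p; p = p->next) {
 *		prefetch(p->next);
 *		process(p);
 *	}
 *
 * prefetchw() is the write-intent variant, e.g. for a lock word that
 * is about to be stored to.
 */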

#ifdef CONFIG_X86_32
/*
 * User space process size: 3GB (default).
 */
#define TASK_SIZE		PAGE_OFFSET
#define TASK_SIZE_MAX		TASK_SIZE
#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		STACK_TOP

#define INIT_THREAD  {							  \
	.sp0			= sizeof(init_stack) + (long)&init_stack, \
	.vm86_info		= NULL,					  \
	.sysenter_cs		= __KERNEL_CS,				  \
	.io_bitmap_ptr		= NULL,					  \
}

/*
 * Note that the .io_bitmap member must be extra-big. This is because
 * the CPU will access an additional byte beyond the end of the IO
 * permission bitmap. The extra byte must be all 1 bits, and must
 * be within the limit.
 */
#define INIT_TSS  {							  \
	.x86_tss = {							  \
		.sp0		= sizeof(init_stack) + (long)&init_stack, \
		.ss0		= __KERNEL_DS,				  \
		.ss1		= __KERNEL_CS,				  \
		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,		  \
	 },								  \
	.io_bitmap		= { [0 ... IO_BITMAP_LONGS] = ~0 },	  \
}

extern unsigned long thread_saved_pc(struct task_struct *tsk);

#define THREAD_SIZE_LONGS	(THREAD_SIZE/sizeof(unsigned long))
#define KSTK_TOP(info)						\
({								\
	unsigned long *__ptr = (unsigned long *)(info);		\
	(unsigned long)(&__ptr[THREAD_SIZE_LONGS]);		\
})

/*
 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
 * This is necessary to guarantee that the entire "struct pt_regs"
 * is accessible even if the CPU hasn't stored the SS/ESP registers
 * on the stack (interrupt gate does not save these registers
 * when switching to the same priv ring).
 * Therefore beware: accessing the ss/esp fields of the
 * "struct pt_regs" is possible, but they may contain the
 * completely wrong values.
 */
#define task_pt_regs(task)					\
({								\
	struct pt_regs *__regs__;				\
	__regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
	__regs__ - 1;						\
})

#define KSTK_ESP(task)		(task_pt_regs(task)->sp)

#else
/*
 * User space process size. 47 bits minus one guard page.
 */
#define TASK_SIZE_MAX	((1UL << 47) - PAGE_SIZE)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define IA32_PAGE_OFFSET	((current->personality & ADDR_LIMIT_3GB) ? \
					0xc0000000 : 0xFFFFe000)

#define TASK_SIZE		(test_thread_flag(TIF_ADDR32) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)
#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_ADDR32)) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)

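/*
 * Worked out (illustrative, not in the original header):
 *
 *	TASK_SIZE_MAX = (1UL << 47) - PAGE_SIZE
 *	              = 0x800000000000 - 0x1000
 *	              = 0x7ffffffff000
 *
 * so user space ends one unmapped guard page below the 47-bit
 * canonical-address boundary.
 */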

#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		TASK_SIZE_MAX

#define INIT_THREAD  { \
	.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

#define INIT_TSS  { \
	.x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

/*
 * Return saved PC of a blocked thread.
 * What is this good for? It will always be the scheduler or ret_from_fork.
 */
#define thread_saved_pc(t)	(*(unsigned long *)((t)->thread.sp - 8))

#define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.sp0 - 1)
extern unsigned long KSTK_ESP(struct task_struct *task);

/*
 * User space RSP while inside the SYSCALL fast path
 */
DECLARE_PER_CPU(unsigned long, old_rsp);

#endif /* CONFIG_X86_64 */

extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
					       unsigned long new_sp);

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))

#define KSTK_EIP(task)		(task_pt_regs(task)->ip)

/* Get/set a process' ability to use the timestamp counter instruction */
#define GET_TSC_CTL(adr)	get_tsc_mode((adr))
#define SET_TSC_CTL(val)	set_tsc_mode((val))

extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);

extern int amd_get_nb_id(int cpu);

struct aperfmperf {
	u64 aperf, mperf;
};

static inline void get_aperfmperf(struct aperfmperf *am)
{
	WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_APERFMPERF));

	rdmsrl(MSR_IA32_APERF, am->aperf);
	rdmsrl(MSR_IA32_MPERF, am->mperf);
}

#define APERFMPERF_SHIFT 10

static inline
unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
				    struct aperfmperf *new)
{
	u64 aperf = new->aperf - old->aperf;
	u64 mperf = new->mperf - old->mperf;
	unsigned long ratio = aperf;

	mperf >>= APERFMPERF_SHIFT;
	if (mperf)
		ratio = div64_u64(aperf, mperf);

	return ratio;
}

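/*
 * Worked example (illustrative, not in the original header): the
 * result is fixed-point, scaled by 2^APERFMPERF_SHIFT. If APERF
 * advanced by 2097152 and MPERF by 1048576 between two samples,
 * then mperf >> 10 = 1024 and ratio = 2097152 / 1024 = 2048,
 * i.e. 2.0 in this scale: the CPU ran at twice its reference clock.
 */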

/*
 * AMD errata checking
 */
#ifdef CONFIG_CPU_SUP_AMD
extern const int amd_erratum_383[];
extern const int amd_erratum_400[];
extern bool cpu_has_amd_erratum(const int *);

#define AMD_LEGACY_ERRATUM(...)		{ -1, __VA_ARGS__, 0 }
#define AMD_OSVW_ERRATUM(osvw_id, ...)	{ osvw_id, __VA_ARGS__, 0 }
#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
	((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
#define AMD_MODEL_RANGE_FAMILY(range)	(((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)	(((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)	((range) & 0xfff)

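/*
 * Worked encoding (illustrative, not in the original header):
 * AMD_MODEL_RANGE(0x10, 0x0, 0x0, 0xff, 0xf) covers every model and
 * stepping of family 0x10:
 *
 *	(0x10 << 24) | (0x0 << 16) | (0x0 << 12) | (0xff << 4) | 0xf
 *	= 0x10000fff
 *
 * START and END each decode as a 12-bit (model << 4 | stepping) value.
 */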

#else
#define cpu_has_amd_erratum(x)	(false)
#endif /* CONFIG_CPU_SUP_AMD */

void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);
bool set_pm_idle_to_default(void);

void stop_this_cpu(void *dummy);

#endif /* _ASM_X86_PROCESSOR_H */