x86: unify prefetch operations
deliverable/linux.git: include/asm-x86/processor_64.h
/*
 * Copyright (C) 1994 Linus Torvalds
 */

#ifndef __ASM_X86_64_PROCESSOR_H
#define __ASM_X86_64_PROCESSOR_H

#include <asm/segment.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/cpufeature.h>
#include <linux/threads.h>
#include <asm/msr.h>
#include <asm/current.h>
#include <asm/system.h>
#include <linux/personality.h>
#include <asm/desc_defs.h>
/*
 * User space process size: 47 bits minus one guard page.
 */
#define TASK_SIZE64	(0x800000000000UL - 4096)
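/*
 * Illustrative aside (not in the original header): 0x800000000000UL is
 * 1UL << 47, so with a 4096-byte guard page the definition above is
 * equivalent to:
 *
 *	#define TASK_SIZE64	((1UL << 47) - PAGE_SIZE)
 *
 * assuming PAGE_SIZE is 4096, as it is on x86-64.
 */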

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap() calls.
 */
#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? 0xc0000000 : 0xFFFFe000)

#define TASK_SIZE		(test_thread_flag(TIF_IA32) ? IA32_PAGE_OFFSET : TASK_SIZE64)
#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_IA32)) ? IA32_PAGE_OFFSET : TASK_SIZE64)
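/*
 * Illustrative sketch (not in the original header): TASK_SIZE picks the
 * 32-bit compat limit for tasks flagged TIF_IA32 and the 47-bit limit
 * otherwise. A hypothetical helper for range-checking a user address in
 * another task could be written as:
 *
 *	static inline int task_addr_ok(struct task_struct *tsk,
 *				       unsigned long addr)
 *	{
 *		return addr < TASK_SIZE_OF(tsk);
 *	}
 */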

struct i387_fxsave_struct {
	u16	cwd;
	u16	swd;
	u16	twd;
	u16	fop;
	u64	rip;
	u64	rdp;
	u32	mxcsr;
	u32	mxcsr_mask;
	u32	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
	u32	xmm_space[64];	/* 16*16 bytes for each XMM-reg = 256 bytes */
	u32	padding[24];
} __attribute__ ((aligned (16)));
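/*
 * Illustrative check (not in the original header): the fields above add
 * up to the 512-byte FXSAVE area that the fxsave/fxrstor instructions
 * operate on; a build-time assertion could verify this:
 *
 *	BUILD_BUG_ON(sizeof(struct i387_fxsave_struct) != 512);
 */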

union i387_union {
	struct i387_fxsave_struct fxsave;
};

DECLARE_PER_CPU(struct orig_ist, orig_ist);

#define INIT_THREAD  { \
	.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

#define INIT_TSS  { \
	.x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}
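/*
 * Illustrative note (not in the original header): both initializers set
 * sp0 one byte past the end of init_stack, i.e. at the top of the boot
 * kernel stack, since x86 stacks grow downward. Assuming init_stack is
 * an array, this is equivalent to:
 *
 *	.sp0 = (unsigned long)(&init_stack + 1)
 */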

#define start_thread(regs,new_rip,new_rsp) do { \
	asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0)); \
	load_gs_index(0); \
	(regs)->ip = (new_rip); \
	(regs)->sp = (new_rsp); \
	write_pda(oldrsp, (new_rsp)); \
	(regs)->cs = __USER_CS; \
	(regs)->ss = __USER_DS; \
	(regs)->flags = 0x200; /* X86_EFLAGS_IF: start with interrupts enabled */ \
	set_fs(USER_DS); \
} while(0)
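/*
 * Usage sketch (illustrative, with hypothetical values): an execve()
 * path hands the new program its entry point and initial stack through
 * this macro:
 *
 *	start_thread(regs, elf_entry, user_stack_top);
 *
 * where regs is the task's saved register frame and both addresses come
 * from the loaded binary.
 */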

/*
 * Return saved PC of a blocked thread.
 * What is this good for? It will always be the scheduler or ret_from_fork.
 */
#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.sp - 8))
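/*
 * Illustrative note (not in the original header): thread.sp is the
 * kernel stack pointer saved at the last context switch, and the word
 * next to it holds the return address pushed by that switch, which is
 * why the result always points into the scheduler or ret_from_fork.
 */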

#define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.sp0 - 1)
#define KSTK_ESP(tsk)		-1 /* Sorry, this doesn't work for syscalls. */
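/*
 * Illustrative sketch (not in the original header): the user-mode
 * register frame is stored at the very top of the kernel stack, so
 * stepping one struct pt_regs back from sp0 locates it:
 *
 *	struct pt_regs *regs = task_pt_regs(tsk);
 *	unsigned long user_ip = regs->ip;
 *
 * where regs->ip is the saved user-mode instruction pointer.
 */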

/*
 * Pick the NOP encodings at build time: P6-style nops when building for
 * Intel CPUs (MPSC/Core2), K8-style nops otherwise.
 */
#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2)
#define ASM_NOP1 P6_NOP1
#define ASM_NOP2 P6_NOP2
#define ASM_NOP3 P6_NOP3
#define ASM_NOP4 P6_NOP4
#define ASM_NOP5 P6_NOP5
#define ASM_NOP6 P6_NOP6
#define ASM_NOP7 P6_NOP7
#define ASM_NOP8 P6_NOP8
#else
#define ASM_NOP1 K8_NOP1
#define ASM_NOP2 K8_NOP2
#define ASM_NOP3 K8_NOP3
#define ASM_NOP4 K8_NOP4
#define ASM_NOP5 K8_NOP5
#define ASM_NOP6 K8_NOP6
#define ASM_NOP7 K8_NOP7
#define ASM_NOP8 K8_NOP8
#endif

/* Opteron nops */
#define K8_NOP1 ".byte 0x90\n"
#define K8_NOP2 ".byte 0x66,0x90\n"
#define K8_NOP3 ".byte 0x66,0x66,0x90\n"
#define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n"
#define K8_NOP5 K8_NOP3 K8_NOP2
#define K8_NOP6 K8_NOP3 K8_NOP3
#define K8_NOP7 K8_NOP4 K8_NOP3
#define K8_NOP8 K8_NOP4 K8_NOP4
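/*
 * Illustrative note (not in the original header): K8 lacks a cheap
 * single-instruction long nop, so the 5- to 8-byte variants rely on C
 * string-literal concatenation of the shorter prefixed nops; K8_NOP5,
 * for instance, expands to:
 *
 *	".byte 0x66,0x66,0x90\n" ".byte 0x66,0x90\n"
 */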

/* P6 nops */
/* uses eax dependencies (Intel-recommended choice) */
#define P6_NOP1 ".byte 0x90\n"
#define P6_NOP2 ".byte 0x66,0x90\n"
#define P6_NOP3 ".byte 0x0f,0x1f,0x00\n"
#define P6_NOP4 ".byte 0x0f,0x1f,0x40,0\n"
#define P6_NOP5 ".byte 0x0f,0x1f,0x44,0x00,0\n"
#define P6_NOP6 ".byte 0x66,0x0f,0x1f,0x44,0x00,0\n"
#define P6_NOP7 ".byte 0x0f,0x1f,0x80,0,0,0,0\n"
#define P6_NOP8 ".byte 0x0f,0x1f,0x84,0x00,0,0,0,0\n"
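/*
 * Illustrative note (not in the original header): 0x0f,0x1f is the
 * multi-byte "nopl" opcode; the ModRM/SIB/displacement bytes that follow
 * pad it to the desired length while keeping it one instruction, so
 * P6_NOP4 encodes "nopl 0x0(%rax)".
 */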

#define ASM_NOP_MAX 8
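/*
 * Usage sketch (illustrative, not in the original header): the ASM_NOPn
 * macros are plain strings of .byte directives and drop straight into
 * inline assembly, e.g. to emit a single 4-byte nop:
 *
 *	asm volatile(ASM_NOP4);
 *
 * ASM_NOP_MAX is the longest single nop available (8 bytes), for code
 * that pads in nop-sized chunks.
 */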

#endif /* __ASM_X86_64_PROCESSOR_H */