84a4c5e47d575e889ebaa3d5b704ce28ab72742d
[deliverable/linux.git] / include / asm-x86 / processor_32.h
1 /*
2 * Copyright (C) 1994 Linus Torvalds
3 */
4
5 #ifndef __ASM_I386_PROCESSOR_H
6 #define __ASM_I386_PROCESSOR_H
7
8 #include <asm/vm86.h>
9 #include <asm/math_emu.h>
10 #include <asm/segment.h>
11 #include <asm/page.h>
12 #include <asm/types.h>
13 #include <asm/sigcontext.h>
14 #include <asm/cpufeature.h>
15 #include <asm/msr.h>
16 #include <asm/system.h>
17 #include <linux/threads.h>
18 #include <linux/init.h>
19 #include <asm/desc_defs.h>
20
21
/*
 * Per-CPU last-level-cache ID; the following now lives in the per cpu area
 * instead of the former global array:
 * extern int cpu_llc_id[NR_CPUS];
 */
DECLARE_PER_CPU(u8, cpu_llc_id);

/*
 * User space process size: 3GB (default).
 * Everything from PAGE_OFFSET up is kernel address space.
 */
#define TASK_SIZE	(PAGE_OFFSET)
32
33
/*
 * x87 FPU state as laid down in memory by the FSAVE/FNSAVE instructions
 * (legacy 108-byte format), plus one software status word at the end.
 * Field order and sizes are dictated by the hardware format — do not
 * reorder or resize.
 */
struct i387_fsave_struct {
	long	cwd;		/* FPU control word */
	long	swd;		/* FPU status word */
	long	twd;		/* FPU tag word */
	long	fip;		/* FPU instruction pointer offset */
	long	fcs;		/* FPU instruction pointer selector */
	long	foo;		/* FPU operand pointer offset */
	long	fos;		/* FPU operand pointer selector */
	long	st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
	long	status;		/* software status information */
};
45
/*
 * FPU/SSE state as laid down in memory by the FXSAVE instruction
 * (512-byte format).  The hardware requires the save area to be
 * 16-byte aligned, hence the aligned attribute.  Field order and
 * sizes are dictated by the hardware format — do not reorder.
 */
struct i387_fxsave_struct {
	unsigned short	cwd;	/* FPU control word */
	unsigned short	swd;	/* FPU status word */
	unsigned short	twd;	/* FPU tag word (abridged form) */
	unsigned short	fop;	/* last FPU opcode */
	long	fip;		/* FPU instruction pointer offset */
	long	fcs;		/* FPU instruction pointer selector */
	long	foo;		/* FPU operand pointer offset */
	long	fos;		/* FPU operand pointer selector */
	long	mxcsr;		/* SSE control and status register */
	long	mxcsr_mask;	/* valid bits in MXCSR */
	long	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
	long	xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
	long	padding[56];	/* pad out to the full 512-byte area */
} __attribute__ ((aligned (16)));
61
/*
 * FPU state kept by the math-emu software FPU emulator.  The leading
 * fields deliberately mirror struct i387_fsave_struct so user-visible
 * state has the same layout as the hardware FSAVE image.
 */
struct i387_soft_struct {
	long	cwd;		/* emulated FPU control word */
	long	swd;		/* emulated FPU status word */
	long	twd;		/* emulated FPU tag word */
	long	fip;		/* emulated instruction pointer offset */
	long	fcs;		/* emulated instruction pointer selector */
	long	foo;		/* emulated operand pointer offset */
	long	fos;		/* emulated operand pointer selector */
	long	st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
	/* emulator-internal bookkeeping below this point: */
	unsigned char	ftop, changed, lookahead, no_update, rm, alimit;
	struct info	*info;	/* NOTE(review): opaque emulator state — defined by math-emu */
	unsigned long	entry_eip;
};
75
/*
 * All possible representations of FPU state.  Which member is valid
 * depends on how the state was saved: fsave (legacy FSAVE), fxsave
 * (FXSAVE, CPUs with FXSR), or soft (math emulation).
 */
union i387_union {
	struct i387_fsave_struct	fsave;
	struct i387_fxsave_struct	fxsave;
	struct i387_soft_struct		soft;
};
81
/*
 * Initial thread_struct for the boot (init) task: ring-0 stack pointer
 * at the top of init_stack, no vm86 state, no IO permission bitmap,
 * and %fs pointing at the per-cpu segment.
 */
#define INIT_THREAD  {							\
	.sp0 = sizeof(init_stack) + (long)&init_stack,			\
	.vm86_info = NULL,						\
	.sysenter_cs = __KERNEL_CS,					\
	.io_bitmap_ptr = NULL,						\
	.fs = __KERNEL_PERCPU,						\
}
89
/*
 * Note that the .io_bitmap member must be extra-big. This is because
 * the CPU will access an additional byte beyond the end of the IO
 * permission bitmap. The extra byte must be all 1 bits, and must
 * be within the limit.  The "[ 0 ... IO_BITMAP_LONGS ]" designated
 * range therefore fills IO_BITMAP_LONGS + 1 entries with all-ones.
 */
#define INIT_TSS  {							\
	.x86_tss = {							\
		.sp0 = sizeof(init_stack) + (long)&init_stack,		\
		.ss0 = __KERNEL_DS,					\
		.ss1 = __KERNEL_CS,					\
		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,		\
	},								\
	.io_bitmap = { [ 0 ... IO_BITMAP_LONGS] = ~0 },			\
}
105
/*
 * Set up user-mode register state for a fresh exec: zero %gs, clear
 * the saved %fs, reset the address limit via set_fs(USER_DS), load
 * the user segment selectors, and point ip/sp at the new image's
 * entry point and stack.
 */
#define start_thread(regs, new_eip, new_esp) do {		\
	__asm__("movl %0,%%gs": :"r" (0));			\
	regs->fs = 0;						\
	set_fs(USER_DS);					\
	regs->ds = __USER_DS;					\
	regs->es = __USER_DS;					\
	regs->ss = __USER_DS;					\
	regs->cs = __USER_CS;					\
	regs->ip = new_eip;					\
	regs->sp = new_esp;					\
} while (0)
117
118
/* Saved program counter of a descheduled task (defined out of line). */
extern unsigned long thread_saved_pc(struct task_struct *tsk);

/* Kernel stack size expressed in units of unsigned long. */
#define THREAD_SIZE_LONGS      (THREAD_SIZE/sizeof(unsigned long))
/*
 * One-past-the-end (top) address of a task's kernel stack, given its
 * thread_info/stack page.
 */
#define KSTK_TOP(info)                                                 \
({                                                                     \
       unsigned long *__ptr = (unsigned long *)(info);                 \
       (unsigned long)(&__ptr[THREAD_SIZE_LONGS]);                     \
})
127
/*
 * Return the pt_regs saved at the top of @task's kernel stack.
 *
 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
 * This is necessary to guarantee that the entire "struct pt_regs"
 * is accessible even if the CPU hasn't stored the SS/ESP registers
 * on the stack (interrupt gate does not save these registers
 * when switching to the same priv ring).
 * Therefore beware: accessing the ss/esp fields of the
 * "struct pt_regs" is possible, but they may contain the
 * completely wrong values.
 */
#define task_pt_regs(task)                                             \
({                                                                     \
       struct pt_regs *__regs__;                                       \
       __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
       __regs__ - 1;                                                   \
})
144
145 #define KSTK_ESP(task) (task_pt_regs(task)->sp)
146
/*
 * Multi-byte NOP sequences of length 1..8 for code patching.
 * Generic versions from gas — mov/lea encodings that are effective
 * NOPs on any x86.  The byte sequences are fixed; do not edit.
 */
#define GENERIC_NOP1 ".byte 0x90\n"				/* nop */
#define GENERIC_NOP2 ".byte 0x89,0xf6\n"			/* movl %esi,%esi */
#define GENERIC_NOP3 ".byte 0x8d,0x76,0x00\n"			/* leal 0x0(%esi),%esi */
#define GENERIC_NOP4 ".byte 0x8d,0x74,0x26,0x00\n"		/* leal 0x0(%esi,1),%esi */
#define GENERIC_NOP5 GENERIC_NOP1 GENERIC_NOP4
#define GENERIC_NOP6 ".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n"	/* leal 0x0(%esi),%esi */
#define GENERIC_NOP7 ".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n" /* leal 0x0(%esi,1),%esi */
#define GENERIC_NOP8 GENERIC_NOP1 GENERIC_NOP7
156
/*
 * Opteron (K8) nops: the single-byte 0x90 NOP with 0x66 operand-size
 * prefixes; longer lengths are built by concatenating shorter ones.
 */
#define K8_NOP1 GENERIC_NOP1
#define K8_NOP2 ".byte 0x66,0x90\n"
#define K8_NOP3 ".byte 0x66,0x66,0x90\n"
#define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n"
#define K8_NOP5 K8_NOP3 K8_NOP2
#define K8_NOP6 K8_NOP3 K8_NOP3
#define K8_NOP7 K8_NOP4 K8_NOP3
#define K8_NOP8 K8_NOP4 K8_NOP4
166
/* K7 (Athlon) nops */
/* uses eax dependencies (arbitrary choice) */
#define K7_NOP1 GENERIC_NOP1
#define K7_NOP2 ".byte 0x8b,0xc0\n"		/* movl %eax,%eax */
#define K7_NOP3 ".byte 0x8d,0x04,0x20\n"	/* leal (,%eax,1),%eax */
#define K7_NOP4 ".byte 0x8d,0x44,0x20,0x00\n"	/* leal 0x0(,%eax,1),%eax */
/*
 * ASM_NOP1 is defined further down once the CPU family is selected;
 * since macro expansion happens at the point of use, this forward
 * reference is fine.
 */
#define K7_NOP5 K7_NOP4 ASM_NOP1
#define K7_NOP6 ".byte 0x8d,0x80,0,0,0,0\n"	/* leal 0x0(%eax),%eax */
#define K7_NOP7 ".byte 0x8D,0x04,0x05,0,0,0,0\n" /* leal 0x0(,%eax,1),%eax */
#define K7_NOP8 K7_NOP7 ASM_NOP1
177
/* P6 nops: the dedicated 0F 1F multi-byte NOP instruction */
/* uses eax dependencies (Intel-recommended choice) */
#define P6_NOP1 GENERIC_NOP1
#define P6_NOP2 ".byte 0x66,0x90\n"			/* osp nop */
#define P6_NOP3 ".byte 0x0f,0x1f,0x00\n"		/* nopl (%eax) */
#define P6_NOP4 ".byte 0x0f,0x1f,0x40,0\n"		/* nopl 0x0(%eax) */
#define P6_NOP5 ".byte 0x0f,0x1f,0x44,0x00,0\n"		/* nopl 0x0(%eax,%eax,1) */
#define P6_NOP6 ".byte 0x66,0x0f,0x1f,0x44,0x00,0\n"	/* osp nopl 0x0(%eax,%eax,1) */
#define P6_NOP7 ".byte 0x0f,0x1f,0x80,0,0,0,0\n"	/* nopl 0x0(%eax) */
#define P6_NOP8 ".byte 0x0f,0x1f,0x84,0x00,0,0,0,0\n"	/* nopl 0x0(%eax,%eax,1) */
188
/*
 * Select the ASM_NOP1..ASM_NOP8 flavour matching the CPU family the
 * kernel is tuned for (CONFIG_M* symbols from arch/x86/Kconfig.cpu).
 *
 * Fix: the Pentium 4 family symbol is CONFIG_MPENTIUM4, not
 * CONFIG_PENTIUM4 (which does not exist in Kconfig) — with the typo,
 * P4-tuned kernels silently fell through to the generic NOPs.
 */
#ifdef CONFIG_MK8
#define ASM_NOP1 K8_NOP1
#define ASM_NOP2 K8_NOP2
#define ASM_NOP3 K8_NOP3
#define ASM_NOP4 K8_NOP4
#define ASM_NOP5 K8_NOP5
#define ASM_NOP6 K8_NOP6
#define ASM_NOP7 K8_NOP7
#define ASM_NOP8 K8_NOP8
#elif defined(CONFIG_MK7)
#define ASM_NOP1 K7_NOP1
#define ASM_NOP2 K7_NOP2
#define ASM_NOP3 K7_NOP3
#define ASM_NOP4 K7_NOP4
#define ASM_NOP5 K7_NOP5
#define ASM_NOP6 K7_NOP6
#define ASM_NOP7 K7_NOP7
#define ASM_NOP8 K7_NOP8
#elif defined(CONFIG_M686) || defined(CONFIG_MPENTIUMII) || \
	defined(CONFIG_MPENTIUMIII) || defined(CONFIG_MPENTIUMM) || \
	defined(CONFIG_MCORE2) || defined(CONFIG_MPENTIUM4)
#define ASM_NOP1 P6_NOP1
#define ASM_NOP2 P6_NOP2
#define ASM_NOP3 P6_NOP3
#define ASM_NOP4 P6_NOP4
#define ASM_NOP5 P6_NOP5
#define ASM_NOP6 P6_NOP6
#define ASM_NOP7 P6_NOP7
#define ASM_NOP8 P6_NOP8
#else
#define ASM_NOP1 GENERIC_NOP1
#define ASM_NOP2 GENERIC_NOP2
#define ASM_NOP3 GENERIC_NOP3
#define ASM_NOP4 GENERIC_NOP4
#define ASM_NOP5 GENERIC_NOP5
#define ASM_NOP6 GENERIC_NOP6
#define ASM_NOP7 GENERIC_NOP7
#define ASM_NOP8 GENERIC_NOP8
#endif

/* Longest NOP sequence provided above. */
#define ASM_NOP_MAX 8
230
/* Prefetch instructions for Pentium III and AMD Athlon */
/* It's not worth to care about 3dnow! prefetches for the K6
   because they are microcoded there and very slow.
   However we don't do prefetches for pre XP Athlons currently
   That should be fixed. */
static inline void prefetch(const void *x)
{
	/*
	 * alternative_input() emits a 4-byte NOP that is replaced with
	 * prefetchnta on CPUs that have SSE (X86_FEATURE_XMM), so this
	 * is a no-op on CPUs without the instruction.
	 */
	alternative_input(ASM_NOP4,
			  "prefetchnta (%1)",
			  X86_FEATURE_XMM,
			  "r" (x));
}
243
/* Tell <linux/prefetch.h> that this arch provides its own prefetch(). */
#define ARCH_HAS_PREFETCH

/* 3dnow! prefetch to get an exclusive cache line. Useful for
   spinlocks to avoid one state transition in the cache coherency protocol. */
static inline void prefetchw(const void *x)
{
	/*
	 * As with prefetch(): a 4-byte NOP unless the CPU advertises
	 * 3dnow! (X86_FEATURE_3DNOW), in which case it becomes prefetchw.
	 */
	alternative_input(ASM_NOP4,
			  "prefetchw (%1)",
			  X86_FEATURE_3DNOW,
			  "r" (x));
}
255
256 #endif /* __ASM_I386_PROCESSOR_H */
This page took 0.045753 seconds and 4 git commands to generate.