/* Copyright (C) 1994 Linus Torvalds */
5 #ifndef __ASM_I386_PROCESSOR_H
6 #define __ASM_I386_PROCESSOR_H
9 #include <asm/math_emu.h>
10 #include <asm/segment.h>
12 #include <asm/types.h>
13 #include <asm/sigcontext.h>
14 #include <asm/cpufeature.h>
16 #include <asm/system.h>
17 #include <linux/threads.h>
18 #include <linux/init.h>
19 #include <asm/desc_defs.h>
/*
 * the following now lives in the per cpu area:
 * extern int cpu_llc_id[NR_CPUS];
 */
DECLARE_PER_CPU(u8, cpu_llc_id);
/*
 * User space process size: 3GB (default).
 */
#define TASK_SIZE	(PAGE_OFFSET)
/*
 * FPU register state in the legacy fnsave layout (per the struct name
 * and the 8*10-byte per-register comment below).
 *
 * NOTE(review): the extraction dropped the interior lines between the
 * struct header and st_space (the fused original-line numbers jump
 * 34 -> 42), i.e. the control/status/tag-word fields that normally
 * precede the register area.  Only the members that were visible are
 * kept here, and the closing brace is reconstructed.  Restore the
 * missing fields from the upstream header before relying on this
 * layout.
 */
struct i387_fsave_struct {
	long	st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
	long	status;		/* software status information */
};
/*
 * FPU/SSE register state in the fxsave layout (per the struct name);
 * 16-byte alignment is required by the fxsave/fxrstor instructions,
 * hence the aligned attribute.
 *
 * NOTE(review): the extraction dropped the interior lines between the
 * struct header and st_space (fused original-line numbers jump
 * 46 -> 57), i.e. the control-word/pointer fields that normally come
 * first, and anything between xmm_space and the closing brace
 * (58 -> 60).  Restore the missing fields from the upstream header.
 */
struct i387_fxsave_struct {
	long	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
	long	xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
} __attribute__ ((aligned (16)));
/*
 * State for the software FPU emulator (per the struct name and the
 * math_emu.h include at the top of the file).
 *
 * NOTE(review): the extraction dropped the interior lines between the
 * struct header and st_space (fused original-line numbers jump
 * 62 -> 70) and between alimit and entry_eip (71 -> 73).  Only the
 * visible members are kept; the closing brace is reconstructed.
 * Restore the missing fields from the upstream header.
 */
struct i387_soft_struct {
	long		st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
	unsigned char	ftop, changed, lookahead, no_update, rm, alimit;
	unsigned long	entry_eip;
};
77 struct i387_fsave_struct fsave
;
78 struct i387_fxsave_struct fxsave
;
79 struct i387_soft_struct soft
;
/*
 * Static initializer for a thread_struct: ring-0 stack pointer at the
 * top of init_stack, sysenter entered on the kernel code segment, no
 * IO-permission bitmap, %fs holding the per-cpu segment selector.
 *
 * NOTE(review): the fused original-line numbers jump 83 -> 85, so at
 * least one initializer line between .sp0 and .sysenter_cs was dropped
 * by the extraction, and the closing brace is reconstructed.  Compare
 * against the upstream header.
 */
#define INIT_THREAD  {							\
	.sp0 = sizeof(init_stack) + (long)&init_stack,			\
	.sysenter_cs = __KERNEL_CS,					\
	.io_bitmap_ptr = NULL,						\
	.fs = __KERNEL_PERCPU,						\
}
/*
 * Note that the .io_bitmap member must be extra-big. This is because
 * the CPU will access an additional byte beyond the end of the IO
 * permission bitmap. The extra byte must be all 1 bits, and must
 * be within the limit.
 *
 * NOTE(review): this span is a damaged extraction of the INIT_TSS
 * initializer macro.  The "#define" header line, several initializer
 * lines (the fused original-line numbers jump 98 -> 100 and
 * 101 -> 103), and the closing brace were all lost, and each surviving
 * line still carries its original line number fused to the front.
 * The nesting of the surviving fields cannot be inferred from what is
 * visible, so nothing is reconstructed here; restore the full macro
 * from the upstream header -- the fragment below is not compilable
 * as-is.
 */
98 .sp0 = sizeof(init_stack) + (long)&init_stack, \
100 .ss1 = __KERNEL_CS, \
101 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
103 .io_bitmap = { [ 0 ... IO_BITMAP_LONGS] = ~0 }, \
/*
 * Set up user-mode register state for a fresh exec: clear %gs, point
 * the data/stack segment registers at the user data segment and the
 * code segment at the user code segment, then load the new
 * instruction and stack pointers.
 *
 * NOTE(review): the fused original-line numbers jump 107 -> 110, so a
 * couple of statements after the %gs clear were dropped by the
 * extraction; the closing "} while (0)" (required by the visible
 * "do {") is reconstructed.  Compare against the upstream header.
 */
#define start_thread(regs, new_eip, new_esp) do {			\
	__asm__("movl %0,%%gs": :"r" (0));				\
	regs->ds = __USER_DS;						\
	regs->es = __USER_DS;						\
	regs->ss = __USER_DS;						\
	regs->cs = __USER_CS;						\
	regs->ip = new_eip;						\
	regs->sp = new_esp;						\
} while (0)
/* Saved PC of a blocked task; defined out of line (extern). */
extern unsigned long thread_saved_pc(struct task_struct *tsk);
/* Kernel stack size expressed in unsigned longs. */
#define THREAD_SIZE_LONGS	(THREAD_SIZE/sizeof(unsigned long))

/*
 * Top of the kernel stack whose base is "info": one element past the
 * last unsigned long of the THREAD_SIZE area, as a GCC statement
 * expression.
 *
 * NOTE(review): the "({" and "})" wrappers were lost in extraction and
 * are reconstructed here -- the dangling value expression on the last
 * line requires a statement expression.
 */
#define KSTK_TOP(info)							\
({									\
	unsigned long *__ptr = (unsigned long *)(info);			\
	(unsigned long)(&__ptr[THREAD_SIZE_LONGS]);			\
})
/*
 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
 * This is necessary to guarantee that the entire "struct pt_regs"
 * is accessible even if the CPU hasn't stored the SS/ESP registers
 * on the stack (interrupt gate does not save these registers
 * when switching to the same priv ring).
 * Therefore beware: accessing the ss/esp fields of the
 * "struct pt_regs" is possible, but they may contain the
 * completely wrong values.
 *
 * NOTE(review): damaged extraction -- the code lines below still carry
 * their original line numbers fused to the front, and those numbers
 * jump 141 -> 145, so the statement-expression wrappers and the
 * macro's final value expression were lost.  Reconstructing them here
 * would risk silently changing what the macro evaluates to; restore
 * task_pt_regs() from the upstream header before use.  KSTK_ESP on
 * the last line appears intact apart from the fused number.
 */
138 #define task_pt_regs(task) \
140 struct pt_regs *__regs__; \
141 __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
145 #define KSTK_ESP(task) (task_pt_regs(task)->sp)
/*
 * generic versions from gas: 1- to 8-byte no-op sequences emitted as
 * .byte strings for inline assembly.  The multi-byte forms are
 * lea/mov encodings (0x8d/0x89) that act as nops; 5- and 8-byte
 * versions are built by string concatenation of shorter ones.
 */
#define GENERIC_NOP1	".byte 0x90\n"
#define GENERIC_NOP2	".byte 0x89,0xf6\n"
#define GENERIC_NOP3	".byte 0x8d,0x76,0x00\n"
#define GENERIC_NOP4	".byte 0x8d,0x74,0x26,0x00\n"
#define GENERIC_NOP5	GENERIC_NOP1 GENERIC_NOP4
#define GENERIC_NOP6	".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n"
#define GENERIC_NOP7	".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n"
#define GENERIC_NOP8	GENERIC_NOP1 GENERIC_NOP7
/*
 * K8 nops: 0x66 operand-size prefixes in front of 0x90 (nop);
 * longer versions are concatenations of the shorter ones.
 * K8_NOP1 reuses GENERIC_NOP1 (defined above in this file) and is
 * only expanded at the point of use.
 */
#define K8_NOP1	GENERIC_NOP1
#define K8_NOP2	".byte 0x66,0x90\n"
#define K8_NOP3	".byte 0x66,0x66,0x90\n"
#define K8_NOP4	".byte 0x66,0x66,0x66,0x90\n"
#define K8_NOP5	K8_NOP3 K8_NOP2
#define K8_NOP6	K8_NOP3 K8_NOP3
#define K8_NOP7	K8_NOP4 K8_NOP3
#define K8_NOP8	K8_NOP4 K8_NOP4
/*
 * K7 nops: uses eax dependencies (arbitrary choice).
 * K7_NOP1, K7_NOP5 and K7_NOP8 reference GENERIC_NOP1/ASM_NOP1,
 * which are defined elsewhere in this file and expanded lazily at
 * the point of use.
 */
#define K7_NOP1	GENERIC_NOP1
#define K7_NOP2	".byte 0x8b,0xc0\n"
#define K7_NOP3	".byte 0x8d,0x04,0x20\n"
#define K7_NOP4	".byte 0x8d,0x44,0x20,0x00\n"
#define K7_NOP5	K7_NOP4 ASM_NOP1
#define K7_NOP6	".byte 0x8d,0x80,0,0,0,0\n"
#define K7_NOP7	".byte 0x8D,0x04,0x05,0,0,0,0\n"
#define K7_NOP8	K7_NOP7 ASM_NOP1
/*
 * P6 nops: uses eax dependencies (Intel-recommended choice).
 * These are the 0x0f,0x1f multi-byte NOP encodings; P6_NOP1 reuses
 * GENERIC_NOP1, defined above in this file.
 */
#define P6_NOP1	GENERIC_NOP1
#define P6_NOP2	".byte 0x66,0x90\n"
#define P6_NOP3	".byte 0x0f,0x1f,0x00\n"
#define P6_NOP4	".byte 0x0f,0x1f,0x40,0\n"
#define P6_NOP5	".byte 0x0f,0x1f,0x44,0x00,0\n"
#define P6_NOP6	".byte 0x66,0x0f,0x1f,0x44,0x00,0\n"
#define P6_NOP7	".byte 0x0f,0x1f,0x80,0,0,0,0\n"
#define P6_NOP8	".byte 0x0f,0x1f,0x84,0x00,0,0,0,0\n"
/*
 * Select the ASM_NOP* family for the CPU the kernel is configured for;
 * the GENERIC family is the fallback.
 *
 * NOTE(review): the opening "#ifdef CONFIG_MK8", the "#else" before
 * the GENERIC fallback, and the closing "#endif" were lost in this
 * extraction; they are reconstructed here from the surviving #elif
 * chain (the first group defines ASM_NOP* from K8_NOP*, so the lost
 * guard is inferred to test the K8 config symbol) -- verify the exact
 * guard against the upstream header.
 *
 * NOTE(review): "CONFIG_PENTIUM4" below looks suspect; the usual
 * Kconfig symbol naming pattern (CONFIG_M686, CONFIG_MPENTIUMII, ...)
 * suggests CONFIG_MPENTIUM4 -- confirm before relying on that branch.
 */
#ifdef CONFIG_MK8
#define ASM_NOP1 K8_NOP1
#define ASM_NOP2 K8_NOP2
#define ASM_NOP3 K8_NOP3
#define ASM_NOP4 K8_NOP4
#define ASM_NOP5 K8_NOP5
#define ASM_NOP6 K8_NOP6
#define ASM_NOP7 K8_NOP7
#define ASM_NOP8 K8_NOP8
#elif defined(CONFIG_MK7)
#define ASM_NOP1 K7_NOP1
#define ASM_NOP2 K7_NOP2
#define ASM_NOP3 K7_NOP3
#define ASM_NOP4 K7_NOP4
#define ASM_NOP5 K7_NOP5
#define ASM_NOP6 K7_NOP6
#define ASM_NOP7 K7_NOP7
#define ASM_NOP8 K7_NOP8
#elif defined(CONFIG_M686) || defined(CONFIG_MPENTIUMII) || \
      defined(CONFIG_MPENTIUMIII) || defined(CONFIG_MPENTIUMM) || \
      defined(CONFIG_MCORE2) || defined(CONFIG_PENTIUM4)
#define ASM_NOP1 P6_NOP1
#define ASM_NOP2 P6_NOP2
#define ASM_NOP3 P6_NOP3
#define ASM_NOP4 P6_NOP4
#define ASM_NOP5 P6_NOP5
#define ASM_NOP6 P6_NOP6
#define ASM_NOP7 P6_NOP7
#define ASM_NOP8 P6_NOP8
#else
#define ASM_NOP1 GENERIC_NOP1
#define ASM_NOP2 GENERIC_NOP2
#define ASM_NOP3 GENERIC_NOP3
#define ASM_NOP4 GENERIC_NOP4
#define ASM_NOP5 GENERIC_NOP5
#define ASM_NOP6 GENERIC_NOP6
#define ASM_NOP7 GENERIC_NOP7
#define ASM_NOP8 GENERIC_NOP8
#endif

/* Longest NOP sequence defined above, in bytes. */
#define ASM_NOP_MAX 8
/* Prefetch instructions for Pentium III and AMD Athlon */
/*
 * It's not worth caring about 3dnow! prefetches for the K6, because
 * they are microcoded there and very slow.  However, we don't
 * currently do prefetches for pre-XP Athlons; that should be fixed.
 *
 * NOTE(review): damaged extraction of prefetch() -- the code lines
 * below keep their original line numbers fused to the front, the
 * function-body braces are missing, and the fused numbers jump
 * 238 -> 244, so the remaining arguments of the alternative_input()
 * invocation (presumably the replacement prefetch instruction and its
 * CPU-feature condition -- verify against upstream) were lost.
 * Restore the full function from the upstream header; the fragment
 * below is not compilable as-is.
 */
236 static inline void prefetch(const void *x
)
238 alternative_input(ASM_NOP4
,
244 #define ARCH_HAS_PREFETCH
/*
 * 3dnow! prefetch to get an exclusive cache line.  Useful for
 * spinlocks to avoid one state transition in the cache coherency
 * protocol.
 *
 * NOTE(review): damaged extraction of prefetchw() -- the code lines
 * below keep their original line numbers fused to the front, the
 * function-body braces are missing, and the fused numbers jump
 * 250 -> 256, so the tail of the alternative_input() invocation was
 * lost.  Restore the full function from the upstream header; the
 * fragment below is not compilable as-is.  The final #endif closes
 * the header guard and appears intact apart from its fused number.
 */
248 static inline void prefetchw(const void *x
)
250 alternative_input(ASM_NOP4
,
256 #endif /* __ASM_I386_PROCESSOR_H */