#ifndef __ASM_X86_PROCESSOR_H
#define __ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;

#include <asm/percpu.h>
#include <asm/system.h>

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
static inline void *current_text_addr(void)
{
	void *pc;
	asm volatile("mov $1f,%0\n1:" : "=r" (pc));
	return pc;
}
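
/*
 * Usage sketch (illustrative only, not part of the original header): a
 * hypothetical caller could log roughly where it is executing, e.g.
 *
 *	printk(KERN_DEBUG "executing near %p\n", current_text_addr());
 */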
#ifdef CONFIG_X86_VSMP
#define ARCH_MIN_TASKALIGN	(1 << INTERNODE_CACHE_SHIFT)
#define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
#else
#define ARCH_MIN_TASKALIGN	16
#define ARCH_MIN_MMSTRUCT_ALIGN	0
#endif

static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	__asm__("cpuid"
		: "=a" (*eax),
		  "=b" (*ebx),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "0" (*eax), "2" (*ecx));
}

static inline void load_cr3(pgd_t *pgdir)
{
	write_cr3(__pa(pgdir));
}

/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
	unsigned short	back_link, __blh;
	unsigned long	sp0;
	unsigned short	ss0, __ss0h;
	unsigned long	sp1;
	unsigned short	ss1, __ss1h;	/* ss1 caches MSR_IA32_SYSENTER_CS */
	unsigned long	sp2;
	unsigned short	ss2, __ss2h;
	unsigned long	__cr3;
	unsigned long	ip;
	unsigned long	flags;
	unsigned long	ax, cx, dx, bx;
	unsigned long	sp, bp, si, di;
	unsigned short	es, __esh;
	unsigned short	cs, __csh;
	unsigned short	ss, __ssh;
	unsigned short	ds, __dsh;
	unsigned short	fs, __fsh;
	unsigned short	gs, __gsh;
	unsigned short	ldt, __ldth;
	unsigned short	trace, io_bitmap_base;
} __attribute__((packed));
} __attribute__((packed)) ____cacheline_aligned;

#define IO_BITMAP_BITS			65536
#define IO_BITMAP_BYTES			(IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS			(IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET		offsetof(struct tss_struct, io_bitmap)
#define INVALID_IO_BITMAP_OFFSET	0x8000
#define INVALID_IO_BITMAP_OFFSET_LAZY	0x9000
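
/*
 * Worked numbers for the constants above: with IO_BITMAP_BITS = 65536
 * (one bit per I/O port), IO_BITMAP_BYTES is 65536/8 = 8192 bytes, and
 * IO_BITMAP_LONGS is 8192/4 = 2048 longs on a 32-bit kernel or
 * 8192/8 = 1024 longs on a 64-bit kernel.
 */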
struct tss_struct {
	struct x86_hw_tss	x86_tss;

	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long		io_bitmap[IO_BITMAP_LONGS + 1];
	/*
	 * Cache the current maximum and the last task that used the bitmap:
	 */
	unsigned long		io_bitmap_max;
	struct thread_struct	*io_bitmap_owner;
	/*
	 * pads the TSS to be cacheline-aligned (size is 0x100)
	 */
	unsigned long		__cacheline_filler[35];
	/*
	 * .. and then another 0x100 bytes for emergency kernel stack
	 */
	unsigned long		stack[64];
} __attribute__((packed));

DECLARE_PER_CPU(struct tss_struct, init_tss);

#ifdef CONFIG_X86_32
# include "processor_32.h"
#else
# include "processor_64.h"
#endif

extern void print_cpu_info(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;

struct thread_struct {
/* cached TLS descriptors. */
	struct desc_struct	tls_array[GDT_ENTRY_TLS_ENTRIES];
	unsigned long		sp0;
	unsigned long		sp;
#ifdef CONFIG_X86_32
	unsigned long		sysenter_cs;
#else
	unsigned long		usersp;	/* Copy from PDA */
	unsigned short		es, ds, fsindex, gsindex;
#endif
	unsigned long		ip;
	unsigned long		fs;
	unsigned long		gs;
/* Hardware debugging registers */
	unsigned long		debugreg0;
	unsigned long		debugreg1;
	unsigned long		debugreg2;
	unsigned long		debugreg3;
	unsigned long		debugreg6;
	unsigned long		debugreg7;
/* fault info */
	unsigned long		cr2, trap_no, error_code;
/* floating point info */
	union i387_union	i387 __attribute__((aligned(16)));
#ifdef CONFIG_X86_32
/* virtual 86 mode info */
	struct vm86_struct __user *vm86_info;
	unsigned long		screen_bitmap;
	unsigned long		v86flags, v86mask, saved_sp0;
	unsigned int		saved_fs, saved_gs;
#endif
/* IO permissions */
	unsigned long		*io_bitmap_ptr;
/* max allowed port in the bitmap, in bytes: */
	unsigned		io_bitmap_max;
/* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */
	unsigned long		debugctlmsr;
/* Debug Store - if not 0 points to a DS Save Area configuration;
 *               goes into MSR_IA32_DS_AREA */
	unsigned long		ds_area_msr;
};

static inline unsigned long native_get_debugreg(int regno)
{
	unsigned long val = 0;	/* Damn you, gcc! */

	switch (regno) {
	case 0:
		asm("mov %%db0, %0" :"=r" (val)); break;
	case 1:
		asm("mov %%db1, %0" :"=r" (val)); break;
	case 2:
		asm("mov %%db2, %0" :"=r" (val)); break;
	case 3:
		asm("mov %%db3, %0" :"=r" (val)); break;
	case 6:
		asm("mov %%db6, %0" :"=r" (val)); break;
	case 7:
		asm("mov %%db7, %0" :"=r" (val)); break;
	default:
		BUG();
	}
	return val;
}

static inline void native_set_debugreg(int regno, unsigned long value)
{
	switch (regno) {
	case 0:
		asm("mov %0,%%db0"	: /* no output */ :"r" (value));
		break;
	case 1:
		asm("mov %0,%%db1"	: /* no output */ :"r" (value));
		break;
	case 2:
		asm("mov %0,%%db2"	: /* no output */ :"r" (value));
		break;
	case 3:
		asm("mov %0,%%db3"	: /* no output */ :"r" (value));
		break;
	case 6:
		asm("mov %0,%%db6"	: /* no output */ :"r" (value));
		break;
	case 7:
		asm("mov %0,%%db7"	: /* no output */ :"r" (value));
		break;
	default:
		BUG();
	}
}

/*
 * Set IOPL bits in EFLAGS from given mask
 */
static inline void native_set_iopl_mask(unsigned mask)
{
	unsigned int reg;
	__asm__ __volatile__ ("pushfl;"
			      "popl %0;"
			      "andl %1, %0;"
			      "orl %2, %0;"
			      "pushl %0;"
			      "popfl"
				: "=&r" (reg)
				: "i" (~X86_EFLAGS_IOPL), "r" (mask));
}
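
/*
 * Usage sketch (assumption, not taken from this header): the IOPL field
 * occupies bits 12-13 of EFLAGS, so an iopl()-style syscall would pass
 * the requested privilege level shifted into place, e.g.
 *
 *	set_iopl_mask(level << 12);
 */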
static inline void native_load_sp0(struct tss_struct *tss,
				   struct thread_struct *thread)
{
	tss->x86_tss.sp0 = thread->sp0;

	/* Only happens when SEP is enabled, no need to test "SEP"arately */
	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
		tss->x86_tss.ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __cpuid			native_cpuid
#define paravirt_enabled()	0

/*
 * These special macros can be used to get or set a debugging register
 */
#define get_debugreg(var, register)	\
	(var) = native_get_debugreg(register)
#define set_debugreg(value, register)	\
	native_set_debugreg(register, value)
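
/*
 * Example (illustrative only, not from the original header): reading and
 * then clearing %db7 through the wrappers above.  The local variable
 * name is hypothetical.
 *
 *	unsigned long dr7;
 *
 *	get_debugreg(dr7, 7);
 *	set_debugreg(0UL, 7);
 */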
static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	native_load_sp0(tss, thread);
}

#define set_iopl_mask native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */

/*
 * Save the cr4 feature set we're using (i.e. Pentium 4MB enable and
 * PPro Global page enable), so that any CPUs that boot up after us
 * can get the correct flags.
 */
extern unsigned long mmu_cr4_features;

static inline void set_in_cr4(unsigned long mask)
{
	unsigned cr4;
	mmu_cr4_features |= mask;
	cr4 = read_cr4();
	cr4 |= mask;
	write_cr4(cr4);
}

static inline void clear_in_cr4(unsigned long mask)
{
	unsigned cr4;
	mmu_cr4_features &= ~mask;
	cr4 = read_cr4();
	cr4 &= ~mask;
	write_cr4(cr4);
}
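
/*
 * Example (illustrative only): enabling a CR4 feature both in the live
 * register and in the cached mmu_cr4_features mask, using a flag from
 * <asm/processor-flags.h>:
 *
 *	set_in_cr4(X86_CR4_PGE);	(turn global pages on)
 *	clear_in_cr4(X86_CR4_PGE);	(and off again)
 */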
struct microcode_header {
	unsigned int hdrver;
	unsigned int rev;
	unsigned int date;
	unsigned int sig;
	unsigned int cksum;
	unsigned int ldrver;
	unsigned int pf;
	unsigned int datasize;
	unsigned int totalsize;
	unsigned int reserved[3];
};

struct microcode {
	struct microcode_header hdr;
	unsigned int bits[0];
};

typedef struct microcode microcode_t;
typedef struct microcode_header microcode_header_t;

/* microcode format is extended from Prescott processors */
struct extended_signature {
	unsigned int sig;
	unsigned int pf;
	unsigned int cksum;
};

struct extended_sigtable {
	unsigned int count;
	unsigned int cksum;
	unsigned int reserved[3];
	struct extended_signature sigs[0];
};

/*
 * create a kernel thread without removing it from tasklists
 */
extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
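
/*
 * Usage sketch (assumption, not from this header): kernel_thread() runs
 * fn(arg) in a new kernel thread; the clone flags come from
 * <linux/sched.h>.  The thread function and flag choice below are
 * hypothetical.
 *
 *	static int my_worker(void *data)
 *	{
 *		do_something(data);
 *		return 0;
 *	}
 *
 *	kernel_thread(my_worker, NULL, CLONE_FS | CLONE_FILES | SIGCHLD);
 */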
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy status */
extern void prepare_to_copy(struct task_struct *tsk);

unsigned long get_wchan(struct task_struct *p);

/*
 * Generic CPUID function
 * clear %ecx since some CPUs (Cyrix MII) do not set or clear %ecx
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op,
			 unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}
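
/*
 * Example (illustrative only): CPUID leaf 4 ("deterministic cache
 * parameters" on Intel CPUs) takes a sub-leaf index in %ecx, which is
 * exactly what cpuid_count() is for.  Variable names are hypothetical.
 *
 *	unsigned int eax, ebx, ecx, edx;
 *
 *	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
 *	cache_type = eax & 0x1f;
 */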
/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);
	return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);
	return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);
	return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);
	return edx;
}
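
/*
 * Example (illustrative only): the single-datum helpers above make it
 * easy to pull individual CPUID values, e.g. the maximum supported
 * standard leaf and the family/model/stepping and feature words:
 *
 *	unsigned int max_leaf = cpuid_eax(0);
 *	unsigned int sig      = cpuid_eax(1);
 *	unsigned int features = cpuid_edx(1);
 */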
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	__asm__ __volatile__("rep;nop": : :"memory");
}

/* Stop speculative execution */
static inline void sync_core(void)
{
	int tmp;

	asm volatile("cpuid" : "=a" (tmp) : "0" (1)
		     : "ebx", "ecx", "edx", "memory");
}

#define cpu_relax()	rep_nop()
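
/*
 * Example (illustrative only): cpu_relax() is meant to sit in the body
 * of a busy-wait loop, such as a hypothetical flag poll:
 *
 *	while (!*(volatile int *)&flag)
 *		cpu_relax();
 */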
static inline void __monitor(const void *eax, unsigned long ecx,
			     unsigned long edx)
{
	/* "monitor %eax,%ecx,%edx;" */
	asm volatile(".byte 0x0f,0x01,0xc8;"
		     : :"a" (eax), "c" (ecx), "d"(edx));
}

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax,%ecx;" */
	asm volatile(".byte 0x0f,0x01,0xc9;"
		     : :"a" (eax), "c" (ecx));
}

static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax,%ecx;" */
	asm volatile("sti; .byte 0x0f,0x01,0xc9;"
		     : :"a" (eax), "c" (ecx));
}
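
/*
 * Usage sketch (assumption, not from this header): a typical
 * MONITOR/MWAIT wait arms the monitor on the address to be watched and
 * then sleeps until that line is written or another wake event occurs.
 * Hints of 0 request the shallowest sleep state.
 *
 *	__monitor(&flag, 0, 0);
 *	if (!flag)
 *		__mwait(0, 0);
 */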
extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);

extern int force_mwait;

extern void select_idle_routine(const struct cpuinfo_x86 *c);

extern unsigned long boot_option_idle_override;

/* Boot loader type from the setup header */
extern int bootloader_type;

#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#define spin_lock_prefetch(x)	prefetchw(x)

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))

#define KSTK_EIP(task)		(task_pt_regs(task)->ip)