x86: provide 64-bit with a load_sp0 function.
include/asm-x86/processor.h
#ifndef __ASM_X86_PROCESSOR_H
#define __ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;

#include <asm/page.h>
#include <asm/percpu.h>
#include <asm/system.h>

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
static inline void *current_text_addr(void)
{
	void *pc;
	asm volatile("mov $1f,%0\n1:":"=r" (pc));
	return pc;
}

static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	__asm__("cpuid"
		: "=a" (*eax),
		  "=b" (*ebx),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "0" (*eax), "2" (*ecx));
}

static inline void load_cr3(pgd_t *pgdir)
{
	write_cr3(__pa(pgdir));
}

#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
	unsigned short back_link, __blh;
	unsigned long sp0;
	unsigned short ss0, __ss0h;
	unsigned long sp1;
	unsigned short ss1, __ss1h;	/* ss1 caches MSR_IA32_SYSENTER_CS */
	unsigned long sp2;
	unsigned short ss2, __ss2h;
	unsigned long __cr3;
	unsigned long ip;
	unsigned long flags;
	unsigned long ax, cx, dx, bx;
	unsigned long sp, bp, si, di;
	unsigned short es, __esh;
	unsigned short cs, __csh;
	unsigned short ss, __ssh;
	unsigned short ds, __dsh;
	unsigned short fs, __fsh;
	unsigned short gs, __gsh;
	unsigned short ldt, __ldth;
	unsigned short trace, io_bitmap_base;
} __attribute__((packed));
#else
struct x86_hw_tss {
	u32 reserved1;
	u64 sp0;
	u64 sp1;
	u64 sp2;
	u64 reserved2;
	u64 ist[7];
	u32 reserved3;
	u32 reserved4;
	u16 reserved5;
	u16 io_bitmap_base;
} __attribute__((packed)) ____cacheline_aligned;
#endif

/*
 * Size of io_bitmap.
 */
#define IO_BITMAP_BITS 65536
#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap)
#define INVALID_IO_BITMAP_OFFSET 0x8000
#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000

struct tss_struct {
	struct x86_hw_tss x86_tss;

	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
	/*
	 * Cache the current maximum and the last task that used the bitmap:
	 */
	unsigned long io_bitmap_max;
	struct thread_struct *io_bitmap_owner;
	/*
	 * pads the TSS to be cacheline-aligned (size is 0x100)
	 */
	unsigned long __cacheline_filler[35];
	/*
	 * .. and then another 0x100 bytes for emergency kernel stack
	 */
	unsigned long stack[64];
} __attribute__((packed));

DECLARE_PER_CPU(struct tss_struct, init_tss);
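/*
 * Illustrative sketch: the per-CPU TSS declared above is normally reached
 * through the per_cpu() accessor, e.g.
 *
 *	struct tss_struct *tss = &per_cpu(init_tss, cpu);
 *
 * where 'cpu' is a placeholder for a valid processor id.
 */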

#ifdef CONFIG_X86_32
# include "processor_32.h"
#else
# include "processor_64.h"
#endif

extern void print_cpu_info(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;

static inline unsigned long native_get_debugreg(int regno)
{
	unsigned long val = 0;	/* Damn you, gcc! */

	switch (regno) {
	case 0:
		asm("mov %%db0, %0" :"=r" (val)); break;
	case 1:
		asm("mov %%db1, %0" :"=r" (val)); break;
	case 2:
		asm("mov %%db2, %0" :"=r" (val)); break;
	case 3:
		asm("mov %%db3, %0" :"=r" (val)); break;
	case 6:
		asm("mov %%db6, %0" :"=r" (val)); break;
	case 7:
		asm("mov %%db7, %0" :"=r" (val)); break;
	default:
		BUG();
	}
	return val;
}

static inline void native_set_debugreg(int regno, unsigned long value)
{
	switch (regno) {
	case 0:
		asm("mov %0,%%db0" : /* no output */ :"r" (value));
		break;
	case 1:
		asm("mov %0,%%db1" : /* no output */ :"r" (value));
		break;
	case 2:
		asm("mov %0,%%db2" : /* no output */ :"r" (value));
		break;
	case 3:
		asm("mov %0,%%db3" : /* no output */ :"r" (value));
		break;
	case 6:
		asm("mov %0,%%db6" : /* no output */ :"r" (value));
		break;
	case 7:
		asm("mov %0,%%db7" : /* no output */ :"r" (value));
		break;
	default:
		BUG();
	}
}

/*
 * Set IOPL bits in EFLAGS from given mask
 */
static inline void native_set_iopl_mask(unsigned mask)
{
#ifdef CONFIG_X86_32
	unsigned int reg;
	__asm__ __volatile__ ("pushfl;"
			      "popl %0;"
			      "andl %1, %0;"
			      "orl %2, %0;"
			      "pushl %0;"
			      "popfl"
				: "=&r" (reg)
				: "i" (~X86_EFLAGS_IOPL), "r" (mask));
#endif
}

static inline void native_load_sp0(struct tss_struct *tss,
				   struct thread_struct *thread)
{
	tss->x86_tss.sp0 = thread->sp0;
#ifdef CONFIG_X86_32
	/* Only happens when SEP is enabled, no need to test "SEP"arately */
	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
		tss->x86_tss.ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
#endif
}
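/*
 * Usage sketch (illustrative; 'next_p' is a placeholder for the task being
 * switched to): context-switch code refreshes the kernel entry stack for
 * the incoming task through the load_sp0() hook, e.g.
 *
 *	load_sp0(tss, &next_p->thread);
 *
 * which resolves to native_load_sp0() here, or to the paravirt
 * implementation when CONFIG_PARAVIRT is enabled.
 */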

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __cpuid native_cpuid
#define paravirt_enabled() 0

/*
 * These special macros can be used to get or set a debugging register
 */
#define get_debugreg(var, register)			\
	(var) = native_get_debugreg(register)
#define set_debugreg(value, register)			\
	native_set_debugreg(register, value)
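/*
 * Illustrative use of the macros above: read %db6 into a local variable
 * and clear %db7:
 *
 *	unsigned long dr6;
 *	get_debugreg(dr6, 6);
 *	set_debugreg(0, 7);
 */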

static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	native_load_sp0(tss, thread);
}

#define set_iopl_mask native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */

/*
 * Save the cr4 feature set we're using (i.e. Pentium 4MB enable and
 * PPro Global page enable), so that any CPUs that boot up after us
 * can get the correct flags.
 */
extern unsigned long mmu_cr4_features;

static inline void set_in_cr4(unsigned long mask)
{
	unsigned cr4;
	mmu_cr4_features |= mask;
	cr4 = read_cr4();
	cr4 |= mask;
	write_cr4(cr4);
}

static inline void clear_in_cr4(unsigned long mask)
{
	unsigned cr4;
	mmu_cr4_features &= ~mask;
	cr4 = read_cr4();
	cr4 &= ~mask;
	write_cr4(cr4);
}
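/*
 * Example (illustrative): enable global pages on the current CPU while
 * recording the bit in mmu_cr4_features for CPUs that boot later:
 *
 *	set_in_cr4(X86_CR4_PGE);
 *
 * X86_CR4_PGE comes from <asm/processor-flags.h>, included above.
 */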

struct microcode_header {
	unsigned int hdrver;
	unsigned int rev;
	unsigned int date;
	unsigned int sig;
	unsigned int cksum;
	unsigned int ldrver;
	unsigned int pf;
	unsigned int datasize;
	unsigned int totalsize;
	unsigned int reserved[3];
};

struct microcode {
	struct microcode_header hdr;
	unsigned int bits[0];
};

typedef struct microcode microcode_t;
typedef struct microcode_header microcode_header_t;

/* microcode format is extended from prescott processors */
struct extended_signature {
	unsigned int sig;
	unsigned int pf;
	unsigned int cksum;
};

struct extended_sigtable {
	unsigned int count;
	unsigned int cksum;
	unsigned int reserved[3];
	struct extended_signature sigs[0];
};

/*
 * create a kernel thread without removing it from tasklists
 */
extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy status */
extern void prepare_to_copy(struct task_struct *tsk);

unsigned long get_wchan(struct task_struct *p);

/*
 * Generic CPUID function.
 * Clear %ecx since some CPUs (Cyrix MII) do not set or clear %ecx,
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op,
			 unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}
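/*
 * Usage sketch (illustrative): leaf 0 returns the highest supported
 * standard leaf in EAX and the vendor string in EBX/EDX/ECX:
 *
 *	unsigned int eax, ebx, ecx, edx;
 *	cpuid(0, &eax, &ebx, &ecx, &edx);
 */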

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}
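/*
 * Example (illustrative; 'index' is a placeholder): leaf 4 (deterministic
 * cache parameters) takes a cache index in ECX, so callers enumerate
 * caches with
 *
 *	cpuid_count(4, index, &eax, &ebx, &ecx, &edx);
 *
 * incrementing 'index' until the returned cache type in EAX[4:0] is 0.
 */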

/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);
	return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);
	return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);
	return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);
	return edx;
}
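/*
 * Example (illustrative): a quick feature probe with the single-datum
 * helpers; bit 25 of CPUID(1).EDX advertises SSE:
 *
 *	int has_sse = !!(cpuid_edx(1) & (1 << 25));
 */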

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	__asm__ __volatile__("rep;nop": : :"memory");
}

/* Stop speculative execution */
static inline void sync_core(void)
{
	int tmp;
	asm volatile("cpuid" : "=a" (tmp) : "0" (1)
		     : "ebx", "ecx", "edx", "memory");
}

#define cpu_relax() rep_nop()
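/*
 * Typical busy-wait shape using cpu_relax() (illustrative; 'condition' is
 * a placeholder); the PAUSE hint lowers power use and reduces pipeline
 * penalties while spinning:
 *
 *	while (!condition)
 *		cpu_relax();
 */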

static inline void __monitor(const void *eax, unsigned long ecx,
			     unsigned long edx)
{
	/* "monitor %eax,%ecx,%edx;" */
	asm volatile(".byte 0x0f,0x01,0xc8;"
		     : :"a" (eax), "c" (ecx), "d"(edx));
}

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax,%ecx;" */
	asm volatile(".byte 0x0f,0x01,0xc9;"
		     : :"a" (eax), "c" (ecx));
}

static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax,%ecx;" */
	asm volatile("sti; .byte 0x0f,0x01,0xc9;"
		     : :"a" (eax), "c" (ecx));
}
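/*
 * Sketch of the MONITOR/MWAIT wait pattern the helpers above implement
 * (illustrative; 'flag' and 'hints' are placeholders): arm the monitor on
 * a flag's cache line, re-check the flag, then wait until the line is
 * written or an interrupt arrives:
 *
 *	__monitor(&flag, 0, 0);
 *	if (!flag)
 *		__mwait(hints, 0);
 *
 * mwait_idle_with_hints() below wraps this sequence for the idle loop.
 */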

extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);

extern int force_mwait;

extern void select_idle_routine(const struct cpuinfo_x86 *c);

extern unsigned long boot_option_idle_override;

/* Boot loader type from the setup header */
extern int bootloader_type;
#define cache_line_size() (boot_cpu_data.x86_cache_alignment)

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#define spin_lock_prefetch(x) prefetchw(x)

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))

#define KSTK_EIP(task) (task_pt_regs(task)->ip)

#endif