arch/x86/include/asm/system.h
#ifndef _ASM_X86_SYSTEM_H
#define _ASM_X86_SYSTEM_H

#include <asm/asm.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <asm/cmpxchg.h>
#include <asm/nops.h>

#include <linux/kernel.h>
#include <linux/irqflags.h>

/* entries in ARCH_DLINFO: */
#if defined(CONFIG_IA32_EMULATION) || !defined(CONFIG_X86_64)
# define AT_VECTOR_SIZE_ARCH 2
#else /* else it's non-compat x86-64 */
# define AT_VECTOR_SIZE_ARCH 1
#endif

struct task_struct; /* one of the stranger aspects of C forward declarations */
struct task_struct *__switch_to(struct task_struct *prev,
                                struct task_struct *next);
struct tss_struct;
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
                      struct tss_struct *tss);
extern void show_regs_common(void);

#ifdef CONFIG_X86_32

#ifdef CONFIG_CC_STACKPROTECTOR
#define __switch_canary                                                 \
        "movl %P[task_canary](%[next]), %%ebx\n\t"                      \
        "movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
#define __switch_canary_oparam                                          \
        , [stack_canary] "=m" (stack_canary.canary)
#define __switch_canary_iparam                                          \
        , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
#else /* CC_STACKPROTECTOR */
#define __switch_canary
#define __switch_canary_oparam
#define __switch_canary_iparam
#endif /* CC_STACKPROTECTOR */

/*
 * Saving eflags is important.  Not only does it switch IOPL between
 * tasks, it also protects other tasks from NT leaking through sysenter
 * etc.
 */
#define switch_to(prev, next, last)                                     \
do {                                                                    \
        /*                                                              \
         * Context-switching clobbers all registers, so we clobber     \
         * them explicitly, via unused output variables.                \
         * (EAX and EBP are not listed because EBP is saved/restored   \
         * explicitly for wchan access and EAX is the return value of  \
         * __switch_to())                                               \
         */                                                             \
        unsigned long ebx, ecx, edx, esi, edi;                          \
                                                                        \
        asm volatile("pushfl\n\t"               /* save    flags */     \
                     "pushl %%ebp\n\t"          /* save    EBP   */     \
                     "movl %%esp,%[prev_sp]\n\t" /* save    ESP   */    \
                     "movl %[next_sp],%%esp\n\t" /* restore ESP   */    \
                     "movl $1f,%[prev_ip]\n\t"  /* save    EIP   */     \
                     "pushl %[next_ip]\n\t"     /* restore EIP   */     \
                     __switch_canary                                    \
                     "jmp __switch_to\n"        /* regparm call  */     \
                     "1:\t"                                             \
                     "popl %%ebp\n\t"           /* restore EBP   */     \
                     "popfl\n"                  /* restore flags */     \
                                                                        \
                     /* output parameters */                            \
                     : [prev_sp] "=m" (prev->thread.sp),                \
                       [prev_ip] "=m" (prev->thread.ip),                \
                       "=a" (last),                                     \
                                                                        \
                       /* clobbered output registers: */                \
                       "=b" (ebx), "=c" (ecx), "=d" (edx),              \
                       "=S" (esi), "=D" (edi)                           \
                                                                        \
                       __switch_canary_oparam                           \
                                                                        \
                       /* input parameters: */                          \
                     : [next_sp] "m" (next->thread.sp),                 \
                       [next_ip] "m" (next->thread.ip),                 \
                                                                        \
                       /* regparm parameters for __switch_to(): */      \
                       [prev] "a" (prev),                               \
                       [next] "d" (next)                                \
                                                                        \
                       __switch_canary_iparam                           \
                                                                        \
                     : /* reloaded segment registers */                 \
                       "memory");                                       \
} while (0)
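
/*
 * Illustrative sketch (editorial addition, not part of the original
 * header): the scheduler invokes this macro from context_switch() in
 * kernel/sched.c roughly as
 *
 *	switch_to(prev, next, prev);
 *
 * "last" is written with the task we actually switched away from,
 * which may differ from "prev" by the time this task runs again on a
 * different CPU.
 */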

/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
#else /* !CONFIG_X86_32 */

/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"

#define __EXTRA_CLOBBER                                                 \
        , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11",                \
          "r12", "r13", "r14", "r15"

#ifdef CONFIG_CC_STACKPROTECTOR
#define __switch_canary                                                 \
        "movq %P[task_canary](%%rsi),%%r8\n\t"                          \
        "movq %%r8,"__percpu_arg([gs_canary])"\n\t"
#define __switch_canary_oparam                                          \
        , [gs_canary] "=m" (irq_stack_union.stack_canary)
#define __switch_canary_iparam                                          \
        , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
#else /* CC_STACKPROTECTOR */
#define __switch_canary
#define __switch_canary_oparam
#define __switch_canary_iparam
#endif /* CC_STACKPROTECTOR */

/* Save and restore flags to handle a leaking NT flag */
#define switch_to(prev, next, last)                                       \
        asm volatile(SAVE_CONTEXT                                         \
             "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */       \
             "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */    \
             "call __switch_to\n\t"                                       \
             "movq "__percpu_arg([current_task])",%%rsi\n\t"              \
             __switch_canary                                              \
             "movq %P[thread_info](%%rsi),%%r8\n\t"                       \
             "movq %%rax,%%rdi\n\t"                                       \
             "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t"                  \
             "jnz ret_from_fork\n\t"                                      \
             RESTORE_CONTEXT                                              \
             : "=a" (last)                                                \
               __switch_canary_oparam                                     \
             : [next] "S" (next), [prev] "D" (prev),                      \
               [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
               [ti_flags] "i" (offsetof(struct thread_info, flags)),      \
               [_tif_fork] "i" (_TIF_FORK),                               \
               [thread_info] "i" (offsetof(struct task_struct, stack)),   \
               [current_task] "m" (current_task)                          \
               __switch_canary_iparam                                     \
             : "memory", "cc" __EXTRA_CLOBBER)
#endif /* CONFIG_X86_32 */

#ifdef __KERNEL__

extern void native_load_gs_index(unsigned);

/*
 * Load a segment.  Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg, value)                                         \
do {                                                                    \
        unsigned short __val = (value);                                 \
                                                                        \
        asm volatile("                                  \n"             \
                     "1:        movl %k0,%%" #seg "     \n"             \
                                                                        \
                     ".section .fixup,\"ax\"            \n"             \
                     "2:        xorl %k0,%k0            \n"             \
                     "          jmp 1b                  \n"             \
                     ".previous                         \n"             \
                                                                        \
                     _ASM_EXTABLE(1b, 2b)                               \
                                                                        \
                     : "+r" (__val) : : "memory");                      \
} while (0)

/*
 * Save a segment register away
 */
#define savesegment(seg, value)                         \
        asm("mov %%" #seg ",%0":"=r" (value) : : "memory")
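
/*
 * Illustrative sketch (editorial addition): saving and restoring a
 * segment register around code that borrows it, using the two helpers
 * above.  __KERNEL_PERCPU is just an example selector here.
 *
 *	unsigned int old_fs;
 *
 *	savesegment(fs, old_fs);
 *	loadsegment(fs, __KERNEL_PERCPU);
 *	...use %fs...
 *	loadsegment(fs, old_fs);
 */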

/*
 * x86_32 user gs accessors.
 */
#ifdef CONFIG_X86_32
#ifdef CONFIG_X86_32_LAZY_GS
#define get_user_gs(regs)       (u16)({unsigned long v; savesegment(gs, v); v;})
#define set_user_gs(regs, v)    loadsegment(gs, (unsigned long)(v))
#define task_user_gs(tsk)       ((tsk)->thread.gs)
#define lazy_save_gs(v)         savesegment(gs, (v))
#define lazy_load_gs(v)         loadsegment(gs, (v))
#else /* X86_32_LAZY_GS */
#define get_user_gs(regs)       (u16)((regs)->gs)
#define set_user_gs(regs, v)    do { (regs)->gs = (v); } while (0)
#define task_user_gs(tsk)       (task_pt_regs(tsk)->gs)
#define lazy_save_gs(v)         do { } while (0)
#define lazy_load_gs(v)         do { } while (0)
#endif /* X86_32_LAZY_GS */
#endif /* X86_32 */

static inline unsigned long get_limit(unsigned long segment)
{
        unsigned long __limit;
        asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
        return __limit + 1;
}

static inline void native_clts(void)
{
        asm volatile("clts");
}

/*
 * Volatile isn't enough to prevent the compiler from reordering the
 * read/write functions for the control registers and messing everything up.
 * A memory clobber would solve the problem, but would prevent reordering of
 * all loads/stores around it, which can hurt performance.  The solution is
 * to use a variable and mimic reads and writes to it to enforce
 * serialization.
 */
static unsigned long __force_order;
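
/*
 * Editorial note: the pattern works because each read below names
 * __force_order as an "=m" output and each write names it as an "m"
 * input, so the compiler sees a dependency chain between the asm
 * statements and must keep them ordered without a full "memory"
 * clobber.
 */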

static inline unsigned long native_read_cr0(void)
{
        unsigned long val;
        asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
        return val;
}

static inline void native_write_cr0(unsigned long val)
{
        asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr2(void)
{
        unsigned long val;
        asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
        return val;
}

static inline void native_write_cr2(unsigned long val)
{
        asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr3(void)
{
        unsigned long val;
        asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
        return val;
}

static inline void native_write_cr3(unsigned long val)
{
        asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr4(void)
{
        unsigned long val;
        asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
        return val;
}

static inline unsigned long native_read_cr4_safe(void)
{
        unsigned long val;
        /*
         * This could fault if CR4 does not exist.  On x86_64, CR4
         * always exists, so it can never fault.
         */
#ifdef CONFIG_X86_32
        asm volatile("1: mov %%cr4, %0\n"
                     "2:\n"
                     _ASM_EXTABLE(1b, 2b)
                     : "=r" (val), "=m" (__force_order) : "0" (0));
#else
        val = native_read_cr4();
#endif
        return val;
}

static inline void native_write_cr4(unsigned long val)
{
        asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order));
}

#ifdef CONFIG_X86_64
static inline unsigned long native_read_cr8(void)
{
        unsigned long cr8;
        asm volatile("movq %%cr8,%0" : "=r" (cr8));
        return cr8;
}

static inline void native_write_cr8(unsigned long val)
{
        asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
}
#endif

static inline void native_wbinvd(void)
{
        asm volatile("wbinvd": : :"memory");
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else

static inline unsigned long read_cr0(void)
{
        return native_read_cr0();
}

static inline void write_cr0(unsigned long x)
{
        native_write_cr0(x);
}

static inline unsigned long read_cr2(void)
{
        return native_read_cr2();
}

static inline void write_cr2(unsigned long x)
{
        native_write_cr2(x);
}

static inline unsigned long read_cr3(void)
{
        return native_read_cr3();
}

static inline void write_cr3(unsigned long x)
{
        native_write_cr3(x);
}

static inline unsigned long read_cr4(void)
{
        return native_read_cr4();
}

static inline unsigned long read_cr4_safe(void)
{
        return native_read_cr4_safe();
}

static inline void write_cr4(unsigned long x)
{
        native_write_cr4(x);
}

static inline void wbinvd(void)
{
        native_wbinvd();
}

#ifdef CONFIG_X86_64

static inline unsigned long read_cr8(void)
{
        return native_read_cr8();
}

static inline void write_cr8(unsigned long x)
{
        native_write_cr8(x);
}

static inline void load_gs_index(unsigned selector)
{
        native_load_gs_index(selector);
}

#endif

/* Clear the 'TS' bit */
static inline void clts(void)
{
        native_clts();
}

#endif /* CONFIG_PARAVIRT */

#define stts() write_cr0(read_cr0() | X86_CR0_TS)
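
/*
 * Editorial note: stts()/clts() support lazy FPU switching.  stts()
 * sets CR0.TS so the next FPU instruction raises #NM, letting the
 * kernel restore the task's FPU state on demand; clts() clears the
 * bit once that state has been loaded.
 */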

#endif /* __KERNEL__ */

static inline void clflush(volatile void *__p)
{
        asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
}
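
/*
 * Illustrative sketch (editorial addition), in the spirit of the
 * kernel's clflush_cache_range(): flushing a byte range one cache line
 * at a time, fenced with mb() so the flushes are ordered against
 * surrounding accesses.  The 64-byte stride is an assumption; real
 * code should use boot_cpu_data.x86_clflush_size.
 *
 *	static inline void flush_range(void *vaddr, unsigned int size)
 *	{
 *		void *vend = vaddr + size - 1;
 *		void *p;
 *
 *		mb();
 *		for (p = (void *)((unsigned long)vaddr & ~63UL);
 *		     p <= vend; p += 64)
 *			clflush(p);
 *		mb();
 *	}
 */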

#define nop() asm volatile ("nop")

void disable_hlt(void);
void enable_hlt(void);

void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);

void stop_this_cpu(void *dummy);

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#ifdef CONFIG_X86_32
/*
 * Some non-Intel clones support out-of-order store.  wmb() ceases to be
 * a nop for these.
 */
#define mb()    alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb()   alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb()   alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define mb()    asm volatile("mfence" ::: "memory")
#define rmb()   asm volatile("lfence" ::: "memory")
#define wmb()   asm volatile("sfence" ::: "memory")
#endif
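
/*
 * Illustrative sketch (editorial addition): the classic producer
 * pattern these barriers exist for when talking to devices.  The
 * descriptor fields and doorbell register are hypothetical.
 *
 *	desc->addr = dma_addr;
 *	desc->len  = len;
 *	wmb();				(publish descriptor first)
 *	writel(1, dev->doorbell);	(then ring the doorbell)
 */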

/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()  do { } while (0)

#ifdef CONFIG_SMP
#define smp_mb()        mb()
#ifdef CONFIG_X86_PPRO_FENCE
# define smp_rmb()      rmb()
#else
# define smp_rmb()      barrier()
#endif
#ifdef CONFIG_X86_OOSTORE
# define smp_wmb()      wmb()
#else
# define smp_wmb()      barrier()
#endif
#define smp_read_barrier_depends()      read_barrier_depends()
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#define smp_read_barrier_depends()      do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
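
/*
 * Illustrative sketch (editorial addition): the store/load pairing
 * these helpers are meant for.  "data" and "ready" are hypothetical
 * shared variables.
 *
 *	CPU 0				CPU 1
 *
 *	data = 42;			while (!ready)
 *	smp_wmb();				cpu_relax();
 *	ready = 1;			smp_rmb();
 *					BUG_ON(data != 42);
 *
 * On x86 both barriers usually compile down to barrier(), since the
 * architecture does not reorder stores against stores or loads against
 * loads (the PPRO_FENCE/OOSTORE configs above are the exceptions).
 */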

/*
 * Stop RDTSC speculation.  This is needed when you need to use RDTSC
 * (or get_cycles or vread that possibly accesses the TSC) in a defined
 * code region.
 *
 * (Could use a three-way alternative for this if there were one.)
 */
static __always_inline void rdtsc_barrier(void)
{
        alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
        alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
}
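
/*
 * Illustrative sketch (editorial addition): fencing TSC reads so they
 * cannot be speculated into or out of the measured region.
 * native_read_tsc() is assumed to come from <asm/msr.h>.
 *
 *	unsigned long long t0, t1;
 *
 *	rdtsc_barrier();
 *	t0 = native_read_tsc();
 *	...measured code...
 *	rdtsc_barrier();
 *	t1 = native_read_tsc();
 */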

/*
 * We handle most unaligned accesses in hardware.  On the other hand,
 * unaligned DMA can be quite expensive on some Nehalem processors.
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN    0
#endif /* _ASM_X86_SYSTEM_H */