/* arch/x86/include/asm/barrier.h */
#ifndef _ASM_X86_BARRIER_H
#define _ASM_X86_BARRIER_H

#include <asm/alternative.h>
#include <asm/nops.h>

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */

#ifdef CONFIG_X86_32
/*
 * Some non-Intel clones support out of order store. wmb() ceases to be a
 * nop for these.
 */
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define mb() asm volatile("mfence":::"memory")
#define rmb() asm volatile("lfence":::"memory")
#define wmb() asm volatile("sfence" ::: "memory")
#endif
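
/*
 * Illustrative sketch, not part of the original header: one reason these
 * barriers are "required on UP too" is ordering plain memory stores against
 * a relaxed MMIO write that kicks off a device.  'desc', 'dev' and DESC_KICK
 * are hypothetical names:
 *
 *	desc->addr = buf_dma;		// fill in the descriptor
 *	desc->len  = buf_len;
 *	wmb();				// descriptor stores before the doorbell
 *	writel_relaxed(1, dev->mmio + DESC_KICK);
 */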

#ifdef CONFIG_X86_PPRO_FENCE
#define dma_rmb() rmb()
#else
#define dma_rmb() barrier()
#endif
#define dma_wmb() barrier()
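
/*
 * A hedged sketch, not part of the original header: dma_rmb()/dma_wmb()
 * order CPU accesses to coherent (consistent) DMA memory shared with a
 * device.  'desc' and DESC_DD are hypothetical ring-descriptor names:
 *
 *	if (desc->status & DESC_DD) {	// device marked descriptor done
 *		dma_rmb();		// read status before reading data
 *		consume(desc->data);
 *	}
 */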

#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() dma_rmb()
#define smp_wmb() barrier()
#define smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else /* !SMP */
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
#endif /* SMP */
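
/*
 * Illustrative sketch, not part of the original header: the store-buffering
 * pattern that needs the full smp_mb() above.  X, Y, r0 and r1 are
 * hypothetical, with X and Y both initially zero:
 *
 *	CPU 0				CPU 1
 *	WRITE_ONCE(X, 1);		WRITE_ONCE(Y, 1);
 *	smp_mb();			smp_mb();
 *	r0 = READ_ONCE(Y);		r1 = READ_ONCE(X);
 *
 * With both barriers present, r0 == 0 && r1 == 0 cannot happen.  On !SMP
 * builds the macros collapse to compiler barriers because there is no
 * other CPU to race with.
 */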

#define read_barrier_depends() do { } while (0)
#define smp_read_barrier_depends() do { } while (0)

#if defined(CONFIG_X86_PPRO_FENCE)

/*
 * For this option x86 doesn't have a strong TSO memory
 * model and we should fall back to full barriers.
 */

#define smp_store_release(p, v) \
do { \
	compiletime_assert_atomic_type(*p); \
	smp_mb(); \
	ACCESS_ONCE(*p) = (v); \
} while (0)

#define smp_load_acquire(p) \
({ \
	typeof(*p) ___p1 = ACCESS_ONCE(*p); \
	compiletime_assert_atomic_type(*p); \
	smp_mb(); \
	___p1; \
})

#else /* regular x86 TSO memory ordering */

#define smp_store_release(p, v) \
do { \
	compiletime_assert_atomic_type(*p); \
	barrier(); \
	ACCESS_ONCE(*p) = (v); \
} while (0)

#define smp_load_acquire(p) \
({ \
	typeof(*p) ___p1 = ACCESS_ONCE(*p); \
	compiletime_assert_atomic_type(*p); \
	barrier(); \
	___p1; \
})

#endif

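/*
 * A hedged sketch, not part of the original header: the usual way
 * smp_store_release()/smp_load_acquire() pair up, assuming a hypothetical
 * structure with 'data' and 'ready' fields:
 *
 *	producer:
 *		s->data = compute();
 *		smp_store_release(&s->ready, 1);
 *
 *	consumer:
 *		if (smp_load_acquire(&s->ready))
 *			use(s->data);
 *
 * On regular TSO x86 both sides are plain accesses plus a compiler
 * barrier(); only CONFIG_X86_PPRO_FENCE falls back to smp_mb().
 */
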
/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic() barrier()
#define smp_mb__after_atomic() barrier()

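/*
 * Hedged example, not part of the original header: these pair with
 * non-value-returning atomics, e.g. ('obj' and its fields are made up):
 *
 *	obj->dead = 1;
 *	smp_mb__before_atomic();	// order the store above ...
 *	atomic_dec(&obj->ref_count);	// ... before the LOCKed RMW
 *
 * The LOCK prefix already acts as a full barrier on x86, so a compiler
 * barrier() is all that is needed here.
 */
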
/*
 * Stop RDTSC speculation. This is needed when you need to use RDTSC
 * (or get_cycles or vread that possibly accesses the TSC) in a defined
 * code region.
 */
static __always_inline void rdtsc_barrier(void)
{
	alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC,
		      "lfence", X86_FEATURE_LFENCE_RDTSC);
}

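/*
 * A minimal, hypothetical sketch of the intended use (not part of the
 * original header): fence the measured region so RDTSC cannot be
 * speculated into or out of it.  do_measured_work() is made up:
 *
 *	rdtsc_barrier();
 *	start = get_cycles();
 *	do_measured_work();
 *	rdtsc_barrier();
 *	end = get_cycles();
 */
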
#endif /* _ASM_X86_BARRIER_H */