#ifndef _ASM_X86_CMPXCHG_64_H
#define _ASM_X86_CMPXCHG_64_H

#include <asm/alternative.h> /* Provides LOCK_PREFIX */

#define xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), \
						  (ptr), sizeof(*(ptr))))

#define __xg(x) ((volatile long *)(x))

static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
{
	*ptr = val;
}

#define _set_64bit set_64bit
/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has side effect, so that attribute volatile is necessary,
 * but generally the primitive is invalid, *ptr is output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
				   int size)
{
	switch (size) {
	case 1:
		asm volatile("xchgb %b0,%1"
			     : "=q" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 2:
		asm volatile("xchgw %w0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 4:
		asm volatile("xchgl %k0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 8:
		asm volatile("xchgq %0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	}
	return x;
}

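/*
 * Illustrative sketch (not part of the original header): how a caller
 * might use the xchg() macro above to atomically publish a new value
 * and observe the previous one. The helper and the "state" parameter
 * are hypothetical.
 */
static inline unsigned long example_xchg_state(unsigned long *state,
					       unsigned long new_state)
{
	/* Returns whatever was in *state immediately before the swap. */
	return xchg(state, new_state);
}
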
/*
 * Atomic compare and exchange. Compare OLD with MEM, if identical,
 * store NEW in MEM. Return the initial value in MEM. Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2"
			     : "=a"(prev)
			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 2:
		asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 4:
		asm volatile(LOCK_PREFIX "cmpxchgl %k1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 8:
		asm volatile(LOCK_PREFIX "cmpxchgq %1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	}
	return old;
}

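/*
 * Illustrative sketch (not part of the original header): the classic
 * compare-and-swap retry loop built on __cmpxchg(), matching the comment
 * above ("success is indicated by comparing RETURN with OLD"). The helper
 * name and the counter it updates are hypothetical.
 */
static inline void example_atomic_add(volatile unsigned long *counter,
				      unsigned long delta)
{
	unsigned long old, prev;

	do {
		old = *counter;
		/* The update took effect only if the returned value == old. */
		prev = __cmpxchg(counter, old, old + delta, sizeof(*counter));
	} while (prev != old);
}
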
/*
 * Always use locked operations when touching memory shared with a
 * hypervisor, since the system may be SMP even if the guest kernel
 * isn't.
 */
static inline unsigned long __sync_cmpxchg(volatile void *ptr,
					   unsigned long old,
					   unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		asm volatile("lock; cmpxchgb %b1,%2"
			     : "=a"(prev)
			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 2:
		asm volatile("lock; cmpxchgw %w1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 4:
		asm volatile("lock; cmpxchgl %1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	}
	return old;
}

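/*
 * Illustrative sketch (not part of the original header): updating a 32-bit
 * field that lives in memory shared with a hypervisor, where the lock prefix
 * must be used unconditionally as the comment above explains. The helper
 * name and the "slot"/"owner" convention are hypothetical; 0 is assumed to
 * mean "free".
 */
static inline unsigned int example_claim_slot(volatile unsigned int *slot,
					      unsigned int owner)
{
	/* Returns the previous owner; equal to 0 only if the claim won. */
	return __sync_cmpxchg(slot, 0, owner, sizeof(*slot));
}
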
static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		asm volatile("cmpxchgb %b1,%2"
			     : "=a"(prev)
			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 2:
		asm volatile("cmpxchgw %w1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 4:
		asm volatile("cmpxchgl %k1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 8:
		asm volatile("cmpxchgq %1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	}
	return old;
}

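/*
 * Illustrative sketch (not part of the original header): __cmpxchg_local()
 * omits the lock prefix, so it is assumed here to be used only on data that
 * no other CPU can touch concurrently, e.g. a per-CPU value accessed with
 * preemption disabled. The helper name is hypothetical.
 */
static inline unsigned long example_local_set_if(unsigned long *pcpu_val,
						 unsigned long expect,
						 unsigned long new_val)
{
	return __cmpxchg_local(pcpu_val, expect, new_val, sizeof(*pcpu_val));
}
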
#define cmpxchg(ptr, o, n)						\
	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o),	\
				       (unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64(ptr, o, n)						\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg((ptr), (o), (n));					\
})
#define cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
					     (unsigned long)(n),	\
					     sizeof(*(ptr))))
#define sync_cmpxchg(ptr, o, n)						\
	((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o),	\
					    (unsigned long)(n),		\
					    sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n)					\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_local((ptr), (o), (n));					\
})
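
/*
 * Illustrative sketch (not part of the original header): because the
 * cmpxchg() macro above casts back to the pointed-to type, it can be used
 * directly on pointer fields, e.g. for a lock-free singly linked stack push
 * on 64-bit. "struct example_node" and the helper are hypothetical.
 */
struct example_node {
	struct example_node *next;
};

static inline void example_push(struct example_node **top,
				struct example_node *node)
{
	struct example_node *old;

	do {
		old = *top;
		node->next = old;
		/* Retry if another CPU changed *top since we read it. */
	} while (cmpxchg(top, old, node) != old);
}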

#endif /* _ASM_X86_CMPXCHG_64_H */