/*
 * Based on arch/arm/include/asm/cmpxchg.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H

#include <linux/bug.h>

#include <asm/atomic.h>
#include <asm/barrier.h>
#include <asm/lse.h>

/*
 * We need separate acquire parameters for ll/sc and lse, since the full
 * barrier case is generated as release+dmb for the former and
 * acquire+release for the latter.
 */
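/*
 * For example, a full-barrier 32-bit xchg() expands roughly as follows
 * (an illustrative sketch, not exact compiler output; register numbers
 * are arbitrary):
 *
 *	LL/SC:				LSE:
 *		prfm	pstl1strm, [x2]
 *	1:	ldxr	w0, [x2]		swpal	w3, w0, [x2]
 *		stlxr	w1, w3, [x2]
 *		cbnz	w1, 1b
 *		dmb	ish
 *
 * i.e. release (stlxr) plus dmb for LL/SC versus a single acquire+release
 * swpal for LSE, which is why the acq/acq_lse parameters below differ.
 */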
#define __XCHG_CASE(w, sz, name, mb, nop_lse, acq, acq_lse, rel, cl)	\
static inline unsigned long __xchg_case_##name(unsigned long x,	\
					       volatile void *ptr)	\
{									\
	unsigned long ret, tmp;						\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	"	prfm	pstl1strm, %2\n"				\
	"1:	ld" #acq "xr" #sz "\t%" #w "0, %2\n"			\
	"	st" #rel "xr" #sz "\t%w1, %" #w "3, %2\n"		\
	"	cbnz	%w1, 1b\n"					\
	"	" #mb,							\
	/* LSE atomics */						\
	"	nop\n"							\
	"	nop\n"							\
	"	swp" #acq_lse #rel #sz "\t%" #w "3, %" #w "0, %2\n"	\
	"	nop\n"							\
	"	" #nop_lse)						\
	/* "+Q" must cover the whole long, not a single byte of it */	\
	: "=&r" (ret), "=&r" (tmp), "+Q" (*(unsigned long *)ptr)	\
	: "r" (x)							\
	: cl);								\
									\
	return ret;							\
}

__XCHG_CASE(w, b,     1,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w, h,     2,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w,  ,     4,        ,    ,  ,  ,  ,         )
__XCHG_CASE( ,  ,     8,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w, b, acq_1,        ,    , a, a,  , "memory")
__XCHG_CASE(w, h, acq_2,        ,    , a, a,  , "memory")
__XCHG_CASE(w,  , acq_4,        ,    , a, a,  , "memory")
__XCHG_CASE( ,  , acq_8,        ,    , a, a,  , "memory")
__XCHG_CASE(w, b, rel_1,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w, h, rel_2,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w,  , rel_4,        ,    ,  ,  , l, "memory")
__XCHG_CASE( ,  , rel_8,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w, b,  mb_1, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE(w, h,  mb_2, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE(w,  ,  mb_4, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE( ,  ,  mb_8, dmb ish, nop,  , a, l, "memory")

#undef __XCHG_CASE

#define __XCHG_GEN(sfx)						\
static inline unsigned long __xchg##sfx(unsigned long x,	\
					volatile void *ptr,	\
					int size)		\
{								\
	switch (size) {						\
	case 1:							\
		return __xchg_case##sfx##_1(x, ptr);		\
	case 2:							\
		return __xchg_case##sfx##_2(x, ptr);		\
	case 4:							\
		return __xchg_case##sfx##_4(x, ptr);		\
	case 8:							\
		return __xchg_case##sfx##_8(x, ptr);		\
	default:						\
		BUILD_BUG();					\
	}							\
								\
	unreachable();						\
}

__XCHG_GEN()
__XCHG_GEN(_acq)
__XCHG_GEN(_rel)
__XCHG_GEN(_mb)

#undef __XCHG_GEN

#define __xchg_wrapper(sfx, ptr, x)					\
({									\
	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))					\
		__xchg##sfx((unsigned long)(x), (ptr), sizeof(*(ptr))); \
	__ret;								\
})

/* xchg */
#define xchg_relaxed(...)	__xchg_wrapper(    , __VA_ARGS__)
#define xchg_acquire(...)	__xchg_wrapper(_acq, __VA_ARGS__)
#define xchg_release(...)	__xchg_wrapper(_rel, __VA_ARGS__)
#define xchg(...)		__xchg_wrapper( _mb, __VA_ARGS__)

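/*
 * Usage sketch (illustrative only; 'busy' is a hypothetical variable):
 *
 *	static unsigned long busy;
 *
 *	while (xchg_acquire(&busy, 1))		// 0 -> 1, acquire semantics
 *		cpu_relax();
 *	// ... critical section ...
 *	xchg_release(&busy, 0);			// 1 -> 0, release semantics
 *
 * xchg() additionally implies a full barrier; xchg_relaxed() implies none.
 */
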
#define __CMPXCHG_GEN(sfx)						\
static inline unsigned long __cmpxchg##sfx(volatile void *ptr,		\
					   unsigned long old,		\
					   unsigned long new,		\
					   int size)			\
{									\
	switch (size) {							\
	case 1:								\
		return __cmpxchg_case##sfx##_1(ptr, (u8)old, new);	\
	case 2:								\
		return __cmpxchg_case##sfx##_2(ptr, (u16)old, new);	\
	case 4:								\
		return __cmpxchg_case##sfx##_4(ptr, old, new);		\
	case 8:								\
		return __cmpxchg_case##sfx##_8(ptr, old, new);		\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	unreachable();							\
}

__CMPXCHG_GEN()
__CMPXCHG_GEN(_acq)
__CMPXCHG_GEN(_rel)
__CMPXCHG_GEN(_mb)

#undef __CMPXCHG_GEN

#define __cmpxchg_wrapper(sfx, ptr, o, n)				\
({									\
	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))					\
		__cmpxchg##sfx((ptr), (unsigned long)(o),		\
				(unsigned long)(n), sizeof(*(ptr)));	\
	__ret;								\
})

/* cmpxchg */
#define cmpxchg_relaxed(...)	__cmpxchg_wrapper(    , __VA_ARGS__)
#define cmpxchg_acquire(...)	__cmpxchg_wrapper(_acq, __VA_ARGS__)
#define cmpxchg_release(...)	__cmpxchg_wrapper(_rel, __VA_ARGS__)
#define cmpxchg(...)		__cmpxchg_wrapper( _mb, __VA_ARGS__)
#define cmpxchg_local		cmpxchg_relaxed

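/*
 * Usage sketch (illustrative only; 'p' is a hypothetical int pointer):
 * increment *p unless it is zero, in the classic cmpxchg() retry loop:
 *
 *	int cur = READ_ONCE(*p);
 *
 *	while (cur) {
 *		int old = cmpxchg(p, cur, cur + 1);
 *		if (old == cur)
 *			return true;	// won the race, *p incremented
 *		cur = old;		// lost the race, retry on new value
 *	}
 *	return false;			// *p was zero
 */
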
/* cmpxchg64 */
#define cmpxchg64_relaxed	cmpxchg_relaxed
#define cmpxchg64_acquire	cmpxchg_acquire
#define cmpxchg64_release	cmpxchg_release
#define cmpxchg64		cmpxchg
#define cmpxchg64_local		cmpxchg_local

/* cmpxchg_double */
#define system_has_cmpxchg_double()	1

#define __cmpxchg_double_check(ptr1, ptr2)				\
({									\
	if (sizeof(*(ptr1)) != 8)					\
		BUILD_BUG();						\
	VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1); \
})

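/*
 * The pair must be two adjacent, naturally aligned 64-bit words, e.g.
 * (hypothetical):
 *
 *	struct {
 *		unsigned long head;
 *		unsigned long tail;
 *	} __aligned(16) q;
 *
 * so the implementation can operate on the 128-bit quantity with a single
 * LDXP/STXP (or CASP) sequence.
 */
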
#define cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2)			\
({									\
	int __ret;							\
	__cmpxchg_double_check(ptr1, ptr2);				\
	__ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2), \
				     (unsigned long)(n1), (unsigned long)(n2), \
				     ptr1);				\
	__ret;								\
})

#define cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2)		\
({									\
	int __ret;							\
	__cmpxchg_double_check(ptr1, ptr2);				\
	__ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2), \
				  (unsigned long)(n1), (unsigned long)(n2), \
				  ptr1);				\
	__ret;								\
})

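/*
 * Usage sketch (illustrative only, using the hypothetical 'q' above):
 * update both words atomically, succeeding only if neither has changed:
 *
 *	unsigned long head = READ_ONCE(q.head);
 *	unsigned long tail = READ_ONCE(q.tail);
 *
 *	if (cmpxchg_double(&q.head, &q.tail, head, tail, head + 1, 0UL))
 *		; // success: returns 1, both words updated together
 */
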
/* this_cpu_cmpxchg */
#define _protect_cmpxchg_local(pcp, o, n)			\
({								\
	typeof(*raw_cpu_ptr(&(pcp))) __ret;			\
	preempt_disable();					\
	__ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n);	\
	preempt_enable();					\
	__ret;							\
})

#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)

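/*
 * Usage sketch (illustrative only; 'pcp_count' is a hypothetical
 * DEFINE_PER_CPU(unsigned long, pcp_count) variable):
 *
 *	if (this_cpu_cmpxchg_8(pcp_count, 0UL, 1UL) == 0UL)
 *		; // this CPU's copy went 0 -> 1 without being preempted
 *
 * Only atomicity with respect to the local CPU is required, hence the
 * cheaper cmpxchg_local() under preempt_disable().
 */
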
#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2)		\
({									\
	int __ret;							\
	preempt_disable();						\
	__ret = cmpxchg_double_local(	raw_cpu_ptr(&(ptr1)),		\
					raw_cpu_ptr(&(ptr2)),		\
					o1, o2, n1, n2);		\
	preempt_enable();						\
	__ret;								\
})

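/*
 * __cmpwait: the exclusive load arms the monitor for *ptr; if the value
 * still equals 'val', wfe then sleeps until an event (such as another
 * CPU's store clearing the monitor) wakes us. This bounds the spin in
 * smp_cond_load_acquire()-style loops.
 */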
#define __CMPWAIT_CASE(w, sz, name)					\
static inline void __cmpwait_case_##name(volatile void *ptr,		\
					 unsigned long val)		\
{									\
	unsigned long tmp;						\
									\
	asm volatile(							\
	"	ldxr" #sz "\t%" #w "[tmp], %[v]\n"			\
	"	eor	%" #w "[tmp], %" #w "[tmp], %" #w "[val]\n"	\
	"	cbnz	%" #w "[tmp], 1f\n"				\
	"	wfe\n"							\
	"1:"								\
	: [tmp] "=&r" (tmp), [v] "+Q" (*(unsigned long *)ptr)		\
	: [val] "r" (val));						\
}

__CMPWAIT_CASE(w, b, 1);
__CMPWAIT_CASE(w, h, 2);
__CMPWAIT_CASE(w,  , 4);
__CMPWAIT_CASE( ,  , 8);

#undef __CMPWAIT_CASE

#define __CMPWAIT_GEN(sfx)						\
static inline void __cmpwait##sfx(volatile void *ptr,			\
				  unsigned long val,			\
				  int size)				\
{									\
	switch (size) {							\
	case 1:								\
		return __cmpwait_case##sfx##_1(ptr, (u8)val);		\
	case 2:								\
		return __cmpwait_case##sfx##_2(ptr, (u16)val);		\
	case 4:								\
		return __cmpwait_case##sfx##_4(ptr, val);		\
	case 8:								\
		return __cmpwait_case##sfx##_8(ptr, val);		\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	unreachable();							\
}

__CMPWAIT_GEN()

#undef __CMPWAIT_GEN

#define __cmpwait_relaxed(ptr, val) \
	__cmpwait((ptr), (unsigned long)(val), sizeof(*(ptr)))

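/*
 * Usage sketch (illustrative only; 'flag' is a hypothetical variable),
 * in the style of smp_cond_load_acquire():
 *
 *	unsigned long v;
 *
 *	while ((v = READ_ONCE(flag)) != 1)
 *		__cmpwait_relaxed(&flag, v);
 *
 * The loop re-check is mandatory: wfe can wake spuriously, so __cmpwait
 * is only a hint that the value may have changed.
 */
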
#endif	/* __ASM_CMPXCHG_H */