arm64: atomics: implement atomic{,64}_cmpxchg using cmpxchg
arch/arm64/include/asm/atomic_ll_sc.h
/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASM_ATOMIC_LL_SC_H
#define __ASM_ATOMIC_LL_SC_H

#ifndef __ARM64_IN_ATOMIC_IMPL
#error "please don't include this file directly"
#endif

/*
 * AArch64 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 *
 * NOTE: these functions do *not* follow the PCS and must explicitly
 * save any clobbered registers other than x0 (regardless of return
 * value).  This is achieved through -fcall-saved-* compiler flags for
 * this file, which unfortunately don't work on a per-function basis
 * (the optimize attribute silently ignores these options).
 */

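/*
 * ATOMIC_OP() generates a void, unordered atomic RMW operation: the old
 * value is loaded exclusively (ldxr), modified, and conditionally stored
 * back (stxr), retrying until the exclusive store succeeds.  The prfm
 * prefetches the cacheline for write, reducing the chance of losing the
 * exclusive monitor between the load and the store.  There is no barrier
 * and no "memory" clobber, since these operations provide no ordering
 * guarantees.
 */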
#define ATOMIC_OP(op, asm_op)						\
__LL_SC_INLINE void							\
__LL_SC_PREFIX(atomic_##op(int i, atomic_t *v))				\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	asm volatile("// atomic_" #op "\n"				\
	"	prfm	pstl1strm, %2\n"				\
	"1:	ldxr	%w0, %2\n"					\
	"	" #asm_op "	%w0, %w0, %w3\n"			\
	"	stxr	%w1, %w0, %2\n"					\
	"	cbnz	%w1, 1b"					\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: "Ir" (i));							\
}									\
__LL_SC_EXPORT(atomic_##op);

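/*
 * ATOMIC_OP_RETURN() also returns the new value and is fully ordered:
 * the exclusive store has release semantics (stlxr) and is followed by
 * smp_mb(), and the "memory" clobber stops the compiler reordering
 * other accesses around the asm.
 */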
#define ATOMIC_OP_RETURN(op, asm_op)					\
__LL_SC_INLINE int							\
__LL_SC_PREFIX(atomic_##op##_return(int i, atomic_t *v))		\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	asm volatile("// atomic_" #op "_return\n"			\
	"	prfm	pstl1strm, %2\n"				\
	"1:	ldxr	%w0, %2\n"					\
	"	" #asm_op "	%w0, %w0, %w3\n"			\
	"	stlxr	%w1, %w0, %2\n"					\
	"	cbnz	%w1, 1b"					\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: "Ir" (i)							\
	: "memory");							\
									\
	smp_mb();							\
	return result;							\
}									\
__LL_SC_EXPORT(atomic_##op##_return);

#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_OP_RETURN(op, asm_op)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, sub)

ATOMIC_OP(and, and)
ATOMIC_OP(andnot, bic)
ATOMIC_OP(or, orr)
ATOMIC_OP(xor, eor)

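/*
 * For example, ATOMIC_OPS(add, add) above provides both atomic_add() and
 * atomic_add_return(), whereas the bitwise operations (and, andnot, or,
 * xor) are only generated in their void, unordered form.
 */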
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

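/*
 * The atomic64_t implementations mirror the 32-bit versions above, using
 * the full 64-bit x registers (%0) instead of the w registers (%w0).
 */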
#define ATOMIC64_OP(op, asm_op)						\
__LL_SC_INLINE void							\
__LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v))			\
{									\
	long result;							\
	unsigned long tmp;						\
									\
	asm volatile("// atomic64_" #op "\n"				\
	"	prfm	pstl1strm, %2\n"				\
	"1:	ldxr	%0, %2\n"					\
	"	" #asm_op "	%0, %0, %3\n"				\
	"	stxr	%w1, %0, %2\n"					\
	"	cbnz	%w1, 1b"					\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: "Ir" (i));							\
}									\
__LL_SC_EXPORT(atomic64_##op);

#define ATOMIC64_OP_RETURN(op, asm_op)					\
__LL_SC_INLINE long							\
__LL_SC_PREFIX(atomic64_##op##_return(long i, atomic64_t *v))		\
{									\
	long result;							\
	unsigned long tmp;						\
									\
	asm volatile("// atomic64_" #op "_return\n"			\
	"	prfm	pstl1strm, %2\n"				\
	"1:	ldxr	%0, %2\n"					\
	"	" #asm_op "	%0, %0, %3\n"				\
	"	stlxr	%w1, %0, %2\n"					\
	"	cbnz	%w1, 1b"					\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: "Ir" (i)							\
	: "memory");							\
									\
	smp_mb();							\
	return result;							\
}									\
__LL_SC_EXPORT(atomic64_##op##_return);

#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_OP_RETURN(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, sub)

ATOMIC64_OP(and, and)
ATOMIC64_OP(andnot, bic)
ATOMIC64_OP(or, orr)
ATOMIC64_OP(xor, eor)

#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

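/*
 * Decrement v only if the result would remain non-negative.  The return
 * value is the decremented value, or a negative value if the decrement
 * was not performed: b.mi skips the store when the subtraction goes
 * negative, so the dmb ish barrier only runs on the success path.
 */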
__LL_SC_INLINE long
__LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_dec_if_positive\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"
	"	subs	%0, %0, #1\n"
	"	b.mi	2f\n"
	"	stlxr	%w1, %0, %2\n"
	"	cbnz	%w1, 1b\n"
	"	dmb	ish\n"
	"2:"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	:
	: "cc", "memory");

	return result;
}
__LL_SC_EXPORT(atomic64_dec_if_positive);

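/*
 * __CMPXCHG_CASE() generates one cmpxchg flavour per operand size and
 * ordering: 'w' selects the 32-bit w register names, 'sz' is the size
 * suffix for the exclusive accesses (b, h or none), 'name' forms the
 * function name, 'mb' is a trailing barrier, 'rel' makes the store a
 * store-release (stlxr) and 'cl' is the clobber list.  The comparison
 * uses eor/cbnz rather than cmp/b.ne so the flags are left intact, and
 * a failed comparison branches straight out, skipping both the store
 * and the barrier.
 */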
#define __CMPXCHG_CASE(w, sz, name, mb, rel, cl)			\
__LL_SC_INLINE unsigned long						\
__LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr,		\
				     unsigned long old,			\
				     unsigned long new))		\
{									\
	unsigned long tmp, oldval;					\
									\
	asm volatile(							\
	"	prfm	pstl1strm, %[v]\n"				\
	"1:	ldxr" #sz "\t%" #w "[oldval], %[v]\n"			\
	"	eor	%" #w "[tmp], %" #w "[oldval], %" #w "[old]\n"	\
	"	cbnz	%" #w "[tmp], 2f\n"				\
	"	st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n"	\
	"	cbnz	%w[tmp], 1b\n"					\
	"	" #mb "\n"						\
	"	mov	%" #w "[oldval], %" #w "[old]\n"		\
	"2:"								\
	: [tmp] "=&r" (tmp), [oldval] "=&r" (oldval),			\
	  [v] "+Q" (*(unsigned long *)ptr)				\
	: [old] "Lr" (old), [new] "r" (new)				\
	: cl);								\
									\
	return oldval;							\
}									\
__LL_SC_EXPORT(__cmpxchg_case_##name);

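/*
 * The plain 1/2/4/8 variants are unordered; the mb_ variants pair a
 * store-release with a trailing dmb ish on success, giving the full
 * barrier semantics expected of cmpxchg().
 */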
__CMPXCHG_CASE(w, b,    1,        ,  ,         )
__CMPXCHG_CASE(w, h,    2,        ,  ,         )
__CMPXCHG_CASE(w,  ,    4,        ,  ,         )
__CMPXCHG_CASE( ,  ,    8,        ,  ,         )
__CMPXCHG_CASE(w, b, mb_1, dmb ish, l, "memory")
__CMPXCHG_CASE(w, h, mb_2, dmb ish, l, "memory")
__CMPXCHG_CASE(w,  , mb_4, dmb ish, l, "memory")
__CMPXCHG_CASE( ,  , mb_8, dmb ish, l, "memory")

#undef __CMPXCHG_CASE

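/*
 * __cmpxchg_double() compares and swaps a naturally aligned pair of
 * 64-bit words using the paired exclusives ldxp/stxp.  Note the return
 * convention at this level: zero means the store succeeded, non-zero
 * means the comparison failed.
 */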
#define __CMPXCHG_DBL(name, mb, rel, cl)				\
__LL_SC_INLINE int							\
__LL_SC_PREFIX(__cmpxchg_double##name(unsigned long old1,		\
				      unsigned long old2,		\
				      unsigned long new1,		\
				      unsigned long new2,		\
				      volatile void *ptr))		\
{									\
	unsigned long tmp, ret;						\
									\
	asm volatile("// __cmpxchg_double" #name "\n"			\
	"	prfm	pstl1strm, %2\n"				\
	"1:	ldxp	%0, %1, %2\n"					\
	"	eor	%0, %0, %3\n"					\
	"	eor	%1, %1, %4\n"					\
	"	orr	%1, %0, %1\n"					\
	"	cbnz	%1, 2f\n"					\
	"	st" #rel "xp	%w0, %5, %6, %2\n"			\
	"	cbnz	%w0, 1b\n"					\
	"	" #mb "\n"						\
	"2:"								\
	: "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr)	\
	: "r" (old1), "r" (old2), "r" (new1), "r" (new2)		\
	: cl);								\
									\
	return ret;							\
}									\
__LL_SC_EXPORT(__cmpxchg_double##name);

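/*
 * As with __CMPXCHG_CASE, the unadorned variant is unordered and the
 * _mb variant provides full barrier semantics.
 */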
__CMPXCHG_DBL(   ,        ,  ,         )
__CMPXCHG_DBL(_mb, dmb ish, l, "memory")

#undef __CMPXCHG_DBL

#endif	/* __ASM_ATOMIC_LL_SC_H */