/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASM_ATOMIC_LL_SC_H
#define __ASM_ATOMIC_LL_SC_H

#ifndef __ARM64_IN_ATOMIC_IMPL
#error "please don't include this file directly"
#endif

/*
 * AArch64 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 *
 * NOTE: these functions do *not* follow the PCS and must explicitly
 * save any clobbered registers other than x0 (regardless of return
 * value).  This is achieved through -fcall-saved-* compiler flags for
 * this file, which unfortunately don't work on a per-function basis
 * (the optimize attribute silently ignores these options).
 */
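
/*
 * Illustrative sketch only (not part of this file's interface): the
 * LL/SC sequences below behave like the following pseudocode, where
 * LOAD_EXCL()/STORE_EXCL() are hypothetical stand-ins for the
 * ldxr/stxr instruction pair, and the exclusive monitor is what makes
 * the read-modify-write atomic:
 *
 *	do {
 *		val = LOAD_EXCL(&v->counter);	// ldxr: load and mark exclusive
 *		val = val <op> i;		// add, sub, and, orr, ...
 *	} while (STORE_EXCL(&v->counter, val)); // stxr: returns non-zero and
 *						// forces a retry if exclusivity
 *						// was lost in the meantime
 */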

#define ATOMIC_OP(op, asm_op)						\
__LL_SC_INLINE void							\
__LL_SC_PREFIX(atomic_##op(int i, atomic_t *v))				\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	asm volatile("// atomic_" #op "\n"				\
"1:	ldxr	%w0, %2\n"						\
"	" #asm_op "	%w0, %w0, %w3\n"				\
"	stxr	%w1, %w0, %2\n"						\
"	cbnz	%w1, 1b"						\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: "Ir" (i));							\
}									\
__LL_SC_EXPORT(atomic_##op);
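
/*
 * A note on the constraints above: "=&r" marks result/tmp as
 * early-clobber outputs (written before the inputs are dead), "+Q"
 * exposes v->counter as a base-register-only memory operand as
 * required by ldxr/stxr, and "Ir" lets the compiler pass 'i' either
 * as an arithmetic immediate or in a register.
 */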

#define ATOMIC_OP_RETURN(op, asm_op)					\
__LL_SC_INLINE int							\
__LL_SC_PREFIX(atomic_##op##_return(int i, atomic_t *v))		\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	asm volatile("// atomic_" #op "_return\n"			\
"1:	ldxr	%w0, %2\n"						\
"	" #asm_op "	%w0, %w0, %w3\n"				\
"	stlxr	%w1, %w0, %2\n"						\
"	cbnz	%w1, 1b"						\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: "Ir" (i)							\
	: "memory");							\
									\
	smp_mb();							\
	return result;							\
}									\
__LL_SC_EXPORT(atomic_##op##_return);
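
/*
 * Compared with ATOMIC_OP, the _return variants use stlxr (a store
 * with release semantics) and follow the loop with smp_mb(), so the
 * returned value is fully ordered. The "memory" clobber additionally
 * stops the compiler from caching memory values across the sequence.
 */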

#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_OP_RETURN(op, asm_op)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, sub)

ATOMIC_OP(and, and)
ATOMIC_OP(andnot, bic)
ATOMIC_OP(or, orr)
ATOMIC_OP(xor, eor)
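
/*
 * The instantiations above generate atomic_add and atomic_sub (each
 * paired with an atomic_*_return variant), plus atomic_and,
 * atomic_andnot, atomic_or and atomic_xor, which have no
 * value-returning forms here.
 */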

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

__LL_SC_INLINE int
__LL_SC_PREFIX(atomic_cmpxchg(atomic_t *ptr, int old, int new))
{
	unsigned long tmp;
	int oldval;

	smp_mb();

	asm volatile("// atomic_cmpxchg\n"
"1:	ldxr	%w1, %2\n"
"	cmp	%w1, %w3\n"
"	b.ne	2f\n"
"	stxr	%w0, %w4, %2\n"
"	cbnz	%w0, 1b\n"
"2:"
	: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
	: "Ir" (old), "r" (new)
	: "cc");

	smp_mb();
	return oldval;
}
__LL_SC_EXPORT(atomic_cmpxchg);
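
/*
 * Usage sketch (illustrative only; 'lock' is a hypothetical example
 * variable): atomic_cmpxchg() returns the value actually found in the
 * counter, so success is detected by comparing against 'old':
 *
 *	if (atomic_cmpxchg(&lock, 0, 1) == 0)
 *		...;	// we observed 0 and installed 1
 *
 * The smp_mb() calls on either side of the LL/SC loop make the
 * operation fully ordered whether or not the store takes place.
 */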

#define ATOMIC64_OP(op, asm_op)						\
__LL_SC_INLINE void							\
__LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v))			\
{									\
	long result;							\
	unsigned long tmp;						\
									\
	asm volatile("// atomic64_" #op "\n"				\
"1:	ldxr	%0, %2\n"						\
"	" #asm_op "	%0, %0, %3\n"					\
"	stxr	%w1, %0, %2\n"						\
"	cbnz	%w1, 1b"						\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: "Ir" (i));							\
}									\
__LL_SC_EXPORT(atomic64_##op);

#define ATOMIC64_OP_RETURN(op, asm_op)					\
__LL_SC_INLINE long							\
__LL_SC_PREFIX(atomic64_##op##_return(long i, atomic64_t *v))		\
{									\
	long result;							\
	unsigned long tmp;						\
									\
	asm volatile("// atomic64_" #op "_return\n"			\
"1:	ldxr	%0, %2\n"						\
"	" #asm_op "	%0, %0, %3\n"					\
"	stlxr	%w1, %0, %2\n"						\
"	cbnz	%w1, 1b"						\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: "Ir" (i)							\
	: "memory");							\
									\
	smp_mb();							\
	return result;							\
}									\
__LL_SC_EXPORT(atomic64_##op##_return);

#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_OP_RETURN(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, sub)

ATOMIC64_OP(and, and)
ATOMIC64_OP(andnot, bic)
ATOMIC64_OP(or, orr)
ATOMIC64_OP(xor, eor)

#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

__LL_SC_INLINE long
__LL_SC_PREFIX(atomic64_cmpxchg(atomic64_t *ptr, long old, long new))
{
	long oldval;
	unsigned long res;

	smp_mb();

	asm volatile("// atomic64_cmpxchg\n"
"1:	ldxr	%1, %2\n"
"	cmp	%1, %3\n"
"	b.ne	2f\n"
"	stxr	%w0, %4, %2\n"
"	cbnz	%w0, 1b\n"
"2:"
	: "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
	: "Ir" (old), "r" (new)
	: "cc");

	smp_mb();
	return oldval;
}
__LL_SC_EXPORT(atomic64_cmpxchg);

__LL_SC_INLINE long
__LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_dec_if_positive\n"
"1:	ldxr	%0, %2\n"
"	subs	%0, %0, #1\n"
"	b.mi	2f\n"
"	stlxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b\n"
"	dmb	ish\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	:
	: "cc", "memory");

	return result;
}
__LL_SC_EXPORT(atomic64_dec_if_positive);
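
/*
 * Usage sketch (illustrative only; 'refs' is a hypothetical example
 * counter): the function returns the decremented value, or a negative
 * value without writing back when the counter was already <= 0:
 *
 *	if (atomic64_dec_if_positive(&refs) < 0)
 *		...;	// counter was not modified
 *
 * Note that the dmb ish is only executed on the successful path,
 * after the stlxr has succeeded.
 */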

#define __CMPXCHG_CASE(w, sz, name, mb, cl)				\
__LL_SC_INLINE unsigned long						\
__LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr,		\
				     unsigned long old,			\
				     unsigned long new))		\
{									\
	unsigned long tmp, oldval;					\
									\
	asm volatile(							\
	"	" #mb "\n"						\
	"1:	ldxr" #sz "\t%" #w "[oldval], %[v]\n"			\
	"	eor	%" #w "[tmp], %" #w "[oldval], %" #w "[old]\n"	\
	"	cbnz	%" #w "[tmp], 2f\n"				\
	"	stxr" #sz "\t%w[tmp], %" #w "[new], %[v]\n"		\
	"	cbnz	%w[tmp], 1b\n"					\
	"	" #mb "\n"						\
	"	mov	%" #w "[oldval], %" #w "[old]\n"		\
	"2:"								\
	: [tmp] "=&r" (tmp), [oldval] "=&r" (oldval),			\
	  [v] "+Q" (*(unsigned long *)ptr)				\
	: [old] "Lr" (old), [new] "r" (new)				\
	: cl);								\
									\
	return oldval;							\
}									\
__LL_SC_EXPORT(__cmpxchg_case_##name);

__CMPXCHG_CASE(w, b,    1,        ,         )
__CMPXCHG_CASE(w, h,    2,        ,         )
__CMPXCHG_CASE(w,  ,    4,        ,         )
__CMPXCHG_CASE( ,  ,    8,        ,         )
__CMPXCHG_CASE(w, b, mb_1, dmb ish, "memory")
__CMPXCHG_CASE(w, h, mb_2, dmb ish, "memory")
__CMPXCHG_CASE(w,  , mb_4, dmb ish, "memory")
__CMPXCHG_CASE( ,  , mb_8, dmb ish, "memory")
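
/*
 * The instantiations above generate __cmpxchg_case_{1,2,4,8}
 * (relaxed: empty barrier argument, no clobber) and
 * __cmpxchg_case_mb_{1,2,4,8} (fully ordered: dmb ish before the
 * loop and again on the success path, with a "memory" clobber). The
 * numeric suffix is the operand size in bytes; a 'w' first argument
 * selects 32-bit registers for the sub-doubleword cases.
 */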

#undef __CMPXCHG_CASE

#endif	/* __ASM_ATOMIC_LL_SC_H */