arm64: introduce CONFIG_ARM64_LSE_ATOMICS as fallback to ll/sc atomics
arch/arm64/include/asm/atomic_ll_sc.h
/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASM_ATOMIC_LL_SC_H
#define __ASM_ATOMIC_LL_SC_H

#ifndef __ARM64_IN_ATOMIC_IMPL
#error "please don't include this file directly"
#endif

/*
 * AArch64 UP and SMP safe atomic ops. We use load exclusive and
 * store exclusive to ensure that these are atomic. We may loop
 * to ensure that the update happens.
 *
 * NOTE: these functions do *not* follow the PCS and must explicitly
 * save any clobbered registers other than x0 (regardless of return
 * value). This is achieved through -fcall-saved-* compiler flags for
 * this file, which unfortunately don't work on a per-function basis
 * (the optimize attribute silently ignores these options).
 */

#ifndef __LL_SC_INLINE
#define __LL_SC_INLINE static inline
#endif

#ifndef __LL_SC_PREFIX
#define __LL_SC_PREFIX(x) x
#endif

#ifndef __LL_SC_EXPORT
#define __LL_SC_EXPORT(x)
#endif

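/*
 * Illustrative sketch of how the hooks above are meant to be used: when
 * the LL/SC routines are built out of line (as the fallback behind the
 * LSE atomics), the translation unit including this header can override
 * them before defining __ARM64_IN_ATOMIC_IMPL, along the lines of
 *
 *        #define __LL_SC_INLINE
 *        #define __LL_SC_PREFIX(x)    __ll_sc_##x
 *        #define __LL_SC_EXPORT(x)    EXPORT_SYMBOL(__LL_SC_PREFIX(x))
 *
 * (the exact prefix and export values shown here are assumptions, given
 * only to clarify the hooks' intent). Per the NOTE above, that out-of-line
 * file is also the one compiled with the -fcall-saved-* flags so that
 * callers only ever see x0 clobbered. With the defaults left in place,
 * everything is emitted as an ordinary static inline function and
 * __LL_SC_EXPORT() expands to nothing.
 */
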
#define ATOMIC_OP(op, asm_op) \
__LL_SC_INLINE void \
__LL_SC_PREFIX(atomic_##op(int i, atomic_t *v)) \
{ \
        unsigned long tmp; \
        int result; \
\
        asm volatile("// atomic_" #op "\n" \
"1: ldxr %w0, %2\n" \
" " #asm_op " %w0, %w0, %w3\n" \
" stxr %w1, %w0, %2\n" \
" cbnz %w1, 1b" \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
        : "Ir" (i)); \
} \
__LL_SC_EXPORT(atomic_##op);
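
/*
 * For reference, an illustrative expansion (not extra code): with the
 * default hooks, ATOMIC_OP(add, add) above produces roughly
 *
 *        static inline void atomic_add(int i, atomic_t *v)
 *        {
 *                unsigned long tmp;
 *                int result;
 *
 *                asm volatile("// atomic_add\n"
 *                "1: ldxr %w0, %2\n"
 *                " add %w0, %w0, %w3\n"
 *                " stxr %w1, %w0, %2\n"
 *                " cbnz %w1, 1b"
 *                : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 *                : "Ir" (i));
 *        }
 *
 * i.e. a load-exclusive/store-exclusive retry loop. Note that these void
 * ops give no ordering guarantee and declare neither a "cc" nor a
 * "memory" clobber.
 */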

#define ATOMIC_OP_RETURN(op, asm_op) \
__LL_SC_INLINE int \
__LL_SC_PREFIX(atomic_##op##_return(int i, atomic_t *v)) \
{ \
        unsigned long tmp; \
        int result; \
\
        asm volatile("// atomic_" #op "_return\n" \
"1: ldxr %w0, %2\n" \
" " #asm_op " %w0, %w0, %w3\n" \
" stlxr %w1, %w0, %2\n" \
" cbnz %w1, 1b" \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
        : "Ir" (i) \
        : "memory"); \
\
        smp_mb(); \
        return result; \
} \
__LL_SC_EXPORT(atomic_##op##_return);
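
/*
 * Ordering note: the *_return variants pair a plain load-exclusive with
 * a store-release (stlxr) and then execute smp_mb(), so for example
 * atomic_add_return() is fully ordered, whereas the void ops above
 * provide no ordering at all. The "memory" clobber additionally stops
 * the compiler from moving memory accesses across the asm block.
 */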

#define ATOMIC_OPS(op, asm_op) \
        ATOMIC_OP(op, asm_op) \
        ATOMIC_OP_RETURN(op, asm_op)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, sub)

ATOMIC_OP(and, and)
ATOMIC_OP(andnot, bic)
ATOMIC_OP(or, orr)
ATOMIC_OP(xor, eor)

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
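
/*
 * The instantiations above provide atomic_add(), atomic_sub(),
 * atomic_add_return(), atomic_sub_return(), atomic_and(),
 * atomic_andnot() (implemented with BIC, i.e. AND of the complemented
 * operand), atomic_or() and atomic_xor(). A typical caller looks
 * something like (illustrative only):
 *
 *        atomic_t refs = ATOMIC_INIT(1);
 *
 *        atomic_add(2, &refs);
 *        if (atomic_sub_return(3, &refs) == 0)
 *                release_the_object();   // hypothetical helper
 */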

__LL_SC_INLINE int
__LL_SC_PREFIX(atomic_cmpxchg(atomic_t *ptr, int old, int new))
{
        unsigned long tmp;
        int oldval;

        smp_mb();

        asm volatile("// atomic_cmpxchg\n"
"1: ldxr %w1, %2\n"
" cmp %w1, %w3\n"
" b.ne 2f\n"
" stxr %w0, %w4, %2\n"
" cbnz %w0, 1b\n"
"2:"
        : "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
        : "Ir" (old), "r" (new)
        : "cc");

        smp_mb();
        return oldval;
}
__LL_SC_EXPORT(atomic_cmpxchg);
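
/*
 * This LL/SC atomic_cmpxchg() is bracketed by smp_mb() on both sides,
 * so it is fully ordered whether or not the store happens. It returns
 * the value observed in *ptr: equal to 'old' on success, something else
 * on failure. Illustrative use:
 *
 *        atomic_t v = ATOMIC_INIT(0);
 *
 *        if (atomic_cmpxchg(&v, 0, 1) == 0)
 *                ... we won the race and moved v from 0 to 1 ...
 */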

#define ATOMIC64_OP(op, asm_op) \
__LL_SC_INLINE void \
__LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v)) \
{ \
        long result; \
        unsigned long tmp; \
\
        asm volatile("// atomic64_" #op "\n" \
"1: ldxr %0, %2\n" \
" " #asm_op " %0, %0, %3\n" \
" stxr %w1, %0, %2\n" \
" cbnz %w1, 1b" \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
        : "Ir" (i)); \
} \
__LL_SC_EXPORT(atomic64_##op);

#define ATOMIC64_OP_RETURN(op, asm_op) \
__LL_SC_INLINE long \
__LL_SC_PREFIX(atomic64_##op##_return(long i, atomic64_t *v)) \
{ \
        long result; \
        unsigned long tmp; \
\
        asm volatile("// atomic64_" #op "_return\n" \
"1: ldxr %0, %2\n" \
" " #asm_op " %0, %0, %3\n" \
" stlxr %w1, %0, %2\n" \
" cbnz %w1, 1b" \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
        : "Ir" (i) \
        : "memory"); \
\
        smp_mb(); \
        return result; \
} \
__LL_SC_EXPORT(atomic64_##op##_return);

#define ATOMIC64_OPS(op, asm_op) \
        ATOMIC64_OP(op, asm_op) \
        ATOMIC64_OP_RETURN(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, sub)

ATOMIC64_OP(and, and)
ATOMIC64_OP(andnot, bic)
ATOMIC64_OP(or, orr)
ATOMIC64_OP(xor, eor)

#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

__LL_SC_INLINE long
__LL_SC_PREFIX(atomic64_cmpxchg(atomic64_t *ptr, long old, long new))
{
        long oldval;
        unsigned long res;

        smp_mb();

        asm volatile("// atomic64_cmpxchg\n"
"1: ldxr %1, %2\n"
" cmp %1, %3\n"
" b.ne 2f\n"
" stxr %w0, %4, %2\n"
" cbnz %w0, 1b\n"
"2:"
        : "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
        : "Ir" (old), "r" (new)
        : "cc");

        smp_mb();
        return oldval;
}
__LL_SC_EXPORT(atomic64_cmpxchg);

__LL_SC_INLINE long
__LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
{
        long result;
        unsigned long tmp;

        asm volatile("// atomic64_dec_if_positive\n"
"1: ldxr %0, %2\n"
" subs %0, %0, #1\n"
" b.mi 2f\n"
" stlxr %w1, %0, %2\n"
" cbnz %w1, 1b\n"
" dmb ish\n"
"2:"
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
        :
        : "cc", "memory");

        return result;
}
__LL_SC_EXPORT(atomic64_dec_if_positive);
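
/*
 * atomic64_dec_if_positive() only performs the store when the
 * decremented value is still >= 0; otherwise the counter is left
 * unchanged and the negative would-be result is returned. The "dmb ish"
 * sits on the success path, so full ordering is only guaranteed when
 * the decrement actually takes place. Illustrative use:
 *
 *        // 'slots' is a hypothetical atomic64_t counter
 *        if (atomic64_dec_if_positive(&slots) < 0)
 *                ... nothing left; the counter was not modified ...
 */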

#endif /* __ASM_ATOMIC_LL_SC_H */