arm64: atomics: move ll/sc atomics into separate header file
arch/arm64/include/asm/atomic_ll_sc.h
/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASM_ATOMIC_LL_SC_H
#define __ASM_ATOMIC_LL_SC_H

/*
 * AArch64 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 *
 * NOTE: these functions do *not* follow the PCS and must explicitly
 * save any clobbered registers other than x0 (regardless of return
 * value).  This is achieved through -fcall-saved-* compiler flags for
 * this file, which unfortunately don't work on a per-function basis
 * (the optimize attribute silently ignores these options).
 */

#ifndef __LL_SC_INLINE
#define __LL_SC_INLINE	static inline
#endif

#ifndef __LL_SC_PREFIX
#define __LL_SC_PREFIX(x)	x
#endif
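
/*
 * __LL_SC_INLINE and __LL_SC_PREFIX are override hooks: left alone, every
 * routine below is an ordinary "static inline" function.  A wrapper header
 * can redefine them before including this file to emit out-of-line, renamed
 * copies instead, for example (illustrative only):
 *
 *	#define __LL_SC_INLINE
 *	#define __LL_SC_PREFIX(x)	__ll_sc_##x
 *
 * which would turn atomic_add() into an external function named
 * __ll_sc_atomic_add().
 */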

#define ATOMIC_OP(op, asm_op) \
__LL_SC_INLINE void \
__LL_SC_PREFIX(atomic_##op(int i, atomic_t *v)) \
{ \
	unsigned long tmp; \
	int result; \
\
	asm volatile("// atomic_" #op "\n" \
"1:	ldxr	%w0, %2\n" \
"	" #asm_op "	%w0, %w0, %w3\n" \
"	stxr	%w1, %w0, %2\n" \
"	cbnz	%w1, 1b" \
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
	: "Ir" (i)); \
} \

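/*
 * For example, ATOMIC_OP(add, add) above generates (modulo the
 * __LL_SC_INLINE/__LL_SC_PREFIX hooks):
 *
 *	void atomic_add(int i, atomic_t *v)
 *
 * a plain LL/SC read-modify-write loop with no barriers, so the
 * non-returning ops give no ordering guarantees.
 */
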
#define ATOMIC_OP_RETURN(op, asm_op) \
__LL_SC_INLINE int \
__LL_SC_PREFIX(atomic_##op##_return(int i, atomic_t *v)) \
{ \
	unsigned long tmp; \
	int result; \
\
	asm volatile("// atomic_" #op "_return\n" \
"1:	ldxr	%w0, %2\n" \
"	" #asm_op "	%w0, %w0, %w3\n" \
"	stlxr	%w1, %w0, %2\n" \
"	cbnz	%w1, 1b" \
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
	: "Ir" (i) \
	: "memory"); \
\
	smp_mb(); \
	return result; \
}
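
/*
 * The *_return variants above pair a store-release (stlxr) with an smp_mb()
 * after the loop, so e.g. atomic_add_return() acts as a full barrier, as
 * required of value-returning atomics.
 */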

#define ATOMIC_OPS(op, asm_op) \
	ATOMIC_OP(op, asm_op) \
	ATOMIC_OP_RETURN(op, asm_op)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, sub)

ATOMIC_OP(and, and)
ATOMIC_OP(andnot, bic)
ATOMIC_OP(or, orr)
ATOMIC_OP(xor, eor)

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
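
/*
 * The instantiations above provide atomic_add(), atomic_sub(),
 * atomic_add_return(), atomic_sub_return(), atomic_and(), atomic_andnot(),
 * atomic_or() and atomic_xor().
 */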

__LL_SC_INLINE int
__LL_SC_PREFIX(atomic_cmpxchg(atomic_t *ptr, int old, int new))
{
	unsigned long tmp;
	int oldval;

	smp_mb();

	asm volatile("// atomic_cmpxchg\n"
"1:	ldxr	%w1, %2\n"
"	cmp	%w1, %w3\n"
"	b.ne	2f\n"
"	stxr	%w0, %w4, %2\n"
"	cbnz	%w0, 1b\n"
"2:"
	: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
	: "Ir" (old), "r" (new)
	: "cc");

	smp_mb();
	return oldval;
}
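
/*
 * atomic_cmpxchg() above brackets a relaxed LL/SC loop with smp_mb() on
 * both sides, so it is fully ordered whether or not the comparison
 * succeeds.
 */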

#define ATOMIC64_OP(op, asm_op) \
__LL_SC_INLINE void \
__LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v)) \
{ \
	long result; \
	unsigned long tmp; \
\
	asm volatile("// atomic64_" #op "\n" \
"1:	ldxr	%0, %2\n" \
"	" #asm_op "	%0, %0, %3\n" \
"	stxr	%w1, %0, %2\n" \
"	cbnz	%w1, 1b" \
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
	: "Ir" (i)); \
} \

#define ATOMIC64_OP_RETURN(op, asm_op) \
__LL_SC_INLINE long \
__LL_SC_PREFIX(atomic64_##op##_return(long i, atomic64_t *v)) \
{ \
	long result; \
	unsigned long tmp; \
\
	asm volatile("// atomic64_" #op "_return\n" \
"1:	ldxr	%0, %2\n" \
"	" #asm_op "	%0, %0, %3\n" \
"	stlxr	%w1, %0, %2\n" \
"	cbnz	%w1, 1b" \
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
	: "Ir" (i) \
	: "memory"); \
\
	smp_mb(); \
	return result; \
}

#define ATOMIC64_OPS(op, asm_op) \
	ATOMIC64_OP(op, asm_op) \
	ATOMIC64_OP_RETURN(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, sub)

ATOMIC64_OP(and, and)
ATOMIC64_OP(andnot, bic)
ATOMIC64_OP(or, orr)
ATOMIC64_OP(xor, eor)

#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
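
/*
 * As above, these instantiations provide the 64-bit counterparts
 * (atomic64_add(), atomic64_add_return(), atomic64_and(), ...), operating
 * on atomic64_t with full-width registers.
 */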

__LL_SC_INLINE long
__LL_SC_PREFIX(atomic64_cmpxchg(atomic64_t *ptr, long old, long new))
{
	long oldval;
	unsigned long res;

	smp_mb();

	asm volatile("// atomic64_cmpxchg\n"
"1:	ldxr	%1, %2\n"
"	cmp	%1, %3\n"
"	b.ne	2f\n"
"	stxr	%w0, %4, %2\n"
"	cbnz	%w0, 1b\n"
"2:"
	: "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
	: "Ir" (old), "r" (new)
	: "cc");

	smp_mb();
	return oldval;
}

__LL_SC_INLINE long
__LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_dec_if_positive\n"
"1:	ldxr	%0, %2\n"
"	subs	%0, %0, #1\n"
"	b.mi	2f\n"
"	stlxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b\n"
"	dmb	ish\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	:
	: "cc", "memory");

	return result;
}
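
/*
 * atomic64_dec_if_positive() above only performs the store (and the dmb)
 * when the decremented value is non-negative; if the counter would go
 * negative, the store is skipped and the negative result is returned so
 * the caller can detect failure.
 */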

#endif	/* __ASM_ATOMIC_LL_SC_H */