/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_ATOMIC_H
#define __ASM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>

#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	(*(volatile int *)&(v)->counter)
#define atomic_set(v,i)	(((v)->counter) = (i))

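/*
 * Usage sketch (illustration only; "nr_users" is hypothetical, not part
 * of this header):
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_users, 0);	plain store, no ordering implied
 *	pr_info("users: %d\n", atomic_read(&nr_users));
 */
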
/*
 * AArch64 UP and SMP safe atomic ops. We use load exclusive and
 * store exclusive to ensure that these are atomic. We may loop
 * to ensure that the update happens.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_add\n"
"1:	ldxr	%w0, %2\n"
"	add	%w0, %w0, %w3\n"
"	stxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "cc");
}
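
/*
 * For reference only (userspace analogue, not kernel code): GCC's
 * __atomic builtins emit a comparable ldxr/stxr retry loop on ARMv8.0,
 * e.g. __atomic_fetch_add(&v->counter, i, __ATOMIC_RELAXED) roughly
 * matches atomic_add() above.
 *
 * The value-returning and compare-and-swap routines below additionally
 * use load-acquire/store-release (ldaxr/stlxr) and a "memory" clobber,
 * so they are ordered against surrounding accesses as the
 * atomic_*_return() API expects; atomic_add()/atomic_sub() provide no
 * ordering of their own.
 */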

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_add_return\n"
"1:	ldaxr	%w0, %2\n"
"	add	%w0, %w0, %w3\n"
"	stlxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "cc", "memory");

	return result;
}

static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_sub\n"
"1:	ldxr	%w0, %2\n"
"	sub	%w0, %w0, %w3\n"
"	stxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "cc");
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_sub_return\n"
"1:	ldaxr	%w0, %2\n"
"	sub	%w0, %w0, %w3\n"
"	stlxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "cc", "memory");

	return result;
}

static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	unsigned long tmp;
	int oldval;

	asm volatile("// atomic_cmpxchg\n"
"1:	ldaxr	%w1, %2\n"
"	cmp	%w1, %w3\n"
"	b.ne	2f\n"
"	stlxr	%w0, %w4, %2\n"
"	cbnz	%w0, 1b\n"
"2:"
	: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
	: "Ir" (old), "r" (new)
	: "cc", "memory");

	return oldval;
}
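
/*
 * Usage sketch (illustration only; "claimed" is a hypothetical flag):
 * atomic_cmpxchg() returns the value previously held, so success is
 * detected by comparing against the expected old value.
 *
 *	if (atomic_cmpxchg(&claimed, 0, 1) == 0)
 *		first caller to get here wins
 */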

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c;
}
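
/*
 * __atomic_add_unless() is the building block the generic headers use
 * for atomic_add_unless() and atomic_inc_not_zero(). A typical pattern
 * (illustration only; "obj" and its "refs" field are hypothetical):
 *
 *	if (__atomic_add_unless(&obj->refs, 1, 0) == 0)
 *		return NULL;	already being torn down, take no reference
 */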

#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)	(atomic_add_return(1, v))
#define atomic_dec_return(v)	(atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

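/*
 * atomic_inc()/atomic_dec() above are built on the unordered ldxr/stxr
 * operations, so these hooks must provide a full barrier.
 */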
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

/*
 * 64-bit atomic operations.
 */
#define ATOMIC64_INIT(i) { (i) }

#define atomic64_read(v)	(*(volatile long long *)&(v)->counter)
#define atomic64_set(v,i)	(((v)->counter) = (i))

static inline void atomic64_add(u64 i, atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_add\n"
"1:	ldxr	%0, %2\n"
"	add	%0, %0, %3\n"
"	stxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "cc");
}

static inline long atomic64_add_return(long i, atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_add_return\n"
"1:	ldaxr	%0, %2\n"
"	add	%0, %0, %3\n"
"	stlxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "cc", "memory");

	return result;
}

static inline void atomic64_sub(u64 i, atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_sub\n"
"1:	ldxr	%0, %2\n"
"	sub	%0, %0, %3\n"
"	stxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "cc");
}

static inline long atomic64_sub_return(long i, atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_sub_return\n"
"1:	ldaxr	%0, %2\n"
"	sub	%0, %0, %3\n"
"	stlxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "cc", "memory");

	return result;
}

static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
{
	long oldval;
	unsigned long res;

	asm volatile("// atomic64_cmpxchg\n"
"1:	ldaxr	%1, %2\n"
"	cmp	%1, %3\n"
"	b.ne	2f\n"
"	stlxr	%w0, %4, %2\n"
"	cbnz	%w0, 1b\n"
"2:"
	: "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
	: "Ir" (old), "r" (new)
	: "cc", "memory");

	return oldval;
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_dec_if_positive\n"
"1:	ldaxr	%0, %2\n"
"	subs	%0, %0, #1\n"
"	b.mi	2f\n"
"	stlxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	:
	: "cc", "memory");

	return result;
}
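
/*
 * Usage sketch (illustration only; "obj", "refs" and free_obj() are
 * hypothetical): atomic64_dec_if_positive() returns the decremented
 * value, or a negative number if the counter was already zero and was
 * left untouched.
 *
 *	if (atomic64_dec_if_positive(&obj->refs) == 0)
 *		free_obj(obj);	we dropped the last reference
 */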

static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;

	c = atomic64_read(v);
	while (c != u && (old = atomic64_cmpxchg((v), c, c + a)) != c)
		c = old;

	return c != u;
}
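
/*
 * Note the difference from the 32-bit helper: atomic64_add_unless()
 * returns whether the add happened (non-zero on success), not the old
 * value. atomic64_inc_not_zero() below relies on this, e.g.
 * (illustration only; "obj" is hypothetical):
 *
 *	if (!atomic64_inc_not_zero(&obj->refs))
 *		return NULL;	already dead, take no reference
 */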

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)

#endif /* __KERNEL__ */
#endif /* __ASM_ATOMIC_H */