/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	READ_ONCE((v)->counter)
#define atomic_set(v,i)	WRITE_ONCE(((v)->counter), (i))
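
/*
 * Added usage sketch (hypothetical names, not part of the original
 * header): initialising and accessing an atomic_t.
 *
 *	static atomic_t example_count = ATOMIC_INIT(0);
 *
 *	static void example(void)
 *	{
 *		int c;
 *
 *		atomic_set(&example_count, 5);		// plain store
 *		c = atomic_read(&example_count);	// plain load
 *	}
 */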

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic_" #op "\n"			\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)	\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic_" #op "_return\n"		\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
									\
	return result;							\
}
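
/*
 * Added commentary (C-level sketch of the LL/SC loop above, assuming
 * hypothetical ldrex()/strex() helpers): strex writes 0 to the status
 * register on success and non-zero if the exclusive monitor was lost,
 * in which case the loop retries from label 1.
 *
 *	do {
 *		old = ldrex(&v->counter);	// load-exclusive
 *		new = old op i;			// add/sub/and/...
 *	} while (strex(&v->counter, new));	// retry on lost monitor
 */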

#define atomic_add_return_relaxed	atomic_add_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return_relaxed

static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
	int oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"strexeq %0, %5, [%3]\n"
		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	return oldval;
}
#define atomic_cmpxchg_relaxed		atomic_cmpxchg_relaxed
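
/*
 * Added usage sketch (hypothetical caller): building a saturating
 * increment on top of atomic_cmpxchg(), which common code derives
 * from the _relaxed variant defined above.
 *
 *	static void example_inc_saturating(atomic_t *v, int max)
 *	{
 *		int old = atomic_read(v);
 *
 *		while (old < max) {
 *			int seen = atomic_cmpxchg(v, old, old + 1);
 *			if (seen == old)
 *				break;		// our update won
 *			old = seen;		// lost a race; retry
 *		}
 *	}
 */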

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__ ("@ atomic_add_unless\n"
"1:	ldrex	%0, [%4]\n"
"	teq	%0, %5\n"
"	beq	2f\n"
"	add	%1, %0, %6\n"
"	strex	%2, %1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}

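/*
 * Added usage sketch (hypothetical caller): __atomic_add_unless() backs
 * atomic_add_unless() and atomic_inc_not_zero() in common code, e.g.
 * taking a reference only while an object is still live:
 *
 *	if (atomic_inc_not_zero(&obj->refcount))
 *		use(obj);	// got a reference
 *	// else: refcount already hit zero, object is going away
 */
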
#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	raw_local_irq_restore(flags);					\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int val;							\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	val = v->counter;						\
	raw_local_irq_restore(flags);					\
									\
	return val;							\
}
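
/*
 * Added commentary: on pre-ARMv6 UP, ATOMIC_OP(add, +=, add) above
 * expands (roughly) to:
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned long flags;
 *
 *		raw_local_irq_save(flags);
 *		v->counter += i;
 *		raw_local_irq_restore(flags);
 *	}
 *
 * With SMP ruled out by the #error above, disabling interrupts is
 * enough to make the read-modify-write atomic.
 */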

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c;
}

#endif /* __LINUX_ARM_ARCH__ */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define atomic_andnot atomic_andnot

ATOMIC_OP(and, &=, and)
ATOMIC_OP(andnot, &= ~, bic)
ATOMIC_OP(or,  |=, orr)
ATOMIC_OP(xor, ^=, eor)

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
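
/*
 * Added commentary: the instantiations above generate atomic_add(),
 * atomic_sub(), their *_return_relaxed variants, and the bitwise ops.
 * A hypothetical caller sketch:
 *
 *	atomic_t flags = ATOMIC_INIT(0);
 *
 *	atomic_or(0x4, &flags);		// atomically set a bit
 *	atomic_andnot(0x4, &flags);	// atomically clear it (BIC insn)
 */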

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return_relaxed(v)	(atomic_add_return_relaxed(1, v))
#define atomic_dec_return_relaxed(v)	(atomic_sub_return_relaxed(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

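/*
 * Added usage sketch (hypothetical caller): the *_and_test() forms
 * support the usual reference-counting idiom:
 *
 *	if (atomic_dec_and_test(&obj->refcount))
 *		free_obj(obj);	// we dropped the last reference
 */
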
#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
	long long counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

#ifdef CONFIG_ARM_LPAE
static inline long long atomic64_read(const atomic64_t *v)
{
	long long result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	__asm__ __volatile__("@ atomic64_set\n"
"	strd	%2, %H2, [%1]"
	: "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	);
}
#else
static inline long long atomic64_read(const atomic64_t *v)
{
	long long result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrexd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	long long tmp;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	strexd	%0, %3, %H3, [%2]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
#endif
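
/*
 * Added commentary: with LPAE, 64-bit ldrd/strd to naturally aligned
 * addresses are single-copy atomic, so plain loads and stores suffice
 * above. Without LPAE, atomic64_set() must spin on ldrexd/strexd so
 * that a concurrent exclusive sequence observes the store and loses
 * its reservation.
 */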

#define ATOMIC64_OP(op, op1, op2)					\
static inline void atomic64_##op(long long i, atomic64_t *v)		\
{									\
	long long result;						\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic64_" #op "\n"			\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
}									\

#define ATOMIC64_OP_RETURN(op, op1, op2)				\
static inline long long						\
atomic64_##op##_return_relaxed(long long i, atomic64_t *v)		\
{									\
	long long result;						\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic64_" #op "_return\n"		\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
									\
	return result;							\
}
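
/*
 * Added commentary: in the asm above, %Q0 and %R0 select the low and
 * high words of the 64-bit operand and %H0 names the second register
 * of the pair required by ldrexd/strexd. op1 performs the low-word
 * operation (e.g. adds) and op2 the high-word part with carry
 * (e.g. adc), together forming one 64-bit operation.
 */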

#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_OP_RETURN(op, op1, op2)

ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)

#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed

#define atomic64_andnot atomic64_andnot

ATOMIC64_OP(and, and, and)
ATOMIC64_OP(andnot, bic, bic)
ATOMIC64_OP(or,  orr, orr)
ATOMIC64_OP(xor, eor, eor)

#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline long long
atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
{
	long long oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd	%1, %H1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"teqeq	%H1, %H4\n"
		"strexdeq %0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	return oldval;
}
#define atomic64_cmpxchg_relaxed	atomic64_cmpxchg_relaxed

static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
{
	long long result;
	unsigned long tmp;

	prefetchw(&ptr->counter);

	__asm__ __volatile__("@ atomic64_xchg\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	strexd	%1, %4, %H4, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	return result;
}
#define atomic64_xchg_relaxed		atomic64_xchg_relaxed

static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%Q0, %Q0, #1\n"
"	sbc	%R0, %R0, #0\n"
"	teq	%R0, #0\n"
"	bmi	2f\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter)
	: "cc");

	smp_mb();

	return result;
}
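
/*
 * Added usage sketch (hypothetical caller): the decrement only commits
 * when the result stays non-negative, so a 64-bit credit counter can
 * be drawn from safely:
 *
 *	if (atomic64_dec_if_positive(&credits) >= 0)
 *		do_work();	// claimed one credit
 *	else
 *		back_off();	// counter was left untouched
 */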

static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	long long val;
	unsigned long tmp;
	int ret = 1;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_add_unless\n"
"1:	ldrexd	%0, %H0, [%4]\n"
"	teq	%0, %5\n"
"	teqeq	%H0, %H5\n"
"	moveq	%1, #0\n"
"	beq	2f\n"
"	adds	%Q0, %Q0, %Q6\n"
"	adc	%R0, %R0, %R6\n"
"	strexd	%2, %0, %H0, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (ret)
		smp_mb();

	return ret;
}

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return_relaxed(v)	atomic64_add_return_relaxed(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return_relaxed(v)	atomic64_sub_return_relaxed(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif
#endif