/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_ATOMIC_H
#define __ASM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>

#include <asm/barrier.h>
#include <asm/cmpxchg.h>
#define ATOMIC_INIT(i)	{ (i) }
/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	ACCESS_ONCE((v)->counter)
#define atomic_set(v,i)	(((v)->counter) = (i))
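
/*
 * Illustrative sketch, not part of this header's API: basic use of the
 * accessors above. The function name is made up for the example.
 */
static inline int example_reset_and_read(atomic_t *v)
{
	atomic_set(v, 0);	/* plain store; safe per the comment above */
	return atomic_read(v);	/* volatile load via ACCESS_ONCE */
}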
/*
 * AArch64 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */
#define ATOMIC_OP(op, asm_op)						\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	asm volatile("// atomic_" #op "\n"				\
"1:	ldxr	%w0, %2\n"						\
"	" #asm_op "	%w0, %w0, %w3\n"				\
"	stxr	%w1, %w0, %2\n"						\
"	cbnz	%w1, 1b"						\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: "Ir" (i));							\
}
#define ATOMIC_OP_RETURN(op, asm_op)					\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	asm volatile("// atomic_" #op "_return\n"			\
"1:	ldxr	%w0, %2\n"						\
"	" #asm_op "	%w0, %w0, %w3\n"				\
"	stlxr	%w1, %w0, %2\n"						\
"	cbnz	%w1, 1b"						\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: "Ir" (i)							\
	: "memory");							\
									\
	smp_mb();							\
	return result;							\
}
#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_OP_RETURN(op, asm_op)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, sub)

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
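
/*
 * Illustrative sketch, not part of this header: the generated ops seen from
 * the caller's side. atomic_add() returns nothing and implies no ordering;
 * atomic_add_return() returns the new value and is fully ordered (stlxr
 * release plus the trailing smp_mb()). The function name is made up.
 */
static inline int example_bump(atomic_t *v)
{
	atomic_add(2, v);		/* unordered read-modify-write */
	return atomic_add_return(3, v);	/* ordered; yields updated value */
}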
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	unsigned long tmp;
	int oldval;

	smp_mb();

	asm volatile("// atomic_cmpxchg\n"
"1:	ldxr	%w1, %2\n"
"	cmp	%w1, %w3\n"
"	b.ne	2f\n"
"	stxr	%w0, %w4, %2\n"
"	cbnz	%w0, 1b\n"
"2:"
	: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
	: "Ir" (old), "r" (new)
	: "cc");

	smp_mb();
	return oldval;
}
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
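
/*
 * Illustrative sketch, not part of this header: atomic_xchg() as a simple
 * test-and-set flag grab. Names are made up for the example.
 */
static inline int example_try_grab(atomic_t *flag)
{
	/* Non-zero return means we changed 0 -> 1 and own the flag. */
	return atomic_xchg(flag, 1) == 0;
}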
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c;
}
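
/*
 * Illustrative sketch, not part of this header: the classic "get a
 * reference unless the object is already dead" pattern built on
 * __atomic_add_unless(). The function name is made up for the example.
 */
static inline int example_get_unless_zero(atomic_t *refcount)
{
	/* An old value of 0 means the increment was refused. */
	return __atomic_add_unless(refcount, 1, 0) != 0;
}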
#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)	(atomic_add_return(1, v))
#define atomic_dec_return(v)	(atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
/*
 * 64-bit atomic operations.
 */
#define ATOMIC64_INIT(i) { (i) }

#define atomic64_read(v)	ACCESS_ONCE((v)->counter)
#define atomic64_set(v,i)	(((v)->counter) = (i))
#define ATOMIC64_OP(op, asm_op)						\
static inline void atomic64_##op(long i, atomic64_t *v)		\
{									\
	long result;							\
	unsigned long tmp;						\
									\
	asm volatile("// atomic64_" #op "\n"				\
"1:	ldxr	%0, %2\n"						\
"	" #asm_op "	%0, %0, %3\n"					\
"	stxr	%w1, %0, %2\n"						\
"	cbnz	%w1, 1b"						\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: "Ir" (i));							\
}
#define ATOMIC64_OP_RETURN(op, asm_op)					\
static inline long atomic64_##op##_return(long i, atomic64_t *v)	\
{									\
	long result;							\
	unsigned long tmp;						\
									\
	asm volatile("// atomic64_" #op "_return\n"			\
"1:	ldxr	%0, %2\n"						\
"	" #asm_op "	%0, %0, %3\n"					\
"	stlxr	%w1, %0, %2\n"						\
"	cbnz	%w1, 1b"						\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: "Ir" (i)							\
	: "memory");							\
									\
	smp_mb();							\
	return result;							\
}
#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_OP_RETURN(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, sub)
#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
{
	long oldval;
	unsigned long res;

	smp_mb();

	asm volatile("// atomic64_cmpxchg\n"
"1:	ldxr	%1, %2\n"
"	cmp	%1, %3\n"
"	b.ne	2f\n"
"	stxr	%w0, %4, %2\n"
"	cbnz	%w0, 1b\n"
"2:"
	: "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
	: "Ir" (old), "r" (new)
	: "cc");

	smp_mb();
	return oldval;
}
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_dec_if_positive\n"
"1:	ldxr	%0, %2\n"
"	subs	%0, %0, #1\n"
"	b.mi	2f\n"
"	stlxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b\n"
"	dmb	ish\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	:
	: "cc", "memory");

	return result;
}
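
/*
 * Illustrative sketch, not part of this header: consuming one credit with
 * atomic64_dec_if_positive(). A negative result means the store was
 * skipped and the counter was left untouched. Names are made up.
 */
static inline int example_take_credit(atomic64_t *credits)
{
	return atomic64_dec_if_positive(credits) >= 0;
}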
static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;

	c = atomic64_read(v);
	while (c != u && (old = atomic64_cmpxchg((v), c, c + a)) != c)
		c = old;

	return c != u;
}
#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
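
/*
 * Illustrative sketch, not part of this header: a monotonic 64-bit ID
 * allocator on top of atomic64_inc_return(); the 64-bit width makes
 * wraparound a non-issue in practice. Names are made up for the example.
 */
static inline long example_next_id(atomic64_t *last_id)
{
	return atomic64_inc_return(last_id);
}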
#endif