/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_ATOMIC_H
#define __ASM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>

#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }
#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	(*(volatile int *)&(v)->counter)
#define atomic_set(v,i)	(((v)->counter) = (i))
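/*
 * Usage sketch (illustrative only; "nr_users" is a hypothetical counter,
 * not something defined in this file):
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_users, 1);
 *	if (atomic_read(&nr_users) > 0)
 *		...
 */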
/*
 * AArch64 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_add\n"
"1:	ldxr	%w0, [%3]\n"		/* load-exclusive the old value */
"	add	%w0, %w0, %w4\n"
"	stxr	%w1, %w0, [%3]\n"	/* store fails (%w1 != 0) if we lost exclusivity */
"	cbnz	%w1, 1b"		/* retry on contention */
	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_add_return\n"
"1:	ldaxr	%w0, [%3]\n"
"	add	%w0, %w0, %w4\n"
"	stlxr	%w1, %w0, [%3]\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	return result;
}
static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_sub\n"
"1:	ldxr	%w0, [%3]\n"
"	sub	%w0, %w0, %w4\n"
"	stxr	%w1, %w0, [%3]\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}
static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_sub_return\n"
"1:	ldaxr	%w0, [%3]\n"
"	sub	%w0, %w0, %w4\n"
"	stlxr	%w1, %w0, [%3]\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	return result;
}
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	unsigned long tmp;
	int oldval;

	asm volatile("// atomic_cmpxchg\n"
"1:	ldaxr	%w1, [%3]\n"
"	cmp	%w1, %w4\n"
"	b.ne	2f\n"			/* current value != old: fail */
"	stlxr	%w0, %w5, [%3]\n"
"	cbnz	%w0, 1b\n"
"2:"
	: "=&r" (tmp), "=&r" (oldval), "+o" (ptr->counter)
	: "r" (&ptr->counter), "Ir" (old), "r" (new)
	: "cc");

	return oldval;
}
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long tmp, tmp2;

	asm volatile("// atomic_clear_mask\n"
"1:	ldxr	%0, [%3]\n"
"	bic	%0, %0, %4\n"		/* clear the bits in mask */
"	stxr	%w1, %0, [%3]\n"
"	cbnz	%w1, 1b"
	: "=&r" (tmp), "=&r" (tmp2), "+o" (*addr)
	: "r" (addr), "Ir" (mask)
	: "cc");
}
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c;
}
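/*
 * Generic code wraps __atomic_add_unless() as atomic_add_unless() and
 * atomic_inc_not_zero().  A classic use (sketch; "obj" is hypothetical)
 * is taking a reference only while the object is still live:
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;
 *
 * i.e. fail if the refcount had already dropped to zero.
 */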
#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)	(atomic_add_return(1, v))
#define atomic_dec_return(v)	(atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
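/*
 * The derived helpers above make common refcounting patterns concise,
 * e.g. (sketch; "obj" is hypothetical) freeing on the final put:
 *
 *	if (atomic_dec_and_test(&obj->refcnt))
 *		kfree(obj);
 */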
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()
/*
 * 64-bit atomic operations.
 */
#define ATOMIC64_INIT(i) { (i) }

#define atomic64_read(v)	(*(volatile long long *)&(v)->counter)
#define atomic64_set(v,i)	(((v)->counter) = (i))
static inline void atomic64_add(u64 i, atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_add\n"
"1:	ldxr	%0, [%3]\n"
"	add	%0, %0, %4\n"
"	stxr	%w1, %0, [%3]\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}
static inline long atomic64_add_return(long i, atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_add_return\n"
"1:	ldaxr	%0, [%3]\n"
"	add	%0, %0, %4\n"
"	stlxr	%w1, %0, [%3]\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	return result;
}
static inline void atomic64_sub(u64 i, atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_sub\n"
"1:	ldxr	%0, [%3]\n"
"	sub	%0, %0, %4\n"
"	stxr	%w1, %0, [%3]\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}
static inline long atomic64_sub_return(long i, atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_sub_return\n"
"1:	ldaxr	%0, [%3]\n"
"	sub	%0, %0, %4\n"
"	stlxr	%w1, %0, [%3]\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	return result;
}
static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
{
	long oldval;
	unsigned long res;

	asm volatile("// atomic64_cmpxchg\n"
"1:	ldaxr	%1, [%3]\n"
"	cmp	%1, %4\n"
"	b.ne	2f\n"
"	stlxr	%w0, %5, [%3]\n"
"	cbnz	%w0, 1b\n"
"2:"
	: "=&r" (res), "=&r" (oldval), "+o" (ptr->counter)
	: "r" (&ptr->counter), "Ir" (old), "r" (new)
	: "cc");

	return oldval;
}
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_dec_if_positive\n"
"1:	ldaxr	%0, [%3]\n"
"	subs	%0, %0, #1\n"
"	b.mi	2f\n"			/* result went negative: don't store */
"	stlxr	%w1, %0, [%3]\n"
"	cbnz	%w1, 1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
	: "r" (&v->counter)
	: "cc");

	return result;
}
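/*
 * atomic64_dec_if_positive() only performs the decrement when the result
 * stays non-negative, and returns the decremented value (a negative
 * return signals that no decrement took place).  Sketch, with a
 * hypothetical "budget" counter:
 *
 *	if (atomic64_dec_if_positive(&budget) < 0)
 *		return -EBUSY;
 */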
static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;

	c = atomic64_read(v);
	while (c != u && (old = atomic64_cmpxchg((v), c, c + a)) != c)
		c = old;

	return c != u;
}
#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)

#endif /* __KERNEL__ */
#endif /* __ASM_ATOMIC_H */