arch/arm64/include/asm/atomic.h
/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_ATOMIC_H
#define __ASM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>

#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	ACCESS_ONCE((v)->counter)
#define atomic_set(v,i)	(((v)->counter) = (i))

/*
 * AArch64 UP and SMP safe atomic ops. We use load exclusive and
 * store exclusive to ensure that these are atomic. We may loop
 * to ensure that the update happens.
 */

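/*
 * ATOMIC_OP() generates the void atomic_add()/atomic_sub() operations,
 * which imply no memory barrier. ATOMIC_OP_RETURN() generates the
 * value-returning variants, which are fully ordered: the update is a
 * store-release (stlxr) and is followed by an smp_mb().
 */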
#define ATOMIC_OP(op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
	unsigned long tmp; \
	int result; \
 \
	asm volatile("// atomic_" #op "\n" \
"1:	ldxr	%w0, %2\n" \
"	" #asm_op "	%w0, %w0, %w3\n" \
"	stxr	%w1, %w0, %2\n" \
"	cbnz	%w1, 1b" \
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
	: "Ir" (i)); \
} \

#define ATOMIC_OP_RETURN(op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
	unsigned long tmp; \
	int result; \
 \
	asm volatile("// atomic_" #op "_return\n" \
"1:	ldxr	%w0, %2\n" \
"	" #asm_op "	%w0, %w0, %w3\n" \
"	stlxr	%w1, %w0, %2\n" \
"	cbnz	%w1, 1b" \
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
	: "Ir" (i) \
	: "memory"); \
 \
	smp_mb(); \
	return result; \
}

#define ATOMIC_OPS(op, asm_op) \
	ATOMIC_OP(op, asm_op) \
	ATOMIC_OP_RETURN(op, asm_op)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, sub)

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

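/*
 * atomic_cmpxchg() returns the value previously held in *ptr; the store of
 * 'new' is only attempted when that value equals 'old'. The smp_mb() calls
 * either side of the exclusive-access loop give the operation full barrier
 * semantics.
 */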
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	unsigned long tmp;
	int oldval;

	smp_mb();

	asm volatile("// atomic_cmpxchg\n"
"1:	ldxr	%w1, %2\n"
"	cmp	%w1, %w3\n"
"	b.ne	2f\n"
"	stxr	%w0, %w4, %2\n"
"	cbnz	%w0, 1b\n"
"2:"
	: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
	: "Ir" (old), "r" (new)
	: "cc");

	smp_mb();
	return oldval;
}

#define atomic_xchg(v, new)	(xchg(&((v)->counter), new))

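/*
 * __atomic_add_unless() adds 'a' to *v unless *v holds 'u', looping on the
 * cmpxchg until a stable value is observed; the value seen before any
 * addition is returned.
 */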
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c;
}

#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)	(atomic_add_return(1, v))
#define atomic_dec_return(v)	(atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

/*
 * 64-bit atomic operations.
 */
#define ATOMIC64_INIT(i)	{ (i) }

#define atomic64_read(v)	ACCESS_ONCE((v)->counter)
#define atomic64_set(v,i)	(((v)->counter) = (i))

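/*
 * The 64-bit generators below mirror ATOMIC_OP()/ATOMIC_OP_RETURN() above,
 * operating on the full 64-bit registers rather than the 32-bit %w views.
 */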
#define ATOMIC64_OP(op, asm_op) \
static inline void atomic64_##op(long i, atomic64_t *v) \
{ \
	long result; \
	unsigned long tmp; \
 \
	asm volatile("// atomic64_" #op "\n" \
"1:	ldxr	%0, %2\n" \
"	" #asm_op "	%0, %0, %3\n" \
"	stxr	%w1, %0, %2\n" \
"	cbnz	%w1, 1b" \
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
	: "Ir" (i)); \
} \

#define ATOMIC64_OP_RETURN(op, asm_op) \
static inline long atomic64_##op##_return(long i, atomic64_t *v) \
{ \
	long result; \
	unsigned long tmp; \
 \
	asm volatile("// atomic64_" #op "_return\n" \
"1:	ldxr	%0, %2\n" \
"	" #asm_op "	%0, %0, %3\n" \
"	stlxr	%w1, %0, %2\n" \
"	cbnz	%w1, 1b" \
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
	: "Ir" (i) \
	: "memory"); \
 \
	smp_mb(); \
	return result; \
}

#define ATOMIC64_OPS(op, asm_op) \
	ATOMIC64_OP(op, asm_op) \
	ATOMIC64_OP_RETURN(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, sub)

#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

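/*
 * atomic64_cmpxchg() behaves as atomic_cmpxchg() above, but on a 64-bit
 * counter.
 */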
static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
{
	long oldval;
	unsigned long res;

	smp_mb();

	asm volatile("// atomic64_cmpxchg\n"
"1:	ldxr	%1, %2\n"
"	cmp	%1, %3\n"
"	b.ne	2f\n"
"	stxr	%w0, %4, %2\n"
"	cbnz	%w0, 1b\n"
"2:"
	: "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
	: "Ir" (old), "r" (new)
	: "cc");

	smp_mb();
	return oldval;
}

#define atomic64_xchg(v, new)	(xchg(&((v)->counter), new))

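/*
 * atomic64_dec_if_positive() decrements *v only when the result would not be
 * negative and returns the decremented value; if the result would be
 * negative, *v is left unchanged and the (negative) result is returned.
 * The dmb ish on the successful path provides the full barrier.
 */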
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_dec_if_positive\n"
"1:	ldxr	%0, %2\n"
"	subs	%0, %0, #1\n"
"	b.mi	2f\n"
"	stlxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b\n"
"	dmb	ish\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	:
	: "cc", "memory");

	return result;
}

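/*
 * atomic64_add_unless() adds 'a' to *v unless *v holds 'u'; unlike the
 * 32-bit __atomic_add_unless() it returns whether the addition took place
 * rather than the old value.
 */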
static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;

	c = atomic64_read(v);
	while (c != u && (old = atomic64_cmpxchg((v), c, c + a)) != c)
		c = old;

	return c != u;
}

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)

#endif
#endif