/*
 * arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H
#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__
/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	ACCESS_ONCE((v)->counter)
#define atomic_set(v, i)	(((v)->counter) = (i))
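
/*
 * A minimal usage sketch (illustrative only; the identifiers below are
 * hypothetical and not part of this header):
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	int n = atomic_read(&nr_users);	-- plain volatile load
 *	atomic_set(&nr_users, 0);	-- plain store, safe per the
 *					   clrex note above
 */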

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic_" #op "\n"			\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	smp_mb();							\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic_" #op "_return\n"		\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
									\
	smp_mb();							\
									\
	return result;							\
}

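/*
 * An equivalent C sketch of the ldrex/strex retry loop above, for readers
 * unfamiliar with the exclusive monitor. load_exclusive() and
 * store_exclusive() are hypothetical stand-ins for the ldrex and strex
 * instructions; strex fails (returns nonzero) if another observer may have
 * touched the location since the matching ldrex:
 *
 *	do {
 *		old = load_exclusive(&v->counter);
 *		new = old <op> i;
 *	} while (store_exclusive(&v->counter, new) != 0);
 *
 * The _return variants additionally wrap the loop in smp_mb() barriers so
 * they can be relied on for ordering.
 */
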
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	int oldval;
	unsigned long res;

	smp_mb();
	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"strexeq %0, %5, [%3]\n"
		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	smp_mb();

	return oldval;
}
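
/*
 * Typical caller-side pattern (a sketch; atomic_max() is a hypothetical
 * helper, not provided by this header): build a lock-free read-modify-write
 * out of atomic_cmpxchg() by retrying until no other CPU raced with us:
 *
 *	static inline void atomic_max(atomic_t *v, int i)
 *	{
 *		int old = atomic_read(v);
 *
 *		while (old < i) {
 *			int seen = atomic_cmpxchg(v, old, i);
 *			if (seen == old)
 *				break;		-- we won the race
 *			old = seen;		-- somebody else moved it
 *		}
 *	}
 */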

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__ ("@ atomic_add_unless\n"
"1:	ldrex	%0, [%4]\n"
"	teq	%0, %5\n"
"	beq	2f\n"
"	add	%1, %0, %6\n"
"	strex	%2, %1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
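
/*
 * __atomic_add_unless() is the arch backend for atomic_add_unless() and
 * hence atomic_inc_not_zero() in <linux/atomic.h>. The classic use is
 * taking a reference only while an object is still live (a sketch;
 * get_obj() and struct obj are hypothetical):
 *
 *	static bool get_obj(struct obj *o)
 *	{
 *		return atomic_inc_not_zero(&o->refcount);
 *	}
 *
 * Note the conditional smp_mb(): no barrier is issued on the failure path,
 * since no update was performed.
 */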

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	raw_local_irq_restore(flags);					\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int val;							\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	val = v->counter;						\
	raw_local_irq_restore(flags);					\
									\
	return val;							\
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}
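
/*
 * On these pre-ARMv6 (and therefore, per the #error above, strictly
 * uniprocessor) configurations, disabling interrupts around the plain
 * load/compare/store is sufficient: the only agent that could interleave
 * with the sequence is an interrupt handler on the same CPU.
 */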

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c;
}

#endif /* __LINUX_ARM_ARCH__ */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
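
/*
 * The two ATOMIC_OPS() instantiations above generate four functions,
 * using whichever macro bodies (ldrex/strex or irq-disable) were selected:
 *
 *	void atomic_add(int i, atomic_t *v);
 *	void atomic_sub(int i, atomic_t *v);
 *	int  atomic_add_return(int i, atomic_t *v);
 *	int  atomic_sub_return(int i, atomic_t *v);
 */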

#define atomic_xchg(v, new)	(xchg(&((v)->counter), new))

#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)	(atomic_add_return(1, v))
#define atomic_dec_return(v)	(atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
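
/*
 * A sketch of the canonical refcounting idiom built on these helpers
 * (illustrative; struct obj, obj_get() and obj_put() are hypothetical):
 *
 *	static void obj_get(struct obj *o)
 *	{
 *		atomic_inc(&o->refcount);
 *	}
 *
 *	static void obj_put(struct obj *o)
 *	{
 *		if (atomic_dec_and_test(&o->refcount))
 *			kfree(o);	-- last reference dropped
 *	}
 *
 * atomic_dec_and_test() is used because it makes the decrement and the
 * "did I drop the last reference?" decision a single atomic step.
 */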

#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
	long long counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

#ifdef CONFIG_ARM_LPAE
static inline long long atomic64_read(const atomic64_t *v)
{
	long long result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	__asm__ __volatile__("@ atomic64_set\n"
"	strd	%2, %H2, [%1]"
	: "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	);
}
#else
static inline long long atomic64_read(const atomic64_t *v)
{
	long long result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrexd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	long long tmp;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	strexd	%0, %3, %H3, [%2]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
#endif
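
/*
 * Why the two variants: with LPAE, a 64-bit ldrd/strd to a naturally
 * aligned doubleword is single-copy atomic, so a plain load/store pair
 * suffices. Without LPAE it is not, so atomic64_set() must go through an
 * ldrexd/strexd loop to avoid tearing against concurrent ldrexd-based
 * readers and updaters. (The %H0 operand modifier names the high half of
 * a 64-bit register pair.)
 */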

#define ATOMIC64_OP(op, op1, op2)					\
static inline void atomic64_##op(long long i, atomic64_t *v)		\
{									\
	long long result;						\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic64_" #op "\n"			\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
}									\

#define ATOMIC64_OP_RETURN(op, op1, op2)				\
static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
{									\
	long long result;						\
	unsigned long tmp;						\
									\
	smp_mb();							\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic64_" #op "_return\n"		\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
									\
	smp_mb();							\
									\
	return result;							\
}

#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_OP_RETURN(op, op1, op2)

ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)

#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
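
/*
 * The op1/op2 pairs implement 64-bit arithmetic on 32-bit register halves:
 * adds/subs set the carry/borrow flag on the low word (%Q0), then adc/sbc
 * fold it into the high word (%R0). Conceptually, for an add (a sketch in
 * plain C; the lo_*/hi_* names are hypothetical):
 *
 *	lo = lo_v + lo_i;		-- "adds", records carry-out
 *	carry = (lo < lo_v);
 *	hi = hi_v + hi_i + carry;	-- "adc", consumes the carry
 */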

static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
					 long long new)
{
	long long oldval;
	unsigned long res;

	smp_mb();
	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd	%1, %H1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"teqeq	%H1, %H4\n"
		"strexdeq %0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	smp_mb();

	return oldval;
}

static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
{
	long long result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&ptr->counter);

	__asm__ __volatile__("@ atomic64_xchg\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	strexd	%1, %4, %H4, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	smp_mb();

	return result;
}

static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%Q0, %Q0, #1\n"
"	sbc	%R0, %R0, #0\n"
"	teq	%R0, #0\n"
"	bmi	2f\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter)
	: "cc");

	smp_mb();

	return result;
}
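
/*
 * atomic64_dec_if_positive() stores the decrement only when the result is
 * non-negative, and returns the (possibly not stored) decremented value.
 * A semaphore-like sketch (try_take() is a hypothetical helper):
 *
 *	static bool try_take(atomic64_t *avail)
 *	{
 *		return atomic64_dec_if_positive(avail) >= 0;
 *	}
 */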

static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	long long val;
	unsigned long tmp;
	int ret = 1;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_add_unless\n"
"1:	ldrexd	%0, %H0, [%4]\n"
"	teq	%0, %5\n"
"	teqeq	%H0, %H5\n"
"	moveq	%1, #0\n"
"	beq	2f\n"
"	adds	%Q0, %Q0, %Q6\n"
"	adc	%R0, %R0, %R6\n"
"	strexd	%2, %0, %H0, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (ret)
		smp_mb();

	return ret;
}

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
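
/*
 * Usage sketch (illustrative; the identifiers are hypothetical): a 64-bit
 * statistics counter that must not wrap after 4G events, as a 32-bit
 * atomic_t would:
 *
 *	static atomic64_t rx_bytes = ATOMIC64_INIT(0);
 *
 *	atomic64_add(len, &rx_bytes);
 *	total = atomic64_read(&rx_bytes);
 */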

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_ATOMIC_H */