/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	(*(volatile int *)&(v)->counter)
#define atomic_set(v,i)	(((v)->counter) = (i))

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_add\n"
"1:	ldrex	%0, [%3]\n"
"	add	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

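/*
 * A note on the asm constraints used throughout these ops: "=&r"
 * marks result/tmp as early-clobber outputs so the compiler never
 * allocates them to the same register as an input, "+Qo" tells the
 * compiler that v->counter is both read and written (avoiding a
 * blanket "memory" clobber), and "Ir" allows the operand to be
 * either an ARM data-processing immediate or a register.
 */
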
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();

	__asm__ __volatile__("@ atomic_add_return\n"
"1:	ldrex	%0, [%3]\n"
"	add	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}

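/*
 * The smp_mb() on either side of the ldrex/strex loop is what gives
 * the value-returning ops their required full-barrier semantics;
 * plain atomic_add()/atomic_sub() imply no ordering.
 */
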
static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_sub\n"
"1:	ldrex	%0, [%3]\n"
"	sub	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();

	__asm__ __volatile__("@ atomic_sub_return\n"
"1:	ldrex	%0, [%3]\n"
"	sub	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	unsigned long oldval, res;

	smp_mb();

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"strexeq %0, %5, [%3]\n"
		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	smp_mb();

	return oldval;
}

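/*
 * Illustrative sketch (not part of the original header): a typical
 * lock-free read-modify-write built on atomic_cmpxchg(), here
 * doubling the counter:
 *
 *	int old, new;
 *	do {
 *		old = atomic_read(v);
 *		new = old * 2;
 *	} while (atomic_cmpxchg(v, old, new) != old);
 */
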
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__("@ atomic_clear_mask\n"
"1:	ldrex	%0, [%3]\n"
"	bic	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
	: "r" (addr), "Ir" (mask)
	: "cc");
}

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

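/*
 * Pre-ARMv6 CPUs lack ldrex/strex, so the fallbacks below make the
 * read-modify-write atomic by disabling interrupts around it; that
 * is sufficient only because such systems are uniprocessor.
 */
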
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val += i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_add(i, v)	(void) atomic_add_return(i, v)

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val -= i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long flags;

	raw_local_irq_save(flags);
	*addr &= ~mask;
	raw_local_irq_restore(flags);
}

#endif /* __LINUX_ARM_ARCH__ */

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c;
}

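/*
 * __atomic_add_unless() adds a to v unless v == u and returns the
 * old value either way; the generic atomic_add_unless() and
 * atomic_inc_not_zero() in <linux/atomic.h> are layered on top.
 */
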
#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)    (atomic_add_return(1, v))
#define atomic_dec_return(v)    (atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

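/*
 * atomic_inc()/atomic_dec() imply no ordering on ARM, so the
 * before/after hooks below must expand to full barriers.
 */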
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
	u64 __aligned(8) counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

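/*
 * The 64-bit ops below rely on ldrexd/strexd, which require a
 * naturally aligned doubleword (hence __aligned(8)).  In the asm
 * templates, %0 names the low register of a 64-bit pair and %H0 the
 * high register.
 */
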
static inline u64 atomic64_read(const atomic64_t *v)
{
	u64 result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrexd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, u64 i)
{
	u64 tmp;

	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	strexd	%0, %3, %H3, [%2]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

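/*
 * Unlike the 32-bit atomic_set(), a plain 64-bit store is not
 * single-copy atomic on ARM, so atomic64_set() above needs its own
 * ldrexd/strexd loop.
 */
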
static inline void atomic64_add(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	__asm__ __volatile__("@ atomic64_add\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	adds	%0, %0, %4\n"
"	adc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_add_return\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	adds	%0, %0, %4\n"
"	adc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline void atomic64_sub(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	__asm__ __volatile__("@ atomic64_sub\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%0, %0, %4\n"
"	sbc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_sub_return\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%0, %0, %4\n"
"	sbc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
{
	u64 oldval;
	unsigned long res;

	smp_mb();

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd		%1, %H1, [%3]\n"
		"mov		%0, #0\n"
		"teq		%1, %4\n"
		"teqeq		%H1, %H4\n"
		"strexdeq	%0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	smp_mb();

	return oldval;
}

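/*
 * Note the two-step compare above: strexdeq only executes when both
 * the low word (teq) and the high word (teqeq) match the expected
 * value.
 */
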
static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_xchg\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	strexd	%1, %4, %H4, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	smp_mb();

	return result;
}

static inline u64 atomic64_dec_if_positive(atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%0, %0, #1\n"
"	sbc	%H0, %H0, #0\n"
"	teq	%H0, #0\n"
"	bmi	2f\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter)
	: "cc");

	smp_mb();

	return result;
}

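/*
 * atomic64_dec_if_positive() only stores the decremented value when
 * the result is non-negative; the return value may be negative, in
 * which case the counter was left untouched.
 */
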
static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
{
	u64 val;
	unsigned long tmp;
	int ret = 1;

	smp_mb();

	__asm__ __volatile__("@ atomic64_add_unless\n"
"1:	ldrexd	%0, %H0, [%4]\n"
"	teq	%0, %5\n"
"	teqeq	%H0, %H5\n"
"	moveq	%1, #0\n"
"	beq	2f\n"
"	adds	%0, %0, %6\n"
"	adc	%H0, %H0, %H6\n"
"	strexd	%2, %0, %H0, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (ret)
		smp_mb();

	return ret;
}

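/*
 * atomic64_add_unless() returns non-zero iff the add was performed
 * (i.e. the counter did not equal u); the trailing smp_mb() is
 * conditional because nothing was written when it bails out.
 */
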
#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_ATOMIC_H */