#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */

typedef struct { int counter; } atomic_t;

#ifdef __KERNEL__
#include <linux/compiler.h>
#include <asm/synch.h>
#include <asm/asm-compat.h>
#include <asm/system.h>

#define ATOMIC_INIT(i)		{ (i) }

static __inline__ int atomic_read(const atomic_t *v)
{
	int t;

	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

static __inline__ void atomic_add(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3		# atomic_add\n\
	add	%0,%2,%0\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}

static __inline__ int atomic_add_return(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%2		# atomic_add_return\n\
	add	%0,%1,%0\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

static __inline__ void atomic_sub(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3		# atomic_sub\n\
	subf	%0,%2,%0\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}

static __inline__ int atomic_sub_return(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%2		# atomic_sub_return\n\
	subf	%0,%1,%0\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}

static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc");
}

static __inline__ int atomic_inc_return(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%1		# atomic_inc_return\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
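
/*
 * Illustrative sketch, not part of the original header: one common use
 * of atomic_inc_and_test() is a completion counter biased to start at
 * -1, so the increment that brings it back to zero identifies the final
 * completer.  The function name below is made up for the example.
 */
static __inline__ int example_is_last_completion(atomic_t *pending)
{
	/* Non-zero only for the caller whose increment made *pending zero. */
	return atomic_inc_and_test(pending);
}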

static __inline__ void atomic_dec(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc");
}

static __inline__ int atomic_dec_return(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%1		# atomic_dec_return\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}

#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%1		# atomic_add_unless\n\
	cmpw	0,%0,%3 \n\
	beq-	2f \n\
	add	%0,%2,%0 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%1 \n\
	bne-	1b \n"
	ISYNC_ON_SMP
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t != u;
}

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
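
/*
 * Illustrative sketch, not part of the original header: the usual
 * lookup-then-get pattern built on atomic_inc_not_zero().  The struct
 * and function names are made up for the example; the point is that a
 * reference is taken only while the count is still non-zero, so an
 * object whose last reference has already been dropped is never
 * resurrected.
 */
struct example_object {
	atomic_t refcount;
};

static __inline__ int example_object_tryget(struct example_object *obj)
{
	/* Returns non-zero on success, zero if the object is being freed. */
	return atomic_inc_not_zero(&obj->refcount);
}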

#define atomic_sub_and_test(a, v)	(atomic_sub_return((a), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_dec_return((v)) == 0)

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
	cmpwi	%0,1\n\
	addi	%0,%0,-1\n\
	blt-	2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"	: "=&b" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
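
/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * "take one token if any remain" helper built on
 * atomic_dec_if_positive().  Because the return value is the old value
 * minus one even when no decrement happened, a negative result means
 * the pool was already empty and nothing was taken.
 */
static __inline__ int example_take_token(atomic_t *pool)
{
	return atomic_dec_if_positive(pool) >= 0;
}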

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#ifdef __powerpc64__

typedef struct { long counter; } atomic64_t;

#define ATOMIC64_INIT(i)	{ (i) }

static __inline__ long atomic64_read(const atomic64_t *v)
{
	long t;

	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic64_set(atomic64_t *v, long i)
{
	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

static __inline__ void atomic64_add(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%3		# atomic64_add\n\
	add	%0,%2,%0\n\
	stdcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}

static __inline__ long atomic64_add_return(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%2		# atomic64_add_return\n\
	add	%0,%1,%0\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

static __inline__ void atomic64_sub(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%3		# atomic64_sub\n\
	subf	%0,%2,%0\n\
	stdcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}

static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%2		# atomic64_sub_return\n\
	subf	%0,%1,%0\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}

static __inline__ void atomic64_inc(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc");
}

static __inline__ long atomic64_inc_return(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%1		# atomic64_inc_return\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%1 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}

/*
 * atomic64_inc_and_test - increment and test
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
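
/*
 * Illustrative sketch, not part of the original header: a 64-bit
 * counter can be set to -(number of expected events) so that
 * atomic64_inc_and_test() fires exactly once, for the event that brings
 * it to zero.  Names are made up for the example.
 */
static __inline__ void example64_arm_counter(atomic64_t *remaining, long nr)
{
	/* Expect nr events; the nr-th increment makes the counter zero. */
	atomic64_set(remaining, -nr);
}

static __inline__ int example64_event_done(atomic64_t *remaining)
{
	return atomic64_inc_and_test(remaining);
}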

static __inline__ void atomic64_dec(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc");
}

static __inline__ long atomic64_dec_return(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%1		# atomic64_dec_return\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}

#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1.
 */
static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
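
/*
 * Illustrative sketch, not part of the original header: reserving one
 * unit from a 64-bit quota with atomic64_dec_if_positive().  A negative
 * return value means the quota was already exhausted and the counter
 * was left untouched.  Names are made up for the example.
 */
static __inline__ int example64_reserve_one(atomic64_t *quota)
{
	return atomic64_dec_if_positive(quota) >= 0;
}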

#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long t;

	__asm__ __volatile__ (
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%1		# atomic64_add_unless\n\
	cmpd	0,%0,%3 \n\
	beq-	2f \n\
	add	%0,%2,%0 \n"
"	stdcx.	%0,0,%1 \n\
	bne-	1b \n"
	ISYNC_ON_SMP
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t != u;
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
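
/*
 * Illustrative sketch, not part of the original header: using
 * atomic64_add_unless() to charge usage only while an account is live,
 * with a hypothetical sentinel value marking a torn-down account.  All
 * names here are made up for the example.
 */
#define EXAMPLE_ACCOUNT_DEAD	(-1L)

static __inline__ int example64_charge(atomic64_t *usage, long amount)
{
	/* Non-zero if the charge was applied, zero if the account is dead. */
	return atomic64_add_unless(usage, amount, EXAMPLE_ACCOUNT_DEAD);
}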

#endif /* __powerpc64__ */

#include <asm-generic/atomic.h>
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */