#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

#include <linux/compiler.h>
#include <linux/types.h>

/*
 * include/asm-s390/atomic.h
 *
 * S390 version
 *    Copyright (C) 1999-2005 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Denis Joseph Barrow,
 *               Arnd Bergmann (arndb@de.ibm.com)
 *
 * Derived from "include/asm-i386/bitops.h"
 *    Copyright (C) 1992, Linus Torvalds
 */

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 * S390 uses 'Compare And Swap' for atomicity in an SMP environment.
 */
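/*
 * Typical use (illustrative sketch only; free_the_object() is a
 * hypothetical cleanup helper, not part of this interface):
 *
 *	static atomic_t refcount = ATOMIC_INIT(1);
 *
 *	atomic_inc(&refcount);
 *	if (atomic_dec_and_test(&refcount))
 *		free_the_object();
 */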
#define ATOMIC_INIT(i)  { (i) }

#ifdef __KERNEL__
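/*
 * __CS_LOOP generates a load / modify / compare-and-swap retry loop:
 * the counter is loaded, op_string (add, subtract, and, or) is applied
 * to a copy, and CS stores the result only if the counter is still
 * unchanged.  On contention CS reloads the current value and the loop
 * retries.  The expression evaluates to the new counter value.
 */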
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
#define __CS_LOOP(ptr, op_val, op_string) ({				\
	typeof(ptr->counter) old_val, new_val;				\
	asm volatile(							\
		"	l	%0,%2\n"				\
		"0:	lr	%1,%0\n"				\
		op_string "	%1,%3\n"				\
		"	cs	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val),			\
		  "=Q" (((atomic_t *)(ptr))->counter)			\
		: "d" (op_val), "Q" (((atomic_t *)(ptr))->counter)	\
		: "cc", "memory");					\
	new_val;							\
})
#else /* __GNUC__ */

#define __CS_LOOP(ptr, op_val, op_string) ({				\
	typeof(ptr->counter) old_val, new_val;				\
	asm volatile(							\
		"	l	%0,0(%3)\n"				\
		"0:	lr	%1,%0\n"				\
		op_string "	%1,%4\n"				\
		"	cs	%0,%1,0(%3)\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val),			\
		  "=m" (((atomic_t *)(ptr))->counter)			\
		: "a" (ptr), "d" (op_val),				\
		  "m" (((atomic_t *)(ptr))->counter)			\
		: "cc", "memory");					\
	new_val;							\
})

#endif /* __GNUC__ */
static inline int atomic_read(const atomic_t *v)
{
	barrier();
	return v->counter;
}

static inline void atomic_set(atomic_t *v, int i)
{
	v->counter = i;
	barrier();
}
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
	return __CS_LOOP(v, i, "ar");
}
#define atomic_add(_i, _v)		atomic_add_return(_i, _v)
#define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
#define atomic_inc(_v)			atomic_add_return(1, _v)
#define atomic_inc_return(_v)		atomic_add_return(1, _v)
#define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)
static __inline__ int atomic_sub_return(int i, atomic_t *v)
{
	return __CS_LOOP(v, i, "sr");
}
#define atomic_sub(_i, _v)		atomic_sub_return(_i, _v)
#define atomic_sub_and_test(_i, _v)	(atomic_sub_return(_i, _v) == 0)
#define atomic_dec(_v)			atomic_sub_return(1, _v)
#define atomic_dec_return(_v)		atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)
static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t *v)
{
	__CS_LOOP(v, ~mask, "nr");
}

static __inline__ void atomic_set_mask(unsigned long mask, atomic_t *v)
{
	__CS_LOOP(v, mask, "or");
}
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
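/*
 * atomic_cmpxchg: store new into v only if the counter still contains
 * old.  CS leaves the value actually found in memory in the first
 * operand, so the caller can tell from the return value whether the
 * exchange happened (return == old) or lost a race.
 */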
static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
{
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
	asm volatile(
		"	cs	%0,%2,%1"
		: "+d" (old), "=Q" (v->counter)
		: "d" (new), "Q" (v->counter)
		: "cc", "memory");
#else /* __GNUC__ */
	asm volatile(
		"	cs	%0,%3,0(%2)"
		: "+d" (old), "=m" (v->counter)
		: "a" (v), "d" (new), "m" (v->counter)
		: "cc", "memory");
#endif /* __GNUC__ */
	return old;
}
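/*
 * atomic_add_unless: add a to v unless the counter already holds u.
 * Implemented as an open-coded cmpxchg loop; returns non-zero if the
 * add was performed.
 */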
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

#undef __CS_LOOP

#ifdef __s390x__
#define ATOMIC64_INIT(i)  { (i) }
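/*
 * The atomic64_* operations below mirror the 32-bit ones above, using
 * the 64-bit instructions (LG/LGR/CSG and the *GR arithmetic forms)
 * on the 64-bit counter.
 */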
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
#define __CSG_LOOP(ptr, op_val, op_string) ({				\
	typeof(ptr->counter) old_val, new_val;				\
	asm volatile(							\
		"	lg	%0,%2\n"				\
		"0:	lgr	%1,%0\n"				\
		op_string "	%1,%3\n"				\
		"	csg	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val),			\
		  "=Q" (((atomic_t *)(ptr))->counter)			\
		: "d" (op_val), "Q" (((atomic_t *)(ptr))->counter)	\
		: "cc", "memory" );					\
	new_val;							\
})
#else /* __GNUC__ */

#define __CSG_LOOP(ptr, op_val, op_string) ({				\
	typeof(ptr->counter) old_val, new_val;				\
	asm volatile(							\
		"	lg	%0,0(%3)\n"				\
		"0:	lgr	%1,%0\n"				\
		op_string "	%1,%4\n"				\
		"	csg	%0,%1,0(%3)\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val),			\
		  "=m" (((atomic_t *)(ptr))->counter)			\
		: "a" (ptr), "d" (op_val),				\
		  "m" (((atomic_t *)(ptr))->counter)			\
		: "cc", "memory" );					\
	new_val;							\
})

#endif /* __GNUC__ */
static inline long long atomic64_read(const atomic64_t *v)
{
	barrier();
	return v->counter;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	v->counter = i;
	barrier();
}
static __inline__ long long atomic64_add_return(long long i, atomic64_t *v)
{
	return __CSG_LOOP(v, i, "agr");
}
#define atomic64_add(_i, _v)		atomic64_add_return(_i, _v)
#define atomic64_add_negative(_i, _v)	(atomic64_add_return(_i, _v) < 0)
#define atomic64_inc(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
static __inline__ long long atomic64_sub_return(long long i, atomic64_t *v)
{
	return __CSG_LOOP(v, i, "sgr");
}
#define atomic64_sub(_i, _v)		atomic64_sub_return(_i, _v)
#define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)
static __inline__ void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
{
	__CSG_LOOP(v, ~mask, "ngr");
}

static __inline__ void atomic64_set_mask(unsigned long mask, atomic64_t *v)
{
	__CSG_LOOP(v, mask, "ogr");
}
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
static __inline__ long long atomic64_cmpxchg(atomic64_t *v,
					     long long old, long long new)
{
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
	asm volatile(
		"	csg	%0,%2,%1"
		: "+d" (old), "=Q" (v->counter)
		: "d" (new), "Q" (v->counter)
		: "cc", "memory");
#else /* __GNUC__ */
	asm volatile(
		"	csg	%0,%3,0(%2)"
		: "+d" (old), "=m" (v->counter)
		: "a" (v), "d" (new), "m" (v->counter)
		: "cc", "memory");
#endif /* __GNUC__ */
	return old;
}
static __inline__ int atomic64_add_unless(atomic64_t *v,
					  long long a, long long u)
{
	long long c, old;

	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic64_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#undef __CSG_LOOP
#endif /* __s390x__ */
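/*
 * Barriers for use around atomic_inc/atomic_dec when the caller needs
 * explicit memory ordering; mapped to full smp_mb() here.
 */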
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()
#include <asm-generic/atomic.h>

#endif /* __KERNEL__ */
#endif /* __ARCH_S390_ATOMIC__ */