/*
 * Copyright IBM Corp. 1999, 2009
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Denis Joseph Barrow,
 *	      Arnd Bergmann <arndb@de.ibm.com>,
 *
 * Atomic operations that C can't guarantee us.
 * Useful for resource counting etc.
 * s390 uses 'Compare And Swap' for atomicity in SMP environment.
 *
 */

#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)  { (i) }
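
/*
 * Usage sketch for the generic atomic_t API this header implements
 * (cleanup() is a placeholder, not a function defined here):
 *
 *	static atomic_t refcnt = ATOMIC_INIT(1);
 *
 *	atomic_inc(&refcnt);
 *	if (atomic_dec_and_test(&refcnt))
 *		cleanup();
 */
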
#define __ATOMIC_NO_BARRIER	"\n"

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
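
/*
 * The interlocked-access facility of z196 and newer machines provides
 * lao/lan/laa ("load and or/and/add"), which update the storage operand
 * atomically and return its old contents in a single instruction.
 * "bcr 14,0" is the fast serialization function, used here as a full
 * memory barrier.
 */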
#define __ATOMIC_OR	"lao"
#define __ATOMIC_AND	"lan"
#define __ATOMIC_ADD	"laa"
#define __ATOMIC_BARRIER "bcr	14,0\n"

#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier)	\
({								\
	int old_val;						\
								\
	typecheck(atomic_t *, ptr);				\
	asm volatile(						\
		__barrier					\
		op_string "	%0,%2,%1\n"			\
		__barrier					\
		: "=d" (old_val), "+Q" ((ptr)->counter)		\
		: "d" (op_val)					\
		: "cc", "memory");				\
	old_val;						\
})

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#define __ATOMIC_OR	"or"
#define __ATOMIC_AND	"nr"
#define __ATOMIC_ADD	"ar"
#define __ATOMIC_BARRIER "\n"
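
/*
 * Fallback for pre-z196 machines: load the old value, apply the
 * operation in a register and retry with COMPARE AND SWAP until no
 * other CPU has modified the counter in between ("jl 0b" branches
 * back while CS fails).
 */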
#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier)	\
({								\
	int old_val, new_val;					\
								\
	typecheck(atomic_t *, ptr);				\
	asm volatile(						\
		"	l	%0,%2\n"			\
		"0:	lr	%1,%0\n"			\
		op_string "	%1,%3\n"			\
		"	cs	%0,%1,%2\n"			\
		"	jl	0b"				\
		: "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
		: "d" (op_val)					\
		: "cc", "memory");				\
	old_val;						\
})

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

static inline int atomic_read(const atomic_t *v)
{
	int c;

	asm volatile(
		"	l	%0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic_set(atomic_t *v, int i)
{
	asm volatile(
		"	st	%1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER) + i;
}
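
/*
 * For compile-time constants that fit the signed 8-bit immediate of
 * "asi" (hence the -129 < i < 128 check), a single ADD IMMEDIATE to
 * storage suffices; with the interlocked-access facility asi updates
 * the counter atomically.
 */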
static inline void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
		asm volatile(
			"asi	%0,%1\n"
			: "+Q" (v->counter)
			: "i" (i)
			: "cc", "memory");
		return;
	}
#endif
	__ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_NO_BARRIER);
}

#define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
#define atomic_inc(_v)			atomic_add(1, _v)
#define atomic_inc_return(_v)		atomic_add_return(1, _v)
#define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)
#define atomic_sub(_i, _v)		atomic_add(-(int)(_i), _v)
#define atomic_sub_return(_i, _v)	atomic_add_return(-(int)(_i), _v)
#define atomic_sub_and_test(_i, _v)	(atomic_sub_return(_i, _v) == 0)
#define atomic_dec(_v)			atomic_sub(1, _v)
#define atomic_dec_return(_v)		atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)

static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	__ATOMIC_LOOP(v, ~mask, __ATOMIC_AND, __ATOMIC_NO_BARRIER);
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	__ATOMIC_LOOP(v, mask, __ATOMIC_OR, __ATOMIC_NO_BARRIER);
}

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
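
/*
 * CS stores "new" only if the counter still contains "old"; in either
 * case "old" ends up holding the previous counter value, which is
 * exactly the cmpxchg return convention.
 */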
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	asm volatile(
		"	cs	%0,%2,%1"
		: "+d" (old), "+Q" (v->counter)
		: "d" (new)
		: "cc", "memory");
	return old;
}
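
/*
 * Optimistic cmpxchg loop: retry with a freshly observed counter value
 * and give up as soon as the counter equals u.
 */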
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}

#undef __ATOMIC_LOOP

#define ATOMIC64_INIT(i)  { (i) }

#ifdef CONFIG_64BIT

#define __ATOMIC64_NO_BARRIER	"\n"

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __ATOMIC64_OR	"laog"
#define __ATOMIC64_AND	"lang"
#define __ATOMIC64_ADD	"laag"
#define __ATOMIC64_BARRIER "bcr	14,0\n"

#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier)	\
({								\
	long long old_val;					\
								\
	typecheck(atomic64_t *, ptr);				\
	asm volatile(						\
		__barrier					\
		op_string "	%0,%2,%1\n"			\
		__barrier					\
		: "=d" (old_val), "+Q" ((ptr)->counter)		\
		: "d" (op_val)					\
		: "cc", "memory");				\
	old_val;						\
})

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#define __ATOMIC64_OR	"ogr"
#define __ATOMIC64_AND	"ngr"
#define __ATOMIC64_ADD	"agr"
#define __ATOMIC64_BARRIER "\n"

#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier)	\
({								\
	long long old_val, new_val;				\
								\
	typecheck(atomic64_t *, ptr);				\
	asm volatile(						\
		"	lg	%0,%2\n"			\
		"0:	lgr	%1,%0\n"			\
		op_string "	%1,%3\n"			\
		"	csg	%0,%1,%2\n"			\
		"	jl	0b"				\
		: "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
		: "d" (op_val)					\
		: "cc", "memory");				\
	old_val;						\
})

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

static inline long long atomic64_read(const atomic64_t *v)
{
	long long c;

	asm volatile(
		"	lg	%0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	asm volatile(
		"	stg	%1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER) + i;
}

static inline void atomic64_add(long long i, atomic64_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
		asm volatile(
			"agsi	%0,%1\n"
			: "+Q" (v->counter)
			: "i" (i)
			: "cc", "memory");
		return;
	}
#endif
	__ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER);
}

static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
{
	__ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND, __ATOMIC64_NO_BARRIER);
}

static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
{
	__ATOMIC64_LOOP(v, mask, __ATOMIC64_OR, __ATOMIC64_NO_BARRIER);
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline long long atomic64_cmpxchg(atomic64_t *v,
					 long long old, long long new)
{
	asm volatile(
		"	csg	%0,%2,%1"
		: "+d" (old), "+Q" (v->counter)
		: "d" (new)
		: "cc", "memory");
	return old;
}

#undef __ATOMIC64_LOOP

#else /* CONFIG_64BIT */
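
/*
 * A 31-bit kernel has no 64-bit arithmetic instructions, so atomic64_t
 * is kept in an even/odd register pair: LM/STM move both words at once
 * and CDS (COMPARE DOUBLE AND SWAP) provides the atomic update.
 */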
typedef struct {
	long long counter;
} atomic64_t;

static inline long long atomic64_read(const atomic64_t *v)
{
	register_pair rp;

	asm volatile(
		"	lm	%0,%N0,%1"
		: "=&d" (rp) : "Q" (v->counter));
	return rp.pair;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	register_pair rp = {.pair = i};

	asm volatile(
		"	stm	%1,%N1,%0"
		: "=Q" (v->counter) : "d" (rp));
}

static inline long long atomic64_xchg(atomic64_t *v, long long new)
{
	register_pair rp_new = {.pair = new};
	register_pair rp_old;

	asm volatile(
		"	lm	%0,%N0,%1\n"
		"0:	cds	%0,%2,%1\n"
		"	jl	0b\n"
		: "=&d" (rp_old), "+Q" (v->counter)
		: "d" (rp_new)
		: "cc");
	return rp_old.pair;
}

static inline long long atomic64_cmpxchg(atomic64_t *v,
					 long long old, long long new)
{
	register_pair rp_old = {.pair = old};
	register_pair rp_new = {.pair = new};

	asm volatile(
		"	cds	%0,%2,%1"
		: "+&d" (rp_old), "+Q" (v->counter)
		: "d" (rp_new)
		: "cc");
	return rp_old.pair;
}
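
/*
 * With the cds-based cmpxchg above, the remaining 64-bit operations
 * can be expressed as plain C retry loops on top of it.
 */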
static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old + i;
	} while (atomic64_cmpxchg(v, old, new) != old);
	return new;
}

static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old | mask;
	} while (atomic64_cmpxchg(v, old, new) != old);
}

static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old & ~mask;
	} while (atomic64_cmpxchg(v, old, new) != old);
}

static inline void atomic64_add(long long i, atomic64_t *v)
{
	atomic64_add_return(i, v);
}

#endif /* CONFIG_64BIT */
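
/*
 * Add i to the counter unless it currently equals u; returns true iff
 * the add was performed (atomic64_inc_not_zero() below relies on this).
 */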
static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
{
	long long c, old;

	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic64_cmpxchg(v, c, c + i);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}
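
/*
 * Decrement only if the result stays non-negative; returns the
 * decremented value, or a negative value if the counter was left
 * unchanged.
 */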
static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long c, old, dec;

	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}

#define atomic64_add_negative(_i, _v)	(atomic64_add_return(_i, _v) < 0)
#define atomic64_inc(_v)		atomic64_add(1, _v)
#define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(long long)(_i), _v)
#define atomic64_sub(_i, _v)		atomic64_add(-(long long)(_i), _v)
#define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v)		atomic64_sub(1, _v)
#define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#endif /* __ARCH_S390_ATOMIC__ */