#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

#include <linux/compiler.h>
/*
 *  include/asm-s390/atomic.h
 *
 *  S390 version
 *    Copyright (C) 1999-2005 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Denis Joseph Barrow,
 *               Arnd Bergmann (arndb@de.ibm.com)
 *
 *  Derived from "include/asm-i386/bitops.h"
 *    Copyright (C) 1992, Linus Torvalds
 *
 */
/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 * S390 uses 'Compare And Swap' for atomicity in an SMP environment.
 */
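/*
 * Illustrative sketch, not part of the original header: every helper
 * below boils down to the same Compare And Swap retry idiom.  In
 * portable C, an atomic increment built from the atomic_read() and
 * atomic_cmpxchg() wrappers defined further down would read roughly:
 */
#if 0
static void example_inc(atomic_t *v)
{
	int old, new;

	do {
		old = atomic_read(v);	/* snapshot the counter */
		new = old + 1;		/* compute the update   */
	} while (atomic_cmpxchg(v, old, new) != old);	/* raced: retry */
}
#endif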
typedef struct {
	volatile int counter;
} __attribute__ ((aligned (4))) atomic_t;
#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__
/*
 * Load the old value, apply op_string to compute the new value, then
 * try to Compare And Swap it back; jl retries if another CPU changed
 * the counter in between.
 */
#define __CS_LOOP(ptr, op_val, op_string) ({				\
	typeof(ptr->counter) old_val, new_val;				\
	__asm__ __volatile__("   l     %0,0(%3)\n"			\
			     "0: lr    %1,%0\n"				\
			     op_string "  %1,%4\n"			\
			     "   cs    %0,%1,0(%3)\n"			\
			     "   jl    0b"				\
			     : "=&d" (old_val), "=&d" (new_val),	\
			       "=m" (((atomic_t *)(ptr))->counter)	\
			     : "a" (ptr), "d" (op_val),			\
			       "m" (((atomic_t *)(ptr))->counter)	\
			     : "cc", "memory" );			\
	new_val;							\
})
#define atomic_read(v)		((v)->counter)
#define atomic_set(v,i)		(((v)->counter) = (i))
static __inline__ int atomic_add_return(int i, atomic_t * v)
{
	return __CS_LOOP(v, i, "ar");
}
#define atomic_add(_i, _v)		atomic_add_return(_i, _v)
#define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
#define atomic_inc(_v)			atomic_add_return(1, _v)
#define atomic_inc_return(_v)		atomic_add_return(1, _v)
#define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)
static __inline__ int atomic_sub_return(int i, atomic_t * v)
{
	return __CS_LOOP(v, i, "sr");
}
#define atomic_sub(_i, _v)		atomic_sub_return(_i, _v)
#define atomic_sub_and_test(_i, _v)	(atomic_sub_return(_i, _v) == 0)
#define atomic_dec(_v)			atomic_sub_return(1, _v)
#define atomic_dec_return(_v)		atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)
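/*
 * Usage sketch (illustrative; "struct example" and example_free() are
 * hypothetical, not kernel APIs): atomic_dec_and_test() is true only
 * for the caller that drops the final reference, so exactly one CPU
 * runs the release path.
 */
#if 0
static void example_put(struct example *p)
{
	if (atomic_dec_and_test(&p->refcount))
		example_free(p);
}
#endif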
static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t * v)
{
	__CS_LOOP(v, ~mask, "nr");
}
static __inline__ void atomic_set_mask(unsigned long mask, atomic_t * v)
{
	__CS_LOOP(v, mask, "or");
}
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
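/*
 * Usage sketch (illustrative; "example_claim" is hypothetical):
 * atomic_xchg() swaps in a new value and returns the old one in a
 * single atomic step, e.g. to consume accumulated work exactly once.
 */
#if 0
static int example_claim(atomic_t *pending)
{
	return atomic_xchg(pending, 0);		/* consume and clear */
}
#endif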
static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	__asm__ __volatile__("  cs   %0,%3,0(%2)\n"
			     : "+d" (old), "=m" (v->counter)
			     : "a" (v), "d" (new), "m" (v->counter)
			     : "cc", "memory" );
	return old;
}
/* Atomically add a to v unless v == u; returns non-zero if v was not u. */
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
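/*
 * Usage sketch (illustrative; "struct example" and its refcount are
 * hypothetical): atomic_inc_not_zero() takes a reference only while
 * the object is still live, the classic lookup-side idiom for
 * lockless structures.
 */
#if 0
static struct example *example_lookup(struct example *obj)
{
	if (!atomic_inc_not_zero(&obj->refcount))
		return NULL;	/* object is already being torn down */
	return obj;
}
#endif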
#ifdef __s390x__
typedef struct {
	volatile long long counter;
} __attribute__ ((aligned (8))) atomic64_t;
#define ATOMIC64_INIT(i)	{ (i) }
/* 64-bit variant of __CS_LOOP, using LG/CSG instead of L/CS. */
#define __CSG_LOOP(ptr, op_val, op_string) ({				\
	typeof(ptr->counter) old_val, new_val;				\
	__asm__ __volatile__("   lg    %0,0(%3)\n"			\
			     "0: lgr   %1,%0\n"				\
			     op_string "  %1,%4\n"			\
			     "   csg   %0,%1,0(%3)\n"			\
			     "   jl    0b"				\
			     : "=&d" (old_val), "=&d" (new_val),	\
			       "=m" (((atomic64_t *)(ptr))->counter)	\
			     : "a" (ptr), "d" (op_val),			\
			       "m" (((atomic64_t *)(ptr))->counter)	\
			     : "cc", "memory" );			\
	new_val;							\
})
#define atomic64_read(v)	((v)->counter)
#define atomic64_set(v,i)	(((v)->counter) = (i))
static __inline__ long long atomic64_add_return(long long i, atomic64_t * v)
{
	return __CSG_LOOP(v, i, "agr");
}
#define atomic64_add(_i, _v)		atomic64_add_return(_i, _v)
#define atomic64_add_negative(_i, _v)	(atomic64_add_return(_i, _v) < 0)
#define atomic64_inc(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
static __inline__ long long atomic64_sub_return(long long i, atomic64_t * v)
{
	return __CSG_LOOP(v, i, "sgr");
}
#define atomic64_sub(_i, _v)		atomic64_sub_return(_i, _v)
#define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)
static __inline__ void atomic64_clear_mask(unsigned long mask, atomic64_t * v)
{
	__CSG_LOOP(v, ~mask, "ngr");
}
static __inline__ void atomic64_set_mask(unsigned long mask, atomic64_t * v)
{
	__CSG_LOOP(v, mask, "ogr");
}
static __inline__ long long atomic64_cmpxchg(atomic64_t *v,
					     long long old, long long new)
{
	__asm__ __volatile__("  csg  %0,%3,0(%2)\n"
			     : "+d" (old), "=m" (v->counter)
			     : "a" (v), "d" (new), "m" (v->counter)
			     : "cc", "memory" );
	return old;
}
/* Atomically add a to v unless v == u; returns non-zero if v was not u. */
static __inline__ int atomic64_add_unless(atomic64_t *v,
					  long long a, long long u)
{
	long long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic64_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
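/*
 * Usage sketch (illustrative; "example_bytes" is hypothetical): the
 * 64-bit helpers suit statistics counters that would wrap an int.
 */
#if 0
static atomic64_t example_bytes = ATOMIC64_INIT(0);

static void example_account(long long len)
{
	atomic64_add(len, &example_bytes);
}
#endif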
#endif /* __s390x__ */

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#include <asm-generic/atomic.h>
#endif /* __KERNEL__ */
#endif /* __ARCH_S390_ATOMIC__ */