atomic_t: unify all arch definitions
arch/s390/include/asm/atomic.h
#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

#include <linux/compiler.h>
#include <linux/types.h>

/*
 * include/asm-s390/atomic.h
 *
 * S390 version
 *   Copyright (C) 1999-2005 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *   Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *              Denis Joseph Barrow,
 *              Arnd Bergmann (arndb@de.ibm.com)
 *
 * Derived from "include/asm-i386/bitops.h"
 *   Copyright (C) 1992, Linus Torvalds
 *
 */

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 * S390 uses 'Compare And Swap' for atomicity in SMP environments.
 */

#define ATOMIC_INIT(i)          { (i) }
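/*
 * Example (editorial, not part of the original header): static
 * initialization of an atomic counter.  The variable name is
 * illustrative.
 *
 *      static atomic_t nr_events = ATOMIC_INIT(0);
 */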

#ifdef __KERNEL__

#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)

#define __CS_LOOP(ptr, op_val, op_string) ({                    \
        typeof(ptr->counter) old_val, new_val;                  \
        asm volatile(                                           \
                "       l       %0,%2\n"                        \
                "0:     lr      %1,%0\n"                        \
                op_string "     %1,%3\n"                        \
                "       cs      %0,%1,%2\n"                     \
                "       jl      0b"                             \
                : "=&d" (old_val), "=&d" (new_val),             \
                  "=Q" (((atomic_t *)(ptr))->counter)           \
                : "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \
                : "cc", "memory");                              \
        new_val;                                                \
})

#else /* __GNUC__ */

#define __CS_LOOP(ptr, op_val, op_string) ({                    \
        typeof(ptr->counter) old_val, new_val;                  \
        asm volatile(                                           \
                "       l       %0,0(%3)\n"                     \
                "0:     lr      %1,%0\n"                        \
                op_string "     %1,%4\n"                        \
                "       cs      %0,%1,0(%3)\n"                  \
                "       jl      0b"                             \
                : "=&d" (old_val), "=&d" (new_val),             \
                  "=m" (((atomic_t *)(ptr))->counter)           \
                : "a" (ptr), "d" (op_val),                      \
                  "m" (((atomic_t *)(ptr))->counter)            \
                : "cc", "memory");                              \
        new_val;                                                \
})

#endif /* __GNUC__ */
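/*
 * Editorial note on __CS_LOOP above: "cs" compares old_val against the
 * counter in storage and, only if they still match, stores new_val.
 * If another CPU changed the counter in the meantime, "cs" fails with
 * condition code 1 and reloads the current value into old_val, and
 * "jl 0b" redoes the operation on the fresh value.  The "memory"
 * clobber makes each loop a compiler barrier as well.
 */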

static inline int atomic_read(const atomic_t *v)
{
        barrier();
        return v->counter;
}

static inline void atomic_set(atomic_t *v, int i)
{
        v->counter = i;
        barrier();
}
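/*
 * Editorial note: barrier() above is a compiler barrier only; it keeps
 * the compiler from caching or reordering the plain load/store, but it
 * does not emit a hardware fence.
 */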

static inline int atomic_add_return(int i, atomic_t *v)
{
        return __CS_LOOP(v, i, "ar");
}
#define atomic_add(_i, _v)              atomic_add_return(_i, _v)
#define atomic_add_negative(_i, _v)     (atomic_add_return(_i, _v) < 0)
#define atomic_inc(_v)                  atomic_add_return(1, _v)
#define atomic_inc_return(_v)           atomic_add_return(1, _v)
#define atomic_inc_and_test(_v)         (atomic_add_return(1, _v) == 0)
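/*
 * Example (editorial): handing out increasing ids with
 * atomic_inc_return().  The names are illustrative only.
 *
 *      static atomic_t next_id = ATOMIC_INIT(0);
 *
 *      int get_id(void)
 *      {
 *              return atomic_inc_return(&next_id);
 *      }
 */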

static inline int atomic_sub_return(int i, atomic_t *v)
{
        return __CS_LOOP(v, i, "sr");
}
#define atomic_sub(_i, _v)              atomic_sub_return(_i, _v)
#define atomic_sub_and_test(_i, _v)     (atomic_sub_return(_i, _v) == 0)
#define atomic_dec(_v)                  atomic_sub_return(1, _v)
#define atomic_dec_return(_v)           atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v)         (atomic_sub_return(1, _v) == 0)
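/*
 * Example (editorial): the classic reference-count drop with
 * atomic_dec_and_test().  The structure and free routine are
 * hypothetical.
 *
 *      static void put_obj(struct obj *p)
 *      {
 *              if (atomic_dec_and_test(&p->refcount))
 *                      kfree(p);
 *      }
 */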

static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
{
        __CS_LOOP(v, ~mask, "nr");
}

static inline void atomic_set_mask(unsigned long mask, atomic_t *v)
{
        __CS_LOOP(v, mask, "or");
}
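/*
 * Example (editorial): atomically setting and clearing bits in a
 * counter used as a flag word.  IRQ_PENDING is an illustrative mask,
 * not something this header defines.
 *
 *      atomic_set_mask(IRQ_PENDING, &flags);
 *      ...
 *      atomic_clear_mask(IRQ_PENDING, &flags);
 */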

#define atomic_xchg(v, new)     (xchg(&((v)->counter), new))

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
        asm volatile(
                "       cs      %0,%2,%1"
                : "+d" (old), "=Q" (v->counter)
                : "d" (new), "Q" (v->counter)
                : "cc", "memory");
#else /* __GNUC__ */
        asm volatile(
                "       cs      %0,%3,0(%2)"
                : "+d" (old), "=m" (v->counter)
                : "a" (v), "d" (new), "m" (v->counter)
                : "cc", "memory");
#endif /* __GNUC__ */
        return old;
}
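/*
 * Example (editorial): a lock-free "store maximum" built on the
 * atomic_cmpxchg() retry pattern.  Sketch only.
 *
 *      static void atomic_max(atomic_t *v, int new)
 *      {
 *              int old = atomic_read(v);
 *
 *              while (old < new) {
 *                      int prev = atomic_cmpxchg(v, old, new);
 *                      if (prev == old)
 *                              break;
 *                      old = prev;
 *              }
 *      }
 */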

static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
        int c, old;

        c = atomic_read(v);
        for (;;) {
                if (unlikely(c == u))
                        break;
                old = atomic_cmpxchg(v, c, c + a);
                if (likely(old == c))
                        break;
                c = old;
        }
        return c != u;
}

#define atomic_inc_not_zero(v)  atomic_add_unless((v), 1, 0)
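/*
 * Example (editorial): taking a reference only while the object is
 * still live, the usual lookup-side use of atomic_inc_not_zero().
 * The lookup function is hypothetical.
 *
 *      obj = lookup(key);
 *      if (obj && !atomic_inc_not_zero(&obj->refcount))
 *              obj = NULL;
 *
 * A failed increment means the count already reached zero, i.e. the
 * object is on its way to being freed.
 */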

#undef __CS_LOOP

#ifdef __s390x__
#define ATOMIC64_INIT(i)        { (i) }

#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)

#define __CSG_LOOP(ptr, op_val, op_string) ({                   \
        typeof(ptr->counter) old_val, new_val;                  \
        asm volatile(                                           \
                "       lg      %0,%2\n"                        \
                "0:     lgr     %1,%0\n"                        \
                op_string "     %1,%3\n"                        \
                "       csg     %0,%1,%2\n"                     \
                "       jl      0b"                             \
                : "=&d" (old_val), "=&d" (new_val),             \
                  "=Q" (((atomic64_t *)(ptr))->counter)         \
                : "d" (op_val), "Q" (((atomic64_t *)(ptr))->counter) \
                : "cc", "memory");                              \
        new_val;                                                \
})

#else /* __GNUC__ */

#define __CSG_LOOP(ptr, op_val, op_string) ({                   \
        typeof(ptr->counter) old_val, new_val;                  \
        asm volatile(                                           \
                "       lg      %0,0(%3)\n"                     \
                "0:     lgr     %1,%0\n"                        \
                op_string "     %1,%4\n"                        \
                "       csg     %0,%1,0(%3)\n"                  \
                "       jl      0b"                             \
                : "=&d" (old_val), "=&d" (new_val),             \
                  "=m" (((atomic64_t *)(ptr))->counter)         \
                : "a" (ptr), "d" (op_val),                      \
                  "m" (((atomic64_t *)(ptr))->counter)          \
                : "cc", "memory");                              \
        new_val;                                                \
})

#endif /* __GNUC__ */

static inline long long atomic64_read(const atomic64_t *v)
{
        barrier();
        return v->counter;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
        v->counter = i;
        barrier();
}

static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
        return __CSG_LOOP(v, i, "agr");
}
#define atomic64_add(_i, _v)            atomic64_add_return(_i, _v)
#define atomic64_add_negative(_i, _v)   (atomic64_add_return(_i, _v) < 0)
#define atomic64_inc(_v)                atomic64_add_return(1, _v)
#define atomic64_inc_return(_v)         atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v)       (atomic64_add_return(1, _v) == 0)

static inline long long atomic64_sub_return(long long i, atomic64_t *v)
{
        return __CSG_LOOP(v, i, "sgr");
}
#define atomic64_sub(_i, _v)            atomic64_sub_return(_i, _v)
#define atomic64_sub_and_test(_i, _v)   (atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v)                atomic64_sub_return(1, _v)
#define atomic64_dec_return(_v)         atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v)       (atomic64_sub_return(1, _v) == 0)

static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
{
        __CSG_LOOP(v, ~mask, "ngr");
}

static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
{
        __CSG_LOOP(v, mask, "ogr");
}

#define atomic64_xchg(v, new)   (xchg(&((v)->counter), new))

static inline long long atomic64_cmpxchg(atomic64_t *v,
                                          long long old, long long new)
{
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
        asm volatile(
                "       csg     %0,%2,%1"
                : "+d" (old), "=Q" (v->counter)
                : "d" (new), "Q" (v->counter)
                : "cc", "memory");
#else /* __GNUC__ */
        asm volatile(
                "       csg     %0,%3,0(%2)"
                : "+d" (old), "=m" (v->counter)
                : "a" (v), "d" (new), "m" (v->counter)
                : "cc", "memory");
#endif /* __GNUC__ */
        return old;
}

static inline int atomic64_add_unless(atomic64_t *v,
                                      long long a, long long u)
{
        long long c, old;

        c = atomic64_read(v);
        for (;;) {
                if (unlikely(c == u))
                        break;
                old = atomic64_cmpxchg(v, c, c + a);
                if (likely(old == c))
                        break;
                c = old;
        }
        return c != u;
}

#define atomic64_inc_not_zero(v)        atomic64_add_unless((v), 1, 0)
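/*
 * Editorial note: the atomic64_* family above mirrors the 32-bit API
 * one for one, using the 64-bit lg/csg/agr instruction forms on a
 * long long counter, e.g.:
 *
 *      static atomic64_t total_bytes = ATOMIC64_INIT(0);
 *
 *      atomic64_add(len, &total_bytes);
 */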

#undef __CSG_LOOP
#endif /* __s390x__ */

#define smp_mb__before_atomic_dec()     smp_mb()
#define smp_mb__after_atomic_dec()      smp_mb()
#define smp_mb__before_atomic_inc()     smp_mb()
#define smp_mb__after_atomic_inc()      smp_mb()
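/*
 * Editorial note: these barriers expand to a full smp_mb() here, so a
 * caller that needs ordering around a plain atomic_inc()/atomic_dec()
 * pays for a full barrier, e.g. (wait queue name illustrative):
 *
 *      atomic_dec(&pending);
 *      smp_mb__after_atomic_dec();
 *      wake_up(&wq);
 */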

#include <asm-generic/atomic.h>
#endif /* __KERNEL__ */
#endif /* __ARCH_S390_ATOMIC__ */