include/asm-powerpc/atomic.h
#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */

typedef struct { volatile int counter; } atomic_t;

#ifdef __KERNEL__
#include <linux/compiler.h>
#include <asm/synch.h>
#include <asm/asm-compat.h>

#define ATOMIC_INIT(i)		{ (i) }

#define atomic_read(v)		((v)->counter)
#define atomic_set(v,i)		(((v)->counter) = (i))
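
/*
 * Illustrative usage sketch (not part of the original header): declaring,
 * initialising and updating a 32-bit atomic counter with the macros above.
 * The variable name is hypothetical.
 *
 *	static atomic_t example_count = ATOMIC_INIT(0);
 *
 *	atomic_set(&example_count, 5);
 *	if (atomic_read(&example_count) == 5)
 *		...;
 */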

static __inline__ void atomic_add(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3		# atomic_add\n\
	add	%0,%2,%0\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}

static __inline__ int atomic_add_return(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%2		# atomic_add_return\n\
	add	%0,%1,%0\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

static __inline__ void atomic_sub(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3		# atomic_sub\n\
	subf	%0,%2,%0\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}

static __inline__ int atomic_sub_return(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%2		# atomic_sub_return\n\
	subf	%0,%1,%0\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}

static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc");
}

static __inline__ int atomic_inc_return(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%1		# atomic_inc_return\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
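
/*
 * Illustrative usage sketch (not part of the original header): a pending-work
 * counter initialised to -n, so the caller completing the n-th item sees the
 * count reach zero and runs the completion step.  Names are hypothetical.
 *
 *	if (atomic_inc_and_test(&example_pending))
 *		example_all_done();
 */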

static __inline__ void atomic_dec(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc");
}

static __inline__ int atomic_dec_return(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%1		# atomic_dec_return\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%1		# atomic_add_unless\n\
	cmpw	0,%0,%3 \n\
	beq-	2f \n\
	add	%0,%2,%0 \n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1 \n\
	bne-	1b \n"
	ISYNC_ON_SMP
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t != u;
}

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
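
/*
 * Illustrative usage sketch (not part of the original header): taking a
 * reference only while the refcount is still non-zero, so an object that is
 * concurrently being torn down is not resurrected.  Names are hypothetical.
 *
 *	struct example_obj *obj = example_lookup(key);
 *
 *	if (obj && !atomic_inc_not_zero(&obj->refcount))
 *		obj = NULL;		(already on its way to being freed)
 */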

#define atomic_sub_and_test(a, v)	(atomic_sub_return((a), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_dec_return((v)) == 0)

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
	cmpwi	%0,1\n\
	addi	%0,%0,-1\n\
	blt-	2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"	: "=&b" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
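
/*
 * Illustrative usage sketch (not part of the original header): consuming one
 * unit of a counted resource only when at least one unit is available; a
 * negative return value means the counter was already zero.  Names are
 * hypothetical.
 *
 *	if (atomic_dec_if_positive(&example_free_slots) >= 0)
 *		example_use_slot();
 */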

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#ifdef __powerpc64__

typedef struct { volatile long counter; } atomic64_t;

#define ATOMIC64_INIT(i)	{ (i) }

#define atomic64_read(v)	((v)->counter)
#define atomic64_set(v,i)	(((v)->counter) = (i))

static __inline__ void atomic64_add(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%3		# atomic64_add\n\
	add	%0,%2,%0\n\
	stdcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}

static __inline__ long atomic64_add_return(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%2		# atomic64_add_return\n\
	add	%0,%1,%0\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

static __inline__ void atomic64_sub(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%3		# atomic64_sub\n\
	subf	%0,%2,%0\n\
	stdcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}

static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%2		# atomic64_sub_return\n\
	subf	%0,%1,%0\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}

static __inline__ void atomic64_inc(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc");
}

static __inline__ long atomic64_inc_return(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%1		# atomic64_inc_return\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%1 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}

/*
 * atomic64_inc_and_test - increment and test
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
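
/*
 * Illustrative usage sketch (not part of the original header): a 64-bit
 * statistics counter, available on 64-bit PowerPC only.  Names are
 * hypothetical.
 *
 *	static atomic64_t example_bytes_sent = ATOMIC64_INIT(0);
 *
 *	atomic64_add(len, &example_bytes_sent);
 *	total = atomic64_read(&example_bytes_sent);
 */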

static __inline__ void atomic64_dec(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc");
}

static __inline__ long atomic64_dec_return(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%1		# atomic64_dec_return\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}

#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1.
 */
static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
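
/*
 * Illustrative usage sketch (not part of the original header): the 64-bit
 * counterpart of the atomic_dec_if_positive() pattern above, consuming from
 * a quota only while it is positive.  Names are hypothetical.
 *
 *	if (atomic64_dec_if_positive(&example_quota) >= 0)
 *		example_proceed();
 */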
415
416 #endif /* __powerpc64__ */
417
418 #include <asm-generic/atomic.h>
419 #endif /* __KERNEL__ */
420 #endif /* _ASM_POWERPC_ATOMIC_H_ */