locking, mips: Fix atomics
arch/mips/include/asm/atomic.h (deliverable/linux.git)
/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc..
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 97, 99, 2000, 03, 04, 06 by Ralf Baechle
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cpu-features.h>
#include <asm/cmpxchg.h>
#include <asm/war.h>

#define ATOMIC_INIT(i)	{ (i) }

/*
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v)	(*(volatile int *)&(v)->counter)

/*
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v, i)	((v)->counter = (i))

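/*
 * Usage sketch (illustrative only, not part of this header): a counter is
 * declared as atomic_t, initialised with ATOMIC_INIT() or atomic_set(),
 * and sampled with atomic_read().  Names below are hypothetical.
 *
 *	static atomic_t pkt_count = ATOMIC_INIT(0);
 *
 *	static void reset_pkt_count(void)
 *	{
 *		atomic_set(&pkt_count, 0);
 *	}
 *
 *	static int sample_pkt_count(void)
 *	{
 *		return atomic_read(&pkt_count);
 *	}
 */
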
#define ATOMIC_OP(op, c_op, asm_op) \
static __inline__ void atomic_##op(int i, atomic_t * v) \
{ \
	if (kernel_uses_llsc && R10000_LLSC_WAR) { \
		int temp; \
 \
		__asm__ __volatile__( \
		"	.set	arch=r4000			\n" \
		"1:	ll	%0, %1	# atomic_" #op "	\n" \
		"	" #asm_op " %0, %2			\n" \
		"	sc	%0, %1				\n" \
		"	beqzl	%0, 1b				\n" \
		"	.set	mips0				\n" \
		: "=&r" (temp), "+m" (v->counter) \
		: "Ir" (i)); \
	} else if (kernel_uses_llsc) { \
		int temp; \
 \
		do { \
			__asm__ __volatile__( \
			"	.set	arch=r4000		\n" \
			"	ll	%0, %1	# atomic_" #op "\n" \
			"	" #asm_op " %0, %2		\n" \
			"	sc	%0, %1			\n" \
			"	.set	mips0			\n" \
			: "=&r" (temp), "+m" (v->counter) \
			: "Ir" (i)); \
		} while (unlikely(!temp)); \
	} else { \
		unsigned long flags; \
 \
		raw_local_irq_save(flags); \
		v->counter c_op i; \
		raw_local_irq_restore(flags); \
	} \
} \

#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
{ \
	int result; \
 \
	smp_mb__before_llsc(); \
 \
	if (kernel_uses_llsc && R10000_LLSC_WAR) { \
		int temp; \
 \
		__asm__ __volatile__( \
		"	.set	arch=r4000			\n" \
		"1:	ll	%1, %2	# atomic_" #op "_return	\n" \
		"	" #asm_op " %0, %1, %3			\n" \
		"	sc	%0, %2				\n" \
		"	beqzl	%0, 1b				\n" \
		"	" #asm_op " %0, %1, %3			\n" \
		"	.set	mips0				\n" \
		: "=&r" (result), "=&r" (temp), "+m" (v->counter) \
		: "Ir" (i)); \
	} else if (kernel_uses_llsc) { \
		int temp; \
 \
		do { \
			__asm__ __volatile__( \
			"	.set	arch=r4000		\n" \
			"	ll	%1, %2	# atomic_" #op "_return	\n" \
			"	" #asm_op " %0, %1, %3		\n" \
			"	sc	%0, %2			\n" \
			"	.set	mips0			\n" \
			: "=&r" (result), "=&r" (temp), "+m" (v->counter) \
			: "Ir" (i)); \
		} while (unlikely(!result)); \
 \
		result = temp; result c_op i; \
	} else { \
		unsigned long flags; \
 \
		raw_local_irq_save(flags); \
		result = v->counter; \
		result c_op i; \
		v->counter = result; \
		raw_local_irq_restore(flags); \
	} \
 \
	smp_llsc_mb(); \
 \
	return result; \
}

#define ATOMIC_OPS(op, c_op, asm_op) \
	ATOMIC_OP(op, c_op, asm_op) \
	ATOMIC_OP_RETURN(op, c_op, asm_op)

ATOMIC_OPS(add, +=, addu)
ATOMIC_OPS(sub, -=, subu)

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

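/*
 * The two ATOMIC_OPS() expansions above generate atomic_add(),
 * atomic_add_return(), atomic_sub() and atomic_sub_return().  A minimal
 * usage sketch (illustrative only, hypothetical names):
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	static int register_user(void)
 *	{
 *		// atomic_add_return() is fully ordered and returns the new value.
 *		return atomic_add_return(1, &nr_users);
 *	}
 *
 *	static void unregister_user(void)
 *	{
 *		// atomic_sub() updates the counter but returns nothing.
 *		atomic_sub(1, &nr_users);
 *	}
 */
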
/*
 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
 * The function returns the old value of @v minus @i.
 */
static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
{
	int result;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		int temp;

		__asm__ __volatile__(
		"	.set	arch=r4000			\n"
		"1:	ll	%1, %2	# atomic_sub_if_positive\n"
		"	subu	%0, %1, %3			\n"
		"	bltz	%0, 1f				\n"
		"	sc	%0, %2				\n"
		"	.set	noreorder			\n"
		"	beqzl	%0, 1b				\n"
		"	subu	%0, %1, %3			\n"
		"	.set	reorder				\n"
		"1:						\n"
		"	.set	mips0				\n"
		: "=&r" (result), "=&r" (temp), "+m" (v->counter)
		: "Ir" (i), "m" (v->counter)
		: "memory");
	} else if (kernel_uses_llsc) {
		int temp;

		__asm__ __volatile__(
		"	.set	arch=r4000			\n"
		"1:	ll	%1, %2	# atomic_sub_if_positive\n"
		"	subu	%0, %1, %3			\n"
		"	bltz	%0, 1f				\n"
		"	sc	%0, %2				\n"
		"	.set	noreorder			\n"
		"	beqz	%0, 1b				\n"
		"	subu	%0, %1, %3			\n"
		"	.set	reorder				\n"
		"1:						\n"
		"	.set	mips0				\n"
		: "=&r" (result), "=&r" (temp), "+m" (v->counter)
		: "Ir" (i));
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		result = v->counter;
		result -= i;
		if (result >= 0)
			v->counter = result;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return result;
}

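/*
 * atomic_sub_if_positive() only commits the subtraction when the result
 * stays non-negative, which makes it usable as a simple credit counter.
 * A hedged sketch (hypothetical names, not taken from this file):
 *
 *	static atomic_t tx_credits = ATOMIC_INIT(8);
 *
 *	static int take_tx_credit(void)
 *	{
 *		// Old value minus 1 is returned; a negative result means no
 *		// credit was available and the counter was left untouched.
 *		return atomic_sub_if_positive(1, &tx_credits) >= 0;
 *	}
 *
 *	static void give_tx_credit(void)
 *	{
 *		atomic_add(1, &tx_credits);
 *	}
 */
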
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))

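/*
 * atomic_cmpxchg() and atomic_xchg() are thin wrappers around cmpxchg()
 * and xchg() on the counter word.  A typical compare-and-swap retry loop
 * looks like the sketch below (illustrative, hypothetical function name):
 *
 *	static int saturating_inc(atomic_t *v, int limit)
 *	{
 *		int old, cur = atomic_read(v);
 *
 *		while (cur < limit) {
 *			old = atomic_cmpxchg(v, cur, cur + 1);
 *			if (old == cur)
 *				return 1;	// we won the race
 *			cur = old;		// someone else changed it, retry
 *		}
 *		return 0;			// already at the limit
 *	}
 */
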
/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}

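/*
 * __atomic_add_unless() is the hook the generic <linux/atomic.h> wrappers
 * build atomic_add_unless() and atomic_inc_not_zero() on: it returns the
 * value the counter held before the (possibly skipped) addition.  Hedged
 * sketch of a "get only if still live" helper (hypothetical names):
 *
 *	static int obj_tryget(atomic_t *refcount)
 *	{
 *		// Add 1 unless the count is already 0; a non-zero old value
 *		// means the reference was successfully taken.
 *		return __atomic_add_unless(refcount, 1, 0) != 0;
 *	}
 */
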
#define atomic_dec_return(v) atomic_sub_return(1, (v))
#define atomic_inc_return(v) atomic_add_return(1, (v))

/*
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

/*
 * atomic_dec_and_test - decrement by 1 and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)

/*
 * atomic_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic_t
 */
#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v)

/*
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
#define atomic_inc(v) atomic_add(1, (v))

/*
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
#define atomic_dec(v) atomic_sub(1, (v))

/*
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
#define atomic_add_negative(i, v) (atomic_add_return(i, (v)) < 0)

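/*
 * The *_and_test() and *_negative() helpers above fold the update and the
 * check of the new value into a single ordered operation, so callers never
 * re-read the counter after modifying it.  Illustrative sketch
 * (hypothetical type and function names):
 *
 *	struct my_obj {
 *		atomic_t refs;
 *	};
 *
 *	static void obj_put(struct my_obj *obj)
 *	{
 *		// Only the caller that drops the last reference frees the object.
 *		if (atomic_dec_and_test(&obj->refs))
 *			kfree(obj);
 *	}
 */
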
#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i) { (i) }

/*
 * atomic64_read - read atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 */
#define atomic64_read(v) (*(volatile long *)&(v)->counter)

/*
 * atomic64_set - set atomic variable
 * @v: pointer of type atomic64_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic64_set(v, i) ((v)->counter = (i))

#define ATOMIC64_OP(op, c_op, asm_op) \
static __inline__ void atomic64_##op(long i, atomic64_t * v) \
{ \
	if (kernel_uses_llsc && R10000_LLSC_WAR) { \
		long temp; \
 \
		__asm__ __volatile__( \
		"	.set	arch=r4000			\n" \
		"1:	lld	%0, %1	# atomic64_" #op "	\n" \
		"	" #asm_op " %0, %2			\n" \
		"	scd	%0, %1				\n" \
		"	beqzl	%0, 1b				\n" \
		"	.set	mips0				\n" \
		: "=&r" (temp), "+m" (v->counter) \
		: "Ir" (i)); \
	} else if (kernel_uses_llsc) { \
		long temp; \
 \
		do { \
			__asm__ __volatile__( \
			"	.set	arch=r4000		\n" \
			"	lld	%0, %1	# atomic64_" #op "\n" \
			"	" #asm_op " %0, %2		\n" \
			"	scd	%0, %1			\n" \
			"	.set	mips0			\n" \
			: "=&r" (temp), "+m" (v->counter) \
			: "Ir" (i)); \
		} while (unlikely(!temp)); \
	} else { \
		unsigned long flags; \
 \
		raw_local_irq_save(flags); \
		v->counter c_op i; \
		raw_local_irq_restore(flags); \
	} \
} \

#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
{ \
	long result; \
 \
	smp_mb__before_llsc(); \
 \
	if (kernel_uses_llsc && R10000_LLSC_WAR) { \
		long temp; \
 \
		__asm__ __volatile__( \
		"	.set	arch=r4000			\n" \
		"1:	lld	%1, %2	# atomic64_" #op "_return\n" \
		"	" #asm_op " %0, %1, %3			\n" \
		"	scd	%0, %2				\n" \
		"	beqzl	%0, 1b				\n" \
		"	" #asm_op " %0, %1, %3			\n" \
		"	.set	mips0				\n" \
		: "=&r" (result), "=&r" (temp), "+m" (v->counter) \
		: "Ir" (i)); \
	} else if (kernel_uses_llsc) { \
		long temp; \
 \
		do { \
			__asm__ __volatile__( \
			"	.set	arch=r4000		\n" \
			"	lld	%1, %2	# atomic64_" #op "_return\n" \
			"	" #asm_op " %0, %1, %3		\n" \
			"	scd	%0, %2			\n" \
			"	.set	mips0			\n" \
			: "=&r" (result), "=&r" (temp), "=m" (v->counter) \
			: "Ir" (i), "m" (v->counter) \
			: "memory"); \
		} while (unlikely(!result)); \
 \
		result = temp; result c_op i; \
	} else { \
		unsigned long flags; \
 \
		raw_local_irq_save(flags); \
		result = v->counter; \
		result c_op i; \
		v->counter = result; \
		raw_local_irq_restore(flags); \
	} \
 \
	smp_llsc_mb(); \
 \
	return result; \
}

#define ATOMIC64_OPS(op, c_op, asm_op) \
	ATOMIC64_OP(op, c_op, asm_op) \
	ATOMIC64_OP_RETURN(op, c_op, asm_op)

ATOMIC64_OPS(add, +=, daddu)
ATOMIC64_OPS(sub, -=, dsubu)

#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

/*
 * atomic64_sub_if_positive - conditionally subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic64_t
 *
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
 * The function returns the old value of @v minus @i.
 */
static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
{
	long result;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000			\n"
		"1:	lld	%1, %2	# atomic64_sub_if_positive\n"
		"	dsubu	%0, %1, %3			\n"
		"	bltz	%0, 1f				\n"
		"	scd	%0, %2				\n"
		"	.set	noreorder			\n"
		"	beqzl	%0, 1b				\n"
		"	dsubu	%0, %1, %3			\n"
		"	.set	reorder				\n"
		"1:						\n"
		"	.set	mips0				\n"
		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter)
		: "memory");
	} else if (kernel_uses_llsc) {
		long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000			\n"
		"1:	lld	%1, %2	# atomic64_sub_if_positive\n"
		"	dsubu	%0, %1, %3			\n"
		"	bltz	%0, 1f				\n"
		"	scd	%0, %2				\n"
		"	.set	noreorder			\n"
		"	beqz	%0, 1b				\n"
		"	dsubu	%0, %1, %3			\n"
		"	.set	reorder				\n"
		"1:						\n"
		"	.set	mips0				\n"
		: "=&r" (result), "=&r" (temp), "+m" (v->counter)
		: "Ir" (i));
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		result = v->counter;
		result -= i;
		if (result >= 0)
			v->counter = result;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return result;
}

#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns true iff @v was not @u.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

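/*
 * Unlike __atomic_add_unless() above, atomic64_add_unless() returns a
 * boolean: true when the addition happened, false when @v already held @u.
 * atomic64_inc_not_zero() is the usual consumer.  Hedged sketch
 * (hypothetical type and function names):
 *
 *	struct big_stats {
 *		atomic64_t refs;
 *	};
 *
 *	static int stats_tryget(struct big_stats *s)
 *	{
 *		// Fails (returns 0) once the last reference has been dropped.
 *		return atomic64_inc_not_zero(&s->refs);
 *	}
 */
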
#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
#define atomic64_inc_return(v) atomic64_add_return(1, (v))

/*
 * atomic64_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic64_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
#define atomic64_sub_and_test(i, v) (atomic64_sub_return((i), (v)) == 0)

/*
 * atomic64_inc_and_test - increment and test
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)

/*
 * atomic64_dec_and_test - decrement by 1 and test
 * @v: pointer of type atomic64_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)

/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 */
#define atomic64_dec_if_positive(v) atomic64_sub_if_positive(1, v)

/*
 * atomic64_inc - increment atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1.
 */
#define atomic64_inc(v) atomic64_add(1, (v))

/*
 * atomic64_dec - decrement atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically decrements @v by 1.
 */
#define atomic64_dec(v) atomic64_sub(1, (v))

/*
 * atomic64_add_negative - add and test if negative
 * @v: pointer of type atomic64_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
#define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)

#endif /* CONFIG_64BIT */

#endif /* _ASM_ATOMIC_H */