locking,arch,alpha: Fold atomic_ops
arch/alpha/include/asm/atomic.h

#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc...
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */

#define ATOMIC_INIT(i)		{ (i) }
#define ATOMIC64_INIT(i)	{ (i) }

#define atomic_read(v)		(*(volatile int *)&(v)->counter)
#define atomic64_read(v)	(*(volatile long *)&(v)->counter)

#define atomic_set(v,i)		((v)->counter = (i))
#define atomic64_set(v,i)	((v)->counter = (i))

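/*
 * Illustrative usage (not part of the original header): declaring and
 * touching a counter with the macros above.  The names are hypothetical.
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_users, 5);
 *	if (atomic_read(&nr_users) > 0)
 *		do_something();
 *
 * Note that atomic_read()/atomic_set() compile to plain loads and
 * stores; only the operations built from the locked sequences below
 * need special instructions.
 */
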
/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

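/*
 * Note added for clarity: ldl_l/ldq_l and stl_c/stq_c are Alpha's
 * load-locked/store-conditional pair.  The store-conditional writes 1
 * into its source register on success and 0 if the reservation was
 * lost, so the "beq %0,2f" sequences below branch to an out-of-line
 * stub in .subsection 2 that simply retries from the load.
 */
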
#define ATOMIC_OP(op)							\
static __inline__ void atomic_##op(int i, atomic_t * v)		\
{									\
	unsigned long temp;						\
	__asm__ __volatile__(						\
	"1:	ldl_l %0,%1\n"						\
	"	" #op "l %0,%2,%0\n"					\
	"	stl_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter)				\
	:"Ir" (i), "m" (v->counter));					\
}									\

#define ATOMIC_OP_RETURN(op)						\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	long temp, result;						\
	smp_mb();							\
	__asm__ __volatile__(						\
	"1:	ldl_l %0,%1\n"						\
	"	" #op "l %0,%3,%2\n"					\
	"	" #op "l %0,%3,%0\n"					\
	"	stl_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	smp_mb();							\
	return result;							\
}

#define ATOMIC64_OP(op)							\
static __inline__ void atomic64_##op(long i, atomic64_t * v)		\
{									\
	unsigned long temp;						\
	__asm__ __volatile__(						\
	"1:	ldq_l %0,%1\n"						\
	"	" #op "q %0,%2,%0\n"					\
	"	stq_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter)				\
	:"Ir" (i), "m" (v->counter));					\
}									\

#define ATOMIC64_OP_RETURN(op)						\
static __inline__ long atomic64_##op##_return(long i, atomic64_t * v)	\
{									\
	long temp, result;						\
	smp_mb();							\
	__asm__ __volatile__(						\
	"1:	ldq_l %0,%1\n"						\
	"	" #op "q %0,%3,%2\n"					\
	"	" #op "q %0,%3,%0\n"					\
	"	stq_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	smp_mb();							\
	return result;							\
}

#define ATOMIC_OPS(opg)							\
	ATOMIC_OP(opg)							\
	ATOMIC_OP_RETURN(opg)						\
	ATOMIC64_OP(opg)						\
	ATOMIC64_OP_RETURN(opg)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

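/*
 * Illustrative note (not in the original): each ATOMIC_OPS(op) line
 * above expands to four definitions, e.g. for "add":
 *
 *	static inline void atomic_add(int i, atomic_t *v);
 *	static inline int  atomic_add_return(int i, atomic_t *v);
 *	static inline void atomic64_add(long i, atomic64_t *v);
 *	static inline long atomic64_add_return(long i, atomic64_t *v);
 *
 * and likewise for "sub".  Only the _return variants imply memory
 * barriers (the smp_mb() calls around the locked sequence).
 */
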
#undef ATOMIC_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

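/*
 * Illustrative sketch (not part of the original header): the classic
 * compare-and-swap retry loop built on atomic_cmpxchg(), which returns
 * the value it actually found in the counter.  The helper name and the
 * "max" policy are hypothetical.
 *
 *	static inline int hypothetical_inc_below(atomic_t *v, int max)
 *	{
 *		int old = atomic_read(v);
 *
 *		while (old < max) {
 *			int seen = atomic_cmpxchg(v, old, old + 1);
 *			if (seen == old)
 *				return 1;
 *			old = seen;
 *		}
 *		return 0;
 *	}
 *
 * If "seen == old" our increment won the race; otherwise "seen" holds
 * the fresher value and the loop retries against it.
 */
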
/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, new, old;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l	%[old],%[mem]\n"
	"	cmpeq	%[old],%[u],%[c]\n"
	"	addl	%[old],%[a],%[new]\n"
	"	bne	%[c],2f\n"
	"	stl_c	%[new],%[mem]\n"
	"	beq	%[new],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"((long)u)
	: "memory");
	smp_mb();
	return old;
}

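/*
 * Illustrative usage (not part of the original header): this is the
 * primitive behind helpers such as atomic_inc_not_zero().  Since it
 * returns the old value, a caller can take a reference only while the
 * object is still live ("obj" and its refcnt field are hypothetical):
 *
 *	if (__atomic_add_unless(&obj->refcnt, 1, 0) == 0)
 *		return NULL;
 *
 * A zero return means the count was already zero and nothing was added,
 * so the lookup must fail rather than resurrect a dying object.
 */
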
/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns true iff @v was not @u.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, tmp;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l	%[tmp],%[mem]\n"
	"	cmpeq	%[tmp],%[u],%[c]\n"
	"	addq	%[tmp],%[a],%[tmp]\n"
	"	bne	%[c],2f\n"
	"	stq_c	%[tmp],%[mem]\n"
	"	beq	%[tmp],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [tmp] "=&r"(tmp), [c] "=&r"(c)
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"(u)
	: "memory");
	smp_mb();
	return !c;
}

/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long old, tmp;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l	%[old],%[mem]\n"
	"	subq	%[old],1,%[tmp]\n"
	"	ble	%[old],2f\n"
	"	stq_c	%[tmp],%[mem]\n"
	"	beq	%[tmp],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [old] "=&r"(old), [tmp] "=&r"(tmp)
	: [mem] "m"(*v)
	: "memory");
	smp_mb();
	return old - 1;
}

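/*
 * Illustrative usage (not part of the original header): a semaphore-
 * style fast path on a hypothetical 64-bit count.  Because the return
 * value is the old value minus one even when nothing was stored, a
 * negative result means no slot was taken:
 *
 *	if (atomic64_dec_if_positive(&sem_count) < 0)
 *		slow_path();
 */
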
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic64_dec_return(v) atomic64_sub_return(1,(v))

#define atomic_inc_return(v) atomic_add_return(1,(v))
#define atomic64_inc_return(v) atomic64_add_return(1,(v))

#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)

#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)

#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)

#define atomic_inc(v) atomic_add(1,(v))
#define atomic64_inc(v) atomic64_add(1,(v))

#define atomic_dec(v) atomic_sub(1,(v))
#define atomic64_dec(v) atomic64_sub(1,(v))

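/*
 * Illustrative usage (not part of the original header): the canonical
 * reference-count "put" built from the helpers above; "obj" and
 * free_obj() are hypothetical.
 *
 *	if (atomic_dec_and_test(&obj->refcnt))
 *		free_obj(obj);
 */
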
#endif /* _ALPHA_ATOMIC_H */