#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

#include <asm/barrier.h>
#include <asm/system.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc...
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */


/*
 * Counter is volatile to make sure gcc doesn't try to be clever
 * and move things around on us. We need to use _exactly_ the address
 * the user gave us, not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile long counter; } atomic64_t;

#define ATOMIC_INIT(i)		( (atomic_t) { (i) } )
#define ATOMIC64_INIT(i)	( (atomic64_t) { (i) } )

#define atomic_read(v)		((v)->counter + 0)
#define atomic64_read(v)	((v)->counter + 0)

#define atomic_set(v,i)		((v)->counter = (i))
#define atomic64_set(v,i)	((v)->counter = (i))
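
/*
 * Illustrative usage sketch (not part of the original header): a
 * minimal reference count built on this API (atomic_inc() and
 * atomic_dec_and_test() are defined later in this file; the
 * obj_get()/obj_put() names are hypothetical):
 *
 *	static atomic_t refcnt = ATOMIC_INIT(1);
 *
 *	static void obj_get(void) { atomic_inc(&refcnt); }
 *	static int obj_put(void) { return atomic_dec_and_test(&refcnt); }
 *
 * obj_put() returns non-zero only for the caller that drops the last
 * reference, which is the natural point to free the object.
 */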

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

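/*
 * Illustrative sketch (not part of the original header) of what each
 * load-locked/store-conditional loop below computes, in C-like
 * pseudocode; load_locked() and store_conditional() are hypothetical
 * names for the ldl_l/stl_c instruction pair:
 *
 *	do {
 *		temp = load_locked(&v->counter);	// ldl_l
 *		temp += i;				// addl
 *	} while (!store_conditional(&v->counter, temp));	// stl_c
 *
 * stl_c writes 0 to its source register when the conditional store
 * fails, so "beq %0,2f" jumps to the out-of-line "br 1b" that retries
 * the whole sequence.
 */
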
static __inline__ void atomic_add(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	addl %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic64_add(long i, atomic64_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	addq %0,%2,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic_sub(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	subl %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic64_sub(long i, atomic64_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	subq %0,%2,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}


/*
 * Same as above, but return the result value
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	addl %0,%3,%2\n"
	"	addl %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

static __inline__ long atomic64_add_return(long i, atomic64_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	addq %0,%3,%2\n"
	"	addq %0,%3,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

static __inline__ long atomic_sub_return(int i, atomic_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	subl %0,%3,%2\n"
	"	subl %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	subq %0,%3,%2\n"
	"	subq %0,%3,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

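/*
 * Illustrative note (not part of the original header): the smp_mb()
 * calls before and after each loop above make the value-returning
 * forms fully ordered, so they can publish prior stores.  A minimal
 * sketch (pending, ready and wake_consumer() are hypothetical):
 *
 *	ready = 1;				// plain store
 *	if (atomic_sub_return(1, &pending) == 0)
 *		wake_consumer();		// sees ready == 1
 */
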
#define atomic64_cmpxchg(v, old, new)	(cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new)		(xchg(&((v)->counter), new))

#define atomic_cmpxchg(v, old, new)	(cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new)		(xchg(&((v)->counter), new))

/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

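/*
 * Illustrative usage (not part of the original header):
 * atomic_inc_not_zero() is the usual "take a reference only if the
 * object is still live" step in a lookup path (obj and refcnt are
 * hypothetical):
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		obj = NULL;	// already being torn down, don't touch
 */
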
/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

#define atomic_dec_return(v)		atomic_sub_return(1,(v))
#define atomic64_dec_return(v)		atomic64_sub_return(1,(v))

#define atomic_inc_return(v)		atomic_add_return(1,(v))
#define atomic64_inc_return(v)		atomic64_add_return(1,(v))

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i), (v)) == 0)

#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
#define atomic64_inc_and_test(v)	(atomic64_add_return(1, (v)) == 0)

#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_sub_return(1, (v)) == 0)

#define atomic_inc(v)			atomic_add(1,(v))
#define atomic64_inc(v)			atomic64_add(1,(v))

#define atomic_dec(v)			atomic_sub(1,(v))
#define atomic64_dec(v)			atomic64_sub(1,(v))

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

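/*
 * Illustrative usage (not part of the original header): the macros
 * above order plain memory accesses around the non-value-returning
 * atomics; obj and its fields are hypothetical:
 *
 *	obj->dead = 1;
 *	smp_mb__before_atomic_dec();	// store visible before the dec
 *	atomic_dec(&obj->refcnt);
 */
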
#include <asm-generic/atomic.h>
#endif /* _ALPHA_ATOMIC_H */