x86: sanitize paths arch/x86/kernel/cpu/Makefile
[deliverable/linux.git] / include / asm-x86_64 / atomic.h
1 #ifndef __ARCH_X86_64_ATOMIC__
2 #define __ARCH_X86_64_ATOMIC__
3
4 #include <asm/alternative.h>
5 #include <asm/cmpxchg.h>
6
7 /* atomic_t should be 32 bit signed type */
8
9 /*
10 * Atomic operations that C can't guarantee us. Useful for
11 * resource counting etc..
12 */
13
/*
 * Legacy LOCK macro: "lock" prefix on SMP, empty on UP.
 * NOTE(review): every atomic op in this file uses LOCK_PREFIX from
 * <asm/alternative.h> instead; this define appears to survive only for
 * other headers that may still reference it -- confirm before removing.
 */
#ifdef CONFIG_SMP
#define LOCK "lock ; "
#else
#define LOCK ""
#endif
19
/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
/* 32-bit signed counter; manipulate only through the accessors below. */
typedef struct { int counter; } atomic_t;

/* Static initializer, e.g.: static atomic_t refs = ATOMIC_INIT(1); */
#define ATOMIC_INIT(i)	{ (i) }
28
/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.  Plain load; implies no memory
 * barrier.  NOTE(review): atomic_t's counter is not volatile here
 * (unlike atomic64_t below), so gcc may merge or cache reads --
 * confirm this asymmetry is intended.
 */
#define atomic_read(v)		((v)->counter)
36
/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.  Plain store; a naturally
 * aligned 32-bit store is atomic on x86-64, and no memory barrier is
 * implied.
 */
#define atomic_set(v,i)		(((v)->counter) = (i))
45
/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.  No return value; no barrier beyond the
 * locked instruction itself.
 */
static __inline__ void atomic_add(int i, atomic_t *v)
{
	/*
	 * The duplicated "m" input marks v->counter as read as well as
	 * written, without needing a full "memory" clobber.
	 */
	__asm__ __volatile__(
		LOCK_PREFIX "addl %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
}
60
/**
 * atomic_sub - subtract the atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.  No return value; no barrier beyond
 * the locked instruction itself.
 */
static __inline__ void atomic_sub(int i, atomic_t *v)
{
	/* "m" input duplicates the output: counter is read-modify-write. */
	__asm__ __volatile__(
		LOCK_PREFIX "subl %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
}
75
/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
{
	unsigned char c;

	/*
	 * sete copies ZF (set iff the subtraction result was 0) into @c.
	 * The "memory" clobber keeps the compiler from caching other
	 * memory values across the operation.
	 */
	__asm__ __volatile__(
		LOCK_PREFIX "subl %2,%0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"ir" (i), "m" (v->counter) : "memory");
	return c;
}
95
/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.  No return value.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
	/* "m" input duplicates the output: counter is read-modify-write. */
	__asm__ __volatile__(
		LOCK_PREFIX "incl %0"
		:"=m" (v->counter)
		:"m" (v->counter));
}
109
/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.  No return value.
 */
static __inline__ void atomic_dec(atomic_t *v)
{
	/* "m" input duplicates the output: counter is read-modify-write. */
	__asm__ __volatile__(
		LOCK_PREFIX "decl %0"
		:"=m" (v->counter)
		:"m" (v->counter));
}
123
/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
	unsigned char c;

	/* sete captures ZF: c is 1 iff the decremented value hit zero. */
	__asm__ __volatile__(
		LOCK_PREFIX "decl %0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"m" (v->counter) : "memory");
	return c != 0;
}
142
/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic_inc_and_test(atomic_t *v)
{
	unsigned char c;

	/* sete captures ZF: c is 1 iff the incremented value is zero. */
	__asm__ __volatile__(
		LOCK_PREFIX "incl %0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"m" (v->counter) : "memory");
	return c != 0;
}
161
/**
 * atomic_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static __inline__ int atomic_add_negative(int i, atomic_t *v)
{
	unsigned char c;

	/* sets captures SF: c is 1 iff the summed result is negative. */
	__asm__ __volatile__(
		LOCK_PREFIX "addl %2,%0; sets %1"
		:"=m" (v->counter), "=qm" (c)
		:"ir" (i), "m" (v->counter) : "memory");
	return c;
}
181
/**
 * atomic_add_return - add and return
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
	int __i = i;
	/*
	 * xadd exchanges %0 with the memory operand while adding, so
	 * after the asm @i holds the counter's OLD value; old + __i is
	 * the counter's new value.
	 */
	__asm__ __volatile__(
		LOCK_PREFIX "xaddl %0, %1"
		:"+r" (i), "+m" (v->counter)
		: : "memory");
	return i + __i;
}
198
199 static __inline__ int atomic_sub_return(int i, atomic_t *v)
200 {
201 return atomic_add_return(-i,v);
202 }
203
/* Increment/decrement @v and return the resulting (new) value. */
#define atomic_inc_return(v)  (atomic_add_return(1,v))
#define atomic_dec_return(v)  (atomic_sub_return(1,v))
206
/* A 64-bit atomic type */

/* volatile forces a real memory access on every read/write. */
typedef struct { volatile long counter; } atomic64_t;

/* Static initializer, e.g.: static atomic64_t cnt = ATOMIC64_INIT(0); */
#define ATOMIC64_INIT(i)	{ (i) }
212
/**
 * atomic64_read - read atomic64 variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 * Doesn't imply a read memory barrier.  The volatile qualifier on
 * counter guarantees an actual load each time.
 */
#define atomic64_read(v)	((v)->counter)
221
/**
 * atomic64_set - set atomic64 variable
 * @v: pointer to type atomic64_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.  Plain store; a naturally
 * aligned 64-bit store is atomic on x86-64.  No barrier implied.
 */
#define atomic64_set(v,i)	(((v)->counter) = (i))
230
231 /**
232 * atomic64_add - add integer to atomic64 variable
233 * @i: integer value to add
234 * @v: pointer to type atomic64_t
235 *
236 * Atomically adds @i to @v.
237 */
238 static __inline__ void atomic64_add(long i, atomic64_t *v)
239 {
240 __asm__ __volatile__(
241 LOCK_PREFIX "addq %1,%0"
242 :"=m" (v->counter)
243 :"ir" (i), "m" (v->counter));
244 }
245
246 /**
247 * atomic64_sub - subtract the atomic64 variable
248 * @i: integer value to subtract
249 * @v: pointer to type atomic64_t
250 *
251 * Atomically subtracts @i from @v.
252 */
253 static __inline__ void atomic64_sub(long i, atomic64_t *v)
254 {
255 __asm__ __volatile__(
256 LOCK_PREFIX "subq %1,%0"
257 :"=m" (v->counter)
258 :"ir" (i), "m" (v->counter));
259 }
260
261 /**
262 * atomic64_sub_and_test - subtract value from variable and test result
263 * @i: integer value to subtract
264 * @v: pointer to type atomic64_t
265 *
266 * Atomically subtracts @i from @v and returns
267 * true if the result is zero, or false for all
268 * other cases.
269 */
270 static __inline__ int atomic64_sub_and_test(long i, atomic64_t *v)
271 {
272 unsigned char c;
273
274 __asm__ __volatile__(
275 LOCK_PREFIX "subq %2,%0; sete %1"
276 :"=m" (v->counter), "=qm" (c)
277 :"ir" (i), "m" (v->counter) : "memory");
278 return c;
279 }
280
/**
 * atomic64_inc - increment atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1.  No return value.
 */
static __inline__ void atomic64_inc(atomic64_t *v)
{
	/* "m" input duplicates the output: counter is read-modify-write. */
	__asm__ __volatile__(
		LOCK_PREFIX "incq %0"
		:"=m" (v->counter)
		:"m" (v->counter));
}
294
/**
 * atomic64_dec - decrement atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1.  No return value.
 */
static __inline__ void atomic64_dec(atomic64_t *v)
{
	/* "m" input duplicates the output: counter is read-modify-write. */
	__asm__ __volatile__(
		LOCK_PREFIX "decq %0"
		:"=m" (v->counter)
		:"m" (v->counter));
}
308
/**
 * atomic64_dec_and_test - decrement and test
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static __inline__ int atomic64_dec_and_test(atomic64_t *v)
{
	unsigned char c;

	/* sete captures ZF: c is 1 iff the decremented value hit zero. */
	__asm__ __volatile__(
		LOCK_PREFIX "decq %0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"m" (v->counter) : "memory");
	return c != 0;
}
327
/**
 * atomic64_inc_and_test - increment and test
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic64_inc_and_test(atomic64_t *v)
{
	unsigned char c;

	/* sete captures ZF: c is 1 iff the incremented value is zero. */
	__asm__ __volatile__(
		LOCK_PREFIX "incq %0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"m" (v->counter) : "memory");
	return c != 0;
}
346
347 /**
348 * atomic64_add_negative - add and test if negative
349 * @i: integer value to add
350 * @v: pointer to type atomic64_t
351 *
352 * Atomically adds @i to @v and returns true
353 * if the result is negative, or false when
354 * result is greater than or equal to zero.
355 */
356 static __inline__ int atomic64_add_negative(long i, atomic64_t *v)
357 {
358 unsigned char c;
359
360 __asm__ __volatile__(
361 LOCK_PREFIX "addq %2,%0; sets %1"
362 :"=m" (v->counter), "=qm" (c)
363 :"ir" (i), "m" (v->counter) : "memory");
364 return c;
365 }
366
/**
 * atomic64_add_return - add and return
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static __inline__ long atomic64_add_return(long i, atomic64_t *v)
{
	long __i = i;
	/*
	 * xadd exchanges %0 with the memory operand while adding, so
	 * after the asm @i holds the counter's OLD value; old + __i is
	 * the counter's new value.
	 */
	__asm__ __volatile__(
		LOCK_PREFIX "xaddq %0, %1;"
		:"+r" (i), "+m" (v->counter)
		: : "memory");
	return i + __i;
}
383
384 static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
385 {
386 return atomic64_add_return(-i,v);
387 }
388
/* 64-bit increment/decrement returning the new value. */
#define atomic64_inc_return(v)  (atomic64_add_return(1,v))
#define atomic64_dec_return(v)  (atomic64_sub_return(1,v))

/*
 * cmpxchg/xchg on the embedded counter.  cmpxchg returns the value
 * that was in the counter before the operation (== old on success).
 */
#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
397
398 /**
399 * atomic_add_unless - add unless the number is a given value
400 * @v: pointer of type atomic_t
401 * @a: the amount to add to v...
402 * @u: ...unless v is equal to u.
403 *
404 * Atomically adds @a to @v, so long as it was not @u.
405 * Returns non-zero if @v was not @u, and zero otherwise.
406 */
407 static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
408 {
409 int c, old;
410 c = atomic_read(v);
411 for (;;) {
412 if (unlikely(c == (u)))
413 break;
414 old = atomic_cmpxchg((v), c, c + (a));
415 if (likely(old == c))
416 break;
417 c = old;
418 }
419 return c != (u);
420 }
421
422 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
423
424 /**
425 * atomic64_add_unless - add unless the number is a given value
426 * @v: pointer of type atomic64_t
427 * @a: the amount to add to v...
428 * @u: ...unless v is equal to u.
429 *
430 * Atomically adds @a to @v, so long as it was not @u.
431 * Returns non-zero if @v was not @u, and zero otherwise.
432 */
433 static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
434 {
435 long c, old;
436 c = atomic64_read(v);
437 for (;;) {
438 if (unlikely(c == (u)))
439 break;
440 old = atomic64_cmpxchg((v), c, c + (a));
441 if (likely(old == c))
442 break;
443 c = old;
444 }
445 return c != (u);
446 }
447
448 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
449
/* These are x86-specific, used by some header files */

/*
 * Atomically clear the bits of @mask in the 32-bit word at @addr.
 * @addr is parenthesized before dereferencing so that expression
 * arguments like (p + 1) expand correctly (the old "*addr" did not).
 */
#define atomic_clear_mask(mask, addr) \
__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
: : "r" (~(mask)),"m" (*(addr)) : "memory")

/*
 * Atomically set the bits of @mask in the 32-bit word at @addr.
 * @mask is parenthesized before the cast so that compound arguments
 * like a|b are converted as a whole (the old "(unsigned)mask" bound
 * the cast to the first operand only).
 */
#define atomic_set_mask(mask, addr) \
__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
: : "r" ((unsigned)(mask)),"m" (*(addr)) : "memory")
458
/*
 * Atomic operations are already serializing on x86, so these barriers
 * only need to stop compiler reordering (barrier()), not CPU reordering.
 */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()
464
465 #include <asm-generic/atomic.h>
466 #endif
This page took 0.045715 seconds and 5 git commands to generate.