/*
 * include/asm-xtensa/atomic.h
 *
 * Atomic operations that C can't guarantee us.  Useful for resource counting.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2008 Tensilica Inc.
 */

#ifndef _XTENSA_ATOMIC_H
#define _XTENSA_ATOMIC_H

#include <linux/stringify.h>
#include <linux/types.h>

#ifdef __KERNEL__
#include <asm/processor.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i)	{ (i) }

/*
 * This Xtensa implementation assumes that the right mechanism
 * for exclusion is locking interrupts to level EXCM_LEVEL.
 *
 * Locking interrupts looks like this:
 *
 *    rsil a15, TOPLEVEL
 *    <code>
 *    wsr  a15, PS
 *    rsync
 *
 * Note that a15 is used here because the register allocation
 * done by the compiler is not guaranteed, and a window overflow
 * must not occur between the rsil and wsr instructions. By using
 * a15 in the rsil, the machine is guaranteed to be in a state
 * where no register reference will cause an overflow.
 */

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v)		ACCESS_ONCE((v)->counter)

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v,i)		((v)->counter = (i))

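/*
 * Illustrative usage sketch (not part of this header; the variable and
 * function names are hypothetical):
 *
 *	static atomic_t example_count = ATOMIC_INIT(0);
 *
 *	static void example(void)
 *	{
 *		atomic_set(&example_count, 5);
 *		if (atomic_read(&example_count) == 5)
 *			atomic_inc(&example_count);
 *	}
 *
 * atomic_read() and atomic_set() compile to a plain load and store;
 * only the read-modify-write operations below need the machinery that
 * follows.
 */
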
#if XCHAL_HAVE_S32C1I
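/*
 * S32C1I ("store 32-bit compare conditional") is an atomic
 * compare-and-swap: it stores the register operand only if the word in
 * memory equals the SCOMPARE1 special register, and in either case the
 * register receives the previous memory value. The macros below wrap it
 * in the usual load/compute/compare-and-swap retry loop: if another CPU
 * changed the word between the l32i and the s32c1i, the bne branches
 * back to label 1 to reload and retry.
 */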
#define ATOMIC_OP(op)						\
static inline void atomic_##op(int i, atomic_t * v)		\
{								\
	unsigned long tmp;					\
	int result;						\
								\
	__asm__ __volatile__(					\
			"1:     l32i    %1, %3, 0\n"		\
			"       wsr     %1, scompare1\n"	\
			"       " #op " %0, %1, %2\n"		\
			"       s32c1i  %0, %3, 0\n"		\
			"       bne     %0, %1, 1b\n"		\
			: "=&a" (result), "=&a" (tmp)		\
			: "a" (i), "a" (v)			\
			: "memory"				\
			);					\
}

#define ATOMIC_OP_RETURN(op)					\
static inline int atomic_##op##_return(int i, atomic_t * v)	\
{								\
	unsigned long tmp;					\
	int result;						\
								\
	__asm__ __volatile__(					\
			"1:     l32i    %1, %3, 0\n"		\
			"       wsr     %1, scompare1\n"	\
			"       " #op " %0, %1, %2\n"		\
			"       s32c1i  %0, %3, 0\n"		\
			"       bne     %0, %1, 1b\n"		\
			"       " #op " %0, %0, %2\n"		\
			: "=&a" (result), "=&a" (tmp)		\
			: "a" (i), "a" (v)			\
			: "memory"				\
			);					\
								\
	return result;						\
}

#else /* XCHAL_HAVE_S32C1I */

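/*
 * Fallback for cores without S32C1I: make the read-modify-write
 * sequence atomic by raising the interrupt level around it (see the
 * comment at the top of this file). This protects only against
 * interrupts on the local CPU, which should suffice here since SMP
 * configurations are expected to provide S32C1I.
 */
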
#define ATOMIC_OP(op)						\
static inline void atomic_##op(int i, atomic_t * v)		\
{								\
	unsigned int vval;					\
								\
	__asm__ __volatile__(					\
			"       rsil    a15, "__stringify(TOPLEVEL)"\n" \
			"       l32i    %0, %2, 0\n"		\
			"       " #op " %0, %0, %1\n"		\
			"       s32i    %0, %2, 0\n"		\
			"       wsr     a15, ps\n"		\
			"       rsync\n"			\
			: "=&a" (vval)				\
			: "a" (i), "a" (v)			\
			: "a15", "memory"			\
			);					\
}

#define ATOMIC_OP_RETURN(op)					\
static inline int atomic_##op##_return(int i, atomic_t * v)	\
{								\
	unsigned int vval;					\
								\
	__asm__ __volatile__(					\
			"       rsil    a15, "__stringify(TOPLEVEL)"\n" \
			"       l32i    %0, %2, 0\n"		\
			"       " #op " %0, %0, %1\n"		\
			"       s32i    %0, %2, 0\n"		\
			"       wsr     a15, ps\n"		\
			"       rsync\n"			\
			: "=&a" (vval)				\
			: "a" (i), "a" (v)			\
			: "a15", "memory"			\
			);					\
								\
	return vval;						\
}

#endif /* XCHAL_HAVE_S32C1I */

#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
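
/*
 * The instantiations above generate atomic_add(), atomic_sub(),
 * atomic_add_return() and atomic_sub_return(). Only ATOMIC_OP() is
 * instantiated for and/or/xor, so no *_return variants of the bitwise
 * operations are generated here.
 */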

/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
#define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0)

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
#define atomic_inc(v) atomic_add(1,(v))

/**
 * atomic_inc_return - increment atomic variable and return result
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1 and returns the new value.
 */
#define atomic_inc_return(v) atomic_add_return(1,(v))

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
#define atomic_dec(v) atomic_sub(1,(v))

/**
 * atomic_dec_return - decrement atomic variable and return result
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and returns the new value.
 */
#define atomic_dec_return(v) atomic_sub_return(1,(v))

/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
#define atomic_dec_and_test(v) (atomic_sub_return(1,(v)) == 0)
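
/*
 * The classic use of atomic_dec_and_test() is reference-count release
 * (illustrative sketch; the struct and function are hypothetical):
 *
 *	struct example_obj {
 *		atomic_t refcount;
 *	};
 *
 *	static void example_put(struct example_obj *obj)
 *	{
 *		if (atomic_dec_and_test(&obj->refcount))
 *			kfree(obj);
 *	}
 *
 * Exactly one caller observes the transition to zero, so exactly one
 * caller frees the object.
 */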

/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_add_return(1,(v)) == 0)

/**
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
#define atomic_add_negative(i,v) (atomic_add_return((i),(v)) < 0)

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

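/*
 * Both helpers forward to the generic cmpxchg()/xchg() from
 * <asm/cmpxchg.h> included above, which select an S32C1I-based or an
 * interrupt-disabling implementation in the same way as the macros
 * above.
 */
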
/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, as long as @v was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		/* Bail out if the excluded value has been reached. */
		if (unlikely(c == u))
			break;
		/* Try to install c + a; another CPU may race with us. */
		old = atomic_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		/* Lost the race: retry with the value actually observed. */
		c = old;
	}
	return c;
}
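
/*
 * Sketch of a typical consumer (the wrapper below is hypothetical; the
 * generic kernel atomic_inc_not_zero() is built on this helper in the
 * same way):
 *
 *	static inline int example_inc_not_zero(atomic_t *v)
 *	{
 *		return __atomic_add_unless(v, 1, 0) != 0;
 *	}
 *
 * This takes a reference only if the count has not already dropped
 * to zero.
 */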

#endif /* __KERNEL__ */

#endif /* _XTENSA_ATOMIC_H */