include/asm-parisc/atomic.h
/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/system.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.  prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;

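/* Illustration (not part of the original header): because ATOMIC_HASH()
 * divides the address by L1_CACHE_BYTES before masking, two atomic_ts that
 * happen to sit in the same cacheline also share a lock:
 *
 *	atomic_t a, b;				// assume adjacent, same cacheline
 *	ATOMIC_HASH(&a) == ATOMIC_HASH(&b);	// same __atomic_hash slot
 *
 * Counters on different cachelines generally spread across the
 * ATOMIC_HASH_SIZE spinlocks, limiting lock contention.
 */
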
/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	raw_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	__raw_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	raw_spinlock_t *s = ATOMIC_HASH(l);		\
	__raw_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)


#else
#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif

/* This should get optimized out since it's never called.
** Or get a link error if xchg is used "wrong".
*/
extern void __xchg_called_with_bad_pointer(void);


/* __xchg32/64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __xchg8(char, char *);
extern unsigned long __xchg32(int, int *);
#ifdef CONFIG_64BIT
extern unsigned long __xchg64(unsigned long, unsigned long *);
#endif

/* optimizer better get rid of switch since size is a constant */
static __inline__ unsigned long
__xchg(unsigned long x, __volatile__ void * ptr, int size)
{
	switch(size) {
#ifdef CONFIG_64BIT
	case 8: return __xchg64(x, (unsigned long *) ptr);
#endif
	case 4: return __xchg32((int) x, (int *) ptr);
	case 1: return __xchg8((char) x, (char *) ptr);
	}
	__xchg_called_with_bad_pointer();
	return x;
}


/*
** REVISIT - Abandoned use of LDCW in xchg() for now:
** o need to test sizeof(*ptr) to avoid clearing adjacent bytes
** o and while we are at it, could CONFIG_64BIT code use LDCD too?
**
**	if (__builtin_constant_p(x) && (x == NULL))
**		if (((unsigned long)p & 0xf) == 0)
**			return __ldcw(p);
*/
#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))

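/* Usage sketch (illustrative only, not part of the original header): the
 * operand width is taken from sizeof(*ptr), so plain variables work:
 *
 *	int val = 0, old;
 *	old = xchg(&val, 5);	// val is now 5, old holds the previous 0
 */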

#define __HAVE_ARCH_CMPXCHG 1

/* bug catcher for when unsupported size is used - won't link */
extern void __cmpxchg_called_with_bad_pointer(void);

/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old, unsigned int new_);
extern unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new_);

/* don't worry...optimizer will get rid of most of this */
static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
{
	switch(size) {
#ifdef CONFIG_64BIT
	case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
#endif
	case 4: return __cmpxchg_u32((unsigned int *)ptr, (unsigned int) old, (unsigned int) new_);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						 \
({									 \
	__typeof__(*(ptr)) _o_ = (o);					 \
	__typeof__(*(ptr)) _n_ = (n);					 \
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	 \
				       (unsigned long)_n_, sizeof(*(ptr))); \
})

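/* Usage sketch (illustrative only): a lock-free increment built from
 * cmpxchg(), retrying while another CPU races us; example_inc is a
 * hypothetical helper, not part of this header:
 *
 *	static inline void example_inc(int *p)
 *	{
 *		int old;
 *		do {
 *			old = *p;
 *		} while (cmpxchg(p, old, old + 1) != old);
 *	}
 */
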
/* Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 *
 * Cache-line alignment would conflict with, for example, linux/module.h
 */

typedef struct { volatile int counter; } atomic_t;

/* It's possible to reduce all atomic operations to
 * __atomic_add_return, atomic_set and atomic_read (the latter
 * is there only for consistency).
 */

static __inline__ int __atomic_add_return(int i, atomic_t *v)
{
	int ret;
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	ret = (v->counter += i);

	_atomic_spin_unlock_irqrestore(v, flags);
	return ret;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ int atomic_read(const atomic_t *v)
{
	return v->counter;
}

/* exported interface */
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

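/* Usage sketch (illustrative only): atomic_inc_not_zero() takes a reference
 * only while the count is still non-zero; obj and refcount are hypothetical:
 *
 *	if (!atomic_inc_not_zero(&obj->refcount))
 *		return NULL;	// object already on its way to being freed
 */
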
#define atomic_add(i,v)	((void)(__atomic_add_return( ((int)i), (v))))
#define atomic_sub(i,v)	((void)(__atomic_add_return(-((int)i), (v))))
#define atomic_inc(v)	((void)(__atomic_add_return(   1, (v))))
#define atomic_dec(v)	((void)(__atomic_add_return(  -1, (v))))

#define atomic_add_return(i,v)	(__atomic_add_return( ((int)i), (v)))
#define atomic_sub_return(i,v)	(__atomic_add_return(-((int)i), (v)))
#define atomic_inc_return(v)	(__atomic_add_return(   1, (v)))
#define atomic_dec_return(v)	(__atomic_add_return(  -1, (v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v)	(atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)

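/* Usage sketch (illustrative only): atomic_dec_and_test() reports whether
 * the caller dropped the last reference; obj and free_obj are hypothetical:
 *
 *	if (atomic_dec_and_test(&obj->refcount))
 *		free_obj(obj);	// last reference gone
 */
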
#define ATOMIC_INIT(i)	((atomic_t) { (i) })

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#ifdef CONFIG_64BIT

typedef struct { volatile s64 counter; } atomic64_t;

#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })

static __inline__ s64
__atomic64_add_return(s64 i, atomic64_t *v)
{
	s64 ret;
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	ret = (v->counter += i);

	_atomic_spin_unlock_irqrestore(v, flags);
	return ret;
}

static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ s64
atomic64_read(const atomic64_t *v)
{
	return v->counter;
}

#define atomic64_add(i,v)	((void)(__atomic64_add_return( ((s64)i), (v))))
#define atomic64_sub(i,v)	((void)(__atomic64_add_return(-((s64)i), (v))))
#define atomic64_inc(v)		((void)(__atomic64_add_return(   1, (v))))
#define atomic64_dec(v)		((void)(__atomic64_add_return(  -1, (v))))

#define atomic64_add_return(i,v)	(__atomic64_add_return( ((s64)i), (v)))
#define atomic64_sub_return(i,v)	(__atomic64_add_return(-((s64)i), (v)))
#define atomic64_inc_return(v)		(__atomic64_add_return(   1, (v)))
#define atomic64_dec_return(v)		(__atomic64_add_return(  -1, (v)))

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i), (v)) == 0)

/* exported interface */
#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#endif /* CONFIG_64BIT */

#include <asm-generic/atomic.h>

#endif /* _ASM_PARISC_ATOMIC_H_ */