ARC: refactor atomic inline asm operands with symbolic names
arch/arc/include/asm/atomic.h
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>

#define atomic_read(v)	((v)->counter)

#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i) (((v)->counter) = (i))

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned int val;						\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val] "=&r" (val) /* Early clobber to prevent reg reuse */	\
	: [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \
	  [i] "ir" (i)							\
	: "cc");							\
}

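/*
 * For illustration: ATOMIC_OP(add, +=, add) expands to roughly the
 * following; the llock/scond pair retries until the store-conditional
 * succeeds, so the read-modify-write is atomic:
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned int val;
 *
 *		__asm__ __volatile__(
 *		"1:	llock   %[val], [%[ctr]]	\n"
 *		"	add     %[val], %[val], %[i]	\n"
 *		"	scond   %[val], [%[ctr]]	\n"
 *		"	bnz     1b			\n"
 *		: [val] "=&r" (val)
 *		: [ctr] "r" (&v->counter), [i] "ir" (i)
 *		: "cc");
 *	}
 */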
#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int val;						\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val] "=&r" (val)						\
	: [ctr] "r" (&v->counter),					\
	  [i] "ir" (i)							\
	: "cc");							\
									\
	smp_mb();							\
									\
	return val;							\
}

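/*
 * For illustration: the value-returning variants must act as full
 * barriers per the kernel's atomic API rules, hence the smp_mb() on
 * both sides of the llock/scond loop. A sketch of a typical use
 * (names here are hypothetical):
 *
 *	obj->payload = data;			// plain store
 *	if (atomic_add_return(1, &obj->refs) == 1)
 *		first_user_init(obj);
 *
 * The leading smp_mb() orders the payload store before the llock, and
 * the trailing smp_mb() orders the scond before any later accesses.
 */
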
#else	/* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

 /* violating atomic_xxx API locking protocol in UP for optimization's sake */
#define atomic_set(v, i) (((v)->counter) = (i))

#else

static inline void atomic_set(atomic_t *v, int i)
{
	/*
	 * Independent of hardware support, all of the atomic_xxx() APIs need
	 * to follow the same locking rules to make sure that a "hardware"
	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
	 * sequence.
	 *
	 * Thus atomic_set(), despite being a single insn (and seemingly
	 * atomic), still requires the locking.
	 */
	unsigned long flags;

	atomic_ops_lock(flags);
	v->counter = i;
	atomic_ops_unlock(flags);
}
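
/*
 * Note: atomic_ops_lock()/atomic_ops_unlock() come from <asm/smp.h>;
 * on SMP they are expected to be built on a global irq-safe spinlock,
 * along these lines (a sketch; the actual lock name may differ):
 *
 *	#define atomic_ops_lock(flags)		\
 *		spin_lock_irqsave(&smp_atomic_ops_lock, flags)
 *	#define atomic_ops_unlock(flags)	\
 *		spin_unlock_irqrestore(&smp_atomic_ops_lock, flags)
 */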
#endif

/*
 * Non-hardware-assisted Atomic R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 */

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	atomic_ops_lock(flags);						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long temp;						\
									\
	/*								\
	 * spin lock/unlock provide the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	temp = v->counter;						\
	temp c_op i;							\
	v->counter = temp;						\
	atomic_ops_unlock(flags);					\
									\
	return temp;							\
}

#endif /* !CONFIG_ARC_HAS_LLSC */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
ATOMIC_OP(and, &=, and)
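
/*
 * The three instantiations above generate atomic_add(), atomic_add_return(),
 * atomic_sub(), atomic_sub_return() and atomic_and(); "and" goes only
 * through ATOMIC_OP, so no atomic_and_return() variant is emitted.
 */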

#define atomic_clear_mask(mask, v) atomic_and(~(mask), (v))
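
/*
 * For illustration, clearing flag bits atomically (hypothetical flag):
 *
 *	#define MY_FLAG_BUSY	0x1
 *	atomic_clear_mask(MY_FLAG_BUSY, &state);  // state.counter &= ~0x1
 */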

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not @u.
 * Returns the old value of @v.
 */
#define __atomic_add_unless(v, a, u)					\
({									\
	int c, old;							\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	c = atomic_read(v);						\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
		c = old;						\
									\
	smp_mb();							\
									\
	c;								\
})
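
/*
 * For illustration: __atomic_add_unless() is the building block that
 * <linux/atomic.h> wraps into atomic_add_unless(), roughly:
 *
 *	// non-zero iff the add happened, i.e. the old value was not u
 *	#define atomic_add_unless(v, a, u)	\
 *		(__atomic_add_unless((v), (a), (u)) != (u))
 */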

#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)

#define atomic_inc(v)			atomic_add(1, v)
#define atomic_dec(v)			atomic_sub(1, v)

#define atomic_inc_and_test(v)		(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i, v)	(atomic_add_return(i, v) < 0)

#define ATOMIC_INIT(i)			{ (i) }

#include <asm-generic/atomic64.h>

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_ARC_ATOMIC_H */