sh: Fixup movli.l/movco.l atomic ops for gcc4.
include/asm-sh/atomic.h
#ifndef __ASM_SH_ATOMIC_H
#define __ASM_SH_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc..
 *
 */

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	( (atomic_t) { (i) } )

#define atomic_read(v)		((v)->counter)
#define atomic_set(v,i)		((v)->counter = (i))

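/*
 * Illustrative usage (not part of the original header): declare and
 * initialise a counter with ATOMIC_INIT(), then access it through the
 * helpers above. The name nr_events is hypothetical.
 *
 *	static atomic_t nr_events = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_events, 10);
 *	printk("events: %d\n", atomic_read(&nr_events));
 */
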
#include <linux/compiler.h>
#include <asm/system.h>

/*
 * On SH-4A the movli.l/movco.l pair forms a load-locked/store-conditional
 * retry loop: movco.l clears the T bit if the store fails, in which case
 * we branch back to the movli.l and restart the operation.
 */
static inline void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_CPU_SH4A
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_add	\n"
"	add	%1, %0				\n"
"	movco.l	%0, @%2				\n"
"	bf	1b				\n"
	: "=&z" (tmp)
	: "r" (i), "r" (&v->counter)
	: "t");
#else
	unsigned long flags;

	local_irq_save(flags);
	*(long *)v += i;
	local_irq_restore(flags);
#endif
}
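
/*
 * Sketch of what the SH-4A path above does, expressed as C pseudocode.
 * __load_locked()/__store_cond() are hypothetical stand-ins for
 * movli.l/movco.l and are not real kernel interfaces.
 *
 *	do {
 *		tmp = __load_locked(&v->counter);
 *		tmp += i;
 *	} while (!__store_cond(&v->counter, tmp));
 */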

static inline void atomic_sub(int i, atomic_t *v)
{
#ifdef CONFIG_CPU_SH4A
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_sub	\n"
"	sub	%1, %0				\n"
"	movco.l	%0, @%2				\n"
"	bf	1b				\n"
	: "=&z" (tmp)
	: "r" (i), "r" (&v->counter)
	: "t");
#else
	unsigned long flags;

	local_irq_save(flags);
	*(long *)v -= i;
	local_irq_restore(flags);
#endif
}

/*
 * SH-4A note:
 *
 * We basically get atomic_xxx_return() for free compared with
 * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
 * encoding, so the retval is automatically set without having to
 * do any special work.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long temp;

#ifdef CONFIG_CPU_SH4A
	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_add_return	\n"
"	add	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
"	synco						\n"
	: "=&z" (temp)
	: "r" (i), "r" (&v->counter)
	: "t");
#else
	unsigned long flags;

	local_irq_save(flags);
	temp = *(long *)v;
	temp += i;
	*(long *)v = temp;
	local_irq_restore(flags);
#endif

	return temp;
}
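
/*
 * Example (illustrative, not from the original file): since the value
 * written by movco.l is also the return value, the *_return variants
 * can test the updated counter directly. active_users and
 * first_user_init() are hypothetical names.
 *
 *	if (atomic_add_return(1, &active_users) == 1)
 *		first_user_init();
 */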

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long temp;

#ifdef CONFIG_CPU_SH4A
	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_sub_return	\n"
"	sub	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
"	synco						\n"
	: "=&z" (temp)
	: "r" (i), "r" (&v->counter)
	: "t");
#else
	unsigned long flags;

	local_irq_save(flags);
	temp = *(long *)v;
	temp -= i;
	*(long *)v = temp;
	local_irq_restore(flags);
#endif

	return temp;
}

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic_inc_return(v) atomic_add_return(1,(v))

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)

#define atomic_inc(v) atomic_add(1,(v))
#define atomic_dec(v) atomic_sub(1,(v))

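/*
 * Typical use of the test macros above (illustrative, not part of the
 * original header): drop a reference and free the object once the count
 * hits zero. obj and its refcnt field are hypothetical.
 *
 *	if (atomic_dec_and_test(&obj->refcnt))
 *		kfree(obj);
 */
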
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	local_irq_restore(flags);

	return ret;
}
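
/*
 * Illustrative cmpxchg() loop (not from the original file): apply an
 * arbitrary update to the counter, retrying if it changed underneath us.
 *
 *	int old, new;
 *
 *	do {
 *		old = atomic_read(v);
 *		new = old * 2;
 *	} while (atomic_cmpxchg(v, old, new) != old);
 */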

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (ret != u)
		v->counter += a;
	local_irq_restore(flags);

	return ret != u;
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

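/*
 * Illustrative use (not part of the original header): take a reference
 * only while the object is still live. obj and refcnt are hypothetical.
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;
 */
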
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
#ifdef CONFIG_CPU_SH4A
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_clear_mask	\n"
"	and	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
	: "=&z" (tmp)
	: "r" (~mask), "r" (&v->counter)
	: "t");
#else
	unsigned long flags;

	local_irq_save(flags);
	*(long *)v &= ~mask;
	local_irq_restore(flags);
#endif
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
#ifdef CONFIG_CPU_SH4A
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_set_mask	\n"
"	or	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
	: "=&z" (tmp)
	: "r" (mask), "r" (&v->counter)
	: "t");
#else
	unsigned long flags;

	local_irq_save(flags);
	*(long *)v |= mask;
	local_irq_restore(flags);
#endif
}
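
/*
 * Illustrative use of the mask helpers above (not from the original
 * file). MYDRV_FLAG_BUSY and state are hypothetical.
 *
 *	atomic_set_mask(MYDRV_FLAG_BUSY, &state);
 *	...
 *	atomic_clear_mask(MYDRV_FLAG_BUSY, &state);
 */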

/* Atomic operations are already serializing on SH */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#include <asm-generic/atomic.h>
#endif /* __ASM_SH_ATOMIC_H */