#ifndef __ASM_ARM_CMPXCHG_H
#define __ASM_ARM_CMPXCHG_H

#include <linux/irqflags.h>
#include <linux/prefetch.h>
#include <asm/barrier.h>

#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally. This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects. There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#define swp_is_buggy
#endif

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;
#endif

	smp_mb();
	prefetchw((const void *)ptr);

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
	case 1:
		asm volatile("@ __xchg1\n"
		"1:	ldrexb	%0, [%3]\n"
		"	strexb	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@ __xchg4\n"
		"1:	ldrex	%0, [%3]\n"
		"	strex	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
	case 1:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		raw_local_irq_restore(flags);
		break;

	case 4:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		raw_local_irq_restore(flags);
		break;
#else
	case 1:
		asm volatile("@ __xchg1\n"
		"	swpb	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@ __xchg4\n"
		"	swp	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	default:
		__bad_xchg(ptr, size);
		ret = 0;
		break;
	}
	smp_mb();

	return ret;
}

#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
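
/*
 * Illustrative sketch, not part of the original header: a minimal
 * "claim a flag" helper built on xchg().  The helper name and the flag
 * parameter are hypothetical.  xchg() returns the value previously
 * stored, so the caller that reads back 0 is the one that won the claim.
 */
static inline int example_try_claim(unsigned long *flag)
{
	/* Atomically store 1 and fetch the old value in one step. */
	return xchg(flag, 1UL) == 0;
}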

#include <asm-generic/cmpxchg-local.h>

#if __LINUX_ARM_ARCH__ < 6
/* min ARCH < ARMv6 */

#ifdef CONFIG_SMP
#error "SMP is not supported on this platform"
#endif

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n) \
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
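
/*
 * Illustrative sketch, not part of the original header: cmpxchg_local()
 * is only atomic with respect to the current CPU (e.g. against an
 * interrupt handler on the same core), which is sufficient here since
 * SMP is ruled out above.  The helper name and counter are hypothetical.
 */
static inline unsigned long example_local_inc(unsigned long *ctr)
{
	unsigned long old;

	/* Retry if an interrupt modified *ctr between the load and the CAS. */
	do {
		old = *ctr;
	} while (cmpxchg_local(ctr, old, old + 1) != old);

	return old + 1;
}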

#ifndef CONFIG_SMP
#include <asm-generic/cmpxchg.h>
#endif

#else /* min ARCH >= ARMv6 */

extern void __bad_cmpxchg(volatile void *ptr, int size);

/*
 * cmpxchg only supports 32-bit operands on ARMv6.
 */

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long oldval, res;

	prefetchw((const void *)ptr);

	switch (size) {
#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */
	case 1:
		do {
			asm volatile("@ __cmpxchg1\n"
			"	ldrexb	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexbeq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	case 2:
		do {
			asm volatile("@ __cmpxchg2\n"
			"	ldrexh	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexheq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
#endif
	case 4:
		do {
			asm volatile("@ __cmpxchg4\n"
			"	ldrex	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexeq	%0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	default:
		__bad_cmpxchg(ptr, size);
		oldval = 0;
	}

	return oldval;
}

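/*
 * __cmpxchg_mb() brackets the relaxed __cmpxchg() with smp_mb() so the
 * operation acts as a full memory barrier, as callers of cmpxchg()
 * expect.
 */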
static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
					 unsigned long new, int size)
{
	unsigned long ret;

	smp_mb();
	ret = __cmpxchg(ptr, old, new, size);
	smp_mb();

	return ret;
}

#define cmpxchg(ptr,o,n) \
	((__typeof__(*(ptr)))__cmpxchg_mb((ptr), \
					  (unsigned long)(o), \
					  (unsigned long)(n), \
					  sizeof(*(ptr))))
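
/*
 * Illustrative sketch, not part of the original header: the canonical
 * compare-and-swap retry loop built on cmpxchg().  The helper name and
 * counter are hypothetical.  cmpxchg() returns the value it observed at
 * *ptr; the store happened only if that equals the expected old value.
 */
static inline unsigned int example_atomic_inc(unsigned int *ctr)
{
	unsigned int old;

	/* Retry if another CPU changed *ctr between the load and the CAS. */
	do {
		old = *ctr;
	} while (cmpxchg(ctr, old, old + 1) != old);

	return old + 1;
}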
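/*
 * Plain ARMv6 only has word-sized LDREX/STREX; the byte and halfword
 * variants (LDREXB/LDREXH) arrived with ARMv6K.  So on a CONFIG_CPU_V6
 * build, 1- and 2-byte local cmpxchg falls back to the generic
 * IRQ-disabling implementation.
 */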
static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	unsigned long ret;

	switch (size) {
#ifdef CONFIG_CPU_V6	/* min ARCH == ARMv6 */
	case 1:
	case 2:
		ret = __cmpxchg_local_generic(ptr, old, new, size);
		break;
#endif
	default:
		ret = __cmpxchg(ptr, old, new, size);
	}

	return ret;
}

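/*
 * 64-bit compare-and-swap using the LDREXD/STREXD exclusive doubleword
 * pair.  The %H operand modifier names the high register of a 64-bit
 * operand's register pair, so %1/%H1 together hold the full value.
 */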
static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
					     unsigned long long old,
					     unsigned long long new)
{
	unsigned long long oldval;
	unsigned long res;

	prefetchw(ptr);

	__asm__ __volatile__(
"1:	ldrexd		%1, %H1, [%3]\n"
"	teq		%1, %4\n"
"	teqeq		%H1, %H4\n"
"	bne		2f\n"
"	strexd		%0, %5, %H5, [%3]\n"
"	teq		%0, #0\n"
"	bne		1b\n"
"2:"
	: "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
	: "r" (ptr), "r" (old), "r" (new)
	: "cc");

	return oldval;
}

static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr,
						unsigned long long old,
						unsigned long long new)
{
	unsigned long long ret;

	smp_mb();
	ret = __cmpxchg64(ptr, old, new);
	smp_mb();

	return ret;
}

#define cmpxchg_local(ptr,o,n) \
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), \
					     (unsigned long)(o), \
					     (unsigned long)(n), \
					     sizeof(*(ptr))))

#define cmpxchg64(ptr, o, n) \
	((__typeof__(*(ptr)))__cmpxchg64_mb((ptr), \
					    (unsigned long long)(o), \
					    (unsigned long long)(n)))

#define cmpxchg64_relaxed(ptr, o, n) \
	((__typeof__(*(ptr)))__cmpxchg64((ptr), \
					 (unsigned long long)(o), \
					 (unsigned long long)(n)))

#define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n))
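
/*
 * Illustrative sketch, not part of the original header: a 64-bit
 * read-modify-write built on cmpxchg64(), usable even on a 32-bit core
 * because LDREXD/STREXD make the doubleword access atomic.  The helper
 * name and parameters are hypothetical.
 */
static inline unsigned long long example_add64(unsigned long long *v,
					       unsigned long long inc)
{
	unsigned long long old;

	/* Retry until the doubleword CAS observes an unchanged value. */
	do {
		old = *v;
	} while (cmpxchg64(v, old, old + inc) != old);

	return old + inc;
}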

#endif	/* __LINUX_ARM_ARCH__ >= 6 */

#endif	/* __ASM_ARM_CMPXCHG_H */