arm64: cmpxchg: patch in lse instructions when supported by the CPU
arch/arm64/include/asm/atomic_lse.h

/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASM_ATOMIC_LSE_H
#define __ASM_ATOMIC_LSE_H

#ifndef __ARM64_IN_ATOMIC_IMPL
#error "please don't include this file directly"
#endif

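/*
 * Every operation below comes in two flavours: an out-of-line LL/SC
 * fallback, reached through __LL_SC_CALL (a "bl" to the ll/sc
 * implementation, which is why x30 shows up in the clobber lists), and an
 * inline LSE instruction sequence.  ARM64_LSE_ATOMIC_INSN selects between
 * the two at boot time via the alternatives framework, patching in the LSE
 * form when the CPU advertises the Large System Extensions; explicit nops
 * pad the shorter side so both alternatives are the same size.
 */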
#define __LL_SC_ATOMIC(op) __LL_SC_CALL(atomic_##op)

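/*
 * The ST<op> forms (stclr, stset, steor, stadd) discard the old value and
 * imply no ordering, which matches the non-value-returning atomics below:
 * no acquire/release suffix and no "memory" clobber are needed.
 */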
static inline void atomic_andnot(int i, atomic_t *v)
{
        register int w0 asm ("w0") = i;
        register atomic_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(andnot),
        " stclr %w[i], %[v]\n")
        : [i] "+r" (w0), [v] "+Q" (v->counter)
        : "r" (x1)
        : "x30");
}

static inline void atomic_or(int i, atomic_t *v)
{
        register int w0 asm ("w0") = i;
        register atomic_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(or),
        " stset %w[i], %[v]\n")
        : [i] "+r" (w0), [v] "+Q" (v->counter)
        : "r" (x1)
        : "x30");
}

static inline void atomic_xor(int i, atomic_t *v)
{
        register int w0 asm ("w0") = i;
        register atomic_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(xor),
        " steor %w[i], %[v]\n")
        : [i] "+r" (w0), [v] "+Q" (v->counter)
        : "r" (x1)
        : "x30");
}

static inline void atomic_add(int i, atomic_t *v)
{
        register int w0 asm ("w0") = i;
        register atomic_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(add),
        " stadd %w[i], %[v]\n")
        : [i] "+r" (w0), [v] "+Q" (v->counter)
        : "r" (x1)
        : "x30");
}

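/*
 * LDADDAL atomically adds %w[i] to *v with acquire+release ordering and
 * returns the *old* value in w30, so the new value handed back to the
 * caller has to be recomputed with a separate add.
 */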
static inline int atomic_add_return(int i, atomic_t *v)
{
        register int w0 asm ("w0") = i;
        register atomic_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        " nop\n"
        __LL_SC_ATOMIC(add_return),
        /* LSE atomics */
        " ldaddal %w[i], w30, %[v]\n"
        " add %w[i], %w[i], w30")
        : [i] "+r" (w0), [v] "+Q" (v->counter)
        : "r" (x1)
        : "x30", "memory");

        return w0;
}

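/* LSE has no atomic AND: invert the mask and use the atomic bit-clear. */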
static inline void atomic_and(int i, atomic_t *v)
{
        register int w0 asm ("w0") = i;
        register atomic_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        " nop\n"
        __LL_SC_ATOMIC(and),
        /* LSE atomics */
        " mvn %w[i], %w[i]\n"
        " stclr %w[i], %[v]")
        : [i] "+r" (w0), [v] "+Q" (v->counter)
        : "r" (x1)
        : "x30");
}

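/* Likewise there is no atomic subtract: negate the operand and add. */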
static inline void atomic_sub(int i, atomic_t *v)
{
        register int w0 asm ("w0") = i;
        register atomic_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        " nop\n"
        __LL_SC_ATOMIC(sub),
        /* LSE atomics */
        " neg %w[i], %w[i]\n"
        " stadd %w[i], %[v]")
        : [i] "+r" (w0), [v] "+Q" (v->counter)
        : "r" (x1)
        : "x30");
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
        register int w0 asm ("w0") = i;
        register atomic_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        " nop\n"
        __LL_SC_ATOMIC(sub_return)
        " nop",
        /* LSE atomics */
        " neg %w[i], %w[i]\n"
        " ldaddal %w[i], w30, %[v]\n"
        " add %w[i], %w[i], w30")
        : [i] "+r" (w0), [v] "+Q" (v->counter)
        : "r" (x1)
        : "x30", "memory");

        return w0;
}

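/*
 * CASAL compares the value at %[v] with its first source register and, on
 * a match, stores the second, with acquire+release ordering; either way
 * the value observed in memory is written back into the first register.
 * w30 is used as a scratch copy of 'old' so that the observed value can
 * then be moved into the return register.
 */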
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
        register unsigned long x0 asm ("x0") = (unsigned long)ptr;
        register int w1 asm ("w1") = old;
        register int w2 asm ("w2") = new;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        " nop\n"
        __LL_SC_ATOMIC(cmpxchg)
        " nop",
        /* LSE atomics */
        " mov w30, %w[old]\n"
        " casal w30, %w[new], %[v]\n"
        " mov %w[ret], w30")
        : [ret] "+r" (x0), [v] "+Q" (ptr->counter)
        : [old] "r" (w1), [new] "r" (w2)
        : "x30", "cc", "memory");

        return x0;
}

#undef __LL_SC_ATOMIC

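/* The atomic64_* variants mirror the 32-bit ones above, using X registers. */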
#define __LL_SC_ATOMIC64(op) __LL_SC_CALL(atomic64_##op)

static inline void atomic64_andnot(long i, atomic64_t *v)
{
        register long x0 asm ("x0") = i;
        register atomic64_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(andnot),
        " stclr %[i], %[v]\n")
        : [i] "+r" (x0), [v] "+Q" (v->counter)
        : "r" (x1)
        : "x30");
}

static inline void atomic64_or(long i, atomic64_t *v)
{
        register long x0 asm ("x0") = i;
        register atomic64_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(or),
        " stset %[i], %[v]\n")
        : [i] "+r" (x0), [v] "+Q" (v->counter)
        : "r" (x1)
        : "x30");
}

static inline void atomic64_xor(long i, atomic64_t *v)
{
        register long x0 asm ("x0") = i;
        register atomic64_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(xor),
        " steor %[i], %[v]\n")
        : [i] "+r" (x0), [v] "+Q" (v->counter)
        : "r" (x1)
        : "x30");
}

static inline void atomic64_add(long i, atomic64_t *v)
{
        register long x0 asm ("x0") = i;
        register atomic64_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(add),
        " stadd %[i], %[v]\n")
        : [i] "+r" (x0), [v] "+Q" (v->counter)
        : "r" (x1)
        : "x30");
}

static inline long atomic64_add_return(long i, atomic64_t *v)
{
        register long x0 asm ("x0") = i;
        register atomic64_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        " nop\n"
        __LL_SC_ATOMIC64(add_return),
        /* LSE atomics */
        " ldaddal %[i], x30, %[v]\n"
        " add %[i], %[i], x30")
        : [i] "+r" (x0), [v] "+Q" (v->counter)
        : "r" (x1)
        : "x30", "memory");

        return x0;
}

static inline void atomic64_and(long i, atomic64_t *v)
{
        register long x0 asm ("x0") = i;
        register atomic64_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        " nop\n"
        __LL_SC_ATOMIC64(and),
        /* LSE atomics */
        " mvn %[i], %[i]\n"
        " stclr %[i], %[v]")
        : [i] "+r" (x0), [v] "+Q" (v->counter)
        : "r" (x1)
        : "x30");
}

static inline void atomic64_sub(long i, atomic64_t *v)
{
        register long x0 asm ("x0") = i;
        register atomic64_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        " nop\n"
        __LL_SC_ATOMIC64(sub),
        /* LSE atomics */
        " neg %[i], %[i]\n"
        " stadd %[i], %[v]")
        : [i] "+r" (x0), [v] "+Q" (v->counter)
        : "r" (x1)
        : "x30");
}

static inline long atomic64_sub_return(long i, atomic64_t *v)
{
        register long x0 asm ("x0") = i;
        register atomic64_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        " nop\n"
        __LL_SC_ATOMIC64(sub_return)
        " nop",
        /* LSE atomics */
        " neg %[i], %[i]\n"
        " ldaddal %[i], x30, %[v]\n"
        " add %[i], %[i], x30")
        : [i] "+r" (x0), [v] "+Q" (v->counter)
        : "r" (x1)
        : "x30", "memory");

        return x0;
}

static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
{
        register unsigned long x0 asm ("x0") = (unsigned long)ptr;
        register long x1 asm ("x1") = old;
        register long x2 asm ("x2") = new;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        " nop\n"
        __LL_SC_ATOMIC64(cmpxchg)
        " nop",
        /* LSE atomics */
        " mov x30, %[old]\n"
        " casal x30, %[new], %[v]\n"
        " mov %[ret], x30")
        : [ret] "+r" (x0), [v] "+Q" (ptr->counter)
        : [old] "r" (x1), [new] "r" (x2)
        : "x30", "cc", "memory");

        return x0;
}

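/*
 * There is no single LSE instruction for dec_if_positive, so the LSE side
 * is a CAS loop: load the counter, compute the decremented value, bail out
 * to 2: if it would go negative, then try to CAS it in.  After CASAL, x30
 * holds the value that was actually observed in memory; the two subtracts
 * reduce it to (observed - expected), so a non-zero result means another
 * CPU updated the counter first and the loop must retry.
 */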
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
        register long x0 asm ("x0") = (long)v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        " nop\n"
        __LL_SC_ATOMIC64(dec_if_positive)
        " nop\n"
        " nop\n"
        " nop\n"
        " nop\n"
        " nop",
        /* LSE atomics */
        "1: ldr x30, %[v]\n"
        " subs %[ret], x30, #1\n"
        " b.mi 2f\n"
        " casal x30, %[ret], %[v]\n"
        " sub x30, x30, #1\n"
        " sub x30, x30, %[ret]\n"
        " cbnz x30, 1b\n"
        "2:")
        : [ret] "+&r" (x0), [v] "+Q" (v->counter)
        :
        : "x30", "cc", "memory");

        return x0;
}

#undef __LL_SC_ATOMIC64

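/*
 * __CMPXCHG_CASE generates the byte, halfword, word and doubleword cmpxchg
 * helpers, each in a relaxed and a full-barrier ("mb") flavour.  The 'w'/'x'
 * width and the size suffix are pasted straight into the CAS mnemonic and
 * register names, e.g. __CMPXCHG_CASE(w, b, mb_1, al, "memory") yields
 * __cmpxchg_case_mb_1() built around "casalb w30, %w[new], %[v]".
 */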
#define __LL_SC_CMPXCHG(op) __LL_SC_CALL(__cmpxchg_case_##op)

#define __CMPXCHG_CASE(w, sz, name, mb, cl...) \
static inline unsigned long __cmpxchg_case_##name(volatile void *ptr, \
                                                  unsigned long old, \
                                                  unsigned long new) \
{ \
        register unsigned long x0 asm ("x0") = (unsigned long)ptr; \
        register unsigned long x1 asm ("x1") = old; \
        register unsigned long x2 asm ("x2") = new; \
 \
        asm volatile(ARM64_LSE_ATOMIC_INSN( \
        /* LL/SC */ \
        "nop\n" \
        __LL_SC_CMPXCHG(name) \
        "nop", \
        /* LSE atomics */ \
        " mov " #w "30, %" #w "[old]\n" \
        " cas" #mb #sz "\t" #w "30, %" #w "[new], %[v]\n" \
        " mov %" #w "[ret], " #w "30") \
        : [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr) \
        : [old] "r" (x1), [new] "r" (x2) \
        : "x30" , ##cl); \
 \
        return x0; \
}

__CMPXCHG_CASE(w, b, 1, )
__CMPXCHG_CASE(w, h, 2, )
__CMPXCHG_CASE(w, , 4, )
__CMPXCHG_CASE(x, , 8, )
__CMPXCHG_CASE(w, b, mb_1, al, "memory")
__CMPXCHG_CASE(w, h, mb_2, al, "memory")
__CMPXCHG_CASE(w, , mb_4, al, "memory")
__CMPXCHG_CASE(x, , mb_8, al, "memory")

#undef __LL_SC_CMPXCHG
#undef __CMPXCHG_CASE

#endif  /* __ASM_ATOMIC_LSE_H */