/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASM_ATOMIC_LSE_H
#define __ASM_ATOMIC_LSE_H

#ifndef __ARM64_IN_ATOMIC_IMPL
#error "please don't include this file directly"
#endif

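/*
 * Each operation below is built from two alternative instruction
 * sequences: a call to the out-of-line LL/SC implementation (via
 * __LL_SC_CALL) and the equivalent inline LSE instruction(s).
 * ARM64_LSE_ATOMIC_INSN, provided by the LSE support header, selects
 * one of the two at boot through the alternatives framework, depending
 * on whether the CPU implements the ARMv8.1 LSE atomics.
 */
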
#define __LL_SC_ATOMIC(op)	__LL_SC_CALL(atomic_##op)

static inline void atomic_andnot(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(andnot),
	" stclr %w[i], %[v]\n")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

static inline void atomic_or(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(or),
	" stset %w[i], %[v]\n")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

static inline void atomic_xor(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(xor),
	" steor %w[i], %[v]\n")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

static inline void atomic_add(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(add),
	" stadd %w[i], %[v]\n")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

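/*
 * Unlike the store-only ops above, the *_return variants need the old
 * value back: LDADDAL both adds and returns the previous contents, and
 * the final result is then recomputed in the register holding 'i'.
 * w30/x30 serves as the scratch register because the out-of-line LL/SC
 * call clobbers the link register in either case; the "memory" clobber
 * reflects the full-barrier semantics expected of value-returning
 * atomics.
 */
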
static inline int atomic_add_return(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	" nop\n"
	__LL_SC_ATOMIC(add_return),
	/* LSE atomics */
	" ldaddal %w[i], w30, %[v]\n"
	" add %w[i], %w[i], w30")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30", "memory");

	return w0;
}

static inline void atomic_and(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	" nop\n"
	__LL_SC_ATOMIC(and),
	/* LSE atomics */
	" mvn %w[i], %w[i]\n"
	" stclr %w[i], %[v]")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

static inline void atomic_sub(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	" nop\n"
	__LL_SC_ATOMIC(sub),
	/* LSE atomics */
	" neg %w[i], %w[i]\n"
	" stadd %w[i], %[v]")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	" nop\n"
	__LL_SC_ATOMIC(sub_return)
	" nop",
	/* LSE atomics */
	" neg %w[i], %w[i]\n"
	" ldaddal %w[i], w30, %[v]\n"
	" add %w[i], %w[i], w30")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30", "memory");

	return w0;
}

#undef __LL_SC_ATOMIC

#define __LL_SC_ATOMIC64(op)	__LL_SC_CALL(atomic64_##op)

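/*
 * The 64-bit counterparts below follow the same pattern as the 32-bit
 * ops, operating on full x registers instead of w registers.
 */
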
static inline void atomic64_andnot(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(andnot),
	" stclr %[i], %[v]\n")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

static inline void atomic64_or(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(or),
	" stset %[i], %[v]\n")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

static inline void atomic64_xor(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(xor),
	" steor %[i], %[v]\n")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

static inline void atomic64_add(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(add),
	" stadd %[i], %[v]\n")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

static inline long atomic64_add_return(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	" nop\n"
	__LL_SC_ATOMIC64(add_return),
	/* LSE atomics */
	" ldaddal %[i], x30, %[v]\n"
	" add %[i], %[i], x30")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30", "memory");

	return x0;
}

static inline void atomic64_and(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	" nop\n"
	__LL_SC_ATOMIC64(and),
	/* LSE atomics */
	" mvn %[i], %[i]\n"
	" stclr %[i], %[v]")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

static inline void atomic64_sub(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	" nop\n"
	__LL_SC_ATOMIC64(sub),
	/* LSE atomics */
	" neg %[i], %[i]\n"
	" stadd %[i], %[v]")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30");
}

static inline long atomic64_sub_return(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	" nop\n"
	__LL_SC_ATOMIC64(sub_return)
	" nop",
	/* LSE atomics */
	" neg %[i], %[i]\n"
	" ldaddal %[i], x30, %[v]\n"
	" add %[i], %[i], x30")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: "x30", "memory");

	return x0;
}

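/*
 * There is no single LSE instruction for a conditional decrement, so
 * the LSE path below is a CAS loop: load the counter, compute
 * counter - 1, bail out if it would go negative, and retry with CASAL
 * until the compare-and-swap succeeds.
 */
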
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	register long x0 asm ("x0") = (long)v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	" nop\n"
	__LL_SC_ATOMIC64(dec_if_positive)
	" nop\n"
	" nop\n"
	" nop\n"
	" nop\n"
	" nop",
	/* LSE atomics */
	"1: ldr x30, %[v]\n"
	" subs %[ret], x30, #1\n"
	" b.lt 2f\n"
	" casal x30, %[ret], %[v]\n"
	" sub x30, x30, #1\n"
	" sub x30, x30, %[ret]\n"
	" cbnz x30, 1b\n"
	"2:")
	: [ret] "+&r" (x0), [v] "+Q" (v->counter)
	:
	: "x30", "cc", "memory");

	return x0;
}

#undef __LL_SC_ATOMIC64

#define __LL_SC_CMPXCHG(op)	__LL_SC_CALL(__cmpxchg_case_##op)

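/*
 * __CMPXCHG_CASE expands to one cmpxchg helper per operand size (1, 2,
 * 4 and 8 bytes), in a relaxed flavour and a full-barrier "mb" flavour
 * that uses the acquire/release form of CAS and clobbers memory.
 */
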
#define __CMPXCHG_CASE(w, sz, name, mb, cl...)				\
static inline unsigned long __cmpxchg_case_##name(volatile void *ptr,	\
						  unsigned long old,	\
						  unsigned long new)	\
{									\
	register unsigned long x0 asm ("x0") = (unsigned long)ptr;	\
	register unsigned long x1 asm ("x1") = old;			\
	register unsigned long x2 asm ("x2") = new;			\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	" nop\n"							\
	__LL_SC_CMPXCHG(name)						\
	" nop",								\
	/* LSE atomics */						\
	" mov " #w "30, %" #w "[old]\n"					\
	" cas" #mb #sz "\t" #w "30, %" #w "[new], %[v]\n"		\
	" mov %" #w "[ret], " #w "30")					\
	: [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr)		\
	: [old] "r" (x1), [new] "r" (x2)				\
	: "x30" , ##cl);						\
									\
	return x0;							\
}

__CMPXCHG_CASE(w, b,    1,   )
__CMPXCHG_CASE(w, h,    2,   )
__CMPXCHG_CASE(w,  ,    4,   )
__CMPXCHG_CASE(x,  ,    8,   )
__CMPXCHG_CASE(w, b, mb_1, al, "memory")
__CMPXCHG_CASE(w, h, mb_2, al, "memory")
__CMPXCHG_CASE(w,  , mb_4, al, "memory")
__CMPXCHG_CASE(x,  , mb_8, al, "memory")

#undef __LL_SC_CMPXCHG
#undef __CMPXCHG_CASE

#define __LL_SC_CMPXCHG_DBL(op)	__LL_SC_CALL(__cmpxchg_double##op)

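/*
 * Double-word compare-and-swap, built on CASP. The EOR/ORR tail folds
 * the two returned words into a single value that is zero only when
 * both halves matched the expected old values, so the generated helper
 * returns 0 on success and non-zero on failure.
 */
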
#define __CMPXCHG_DBL(name, mb, cl...)					\
static inline int __cmpxchg_double##name(unsigned long old1,		\
					 unsigned long old2,		\
					 unsigned long new1,		\
					 unsigned long new2,		\
					 volatile void *ptr)		\
{									\
	unsigned long oldval1 = old1;					\
	unsigned long oldval2 = old2;					\
	register unsigned long x0 asm ("x0") = old1;			\
	register unsigned long x1 asm ("x1") = old2;			\
	register unsigned long x2 asm ("x2") = new1;			\
	register unsigned long x3 asm ("x3") = new2;			\
	register unsigned long x4 asm ("x4") = (unsigned long)ptr;	\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	" nop\n"							\
	" nop\n"							\
	" nop\n"							\
	__LL_SC_CMPXCHG_DBL(name),					\
	/* LSE atomics */						\
	" casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"	\
	" eor %[old1], %[old1], %[oldval1]\n"				\
	" eor %[old2], %[old2], %[oldval2]\n"				\
	" orr %[old1], %[old1], %[old2]")				\
	: [old1] "+r" (x0), [old2] "+r" (x1),				\
	  [v] "+Q" (*(unsigned long *)ptr)				\
	: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4),		\
	  [oldval1] "r" (oldval1), [oldval2] "r" (oldval2)		\
	: "x30" , ##cl);						\
									\
	return x0;							\
}

__CMPXCHG_DBL(   ,   )
__CMPXCHG_DBL(_mb, al, "memory")

#undef __LL_SC_CMPXCHG_DBL
#undef __CMPXCHG_DBL

#endif	/* __ASM_ATOMIC_LSE_H */