arm64: cmpxchg: patch in lse instructions when supported by the CPU
arch/arm64/include/asm/cmpxchg.h
/*
 * Based on arch/arm/include/asm/cmpxchg.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H

#include <linux/bug.h>
#include <linux/mmdebug.h>

#include <asm/atomic.h>
#include <asm/barrier.h>
#include <asm/lse.h>

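/*
 * ARM64_LSE_ATOMIC_INSN(llsc, lse) from <asm/lse.h> emits both an LL/SC
 * sequence and an LSE sequence of equal length (hence the nop padding
 * below); the alternatives framework patches the LSE encoding in at
 * boot on CPUs that support the LSE atomics, so no runtime check is
 * needed on the fast path.
 */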
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	unsigned long ret, tmp;

	switch (size) {
	case 1:
		asm volatile(ARM64_LSE_ATOMIC_INSN(
		/* LL/SC */
		"1:	ldxrb	%w0, %2\n"
		"	stlxrb	%w1, %w3, %2\n"
		"	cbnz	%w1, 1b\n"
		"	dmb	ish",
		/* LSE atomics */
		"	nop\n"
		"	swpalb	%w3, %w0, %2\n"
		"	nop\n"
		"	nop")
		: "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
		: "r" (x)
		: "memory");
		break;
	case 2:
		asm volatile(ARM64_LSE_ATOMIC_INSN(
		/* LL/SC */
		"1:	ldxrh	%w0, %2\n"
		"	stlxrh	%w1, %w3, %2\n"
		"	cbnz	%w1, 1b\n"
		"	dmb	ish",
		/* LSE atomics */
		"	nop\n"
		"	swpalh	%w3, %w0, %2\n"
		"	nop\n"
		"	nop")
		: "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
		: "r" (x)
		: "memory");
		break;
	case 4:
		asm volatile(ARM64_LSE_ATOMIC_INSN(
		/* LL/SC */
		"1:	ldxr	%w0, %2\n"
		"	stlxr	%w1, %w3, %2\n"
		"	cbnz	%w1, 1b\n"
		"	dmb	ish",
		/* LSE atomics */
		"	nop\n"
		"	swpal	%w3, %w0, %2\n"
		"	nop\n"
		"	nop")
		: "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
		: "r" (x)
		: "memory");
		break;
	case 8:
		asm volatile(ARM64_LSE_ATOMIC_INSN(
		/* LL/SC */
		"1:	ldxr	%0, %2\n"
		"	stlxr	%w1, %3, %2\n"
		"	cbnz	%w1, 1b\n"
		"	dmb	ish",
		/* LSE atomics */
		"	nop\n"
		"	swpal	%3, %0, %2\n"
		"	nop\n"
		"	nop")
		: "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
		: "r" (x)
		: "memory");
		break;
	default:
		BUILD_BUG();
	}

	return ret;
}

#define xchg(ptr,x) \
({ \
	__typeof__(*(ptr)) __ret; \
	__ret = (__typeof__(*(ptr))) \
		__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \
	__ret; \
})

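/*
 * Illustrative use of xchg() (the variable names are hypothetical):
 *
 *	old_state = xchg(&state, NEW_STATE);
 *
 * The store is unconditional and the previous contents are returned.
 * Both encodings above are fully ordered: the LL/SC path pairs a
 * store-release with a trailing dmb ish, and the LSE path uses the
 * acquire-release swpal forms.
 */
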
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	switch (size) {
	case 1:
		return __cmpxchg_case_1(ptr, old, new);
	case 2:
		return __cmpxchg_case_2(ptr, old, new);
	case 4:
		return __cmpxchg_case_4(ptr, old, new);
	case 8:
		return __cmpxchg_case_8(ptr, old, new);
	default:
		BUILD_BUG();
	}

	unreachable();
}

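/*
 * The __cmpxchg_case_*() helpers used above are expected to come from
 * the atomics backend pulled in via <asm/atomic.h>, which provides
 * LL/SC and LSE variants selected by the same runtime patching as the
 * ARM64_LSE_ATOMIC_INSN() blocks in __xchg().
 */
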
#define system_has_cmpxchg_double()	1

/*
 * Compare-and-swap two adjacent 64-bit words: ldxp/stxp access the pair
 * exclusively, the eor/orr sequence folds both comparisons into 'lost',
 * and the store is attempted only when both words matched their
 * expected values. Returns 1 on success, 0 if either comparison failed.
 */
static inline int __cmpxchg_double(volatile void *ptr1, volatile void *ptr2,
		unsigned long old1, unsigned long old2,
		unsigned long new1, unsigned long new2, int size)
{
	unsigned long loop, lost;

	switch (size) {
	case 8:
		VM_BUG_ON((unsigned long *)ptr2 - (unsigned long *)ptr1 != 1);
		do {
			asm volatile("// __cmpxchg_double8\n"
			"	ldxp	%0, %1, %2\n"
			"	eor	%0, %0, %3\n"
			"	eor	%1, %1, %4\n"
			"	orr	%1, %0, %1\n"
			"	mov	%w0, #0\n"
			"	cbnz	%1, 1f\n"
			"	stxp	%w0, %5, %6, %2\n"
			"1:\n"
			: "=&r"(loop), "=&r"(lost), "+Q" (*(u64 *)ptr1)
			: "r" (old1), "r"(old2), "r"(new1), "r"(new2));
		} while (loop);
		break;
	default:
		BUILD_BUG();
	}

	return !lost;
}

static inline int __cmpxchg_double_mb(volatile void *ptr1, volatile void *ptr2,
		unsigned long old1, unsigned long old2,
		unsigned long new1, unsigned long new2, int size)
{
	int ret;

	smp_mb();
	ret = __cmpxchg_double(ptr1, ptr2, old1, old2, new1, new2, size);
	smp_mb();

	return ret;
}

static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
					 unsigned long new, int size)
{
	switch (size) {
	case 1:
		return __cmpxchg_case_mb_1(ptr, old, new);
	case 2:
		return __cmpxchg_case_mb_2(ptr, old, new);
	case 4:
		return __cmpxchg_case_mb_4(ptr, old, new);
	case 8:
		return __cmpxchg_case_mb_8(ptr, old, new);
	default:
		BUILD_BUG();
	}

	unreachable();
}

#define cmpxchg(ptr, o, n) \
({ \
	__typeof__(*(ptr)) __ret; \
	__ret = (__typeof__(*(ptr))) \
		__cmpxchg_mb((ptr), (unsigned long)(o), (unsigned long)(n), \
			     sizeof(*(ptr))); \
	__ret; \
})

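/*
 * Illustrative use of cmpxchg() (the variable names are hypothetical):
 *
 *	old = cmpxchg(&v, expected, new);
 *	if (old == expected)
 *		... the swap happened ...
 *
 * cmpxchg() returns the value actually found at the address, so
 * comparing it with the expected old value detects success.
 */
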
#define cmpxchg_local(ptr, o, n) \
({ \
	__typeof__(*(ptr)) __ret; \
	__ret = (__typeof__(*(ptr))) \
		__cmpxchg((ptr), (unsigned long)(o), \
			  (unsigned long)(n), sizeof(*(ptr))); \
	__ret; \
})

#define cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \
({\
	int __ret;\
	__ret = __cmpxchg_double_mb((ptr1), (ptr2), (unsigned long)(o1), \
			(unsigned long)(o2), (unsigned long)(n1), \
			(unsigned long)(n2), sizeof(*(ptr1)));\
	__ret; \
})

#define cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \
({\
	int __ret;\
	__ret = __cmpxchg_double((ptr1), (ptr2), (unsigned long)(o1), \
			(unsigned long)(o2), (unsigned long)(n1), \
			(unsigned long)(n2), sizeof(*(ptr1)));\
	__ret; \
})

#define _protect_cmpxchg_local(pcp, o, n)			\
({								\
	typeof(*raw_cpu_ptr(&(pcp))) __ret;			\
	preempt_disable();					\
	__ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n);	\
	preempt_enable();					\
	__ret;							\
})

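/*
 * _protect_cmpxchg_local() disables preemption so that raw_cpu_ptr()
 * keeps referring to the same CPU's copy of the variable for the
 * duration of the cmpxchg; the this_cpu_cmpxchg*() operations below
 * rely on this.
 */
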
#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)

#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2)		\
({									\
	int __ret;							\
	preempt_disable();						\
	__ret = cmpxchg_double_local(	raw_cpu_ptr(&(ptr1)),		\
					raw_cpu_ptr(&(ptr2)),		\
					o1, o2, n1, n2);		\
	preempt_enable();						\
	__ret;								\
})

#define cmpxchg64(ptr,o,n)		cmpxchg((ptr),(o),(n))
#define cmpxchg64_local(ptr,o,n)	cmpxchg_local((ptr),(o),(n))

#define cmpxchg64_relaxed(ptr,o,n)	cmpxchg_local((ptr),(o),(n))

#endif	/* __ASM_CMPXCHG_H */