/*
 * Based on arch/arm/include/asm/cmpxchg.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H

#include <linux/bug.h>
#include <linux/mmdebug.h>

#include <asm/atomic.h>
#include <asm/barrier.h>
#include <asm/lse.h>

28 | static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size) | |
29 | { | |
30 | unsigned long ret, tmp; | |
31 | ||
32 | switch (size) { | |
33 | case 1: | |
c8366ba0 WD |
34 | asm volatile(ARM64_LSE_ATOMIC_INSN( |
35 | /* LL/SC */ | |
8e86f0b4 | 36 | "1: ldxrb %w0, %2\n" |
3a0310eb | 37 | " stlxrb %w1, %w3, %2\n" |
10b663ae | 38 | " cbnz %w1, 1b\n" |
c8366ba0 WD |
39 | " dmb ish", |
40 | /* LSE atomics */ | |
41 | " nop\n" | |
42 | " swpalb %w3, %w0, %2\n" | |
43 | " nop\n" | |
44 | " nop") | |
3a0310eb WD |
45 | : "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr) |
46 | : "r" (x) | |
95c41896 | 47 | : "memory"); |
10b663ae CM |
48 | break; |
49 | case 2: | |
c8366ba0 WD |
50 | asm volatile(ARM64_LSE_ATOMIC_INSN( |
51 | /* LL/SC */ | |
8e86f0b4 | 52 | "1: ldxrh %w0, %2\n" |
3a0310eb | 53 | " stlxrh %w1, %w3, %2\n" |
10b663ae | 54 | " cbnz %w1, 1b\n" |
c8366ba0 WD |
55 | " dmb ish", |
56 | /* LSE atomics */ | |
57 | " nop\n" | |
58 | " swpalh %w3, %w0, %2\n" | |
59 | " nop\n" | |
60 | " nop") | |
3a0310eb WD |
61 | : "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr) |
62 | : "r" (x) | |
95c41896 | 63 | : "memory"); |
10b663ae CM |
64 | break; |
65 | case 4: | |
c8366ba0 WD |
66 | asm volatile(ARM64_LSE_ATOMIC_INSN( |
67 | /* LL/SC */ | |
8e86f0b4 | 68 | "1: ldxr %w0, %2\n" |
3a0310eb | 69 | " stlxr %w1, %w3, %2\n" |
10b663ae | 70 | " cbnz %w1, 1b\n" |
c8366ba0 WD |
71 | " dmb ish", |
72 | /* LSE atomics */ | |
73 | " nop\n" | |
74 | " swpal %w3, %w0, %2\n" | |
75 | " nop\n" | |
76 | " nop") | |
3a0310eb WD |
77 | : "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr) |
78 | : "r" (x) | |
95c41896 | 79 | : "memory"); |
10b663ae CM |
80 | break; |
81 | case 8: | |
c8366ba0 WD |
82 | asm volatile(ARM64_LSE_ATOMIC_INSN( |
83 | /* LL/SC */ | |
8e86f0b4 | 84 | "1: ldxr %0, %2\n" |
3a0310eb | 85 | " stlxr %w1, %3, %2\n" |
10b663ae | 86 | " cbnz %w1, 1b\n" |
c8366ba0 WD |
87 | " dmb ish", |
88 | /* LSE atomics */ | |
89 | " nop\n" | |
90 | " swpal %3, %0, %2\n" | |
91 | " nop\n" | |
92 | " nop") | |
3a0310eb WD |
93 | : "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr) |
94 | : "r" (x) | |
95c41896 | 95 | : "memory"); |
10b663ae CM |
96 | break; |
97 | default: | |
98 | BUILD_BUG(); | |
99 | } | |
100 | ||
101 | return ret; | |
102 | } | |
104 | #define xchg(ptr,x) \ | |
e1dfda9c WD |
105 | ({ \ |
106 | __typeof__(*(ptr)) __ret; \ | |
107 | __ret = (__typeof__(*(ptr))) \ | |
108 | __xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \ | |
109 | __ret; \ | |
110 | }) | |
112 | static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, | |
113 | unsigned long new, int size) | |
114 | { | |
10b663ae CM |
115 | switch (size) { |
116 | case 1: | |
c342f782 | 117 | return __cmpxchg_case_1(ptr, old, new); |
10b663ae | 118 | case 2: |
c342f782 | 119 | return __cmpxchg_case_2(ptr, old, new); |
10b663ae | 120 | case 4: |
c342f782 | 121 | return __cmpxchg_case_4(ptr, old, new); |
10b663ae | 122 | case 8: |
c342f782 | 123 | return __cmpxchg_case_8(ptr, old, new); |
10b663ae CM |
124 | default: |
125 | BUILD_BUG(); | |
126 | } | |
127 | ||
c342f782 | 128 | unreachable(); |
10b663ae CM |
129 | } |
131 | static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old, | |
132 | unsigned long new, int size) | |
133 | { | |
c342f782 WD |
134 | switch (size) { |
135 | case 1: | |
136 | return __cmpxchg_case_mb_1(ptr, old, new); | |
137 | case 2: | |
138 | return __cmpxchg_case_mb_2(ptr, old, new); | |
139 | case 4: | |
140 | return __cmpxchg_case_mb_4(ptr, old, new); | |
141 | case 8: | |
142 | return __cmpxchg_case_mb_8(ptr, old, new); | |
143 | default: | |
144 | BUILD_BUG(); | |
145 | } | |
10b663ae | 146 | |
c342f782 | 147 | unreachable(); |
10b663ae CM |
148 | } |
150 | #define cmpxchg(ptr, o, n) \ |
151 | ({ \ | |
152 | __typeof__(*(ptr)) __ret; \ | |
153 | __ret = (__typeof__(*(ptr))) \ | |
154 | __cmpxchg_mb((ptr), (unsigned long)(o), (unsigned long)(n), \ | |
155 | sizeof(*(ptr))); \ | |
156 | __ret; \ | |
157 | }) | |
159 | #define cmpxchg_local(ptr, o, n) \ | |
160 | ({ \ | |
161 | __typeof__(*(ptr)) __ret; \ | |
162 | __ret = (__typeof__(*(ptr))) \ | |
163 | __cmpxchg((ptr), (unsigned long)(o), \ | |
164 | (unsigned long)(n), sizeof(*(ptr))); \ | |
165 | __ret; \ | |
166 | }) | |
168 | #define system_has_cmpxchg_double() 1 |
170 | #define __cmpxchg_double_check(ptr1, ptr2) \ | |
171 | ({ \ | |
172 | if (sizeof(*(ptr1)) != 8) \ | |
173 | BUILD_BUG(); \ | |
174 | VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1); \ | |
175 | }) | |
177 | #define cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \ |
178 | ({\ | |
179 | int __ret;\ | |
e9a4b795 WD |
180 | __cmpxchg_double_check(ptr1, ptr2); \ |
181 | __ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2), \ | |
182 | (unsigned long)(n1), (unsigned long)(n2), \ | |
183 | ptr1); \ | |
5284e1b4 SC |
184 | __ret; \ |
185 | }) | |
187 | #define cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \ | |
188 | ({\ | |
189 | int __ret;\ | |
e9a4b795 WD |
190 | __cmpxchg_double_check(ptr1, ptr2); \ |
191 | __ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2), \ | |
192 | (unsigned long)(n1), (unsigned long)(n2), \ | |
193 | ptr1); \ | |
5284e1b4 SC |
194 | __ret; \ |
195 | }) | |
197 | #define _protect_cmpxchg_local(pcp, o, n) \ |
198 | ({ \ | |
199 | typeof(*raw_cpu_ptr(&(pcp))) __ret; \ | |
200 | preempt_disable(); \ | |
201 | __ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n); \ | |
202 | preempt_enable(); \ | |
203 | __ret; \ | |
204 | }) | |
206 | #define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) | |
207 | #define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) | |
208 | #define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) | |
209 | #define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) | |
211 | #define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \ | |
212 | ({ \ | |
213 | int __ret; \ | |
214 | preempt_disable(); \ | |
215 | __ret = cmpxchg_double_local( raw_cpu_ptr(&(ptr1)), \ | |
216 | raw_cpu_ptr(&(ptr2)), \ | |
217 | o1, o2, n1, n2); \ | |
218 | preempt_enable(); \ | |
219 | __ret; \ | |
220 | }) | |
222 | #define cmpxchg64(ptr,o,n) cmpxchg((ptr),(o),(n)) |
223 | #define cmpxchg64_local(ptr,o,n) cmpxchg_local((ptr),(o),(n)) | |
224 | ||
cf10b79a WD |
225 | #define cmpxchg64_relaxed(ptr,o,n) cmpxchg_local((ptr),(o),(n)) |

#endif /* __ASM_CMPXCHG_H */