/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/lse.h>
#include <asm/spinlock_types.h>
#include <asm/processor.h>

/*
 * Spinlock implementation.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 */
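/*
 * A note on the lock word layout (a sketch of the assumptions; the
 * authoritative definition lives in asm/spinlock_types.h): arch_spinlock_t
 * is a ticket lock whose 32-bit value holds the "owner" ticket in the low
 * 16 bits and the "next" ticket in the high 16 bits, with TICKET_SHIFT == 16.
 * The lock is free when the two halves are equal, which is what the recurring
 *
 *	eor	%wN, %wM, %wM, ror #16
 *
 * sequence below tests: rotating the word by 16 bits swaps the halves, so
 * the exclusive-or is zero exactly when owner == next.
 */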
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval;
	u32 owner;

	/*
	 * Ensure prior spin_lock operations to other locks have completed
	 * on this CPU before we test whether "lock" is locked.
	 */
	smp_mb();
	owner = READ_ONCE(lock->owner) << 16;

	asm volatile(
"	sevl\n"
"1:	wfe\n"
"2:	ldaxr	%w0, %2\n"
	/* Is the lock free? */
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbz	%w1, 3f\n"
	/* Lock taken -- has there been a subsequent unlock->lock transition? */
"	eor	%w1, %w3, %w0, lsl #16\n"
"	cbz	%w1, 1b\n"
	/*
	 * The owner has been updated, so there was an unlock->lock
	 * transition that we missed. That means we can rely on the
	 * store-release of the unlock operation paired with the
	 * load-acquire of the lock operation to publish any of our
	 * previous stores to the new lock owner and therefore don't
	 * need to bother with the writeback below.
	 */
"	b	4f\n"
"3:\n"
	/*
	 * Serialise against any concurrent lockers by writing back the
	 * unlocked lock value
	 */
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"	stxr	%w1, %w0, %2\n"
"	nop\n"
"	nop\n",
	/* LSE atomics */
"	mov	%w1, %w0\n"
"	cas	%w0, %w0, %2\n"
"	eor	%w1, %w1, %w0\n")
	/* Somebody else wrote to the lock, GOTO 10 and reload the value */
"	cbnz	%w1, 2b\n"
"4:"
	: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
	: "r" (owner)
	: "memory");
}

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval, newval;

	asm volatile(
	/* Atomically increment the next ticket. */
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"	prfm	pstl1strm, %3\n"
"1:	ldaxr	%w0, %3\n"
"	add	%w1, %w0, %w5\n"
"	stxr	%w2, %w1, %3\n"
"	cbnz	%w2, 1b\n",
	/* LSE atomics */
"	mov	%w2, %w5\n"
"	ldadda	%w2, %w0, %3\n"
"	nop\n"
"	nop\n"
"	nop\n"
	)

	/* Did we get the lock? */
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbz	%w1, 3f\n"
	/*
	 * No: spin on the owner. Send a local event to avoid missing an
	 * unlock before the exclusive load.
	 */
"	sevl\n"
"2:	wfe\n"
"	ldaxrh	%w2, %4\n"
"	eor	%w1, %w2, %w0, lsr #16\n"
"	cbnz	%w1, 2b\n"
	/* We got the lock. Critical section starts here. */
"3:"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
	: "Q" (lock->owner), "I" (1 << TICKET_SHIFT)
	: "memory");
}
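
/*
 * Roughly, the acquire sequence above amounts to the following C (a sketch
 * only: the atomicity of the ticket grab and the acquire ordering come from
 * ldaxr/stxr or ldadda in the real code):
 *
 *	arch_spinlock_t old = *lock;		// fetch the lock word ...
 *	lock->next = old.next + 1;		// ... and bump "next"
 *	while (READ_ONCE(lock->owner) != old.next)
 *		wfe();				// wait for our ticket to come up
 */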

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	prfm	pstl1strm, %2\n"
	"1:	ldaxr	%w0, %2\n"
	"	eor	%w1, %w0, %w0, ror #16\n"
	"	cbnz	%w1, 2f\n"
	"	add	%w0, %w0, %3\n"
	"	stxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 1b\n"
	"2:",
	/* LSE atomics */
	"	ldr	%w0, %2\n"
	"	eor	%w1, %w0, %w0, ror #16\n"
	"	cbnz	%w1, 1f\n"
	"	add	%w1, %w0, %3\n"
	"	casa	%w0, %w1, %2\n"
	"	and	%w1, %w1, #0xffff\n"
	"	eor	%w1, %w1, %w0, lsr #16\n"
	"1:")
	: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
	: "I" (1 << TICKET_SHIFT)
	: "memory");

	return !tmp;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	ldrh	%w1, %0\n"
	"	add	%w1, %w1, #1\n"
	"	stlrh	%w1, %0",
	/* LSE atomics */
	"	mov	%w1, #1\n"
	"	nop\n"
	"	staddlh	%w1, %0")
	: "=Q" (lock->owner), "=&r" (tmp)
	:
	: "memory");
}
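
/*
 * Unlock is simply "owner++" with release semantics; roughly:
 *
 *	smp_store_release(&lock->owner, lock->owner + 1);
 *
 * This hands the lock to whichever CPU holds the next ticket. No explicit
 * sev is needed: the store clears the waiters' exclusive monitors (armed by
 * ldaxrh above), which generates the event that wakes them from wfe.
 */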

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.owner == lock.next;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	smp_mb(); /* See arch_spin_unlock_wait */
	return !arch_spin_value_unlocked(READ_ONCE(*lock));
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	arch_spinlock_t lockval = READ_ONCE(*lock);
	return (lockval.next - lockval.owner) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended
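
/*
 * Example: with owner == 3 and next == 5, one CPU holds ticket 3 and another
 * is queued on ticket 4, so next - owner == 2 and the lock is reported as
 * contended. next - owner == 1 means held but uncontended; 0 means free.
 */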

/*
 * Write lock implementation.
 *
 * Write locks set bit 31. Unlocking is done by writing 0 since the lock is
 * exclusively held.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 */

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	sevl\n"
	"1:	wfe\n"
	"2:	ldaxr	%w0, %1\n"
	"	cbnz	%w0, 1b\n"
	"	stxr	%w0, %w2, %1\n"
	"	cbnz	%w0, 2b\n"
	"	nop",
	/* LSE atomics */
	"1:	mov	%w0, wzr\n"
	"2:	casa	%w0, %w2, %1\n"
	"	cbz	%w0, 3f\n"
	"	ldxr	%w0, %1\n"
	"	cbz	%w0, 2b\n"
	"	wfe\n"
	"	b	1b\n"
	"3:")
	: "=&r" (tmp), "+Q" (rw->lock)
	: "r" (0x80000000)
	: "memory");
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"1:	ldaxr	%w0, %1\n"
	"	cbnz	%w0, 2f\n"
	"	stxr	%w0, %w2, %1\n"
	"	cbnz	%w0, 1b\n"
	"2:",
	/* LSE atomics */
	"	mov	%w0, wzr\n"
	"	casa	%w0, %w2, %1\n"
	"	nop\n"
	"	nop")
	: "=&r" (tmp), "+Q" (rw->lock)
	: "r" (0x80000000)
	: "memory");

	return !tmp;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	asm volatile(ARM64_LSE_ATOMIC_INSN(
	"	stlr	wzr, %0",
	"	swpl	wzr, wzr, %0")
	: "=Q" (rw->lock) :: "memory");
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)		((x)->lock == 0)

/*
 * Read lock implementation.
 *
 * It exclusively loads the lock value, increments it and stores the new value
 * back if positive and the CPU still exclusively owns the location. If the
 * value is negative, the lock is already held.
 *
 * During unlocking there may be multiple active read locks but no write lock.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 *
 * Note that in UNDEFINED cases, such as unlocking a lock twice, the LL/SC
 * and LSE implementations may exhibit different behaviour (although this
 * will have no effect on lockdep).
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(
	"	sevl\n"
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"1:	wfe\n"
	"2:	ldaxr	%w0, %2\n"
	"	add	%w0, %w0, #1\n"
	"	tbnz	%w0, #31, 1b\n"
	"	stxr	%w1, %w0, %2\n"
	"	nop\n"
	"	cbnz	%w1, 2b",
	/* LSE atomics */
	"1:	wfe\n"
	"2:	ldxr	%w0, %2\n"
	"	adds	%w1, %w0, #1\n"
	"	tbnz	%w1, #31, 1b\n"
	"	casa	%w0, %w1, %2\n"
	"	sbc	%w0, %w1, %w0\n"
	"	cbnz	%w0, 2b")
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "cc", "memory");
}
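
/*
 * The reader fast path above is, in effect (a sketch only; the atomicity
 * and acquire ordering come from ldaxr/stxr or casa in the real code):
 *
 *	unsigned int old = rw->lock;
 *	if (!((old + 1) & 0x80000000))	// writer bit (31) still clear?
 *		rw->lock = old + 1;	// take one more reader reference
 *	else
 *		goto wait_and_retry;	// wfe, then try again from the load
 */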

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"1:	ldxr	%w0, %2\n"
	"	sub	%w0, %w0, #1\n"
	"	stlxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 1b",
	/* LSE atomics */
	"	movn	%w0, #0\n"
	"	nop\n"
	"	nop\n"
	"	staddl	%w0, %2")
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "memory");
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	mov	%w1, #1\n"
	"1:	ldaxr	%w0, %2\n"
	"	add	%w0, %w0, #1\n"
	"	tbnz	%w0, #31, 2f\n"
	"	stxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 1b\n"
	"2:",
	/* LSE atomics */
	"	ldr	%w0, %2\n"
	"	adds	%w1, %w0, #1\n"
	"	tbnz	%w1, #31, 1f\n"
	"	casa	%w0, %w1, %2\n"
	"	sbc	%w1, %w1, %w0\n"
	"	nop\n"
	"1:")
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "cc", "memory");

	return !tmp2;
}

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)		((x)->lock < 0x80000000)
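
/*
 * To summarise the rw->lock encoding assumed by the two predicates above:
 *
 *	0x00000000			unlocked
 *	0x00000001 .. 0x7fffffff	held by that many readers
 *	0x80000000			held by a writer (bit 31 set)
 *
 * so a writer can only acquire a lock whose value is exactly zero, while a
 * reader can acquire any lock that does not have the writer bit set.
 */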

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif	/* __ASM_SPINLOCK_H */