arm64: rwlocks: don't fail trylock purely due to contention
arch/arm64/include/asm/spinlock.h
/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/spinlock_types.h>
#include <asm/processor.h>

/*
 * Spinlock implementation.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 */

#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval, newval;

	asm volatile(
	/* Atomically increment the next ticket. */
	"	prfm	pstl1strm, %3\n"
	"1:	ldaxr	%w0, %3\n"
	"	add	%w1, %w0, %w5\n"
	"	stxr	%w2, %w1, %3\n"
	"	cbnz	%w2, 1b\n"
	/* Did we get the lock? */
	"	eor	%w1, %w0, %w0, ror #16\n"
	"	cbz	%w1, 3f\n"
	/*
	 * No: spin on the owner. Send a local event to avoid missing an
	 * unlock before the exclusive load.
	 */
	"	sevl\n"
	"2:	wfe\n"
	"	ldaxrh	%w2, %4\n"
	"	eor	%w1, %w2, %w0, lsr #16\n"
	"	cbnz	%w1, 2b\n"
	/* We got the lock. Critical section starts here. */
	"3:"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
	: "Q" (lock->owner), "I" (1 << TICKET_SHIFT)
	: "memory");
}
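
/*
 * Illustrative only: a plain C sketch of the ticket-lock acquire above,
 * operating directly on the raw 32-bit lock word and assuming the
 * little-endian layout (owner in the low halfword, next in the high
 * halfword, TICKET_SHIFT == 16) plus GCC __atomic builtins. The helper
 * name is made up; the real lock must use the LL/SC (ldaxr/stxr)
 * sequence in the asm, so this block is not compiled.
 */
#if 0
static inline void ticket_lock_acquire_sketch(u32 *lock)
{
	/* Atomically take the next ticket by bumping the high halfword. */
	u32 old = __atomic_fetch_add(lock, 1U << TICKET_SHIFT, __ATOMIC_ACQUIRE);
	u16 my_ticket = old >> TICKET_SHIFT;

	/* Spin until the owner halfword catches up with our ticket. */
	while ((u16)__atomic_load_n(lock, __ATOMIC_ACQUIRE) != my_ticket)
		cpu_relax();
}
#endif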

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval;

	asm volatile(
	"	prfm	pstl1strm, %2\n"
	"1:	ldaxr	%w0, %2\n"
	"	eor	%w1, %w0, %w0, ror #16\n"
	"	cbnz	%w1, 2f\n"
	"	add	%w0, %w0, %3\n"
	"	stxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 1b\n"
	"2:"
	: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
	: "I" (1 << TICKET_SHIFT)
	: "memory");

	return !tmp;
}
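
/*
 * Illustrative only: the trylock above in plain C, under the same layout
 * and __atomic-builtin assumptions as the sketch further up. The point to
 * note is that a failed store-exclusive (here, a failed CAS) simply
 * retries; trylock only reports failure when the lock is genuinely held,
 * i.e. when owner != next.
 */
#if 0
static inline int ticket_trylock_sketch(u32 *lock)
{
	u32 old, new;

	do {
		old = __atomic_load_n(lock, __ATOMIC_RELAXED);
		/* Held if the owner and next tickets differ. */
		if ((old >> TICKET_SHIFT) != (old & 0xffff))
			return 0;
		new = old + (1U << TICKET_SHIFT);
	} while (!__atomic_compare_exchange_n(lock, &old, new, 0,
					      __ATOMIC_ACQUIRE, __ATOMIC_RELAXED));

	return 1;
}
#endif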

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	asm volatile(
	"	stlrh	%w1, %0\n"
	: "=Q" (lock->owner)
	: "r" (lock->owner + 1)
	: "memory");
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.owner == lock.next;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return !arch_spin_value_unlocked(READ_ONCE(*lock));
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	arch_spinlock_t lockval = READ_ONCE(*lock);
	return (lockval.next - lockval.owner) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended

/*
 * Write lock implementation.
 *
 * Write locks set bit 31. Unlocking is done by writing 0 since the lock is
 * exclusively held.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 */

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	asm volatile(
	"	sevl\n"
	"1:	wfe\n"
	"2:	ldaxr	%w0, %1\n"
	"	cbnz	%w0, 1b\n"
	"	stxr	%w0, %w2, %1\n"
	"	cbnz	%w0, 2b\n"
	: "=&r" (tmp), "+Q" (rw->lock)
	: "r" (0x80000000)
	: "memory");
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	asm volatile(
	"1:	ldaxr	%w0, %1\n"
	"	cbnz	%w0, 2f\n"
	"	stxr	%w0, %w2, %1\n"
	"	cbnz	%w0, 1b\n"
	"2:\n"
	: "=&r" (tmp), "+Q" (rw->lock)
	: "r" (0x80000000)
	: "memory");

	return !tmp;
}
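
/*
 * Illustrative only: the write-trylock above expressed with a GCC __atomic
 * compare-and-swap (the helper name and builtins are assumptions, not
 * kernel API). As the subject line says, pure contention (a failed
 * store-exclusive, or a failed CAS here) leads to a retry; the trylock
 * only fails when the lock word is actually non-zero, i.e. already held.
 */
#if 0
static inline int write_trylock_sketch(arch_rwlock_t *rw)
{
	unsigned int old;

	do {
		old = __atomic_load_n(&rw->lock, __ATOMIC_RELAXED);
		if (old != 0)
			return 0;	/* held by a reader or a writer */
	} while (!__atomic_compare_exchange_n(&rw->lock, &old, 0x80000000, 0,
					      __ATOMIC_ACQUIRE, __ATOMIC_RELAXED));

	return 1;
}
#endif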

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	asm volatile(
	"	stlr	%w1, %0\n"
	: "=Q" (rw->lock) : "r" (0) : "memory");
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)	((x)->lock == 0)

/*
 * Read lock implementation.
 *
 * It exclusively loads the lock value, increments it and stores the new value
 * back if positive and the CPU still exclusively owns the location. If the
 * value is negative, the lock is already held.
 *
 * During unlocking there may be multiple active read locks but no write lock.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(
	"	sevl\n"
	"1:	wfe\n"
	"2:	ldaxr	%w0, %2\n"
	"	add	%w0, %w0, #1\n"
	"	tbnz	%w0, #31, 1b\n"
	"	stxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 2b\n"
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "memory");
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(
	"1:	ldxr	%w0, %2\n"
	"	sub	%w0, %w0, #1\n"
	"	stlxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "memory");
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2 = 1;

	asm volatile(
	"1:	ldaxr	%w0, %2\n"
	"	add	%w0, %w0, #1\n"
	"	tbnz	%w0, #31, 2f\n"
	"	stxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 1b\n"
	"2:\n"
	: "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock)
	:
	: "memory");

	return !tmp2;
}
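
/*
 * Illustrative only: the read-trylock above in plain C. The reader count
 * lives in bits 0..30 and bit 31 marks a write lock, so the incremented
 * value going "negative" means a writer is present. As with the write
 * variant, a CAS that fails purely due to contention is retried rather
 * than turned into a trylock failure. Names and builtins are assumptions.
 */
#if 0
static inline int read_trylock_sketch(arch_rwlock_t *rw)
{
	unsigned int old, new;

	do {
		old = __atomic_load_n(&rw->lock, __ATOMIC_RELAXED);
		new = old + 1;
		if (new & 0x80000000)
			return 0;	/* a writer holds the lock */
	} while (!__atomic_compare_exchange_n(&rw->lock, &old, new, 0,
					      __ATOMIC_ACQUIRE, __ATOMIC_RELAXED));

	return 1;
}
#endif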

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)	((x)->lock < 0x80000000)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif	/* __ASM_SPINLOCK_H */