Merge git://git.infradead.org/intel-iommu
[deliverable/linux.git] / arch / mips / include / asm / spinlock.h
CommitLineData
1da177e4
LT
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
f65e4fa8 6 * Copyright (C) 1999, 2000, 06 Ralf Baechle (ralf@linux-mips.org)
1da177e4
LT
7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8 */
9#ifndef _ASM_SPINLOCK_H
10#define _ASM_SPINLOCK_H
11
2a31b033
RB
12#include <linux/compiler.h>
13
0004a9df 14#include <asm/barrier.h>
b0984c43 15#include <asm/compiler.h>
1da177e4
LT
16#include <asm/war.h>
17
18/*
19 * Your basic SMP spinlocks, allowing only a single CPU anywhere
2a31b033 20 *
70342287 21 * Simple spin lock operations. There are two variants, one clears IRQ's
2a31b033
RB
22 * on the local processor, one does not.
23 *
24 * These are fair FIFO ticket locks
25 *
26 * (the type definitions are in asm/spinlock_types.h)
1da177e4
LT
27 */
28
1da177e4
LT
29
30/*
2a31b033
RB
31 * Ticket locks are conceptually two parts, one indicating the current head of
32 * the queue, and the other indicating the current tail. The lock is acquired
33 * by atomically noting the tail and incrementing it by one (thus adding
34 * ourself to the queue and noting our position), then waiting until the head
35 * becomes equal to the the initial value of the tail.
1da177e4
LT
36 */
37
0199c4e6 38static inline int arch_spin_is_locked(arch_spinlock_t *lock)
2a31b033 39{
500c2e1f 40 u32 counters = ACCESS_ONCE(lock->lock);
2a31b033 41
500c2e1f 42 return ((counters >> 16) ^ counters) & 0xffff;
2a31b033
RB
43}
44
/* Ticket locks take no IRQ-flags shortcut on MIPS; flags are ignored. */
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
/* Busy-wait (without acquiring) until the lock is observed free. */
#define arch_spin_unlock_wait(x) \
	while (arch_spin_is_locked(x)) { cpu_relax(); }
2a31b033 48
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	u32 counters = ACCESS_ONCE(lock->lock);

	/*
	 * next-ticket (high half) minus serving-now (low half) counts the
	 * holder plus the waiters; strictly more than 1 means at least one
	 * CPU is queued behind the current holder.
	 */
	return (((counters >> 16) - counters) & 0xffff) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended
2a31b033 56
0199c4e6 57static inline void arch_spin_lock(arch_spinlock_t *lock)
1da177e4 58{
2a31b033
RB
59 int my_ticket;
60 int tmp;
500c2e1f 61 int inc = 0x10000;
1da177e4
LT
62
63 if (R10000_LLSC_WAR) {
2a31b033 64 __asm__ __volatile__ (
0199c4e6 65 " .set push # arch_spin_lock \n"
2a31b033
RB
66 " .set noreorder \n"
67 " \n"
68 "1: ll %[ticket], %[ticket_ptr] \n"
500c2e1f 69 " addu %[my_ticket], %[ticket], %[inc] \n"
2a31b033
RB
70 " sc %[my_ticket], %[ticket_ptr] \n"
71 " beqzl %[my_ticket], 1b \n"
1da177e4 72 " nop \n"
500c2e1f
DD
73 " srl %[my_ticket], %[ticket], 16 \n"
74 " andi %[ticket], %[ticket], 0xffff \n"
2a31b033
RB
75 " bne %[ticket], %[my_ticket], 4f \n"
76 " subu %[ticket], %[my_ticket], %[ticket] \n"
77 "2: \n"
78 " .subsection 2 \n"
500c2e1f 79 "4: andi %[ticket], %[ticket], 0xffff \n"
0e6826c7 80 " sll %[ticket], 5 \n"
2a31b033
RB
81 " \n"
82 "6: bnez %[ticket], 6b \n"
83 " subu %[ticket], 1 \n"
84 " \n"
500c2e1f 85 " lhu %[ticket], %[serving_now_ptr] \n"
2a31b033
RB
86 " beq %[ticket], %[my_ticket], 2b \n"
87 " subu %[ticket], %[my_ticket], %[ticket] \n"
0e6826c7 88 " b 4b \n"
2a31b033
RB
89 " subu %[ticket], %[ticket], 1 \n"
90 " .previous \n"
91 " .set pop \n"
94bfb75a 92 : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
500c2e1f 93 [serving_now_ptr] "+m" (lock->h.serving_now),
2a31b033 94 [ticket] "=&r" (tmp),
500c2e1f
DD
95 [my_ticket] "=&r" (my_ticket)
96 : [inc] "r" (inc));
1da177e4 97 } else {
2a31b033 98 __asm__ __volatile__ (
0199c4e6 99 " .set push # arch_spin_lock \n"
2a31b033
RB
100 " .set noreorder \n"
101 " \n"
500c2e1f
DD
102 "1: ll %[ticket], %[ticket_ptr] \n"
103 " addu %[my_ticket], %[ticket], %[inc] \n"
2a31b033 104 " sc %[my_ticket], %[ticket_ptr] \n"
500c2e1f
DD
105 " beqz %[my_ticket], 1b \n"
106 " srl %[my_ticket], %[ticket], 16 \n"
107 " andi %[ticket], %[ticket], 0xffff \n"
2a31b033
RB
108 " bne %[ticket], %[my_ticket], 4f \n"
109 " subu %[ticket], %[my_ticket], %[ticket] \n"
110 "2: \n"
f65e4fa8 111 " .subsection 2 \n"
2a31b033 112 "4: andi %[ticket], %[ticket], 0x1fff \n"
0e6826c7 113 " sll %[ticket], 5 \n"
2a31b033
RB
114 " \n"
115 "6: bnez %[ticket], 6b \n"
116 " subu %[ticket], 1 \n"
117 " \n"
500c2e1f 118 " lhu %[ticket], %[serving_now_ptr] \n"
2a31b033
RB
119 " beq %[ticket], %[my_ticket], 2b \n"
120 " subu %[ticket], %[my_ticket], %[ticket] \n"
0e6826c7 121 " b 4b \n"
2a31b033 122 " subu %[ticket], %[ticket], 1 \n"
f65e4fa8 123 " .previous \n"
2a31b033 124 " .set pop \n"
94bfb75a 125 : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
500c2e1f 126 [serving_now_ptr] "+m" (lock->h.serving_now),
2a31b033 127 [ticket] "=&r" (tmp),
500c2e1f
DD
128 [my_ticket] "=&r" (my_ticket)
129 : [inc] "r" (inc));
1da177e4 130 }
0004a9df 131
17099b11 132 smp_llsc_mb();
1da177e4
LT
133}
134
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	/*
	 * Release: bump serving_now so the next queued ticket may enter.
	 * The 16-bit store may wrap; waiters compare for equality only.
	 */
	unsigned int serving_now = lock->h.serving_now + 1;
	wmb();		/* order the critical section before the releasing store */
	lock->h.serving_now = (u16)serving_now;
	nudge_writes();	/* presumably flushes the store toward spinners — arch helper */
}
142
/*
 * Try to acquire the ticket lock without queueing.
 *
 * Succeeds only when the lock is free, i.e. the next-ticket half equals
 * serving_now: in that case we take a ticket (add 0x10000) with ll/sc.
 * Returns 1 on success, 0 if the lock was held (tmp is loaded with the
 * li in the branch-delay/success path, or with 0 on the out-of-line
 * failure path).
 */
static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
{
	int tmp, tmp2, tmp3;
	int inc = 0x10000;	/* +1 in the high (next-ticket) half */

	if (R10000_LLSC_WAR) {
		/* R10000 erratum workaround: branch-likely (beqzl) retry form. */
		__asm__ __volatile__ (
		"	.set push		# arch_spin_trylock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[now_serving], %[ticket], 0xffff	\n"
		"	bne	%[my_ticket], %[now_serving], 3f	\n"
		"	 addu	%[ticket], %[ticket], %[inc]		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqzl	%[ticket], 1b				\n"
		"	 li	%[ticket], 1				\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	2b					\n"
		"	 li	%[ticket], 0				\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (tmp2),
		  [now_serving] "=&r" (tmp3)
		: [inc] "r" (inc));
	} else {
		__asm__ __volatile__ (
		"	.set push		# arch_spin_trylock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[now_serving], %[ticket], 0xffff	\n"
		"	bne	%[my_ticket], %[now_serving], 3f	\n"
		"	 addu	%[ticket], %[ticket], %[inc]		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqz	%[ticket], 1b				\n"
		"	 li	%[ticket], 1				\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	2b					\n"
		"	 li	%[ticket], 0				\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (tmp2),
		  [now_serving] "=&r" (tmp3)
		: [inc] "r" (inc));
	}

	smp_llsc_mb();	/* acquire barrier on both success and failure */

	return tmp;
}
202
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get a irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

/*
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 *
 * A negative lock word means a writer holds it (arch_write_lock sets the
 * sign bit via "lui %1, 0x8000"), so readers may enter while it is >= 0.
 */
#define arch_read_can_lock(rw)	((rw)->lock >= 0)

/*
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 *
 * A writer needs the lock word to be exactly 0: no readers, no writer.
 */
#define arch_write_can_lock(rw) (!(rw)->lock)
e3c48078 223
/*
 * Acquire the rwlock for reading: spin while the lock word is negative
 * (writer held), then atomically increment the reader count with ll/sc.
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		/* R10000 erratum workaround: branch-likely (beqzl) retry form. */
		__asm__ __volatile__(
		"	.set	noreorder	# arch_read_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bltz	%1, 1b					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
		: GCC_OFF_SMALL_ASM() (rw->lock)
		: "memory");
	} else {
		/* Retry a failed sc from C so the ll/sc window stays minimal. */
		do {
			__asm__ __volatile__(
			"1:	ll	%1, %2	# arch_read_lock	\n"
			"	bltz	%1, 1b				\n"
			"	 addu	%1, 1				\n"
			"2:	sc	%1, %0				\n"
			: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
			: GCC_OFF_SMALL_ASM() (rw->lock)
			: "memory");
		} while (unlikely(!tmp));
	}

	smp_llsc_mb();	/* acquire barrier after the ll/sc sequence */
}
256
/*
 * Drop a read hold: atomically decrement the reader count with ll/sc.
 */
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	smp_mb__before_llsc();	/* order the critical section before the release */

	if (R10000_LLSC_WAR) {
		/* R10000 erratum workaround: branch-likely (beqzl) retry form. */
		__asm__ __volatile__(
		"1:	ll	%1, %2		# arch_read_unlock	\n"
		"	addiu	%1, -1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
		: GCC_OFF_SMALL_ASM() (rw->lock)
		: "memory");
	} else {
		/* Retry a failed sc from C so the ll/sc window stays minimal. */
		do {
			__asm__ __volatile__(
			"1:	ll	%1, %2	# arch_read_unlock	\n"
			"	addiu	%1, -1				\n"
			"	sc	%1, %0				\n"
			: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
			: GCC_OFF_SMALL_ASM() (rw->lock)
			: "memory");
		} while (unlikely(!tmp));
	}
}
284
/*
 * Acquire the rwlock for writing: spin until the lock word is 0 (no
 * readers, no writer), then store 0x80000000 (the sign bit, built with
 * "lui %1, 0x8000") via ll/sc to mark the writer present.
 */
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		/* R10000 erratum workaround: branch-likely (beqzl) retry form. */
		__asm__ __volatile__(
		"	.set	noreorder	# arch_write_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 1b					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
		: GCC_OFF_SMALL_ASM() (rw->lock)
		: "memory");
	} else {
		/* Retry a failed sc from C so the ll/sc window stays minimal. */
		do {
			__asm__ __volatile__(
			"1:	ll	%1, %2	# arch_write_lock	\n"
			"	bnez	%1, 1b				\n"
			"	 lui	%1, 0x8000			\n"
			"2:	sc	%1, %0				\n"
			: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
			: GCC_OFF_SMALL_ASM() (rw->lock)
			: "memory");
		} while (unlikely(!tmp));
	}

	smp_llsc_mb();	/* acquire barrier after the ll/sc sequence */
}
317
/*
 * Release the write lock: a plain word store of 0 clears the writer bit.
 * No ll/sc needed — the writer is the sole owner, so a simple store is
 * atomic enough.
 */
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();	/* full barrier: critical section completes before release */

	__asm__ __volatile__(
	"				# arch_write_unlock	\n"
	"	sw	$0, %0					\n"
	: "=m" (rw->lock)
	: "m" (rw->lock)
	: "memory");
}
329
/*
 * Try to take a read hold without spinning.  Fails (returns 0) if the
 * lock word is negative (writer held); otherwise increments the reader
 * count with ll/sc and returns 1.
 */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		/* R10000 erratum workaround: branch-likely (beqzl) retry form. */
		__asm__ __volatile__(
		"	.set	noreorder	# arch_read_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bltz	%1, 2f					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	.set	reorder					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"2:							\n"
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
		: GCC_OFF_SMALL_ASM() (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_read_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bltz	%1, 2f					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"2:							\n"
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
		: GCC_OFF_SMALL_ASM() (rw->lock)
		: "memory");
	}

	return ret;
}
1da177e4 373
/*
 * Try to take the write lock without spinning.  Fails (returns 0) if the
 * lock word is non-zero (any reader or a writer); otherwise stores the
 * writer bit (0x80000000 via "lui %1, 0x8000") with ll/sc and returns 1.
 */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		/* R10000 erratum workaround: branch-likely (beqzl) retry form. */
		__asm__ __volatile__(
		"	.set	noreorder	# arch_write_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bnez	%1, 2f					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"	.set	reorder					\n"
		"2:							\n"
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
		: GCC_OFF_SMALL_ASM() (rw->lock)
		: "memory");
	} else {
		/* Retry a failed sc from C so the ll/sc window stays minimal. */
		do {
			__asm__ __volatile__(
			"	ll	%1, %3	# arch_write_trylock	\n"
			"	li	%2, 0				\n"
			"	bnez	%1, 2f				\n"
			"	lui	%1, 0x8000			\n"
			"	sc	%1, %0				\n"
			"	li	%2, 1				\n"
			"2:						\n"
			: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp),
			  "=&r" (ret)
			: GCC_OFF_SMALL_ASM() (rw->lock)
			: "memory");
		} while (unlikely(!tmp));

		smp_llsc_mb();	/* acquire barrier after a successful sc */
	}

	return ret;
}
417
/* rwlocks take no IRQ-flags shortcut on MIPS; flags are ignored. */
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

/* Relax the CPU between lock-acquisition attempts at the generic level. */
#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()
ef6edc97 424
1da177e4 425#endif /* _ASM_SPINLOCK_H */
This page took 0.778367 seconds and 5 git commands to generate.