locking/rwsem: Avoid deceiving lock spinners
kernel/locking/rwsem-xadd.c
/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 *
 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
 * and Michel Lespinasse <walken@google.com>
 *
 * Optimistic spinning by Tim Chen <tim.c.chen@intel.com>
 * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes.
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <linux/osq_lock.h>

#include "rwsem.h"

/*
 * Guide to the rw_semaphore's count field for common values.
 * (32-bit case illustrated, similar for 64-bit)
 *
 * 0x0000000X	(1) X readers active or attempting lock, no writer waiting
 *		    X = #active_readers + #readers attempting to lock
 *		    (X*ACTIVE_BIAS)
 *
 * 0x00000000	rwsem is unlocked, and no one is waiting for the lock or
 *		attempting to read lock or write lock.
 *
 * 0xffff000X	(1) X readers active or attempting lock, with waiters for lock
 *		    X = #active readers + # readers attempting lock
 *		    (X*ACTIVE_BIAS + WAITING_BIAS)
 *		(2) 1 writer attempting lock, no waiters for lock
 *		    X-1 = #active readers + #readers attempting lock
 *		    ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *		(3) 1 writer active, no waiters for lock
 *		    X-1 = #active readers + #readers attempting lock
 *		    ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *
 * 0xffff0001	(1) 1 reader active or attempting lock, waiters for lock
 *		    (WAITING_BIAS + ACTIVE_BIAS)
 *		(2) 1 writer active or attempting lock, no waiters for lock
 *		    (ACTIVE_WRITE_BIAS)
 *
 * 0xffff0000	(1) There are writers or readers queued but none active
 *		    or in the process of attempting lock.
 *		    (WAITING_BIAS)
 *		Note: writer can attempt to steal lock for this count by adding
 *		ACTIVE_WRITE_BIAS in cmpxchg and checking the old count
 *
 * 0xfffe0001	(1) 1 writer active, or attempting lock. Waiters on queue.
 *		    (ACTIVE_WRITE_BIAS + WAITING_BIAS)
 *
 * Note: Readers attempt to lock by adding ACTIVE_BIAS in down_read and checking
 *	 the count becomes more than 0 for successful lock acquisition,
 *	 i.e. the case where there are only readers or nobody has lock.
 *	 (1st and 2nd case above).
 *
 *	 Writers attempt to lock by adding ACTIVE_WRITE_BIAS in down_write and
 *	 checking the count becomes ACTIVE_WRITE_BIAS for successful lock
 *	 acquisition (i.e. nobody else has lock or attempts lock).  If
 *	 unsuccessful, in rwsem_down_write_failed, we'll check to see if there
 *	 are only waiters but none active (5th case above), and attempt to
 *	 steal the lock.
 *
 */

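/*
 * Worked example (using the 32-bit values illustrated above): one writer
 * holding the lock with one more waiter queued gives
 *	ACTIVE_WRITE_BIAS + WAITING_BIAS
 *	  = (0xffff0000 + 0x00000001) + 0xffff0000 = 0xfffe0001,
 * the last case in the table.  When the writer releases the lock it
 * subtracts ACTIVE_WRITE_BIAS again, leaving 0xffff0000: waiters queued
 * but none active, the state from which the next writer may steal the lock.
 */
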
/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	sem->count = RWSEM_UNLOCKED_VALUE;
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	sem->owner = NULL;
	osq_lock_init(&sem->osq);
#endif
}

EXPORT_SYMBOL(__init_rwsem);

enum rwsem_waiter_type {
	RWSEM_WAITING_FOR_WRITE,
	RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	enum rwsem_waiter_type type;
};

enum rwsem_wake_type {
	RWSEM_WAKE_ANY,		/* Wake whatever's at head of wait list */
	RWSEM_WAKE_READERS,	/* Wake readers only */
	RWSEM_WAKE_READ_OWNED	/* Waker thread holds the read lock */
};

/*
 * handle the lock release when processes blocked on it can now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 * - there must be someone on the queue
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only woken if downgrading is false
 */
static struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, enum rwsem_wake_type wake_type)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;
	struct list_head *next;
	long oldcount, woken, loop, adjustment;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
		if (wake_type == RWSEM_WAKE_ANY)
			/* Wake writer at the front of the queue, but do not
			 * grant it the lock yet as we want other writers
			 * to be able to steal it.  Readers, on the other hand,
			 * will block as they will notice the queued writer.
			 */
			wake_up_process(waiter->task);
		goto out;
	}

	/* Writers might steal the lock before we grant it to the next reader.
	 * We prefer to do the first reader grant before counting readers
	 * so we can bail out early if a writer stole the lock.
	 */
	adjustment = 0;
	if (wake_type != RWSEM_WAKE_READ_OWNED) {
		adjustment = RWSEM_ACTIVE_READ_BIAS;
 try_reader_grant:
		oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
		if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
			/* A writer stole the lock. Undo our reader grant. */
			if (rwsem_atomic_update(-adjustment, sem) &
						RWSEM_ACTIVE_MASK)
				goto out;
			/* Last active locker left. Retry waking readers. */
			goto try_reader_grant;
		}
	}

	/* Grant an infinite number of read locks to the readers at the front
	 * of the queue.  Note we increment the 'active part' of the count by
	 * the number of readers before waking any processes up.
	 */
	woken = 0;
	do {
		woken++;

		if (waiter->list.next == &sem->wait_list)
			break;

		waiter = list_entry(waiter->list.next,
					struct rwsem_waiter, list);

	} while (waiter->type != RWSEM_WAITING_FOR_WRITE);

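	/*
	 * Each of the 'woken' readers is granted ACTIVE_READ_BIAS; subtract
	 * the bias we already applied in the reader grant above.  If we
	 * walked off the end of the list (all waiters were readers), the
	 * wait list is being emptied, so the WAITING_BIAS goes away as well.
	 */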
	adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
	if (waiter->type != RWSEM_WAITING_FOR_WRITE)
		/* hit end of list above */
		adjustment -= RWSEM_WAITING_BIAS;

	if (adjustment)
		rwsem_atomic_add(adjustment, sem);

	next = sem->wait_list.next;
	loop = woken;
	do {
		waiter = list_entry(next, struct rwsem_waiter, list);
		next = waiter->list.next;
		tsk = waiter->task;
		/*
		 * Make sure we do not wakeup the next reader before
		 * setting the nil condition to grant the next reader;
		 * otherwise we could miss the wakeup on the other
		 * side and end up sleeping again. See the pairing
		 * in rwsem_down_read_failed().
		 */
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
	} while (--loop);

	sem->wait_list.next = next;
	next->prev = &sem->wait_list;

 out:
	return sem;
}

/*
 * Wait for the read lock to be granted
 */
__visible
struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
{
	long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
	struct rwsem_waiter waiter;
	struct task_struct *tsk = current;

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.type = RWSEM_WAITING_FOR_READ;
	get_task_struct(tsk);

	raw_spin_lock_irq(&sem->wait_lock);
	if (list_empty(&sem->wait_list))
		adjustment += RWSEM_WAITING_BIAS;
	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	count = rwsem_atomic_update(adjustment, sem);

	/* If there are no active locks, wake the front queued process(es).
	 *
	 * If there are no writers and we are first in the queue,
	 * wake our own waiter to join the existing active readers !
	 */
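	/*
	 * Concretely: count == RWSEM_WAITING_BIAS means nobody holds the lock
	 * at all, while count > RWSEM_WAITING_BIAS with an adjustment that
	 * still carries WAITING_BIAS means readers hold the lock and we are
	 * the first waiter, so we may wake ourselves to join them.
	 */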
	if (count == RWSEM_WAITING_BIAS ||
	    (count > RWSEM_WAITING_BIAS &&
	     adjustment != -RWSEM_ACTIVE_READ_BIAS))
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);

	raw_spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock */
	while (true) {
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		if (!waiter.task)
			break;
		schedule();
	}

	__set_task_state(tsk, TASK_RUNNING);
	return sem;
}
EXPORT_SYMBOL(rwsem_down_read_failed);

static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
{
	/*
	 * Try acquiring the write lock. Check count first in order
	 * to reduce unnecessary expensive cmpxchg() operations.
	 */
	if (count == RWSEM_WAITING_BIAS &&
	    cmpxchg(&sem->count, RWSEM_WAITING_BIAS,
		    RWSEM_ACTIVE_WRITE_BIAS) == RWSEM_WAITING_BIAS) {
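		/*
		 * The cmpxchg above converted the queue's WAITING_BIAS into
		 * this writer's ACTIVE_WRITE_BIAS.  If other waiters are
		 * still queued behind us, put a WAITING_BIAS back so the
		 * count keeps reflecting the non-empty wait list.
		 */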
		if (!list_is_singular(&sem->wait_list))
			rwsem_atomic_update(RWSEM_WAITING_BIAS, sem);
		rwsem_set_owner(sem);
		return true;
	}

	return false;
}

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
 * Try to acquire write lock before the writer has been put on wait queue.
 */
static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
{
	long old, count = ACCESS_ONCE(sem->count);

	while (true) {
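		/*
		 * The lock can only be stolen while it is free (count == 0)
		 * or while waiters are queued but none is active
		 * (count == RWSEM_WAITING_BIAS).
		 */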
		if (!(count == 0 || count == RWSEM_WAITING_BIAS))
			return false;

		old = cmpxchg(&sem->count, count, count + RWSEM_ACTIVE_WRITE_BIAS);
		if (old == count) {
			rwsem_set_owner(sem);
			return true;
		}

		count = old;
	}
}

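/*
 * A quick check before we start spinning: is the lock owner currently
 * running on a CPU?  If it is not, or if there is no owner recorded at all
 * (which may mean readers hold the lock), spinning is unlikely to pay off.
 */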
static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
	struct task_struct *owner;
	bool on_cpu = false;

	if (need_resched())
		return false;

	rcu_read_lock();
	owner = ACCESS_ONCE(sem->owner);
	if (owner)
		on_cpu = owner->on_cpu;
	rcu_read_unlock();

	/*
	 * If sem->owner is not set, yet we have just recently entered the
	 * slowpath, then there is a possibility reader(s) may have the lock.
	 * To be safe, avoid spinning in these situations.
	 */
	return on_cpu;
}

static inline bool owner_running(struct rw_semaphore *sem,
				 struct task_struct *owner)
{
	if (sem->owner != owner)
		return false;

	/*
	 * Ensure we emit the owner->on_cpu dereference _after_ checking
	 * that sem->owner still matches owner.  If that fails, owner might
	 * point to free()d memory; if it still matches, the rcu_read_lock()
	 * ensures the memory stays valid.
	 */
	barrier();

	return owner->on_cpu;
}

static noinline
bool rwsem_spin_on_owner(struct rw_semaphore *sem, struct task_struct *owner)
{
	long count;

	rcu_read_lock();
	while (owner_running(sem, owner)) {
		/* abort spinning when need_resched */
		if (need_resched()) {
			rcu_read_unlock();
			return false;
		}

		cpu_relax_lowlatency();
	}
	rcu_read_unlock();

	if (READ_ONCE(sem->owner))
		return true; /* new owner, continue spinning */

	/*
	 * When the owner is not set, the lock could be free or
	 * held by readers. Check the counter to verify the
	 * state.
	 */
	count = READ_ONCE(sem->count);
	return (count == 0 || count == RWSEM_WAITING_BIAS);
}

static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	struct task_struct *owner;
	bool taken = false;

	preempt_disable();

	/* sem->wait_lock should not be held when doing optimistic spinning */
	if (!rwsem_can_spin_on_owner(sem))
		goto done;

	if (!osq_lock(&sem->osq))
		goto done;

	while (true) {
		owner = ACCESS_ONCE(sem->owner);
		if (owner && !rwsem_spin_on_owner(sem, owner))
			break;

		/* wait_lock will be acquired if write_lock is obtained */
		if (rwsem_try_write_lock_unqueued(sem)) {
			taken = true;
			break;
		}

		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field.  If
		 * we're an RT task, we can live-lock here because we would
		 * never let the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(current)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax_lowlatency();
	}
	osq_unlock(&sem->osq);
done:
	preempt_enable();
	return taken;
}

#else
static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	return false;
}
#endif

/*
 * Wait until we successfully acquire the write lock
 */
__visible
struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
{
	long count;
	bool waiting = true; /* any queued threads before us */
	struct rwsem_waiter waiter;

	/* undo write bias from down_write operation, stop active locking */
	count = rwsem_atomic_update(-RWSEM_ACTIVE_WRITE_BIAS, sem);

	/* do optimistic spinning and steal lock if possible */
	if (rwsem_optimistic_spin(sem))
		return sem;

	/*
	 * Optimistic spinning failed, proceed to the slowpath
	 * and block until we can acquire the sem.
	 */
	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_WRITE;

	raw_spin_lock_irq(&sem->wait_lock);

	/* account for this before adding a new element to the list */
	if (list_empty(&sem->wait_list))
		waiting = false;

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	if (waiting) {
		count = ACCESS_ONCE(sem->count);

		/*
		 * If there were already threads queued before us and there are
		 * no active writers, the lock must be read owned; so we try to
		 * wake any read locks that were queued ahead of us.
		 */
		if (count > RWSEM_WAITING_BIAS)
			sem = __rwsem_do_wake(sem, RWSEM_WAKE_READERS);

	} else
		count = rwsem_atomic_update(RWSEM_WAITING_BIAS, sem);

	/* wait until we successfully acquire the lock */
	set_current_state(TASK_UNINTERRUPTIBLE);
	while (true) {
		if (rwsem_try_write_lock(count, sem))
			break;
		raw_spin_unlock_irq(&sem->wait_lock);

		/* Block until there are no active lockers. */
		do {
			schedule();
			set_current_state(TASK_UNINTERRUPTIBLE);
		} while ((count = sem->count) & RWSEM_ACTIVE_MASK);

		raw_spin_lock_irq(&sem->wait_lock);
	}
	__set_current_state(TASK_RUNNING);

	list_del(&waiter.list);
	raw_spin_unlock_irq(&sem->wait_lock);

	return sem;
}
EXPORT_SYMBOL(rwsem_down_write_failed);

/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
__visible
struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	/* do nothing if list empty */
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

	return sem;
}
EXPORT_SYMBOL(rwsem_wake);

/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
__visible
struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	/* do nothing if list empty */
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

	return sem;
}
EXPORT_SYMBOL(rwsem_downgrade_wake);