locking/rwsem: Rework zeroing reader waiter->task
kernel/locking/rwsem-xadd.c
/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 *
 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
 * and Michel Lespinasse <walken@google.com>
 *
 * Optimistic spinning by Tim Chen <tim.c.chen@intel.com>
 * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes.
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <linux/osq_lock.h>

#include "rwsem.h"

/*
 * Guide to the rw_semaphore's count field for common values.
 * (32-bit case illustrated, similar for 64-bit)
 *
 * 0x0000000X	(1) X readers active or attempting lock, no writer waiting
 *		    X = #active_readers + #readers attempting to lock
 *		    (X*ACTIVE_BIAS)
 *
 * 0x00000000	rwsem is unlocked, and no one is waiting for the lock or
 *		attempting to read lock or write lock.
 *
 * 0xffff000X	(1) X readers active or attempting lock, with waiters for lock
 *		    X = #active readers + #readers attempting lock
 *		    (X*ACTIVE_BIAS + WAITING_BIAS)
 *		(2) 1 writer attempting lock, no waiters for lock
 *		    X-1 = #active readers + #readers attempting lock
 *		    ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *		(3) 1 writer active, no waiters for lock
 *		    X-1 = #active readers + #readers attempting lock
 *		    ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *
 * 0xffff0001	(1) 1 reader active or attempting lock, waiters for lock
 *		    (WAITING_BIAS + ACTIVE_BIAS)
 *		(2) 1 writer active or attempting lock, no waiters for lock
 *		    (ACTIVE_WRITE_BIAS)
 *
 * 0xffff0000	(1) There are writers or readers queued but none active
 *		    or in the process of attempting lock.
 *		    (WAITING_BIAS)
 *		Note: a writer can attempt to steal the lock for this count
 *		by adding ACTIVE_WRITE_BIAS with cmpxchg and checking the
 *		old count.
 *
 * 0xfffe0001	(1) 1 writer active, or attempting lock. Waiters on queue.
 *		    (ACTIVE_WRITE_BIAS + WAITING_BIAS)
 *
 * Note: Readers attempt to lock by adding ACTIVE_BIAS in down_read and
 * checking that the count becomes greater than 0 for successful lock
 * acquisition, i.e. the case where there are only readers or nobody has
 * the lock (1st and 2nd case above).
 *
 * Writers attempt to lock by adding ACTIVE_WRITE_BIAS in down_write and
 * checking that the count becomes ACTIVE_WRITE_BIAS for successful lock
 * acquisition (i.e. nobody else has the lock or is attempting to lock).
 * If unsuccessful, in rwsem_down_write_failed(), we'll check to see if
 * there are only waiters but none active (5th case above), and attempt
 * to steal the lock.
 *
 */

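/*
 * Worked example (editor's illustration, not part of the original source):
 * on a 32-bit build, two readers hold the lock and one writer then queues
 * behind them:
 *
 *	down_read #1: count += ACTIVE_BIAS        -> 0x00000001
 *	down_read #2: count += ACTIVE_BIAS        -> 0x00000002
 *	down_write:   count += ACTIVE_WRITE_BIAS  -> 0xffff0003 (fast path
 *	              fails since count != ACTIVE_WRITE_BIAS)
 *	  slowpath:   count -= ACTIVE_WRITE_BIAS  -> 0x00000002
 *	              count += WAITING_BIAS       -> 0xffff0002 (writer queued)
 *	up_read #1:   count -= ACTIVE_BIAS        -> 0xffff0001
 *	up_read #2:   count -= ACTIVE_BIAS        -> 0xffff0000 (5th case:
 *	              waiters queued, none active; the writer may now take
 *	              or steal the lock)
 */
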
/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	sem->count = RWSEM_UNLOCKED_VALUE;
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	sem->owner = NULL;
	osq_lock_init(&sem->osq);
#endif
}

EXPORT_SYMBOL(__init_rwsem);

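/*
 * Illustrative usage (editor's sketch, not part of the original source):
 * a statically declared rwsem and a dynamically initialized one.  The
 * init_rwsem() macro in <linux/rwsem.h> expands to __init_rwsem() with
 * lockdep class information filled in.  The names "my_static_sem" and
 * "struct my_object" are hypothetical.
 *
 *	static DECLARE_RWSEM(my_static_sem);
 *
 *	struct my_object {
 *		struct rw_semaphore sem;
 *	};
 *
 *	static void my_object_setup(struct my_object *obj)
 *	{
 *		init_rwsem(&obj->sem);
 *	}
 */
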
enum rwsem_waiter_type {
	RWSEM_WAITING_FOR_WRITE,
	RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	enum rwsem_waiter_type type;
};

enum rwsem_wake_type {
	RWSEM_WAKE_ANY,		/* Wake whatever's at head of wait list */
	RWSEM_WAKE_READERS,	/* Wake readers only */
	RWSEM_WAKE_READ_OWNED	/* Waker thread holds the read lock */
};

/*
 * handle the lock release when processes blocked on it can now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is negative (and will still be so)
 * - there must be someone on the queue
 * - the wait_lock must be held by the caller
 * - tasks are marked for wakeup, the caller must later invoke wake_up_q()
 *   to actually wake up the blocked task(s) and drop the reference count,
 *   preferably when the wait_lock is released
 * - woken waiters are discarded from the list after having their task zeroed
 * - writers are only marked woken if downgrading is false
 */
static struct rw_semaphore *
__rwsem_mark_wake(struct rw_semaphore *sem,
		  enum rwsem_wake_type wake_type, struct wake_q_head *wake_q)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;
	struct list_head *next;
	long oldcount, woken, loop, adjustment;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
		if (wake_type == RWSEM_WAKE_ANY) {
			/*
			 * Mark writer at the front of the queue for wakeup.
			 * Until the task is actually awoken later by the
			 * caller, other writers are able to steal it.
			 * Readers, on the other hand, will block as they
			 * will notice the queued writer.
			 */
			wake_q_add(wake_q, waiter->task);
		}
		goto out;
	}

	/* Writers might steal the lock before we grant it to the next reader.
	 * We prefer to do the first reader grant before counting readers
	 * so we can bail out early if a writer stole the lock.
	 */
	adjustment = 0;
	if (wake_type != RWSEM_WAKE_READ_OWNED) {
		adjustment = RWSEM_ACTIVE_READ_BIAS;
 try_reader_grant:
		oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
		if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
			/* A writer stole the lock. Undo our reader grant. */
			if (rwsem_atomic_update(-adjustment, sem) &
						RWSEM_ACTIVE_MASK)
				goto out;
			/* Last active locker left. Retry waking readers. */
			goto try_reader_grant;
		}
	}

	/* Grant read locks to all of the readers at the front of the queue.
	 * Note we increment the 'active part' of the count by the number of
	 * readers before waking any processes up.
	 */
	woken = 0;
	do {
		woken++;

		if (waiter->list.next == &sem->wait_list)
			break;

		waiter = list_entry(waiter->list.next,
					struct rwsem_waiter, list);

	} while (waiter->type != RWSEM_WAITING_FOR_WRITE);

	adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
	if (waiter->type != RWSEM_WAITING_FOR_WRITE)
		/* hit end of list above */
		adjustment -= RWSEM_WAITING_BIAS;

	if (adjustment)
		rwsem_atomic_add(adjustment, sem);

	next = sem->wait_list.next;
	loop = woken;
	do {
		waiter = list_entry(next, struct rwsem_waiter, list);
		next = waiter->list.next;
		tsk = waiter->task;

		wake_q_add(wake_q, tsk);
		/*
		 * Ensure that the last operation is setting the reader
		 * waiter to nil such that rwsem_down_read_failed() cannot
		 * race with do_exit() by always holding a reference count
		 * to the task to wakeup.
		 */
		smp_store_release(&waiter->task, NULL);
	} while (--loop);

	sem->wait_list.next = next;
	next->prev = &sem->wait_list;

 out:
	return sem;
}

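/*
 * Typical calling pattern (editor's sketch; it mirrors rwsem_wake() later
 * in this file): the waiters are only marked for wakeup under wait_lock,
 * and the actual wakeups happen after the lock is dropped.
 *
 *	WAKE_Q(wake_q);
 *
 *	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 *	if (!list_empty(&sem->wait_list))
 *		sem = __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
 *	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 *	wake_up_q(&wake_q);		(wakeups outside the wait_lock)
 */
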
/*
 * Wait for the read lock to be granted
 */
__visible
struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
{
	long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
	struct rwsem_waiter waiter;
	struct task_struct *tsk = current;
	WAKE_Q(wake_q);

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.type = RWSEM_WAITING_FOR_READ;

	raw_spin_lock_irq(&sem->wait_lock);
	if (list_empty(&sem->wait_list))
		adjustment += RWSEM_WAITING_BIAS;
	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	count = rwsem_atomic_update(adjustment, sem);

	/* If there are no active locks, wake the front queued process(es).
	 *
	 * If there are no writers and we are first in the queue,
	 * wake our own waiter to join the existing active readers!
	 */
	if (count == RWSEM_WAITING_BIAS ||
	    (count > RWSEM_WAITING_BIAS &&
	     adjustment != -RWSEM_ACTIVE_READ_BIAS))
		sem = __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

	raw_spin_unlock_irq(&sem->wait_lock);
	wake_up_q(&wake_q);

	/* wait to be given the lock */
	while (true) {
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		if (!waiter.task)
			break;
		schedule();
	}

	__set_task_state(tsk, TASK_RUNNING);
	return sem;
}
EXPORT_SYMBOL(rwsem_down_read_failed);

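/*
 * For context (editor's sketch, approximating the per-arch fast path found
 * in the rwsem headers, e.g. asm-generic/rwsem.h): down_read() only ends up
 * in rwsem_down_read_failed() when the unconditional increment of the count
 * leaves it non-positive, i.e. a writer holds or is waiting for the lock:
 *
 *	static inline void __down_read(struct rw_semaphore *sem)
 *	{
 *		if (unlikely(atomic_long_inc_return(
 *				(atomic_long_t *)&sem->count) <= 0))
 *			rwsem_down_read_failed(sem);
 *	}
 */
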
static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
{
	/*
	 * Try acquiring the write lock. Check count first in order
	 * to reduce unnecessary expensive cmpxchg() operations.
	 */
	if (count == RWSEM_WAITING_BIAS &&
	    cmpxchg_acquire(&sem->count, RWSEM_WAITING_BIAS,
		    RWSEM_ACTIVE_WRITE_BIAS) == RWSEM_WAITING_BIAS) {
		if (!list_is_singular(&sem->wait_list))
			rwsem_atomic_update(RWSEM_WAITING_BIAS, sem);
		rwsem_set_owner(sem);
		return true;
	}

	return false;
}

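/*
 * Editor's note (illustrative, using the 32-bit values from the guide at
 * the top of this file): the cmpxchg() turns
 *	0xffff0000 (WAITING_BIAS: waiters queued, none active)
 * into
 *	0xffff0001 (ACTIVE_WRITE_BIAS: one writer active, no other waiters),
 * and if we were not the only queued waiter, the follow-up
 * rwsem_atomic_update(RWSEM_WAITING_BIAS, sem) restores the waiting bias,
 * giving 0xfffe0001 (writer active, waiters still queued).
 */
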
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
 * Try to acquire write lock before the writer has been put on wait queue.
 */
static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
{
	long old, count = READ_ONCE(sem->count);

	while (true) {
		if (!(count == 0 || count == RWSEM_WAITING_BIAS))
			return false;

		old = cmpxchg_acquire(&sem->count, count,
				      count + RWSEM_ACTIVE_WRITE_BIAS);
		if (old == count) {
			rwsem_set_owner(sem);
			return true;
		}

		count = old;
	}
}

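/*
 * Editor's note (illustrative): the unqueued steal only fires when the
 * lock is free (count == 0x00000000) or free with queued waiters
 * (count == 0xffff0000); adding ACTIVE_WRITE_BIAS yields 0xffff0001 or
 * 0xfffe0001 respectively, matching the write-owned cases in the guide
 * at the top of this file.
 */
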
static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
	struct task_struct *owner;
	bool ret = true;

	if (need_resched())
		return false;

	rcu_read_lock();
	owner = READ_ONCE(sem->owner);
	if (!owner) {
		long count = READ_ONCE(sem->count);
		/*
		 * If sem->owner is not set, but we have just entered the
		 * slowpath while the lock is active, reader(s) may hold
		 * the lock. To be safe, bail out of spinning in these
		 * situations.
		 */
		if (count & RWSEM_ACTIVE_MASK)
			ret = false;
		goto done;
	}

	ret = owner->on_cpu;
done:
	rcu_read_unlock();
	return ret;
}

static noinline
bool rwsem_spin_on_owner(struct rw_semaphore *sem, struct task_struct *owner)
{
	long count;

	rcu_read_lock();
	while (sem->owner == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking that sem->owner still matches owner. If that
		 * check fails, owner might point to free()d memory; if it
		 * still matches, the rcu_read_lock() ensures the memory
		 * stays valid.
		 */
		barrier();

		/* abort spinning when need_resched or owner is not running */
		if (!owner->on_cpu || need_resched()) {
			rcu_read_unlock();
			return false;
		}

		cpu_relax_lowlatency();
	}
	rcu_read_unlock();

	if (READ_ONCE(sem->owner))
		return true; /* new owner, continue spinning */

	/*
	 * When the owner is not set, the lock could be free or
	 * held by readers. Check the counter to verify the
	 * state.
	 */
	count = READ_ONCE(sem->count);
	return (count == 0 || count == RWSEM_WAITING_BIAS);
}

static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	struct task_struct *owner;
	bool taken = false;

	preempt_disable();

	/* sem->wait_lock should not be held when doing optimistic spinning */
	if (!rwsem_can_spin_on_owner(sem))
		goto done;

	if (!osq_lock(&sem->osq))
		goto done;

	while (true) {
		owner = READ_ONCE(sem->owner);
		if (owner && !rwsem_spin_on_owner(sem, owner))
			break;

		/* wait_lock will be acquired if write_lock is obtained */
		if (rwsem_try_write_lock_unqueued(sem)) {
			taken = true;
			break;
		}

		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we are an RT task, that will live-lock because we won't
		 * let the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(current)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax_lowlatency();
	}
	osq_unlock(&sem->osq);
done:
	preempt_enable();
	return taken;
}

/*
 * Return true if the rwsem has an active spinner
 */
static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
{
	return osq_is_locked(&sem->osq);
}

#else
static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	return false;
}

static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
{
	return false;
}
#endif

/*
 * Wait until we successfully acquire the write lock
 */
static inline struct rw_semaphore *
__rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
{
	long count;
	bool waiting = true; /* any queued threads before us */
	struct rwsem_waiter waiter;
	struct rw_semaphore *ret = sem;
	WAKE_Q(wake_q);

	/* undo write bias from down_write operation, stop active locking */
	count = rwsem_atomic_update(-RWSEM_ACTIVE_WRITE_BIAS, sem);

	/* do optimistic spinning and steal lock if possible */
	if (rwsem_optimistic_spin(sem))
		return sem;

	/*
	 * Optimistic spinning failed, proceed to the slowpath
	 * and block until we can acquire the sem.
	 */
	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_WRITE;

	raw_spin_lock_irq(&sem->wait_lock);

	/* account for this before adding a new element to the list */
	if (list_empty(&sem->wait_list))
		waiting = false;

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	if (waiting) {
		count = READ_ONCE(sem->count);

		/*
		 * If there were already threads queued before us and there are
		 * no active writers, the lock must be read owned; so we try to
		 * wake any readers that were queued ahead of us.
		 */
		if (count > RWSEM_WAITING_BIAS) {
			WAKE_Q(wake_q);

			sem = __rwsem_mark_wake(sem, RWSEM_WAKE_READERS, &wake_q);
			/*
			 * The wakeup is normally called _after_ the wait_lock
			 * is released, but given that we are proactively waking
			 * readers we can deal with the wake_q overhead as it is
			 * similar to releasing and taking the wait_lock again
			 * for attempting rwsem_try_write_lock().
			 */
			wake_up_q(&wake_q);
		}

	} else
		count = rwsem_atomic_update(RWSEM_WAITING_BIAS, sem);

	/* wait until we successfully acquire the lock */
	set_current_state(state);
	while (true) {
		if (rwsem_try_write_lock(count, sem))
			break;
		raw_spin_unlock_irq(&sem->wait_lock);

		/* Block until there are no active lockers. */
		do {
			if (signal_pending_state(state, current))
				goto out_nolock;

			schedule();
			set_current_state(state);
		} while ((count = sem->count) & RWSEM_ACTIVE_MASK);

		raw_spin_lock_irq(&sem->wait_lock);
	}
	__set_current_state(TASK_RUNNING);
	list_del(&waiter.list);
	raw_spin_unlock_irq(&sem->wait_lock);

	return ret;

out_nolock:
	__set_current_state(TASK_RUNNING);
	raw_spin_lock_irq(&sem->wait_lock);
	list_del(&waiter.list);
	if (list_empty(&sem->wait_list))
		rwsem_atomic_update(-RWSEM_WAITING_BIAS, sem);
	else
		__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
	raw_spin_unlock_irq(&sem->wait_lock);
	wake_up_q(&wake_q);

	return ERR_PTR(-EINTR);
}

__visible struct rw_semaphore * __sched
rwsem_down_write_failed(struct rw_semaphore *sem)
{
	return __rwsem_down_write_failed_common(sem, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(rwsem_down_write_failed);

__visible struct rw_semaphore * __sched
rwsem_down_write_failed_killable(struct rw_semaphore *sem)
{
	return __rwsem_down_write_failed_common(sem, TASK_KILLABLE);
}
EXPORT_SYMBOL(rwsem_down_write_failed_killable);

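/*
 * For context (editor's sketch, approximating the per-arch fast path; not
 * part of this file): down_write_killable() only ends up here when the
 * write bias cannot be taken uncontended, and an -EINTR return from the
 * slowpath is turned into a failed lock attempt:
 *
 *	static inline int __down_write_killable(struct rw_semaphore *sem)
 *	{
 *		if (unlikely(atomic_long_add_return(RWSEM_ACTIVE_WRITE_BIAS,
 *				(atomic_long_t *)&sem->count) !=
 *						RWSEM_ACTIVE_WRITE_BIAS))
 *			if (IS_ERR(rwsem_down_write_failed_killable(sem)))
 *				return -EINTR;
 *		return 0;
 *	}
 */
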
/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
__visible
struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
	unsigned long flags;
	WAKE_Q(wake_q);

	/*
	 * If a spinner is present, it is not necessary to do the wakeup.
	 * Try to do wakeup only if the trylock succeeds to minimize
	 * spinlock contention which may introduce too much delay in the
	 * unlock operation.
	 *
	 *       spinning writer		up_write/up_read caller
	 *       ---------------		-----------------------
	 * [S]   osq_unlock()			[L]   osq
	 *	 MB				      RMB
	 * [RmW] rwsem_try_write_lock()		[RmW] spin_trylock(wait_lock)
	 *
	 * Here, it is important to make sure that there won't be a missed
	 * wakeup while the rwsem is free and the only spinning writer goes
	 * to sleep without taking the rwsem. Even when the spinning writer
	 * is just going to break out of the waiting loop, it will still do
	 * a trylock in rwsem_down_write_failed() before sleeping. IOW, if
	 * rwsem_has_spinner() is true, it will guarantee at least one
	 * trylock attempt on the rwsem later on.
	 */
	if (rwsem_has_spinner(sem)) {
		/*
		 * The smp_rmb() here is to make sure that the spinner
		 * state is consulted before reading the wait_lock.
		 */
		smp_rmb();
		if (!raw_spin_trylock_irqsave(&sem->wait_lock, flags))
			return sem;
		goto locked;
	}
	raw_spin_lock_irqsave(&sem->wait_lock, flags);
locked:

	/* do nothing if list empty */
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	wake_up_q(&wake_q);

	return sem;
}
EXPORT_SYMBOL(rwsem_wake);

/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
__visible
struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
	unsigned long flags;
	WAKE_Q(wake_q);

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	/* do nothing if list empty */
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	wake_up_q(&wake_q);

	return sem;
}
EXPORT_SYMBOL(rwsem_downgrade_wake);
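
/*
 * Typical use of the downgrade path (editor's sketch; "obj" and its rwsem
 * are hypothetical):
 *
 *	down_write(&obj->sem);
 *	...modify obj...
 *	downgrade_write(&obj->sem);	(may end up in rwsem_downgrade_wake())
 *	...continue reading obj while letting other readers in...
 *	up_read(&obj->sem);
 */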