/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 *
 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
 * and Michel Lespinasse <walken@google.com>
 *
 * Optimistic spinning by Tim Chen <tim.c.chen@intel.com>
 * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes.
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <linux/osq_lock.h>

#include "rwsem.h"

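/*
 * These functions are the out-of-line slow paths behind the rwsem API
 * declared in <linux/rwsem.h>.  A minimal usage sketch from the caller's
 * side (my_sem and my_data are hypothetical names, not part of this file):
 *
 *	static DECLARE_RWSEM(my_sem);
 *
 *	down_read(&my_sem);		-> may call rwsem_down_read_failed()
 *	... read my_data ...
 *	up_read(&my_sem);		-> may call rwsem_wake()
 *
 *	down_write(&my_sem);		-> may call rwsem_down_write_failed()
 *	... modify my_data ...
 *	downgrade_write(&my_sem);	-> may call rwsem_downgrade_wake()
 *	... my_data remains readable ...
 *	up_read(&my_sem);
 */
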
/*
 * Guide to the rw_semaphore's count field for common values.
 * (32-bit case illustrated, similar for 64-bit)
 *
 * 0x0000000X	(1) X readers active or attempting lock, no writer waiting
 *		    X = #active_readers + #readers attempting to lock
 *		    (X*ACTIVE_BIAS)
 *
 * 0x00000000	rwsem is unlocked, and no one is waiting for the lock or
 *		attempting to read lock or write lock.
 *
 * 0xffff000X	(1) X readers active or attempting lock, with waiters for lock
 *		    X = #active readers + #readers attempting lock
 *		    (X*ACTIVE_BIAS + WAITING_BIAS)
 *		(2) 1 writer attempting lock, no waiters for lock
 *		    X-1 = #active readers + #readers attempting lock
 *		    ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *		(3) 1 writer active, no waiters for lock
 *		    X-1 = #active readers + #readers attempting lock
 *		    ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *
 * 0xffff0001	(1) 1 reader active or attempting lock, waiters for lock
 *		    (WAITING_BIAS + ACTIVE_BIAS)
 *		(2) 1 writer active or attempting lock, no waiters for lock
 *		    (ACTIVE_WRITE_BIAS)
 *
 * 0xffff0000	(1) There are writers or readers queued but none active
 *		    or in the process of attempting lock.
 *		    (WAITING_BIAS)
 *		Note: a writer can attempt to steal the lock for this count by
 *		adding ACTIVE_WRITE_BIAS in a cmpxchg and checking the old count
 *
 * 0xfffe0001	(1) 1 writer active, or attempting lock. Waiters on queue.
 *		    (ACTIVE_WRITE_BIAS + WAITING_BIAS)
 *
 * Note: Readers attempt to lock by adding ACTIVE_BIAS in down_read and checking
 *	 that the count becomes greater than 0 for successful lock acquisition,
 *	 i.e. the case where there are only readers or nobody has the lock
 *	 (1st and 2nd case above).
 *
 * Writers attempt to lock by adding ACTIVE_WRITE_BIAS in down_write and
 * checking that the count becomes ACTIVE_WRITE_BIAS for successful lock
 * acquisition (i.e. nobody else has the lock or attempts the lock).  If
 * unsuccessful, in rwsem_down_write_failed, we'll check to see if there
 * are only waiters but none active (5th case above), and attempt to
 * steal the lock.
 *
 */
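
/*
 * For concreteness, assuming the generic bias constants from
 * include/asm-generic/rwsem.h (32-bit case):
 *
 *	RWSEM_ACTIVE_BIAS	= 0x00000001
 *	RWSEM_ACTIVE_MASK	= 0x0000ffff
 *	RWSEM_WAITING_BIAS	= 0xffff0000	(i.e. -0x00010000)
 *	RWSEM_ACTIVE_WRITE_BIAS	= 0xffff0001	(WAITING_BIAS + ACTIVE_BIAS)
 *
 * a few example counts work out as:
 *
 *	2 readers active, nothing queued:
 *		2*ACTIVE_BIAS				= 0x00000002
 *	2 readers active, writer(s) queued:
 *		2*ACTIVE_BIAS + WAITING_BIAS		= 0xffff0002
 *	1 writer active, readers/writers queued:
 *		ACTIVE_WRITE_BIAS + WAITING_BIAS	= 0xfffe0001
 */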

/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	sem->count = RWSEM_UNLOCKED_VALUE;
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	sem->owner = NULL;
	osq_lock_init(&sem->osq);
#endif
}

EXPORT_SYMBOL(__init_rwsem);

enum rwsem_waiter_type {
	RWSEM_WAITING_FOR_WRITE,
	RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	enum rwsem_waiter_type type;
};

enum rwsem_wake_type {
	RWSEM_WAKE_ANY,		/* Wake whatever's at head of wait list */
	RWSEM_WAKE_READERS,	/* Wake readers only */
	RWSEM_WAKE_READ_OWNED	/* Waker thread holds the read lock */
};

/*
 * handle the lock release when there are processes blocked on it that can
 * now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is negative (and will still be so)
 * - there must be someone on the queue
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only woken if downgrading is false
 */
static struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, enum rwsem_wake_type wake_type)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;
	struct list_head *next;
	long oldcount, woken, loop, adjustment;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
		if (wake_type == RWSEM_WAKE_ANY)
			/* Wake the writer at the front of the queue, but do not
			 * grant it the lock yet as we want other writers
			 * to be able to steal it. Readers, on the other hand,
			 * will block as they will notice the queued writer.
			 */
			wake_up_process(waiter->task);
		goto out;
	}

	/* Writers might steal the lock before we grant it to the next reader.
	 * We prefer to do the first reader grant before counting readers
	 * so we can bail out early if a writer stole the lock.
	 */
	adjustment = 0;
	if (wake_type != RWSEM_WAKE_READ_OWNED) {
		adjustment = RWSEM_ACTIVE_READ_BIAS;
 try_reader_grant:
		oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
		if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
			/* A writer stole the lock. Undo our reader grant. */
			if (rwsem_atomic_update(-adjustment, sem) &
						RWSEM_ACTIVE_MASK)
				goto out;
			/* Last active locker left. Retry waking readers. */
			goto try_reader_grant;
		}
	}

	/* Grant read locks to all the readers at the front of the queue.
	 * Note we increment the 'active part' of the count by the number
	 * of readers before waking any processes up.
	 */
	woken = 0;
	do {
		woken++;

		if (waiter->list.next == &sem->wait_list)
			break;

		waiter = list_entry(waiter->list.next,
				    struct rwsem_waiter, list);

	} while (waiter->type != RWSEM_WAITING_FOR_WRITE);

	adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
	if (waiter->type != RWSEM_WAITING_FOR_WRITE)
		/* hit end of list above */
		adjustment -= RWSEM_WAITING_BIAS;

	if (adjustment)
		rwsem_atomic_add(adjustment, sem);

	next = sem->wait_list.next;
	loop = woken;
	do {
		waiter = list_entry(next, struct rwsem_waiter, list);
		next = waiter->list.next;
		tsk = waiter->task;
		/*
		 * Make sure we do not wake up the next reader before
		 * setting the nil condition to grant the next reader;
		 * otherwise we could miss the wakeup on the other
		 * side and end up sleeping again. See the pairing
		 * in rwsem_down_read_failed().
		 */
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
	} while (--loop);

	sem->wait_list.next = next;
	next->prev = &sem->wait_list;

 out:
	return sem;
}
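
/*
 * As a worked example of the adjustment arithmetic above (using the bias
 * values assumed earlier): if three readers are queued on an otherwise idle
 * rwsem (count == 0xffff0000), the initial reader grant moves the count to
 * 0xffff0001; waking all three readers then applies
 * 3*ACTIVE_READ_BIAS - ACTIVE_READ_BIAS - WAITING_BIAS, leaving the count at
 * 0x00000003, i.e. three active readers and an empty wait list.
 */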

/*
 * Wait for the read lock to be granted
 */
__visible
struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
{
	long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
	struct rwsem_waiter waiter;
	struct task_struct *tsk = current;

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.type = RWSEM_WAITING_FOR_READ;
	get_task_struct(tsk);

	raw_spin_lock_irq(&sem->wait_lock);
	if (list_empty(&sem->wait_list))
		adjustment += RWSEM_WAITING_BIAS;
	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	count = rwsem_atomic_update(adjustment, sem);

	/* If there are no active locks, wake the front queued process(es).
	 *
	 * If there are no writers and we are first in the queue,
	 * wake our own waiter to join the existing active readers !
	 */
	if (count == RWSEM_WAITING_BIAS ||
	    (count > RWSEM_WAITING_BIAS &&
	     adjustment != -RWSEM_ACTIVE_READ_BIAS))
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);

	raw_spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock */
	while (true) {
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		if (!waiter.task)
			break;
		schedule();
	}

	__set_task_state(tsk, TASK_RUNNING);
	return sem;
}
EXPORT_SYMBOL(rwsem_down_read_failed);
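
/*
 * For illustration (again assuming the generic bias values): a reader that
 * hits a write-locked rwsem sees the fast path raise the count from
 * 0xffff0001 to 0xffff0002; if it is the first waiter, the slow path above
 * then applies WAITING_BIAS - ACTIVE_READ_BIAS, leaving 0xfffe0001, the
 * "1 writer active, waiters on queue" case from the guide at the top.
 */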

static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
{
	/*
	 * Try acquiring the write lock. Check count first in order
	 * to reduce unnecessary expensive cmpxchg() operations.
	 */
	if (count == RWSEM_WAITING_BIAS &&
	    cmpxchg(&sem->count, RWSEM_WAITING_BIAS,
		    RWSEM_ACTIVE_WRITE_BIAS) == RWSEM_WAITING_BIAS) {
		if (!list_is_singular(&sem->wait_list))
			rwsem_atomic_update(RWSEM_WAITING_BIAS, sem);
		rwsem_set_owner(sem);
		return true;
	}

	return false;
}
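
/*
 * In terms of the example bias values, the cmpxchg above moves the count
 * from 0xffff0000 ("queued but none active") straight to 0xffff0001
 * (ACTIVE_WRITE_BIAS); if other waiters remain behind the new owner,
 * WAITING_BIAS is added back, giving 0xfffe0001.
 */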

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
 * Try to acquire write lock before the writer has been put on wait queue.
 */
static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
{
	long old, count = READ_ONCE(sem->count);

	while (true) {
		if (!(count == 0 || count == RWSEM_WAITING_BIAS))
			return false;

		old = cmpxchg(&sem->count, count, count + RWSEM_ACTIVE_WRITE_BIAS);
		if (old == count) {
			rwsem_set_owner(sem);
			return true;
		}

		count = old;
	}
}

static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
	struct task_struct *owner;
	bool ret = true;

	if (need_resched())
		return false;

	rcu_read_lock();
	owner = READ_ONCE(sem->owner);
	if (!owner) {
		long count = READ_ONCE(sem->count);
		/*
		 * If sem->owner is not set, but we have just entered the
		 * slowpath with the lock still active, then reader(s) may
		 * hold the lock. To be safe, bail out of spinning in these
		 * situations.
		 */
		if (count & RWSEM_ACTIVE_MASK)
			ret = false;
		goto done;
	}

	ret = owner->on_cpu;
done:
	rcu_read_unlock();
	return ret;
}

static noinline
bool rwsem_spin_on_owner(struct rw_semaphore *sem, struct task_struct *owner)
{
	long count;

	rcu_read_lock();
	while (sem->owner == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking that sem->owner still matches owner. If that
		 * fails, owner might point to freed memory; if it still
		 * matches, the rcu_read_lock() ensures the memory stays
		 * valid.
		 */
		barrier();

		/* abort spinning when need_resched or owner is not running */
		if (!owner->on_cpu || need_resched()) {
			rcu_read_unlock();
			return false;
		}

		cpu_relax_lowlatency();
	}
	rcu_read_unlock();

	if (READ_ONCE(sem->owner))
		return true; /* new owner, continue spinning */

	/*
	 * When the owner is not set, the lock could be free or
	 * held by readers. Check the counter to verify the
	 * state.
	 */
	count = READ_ONCE(sem->count);
	return (count == 0 || count == RWSEM_WAITING_BIAS);
}

static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	struct task_struct *owner;
	bool taken = false;

	preempt_disable();

	/* sem->wait_lock should not be held when doing optimistic spinning */
	if (!rwsem_can_spin_on_owner(sem))
		goto done;

	if (!osq_lock(&sem->osq))
		goto done;

	while (true) {
		owner = READ_ONCE(sem->owner);
		if (owner && !rwsem_spin_on_owner(sem, owner))
			break;

		/* wait_lock will be acquired if write_lock is obtained */
		if (rwsem_try_write_lock_unqueued(sem)) {
			taken = true;
			break;
		}

		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we're an RT task, that would live-lock because we won't let
		 * the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(current)))
			break;

		/*
		 * The cpu_relax_lowlatency() call is a compiler barrier which
		 * forces everything in this loop to be re-loaded. We don't
		 * need memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax_lowlatency();
	}
	osq_unlock(&sem->osq);
done:
	preempt_enable();
	return taken;
}

/*
 * Return true if the rwsem has an active spinner
 */
static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
{
	return osq_is_locked(&sem->osq);
}

#else
static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	return false;
}

static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
{
	return false;
}
#endif

/*
 * Wait until we successfully acquire the write lock
 */
__visible
struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
{
	long count;
	bool waiting = true; /* any queued threads before us */
	struct rwsem_waiter waiter;

	/* undo write bias from down_write operation, stop active locking */
	count = rwsem_atomic_update(-RWSEM_ACTIVE_WRITE_BIAS, sem);

	/* do optimistic spinning and steal lock if possible */
	if (rwsem_optimistic_spin(sem))
		return sem;

	/*
	 * Optimistic spinning failed, proceed to the slowpath
	 * and block until we can acquire the sem.
	 */
	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_WRITE;

	raw_spin_lock_irq(&sem->wait_lock);

	/* account for this before adding a new element to the list */
	if (list_empty(&sem->wait_list))
		waiting = false;

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	if (waiting) {
		count = READ_ONCE(sem->count);

		/*
		 * If there were already threads queued before us and there are
		 * no active writers, the lock must be read owned; so we try to
		 * wake any readers that were queued ahead of us.
		 */
		if (count > RWSEM_WAITING_BIAS)
			sem = __rwsem_do_wake(sem, RWSEM_WAKE_READERS);

	} else
		count = rwsem_atomic_update(RWSEM_WAITING_BIAS, sem);

	/* wait until we successfully acquire the lock */
	set_current_state(TASK_UNINTERRUPTIBLE);
	while (true) {
		if (rwsem_try_write_lock(count, sem))
			break;
		raw_spin_unlock_irq(&sem->wait_lock);

		/* Block until there are no active lockers. */
		do {
			schedule();
			set_current_state(TASK_UNINTERRUPTIBLE);
		} while ((count = sem->count) & RWSEM_ACTIVE_MASK);

		raw_spin_lock_irq(&sem->wait_lock);
	}
	__set_current_state(TASK_RUNNING);

	list_del(&waiter.list);
	raw_spin_unlock_irq(&sem->wait_lock);

	return sem;
}
EXPORT_SYMBOL(rwsem_down_write_failed);

/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
__visible
struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
	unsigned long flags;

	/*
	 * If a spinner is present, it is not necessary to do the wakeup.
	 * Try to do wakeup only if the trylock succeeds to minimize
	 * spinlock contention which may introduce too much delay in the
	 * unlock operation.
	 *
	 *    spinning writer		up_write/up_read caller
	 *    ---------------		-----------------------
	 * [S]   osq_unlock()		[L]   osq
	 *	 MB			      RMB
	 * [RmW] rwsem_try_write_lock() [RmW] spin_trylock(wait_lock)
	 *
	 * Here, it is important to make sure that there won't be a missed
	 * wakeup while the rwsem is free and the only spinning writer goes
	 * to sleep without taking the rwsem. Even when the spinning writer
	 * is just going to break out of the waiting loop, it will still do
	 * a trylock in rwsem_down_write_failed() before sleeping. IOW, if
	 * rwsem_has_spinner() is true, it will guarantee at least one
	 * trylock attempt on the rwsem later on.
	 */
	if (rwsem_has_spinner(sem)) {
		/*
		 * The smp_rmb() here is to make sure that the spinner
		 * state is consulted before reading the wait_lock.
		 */
		smp_rmb();
		if (!raw_spin_trylock_irqsave(&sem->wait_lock, flags))
			return sem;
		goto locked;
	}
	raw_spin_lock_irqsave(&sem->wait_lock, flags);
locked:

	/* do nothing if list empty */
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

	return sem;
}
EXPORT_SYMBOL(rwsem_wake);

/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
__visible
struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	/* do nothing if list empty */
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

	return sem;
}
EXPORT_SYMBOL(rwsem_downgrade_wake);