| Commit | Line | Data |
|---|---|---|
| 1da177e4 LT | 1 | /* rwsem.c: R/W semaphores: contention handling functions |
| | 2 | * |
| | 3 | * Written by David Howells (dhowells@redhat.com). |
| | 4 | * Derived from arch/i386/kernel/semaphore.c |
| ce6711f3 AS | 5 | * |
| | 6 | * Writer lock-stealing by Alex Shi <alex.shi@intel.com> |
| fe6e674c | 7 | * and Michel Lespinasse <walken@google.com> |
| 4fc828e2 DB | 8 | * |
| | 9 | * Optimistic spinning by Tim Chen <tim.c.chen@intel.com> |
| | 10 | * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes. |
| 1da177e4 LT | 11 | */ |
| | 12 | #include <linux/rwsem.h> |
| | 13 | #include <linux/sched.h> |
| | 14 | #include <linux/init.h> |
| 8bc3bcc9 | 15 | #include <linux/export.h> |
| 4fc828e2 | 16 | #include <linux/sched/rt.h> |
| 7a215f89 | 17 | #include <linux/osq_lock.h> |
| 4fc828e2 | 18 | |
| 7a215f89 | 19 | #include "rwsem.h" |
| 1da177e4 | 20 | |
| 3cf2f34e TC | 21 | /* |
| | 22 | * Guide to the rw_semaphore's count field for common values. |
| | 23 | * (32-bit case illustrated, similar for 64-bit) |
| | 24 | * |
| | 25 | * 0x0000000X (1) X readers active or attempting lock, no writer waiting |
| | 26 | * X = #active readers + #readers attempting lock |
| | 27 | * (X*ACTIVE_BIAS) |
| | 28 | * |
| | 29 | * 0x00000000 rwsem is unlocked, and no one is waiting for the lock or |
| | 30 | * attempting to read lock or write lock. |
| | 31 | * |
| | 32 | * 0xffff000X (1) X readers active or attempting lock, with waiters for lock |
| | 33 | * X = #active readers + #readers attempting lock |
| | 34 | * (X*ACTIVE_BIAS + WAITING_BIAS) |
| | 35 | * (2) 1 writer attempting lock, no waiters for lock |
| | 36 | * X-1 = #active readers + #readers attempting lock |
| | 37 | * ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS) |
| | 38 | * (3) 1 writer active, no waiters for lock |
| | 39 | * X-1 = #active readers + #readers attempting lock |
| | 40 | * ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS) |
| | 41 | * |
| | 42 | * 0xffff0001 (1) 1 reader active or attempting lock, waiters for lock |
| | 43 | * (WAITING_BIAS + ACTIVE_BIAS) |
| | 44 | * (2) 1 writer active or attempting lock, no waiters for lock |
| | 45 | * (ACTIVE_WRITE_BIAS) |
| | 46 | * |
| | 47 | * 0xffff0000 (1) There are writers or readers queued but none active |
| | 48 | * or in the process of attempting lock. |
| | 49 | * (WAITING_BIAS) |
| | 50 | * Note: a writer can attempt to steal the lock for this count by adding |
| | 51 | * ACTIVE_WRITE_BIAS in cmpxchg and checking the old count |
| | 52 | * |
| | 53 | * 0xfffe0001 (1) 1 writer active, or attempting lock. Waiters on queue. |
| | 54 | * (ACTIVE_WRITE_BIAS + WAITING_BIAS) |
| | 55 | * |
| | 56 | * Note: readers attempt to lock by adding ACTIVE_BIAS in down_read and |
| | 57 | * checking that the count becomes greater than 0 for successful lock |
| | 58 | * acquisition, i.e. the cases where there are only readers or nobody |
| | 59 | * has the lock (1st and 2nd cases above). |
| | 60 | * |
| | 61 | * Writers attempt to lock by adding ACTIVE_WRITE_BIAS in down_write and |
| | 62 | * checking that the count becomes ACTIVE_WRITE_BIAS for successful lock |
| | 63 | * acquisition (i.e. nobody else has the lock or is attempting to lock). |
| | 64 | * If unsuccessful, in rwsem_down_write_failed, we'll check to see if |
| | 65 | * there are only waiters but none active (5th case above), and attempt |
| | 66 | * to steal the lock. |
| | 67 | * |
| | 68 | */ |
| | 69 | |
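
The bias constants referenced in this guide are defined per architecture (typically in asm/rwsem.h). A minimal userspace sketch, assuming the common 32-bit values shown below, that verifies the count snapshots above arithmetically:

```c
#include <assert.h>
#include <stdint.h>

/* Assumed common 32-bit bias values; per-arch headers may differ. */
#define RWSEM_UNLOCKED_VALUE    0x00000000L
#define RWSEM_ACTIVE_BIAS       0x00000001L
#define RWSEM_ACTIVE_MASK       0x0000ffffL
#define RWSEM_WAITING_BIAS      (-0x00010000L)
#define RWSEM_ACTIVE_READ_BIAS  RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

int main(void)
{
	/* X = 3 readers active, no waiters: 0x00000003 */
	assert((uint32_t)(3 * RWSEM_ACTIVE_BIAS) == 0x00000003u);
	/* 1 writer active or attempting, no waiters: 0xffff0001 */
	assert((uint32_t)RWSEM_ACTIVE_WRITE_BIAS == 0xffff0001u);
	/* tasks queued but none active: 0xffff0000 */
	assert((uint32_t)RWSEM_WAITING_BIAS == 0xffff0000u);
	/* 1 writer active plus queued waiters: 0xfffe0001 */
	assert((uint32_t)(RWSEM_ACTIVE_WRITE_BIAS + RWSEM_WAITING_BIAS) == 0xfffe0001u);
	return 0;
}
```

| Commit | Line | Data |
|---|---|---|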
| 4ea2176d IM | 70 | /* |
| | 71 | * Initialize an rwsem: |
| | 72 | */ |
| | 73 | void __init_rwsem(struct rw_semaphore *sem, const char *name, |
| | 74 | struct lock_class_key *key) |
| | 75 | { |
| | 76 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| | 77 | /* |
| | 78 | * Make sure we are not reinitializing a held semaphore: |
| | 79 | */ |
| | 80 | debug_check_no_locks_freed((void *)sem, sizeof(*sem)); |
| 4dfbb9d8 | 81 | lockdep_init_map(&sem->dep_map, name, key, 0); |
| 4ea2176d IM | 82 | #endif |
| | 83 | sem->count = RWSEM_UNLOCKED_VALUE; |
| ddb6c9b5 | 84 | raw_spin_lock_init(&sem->wait_lock); |
| 4ea2176d | 85 | INIT_LIST_HEAD(&sem->wait_list); |
| 5db6c6fe | 86 | #ifdef CONFIG_RWSEM_SPIN_ON_OWNER |
| 4fc828e2 | 87 | sem->owner = NULL; |
| 4d9d951e | 88 | osq_lock_init(&sem->osq); |
| 4fc828e2 | 89 | #endif |
| 4ea2176d IM | 90 | } |
| | 91 | |
| | 92 | EXPORT_SYMBOL(__init_rwsem); |
| | 93 | |
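
Callers normally reach __init_rwsem() through the init_rwsem() wrapper, which supplies a static struct lock_class_key per call site so lockdep gives each semaphore its own class. A usage sketch; the my_dev structure and my_dev_setup() function are hypothetical:

```c
#include <linux/rwsem.h>

/* Hypothetical subsystem state protected by an rwsem. */
struct my_dev {
	struct rw_semaphore lock;
	int value;
};

static void my_dev_setup(struct my_dev *dev)
{
	/*
	 * init_rwsem() expands to __init_rwsem() with a static
	 * struct lock_class_key, one per initialization site.
	 */
	init_rwsem(&dev->lock);

	down_write(&dev->lock);	/* exclusive writer side */
	dev->value = 42;
	up_write(&dev->lock);

	down_read(&dev->lock);	/* shared reader side */
	(void)dev->value;
	up_read(&dev->lock);
}
```

| Commit | Line | Data |
|---|---|---|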
| e2d57f78 ML | 94 | enum rwsem_waiter_type { |
| | 95 | RWSEM_WAITING_FOR_WRITE, |
| | 96 | RWSEM_WAITING_FOR_READ |
| | 97 | }; |
| | 98 | |
| 1da177e4 LT | 99 | struct rwsem_waiter { |
| | 100 | struct list_head list; |
| | 101 | struct task_struct *task; |
| e2d57f78 | 102 | enum rwsem_waiter_type type; |
| 1da177e4 LT | 103 | }; |
| | 104 | |
| fe6e674c ML | 105 | enum rwsem_wake_type { |
| | 106 | RWSEM_WAKE_ANY, /* Wake whatever's at head of wait list */ |
| | 107 | RWSEM_WAKE_READERS, /* Wake readers only */ |
| | 108 | RWSEM_WAKE_READ_OWNED /* Waker thread holds the read lock */ |
| | 109 | }; |
| 70bdc6e0 | 110 | |
| 1da177e4 LT | 111 | /* |
| | 112 | * handle the lock release when there are processes blocked on it that can now run |
| | 113 | * - if we come here from up_xxxx(), then: |
| | 114 | * - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed) |
| | 115 | * - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so) |
| 345af7bf | 116 | * - there must be someone on the queue |
| 133e89ef DB | 117 | * - the wait_lock must be held by the caller |
| | 118 | * - tasks are marked for wakeup, the caller must later invoke wake_up_q() |
| | 119 | * to actually wake up the blocked task(s) and drop the reference count, |
| | 120 | * preferably when the wait_lock is released |
| 1da177e4 | 121 | * - woken process blocks are discarded from the list after having task zeroed |
| 133e89ef | 122 | * - writers are only marked woken if downgrading is false |
| 1da177e4 | 123 | */ |
| 70bdc6e0 | 124 | static struct rw_semaphore * |
| 133e89ef DB | 125 | __rwsem_mark_wake(struct rw_semaphore *sem, |
| | 126 | enum rwsem_wake_type wake_type, struct wake_q_head *wake_q) |
| 1da177e4 LT | 127 | { |
| | 128 | struct rwsem_waiter *waiter; |
| | 129 | struct task_struct *tsk; |
| | 130 | struct list_head *next; |
| b5f54181 | 131 | long oldcount, woken, loop, adjustment; |
| 1da177e4 | 132 | |
| 345af7bf | 133 | waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list); |
| 8cf5322c | 134 | if (waiter->type == RWSEM_WAITING_FOR_WRITE) { |
| 133e89ef DB | 135 | if (wake_type == RWSEM_WAKE_ANY) { |
| | 136 | /* |
| | 137 | * Mark the writer at the front of the queue for wakeup. |
| | 138 | * Until the task is actually awoken later by the |
| | 139 | * caller, other writers are able to steal it. |
| | 140 | * Readers, on the other hand, will block as they |
| | 141 | * will notice the queued writer. |
| 8cf5322c | 142 | */ |
| 133e89ef DB | 143 | wake_q_add(wake_q, waiter->task); |
| | 144 | } |
| 345af7bf | 145 | goto out; |
| 8cf5322c | 146 | } |
| 1da177e4 | 147 | |
| fe6e674c ML | 148 | /* Writers might steal the lock before we grant it to the next reader. |
| | 149 | * We prefer to do the first reader grant before counting readers |
| | 150 | * so we can bail out early if a writer stole the lock. |
| 70bdc6e0 | 151 | */ |
| fe6e674c ML | 152 | adjustment = 0; |
| | 153 | if (wake_type != RWSEM_WAKE_READ_OWNED) { |
| | 154 | adjustment = RWSEM_ACTIVE_READ_BIAS; |
| | 155 | try_reader_grant: |
| | 156 | oldcount = rwsem_atomic_update(adjustment, sem) - adjustment; |
| | 157 | if (unlikely(oldcount < RWSEM_WAITING_BIAS)) { |
| | 158 | /* A writer stole the lock. Undo our reader grant. */ |
| | 159 | if (rwsem_atomic_update(-adjustment, sem) & |
| | 160 | RWSEM_ACTIVE_MASK) |
| | 161 | goto out; |
| | 162 | /* Last active locker left. Retry waking readers. */ |
| | 163 | goto try_reader_grant; |
| | 164 | } |
| | 165 | } |
| 1da177e4 | 166 | |
| 345af7bf ML | 167 | /* Grant read locks to all the readers at the front of the queue. |
| | 168 | * Note we increment the 'active part' of the count by the number |
| | 169 | * of readers before waking any processes up. |
| 1da177e4 | 170 | */ |
| 1da177e4 LT | 171 | woken = 0; |
| | 172 | do { |
| | 173 | woken++; |
| | 174 | |
| | 175 | if (waiter->list.next == &sem->wait_list) |
| | 176 | break; |
| | 177 | |
| | 178 | waiter = list_entry(waiter->list.next, |
| | 179 | struct rwsem_waiter, list); |
| | 180 | |
| e2d57f78 | 181 | } while (waiter->type != RWSEM_WAITING_FOR_WRITE); |
| 1da177e4 | 182 | |
| fe6e674c | 183 | adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment; |
| e2d57f78 | 184 | if (waiter->type != RWSEM_WAITING_FOR_WRITE) |
| fd41b334 ML | 185 | /* hit end of list above */ |
| | 186 | adjustment -= RWSEM_WAITING_BIAS; |
| 1da177e4 | 187 | |
| fe6e674c ML | 188 | if (adjustment) |
| | 189 | rwsem_atomic_add(adjustment, sem); |
| 1da177e4 LT | 190 | |
| | 191 | next = sem->wait_list.next; |
| 8cf5322c ML | 192 | loop = woken; |
| | 193 | do { |
| 1da177e4 LT | 194 | waiter = list_entry(next, struct rwsem_waiter, list); |
| | 195 | next = waiter->list.next; |
| | 196 | tsk = waiter->task; |
| e3851390 DB | 197 | |
| | 198 | wake_q_add(wake_q, tsk); |
| 49e4b2bc | 199 | /* |
| e3851390 DB | 200 | * Ensure that the last operation is setting the reader |
| | 201 | * waiter to nil such that rwsem_down_read_failed() cannot |
| | 202 | * race with do_exit() by always holding a reference count |
| | 203 | * to the task to wake up. |
| 49e4b2bc | 204 | */ |
| e3851390 | 205 | smp_store_release(&waiter->task, NULL); |
| 8cf5322c | 206 | } while (--loop); |
| 1da177e4 LT | 207 | |
| | 208 | sem->wait_list.next = next; |
| | 209 | next->prev = &sem->wait_list; |
| | 210 | |
| | 211 | out: |
| 1da177e4 | 212 | return sem; |
| ce6711f3 AS | 213 | } |
| | 214 | |
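
The try_reader_grant retry above is easiest to follow with concrete values. A single-threaded model, reusing the assumed 32-bit bias constants from the earlier sketch; update() stands in for the atomic rwsem_atomic_update():

```c
#include <assert.h>

#define RWSEM_ACTIVE_BIAS       0x00000001L
#define RWSEM_WAITING_BIAS      (-0x00010000L)
#define RWSEM_ACTIVE_READ_BIAS  RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

/* Single-threaded stand-in for the atomic rwsem_atomic_update(). */
static long update(long *count, long delta) { return *count += delta; }

int main(void)
{
	long count, oldcount;

	/* Case 1: waiters queued, none active (0xffff0000): grant stands. */
	count = RWSEM_WAITING_BIAS;
	oldcount = update(&count, RWSEM_ACTIVE_READ_BIAS) - RWSEM_ACTIVE_READ_BIAS;
	assert(oldcount == RWSEM_WAITING_BIAS);	/* not < WAITING_BIAS: no steal */

	/* Case 2: a writer stole the lock first (0xfffe0001): undo the grant. */
	count = RWSEM_WAITING_BIAS + RWSEM_ACTIVE_WRITE_BIAS;
	oldcount = update(&count, RWSEM_ACTIVE_READ_BIAS) - RWSEM_ACTIVE_READ_BIAS;
	assert(oldcount < RWSEM_WAITING_BIAS);	/* writer present */
	update(&count, -RWSEM_ACTIVE_READ_BIAS);	/* undo our reader grant */
	assert(count == RWSEM_WAITING_BIAS + RWSEM_ACTIVE_WRITE_BIAS);
	return 0;
}
```

| Commit | Line | Data |
|---|---|---|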
| 1da177e4 | 215 | /* |
| 4fc828e2 | 216 | * Wait for the read lock to be granted |
| 1da177e4 | 217 | */ |
| 3ebae4f3 | 218 | __visible |
| 1e78277c | 219 | struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem) |
| 1da177e4 | 220 | { |
| b5f54181 | 221 | long count, adjustment = -RWSEM_ACTIVE_READ_BIAS; |
| a8618a0e | 222 | struct rwsem_waiter waiter; |
| 1da177e4 | 223 | struct task_struct *tsk = current; |
| 133e89ef | 224 | WAKE_Q(wake_q); |
| 1da177e4 | 225 | |
| | 226 | /* set up my own style of waitqueue */ |
| a8618a0e | 227 | waiter.task = tsk; |
| da16922c | 228 | waiter.type = RWSEM_WAITING_FOR_READ; |
| 1da177e4 | 229 | |
| f7dd1cee | 230 | raw_spin_lock_irq(&sem->wait_lock); |
| fd41b334 ML | 231 | if (list_empty(&sem->wait_list)) |
| | 232 | adjustment += RWSEM_WAITING_BIAS; |
| a8618a0e | 233 | list_add_tail(&waiter.list, &sem->wait_list); |
| 1da177e4 | 234 | |
| 70bdc6e0 | 235 | /* we're now waiting on the lock, but no longer actively locking */ |
| 1da177e4 LT | 236 | count = rwsem_atomic_update(adjustment, sem); |
| | 237 | |
| 25c39325 ML | 238 | /* If there are no active locks, wake the front queued process(es). |
| | 239 | * |
| | 240 | * If there are no writers and we are first in the queue, |
| | 241 | * wake our own waiter to join the existing active readers! |
| | 242 | */ |
| | 243 | if (count == RWSEM_WAITING_BIAS \|\| |
| | 244 | (count > RWSEM_WAITING_BIAS && |
| | 245 | adjustment != -RWSEM_ACTIVE_READ_BIAS)) |
| 133e89ef | 246 | sem = __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q); |
| 1da177e4 | 247 | |
| ddb6c9b5 | 248 | raw_spin_unlock_irq(&sem->wait_lock); |
| 133e89ef | 249 | wake_up_q(&wake_q); |
| 1da177e4 LT | 250 | |
| | 251 | /* wait to be given the lock */ |
| f7dd1cee ML | 252 | while (true) { |
| | 253 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); |
| a8618a0e | 254 | if (!waiter.task) |
| 1da177e4 LT | 255 | break; |
| | 256 | schedule(); |
| 1da177e4 LT | 257 | } |
| | 258 | |
| 73105994 | 259 | __set_task_state(tsk, TASK_RUNNING); |
| 1da177e4 LT | 260 | return sem; |
| | 261 | } |
| db0e716a | 262 | EXPORT_SYMBOL(rwsem_down_read_failed); |
| 1da177e4 | 263 | |
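
For context, this slow path is only entered from the arch fast path when the atomic increment leaves the count non-positive. The asm-generic variant looked roughly like this in this era (a sketch; per-arch assembly versions differ):

```c
#include <linux/atomic.h>
#include <linux/rwsem.h>

/* Sketch of the asm-generic down_read fast path (see asm-generic/rwsem.h). */
static inline void __down_read(struct rw_semaphore *sem)
{
	/* Add one ACTIVE_READ_BIAS; <= 0 means a writer is active or queued. */
	if (unlikely(atomic_long_inc_return_acquire((atomic_long_t *)&sem->count) <= 0))
		rwsem_down_read_failed(sem);
}
```

| Commit | Line | Data |
|---|---|---|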
| c0fcb6c2 JL | 264 | /* |
| | 265 | * This function must be called with the sem->wait_lock held to prevent |
| | 266 | * race conditions between checking the rwsem wait list and setting the |
| | 267 | * sem->count accordingly. |
| | 268 | */ |
| 4fc828e2 DB | 269 | static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem) |
| | 270 | { |
| debfab74 | 271 | /* |
| c0fcb6c2 | 272 | * Avoid trying to acquire write lock if count isn't RWSEM_WAITING_BIAS. |
| debfab74 | 273 | */ |
| c0fcb6c2 JL | 274 | if (count != RWSEM_WAITING_BIAS) |
| | 275 | return false; |
| | 276 | |
| | 277 | /* |
| | 278 | * Acquire the lock by trying to set it to ACTIVE_WRITE_BIAS. If there |
| | 279 | * are other tasks on the wait list, we need to add on WAITING_BIAS. |
| | 280 | */ |
| | 281 | count = list_is_singular(&sem->wait_list) ? |
| | 282 | RWSEM_ACTIVE_WRITE_BIAS : |
| | 283 | RWSEM_ACTIVE_WRITE_BIAS + RWSEM_WAITING_BIAS; |
| | 284 | |
| | 285 | if (cmpxchg_acquire(&sem->count, RWSEM_WAITING_BIAS, count) == RWSEM_WAITING_BIAS) { |
| 7a215f89 | 286 | rwsem_set_owner(sem); |
| debfab74 | 287 | return true; |
| 4fc828e2 | 288 | } |
| debfab74 | 289 | |
| 4fc828e2 DB | 290 | return false; |
| | 291 | } |
| | 292 | |
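
Working the arithmetic with the assumed 32-bit constants: the trylock only fires when count is exactly WAITING_BIAS (0xffff0000, waiters but nobody active). If we are the only waiter the cmpxchg moves the count to ACTIVE_WRITE_BIAS (0xffff0001); if others remain queued it moves to ACTIVE_WRITE_BIAS + WAITING_BIAS (0xfffe0001), matching the count guide at the top of the file. A single-threaded model of the transition, not the kernel's atomic implementation:

```c
#include <assert.h>
#include <stdbool.h>

#define RWSEM_WAITING_BIAS      (-0x00010000L)
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + 1L)

/* Single-threaded stand-in for cmpxchg_acquire() on sem->count. */
static long cmpxchg_sim(long *p, long old, long new)
{
	long cur = *p;
	if (cur == old)
		*p = new;
	return cur;
}

static bool try_write_lock(long *count, bool queue_is_singular)
{
	long new;

	if (*count != RWSEM_WAITING_BIAS)
		return false;
	new = queue_is_singular ? RWSEM_ACTIVE_WRITE_BIAS
				: RWSEM_ACTIVE_WRITE_BIAS + RWSEM_WAITING_BIAS;
	return cmpxchg_sim(count, RWSEM_WAITING_BIAS, new) == RWSEM_WAITING_BIAS;
}

int main(void)
{
	long count = RWSEM_WAITING_BIAS;	/* waiters queued, none active */
	assert(try_write_lock(&count, true));	/* we were the only waiter */
	assert(count == RWSEM_ACTIVE_WRITE_BIAS);
	assert(!try_write_lock(&count, true));	/* now held: trylock refuses */
	return 0;
}
```

| Commit | Line | Data |
|---|---|---|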
| 5db6c6fe | 293 | #ifdef CONFIG_RWSEM_SPIN_ON_OWNER |
| 1da177e4 | 294 | /* |
| 4fc828e2 DB | 295 | * Try to acquire the write lock before the writer has been put on the wait queue. |
| | 296 | */ |
| | 297 | static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem) |
| | 298 | { |
| 4d3199e4 | 299 | long old, count = READ_ONCE(sem->count); |
| 4fc828e2 DB | 300 | |
| | 301 | while (true) { |
| | 302 | if (!(count == 0 \|\| count == RWSEM_WAITING_BIAS)) |
| | 303 | return false; |
| | 304 | |
| 00eb4bab DB | 305 | old = cmpxchg_acquire(&sem->count, count, |
| | 306 | count + RWSEM_ACTIVE_WRITE_BIAS); |
| 7a215f89 DB | 307 | if (old == count) { |
| | 308 | rwsem_set_owner(sem); |
| 4fc828e2 | 309 | return true; |
| 7a215f89 | 310 | } |
| 4fc828e2 DB | 311 | |
| | 312 | count = old; |
| | 313 | } |
| | 314 | } |
| | 315 | |
| | 316 | static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem) |
| | 317 | { |
| | 318 | struct task_struct *owner; |
| 1a993670 | 319 | bool ret = true; |
| 4fc828e2 DB | 320 | |
| | 321 | if (need_resched()) |
| 37e95624 | 322 | return false; |
| 4fc828e2 DB | 323 | |
| | 324 | rcu_read_lock(); |
| 4d3199e4 | 325 | owner = READ_ONCE(sem->owner); |
| 1a993670 | 326 | if (!owner) { |
| 4d3199e4 | 327 | long count = READ_ONCE(sem->count); |
| 1a993670 DB | 328 | /* |
| | 329 | * If sem->owner is not set, yet we have just recently entered the |
| | 330 | * slowpath with the lock being active, then there is a possibility |
| | 331 | * that reader(s) may have the lock. To be safe, bail from spinning |
| | 332 | * in these situations. |
| | 333 | */ |
| | 334 | if (count & RWSEM_ACTIVE_MASK) |
| | 335 | ret = false; |
| | 336 | goto done; |
| | 337 | } |
| 4fc828e2 | 338 | |
| 1a993670 DB | 339 | ret = owner->on_cpu; |
| | 340 | done: |
| | 341 | rcu_read_unlock(); |
| | 342 | return ret; |
| 4fc828e2 DB | 343 | } |
| | 344 | |
| 4fc828e2 DB | 345 | static noinline |
| | 346 | bool rwsem_spin_on_owner(struct rw_semaphore *sem, struct task_struct *owner) |
| | 347 | { |
| b3fd4f03 DB | 348 | long count; |
| | 349 | |
| 4fc828e2 | 350 | rcu_read_lock(); |
| 9198f6ed JL | 351 | while (sem->owner == owner) { |
| | 352 | /* |
| | 353 | * Ensure we emit the owner->on_cpu dereference _after_ |
| | 354 | * checking that sem->owner still matches owner. If that |
| | 355 | * fails, owner might point to free()d memory; if it still |
| | 356 | * matches, the rcu_read_lock() ensures the memory stays valid. |
| | 357 | */ |
| | 358 | barrier(); |
| | 359 | |
| | 360 | /* abort spinning when need_resched or owner is not running */ |
| | 361 | if (!owner->on_cpu \|\| need_resched()) { |
| b3fd4f03 DB | 362 | rcu_read_unlock(); |
| | 363 | return false; |
| | 364 | } |
| 4fc828e2 | 365 | |
| 3a6bfbc9 | 366 | cpu_relax_lowlatency(); |
| 4fc828e2 DB | 367 | } |
| | 368 | rcu_read_unlock(); |
| | 369 | |
| b3fd4f03 DB | 370 | if (READ_ONCE(sem->owner)) |
| | 371 | return true; /* new owner, continue spinning */ |
| | 372 | |
| 4fc828e2 | 373 | /* |
| b3fd4f03 DB | 374 | * When the owner is not set, the lock could be free or |
| | 375 | * held by readers. Check the counter to verify the |
| | 376 | * state. |
| 4fc828e2 | 377 | */ |
| b3fd4f03 DB | 378 | count = READ_ONCE(sem->count); |
| | 379 | return (count == 0 \|\| count == RWSEM_WAITING_BIAS); |
| 4fc828e2 DB | 380 | } |
| | 381 | |
| | 382 | static bool rwsem_optimistic_spin(struct rw_semaphore *sem) |
| | 383 | { |
| | 384 | struct task_struct *owner; |
| | 385 | bool taken = false; |
| | 386 | |
| | 387 | preempt_disable(); |
| | 388 | |
| | 389 | /* sem->wait_lock should not be held when doing optimistic spinning */ |
| | 390 | if (!rwsem_can_spin_on_owner(sem)) |
| | 391 | goto done; |
| | 392 | |
| | 393 | if (!osq_lock(&sem->osq)) |
| | 394 | goto done; |
| | 395 | |
| | 396 | while (true) { |
| 4d3199e4 | 397 | owner = READ_ONCE(sem->owner); |
| 4fc828e2 DB | 398 | if (owner && !rwsem_spin_on_owner(sem, owner)) |
| | 399 | break; |
| | 400 | |
| | 401 | /* wait_lock will be acquired if write_lock is obtained */ |
| | 402 | if (rwsem_try_write_lock_unqueued(sem)) { |
| | 403 | taken = true; |
| | 404 | break; |
| | 405 | } |
| | 406 | |
| | 407 | /* |
| | 408 | * When there's no owner, we might have preempted between the |
| | 409 | * owner acquiring the lock and setting the owner field. If |
| | 410 | * we're an RT task, spinning could then live-lock because we |
| | 411 | * never let the owner complete. |
| | 412 | */ |
| | 413 | if (!owner && (need_resched() \|\| rt_task(current))) |
| | 414 | break; |
| | 415 | |
| | 416 | /* |
| | 417 | * The cpu_relax() call is a compiler barrier which forces |
| | 418 | * everything in this loop to be re-loaded. We don't need |
| | 419 | * memory barriers as we'll eventually observe the right |
| | 420 | * values at the cost of a few extra spins. |
| | 421 | */ |
| 3a6bfbc9 | 422 | cpu_relax_lowlatency(); |
| 4fc828e2 DB | 423 | } |
| | 424 | osq_unlock(&sem->osq); |
| | 425 | done: |
| | 426 | preempt_enable(); |
| | 427 | return taken; |
| | 428 | } |
| | 429 | |
| 59aabfc7 WL | 430 | /* |
| | 431 | * Return true if the rwsem has an active spinner |
| | 432 | */ |
| | 433 | static inline bool rwsem_has_spinner(struct rw_semaphore *sem) |
| | 434 | { |
| | 435 | return osq_is_locked(&sem->osq); |
| | 436 | } |
| | 437 | |
| 4fc828e2 DB | 438 | #else |
| | 439 | static bool rwsem_optimistic_spin(struct rw_semaphore *sem) |
| | 440 | { |
| | 441 | return false; |
| | 442 | } |
| 59aabfc7 WL | 443 | |
| | 444 | static inline bool rwsem_has_spinner(struct rw_semaphore *sem) |
| | 445 | { |
| | 446 | return false; |
| | 447 | } |
| 4fc828e2 DB | 448 | #endif |
| | 449 | |
| | 450 | /* |
| | 451 | * Wait until we successfully acquire the write lock |
| 1da177e4 | 452 | */ |
| d4799608 MH | 453 | static inline struct rw_semaphore * |
| | 454 | __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state) |
| 1da177e4 | 455 | { |
| 4fc828e2 DB | 456 | long count; |
| | 457 | bool waiting = true; /* any queued threads before us */ |
| 1e78277c | 458 | struct rwsem_waiter waiter; |
| d4799608 | 459 | struct rw_semaphore *ret = sem; |
| 133e89ef | 460 | WAKE_Q(wake_q); |
| 1e78277c | 461 | |
| 4fc828e2 DB | 462 | /* undo write bias from down_write operation, stop active locking */ |
| | 463 | count = rwsem_atomic_update(-RWSEM_ACTIVE_WRITE_BIAS, sem); |
| | 464 | |
| | 465 | /* do optimistic spinning and steal lock if possible */ |
| | 466 | if (rwsem_optimistic_spin(sem)) |
| | 467 | return sem; |
| | 468 | |
| | 469 | /* |
| | 470 | * Optimistic spinning failed, proceed to the slowpath |
| | 471 | * and block until we can acquire the sem. |
| | 472 | */ |
| | 473 | waiter.task = current; |
| 023fe4f7 | 474 | waiter.type = RWSEM_WAITING_FOR_WRITE; |
| 1e78277c ML | 475 | |
| | 476 | raw_spin_lock_irq(&sem->wait_lock); |
| 4fc828e2 DB | 477 | |
| | 478 | /* account for this before adding a new element to the list */ |
| 1e78277c | 479 | if (list_empty(&sem->wait_list)) |
| 4fc828e2 DB | 480 | waiting = false; |
| | 481 | |
| 1e78277c ML | 482 | list_add_tail(&waiter.list, &sem->wait_list); |
| | 483 | |
| | 484 | /* we're now waiting on the lock, but no longer actively locking */ |
| 4fc828e2 | 485 | if (waiting) { |
| 4d3199e4 | 486 | count = READ_ONCE(sem->count); |
| 1e78277c | 487 | |
| 4fc828e2 | 488 | /* |
| 0cc3d011 AM | 489 | * If there were already threads queued before us and there are |
| | 490 | * no active writers, the lock must be read owned; so we try to |
| | 491 | * wake any read locks that were queued ahead of us. |
| 4fc828e2 | 492 | */ |
| 133e89ef DB | 493 | if (count > RWSEM_WAITING_BIAS) { |
| | 494 | WAKE_Q(wake_q); |
| | 495 | |
| | 496 | sem = __rwsem_mark_wake(sem, RWSEM_WAKE_READERS, &wake_q); |
| | 497 | /* |
| | 498 | * The wakeup is normally called _after_ the wait_lock |
| | 499 | * is released, but given that we are proactively waking |
| | 500 | * readers we can deal with the wake_q overhead as it is |
| | 501 | * similar to releasing and taking the wait_lock again |
| | 502 | * for attempting rwsem_try_write_lock(). |
| | 503 | */ |
| | 504 | wake_up_q(&wake_q); |
| | 505 | } |
| 4fc828e2 DB | 506 | |
| | 507 | } else |
| | 508 | count = rwsem_atomic_update(RWSEM_WAITING_BIAS, sem); |
| 1e78277c | 509 | |
| 023fe4f7 | 510 | /* wait until we successfully acquire the lock */ |
| d4799608 | 511 | set_current_state(state); |
| 1e78277c | 512 | while (true) { |
| 4fc828e2 DB | 513 | if (rwsem_try_write_lock(count, sem)) |
| | 514 | break; |
| 1e78277c | 515 | raw_spin_unlock_irq(&sem->wait_lock); |
| a7d2c573 ML | 516 | |
| | 517 | /* Block until there are no active lockers. */ |
| | 518 | do { |
| 04cafed7 PZ | 519 | if (signal_pending_state(state, current)) |
| | 520 | goto out_nolock; |
| | 521 | |
| a7d2c573 | 522 | schedule(); |
| d4799608 | 523 | set_current_state(state); |
| 9b0fc9c0 | 524 | } while ((count = sem->count) & RWSEM_ACTIVE_MASK); |
| a7d2c573 | 525 | |
| 023fe4f7 | 526 | raw_spin_lock_irq(&sem->wait_lock); |
| 1e78277c | 527 | } |
| 4fc828e2 | 528 | __set_current_state(TASK_RUNNING); |
| 023fe4f7 ML | 529 | list_del(&waiter.list); |
| | 530 | raw_spin_unlock_irq(&sem->wait_lock); |
| 1e78277c | 531 | |
| d4799608 | 532 | return ret; |
| 04cafed7 PZ | 533 | |
| | 534 | out_nolock: |
| | 535 | __set_current_state(TASK_RUNNING); |
| | 536 | raw_spin_lock_irq(&sem->wait_lock); |
| | 537 | list_del(&waiter.list); |
| | 538 | if (list_empty(&sem->wait_list)) |
| | 539 | rwsem_atomic_update(-RWSEM_WAITING_BIAS, sem); |
| | 540 | else |
| 133e89ef | 541 | __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q); |
| 04cafed7 | 542 | raw_spin_unlock_irq(&sem->wait_lock); |
| 133e89ef | 543 | wake_up_q(&wake_q); |
| 04cafed7 PZ | 544 | |
| | 545 | return ERR_PTR(-EINTR); |
| d4799608 MH | 546 | } |
| | 547 | |
| | 548 | __visible struct rw_semaphore * __sched |
| | 549 | rwsem_down_write_failed(struct rw_semaphore *sem) |
| | 550 | { |
| | 551 | return __rwsem_down_write_failed_common(sem, TASK_UNINTERRUPTIBLE); |
| 1da177e4 | 552 | } |
| db0e716a | 553 | EXPORT_SYMBOL(rwsem_down_write_failed); |
| 1da177e4 | 554 | |
| d4799608 MH | 555 | __visible struct rw_semaphore * __sched |
| | 556 | rwsem_down_write_failed_killable(struct rw_semaphore *sem) |
| | 557 | { |
| | 558 | return __rwsem_down_write_failed_common(sem, TASK_KILLABLE); |
| | 559 | } |
| | 560 | EXPORT_SYMBOL(rwsem_down_write_failed_killable); |
| | 561 | |
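
The killable variant surfaces to callers through down_write_killable(), which returns 0 on success or -EINTR when a fatal signal interrupts the wait. A sketch of the caller-side pattern; my_update() and its arguments are hypothetical:

```c
#include <linux/errno.h>
#include <linux/rwsem.h>

/* Hypothetical helper: take the write lock, but let a fatal signal abort. */
static int my_update(struct rw_semaphore *sem, int *shared, int val)
{
	if (down_write_killable(sem))
		return -EINTR;	/* killed while sleeping in the slow path */

	*shared = val;
	up_write(sem);
	return 0;
}
```

| Commit | Line | Data |
|---|---|---|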
| 1da177e4 LT | 562 | /* |
| | 563 | * handle waking up a waiter on the semaphore |
| | 564 | * - up_read/up_write has decremented the active part of count if we come here |
| | 565 | */ |
| 3ebae4f3 | 566 | __visible |
| d1233754 | 567 | struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem) |
| 1da177e4 LT | 568 | { |
| | 569 | unsigned long flags; |
| 133e89ef | 570 | WAKE_Q(wake_q); |
| 1da177e4 | 571 | |
| 59aabfc7 WL | 572 | /* |
| | 573 | * If a spinner is present, it is not necessary to do the wakeup. |
| | 574 | * Try to do the wakeup only if the trylock succeeds, to minimize |
| | 575 | * spinlock contention which may introduce too much delay in the |
| | 576 | * unlock operation. |
| | 577 | * |
| | 578 | * spinning writer up_write/up_read caller |
| | 579 | * --------------- ----------------------- |
| | 580 | * [S] osq_unlock() [L] osq |
| | 581 | * MB RMB |
| | 582 | * [RmW] rwsem_try_write_lock() [RmW] spin_trylock(wait_lock) |
| | 583 | * |
| | 584 | * Here, it is important to make sure that there won't be a missed |
| | 585 | * wakeup while the rwsem is free and the only spinning writer goes |
| | 586 | * to sleep without taking the rwsem. Even when the spinning writer |
| | 587 | * is just going to break out of the waiting loop, it will still do |
| | 588 | * a trylock in rwsem_down_write_failed() before sleeping. IOW, if |
| | 589 | * rwsem_has_spinner() is true, it will guarantee at least one |
| | 590 | * trylock attempt on the rwsem later on. |
| | 591 | */ |
| | 592 | if (rwsem_has_spinner(sem)) { |
| | 593 | /* |
| | 594 | * The smp_rmb() here is to make sure that the spinner |
| | 595 | * state is consulted before reading the wait_lock. |
| | 596 | */ |
| | 597 | smp_rmb(); |
| | 598 | if (!raw_spin_trylock_irqsave(&sem->wait_lock, flags)) |
| | 599 | return sem; |
| | 600 | goto locked; |
| | 601 | } |
| ddb6c9b5 | 602 | raw_spin_lock_irqsave(&sem->wait_lock, flags); |
| 59aabfc7 | 603 | locked: |
| 1da177e4 LT | 604 | |
| | 605 | /* do nothing if list empty */ |
| | 606 | if (!list_empty(&sem->wait_list)) |
| 133e89ef | 607 | sem = __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q); |
| 1da177e4 | 608 | |
| ddb6c9b5 | 609 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
| 133e89ef | 610 | wake_up_q(&wake_q); |
| 1da177e4 | 611 | |
| 1da177e4 LT | 612 | return sem; |
| | 613 | } |
| db0e716a | 614 | EXPORT_SYMBOL(rwsem_wake); |
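
rwsem_wake() is reached from the unlock fast paths when dropping the active bias leaves the count negative, i.e. waiters are queued. The asm-generic write-unlock looked roughly like this in this era (a sketch; arch assembly versions differ):

```c
#include <linux/atomic.h>
#include <linux/rwsem.h>

/* Sketch of the asm-generic up_write fast path (see asm-generic/rwsem.h). */
static inline void __up_write(struct rw_semaphore *sem)
{
	/* Drop ACTIVE_WRITE_BIAS; a negative result means queued waiters. */
	if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS,
						    (atomic_long_t *)&sem->count) < 0))
		rwsem_wake(sem);
}
```

| Commit | Line | Data |
|---|---|---|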
| 1da177e4 LT | 615 | |
| | 616 | /* |
| | 617 | * downgrade a write lock into a read lock |
| | 618 | * - caller incremented waiting part of count and discovered it still negative |
| | 619 | * - just wake up any readers at the front of the queue |
| | 620 | */ |
| 3ebae4f3 | 621 | __visible |
| d1233754 | 622 | struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem) |
| 1da177e4 LT | 623 | { |
| | 624 | unsigned long flags; |
| 133e89ef | 625 | WAKE_Q(wake_q); |
| 1da177e4 | 626 | |
| ddb6c9b5 | 627 | raw_spin_lock_irqsave(&sem->wait_lock, flags); |
| 1da177e4 LT | 628 | |
| | 629 | /* do nothing if list empty */ |
| | 630 | if (!list_empty(&sem->wait_list)) |
| 133e89ef | 631 | sem = __rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q); |
| 1da177e4 | 632 | |
| ddb6c9b5 | 633 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
| 133e89ef | 634 | wake_up_q(&wake_q); |
| 1da177e4 | 635 | |
| 1da177e4 LT | 636 | return sem; |
| | 637 | } |
| 1da177e4 | 638 | EXPORT_SYMBOL(rwsem_downgrade_wake); |
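
This slow path backs the downgrade_write() API, which lets a task atomically convert a held write lock into a read lock without a window where the lock is free. A sketch of the caller-side pattern; everything except the lock API itself is hypothetical:

```c
#include <linux/rwsem.h>

/* Hypothetical: publish an update, then keep reading under shared mode. */
static void my_publish_then_read(struct rw_semaphore *sem, int *shared)
{
	down_write(sem);
	*shared = 1;		/* exclusive update */
	downgrade_write(sem);	/* become a reader; queued readers may join */
	(void)*shared;		/* continue under the read lock */
	up_read(sem);
}
```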