/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.)
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif
/*
 * A negative mutex count indicates that waiters are sleeping waiting for the
 * mutex.
 */
#define MUTEX_SHOW_NO_WAITER(mutex)	(atomic_read(&(mutex)->count) >= 0)
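/*
 * A quick sketch of the ->count states this file relies on (a summary of
 * the invariants used by the code below, not a line from the original
 * source):
 *
 *	 1  - unlocked
 *	 0  - locked, no waiters
 *	 <0 - locked, and there may be waiters queued on ->wait_list
 *
 * Hence the 1->0 fastpath transition in mutex_lock(), the xchg() of the
 * count to -1 before a waiter sleeps, and MUTEX_SHOW_NO_WAITER() above.
 */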
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
	mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	lock->spin_mlock = NULL;
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 * checks that will enforce the restrictions and will also do
 * deadlock debugging.)
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	mutex_set_owner(lock);
}
EXPORT_SYMBOL(mutex_lock);
#endif
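/*
 * Example - a minimal sketch of the calling pattern described by the
 * kernel-doc above (cache_lock, cache_list and cache_add() are
 * hypothetical names, not part of this file):
 *
 *	static DEFINE_MUTEX(cache_lock);
 *	static LIST_HEAD(cache_list);
 *
 *	void cache_add(struct cache_entry *e)
 *	{
 *		mutex_lock(&cache_lock);
 *		list_add(&e->node, &cache_list);
 *		mutex_unlock(&cache_lock);
 *	}
 */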
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
 * In order to avoid a stampede of mutex spinners from acquiring the mutex
 * more or less simultaneously, the spinners need to acquire a MCS lock
 * first before spinning on the owner field.
 *
 * We don't inline mspin_lock() so that perf can correctly account for the
 * time spent in this lock function.
 */
struct mspin_node {
	struct mspin_node *next;
	int		   locked;	/* 1 if lock acquired */
};
#define MLOCK(mutex)	((struct mspin_node **)&((mutex)->spin_mlock))

static noinline
void mspin_lock(struct mspin_node **lock, struct mspin_node *node)
{
	struct mspin_node *prev;

	/* Init node */
	node->locked = 0;
	node->next   = NULL;

	prev = xchg(lock, node);
	if (likely(prev == NULL)) {
		/* Lock acquired */
		node->locked = 1;
		return;
	}
	ACCESS_ONCE(prev->next) = node;
	smp_wmb();
	/* Wait until the lock holder passes the lock down */
	while (!ACCESS_ONCE(node->locked))
		arch_mutex_cpu_relax();
}

static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
{
	struct mspin_node *next = ACCESS_ONCE(node->next);

	if (likely(!next)) {
		/*
		 * Release the lock by setting it to NULL
		 */
		if (cmpxchg(lock, node, NULL) == node)
			return;
		/* Wait until the next pointer is set */
		while (!(next = ACCESS_ONCE(node->next)))
			arch_mutex_cpu_relax();
	}
	ACCESS_ONCE(next->locked) = 1;
	smp_wmb();
}
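/*
 * Example - a sketch of the MCS handoff with two spinners (hypothetical
 * timeline; node0/node1 live on the spinners' stacks). Each CPU spins on
 * its own node's ->locked field, so the busy-wait stays in a cache line
 * local to that CPU instead of hammering one shared word:
 *
 *	CPU0: mspin_lock(MLOCK(lock), &node0);
 *		xchg() returns NULL -> CPU0 owns the spin queue
 *	CPU1: mspin_lock(MLOCK(lock), &node1);
 *		xchg() returns &node0 -> links node0.next = &node1,
 *		then spins on node1.locked
 *	CPU0: mspin_unlock(MLOCK(lock), &node0);
 *		cmpxchg() fails because node0.next is set,
 *		so it writes node1.locked = 1
 *	CPU1:	sees node1.locked == 1 and returns from mspin_lock()
 */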
/*
 * Mutex spinning code migrated from kernel/sched/core.c
 */

static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
{
	if (lock->owner != owner)
		return false;

	/*
	 * Ensure we emit the owner->on_cpu dereference _after_ checking
	 * that lock->owner still matches owner. If that fails, owner might
	 * point to free()d memory; if it still matches, the rcu_read_lock()
	 * ensures the memory stays valid.
	 */
	barrier();

	return owner->on_cpu;
}

/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */
static noinline
int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
	rcu_read_lock();
	while (owner_running(lock, owner)) {
		if (need_resched())
			break;

		arch_mutex_cpu_relax();
	}
	rcu_read_unlock();

	/*
	 * We break out of the loop above on need_resched() and when the
	 * owner changed, which is a sign of heavy contention. Return
	 * success only when lock->owner is NULL.
	 */
	return lock->owner == NULL;
}
/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	int retval = 1;

	rcu_read_lock();
	if (lock->owner)
		retval = lock->owner->on_cpu;
	rcu_read_unlock();
	/*
	 * If lock->owner is not set, the owner may have just acquired the
	 * mutex and not recorded itself yet, or the mutex may have been
	 * released; in either case it is still worth trying to spin.
	 */
	return retval;
}
#endif
static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner before time:
	 * the slow path will always be taken, and it clears the owner field
	 * after verifying that it was indeed current.
	 */
	mutex_clear_owner(lock);
#endif
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}
EXPORT_SYMBOL(mutex_unlock);
/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner before time:
	 * the slow path will always be taken, and it clears the owner field
	 * after verifying that it was indeed current.
	 */
	mutex_clear_owner(&lock->base);
#endif
	__mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath);
}
EXPORT_SYMBOL(ww_mutex_unlock);
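/*
 * Example - a simplified sketch of the intended w/w calling pattern for
 * two locks. The ww_mutex_lock()/ww_mutex_lock_slow() wrappers and the
 * ww_acquire_* helpers live in the mutex headers, not in this file, and
 * a full implementation retries in a loop on -EDEADLK (see
 * Documentation/ww-mutex-design.txt):
 *
 *	static DEFINE_WW_CLASS(buf_ww_class);
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret;
 *
 *	ww_acquire_init(&ctx, &buf_ww_class);
 *	ret = ww_mutex_lock(a, &ctx);
 *	if (!ret)
 *		ret = ww_mutex_lock(b, &ctx);
 *	if (ret == -EDEADLK) {
 *		ww_mutex_unlock(a);
 *		ww_mutex_lock_slow(b, &ctx);	// sleep, then retry 'a'
 *	}
 *	...
 *	ww_mutex_unlock(a);
 *	ww_mutex_unlock(b);
 *	ww_acquire_fini(&ctx);
 */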
static inline int __sched
__mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);

	if (!hold_ctx)
		return 0;

	if (unlikely(ctx == hold_ctx))
		return -EALREADY;

	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
		ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}
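/*
 * Example - why the unsigned subtraction above is wraparound-safe
 * (illustrative numbers, using 8-bit stamps so that 127 plays the role
 * of LONG_MAX): if hold_ctx->stamp == 250 and ctx->stamp == 4 (allocated
 * later, after the stamp counter wrapped), then 4 - 250 == 10 (mod 256),
 * which is <= 127, so ctx is still correctly treated as the younger
 * context and told to back off with -EDEADLK.
 */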
static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
						   struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done()?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
}
/*
 * After acquiring the lock with the fastpath, or when we lost out in the
 * contested slowpath, set the ctx and wake up any waiters so they can
 * recheck.
 *
 * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
 * as the fastpath and opportunistic spinning are disabled in that case.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock,
			      struct ww_acquire_ctx *ctx)
{
	unsigned long flags;
	struct mutex_waiter *cur;

	ww_mutex_lock_acquired(lock, ctx);

	lock->ctx = ctx;

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the atomic read is done, otherwise contended waiters might be
	 * missed. The contended waiters will either see ww_ctx == NULL
	 * and keep spinning, or they will acquire wait_lock, add
	 * themselves to the wait list and sleep.
	 */
	smp_mb(); /* ^^^ */

	/*
	 * Check if lock is contended; if not, there is nobody to wake up.
	 */
	if (likely(atomic_read(&lock->base.count) == 0))
		return;

	/*
	 * Uh oh, we raced in the fastpath; wake up everyone in this case,
	 * so they can see the new lock->ctx.
	 */
	spin_lock_mutex(&lock->base.wait_lock, flags);
	list_for_each_entry(cur, &lock->base.wait_list, list) {
		debug_mutex_wake_waiter(&lock->base, cur);
		wake_up_process(cur->task);
	}
	spin_unlock_mutex(&lock->base.wait_lock, flags);
}
/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;
	int ret;

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	/*
	 * Optimistic spinning.
	 *
	 * We try to spin for acquisition when we find that there are no
	 * pending waiters and the lock owner is currently running on a
	 * (different) CPU.
	 *
	 * The rationale is that if the lock owner is running, it is likely to
	 * release the lock soon.
	 *
	 * Since this needs the lock owner, and this mutex implementation
	 * doesn't track the owner atomically in the lock field, we need to
	 * track it non-atomically.
	 *
	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
	 * to serialize everything.
	 *
	 * The mutex spinners are queued up using the MCS lock so that only
	 * one spinner can compete for the mutex. However, if mutex spinning
	 * isn't going to happen, there is no point in going through the
	 * lock/unlock overhead.
	 */
	if (!mutex_can_spin_on_owner(lock))
		goto slowpath;

	for (;;) {
		struct task_struct *owner;
		struct mspin_node  node;

		if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
			struct ww_mutex *ww;

			ww = container_of(lock, struct ww_mutex, base);
			/*
			 * If ww->ctx is set the contents are undefined; only
			 * by acquiring wait_lock is there a guarantee that
			 * they are not invalid when read.
			 *
			 * As such, when deadlock detection needs to be
			 * performed, the optimistic spinning cannot be done.
			 */
			if (ACCESS_ONCE(ww->ctx))
				break;
		}

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		mspin_lock(MLOCK(lock), &node);
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner)) {
			mspin_unlock(MLOCK(lock), &node);
			break;
		}

		if ((atomic_read(&lock->count) == 1) &&
		    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
			lock_acquired(&lock->dep_map, ip);
			if (!__builtin_constant_p(ww_ctx == NULL)) {
				struct ww_mutex *ww;
				ww = container_of(lock, struct ww_mutex, base);

				ww_mutex_set_context_fastpath(ww, ww_ctx);
			}

			mutex_set_owner(lock);
			mspin_unlock(MLOCK(lock), &node);
			preempt_enable();
			return 0;
		}
		mspin_unlock(MLOCK(lock), &node);

		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we're an RT task, that will live-lock because we won't let
		 * the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(task)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		arch_mutex_cpu_relax();
	}
slowpath:
#endif
	spin_lock_mutex(&lock->wait_lock, flags);

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, -1) == 1))
		goto done;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		if (MUTEX_SHOW_NO_WAITER(lock) &&
		    (atomic_xchg(&lock->count, -1) == 1))
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			ret = -EINTR;
			goto err;
		}

		if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
			ret = __mutex_lock_check_stamp(lock, ww_ctx);
			if (ret)
				goto err;
		}

		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();
		spin_lock_mutex(&lock->wait_lock, flags);
	}

done:
	lock_acquired(&lock->dep_map, ip);
	/* got the lock - rejoice! */
	mutex_remove_waiter(lock, &waiter, current_thread_info());
	mutex_set_owner(lock);

	if (!__builtin_constant_p(ww_ctx == NULL)) {
		struct ww_mutex *ww = container_of(lock,
						   struct ww_mutex,
						   base);
		struct mutex_waiter *cur;

		/*
		 * This branch gets optimized out for the common case,
		 * and is only important for ww_mutex_lock.
		 */
		ww_mutex_lock_acquired(ww, ww_ctx);
		ww->ctx = ww_ctx;

		/*
		 * Give any possible sleeping processes the chance to wake up,
		 * so they can recheck if they have to back off.
		 */
		list_for_each_entry(cur, &lock->wait_list, list) {
			debug_mutex_wake_waiter(lock, cur);
			wake_up_process(cur->task);
		}
	}

	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	debug_mutex_free_waiter(&waiter);
	preempt_enable();

	return 0;

err:
	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
	spin_unlock_mutex(&lock->wait_lock, flags);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, 1, ip);
	preempt_enable();
	return ret;
}
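/*
 * Example - a sketch of a contended acquisition through the slowpath
 * above (hypothetical two-task timeline; count shown after each step):
 *
 *	T0: mutex_lock()	fastpath 1->0, owner = T0
 *	T1: mutex_lock()	fastpath fails, enters slowpath, queues a
 *				waiter, xchg(count, -1) returns 0 (not 1),
 *				so T1 sleeps
 *	T0: mutex_unlock()	count < 0, so the unlock slowpath runs:
 *				count = 1, T1 is woken
 *	T1:			loops, xchg(count, -1) returns 1, takes
 *				the lock; wait_list is now empty, so the
 *				count is set back to 0
 */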
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL);
}
EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    0, nest, _RET_IP_, NULL);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE,
				   subclass, NULL, _RET_IP_, NULL);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
				   subclass, NULL, _RET_IP_, NULL);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}
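/*
 * Note on the interval arithmetic above: tmp*2 + tmp + tmp/2 grows the
 * injection interval by roughly 3.5x per injected -EDEADLK (e.g.
 * 4 -> 14 -> 49 -> 171 -> ...), capped at UINT_MAX, so the fake
 * deadlocks become progressively rarer as a context sees more
 * acquisitions.
 */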
int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx);
	if (!ret && ctx->acquired > 0)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx);

	if (!ret && ctx->acquired > 0)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);

#endif
/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);

	/*
	 * Some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have
	 * to unlock it here.
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
}
/*
 * Release the lock, slowpath:
 */
static __used noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	__mutex_unlock_common_slowpath(lock_count, 1);
}
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval(&lock->count);
	if (likely(!ret)) {
		mutex_set_owner(lock);
		return 0;
	} else
		return __mutex_lock_interruptible_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_interruptible);

int __sched mutex_lock_killable(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval(&lock->count);
	if (likely(!ret)) {
		mutex_set_owner(lock);
		return 0;
	} else
		return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
			    NULL, _RET_IP_, NULL);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, 0,
				   NULL, _RET_IP_, NULL);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, NULL);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
					struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx);
}

#endif
/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1)) {
		mutex_set_owner(lock);
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}

	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}
/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	int ret;

	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
	if (ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_trylock);
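/*
 * Example - a minimal sketch of opportunistic locking with
 * mutex_trylock() (cache_lock and shrink_cache() are hypothetical
 * names; note the return convention is the opposite of down_trylock()):
 *
 *	if (mutex_trylock(&cache_lock)) {
 *		shrink_cache();			// got the lock
 *		mutex_unlock(&cache_lock);
 *	}
 *	// else: contended - skip this round instead of sleeping
 */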
#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();

	ret = __mutex_fastpath_lock_retval(&lock->base.count);

	if (likely(!ret)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		mutex_set_owner(&lock->base);
	} else
		ret = __ww_mutex_lock_slowpath(lock, ctx);
	return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();

	ret = __mutex_fastpath_lock_retval(&lock->base.count);

	if (likely(!ret)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		mutex_set_owner(&lock->base);
	} else
		ret = __ww_mutex_lock_interruptible_slowpath(lock, ctx);
	return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock_interruptible);

#endif
/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * Return true and hold the lock if we decrement to 0; return false otherwise.
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
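/*
 * Example - a sketch of the intended use of atomic_dec_and_mutex_lock(),
 * dropping a reference and tearing the object down only when the count
 * hits zero (put_object(), obj and obj_list_lock are hypothetical names,
 * not part of this file):
 *
 *	void put_object(struct object *obj)
 *	{
 *		if (atomic_dec_and_mutex_lock(&obj->refcnt, &obj_list_lock)) {
 *			list_del(&obj->node);	// count hit 0, lock held
 *			mutex_unlock(&obj_list_lock);
 *			kfree(obj);
 *		}
 *	}
 */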