mutex: Queue mutex spinners with MCS lock to reduce cacheline contention
/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

/*
 * A mutex count of -1 indicates that waiters are sleeping waiting for the
 * mutex. Some architectures can allow any negative number, not just -1, for
 * this purpose.
 */
#ifdef __ARCH_ALLOW_ANY_NEGATIVE_MUTEX_COUNT
#define	MUTEX_SHOW_NO_WAITER(mutex)	(atomic_read(&(mutex)->count) >= 0)
#else
#define	MUTEX_SHOW_NO_WAITER(mutex)	(atomic_read(&(mutex)->count) != -1)
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
	mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	lock->spin_mlock = NULL;
#endif

	debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	mutex_set_owner(lock);
}

EXPORT_SYMBOL(mutex_lock);
#endif

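/*
 * Illustrative sketch (not part of the original kernel/mutex.c): a typical
 * caller pattern for mutex_lock()/mutex_unlock() as described in the
 * kernel-doc above.  The 'demo_device' structure and 'demo_update' helper
 * are hypothetical and exist only to show the lock/unlock pairing in
 * process context.
 */
struct demo_device {
	struct mutex	lock;		/* protects 'value' */
	int		value;
};

static void demo_update(struct demo_device *dev, int value)
{
	mutex_lock(&dev->lock);		/* may sleep; process context only */
	dev->value = value;		/* critical section */
	mutex_unlock(&dev->lock);	/* must be released by the same task */
}
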
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
 * In order to avoid a stampede of mutex spinners from acquiring the mutex
 * more or less simultaneously, the spinners need to acquire an MCS lock
 * first before spinning on the owner field.
 *
 * We don't inline mspin_lock() so that perf can correctly account for the
 * time spent in this lock function.
 */
struct mspin_node {
	struct mspin_node *next;
	int		  locked;	/* 1 if lock acquired */
};
#define	MLOCK(mutex)	((struct mspin_node **)&((mutex)->spin_mlock))

static noinline
void mspin_lock(struct mspin_node **lock, struct mspin_node *node)
{
	struct mspin_node *prev;

	/* Init node */
	node->locked = 0;
	node->next   = NULL;

	prev = xchg(lock, node);
	if (likely(prev == NULL)) {
		/* Lock acquired */
		node->locked = 1;
		return;
	}
	ACCESS_ONCE(prev->next) = node;
	smp_wmb();
	/* Wait until the lock holder passes the lock down */
	while (!ACCESS_ONCE(node->locked))
		arch_mutex_cpu_relax();
}

static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
{
	struct mspin_node *next = ACCESS_ONCE(node->next);

	if (likely(!next)) {
		/*
		 * Release the lock by setting it to NULL
		 */
		if (cmpxchg(lock, node, NULL) == node)
			return;
		/* Wait until the next pointer is set */
		while (!(next = ACCESS_ONCE(node->next)))
			arch_mutex_cpu_relax();
	}
	ACCESS_ONCE(next->locked) = 1;
	smp_wmb();
}

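/*
 * Illustrative sketch (not part of the original file): how the MCS queue
 * above is meant to be used.  Each would-be spinner queues with a node on
 * its own stack, so the only per-mutex storage is the 'spin_mlock' tail
 * pointer.  'demo_spin' is a hypothetical helper shown only to make the
 * calling convention explicit; the real caller is __mutex_lock_common()
 * further down.
 */
static void demo_spin(struct mutex *lock)
{
	struct mspin_node node;			/* lives on this task's stack */

	mspin_lock(MLOCK(lock), &node);		/* join the FIFO spinner queue */
	/* ...only one spinner at a time gets here to poll lock->owner... */
	mspin_unlock(MLOCK(lock), &node);	/* hand the queue to the next spinner */
}
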
/*
 * Mutex spinning code migrated from kernel/sched/core.c
 */

static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
{
	if (lock->owner != owner)
		return false;

	/*
	 * Ensure we emit the owner->on_cpu dereference _after_ checking that
	 * lock->owner still matches owner. If that fails, owner might point
	 * to free()d memory; if it still matches, the rcu_read_lock() ensures
	 * the memory stays valid.
	 */
	barrier();

	return owner->on_cpu;
}

/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */
static noinline
int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
	rcu_read_lock();
	while (owner_running(lock, owner)) {
		if (need_resched())
			break;

		arch_mutex_cpu_relax();
	}
	rcu_read_unlock();

	/*
	 * We break out of the loop above on need_resched() and when the
	 * owner changes, which is a sign of heavy contention. Return
	 * success only when lock->owner is NULL.
	 */
	return lock->owner == NULL;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	int retval = 1;

	rcu_read_lock();
	if (lock->owner)
		retval = lock->owner->on_cpu;
	rcu_read_unlock();
	/*
	 * If lock->owner is not set, the owner may have just acquired the
	 * mutex and not set the owner field yet, or the mutex may have been
	 * released.
	 */
	return retval;
}
#endif

static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner early:
	 * the slow path will always be taken, and it clears the owner field
	 * after verifying that it was indeed current.
	 */
	mutex_clear_owner(lock);
#endif
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	/*
	 * Optimistic spinning.
	 *
	 * We try to spin for acquisition when we find that there are no
	 * pending waiters and the lock owner is currently running on a
	 * (different) CPU.
	 *
	 * The rationale is that if the lock owner is running, it is likely to
	 * release the lock soon.
	 *
	 * Since this needs the lock owner, and this mutex implementation
	 * doesn't track the owner atomically in the lock field, we need to
	 * track it non-atomically.
	 *
	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
	 * to serialize everything.
	 *
	 * The mutex spinners are queued up using the MCS lock so that only
	 * one spinner can compete for the mutex. However, if mutex spinning
	 * isn't going to happen, there is no point in going through the
	 * lock/unlock overhead.
	 */
	if (!mutex_can_spin_on_owner(lock))
		goto slowpath;

	for (;;) {
		struct task_struct *owner;
		struct mspin_node  node;

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		mspin_lock(MLOCK(lock), &node);
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner)) {
			mspin_unlock(MLOCK(lock), &node);
			break;
		}

		if ((atomic_read(&lock->count) == 1) &&
		    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
			lock_acquired(&lock->dep_map, ip);
			mutex_set_owner(lock);
			mspin_unlock(MLOCK(lock), &node);
			preempt_enable();
			return 0;
		}
		mspin_unlock(MLOCK(lock), &node);

		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we're an RT task, that will live-lock because we won't let
		 * the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(task)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		arch_mutex_cpu_relax();
	}
slowpath:
#endif
	spin_lock_mutex(&lock->wait_lock, flags);

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, -1) == 1))
		goto done;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		if (MUTEX_SHOW_NO_WAITER(lock) &&
		    (atomic_xchg(&lock->count, -1) == 1))
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			mutex_remove_waiter(lock, &waiter,
					    task_thread_info(task));
			mutex_release(&lock->dep_map, 1, ip);
			spin_unlock_mutex(&lock->wait_lock, flags);

			debug_mutex_free_waiter(&waiter);
			preempt_enable();
			return -EINTR;
		}
		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();
		spin_lock_mutex(&lock->wait_lock, flags);
	}

done:
	lock_acquired(&lock->dep_map, ip);
	/* got the lock - rejoice! */
	mutex_remove_waiter(lock, &waiter, current_thread_info());
	mutex_set_owner(lock);

	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	debug_mutex_free_waiter(&waiter);
	preempt_enable();

	return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

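/*
 * Illustrative sketch (not part of the original file): mutex_lock_nested()
 * exists so lockdep can tell apart two locks of the same lock class that are
 * legitimately held together.  'demo_lock_pair' is hypothetical;
 * SINGLE_DEPTH_NESTING is the conventional subclass for the second lock of
 * such a pair.
 */
static void demo_lock_pair(struct mutex *a, struct mutex *b)
{
	/* Take the pair in a stable order to avoid ABBA deadlocks. */
	if (a > b)
		swap(a, b);

	mutex_lock(a);					/* subclass 0 */
	mutex_lock_nested(b, SINGLE_DEPTH_NESTING);	/* same class, nested */
}
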
void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}

EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
				   subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
#endif

/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);

	/*
	 * some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have to
	 * unlock it here
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
static __used noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	__mutex_unlock_common_slowpath(lock_count, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count);

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_interruptible_slowpath);
	if (!ret)
		mutex_set_owner(lock);

	return ret;
}

EXPORT_SYMBOL(mutex_lock_interruptible);

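/*
 * Illustrative sketch (not part of the original file): the usual way the
 * -EINTR return of mutex_lock_interruptible() is handled, e.g. in an ioctl
 * or read handler, so that a pending signal aborts the wait.  'demo_ioctl'
 * reuses the hypothetical 'demo_device' from the earlier sketch.
 */
static long demo_ioctl(struct demo_device *dev, int value)
{
	if (mutex_lock_interruptible(&dev->lock))
		return -ERESTARTSYS;	/* interrupted by a signal before the lock was taken */

	dev->value = value;
	mutex_unlock(&dev->lock);
	return 0;
}
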
int __sched mutex_lock_killable(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_killable_slowpath);
	if (!ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_lock_killable);

static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}
#endif

/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1)) {
		mutex_set_owner(lock);
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}

	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	int ret;

	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
	if (ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_trylock);
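
/*
 * Illustrative sketch (not part of the original file): because
 * mutex_trylock() follows the spin_trylock() convention noted above,
 * success is non-zero, the opposite of down_trylock().  'demo_try_update'
 * is hypothetical and again uses the 'demo_device' from the first sketch.
 */
static bool demo_try_update(struct demo_device *dev, int value)
{
	if (!mutex_trylock(&dev->lock))
		return false;		/* contended; do not touch dev->value */

	dev->value = value;
	mutex_unlock(&dev->lock);
	return true;
}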

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * Returns 1, with the mutex held, if the count drops to 0; returns 0
 * otherwise.
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
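
/*
 * Illustrative sketch (not part of the original file): the refcount-drop
 * pattern atomic_dec_and_mutex_lock() is meant for.  Only the task that
 * drops the last reference ends up holding the mutex, so teardown is
 * serialized against new lookups.  'demo_put' and 'demo_table_lock' are
 * hypothetical.
 */
static void demo_put(atomic_t *refcount, struct mutex *demo_table_lock)
{
	if (!atomic_dec_and_mutex_lock(refcount, demo_table_lock))
		return;			/* not the last reference */

	/* last reference: tear down while holding the table lock, then drop it */
	mutex_unlock(demo_table_lock);
}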