rtmutex: Detect changes in the pi lock chain
kernel/locking/rtmutex.c
1/*
2 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
3 *
4 * started by Ingo Molnar and Thomas Gleixner.
5 *
6 * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
7 * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
8 * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
9 * Copyright (C) 2006 Esben Nielsen
10 *
11 * See Documentation/rt-mutex-design.txt for details.
12 */
13#include <linux/spinlock.h>
14#include <linux/export.h>
15#include <linux/sched.h>
16#include <linux/sched/rt.h>
17#include <linux/sched/deadline.h>
18#include <linux/timer.h>
19
20#include "rtmutex_common.h"
21
22/*
23 * lock->owner state tracking:
24 *
25 * lock->owner holds the task_struct pointer of the owner. Bit 0
26 * is used to keep track of the "lock has waiters" state.
27 *
28 * owner bit0
29 * NULL 0 lock is free (fast acquire possible)
30 * NULL 1 lock is free and has waiters and the top waiter
31 * is going to take the lock*
32 * taskpointer 0 lock is held (fast release possible)
33 * taskpointer 1 lock is held and has waiters**
34 *
35 * The fast atomic compare exchange based acquire and release is only
36 * possible when bit 0 of lock->owner is 0.
37 *
38 * (*) It also can be a transitional state when grabbing the lock
39 * with ->wait_lock is held. To prevent any fast path cmpxchg to the lock,
40 * we need to set the bit0 before looking at the lock, and the owner may be
41 * NULL in this small time, hence this can be a transitional state.
42 *
43 * (**) There is a small time when bit 0 is set but there are no
44 * waiters. This can happen when grabbing the lock in the slow path.
45 * To prevent a cmpxchg of the owner releasing the lock, we need to
46 * set this bit before looking at the lock.
47 */
48
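/*
 * Illustrative sketch of the encoding described above: recovering the owner
 * task_struct just means masking off bit 0. example_owner_decode() is a
 * hypothetical helper shown only to make the encoding concrete; the real
 * accessor is rt_mutex_owner() in rtmutex_common.h, which does effectively
 * the same masking.
 */
static inline struct task_struct *example_owner_decode(struct rt_mutex *lock)
{
	return (struct task_struct *)
		((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}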
49static void
50rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
51{
52 unsigned long val = (unsigned long)owner;
53
54 if (rt_mutex_has_waiters(lock))
55 val |= RT_MUTEX_HAS_WAITERS;
56
57 lock->owner = (struct task_struct *)val;
58}
59
60static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
61{
62 lock->owner = (struct task_struct *)
63 ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
64}
65
66static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
67{
68 if (!rt_mutex_has_waiters(lock))
69 clear_rt_mutex_waiters(lock);
70}
71
72/*
73 * We can speed up the acquire/release, if the architecture
74 * supports cmpxchg and if there's no debugging state to be set up
75 */
76#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
77# define rt_mutex_cmpxchg(l,c,n) (cmpxchg(&l->owner, c, n) == c)
78static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
79{
80 unsigned long owner, *p = (unsigned long *) &lock->owner;
81
82 do {
83 owner = *p;
84 } while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
85}
86#else
87# define rt_mutex_cmpxchg(l,c,n) (0)
88static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
89{
90 lock->owner = (struct task_struct *)
91 ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
92}
93#endif
94
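/*
 * Sketch of what the macro above buys us (hypothetical caller; the real
 * users are rt_mutex_fastlock()/rt_mutex_fastunlock() further down): with
 * cmpxchg available and debugging off, the uncontended case never takes
 * lock->wait_lock at all.
 *
 *	if (rt_mutex_cmpxchg(lock, NULL, current))
 *		return;				// fast acquire
 *	... slow path under lock->wait_lock ...
 *	if (rt_mutex_cmpxchg(lock, current, NULL))
 *		return;				// fast release
 */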
95static inline int
96rt_mutex_waiter_less(struct rt_mutex_waiter *left,
97 struct rt_mutex_waiter *right)
98{
99 if (left->prio < right->prio)
100 return 1;
101
102 /*
103 * If both waiters have dl_prio(), we check the deadlines of the
104 * associated tasks.
105 * If left waiter has a dl_prio(), and we didn't return 1 above,
106 * then right waiter has a dl_prio() too.
107 */
108 if (dl_prio(left->prio))
109 return (left->task->dl.deadline < right->task->dl.deadline);
110
111 return 0;
112}
113
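/*
 * Worked example for the comparison above (made-up values): a waiter with
 * prio 29 sorts before one with prio 40, since a lower numerical prio means
 * a higher priority. Two SCHED_DEADLINE waiters compare equal on prio, so
 * the dl_prio() branch breaks the tie by absolute deadline: the waiter whose
 * dl.deadline is earlier is "less" and becomes the leftmost node.
 */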
114static void
115rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
116{
117 struct rb_node **link = &lock->waiters.rb_node;
118 struct rb_node *parent = NULL;
119 struct rt_mutex_waiter *entry;
120 int leftmost = 1;
121
122 while (*link) {
123 parent = *link;
124 entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);
125 if (rt_mutex_waiter_less(waiter, entry)) {
126 link = &parent->rb_left;
127 } else {
128 link = &parent->rb_right;
129 leftmost = 0;
130 }
131 }
132
133 if (leftmost)
134 lock->waiters_leftmost = &waiter->tree_entry;
135
136 rb_link_node(&waiter->tree_entry, parent, link);
137 rb_insert_color(&waiter->tree_entry, &lock->waiters);
138}
139
140static void
141rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
142{
143 if (RB_EMPTY_NODE(&waiter->tree_entry))
144 return;
145
146 if (lock->waiters_leftmost == &waiter->tree_entry)
147 lock->waiters_leftmost = rb_next(&waiter->tree_entry);
148
149 rb_erase(&waiter->tree_entry, &lock->waiters);
150 RB_CLEAR_NODE(&waiter->tree_entry);
151}
152
153static void
154rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
155{
156 struct rb_node **link = &task->pi_waiters.rb_node;
157 struct rb_node *parent = NULL;
158 struct rt_mutex_waiter *entry;
159 int leftmost = 1;
160
161 while (*link) {
162 parent = *link;
163 entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry);
164 if (rt_mutex_waiter_less(waiter, entry)) {
165 link = &parent->rb_left;
166 } else {
167 link = &parent->rb_right;
168 leftmost = 0;
169 }
170 }
171
172 if (leftmost)
173 task->pi_waiters_leftmost = &waiter->pi_tree_entry;
174
175 rb_link_node(&waiter->pi_tree_entry, parent, link);
176 rb_insert_color(&waiter->pi_tree_entry, &task->pi_waiters);
177}
178
179static void
180rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
181{
182 if (RB_EMPTY_NODE(&waiter->pi_tree_entry))
183 return;
184
185 if (task->pi_waiters_leftmost == &waiter->pi_tree_entry)
186 task->pi_waiters_leftmost = rb_next(&waiter->pi_tree_entry);
187
188 rb_erase(&waiter->pi_tree_entry, &task->pi_waiters);
189 RB_CLEAR_NODE(&waiter->pi_tree_entry);
190}
191
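/*
 * Sketch of why the *_leftmost pointers are maintained above: the
 * highest-priority waiter is always the leftmost rb-tree node, so looking
 * it up is O(1). example_top_pi_waiter() is a hypothetical helper for
 * illustration; rt_mutex_top_waiter()/task_top_pi_waiter() in
 * rtmutex_common.h are the real accessors built on the same idea.
 */
static inline struct rt_mutex_waiter *
example_top_pi_waiter(struct task_struct *task)
{
	return rb_entry(task->pi_waiters_leftmost, struct rt_mutex_waiter,
			pi_tree_entry);
}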
192/*
193 * Calculate task priority from the waiter tree priority
194 *
195 * Return task->normal_prio when the waiter tree is empty or when
196 * the waiter is not allowed to do priority boosting
197 */
198int rt_mutex_getprio(struct task_struct *task)
199{
200 if (likely(!task_has_pi_waiters(task)))
201 return task->normal_prio;
202
203 return min(task_top_pi_waiter(task)->prio,
204 task->normal_prio);
205}
206
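/*
 * Worked example for rt_mutex_getprio() (hypothetical numbers): a
 * SCHED_NORMAL owner with normal_prio 120 whose top pi waiter has prio 29
 * reports min(29, 120) = 29 and therefore keeps running boosted; once its
 * pi_waiters tree is empty it reports plain normal_prio again.
 */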
207struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
208{
209 if (likely(!task_has_pi_waiters(task)))
210 return NULL;
211
212 return task_top_pi_waiter(task)->task;
213}
214
215/*
216 * Called by sched_setscheduler() to check whether the priority change
217 * is overruled by a possible priority boosting.
218 */
219int rt_mutex_check_prio(struct task_struct *task, int newprio)
220{
221 if (!task_has_pi_waiters(task))
222 return 0;
223
224 return task_top_pi_waiter(task)->task->prio <= newprio;
225}
226
227/*
228 * Adjust the priority of a task, after its pi_waiters got modified.
229 *
230 * This can be both boosting and unboosting. task->pi_lock must be held.
231 */
232static void __rt_mutex_adjust_prio(struct task_struct *task)
233{
234 int prio = rt_mutex_getprio(task);
235
236 if (task->prio != prio || dl_prio(prio))
237 rt_mutex_setprio(task, prio);
238}
239
240/*
241 * Adjust task priority (undo boosting). Called from the exit path of
242 * rt_mutex_slowunlock() and rt_mutex_slowlock().
243 *
244 * (Note: We do this outside of the protection of lock->wait_lock to
245 * allow the lock to be taken while or before we readjust the priority
246 * of task. We do not use the spin_xx_mutex() variants here as we are
247 * outside of the debug path.)
248 */
249static void rt_mutex_adjust_prio(struct task_struct *task)
250{
251 unsigned long flags;
252
253 raw_spin_lock_irqsave(&task->pi_lock, flags);
254 __rt_mutex_adjust_prio(task);
255 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
256}
257
258/*
259 * Max number of times we'll walk the boosting chain:
260 */
261int max_lock_depth = 1024;
262
263static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
264{
265 return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
266}
267
268/*
269 * Adjust the priority chain. Also used for deadlock detection.
270 * Decreases task's usage by one - may thus free the task.
271 *
272 * @task: the task owning the mutex (owner) for which a chain walk is
273 * probably needed
274 * @deadlock_detect: do we have to carry out deadlock detection?
275 * @orig_lock: the mutex (can be NULL if we are walking the chain to recheck
276 * things for a task that has just got its priority adjusted, and
277 * is waiting on a mutex)
278 * @next_lock: the mutex on which the owner of @orig_lock was blocked before
279 * we dropped its pi_lock. Is never dereferenced, only used for
280 * comparison to detect lock chain changes.
281 * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
282 * its priority to the mutex owner (can be NULL in the case
283 * depicted above or if the top waiter is gone away and we are
284 * actually deboosting the owner)
285 * @top_task: the current top waiter
286 *
287 * Returns 0 or -EDEADLK.
288 */
289static int rt_mutex_adjust_prio_chain(struct task_struct *task,
290 int deadlock_detect,
291 struct rt_mutex *orig_lock,
292 struct rt_mutex *next_lock,
293 struct rt_mutex_waiter *orig_waiter,
294 struct task_struct *top_task)
295{
296 struct rt_mutex *lock;
297 struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
298 int detect_deadlock, ret = 0, depth = 0;
299 unsigned long flags;
300
301 detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
302 deadlock_detect);
303
304 /*
305 * The (de)boosting is a step by step approach with a lot of
306 * pitfalls. We want this to be preemptible and we want hold a
307 * maximum of two locks per step. So we have to check
308 * carefully whether things change under us.
309 */
310 again:
311 if (++depth > max_lock_depth) {
312 static int prev_max;
313
314 /*
315 * Print this only once. If the admin changes the limit,
316 * print a new message when reaching the limit again.
317 */
318 if (prev_max != max_lock_depth) {
319 prev_max = max_lock_depth;
320 printk(KERN_WARNING "Maximum lock depth %d reached "
321 "task: %s (%d)\n", max_lock_depth,
322 top_task->comm, task_pid_nr(top_task));
323 }
324 put_task_struct(task);
325
326 return -EDEADLK;
327 }
328 retry:
329 /*
330 * Task can not go away as we did a get_task() before !
331 */
332 raw_spin_lock_irqsave(&task->pi_lock, flags);
333
334 waiter = task->pi_blocked_on;
335 /*
336 * Check whether the end of the boosting chain has been
337 * reached or the state of the chain has changed while we
338 * dropped the locks.
339 */
340 if (!waiter)
341 goto out_unlock_pi;
342
343 /*
344 * Check the orig_waiter state. After we dropped the locks,
345 * the previous owner of the lock might have released the lock.
346 */
347 if (orig_waiter && !rt_mutex_owner(orig_lock))
348 goto out_unlock_pi;
349
350 /*
351 * We dropped all locks after taking a refcount on @task, so
352 * the task might have moved on in the lock chain or even left
353 * the chain completely and blocks now on an unrelated lock or
354 * on @orig_lock.
355 *
356 * We stored the lock on which @task was blocked in @next_lock,
357 * so we can detect the chain change.
358 */
359 if (next_lock != waiter->lock)
360 goto out_unlock_pi;
361
362 /*
363 * Drop out, when the task has no waiters. Note,
364 * top_waiter can be NULL, when we are in the deboosting
365 * mode!
366 */
367 if (top_waiter) {
368 if (!task_has_pi_waiters(task))
369 goto out_unlock_pi;
370 /*
371 * If deadlock detection is off, we stop here if we
372 * are not the top pi waiter of the task.
373 */
374 if (!detect_deadlock && top_waiter != task_top_pi_waiter(task))
375 goto out_unlock_pi;
376 }
377
378 /*
379 * When deadlock detection is off then we check, if further
380 * priority adjustment is necessary.
381 */
382 if (!detect_deadlock && waiter->prio == task->prio)
383 goto out_unlock_pi;
384
385 lock = waiter->lock;
386 if (!raw_spin_trylock(&lock->wait_lock)) {
387 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
388 cpu_relax();
389 goto retry;
390 }
391
392 /*
393 * Deadlock detection. If the lock is the same as the original
394 * lock which caused us to walk the lock chain or if the
395 * current lock is owned by the task which initiated the chain
396 * walk, we detected a deadlock.
397 */
398 if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
399 debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
400 raw_spin_unlock(&lock->wait_lock);
401 ret = -EDEADLK;
402 goto out_unlock_pi;
403 }
404
405 top_waiter = rt_mutex_top_waiter(lock);
406
407 /* Requeue the waiter */
408 rt_mutex_dequeue(lock, waiter);
409 waiter->prio = task->prio;
410 rt_mutex_enqueue(lock, waiter);
411
412 /* Release the task */
413 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
414 if (!rt_mutex_owner(lock)) {
415 /*
416 * If the requeue above changed the top waiter, then we need
417 * to wake the new top waiter up to try to get the lock.
418 */
419
420 if (top_waiter != rt_mutex_top_waiter(lock))
421 wake_up_process(rt_mutex_top_waiter(lock)->task);
422 raw_spin_unlock(&lock->wait_lock);
423 goto out_put_task;
424 }
425 put_task_struct(task);
426
427 /* Grab the next task */
428 task = rt_mutex_owner(lock);
429 get_task_struct(task);
430 raw_spin_lock_irqsave(&task->pi_lock, flags);
431
432 if (waiter == rt_mutex_top_waiter(lock)) {
433 /* Boost the owner */
434 rt_mutex_dequeue_pi(task, top_waiter);
435 rt_mutex_enqueue_pi(task, waiter);
436 __rt_mutex_adjust_prio(task);
437
438 } else if (top_waiter == waiter) {
439 /* Deboost the owner */
440 rt_mutex_dequeue_pi(task, waiter);
441 waiter = rt_mutex_top_waiter(lock);
442 rt_mutex_enqueue_pi(task, waiter);
443 __rt_mutex_adjust_prio(task);
444 }
445
446 /*
447 * Check whether the task which owns the current lock is pi
448 * blocked itself. If yes we store a pointer to the lock for
449 * the lock chain change detection above. After we dropped
450 * task->pi_lock next_lock cannot be dereferenced anymore.
451 */
452 next_lock = task_blocked_on_lock(task);
453
454 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
455
456 top_waiter = rt_mutex_top_waiter(lock);
457 raw_spin_unlock(&lock->wait_lock);
458
459 /*
460 * We reached the end of the lock chain. Stop right here. No
461 * point to go back just to figure that out.
462 */
463 if (!next_lock)
464 goto out_put_task;
465
466 if (!detect_deadlock && waiter != top_waiter)
467 goto out_put_task;
468
469 goto again;
470
471 out_unlock_pi:
472 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
473 out_put_task:
474 put_task_struct(task);
475
476 return ret;
477}
478
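/*
 * Hypothetical walk of the function above, to make the stepping concrete:
 * task A owns L1 and is blocked on L2, which task B owns. When a
 * higher-priority waiter C blocks on L1, the first iteration requeues C in
 * L1's waiter tree and boosts A; because A is itself blocked, next_lock is
 * set to L2 and the loop runs again, this time boosting B through L2. B is
 * not blocked on anything, so task_blocked_on_lock() returns NULL and the
 * walk stops after two iterations.
 */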
479/*
480 * Try to take an rt-mutex
481 *
482 * Must be called with lock->wait_lock held.
483 *
484 * @lock: the lock to be acquired.
485 * @task: the task which wants to acquire the lock
486 * @waiter: the waiter that is queued to the lock's wait list. (could be NULL)
487 */
488static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
489 struct rt_mutex_waiter *waiter)
490{
491 /*
492 * We have to be careful here if the atomic speedups are
493 * enabled, such that, when
494 * - no other waiter is on the lock
495 * - the lock has been released since we did the cmpxchg
496 * the lock can be released or taken while we are doing the
497 * checks and marking the lock with RT_MUTEX_HAS_WAITERS.
498 *
499 * The atomic acquire/release aware variant of
500 * mark_rt_mutex_waiters uses a cmpxchg loop. After setting
501 * the WAITERS bit, the atomic release / acquire can not
502 * happen anymore and lock->wait_lock protects us from the
503 * non-atomic case.
504 *
505 * Note, that this might set lock->owner =
506 * RT_MUTEX_HAS_WAITERS in the case the lock is not contended
507 * any more. This is fixed up when we take the ownership.
508 * This is the transitional state explained at the top of this file.
509 */
510 mark_rt_mutex_waiters(lock);
511
512 if (rt_mutex_owner(lock))
513 return 0;
514
515 /*
516 * It will get the lock because of one of these conditions:
517 * 1) there is no waiter
518 * 2) higher priority than waiters
519 * 3) it is top waiter
520 */
521 if (rt_mutex_has_waiters(lock)) {
522 if (task->prio >= rt_mutex_top_waiter(lock)->prio) {
523 if (!waiter || waiter != rt_mutex_top_waiter(lock))
524 return 0;
525 }
526 }
527
528 if (waiter || rt_mutex_has_waiters(lock)) {
529 unsigned long flags;
530 struct rt_mutex_waiter *top;
531
532 raw_spin_lock_irqsave(&task->pi_lock, flags);
533
534 /* remove the queued waiter. */
535 if (waiter) {
536 rt_mutex_dequeue(lock, waiter);
537 task->pi_blocked_on = NULL;
538 }
539
540 /*
541 * We have to enqueue the top waiter(if it exists) into
542 * task->pi_waiters list.
543 */
544 if (rt_mutex_has_waiters(lock)) {
545 top = rt_mutex_top_waiter(lock);
546 rt_mutex_enqueue_pi(task, top);
547 }
548 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
549 }
550
551 /* We got the lock. */
552 debug_rt_mutex_lock(lock);
553
554 rt_mutex_set_owner(lock, task);
555
556 rt_mutex_deadlock_account_lock(lock, task);
557
558 return 1;
559}
560
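/*
 * Typical calling pattern for the function above (a sketch; the slow path
 * functions below are the real users): attempt the acquire under
 * lock->wait_lock, then clean up the waiters bit, which
 * try_to_take_rt_mutex() sets unconditionally.
 *
 *	raw_spin_lock(&lock->wait_lock);
 *	ret = try_to_take_rt_mutex(lock, current, NULL);
 *	fixup_rt_mutex_waiters(lock);
 *	raw_spin_unlock(&lock->wait_lock);
 */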
561/*
562 * Task blocks on lock.
563 *
564 * Prepare waiter and propagate pi chain
565 *
566 * This must be called with lock->wait_lock held.
567 */
568static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
569 struct rt_mutex_waiter *waiter,
570 struct task_struct *task,
571 int detect_deadlock)
572{
573 struct task_struct *owner = rt_mutex_owner(lock);
574 struct rt_mutex_waiter *top_waiter = waiter;
575 struct rt_mutex *next_lock;
576 int chain_walk = 0, res;
577 unsigned long flags;
578
579 /*
580 * Early deadlock detection. We really don't want the task to
581 * enqueue on itself just to untangle the mess later. It's not
582 * only an optimization. We drop the locks, so another waiter
583 * can come in before the chain walk detects the deadlock. So
584 * the other will detect the deadlock and return -EDEADLOCK,
585 * which is wrong, as the other waiter is not in a deadlock
586 * situation.
587 */
588 if (owner == task)
589 return -EDEADLK;
590
591 raw_spin_lock_irqsave(&task->pi_lock, flags);
592 __rt_mutex_adjust_prio(task);
593 waiter->task = task;
594 waiter->lock = lock;
595 waiter->prio = task->prio;
596
597 /* Get the top priority waiter on the lock */
598 if (rt_mutex_has_waiters(lock))
599 top_waiter = rt_mutex_top_waiter(lock);
600 rt_mutex_enqueue(lock, waiter);
601
602 task->pi_blocked_on = waiter;
603
604 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
605
606 if (!owner)
607 return 0;
608
609 raw_spin_lock_irqsave(&owner->pi_lock, flags);
610 if (waiter == rt_mutex_top_waiter(lock)) {
611 rt_mutex_dequeue_pi(owner, top_waiter);
612 rt_mutex_enqueue_pi(owner, waiter);
613
614 __rt_mutex_adjust_prio(owner);
615 if (owner->pi_blocked_on)
616 chain_walk = 1;
617 } else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) {
618 chain_walk = 1;
619 }
620
621 /* Store the lock on which owner is blocked or NULL */
622 next_lock = task_blocked_on_lock(owner);
623
624 raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
625 /*
626 * Even if full deadlock detection is on, if the owner is not
627 * blocked itself, we can avoid finding this out in the chain
628 * walk.
629 */
630 if (!chain_walk || !next_lock)
631 return 0;
632
633 /*
634 * The owner can't disappear while holding a lock,
635 * so the owner struct is protected by wait_lock.
636 * Gets dropped in rt_mutex_adjust_prio_chain()!
637 */
638 get_task_struct(owner);
639
640 raw_spin_unlock(&lock->wait_lock);
641
642 res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock,
643 next_lock, waiter, task);
644
645 raw_spin_lock(&lock->wait_lock);
646
647 return res;
648}
649
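/*
 * Concrete case for the early deadlock check above (hypothetical): a task
 * that already owns @lock and blocks on it again would enqueue behind
 * itself, so owner == task is caught up front and returns -EDEADLK before
 * any waiter is queued; a deadlock through a longer cycle (A waits on B's
 * lock while B waits on A's) is left to the chain walk to detect.
 */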
650/*
651 * Wake up the next waiter on the lock.
652 *
653 * Remove the top waiter from the current task's waiter list and wake it up.
654 *
655 * Called with lock->wait_lock held.
656 */
657static void wakeup_next_waiter(struct rt_mutex *lock)
658{
659 struct rt_mutex_waiter *waiter;
660 unsigned long flags;
661
662 raw_spin_lock_irqsave(&current->pi_lock, flags);
663
664 waiter = rt_mutex_top_waiter(lock);
665
666 /*
667 * Remove it from current->pi_waiters. We do not adjust a
668 * possible priority boost right now. We execute wakeup in the
669 * boosted mode and go back to normal after releasing
670 * lock->wait_lock.
671 */
672 rt_mutex_dequeue_pi(current, waiter);
673
674 rt_mutex_set_owner(lock, NULL);
675
676 raw_spin_unlock_irqrestore(&current->pi_lock, flags);
677
678 wake_up_process(waiter->task);
679}
680
681/*
682 * Remove a waiter from a lock and give up
683 *
684 * Must be called with lock->wait_lock held and
685 * have just failed to try_to_take_rt_mutex().
686 */
687static void remove_waiter(struct rt_mutex *lock,
688 struct rt_mutex_waiter *waiter)
689{
690 int first = (waiter == rt_mutex_top_waiter(lock));
691 struct task_struct *owner = rt_mutex_owner(lock);
692 struct rt_mutex *next_lock = NULL;
693 unsigned long flags;
694
695 raw_spin_lock_irqsave(&current->pi_lock, flags);
696 rt_mutex_dequeue(lock, waiter);
697 current->pi_blocked_on = NULL;
698 raw_spin_unlock_irqrestore(&current->pi_lock, flags);
699
700 if (!owner)
701 return;
702
703 if (first) {
704
705 raw_spin_lock_irqsave(&owner->pi_lock, flags);
706
707 rt_mutex_dequeue_pi(owner, waiter);
708
709 if (rt_mutex_has_waiters(lock)) {
710 struct rt_mutex_waiter *next;
711
712 next = rt_mutex_top_waiter(lock);
713 rt_mutex_enqueue_pi(owner, next);
714 }
715 __rt_mutex_adjust_prio(owner);
716
717 /* Store the lock on which owner is blocked or NULL */
718 next_lock = task_blocked_on_lock(owner);
719
720 raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
721 }
722
723 if (!next_lock)
724 return;
725
726 /* gets dropped in rt_mutex_adjust_prio_chain()! */
727 get_task_struct(owner);
728
729 raw_spin_unlock(&lock->wait_lock);
730
731 rt_mutex_adjust_prio_chain(owner, 0, lock, next_lock, NULL, current);
732
733 raw_spin_lock(&lock->wait_lock);
734}
735
736/*
737 * Recheck the pi chain, in case we got a priority setting
738 *
739 * Called from sched_setscheduler
740 */
741void rt_mutex_adjust_pi(struct task_struct *task)
742{
743 struct rt_mutex_waiter *waiter;
744 struct rt_mutex *next_lock;
745 unsigned long flags;
746
747 raw_spin_lock_irqsave(&task->pi_lock, flags);
748
749 waiter = task->pi_blocked_on;
750 if (!waiter || (waiter->prio == task->prio &&
751 !dl_prio(task->prio))) {
752 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
753 return;
754 }
755 next_lock = waiter->lock;
756 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
757
758 /* gets dropped in rt_mutex_adjust_prio_chain()! */
759 get_task_struct(task);
760
761 rt_mutex_adjust_prio_chain(task, 0, NULL, next_lock, NULL, task);
762}
763
764/**
765 * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
766 * @lock: the rt_mutex to take
767 * @state: the state the task should block in (TASK_INTERRUPTIBLE
768 * or TASK_UNINTERRUPTIBLE)
769 * @timeout: the pre-initialized and started timer, or NULL for none
770 * @waiter: the pre-initialized rt_mutex_waiter
771 *
772 * lock->wait_lock must be held by the caller.
773 */
774static int __sched
775__rt_mutex_slowlock(struct rt_mutex *lock, int state,
776 struct hrtimer_sleeper *timeout,
777 struct rt_mutex_waiter *waiter)
778{
779 int ret = 0;
780
781 for (;;) {
782 /* Try to acquire the lock: */
783 if (try_to_take_rt_mutex(lock, current, waiter))
784 break;
785
786 /*
787 * TASK_INTERRUPTIBLE checks for signals and
788 * timeout. Ignored otherwise.
789 */
790 if (unlikely(state == TASK_INTERRUPTIBLE)) {
791 /* Signal pending? */
792 if (signal_pending(current))
793 ret = -EINTR;
794 if (timeout && !timeout->task)
795 ret = -ETIMEDOUT;
796 if (ret)
797 break;
798 }
799
800 raw_spin_unlock(&lock->wait_lock);
801
802 debug_rt_mutex_print_deadlock(waiter);
803
804 schedule_rt_mutex(lock);
805
806 raw_spin_lock(&lock->wait_lock);
807 set_current_state(state);
808 }
809
810 return ret;
811}
812
813static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
814 struct rt_mutex_waiter *w)
815{
816 /*
817 * If the result is not -EDEADLOCK or the caller requested
818 * deadlock detection, nothing to do here.
819 */
820 if (res != -EDEADLOCK || detect_deadlock)
821 return;
822
823 /*
824 * Yell loudly and stop the task right here.
825 */
826 rt_mutex_print_deadlock(w);
827 while (1) {
828 set_current_state(TASK_INTERRUPTIBLE);
829 schedule();
830 }
831}
832
833/*
834 * Slow path lock function:
835 */
836static int __sched
837rt_mutex_slowlock(struct rt_mutex *lock, int state,
838 struct hrtimer_sleeper *timeout,
839 int detect_deadlock)
840{
841 struct rt_mutex_waiter waiter;
842 int ret = 0;
843
844 debug_rt_mutex_init_waiter(&waiter);
845 RB_CLEAR_NODE(&waiter.pi_tree_entry);
846 RB_CLEAR_NODE(&waiter.tree_entry);
847
848 raw_spin_lock(&lock->wait_lock);
849
850 /* Try to acquire the lock again: */
851 if (try_to_take_rt_mutex(lock, current, NULL)) {
852 raw_spin_unlock(&lock->wait_lock);
853 return 0;
854 }
855
856 set_current_state(state);
857
858 /* Setup the timer, when timeout != NULL */
859 if (unlikely(timeout)) {
860 hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
861 if (!hrtimer_active(&timeout->timer))
862 timeout->task = NULL;
863 }
864
865 ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock);
866
867 if (likely(!ret))
868 ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
869
870 set_current_state(TASK_RUNNING);
871
872 if (unlikely(ret)) {
873 remove_waiter(lock, &waiter);
874 rt_mutex_handle_deadlock(ret, detect_deadlock, &waiter);
875 }
876
877 /*
878 * try_to_take_rt_mutex() sets the waiter bit
879 * unconditionally. We might have to fix that up.
880 */
881 fixup_rt_mutex_waiters(lock);
882
883 raw_spin_unlock(&lock->wait_lock);
884
885 /* Remove pending timer: */
886 if (unlikely(timeout))
887 hrtimer_cancel(&timeout->timer);
888
889 debug_rt_mutex_free_waiter(&waiter);
890
891 return ret;
892}
893
894/*
895 * Slow path try-lock function:
896 */
897static inline int
898rt_mutex_slowtrylock(struct rt_mutex *lock)
899{
900 int ret = 0;
901
902 raw_spin_lock(&lock->wait_lock);
903
904 if (likely(rt_mutex_owner(lock) != current)) {
905
906 ret = try_to_take_rt_mutex(lock, current, NULL);
907 /*
908 * try_to_take_rt_mutex() sets the lock waiters
909 * bit unconditionally. Clean this up.
910 */
911 fixup_rt_mutex_waiters(lock);
912 }
913
914 raw_spin_unlock(&lock->wait_lock);
915
916 return ret;
917}
918
919/*
920 * Slow path to release a rt-mutex:
921 */
922static void __sched
923rt_mutex_slowunlock(struct rt_mutex *lock)
924{
925 raw_spin_lock(&lock->wait_lock);
926
927 debug_rt_mutex_unlock(lock);
928
929 rt_mutex_deadlock_account_unlock(current);
930
931 if (!rt_mutex_has_waiters(lock)) {
932 lock->owner = NULL;
933 raw_spin_unlock(&lock->wait_lock);
934 return;
935 }
936
937 wakeup_next_waiter(lock);
938
939 raw_spin_unlock(&lock->wait_lock);
940
941 /* Undo pi boosting if necessary: */
942 rt_mutex_adjust_prio(current);
943}
944
945/*
946 * debug aware fast / slowpath lock,trylock,unlock
947 *
948 * The atomic acquire/release ops are compiled away, when either the
949 * architecture does not support cmpxchg or when debugging is enabled.
950 */
951static inline int
952rt_mutex_fastlock(struct rt_mutex *lock, int state,
953 int detect_deadlock,
954 int (*slowfn)(struct rt_mutex *lock, int state,
955 struct hrtimer_sleeper *timeout,
956 int detect_deadlock))
957{
958 if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
959 rt_mutex_deadlock_account_lock(lock, current);
960 return 0;
961 } else
962 return slowfn(lock, state, NULL, detect_deadlock);
963}
964
965static inline int
966rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
967 struct hrtimer_sleeper *timeout, int detect_deadlock,
968 int (*slowfn)(struct rt_mutex *lock, int state,
969 struct hrtimer_sleeper *timeout,
970 int detect_deadlock))
971{
972 if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
973 rt_mutex_deadlock_account_lock(lock, current);
974 return 0;
975 } else
976 return slowfn(lock, state, timeout, detect_deadlock);
977}
978
979static inline int
980rt_mutex_fasttrylock(struct rt_mutex *lock,
981 int (*slowfn)(struct rt_mutex *lock))
982{
983 if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
984 rt_mutex_deadlock_account_lock(lock, current);
985 return 1;
986 }
987 return slowfn(lock);
988}
989
990static inline void
991rt_mutex_fastunlock(struct rt_mutex *lock,
992 void (*slowfn)(struct rt_mutex *lock))
993{
994 if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
995 rt_mutex_deadlock_account_unlock(current);
996 else
997 slowfn(lock);
998}
999
1000/**
1001 * rt_mutex_lock - lock a rt_mutex
1002 *
1003 * @lock: the rt_mutex to be locked
1004 */
1005void __sched rt_mutex_lock(struct rt_mutex *lock)
1006{
1007 might_sleep();
1008
1009 rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
1010}
1011EXPORT_SYMBOL_GPL(rt_mutex_lock);
1012
1013/**
1014 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
1015 *
1016 * @lock: the rt_mutex to be locked
1017 * @detect_deadlock: deadlock detection on/off
1018 *
1019 * Returns:
1020 * 0 on success
1021 * -EINTR when interrupted by a signal
1022 * -EDEADLK when the lock would deadlock (when deadlock detection is on)
1023 */
1024int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
1025 int detect_deadlock)
1026{
1027 might_sleep();
1028
1029 return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
1030 detect_deadlock, rt_mutex_slowlock);
1031}
1032EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
1033
1034/**
1035 * rt_mutex_timed_lock - lock a rt_mutex interruptible
1036 * the timeout structure is provided
1037 * by the caller
1038 *
1039 * @lock: the rt_mutex to be locked
1040 * @timeout: timeout structure or NULL (no timeout)
1041 * @detect_deadlock: deadlock detection on/off
1042 *
1043 * Returns:
1044 * 0 on success
1045 * -EINTR when interrupted by a signal
1046 * -ETIMEDOUT when the timeout expired
1047 * -EDEADLK when the lock would deadlock (when deadlock detection is on)
1048 */
1049int
1050rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
1051 int detect_deadlock)
1052{
1053 might_sleep();
1054
1055 return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
1056 detect_deadlock, rt_mutex_slowlock);
1057}
1058EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
1059
1060/**
1061 * rt_mutex_trylock - try to lock a rt_mutex
1062 *
1063 * @lock: the rt_mutex to be locked
1064 *
1065 * Returns 1 on success and 0 on contention
1066 */
1067int __sched rt_mutex_trylock(struct rt_mutex *lock)
1068{
1069 return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
1070}
1071EXPORT_SYMBOL_GPL(rt_mutex_trylock);
1072
1073/**
1074 * rt_mutex_unlock - unlock a rt_mutex
1075 *
1076 * @lock: the rt_mutex to be unlocked
1077 */
1078void __sched rt_mutex_unlock(struct rt_mutex *lock)
1079{
1080 rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
1081}
1082EXPORT_SYMBOL_GPL(rt_mutex_unlock);
1083
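/*
 * Minimal usage sketch of the public API above. This is hypothetical
 * caller code, compiled out; my_mutex and do_protected_work() are made-up
 * names.
 */
#if 0	/* example only */
static struct rt_mutex my_mutex;

static void example_rt_mutex_usage(void)
{
	__rt_mutex_init(&my_mutex, "my_mutex");

	rt_mutex_lock(&my_mutex);	/* may PI-boost the current owner */
	do_protected_work();
	rt_mutex_unlock(&my_mutex);	/* undoes any boosting of ourselves */

	if (rt_mutex_trylock(&my_mutex)) {	/* non-blocking attempt */
		do_protected_work();
		rt_mutex_unlock(&my_mutex);
	}
}
#endif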
1084/**
1085 * rt_mutex_destroy - mark a mutex unusable
1086 * @lock: the mutex to be destroyed
1087 *
1088 * This function marks the mutex uninitialized, and any subsequent
1089 * use of the mutex is forbidden. The mutex must not be locked when
1090 * this function is called.
1091 */
1092void rt_mutex_destroy(struct rt_mutex *lock)
1093{
1094 WARN_ON(rt_mutex_is_locked(lock));
1095#ifdef CONFIG_DEBUG_RT_MUTEXES
1096 lock->magic = NULL;
1097#endif
1098}
1099
1100EXPORT_SYMBOL_GPL(rt_mutex_destroy);
1101
1102/**
1103 * __rt_mutex_init - initialize the rt lock
1104 *
1105 * @lock: the rt lock to be initialized
1106 *
1107 * Initialize the rt lock to unlocked state.
1108 *
1109 * Initializing of a locked rt lock is not allowed
1110 */
1111void __rt_mutex_init(struct rt_mutex *lock, const char *name)
1112{
1113 lock->owner = NULL;
1114 raw_spin_lock_init(&lock->wait_lock);
1115 lock->waiters = RB_ROOT;
1116 lock->waiters_leftmost = NULL;
1117
1118 debug_rt_mutex_init(lock, name);
1119}
1120EXPORT_SYMBOL_GPL(__rt_mutex_init);
1121
1122/**
1123 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
1124 * proxy owner
1125 *
1126 * @lock: the rt_mutex to be locked
1127 * @proxy_owner:the task to set as owner
1128 *
1129 * No locking. Caller has to do serializing itself
1130 * Special API call for PI-futex support
1131 */
1132void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
1133 struct task_struct *proxy_owner)
1134{
1135 __rt_mutex_init(lock, NULL);
1136 debug_rt_mutex_proxy_lock(lock, proxy_owner);
1137 rt_mutex_set_owner(lock, proxy_owner);
1138 rt_mutex_deadlock_account_lock(lock, proxy_owner);
1139}
1140
1141/**
1142 * rt_mutex_proxy_unlock - release a lock on behalf of owner
1143 *
1144 * @lock: the rt_mutex to be locked
1145 *
1146 * No locking. Caller has to do serializing itself
1147 * Special API call for PI-futex support
1148 */
1149void rt_mutex_proxy_unlock(struct rt_mutex *lock,
1150 struct task_struct *proxy_owner)
1151{
1152 debug_rt_mutex_proxy_unlock(lock);
1153 rt_mutex_set_owner(lock, NULL);
1154 rt_mutex_deadlock_account_unlock(proxy_owner);
1155}
1156
1157/**
1158 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
1159 * @lock: the rt_mutex to take
1160 * @waiter: the pre-initialized rt_mutex_waiter
1161 * @task: the task to prepare
1162 * @detect_deadlock: perform deadlock detection (1) or not (0)
1163 *
1164 * Returns:
1165 * 0 - task blocked on lock
1166 * 1 - acquired the lock for task, caller should wake it up
1167 * <0 - error
1168 *
1169 * Special API call for FUTEX_REQUEUE_PI support.
1170 */
1171int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
1172 struct rt_mutex_waiter *waiter,
1173 struct task_struct *task, int detect_deadlock)
1174{
1175 int ret;
1176
1177 raw_spin_lock(&lock->wait_lock);
1178
1179 if (try_to_take_rt_mutex(lock, task, NULL)) {
1180 raw_spin_unlock(&lock->wait_lock);
1181 return 1;
1182 }
1183
1184 /* We enforce deadlock detection for futexes */
1185 ret = task_blocks_on_rt_mutex(lock, waiter, task, 1);
1186
1187 if (ret && !rt_mutex_owner(lock)) {
1188 /*
1189 * Reset the return value. We might have
1190 * returned with -EDEADLK and the owner
1191 * released the lock while we were walking the
1192 * pi chain. Let the waiter sort it out.
1193 */
1194 ret = 0;
1195 }
1196
1197 if (unlikely(ret))
1198 remove_waiter(lock, waiter);
1199
1200 raw_spin_unlock(&lock->wait_lock);
1201
1202 debug_rt_mutex_print_deadlock(waiter);
1203
1204 return ret;
1205}
1206
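/*
 * Sketch of the intended requeue-PI flow (simplified; the futex code is
 * the real caller): the task performing the requeue calls
 * rt_mutex_start_proxy_lock() on behalf of a woken waiter. A return of 1
 * means the lock was acquired for @task and it only needs waking; 0 means
 * @task is now queued on the rtmutex and completes the acquisition itself
 * in rt_mutex_finish_proxy_lock() below once it runs.
 */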
1207/**
1208 * rt_mutex_next_owner - return the next owner of the lock
1209 *
1210 * @lock: the rt lock query
1211 *
1212 * Returns the next owner of the lock or NULL
1213 *
1214 * Caller has to serialize against other accessors to the lock
1215 * itself.
1216 *
1217 * Special API call for PI-futex support
1218 */
1219struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
1220{
1221 if (!rt_mutex_has_waiters(lock))
1222 return NULL;
1223
1224 return rt_mutex_top_waiter(lock)->task;
1225}
1226
1227/**
1228 * rt_mutex_finish_proxy_lock() - Complete lock acquisition
1229 * @lock: the rt_mutex we were woken on
1230 * @to: the timeout, null if none. hrtimer should already have
1231 * been started.
1232 * @waiter: the pre-initialized rt_mutex_waiter
1233 * @detect_deadlock: perform deadlock detection (1) or not (0)
1234 *
1235 * Complete the lock acquisition started on our behalf by another thread.
1236 *
1237 * Returns:
1238 * 0 - success
1239 * <0 - error, one of -EINTR, -ETIMEDOUT, or -EDEADLK
1240 *
1241 * Special API call for PI-futex requeue support
1242 */
1243int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
1244 struct hrtimer_sleeper *to,
1245 struct rt_mutex_waiter *waiter,
1246 int detect_deadlock)
1247{
1248 int ret;
1249
1250 raw_spin_lock(&lock->wait_lock);
1251
1252 set_current_state(TASK_INTERRUPTIBLE);
1253
1254 ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
1255
1256 set_current_state(TASK_RUNNING);
1257
1258 if (unlikely(ret))
1259 remove_waiter(lock, waiter);
1260
1261 /*
1262 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
1263 * have to fix that up.
1264 */
1265 fixup_rt_mutex_waiters(lock);
1266
1267 raw_spin_unlock(&lock->wait_lock);
1268
1269 return ret;
1270}