/*
 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner.
 *
 * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
 * Copyright (C) 2006 Esben Nielsen
 *
 * See Documentation/rt-mutex-design.txt for details.
 */
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/timer.h>

#include "rtmutex_common.h"

/*
 * lock->owner state tracking:
 *
 * lock->owner holds the task_struct pointer of the owner. Bit 0
 * is used to keep track of the "lock has waiters" state.
 *
 * owner	bit0
 * NULL		0	lock is free (fast acquire possible)
 * NULL		1	lock is free and has waiters and the top waiter
 *			is going to take the lock*
 * taskpointer	0	lock is held (fast release possible)
 * taskpointer	1	lock is held and has waiters**
 *
 * The fast atomic compare exchange based acquire and release is only
 * possible when bit 0 of lock->owner is 0.
 *
 * (*) This can also be a transitional state when the lock is grabbed
 * with ->wait_lock held. To prevent any fast path cmpxchg of the lock,
 * we need to set bit 0 before looking at the lock, and the owner may
 * be NULL during this small window, hence this can be a transitional
 * state.
 *
 * (**) There is a small window in which bit 0 is set but there are no
 * waiters. This can happen when grabbing the lock in the slow path.
 * To prevent a cmpxchg of the owner releasing the lock, we need to
 * set this bit before looking at the lock.
 */
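
/*
 * For illustration, a sketch of how an owner pointer is decoded (the
 * real helper is rt_mutex_owner() in rtmutex_common.h, which, modulo
 * naming, masks off the state bit like this):
 *
 *	owner = (struct task_struct *)
 *		((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
 */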

static void
rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
{
	unsigned long val = (unsigned long)owner;

	if (rt_mutex_has_waiters(lock))
		val |= RT_MUTEX_HAS_WAITERS;

	lock->owner = (struct task_struct *)val;
}

static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}

static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
	if (!rt_mutex_has_waiters(lock))
		clear_rt_mutex_waiters(lock);
}

/*
 * We can speed up the acquire/release, if the architecture
 * supports cmpxchg and if there's no debugging state to be set up
 */
#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
# define rt_mutex_cmpxchg(l,c,n)	(cmpxchg(&l->owner, c, n) == c)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	unsigned long owner, *p = (unsigned long *) &lock->owner;

	do {
		owner = *p;
	} while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
}
#else
# define rt_mutex_cmpxchg(l,c,n)	(0)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}
#endif
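
/*
 * How the fast and slow paths compose (a sketch, not a definitive
 * recipe): the whole fast path is a single cmpxchg of lock->owner
 * from NULL to current,
 *
 *	if (rt_mutex_cmpxchg(lock, NULL, current))
 *		return;			(uncontended: acquired)
 *
 * and everything else falls back to a slow path that serializes on
 * lock->wait_lock. See the rt_mutex_fast*() helpers further down.
 */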

static inline int
rt_mutex_waiter_less(struct rt_mutex_waiter *left,
		     struct rt_mutex_waiter *right)
{
	if (left->prio < right->prio)
		return 1;

	/*
	 * If both waiters have dl_prio(), we check the deadlines of the
	 * associated tasks.
	 * If left waiter has a dl_prio(), and we didn't return 1 above,
	 * then right waiter has a dl_prio() too.
	 */
	if (dl_prio(left->prio))
		return (left->task->dl.deadline < right->task->dl.deadline);

	return 0;
}
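
/*
 * Note that waiters of equal priority are not "less" than each other,
 * so rt_mutex_enqueue() below sends them to the right subtree: among
 * equal-priority waiters the queueing is effectively FIFO.
 */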

static void
rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
	struct rb_node **link = &lock->waiters.rb_node;
	struct rb_node *parent = NULL;
	struct rt_mutex_waiter *entry;
	int leftmost = 1;

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);
		if (rt_mutex_waiter_less(waiter, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	if (leftmost)
		lock->waiters_leftmost = &waiter->tree_entry;

	rb_link_node(&waiter->tree_entry, parent, link);
	rb_insert_color(&waiter->tree_entry, &lock->waiters);
}

static void
rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
	if (RB_EMPTY_NODE(&waiter->tree_entry))
		return;

	if (lock->waiters_leftmost == &waiter->tree_entry)
		lock->waiters_leftmost = rb_next(&waiter->tree_entry);

	rb_erase(&waiter->tree_entry, &lock->waiters);
	RB_CLEAR_NODE(&waiter->tree_entry);
}

static void
rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
	struct rb_node **link = &task->pi_waiters.rb_node;
	struct rb_node *parent = NULL;
	struct rt_mutex_waiter *entry;
	int leftmost = 1;

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry);
		if (rt_mutex_waiter_less(waiter, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	if (leftmost)
		task->pi_waiters_leftmost = &waiter->pi_tree_entry;

	rb_link_node(&waiter->pi_tree_entry, parent, link);
	rb_insert_color(&waiter->pi_tree_entry, &task->pi_waiters);
}

static void
rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
	if (RB_EMPTY_NODE(&waiter->pi_tree_entry))
		return;

	if (task->pi_waiters_leftmost == &waiter->pi_tree_entry)
		task->pi_waiters_leftmost = rb_next(&waiter->pi_tree_entry);

	rb_erase(&waiter->pi_tree_entry, &task->pi_waiters);
	RB_CLEAR_NODE(&waiter->pi_tree_entry);
}

/*
 * Calculate task priority from the waiter tree priority
 *
 * Return task->normal_prio when the waiter tree is empty or when
 * the waiter is not allowed to do priority boosting
 */
int rt_mutex_getprio(struct task_struct *task)
{
	if (likely(!task_has_pi_waiters(task)))
		return task->normal_prio;

	return min(task_top_pi_waiter(task)->prio,
		   task->normal_prio);
}

struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
{
	if (likely(!task_has_pi_waiters(task)))
		return NULL;

	return task_top_pi_waiter(task)->task;
}

/*
 * Called by sched_setscheduler() to check whether the priority change
 * is overruled by a possible priority boosting.
 */
int rt_mutex_check_prio(struct task_struct *task, int newprio)
{
	if (!task_has_pi_waiters(task))
		return 0;

	return task_top_pi_waiter(task)->task->prio <= newprio;
}
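
/*
 * An informal reading (not new policy): a nonzero return tells
 * sched_setscheduler() that dropping the task to newprio would be
 * overridden anyway, because the top pi waiter already boosts the
 * task to newprio or above.
 */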

/*
 * Adjust the priority of a task, after its pi_waiters got modified.
 *
 * This can be both boosting and unboosting. task->pi_lock must be held.
 */
static void __rt_mutex_adjust_prio(struct task_struct *task)
{
	int prio = rt_mutex_getprio(task);

	if (task->prio != prio || dl_prio(prio))
		rt_mutex_setprio(task, prio);
}

/*
 * Adjust task priority (undo boosting). Called from the exit path of
 * rt_mutex_slowunlock() and rt_mutex_slowlock().
 *
 * (Note: We do this outside of the protection of lock->wait_lock to
 * allow the lock to be taken while or before we readjust the priority
 * of task. We do not use the spin_xx_mutex() variants here as we are
 * outside of the debug path.)
 */
static void rt_mutex_adjust_prio(struct task_struct *task)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&task->pi_lock, flags);
	__rt_mutex_adjust_prio(task);
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
}

/*
 * Max number of times we'll walk the boosting chain:
 */
int max_lock_depth = 1024;

/*
 * Adjust the priority chain. Also used for deadlock detection.
 * Decreases task's usage by one - may thus free the task.
 *
 * @task:	the task owning the mutex (owner) for which a chain walk is
 *		probably needed
 * @deadlock_detect: do we have to carry out deadlock detection?
 * @orig_lock:	the mutex (can be NULL if we are walking the chain to recheck
 *		things for a task that has just got its priority adjusted, and
 *		is waiting on a mutex)
 * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
 *		its priority to the mutex owner (can be NULL in the case
 *		depicted above or if the top waiter has gone away and we are
 *		actually deboosting the owner)
 * @top_task:	the current top waiter
 *
 * Returns 0 or -EDEADLK.
 */
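
/*
 * A worked example of the walk (a sketch; see
 * Documentation/rt-mutex-design.txt for the full treatment): suppose
 * low-priority T3 owns L2, medium-priority T2 owns L1 and blocks on
 * L2, and high-priority T1 then blocks on L1. T1 boosts T2; since T2
 * is itself blocked, the walk follows T2->pi_blocked_on to L2 and
 * boosts T3 as well, one lock/task pair per iteration, until a task
 * that is not blocked (or the depth limit) ends the chain.
 */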
static int rt_mutex_adjust_prio_chain(struct task_struct *task,
				      int deadlock_detect,
				      struct rt_mutex *orig_lock,
				      struct rt_mutex_waiter *orig_waiter,
				      struct task_struct *top_task)
{
	struct rt_mutex *lock;
	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
	int detect_deadlock, ret = 0, depth = 0;
	unsigned long flags;

	detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
							 deadlock_detect);

	/*
	 * The (de)boosting is a step by step approach with a lot of
	 * pitfalls. We want this to be preemptible and we want to hold
	 * a maximum of two locks per step. So we have to check
	 * carefully whether things change under us.
	 */
 again:
	if (++depth > max_lock_depth) {
		static int prev_max;

		/*
		 * Print this only once. If the admin changes the limit,
		 * print a new message when reaching the limit again.
		 */
		if (prev_max != max_lock_depth) {
			prev_max = max_lock_depth;
			printk(KERN_WARNING "Maximum lock depth %d reached "
			       "task: %s (%d)\n", max_lock_depth,
			       top_task->comm, task_pid_nr(top_task));
		}
		put_task_struct(task);

		return deadlock_detect ? -EDEADLK : 0;
	}
 retry:
	/*
	 * The task cannot go away, as we did a get_task_struct() before!
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	/*
	 * Check whether the end of the boosting chain has been
	 * reached or the state of the chain has changed while we
	 * dropped the locks.
	 */
	if (!waiter)
		goto out_unlock_pi;

	/*
	 * Check the orig_waiter state. After we dropped the locks,
	 * the previous owner of the lock might have released the lock.
	 */
	if (orig_waiter && !rt_mutex_owner(orig_lock))
		goto out_unlock_pi;

	/*
	 * Drop out when the task has no waiters. Note that top_waiter
	 * can be NULL when we are in deboosting mode!
	 */
	if (top_waiter) {
		if (!task_has_pi_waiters(task))
			goto out_unlock_pi;
		/*
		 * If deadlock detection is off, we stop here if we
		 * are not the top pi waiter of the task.
		 */
		if (!detect_deadlock && top_waiter != task_top_pi_waiter(task))
			goto out_unlock_pi;
	}

	/*
	 * When deadlock detection is off, we check whether further
	 * priority adjustment is necessary.
	 */
	if (!detect_deadlock && waiter->prio == task->prio)
		goto out_unlock_pi;

	lock = waiter->lock;
	if (!raw_spin_trylock(&lock->wait_lock)) {
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		cpu_relax();
		goto retry;
	}

	/*
	 * Deadlock detection. If the lock is the same as the original
	 * lock which caused us to walk the lock chain or if the
	 * current lock is owned by the task which initiated the chain
	 * walk, we detected a deadlock.
	 */
	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
		debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
		raw_spin_unlock(&lock->wait_lock);
		ret = deadlock_detect ? -EDEADLK : 0;
		goto out_unlock_pi;
	}

	top_waiter = rt_mutex_top_waiter(lock);

	/* Requeue the waiter */
	rt_mutex_dequeue(lock, waiter);
	waiter->prio = task->prio;
	rt_mutex_enqueue(lock, waiter);

	/* Release the task */
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
	if (!rt_mutex_owner(lock)) {
		/*
		 * If the requeue above changed the top waiter, then we need
		 * to wake the new top waiter up to try to get the lock.
		 */
		if (top_waiter != rt_mutex_top_waiter(lock))
			wake_up_process(rt_mutex_top_waiter(lock)->task);
		raw_spin_unlock(&lock->wait_lock);
		goto out_put_task;
	}
	put_task_struct(task);

	/* Grab the next task */
	task = rt_mutex_owner(lock);
	get_task_struct(task);
	raw_spin_lock_irqsave(&task->pi_lock, flags);

	if (waiter == rt_mutex_top_waiter(lock)) {
		/* Boost the owner */
		rt_mutex_dequeue_pi(task, top_waiter);
		rt_mutex_enqueue_pi(task, waiter);
		__rt_mutex_adjust_prio(task);

	} else if (top_waiter == waiter) {
		/* Deboost the owner */
		rt_mutex_dequeue_pi(task, waiter);
		waiter = rt_mutex_top_waiter(lock);
		rt_mutex_enqueue_pi(task, waiter);
		__rt_mutex_adjust_prio(task);
	}

	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	top_waiter = rt_mutex_top_waiter(lock);
	raw_spin_unlock(&lock->wait_lock);

	if (!detect_deadlock && waiter != top_waiter)
		goto out_put_task;

	goto again;

 out_unlock_pi:
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 out_put_task:
	put_task_struct(task);

	return ret;
}

/*
 * Try to take an rt-mutex
 *
 * Must be called with lock->wait_lock held.
 *
 * @lock:   the lock to be acquired.
 * @task:   the task which wants to acquire the lock
 * @waiter: the waiter that is queued to the lock's wait list. (could be NULL)
 */
static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
				struct rt_mutex_waiter *waiter)
{
	/*
	 * We have to be careful here if the atomic speedups are
	 * enabled, such that, when
	 *  - no other waiter is on the lock
	 *  - the lock has been released since we did the cmpxchg
	 * the lock can be released or taken while we are doing the
	 * checks and marking the lock with RT_MUTEX_HAS_WAITERS.
	 *
	 * The atomic acquire/release aware variant of
	 * mark_rt_mutex_waiters uses a cmpxchg loop. After setting
	 * the WAITERS bit, the atomic release / acquire can not
	 * happen anymore and lock->wait_lock protects us from the
	 * non-atomic case.
	 *
	 * Note that this might set lock->owner =
	 * RT_MUTEX_HAS_WAITERS in the case the lock is not contended
	 * any more. This is fixed up when we take the ownership.
	 * This is the transitional state explained at the top of this file.
	 */
	mark_rt_mutex_waiters(lock);

	if (rt_mutex_owner(lock))
		return 0;

	/*
	 * The task will get the lock if one of these conditions holds:
	 * 1) there is no waiter
	 * 2) it has a higher priority than all waiters
	 * 3) it is the top waiter
	 */
	if (rt_mutex_has_waiters(lock)) {
		if (task->prio >= rt_mutex_top_waiter(lock)->prio) {
			if (!waiter || waiter != rt_mutex_top_waiter(lock))
				return 0;
		}
	}

	if (waiter || rt_mutex_has_waiters(lock)) {
		unsigned long flags;
		struct rt_mutex_waiter *top;

		raw_spin_lock_irqsave(&task->pi_lock, flags);

		/* remove the queued waiter. */
		if (waiter) {
			rt_mutex_dequeue(lock, waiter);
			task->pi_blocked_on = NULL;
		}

		/*
		 * We have to enqueue the top waiter (if it exists) into
		 * the task->pi_waiters tree.
		 */
		if (rt_mutex_has_waiters(lock)) {
			top = rt_mutex_top_waiter(lock);
			rt_mutex_enqueue_pi(task, top);
		}
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
	}

	/* We got the lock. */
	debug_rt_mutex_lock(lock);

	rt_mutex_set_owner(lock, task);

	rt_mutex_deadlock_account_lock(lock, task);

	return 1;
}
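
/*
 * Note for callers (restating the contract spelled out at the fixup
 * sites below): try_to_take_rt_mutex() sets the waiters bit
 * unconditionally via mark_rt_mutex_waiters(), so every slow path
 * calling it must clean up with fixup_rt_mutex_waiters() before
 * dropping lock->wait_lock.
 */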

/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate pi chain
 *
 * This must be called with lock->wait_lock held.
 */
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
				   struct rt_mutex_waiter *waiter,
				   struct task_struct *task,
				   int detect_deadlock)
{
	struct task_struct *owner = rt_mutex_owner(lock);
	struct rt_mutex_waiter *top_waiter = waiter;
	unsigned long flags;
	int chain_walk = 0, res;

	/*
	 * Early deadlock detection. We really don't want the task to
	 * enqueue on itself just to untangle the mess later. It's not
	 * only an optimization. We drop the locks, so another waiter
	 * can come in before the chain walk detects the deadlock. So
	 * the other will detect the deadlock and return -EDEADLOCK,
	 * which is wrong, as the other waiter is not in a deadlock
	 * situation.
	 */
	if (detect_deadlock && owner == task)
		return -EDEADLK;

	raw_spin_lock_irqsave(&task->pi_lock, flags);
	__rt_mutex_adjust_prio(task);
	waiter->task = task;
	waiter->lock = lock;
	waiter->prio = task->prio;

	/* Get the top priority waiter on the lock */
	if (rt_mutex_has_waiters(lock))
		top_waiter = rt_mutex_top_waiter(lock);
	rt_mutex_enqueue(lock, waiter);

	task->pi_blocked_on = waiter;

	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	if (!owner)
		return 0;

	if (waiter == rt_mutex_top_waiter(lock)) {
		raw_spin_lock_irqsave(&owner->pi_lock, flags);
		rt_mutex_dequeue_pi(owner, top_waiter);
		rt_mutex_enqueue_pi(owner, waiter);

		__rt_mutex_adjust_prio(owner);
		if (owner->pi_blocked_on)
			chain_walk = 1;
		raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
	} else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
		chain_walk = 1;

	if (!chain_walk)
		return 0;

	/*
	 * The owner can't disappear while holding a lock,
	 * so the owner struct is protected by wait_lock.
	 * Gets dropped in rt_mutex_adjust_prio_chain()!
	 */
	get_task_struct(owner);

	raw_spin_unlock(&lock->wait_lock);

	res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
					 task);

	raw_spin_lock(&lock->wait_lock);

	return res;
}

/*
 * Wake up the next waiter on the lock.
 *
 * Remove the top waiter from the current task's waiter list and wake it up.
 *
 * Called with lock->wait_lock held.
 */
static void wakeup_next_waiter(struct rt_mutex *lock)
{
	struct rt_mutex_waiter *waiter;
	unsigned long flags;

	raw_spin_lock_irqsave(&current->pi_lock, flags);

	waiter = rt_mutex_top_waiter(lock);

	/*
	 * Remove it from current->pi_waiters. We do not adjust a
	 * possible priority boost right now. We execute wakeup in the
	 * boosted mode and go back to normal after releasing
	 * lock->wait_lock.
	 */
	rt_mutex_dequeue_pi(current, waiter);

	rt_mutex_set_owner(lock, NULL);

	raw_spin_unlock_irqrestore(&current->pi_lock, flags);

	wake_up_process(waiter->task);
}
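
/*
 * A note on the NULL owner above, tying this back to the state table
 * at the top of the file: with the woken waiter still enqueued on the
 * lock, rt_mutex_set_owner(lock, NULL) stores NULL with bit 0 set,
 * the transitional "top waiter is going to take the lock" state
 * marked (*) there. That keeps the fast path cmpxchg disabled until
 * the waiter actually takes the lock.
 */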

/*
 * Remove a waiter from a lock and give up
 *
 * Must be called with lock->wait_lock held, and only after
 * try_to_take_rt_mutex() has just failed.
 */
static void remove_waiter(struct rt_mutex *lock,
			  struct rt_mutex_waiter *waiter)
{
	int first = (waiter == rt_mutex_top_waiter(lock));
	struct task_struct *owner = rt_mutex_owner(lock);
	unsigned long flags;
	int chain_walk = 0;

	raw_spin_lock_irqsave(&current->pi_lock, flags);
	rt_mutex_dequeue(lock, waiter);
	current->pi_blocked_on = NULL;
	raw_spin_unlock_irqrestore(&current->pi_lock, flags);

	if (!owner)
		return;

	if (first) {
		raw_spin_lock_irqsave(&owner->pi_lock, flags);

		rt_mutex_dequeue_pi(owner, waiter);

		if (rt_mutex_has_waiters(lock)) {
			struct rt_mutex_waiter *next;

			next = rt_mutex_top_waiter(lock);
			rt_mutex_enqueue_pi(owner, next);
		}
		__rt_mutex_adjust_prio(owner);

		if (owner->pi_blocked_on)
			chain_walk = 1;

		raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
	}

	if (!chain_walk)
		return;

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(owner);

	raw_spin_unlock(&lock->wait_lock);

	rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);

	raw_spin_lock(&lock->wait_lock);
}

/*
 * Recheck the pi chain, in case we got a new priority setting.
 *
 * Called from sched_setscheduler()
 */
void rt_mutex_adjust_pi(struct task_struct *task)
{
	struct rt_mutex_waiter *waiter;
	unsigned long flags;

	raw_spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	if (!waiter || (waiter->prio == task->prio &&
			!dl_prio(task->prio))) {
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		return;
	}

	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(task);
	rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
}

/**
 * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
 * @lock:	the rt_mutex to take
 * @state:	the state the task should block in (TASK_INTERRUPTIBLE
 *		or TASK_UNINTERRUPTIBLE)
 * @timeout:	the pre-initialized and started timer, or NULL for none
 * @waiter:	the pre-initialized rt_mutex_waiter
 *
 * lock->wait_lock must be held by the caller.
 */
static int __sched
__rt_mutex_slowlock(struct rt_mutex *lock, int state,
		    struct hrtimer_sleeper *timeout,
		    struct rt_mutex_waiter *waiter)
{
	int ret = 0;

	for (;;) {
		/* Try to acquire the lock: */
		if (try_to_take_rt_mutex(lock, current, waiter))
			break;

		/*
		 * TASK_INTERRUPTIBLE checks for signals and
		 * timeout. Ignored otherwise.
		 */
		if (unlikely(state == TASK_INTERRUPTIBLE)) {
			/* Signal pending? */
			if (signal_pending(current))
				ret = -EINTR;
			if (timeout && !timeout->task)
				ret = -ETIMEDOUT;
			if (ret)
				break;
		}

		raw_spin_unlock(&lock->wait_lock);

		debug_rt_mutex_print_deadlock(waiter);

		schedule_rt_mutex(lock);

		raw_spin_lock(&lock->wait_lock);
		set_current_state(state);
	}

	return ret;
}

/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
		  struct hrtimer_sleeper *timeout,
		  int detect_deadlock)
{
	struct rt_mutex_waiter waiter;
	int ret = 0;

	debug_rt_mutex_init_waiter(&waiter);
	RB_CLEAR_NODE(&waiter.pi_tree_entry);
	RB_CLEAR_NODE(&waiter.tree_entry);

	raw_spin_lock(&lock->wait_lock);

	/* Try to acquire the lock again: */
	if (try_to_take_rt_mutex(lock, current, NULL)) {
		raw_spin_unlock(&lock->wait_lock);
		return 0;
	}

	set_current_state(state);

	/* Set up the timer when timeout != NULL */
	if (unlikely(timeout)) {
		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
		if (!hrtimer_active(&timeout->timer))
			timeout->task = NULL;
	}

	ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock);

	if (likely(!ret))
		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);

	set_current_state(TASK_RUNNING);

	if (unlikely(ret))
		remove_waiter(lock, &waiter);

	/*
	 * try_to_take_rt_mutex() sets the waiter bit
	 * unconditionally. We might have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock(&lock->wait_lock);

	/* Remove pending timer: */
	if (unlikely(timeout))
		hrtimer_cancel(&timeout->timer);

	debug_rt_mutex_free_waiter(&waiter);

	return ret;
}

/*
 * Slow path try-lock function:
 */
static inline int
rt_mutex_slowtrylock(struct rt_mutex *lock)
{
	int ret = 0;

	raw_spin_lock(&lock->wait_lock);

	if (likely(rt_mutex_owner(lock) != current)) {
		ret = try_to_take_rt_mutex(lock, current, NULL);
		/*
		 * try_to_take_rt_mutex() sets the lock waiters
		 * bit unconditionally. Clean this up.
		 */
		fixup_rt_mutex_waiters(lock);
	}

	raw_spin_unlock(&lock->wait_lock);

	return ret;
}

/*
 * Slow path to release a rt-mutex:
 */
static void __sched
rt_mutex_slowunlock(struct rt_mutex *lock)
{
	raw_spin_lock(&lock->wait_lock);

	debug_rt_mutex_unlock(lock);

	rt_mutex_deadlock_account_unlock(current);

	if (!rt_mutex_has_waiters(lock)) {
		lock->owner = NULL;
		raw_spin_unlock(&lock->wait_lock);
		return;
	}

	wakeup_next_waiter(lock);

	raw_spin_unlock(&lock->wait_lock);

	/* Undo pi boosting if necessary: */
	rt_mutex_adjust_prio(current);
}

/*
 * debug aware fast / slowpath lock, trylock, unlock
 *
 * The atomic acquire/release ops are compiled away when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
		  int detect_deadlock,
		  int (*slowfn)(struct rt_mutex *lock, int state,
				struct hrtimer_sleeper *timeout,
				int detect_deadlock))
{
	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 0;
	} else
		return slowfn(lock, state, NULL, detect_deadlock);
}

static inline int
rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
			struct hrtimer_sleeper *timeout, int detect_deadlock,
			int (*slowfn)(struct rt_mutex *lock, int state,
				      struct hrtimer_sleeper *timeout,
				      int detect_deadlock))
{
	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 0;
	} else
		return slowfn(lock, state, timeout, detect_deadlock);
}

static inline int
rt_mutex_fasttrylock(struct rt_mutex *lock,
		     int (*slowfn)(struct rt_mutex *lock))
{
	if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 1;
	}
	return slowfn(lock);
}

static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
		    void (*slowfn)(struct rt_mutex *lock))
{
	if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
		rt_mutex_deadlock_account_unlock(current);
	else
		slowfn(lock);
}
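
/*
 * Typical usage of the public API below (a sketch, nothing more):
 *
 *	struct rt_mutex m;
 *
 *	rt_mutex_init(&m);	(wrapper macro from <linux/rtmutex.h>)
 *	rt_mutex_lock(&m);
 *	... critical section; waiters may boost us here ...
 *	rt_mutex_unlock(&m);
 */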

/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
	might_sleep();

	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);

/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptibly
 *
 * @lock:		the rt_mutex to be locked
 * @detect_deadlock:	deadlock detection on/off
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
					int detect_deadlock)
{
	might_sleep();

	return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
				 detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);

/**
 * rt_mutex_timed_lock - lock a rt_mutex interruptibly, with a timeout
 *			 structure provided by the caller
 *
 * @lock:		the rt_mutex to be locked
 * @timeout:		timeout structure or NULL (no timeout)
 * @detect_deadlock:	deadlock detection on/off
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 * -ETIMEDOUT	when the timeout expired
 * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
 */
int
rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
		    int detect_deadlock)
{
	might_sleep();

	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
				       detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
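
/*
 * A caller-side sketch of the timeout setup (assuming the usual
 * hrtimer_sleeper dance; the futex code does something similar):
 *
 *	struct hrtimer_sleeper to;
 *
 *	hrtimer_init_on_stack(&to.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 *	hrtimer_init_sleeper(&to, current);
 *	hrtimer_set_expires(&to.timer, expiry);
 *	ret = rt_mutex_timed_lock(&m, &to, 0);
 *	destroy_hrtimer_on_stack(&to.timer);
 */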

/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 *
 * Returns 1 on success and 0 on contention
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);

/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);

/**
 * rt_mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void rt_mutex_destroy(struct rt_mutex *lock)
{
	WARN_ON(rt_mutex_is_locked(lock));
#ifdef CONFIG_DEBUG_RT_MUTEXES
	lock->magic = NULL;
#endif
}
EXPORT_SYMBOL_GPL(rt_mutex_destroy);

/**
 * __rt_mutex_init - initialize the rt lock
 *
 * @lock: the rt lock to be initialized
 *
 * Initialize the rt lock to unlocked state.
 *
 * Initializing a locked rt lock is not allowed
 */
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
	lock->owner = NULL;
	raw_spin_lock_init(&lock->wait_lock);
	lock->waiters = RB_ROOT;
	lock->waiters_leftmost = NULL;

	debug_rt_mutex_init(lock, name);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);
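
/*
 * On the name argument (an aside, not a requirement of this file): it
 * is only consumed by the debug code, and the rt_mutex_init() wrapper
 * in <linux/rtmutex.h> typically passes the stringified lock variable
 * so debug output can identify the lock.
 */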

/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *				proxy owner
 *
 * @lock:	the rt_mutex to be locked
 * @proxy_owner: the task to set as owner
 *
 * No locking. Caller has to do the serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
				struct task_struct *proxy_owner)
{
	__rt_mutex_init(lock, NULL);
	debug_rt_mutex_proxy_lock(lock, proxy_owner);
	rt_mutex_set_owner(lock, proxy_owner);
	rt_mutex_deadlock_account_lock(lock, proxy_owner);
}

/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock:	the rt_mutex to be released
 *
 * No locking. Caller has to do the serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_proxy_unlock(struct rt_mutex *lock,
			   struct task_struct *proxy_owner)
{
	debug_rt_mutex_proxy_unlock(lock);
	rt_mutex_set_owner(lock, NULL);
	rt_mutex_deadlock_account_unlock(proxy_owner);
}

/**
 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:		the rt_mutex to take
 * @waiter:		the pre-initialized rt_mutex_waiter
 * @task:		the task to prepare
 * @detect_deadlock:	perform deadlock detection (1) or not (0)
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for FUTEX_REQUEUE_PI support.
 */
int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
			      struct rt_mutex_waiter *waiter,
			      struct task_struct *task, int detect_deadlock)
{
	int ret;

	raw_spin_lock(&lock->wait_lock);

	if (try_to_take_rt_mutex(lock, task, NULL)) {
		raw_spin_unlock(&lock->wait_lock);
		return 1;
	}

	ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);

	if (ret && !rt_mutex_owner(lock)) {
		/*
		 * Reset the return value. We might have
		 * returned with -EDEADLK and the owner
		 * released the lock while we were walking the
		 * pi chain. Let the waiter sort it out.
		 */
		ret = 0;
	}

	if (unlikely(ret))
		remove_waiter(lock, waiter);

	raw_spin_unlock(&lock->wait_lock);

	debug_rt_mutex_print_deadlock(waiter);

	return ret;
}

/**
 * rt_mutex_next_owner - return the next owner of the lock
 *
 * @lock: the rt lock query
 *
 * Returns the next owner of the lock or NULL
 *
 * Caller has to serialize against other accessors to the lock
 * itself.
 *
 * Special API call for PI-futex support
 */
struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
{
	if (!rt_mutex_has_waiters(lock))
		return NULL;

	return rt_mutex_top_waiter(lock)->task;
}

/**
 * rt_mutex_finish_proxy_lock() - Complete lock acquisition
 * @lock:		the rt_mutex we were woken on
 * @to:			the timeout, NULL if none; the hrtimer should already
 *			have been started
 * @waiter:		the pre-initialized rt_mutex_waiter
 * @detect_deadlock:	perform deadlock detection (1) or not (0)
 *
 * Complete the lock acquisition started on our behalf by another thread.
 *
 * Returns:
 *  0 - success
 * <0 - error, one of -EINTR, -ETIMEDOUT, or -EDEADLK
 *
 * Special API call for PI-futex requeue support
 */
int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
			       struct hrtimer_sleeper *to,
			       struct rt_mutex_waiter *waiter,
			       int detect_deadlock)
{
	int ret;

	raw_spin_lock(&lock->wait_lock);

	set_current_state(TASK_INTERRUPTIBLE);

	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);

	set_current_state(TASK_RUNNING);

	if (unlikely(ret))
		remove_waiter(lock, waiter);

	/*
	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
	 * have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock(&lock->wait_lock);

	return ret;
}