/*
 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner.
 *
 * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
 * Copyright (C) 2006 Esben Nielsen
 *
 * See Documentation/rt-mutex-design.txt for details.
 */
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/timer.h>

#include "rtmutex_common.h"

/*
 * lock->owner state tracking:
 *
 * lock->owner holds the task_struct pointer of the owner. Bit 0
 * is used to keep track of the "lock has waiters" state.
 *
 *  owner	 bit0
 *  NULL	 0	lock is free (fast acquire possible)
 *  NULL	 1	lock is free and has waiters and the top waiter
 *			is going to take the lock*
 *  taskpointer	 0	lock is held (fast release possible)
 *  taskpointer	 1	lock is held and has waiters**
 *
 * The fast atomic compare exchange based acquire and release is only
 * possible when bit 0 of lock->owner is 0.
 *
 * (*) It also can be a transitional state when grabbing the lock
 * with ->wait_lock held. To prevent any fast path cmpxchg to the lock,
 * we need to set bit 0 before looking at the lock, and the owner may
 * be NULL in this small window, hence this can be a transitional state.
 *
 * (**) There is a small window when bit 0 is set but there are no
 * waiters. This can happen when grabbing the lock in the slow path.
 * To prevent a cmpxchg of the owner releasing the lock, we need to
 * set this bit before looking at the lock.
 */

static void
rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
{
	unsigned long val = (unsigned long)owner;

	if (rt_mutex_has_waiters(lock))
		val |= RT_MUTEX_HAS_WAITERS;

	lock->owner = (struct task_struct *)val;
}

static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}

static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
	if (!rt_mutex_has_waiters(lock))
		clear_rt_mutex_waiters(lock);
}

/*
 * We can speed up the acquire/release if the architecture supports
 * cmpxchg and there is no debugging state to be set up.
 */
#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
# define rt_mutex_cmpxchg(l,c,n)	(cmpxchg(&l->owner, c, n) == c)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	unsigned long owner, *p = (unsigned long *) &lock->owner;

	do {
		owner = *p;
	} while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
}

/*
 * Safe fastpath aware unlock:
 * 1) Clear the waiters bit
 * 2) Drop lock->wait_lock
 * 3) Try to unlock the lock with cmpxchg
 */
static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
	__releases(lock->wait_lock)
{
	struct task_struct *owner = rt_mutex_owner(lock);

	clear_rt_mutex_waiters(lock);
	raw_spin_unlock(&lock->wait_lock);
	/*
	 * If a new waiter comes in between the unlock and the cmpxchg
	 * we have two situations:
	 *
	 * unlock(wait_lock);
	 *					lock(wait_lock);
	 * cmpxchg(p, owner, 0) == owner
	 *					mark_rt_mutex_waiters(lock);
	 *					acquire(lock);
	 * or:
	 *
	 * unlock(wait_lock);
	 *					lock(wait_lock);
	 *					mark_rt_mutex_waiters(lock);
	 *
	 * cmpxchg(p, owner, 0) != owner
	 *					enqueue_waiter();
	 *					unlock(wait_lock);
	 * lock(wait_lock);
	 * wake waiter();
	 * unlock(wait_lock);
	 *					lock(wait_lock);
	 *					acquire(lock);
	 */
	return rt_mutex_cmpxchg(lock, owner, NULL);
}

#else
# define rt_mutex_cmpxchg(l,c,n)	(0)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}

/*
 * Simple slow path only version: lock->owner is protected by lock->wait_lock.
 */
static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
	__releases(lock->wait_lock)
{
	lock->owner = NULL;
	raw_spin_unlock(&lock->wait_lock);
	return true;
}
#endif

static inline int
rt_mutex_waiter_less(struct rt_mutex_waiter *left,
		     struct rt_mutex_waiter *right)
{
	if (left->prio < right->prio)
		return 1;

	/*
	 * If both waiters have dl_prio(), we check the deadlines of the
	 * associated tasks.
	 * If left waiter has a dl_prio(), and we didn't return 1 above,
	 * then right waiter has a dl_prio() too.
	 */
	if (dl_prio(left->prio))
		return (left->task->dl.deadline < right->task->dl.deadline);

	return 0;
}

static void
rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
	struct rb_node **link = &lock->waiters.rb_node;
	struct rb_node *parent = NULL;
	struct rt_mutex_waiter *entry;
	int leftmost = 1;

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);
		if (rt_mutex_waiter_less(waiter, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	if (leftmost)
		lock->waiters_leftmost = &waiter->tree_entry;

	rb_link_node(&waiter->tree_entry, parent, link);
	rb_insert_color(&waiter->tree_entry, &lock->waiters);
}

static void
rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
	if (RB_EMPTY_NODE(&waiter->tree_entry))
		return;

	if (lock->waiters_leftmost == &waiter->tree_entry)
		lock->waiters_leftmost = rb_next(&waiter->tree_entry);

	rb_erase(&waiter->tree_entry, &lock->waiters);
	RB_CLEAR_NODE(&waiter->tree_entry);
}

static void
rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
	struct rb_node **link = &task->pi_waiters.rb_node;
	struct rb_node *parent = NULL;
	struct rt_mutex_waiter *entry;
	int leftmost = 1;

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry);
		if (rt_mutex_waiter_less(waiter, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	if (leftmost)
		task->pi_waiters_leftmost = &waiter->pi_tree_entry;

	rb_link_node(&waiter->pi_tree_entry, parent, link);
	rb_insert_color(&waiter->pi_tree_entry, &task->pi_waiters);
}

static void
rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
	if (RB_EMPTY_NODE(&waiter->pi_tree_entry))
		return;

	if (task->pi_waiters_leftmost == &waiter->pi_tree_entry)
		task->pi_waiters_leftmost = rb_next(&waiter->pi_tree_entry);

	rb_erase(&waiter->pi_tree_entry, &task->pi_waiters);
	RB_CLEAR_NODE(&waiter->pi_tree_entry);
}

/*
 * Calculate task priority from the waiter tree priority
 *
 * Return task->normal_prio when the waiter tree is empty or when
 * the waiter is not allowed to do priority boosting
 */
int rt_mutex_getprio(struct task_struct *task)
{
	if (likely(!task_has_pi_waiters(task)))
		return task->normal_prio;

	return min(task_top_pi_waiter(task)->prio,
		   task->normal_prio);
}

struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
{
	if (likely(!task_has_pi_waiters(task)))
		return NULL;

	return task_top_pi_waiter(task)->task;
}

/*
 * Called by sched_setscheduler() to check whether the priority change
 * is overruled by a possible priority boosting.
 */
int rt_mutex_check_prio(struct task_struct *task, int newprio)
{
	if (!task_has_pi_waiters(task))
		return 0;

	return task_top_pi_waiter(task)->task->prio <= newprio;
}

/*
 * Adjust the priority of a task, after its pi_waiters got modified.
 *
 * This can be both boosting and unboosting. task->pi_lock must be held.
 */
static void __rt_mutex_adjust_prio(struct task_struct *task)
{
	int prio = rt_mutex_getprio(task);

	if (task->prio != prio || dl_prio(prio))
		rt_mutex_setprio(task, prio);
}

/*
 * Adjust task priority (undo boosting). Called from the exit path of
 * rt_mutex_slowunlock() and rt_mutex_slowlock().
 *
 * (Note: We do this outside of the protection of lock->wait_lock to
 * allow the lock to be taken while or before we readjust the priority
 * of task. We do not use the spin_xx_mutex() variants here as we are
 * outside of the debug path.)
 */
static void rt_mutex_adjust_prio(struct task_struct *task)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&task->pi_lock, flags);
	__rt_mutex_adjust_prio(task);
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
}

/*
 * Max number of times we'll walk the boosting chain:
 */
int max_lock_depth = 1024;

static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
{
	return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
}

/*
 * Adjust the priority chain. Also used for deadlock detection.
 * Decreases task's usage by one - may thus free the task.
 *
 * @task:	the task owning the mutex (owner) for which a chain walk is
 *		probably needed
 * @deadlock_detect: do we have to carry out deadlock detection?
 * @orig_lock:	the mutex (can be NULL if we are walking the chain to recheck
 *		things for a task that has just got its priority adjusted, and
 *		is waiting on a mutex)
 * @next_lock:	the mutex on which the owner of @orig_lock was blocked before
 *		we dropped its pi_lock. Is never dereferenced, only used for
 *		comparison to detect lock chain changes.
 * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
 *		its priority to the mutex owner (can be NULL in the case
 *		depicted above or if the top waiter has gone away and we are
 *		actually deboosting the owner)
 * @top_task:	the current top waiter
 *
 * Returns 0 or -EDEADLK.
 */
static int rt_mutex_adjust_prio_chain(struct task_struct *task,
				      int deadlock_detect,
				      struct rt_mutex *orig_lock,
				      struct rt_mutex *next_lock,
				      struct rt_mutex_waiter *orig_waiter,
				      struct task_struct *top_task)
{
	struct rt_mutex *lock;
	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
	int detect_deadlock, ret = 0, depth = 0;
	unsigned long flags;

	detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
							 deadlock_detect);

	/*
	 * The (de)boosting is a step by step approach with a lot of
	 * pitfalls. We want this to be preemptible and we want to hold
	 * a maximum of two locks per step. So we have to check
	 * carefully whether things change under us.
	 */
 again:
	if (++depth > max_lock_depth) {
		static int prev_max;

		/*
		 * Print this only once. If the admin changes the limit,
		 * print a new message when reaching the limit again.
		 */
		if (prev_max != max_lock_depth) {
			prev_max = max_lock_depth;
			printk(KERN_WARNING "Maximum lock depth %d reached "
			       "task: %s (%d)\n", max_lock_depth,
			       top_task->comm, task_pid_nr(top_task));
		}
		put_task_struct(task);

		return -EDEADLK;
	}
 retry:
	/*
	 * Task cannot go away as we did a get_task() before!
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	/*
	 * Check whether the end of the boosting chain has been
	 * reached or the state of the chain has changed while we
	 * dropped the locks.
	 */
	if (!waiter)
		goto out_unlock_pi;

	/*
	 * Check the orig_waiter state. After we dropped the locks,
	 * the previous owner of the lock might have released the lock.
	 */
	if (orig_waiter && !rt_mutex_owner(orig_lock))
		goto out_unlock_pi;

	/*
	 * We dropped all locks after taking a refcount on @task, so
	 * the task might have moved on in the lock chain or even left
	 * the chain completely and blocks now on an unrelated lock or
	 * on @orig_lock.
	 *
	 * We stored the lock on which @task was blocked in @next_lock,
	 * so we can detect the chain change.
	 */
	if (next_lock != waiter->lock)
		goto out_unlock_pi;

	/*
	 * Drop out when the task has no waiters. Note that top_waiter
	 * can be NULL when we are in deboosting mode!
	 */
	if (top_waiter) {
		if (!task_has_pi_waiters(task))
			goto out_unlock_pi;
		/*
		 * If deadlock detection is off, we stop here if we
		 * are not the top pi waiter of the task.
		 */
		if (!detect_deadlock && top_waiter != task_top_pi_waiter(task))
			goto out_unlock_pi;
	}

	/*
	 * When deadlock detection is off we check whether further
	 * priority adjustment is necessary.
	 */
	if (!detect_deadlock && waiter->prio == task->prio)
		goto out_unlock_pi;

	lock = waiter->lock;
	if (!raw_spin_trylock(&lock->wait_lock)) {
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		cpu_relax();
		goto retry;
	}

	/*
	 * Deadlock detection. If the lock is the same as the original
	 * lock which caused us to walk the lock chain or if the
	 * current lock is owned by the task which initiated the chain
	 * walk, we detected a deadlock.
	 */
	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
		debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
		raw_spin_unlock(&lock->wait_lock);
		ret = -EDEADLK;
		goto out_unlock_pi;
	}

	top_waiter = rt_mutex_top_waiter(lock);

	/* Requeue the waiter */
	rt_mutex_dequeue(lock, waiter);
	waiter->prio = task->prio;
	rt_mutex_enqueue(lock, waiter);

	/* Release the task */
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
	put_task_struct(task);

	if (!rt_mutex_owner(lock)) {
		/*
		 * If the requeue above changed the top waiter, then we need
		 * to wake the new top waiter up to try to get the lock.
		 */

		if (top_waiter != rt_mutex_top_waiter(lock))
			wake_up_process(rt_mutex_top_waiter(lock)->task);
		raw_spin_unlock(&lock->wait_lock);
		return 0;
	}

	/* Grab the next task */
	task = rt_mutex_owner(lock);
	get_task_struct(task);
	raw_spin_lock_irqsave(&task->pi_lock, flags);

	if (waiter == rt_mutex_top_waiter(lock)) {
		/* Boost the owner */
		rt_mutex_dequeue_pi(task, top_waiter);
		rt_mutex_enqueue_pi(task, waiter);
		__rt_mutex_adjust_prio(task);

	} else if (top_waiter == waiter) {
		/* Deboost the owner */
		rt_mutex_dequeue_pi(task, waiter);
		waiter = rt_mutex_top_waiter(lock);
		rt_mutex_enqueue_pi(task, waiter);
		__rt_mutex_adjust_prio(task);
	}

	/*
	 * Check whether the task which owns the current lock is pi
	 * blocked itself. If yes we store a pointer to the lock for
	 * the lock chain change detection above. After we dropped
	 * task->pi_lock next_lock cannot be dereferenced anymore.
	 */
	next_lock = task_blocked_on_lock(task);

	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	top_waiter = rt_mutex_top_waiter(lock);
	raw_spin_unlock(&lock->wait_lock);

	/*
	 * We reached the end of the lock chain. Stop right here. No
	 * point in going back just to figure that out.
	 */
	if (!next_lock)
		goto out_put_task;

	if (!detect_deadlock && waiter != top_waiter)
		goto out_put_task;

	goto again;

 out_unlock_pi:
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 out_put_task:
	put_task_struct(task);

	return ret;
}

/*
 * Try to take an rt-mutex
 *
 * Must be called with lock->wait_lock held.
 *
 * @lock:   The lock to be acquired.
 * @task:   The task which wants to acquire the lock
 * @waiter: The waiter that is queued to the lock's wait list if the
 *	    callsite called task_blocked_on_lock(), otherwise NULL
 */
static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
				struct rt_mutex_waiter *waiter)
{
	unsigned long flags;

	/*
	 * Before testing whether we can acquire @lock, we set the
	 * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
	 * other tasks which try to modify @lock into the slow path
	 * and they serialize on @lock->wait_lock.
	 *
	 * The RT_MUTEX_HAS_WAITERS bit can have a transitional state
	 * as explained at the top of this file if and only if:
	 *
	 * - There is a lock owner. The caller must fixup the
	 *   transient state if it does a trylock or leaves the lock
	 *   function due to a signal or timeout.
	 *
	 * - @task acquires the lock and there are no other
	 *   waiters. This is undone in rt_mutex_set_owner(@task) at
	 *   the end of this function.
	 */
	mark_rt_mutex_waiters(lock);

	/*
	 * If @lock has an owner, give up.
	 */
	if (rt_mutex_owner(lock))
		return 0;

	/*
	 * If @waiter != NULL, @task has already enqueued the waiter
	 * into @lock waiter list. If @waiter == NULL then this is a
	 * trylock attempt.
	 */
	if (waiter) {
		/*
		 * If waiter is not the highest priority waiter of
		 * @lock, give up.
		 */
		if (waiter != rt_mutex_top_waiter(lock))
			return 0;

		/*
		 * We can acquire the lock. Remove the waiter from the
		 * lock waiters list.
		 */
		rt_mutex_dequeue(lock, waiter);

	} else {
		/*
		 * If the lock has waiters already we check whether @task is
		 * eligible to take over the lock.
		 *
		 * If there are no other waiters, @task can acquire
		 * the lock. @task->pi_blocked_on is NULL, so it does
		 * not need to be dequeued.
		 */
		if (rt_mutex_has_waiters(lock)) {
			/*
			 * If @task->prio is greater than or equal to
			 * the top waiter priority (kernel view),
			 * @task lost.
			 */
			if (task->prio >= rt_mutex_top_waiter(lock)->prio)
				return 0;

			/*
			 * The current top waiter stays enqueued. We
			 * don't have to change anything in the lock
			 * waiters order.
			 */
		} else {
			/*
			 * No waiters. Take the lock without the
			 * pi_lock dance. @task->pi_blocked_on is NULL
			 * and we have no waiters to enqueue in @task
			 * pi waiters list.
			 */
			goto takeit;
		}
	}

	/*
	 * Clear @task->pi_blocked_on. Requires protection by
	 * @task->pi_lock. Redundant operation for the @waiter == NULL
	 * case, but conditionals are more expensive than a redundant
	 * store.
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);
	task->pi_blocked_on = NULL;
	/*
	 * Finish the lock acquisition. @task is the new owner. If
	 * other waiters exist we have to insert the highest priority
	 * waiter into @task->pi_waiters list.
	 */
	if (rt_mutex_has_waiters(lock))
		rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock));
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

takeit:
	/* We got the lock. */
	debug_rt_mutex_lock(lock);

	/*
	 * This either preserves the RT_MUTEX_HAS_WAITERS bit if there
	 * are still waiters or clears it.
	 */
	rt_mutex_set_owner(lock, task);

	rt_mutex_deadlock_account_lock(lock, task);

	return 1;
}

/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate pi chain
 *
 * This must be called with lock->wait_lock held.
 */
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
				   struct rt_mutex_waiter *waiter,
				   struct task_struct *task,
				   int detect_deadlock)
{
	struct task_struct *owner = rt_mutex_owner(lock);
	struct rt_mutex_waiter *top_waiter = waiter;
	struct rt_mutex *next_lock;
	int chain_walk = 0, res;
	unsigned long flags;

	/*
	 * Early deadlock detection. We really don't want the task to
	 * enqueue on itself just to untangle the mess later. It's not
	 * only an optimization. We drop the locks, so another waiter
	 * can come in before the chain walk detects the deadlock. So
	 * the other will detect the deadlock and return -EDEADLK,
	 * which is wrong, as the other waiter is not in a deadlock
	 * situation.
	 */
	if (owner == task)
		return -EDEADLK;

	raw_spin_lock_irqsave(&task->pi_lock, flags);
	__rt_mutex_adjust_prio(task);
	waiter->task = task;
	waiter->lock = lock;
	waiter->prio = task->prio;

	/* Get the top priority waiter on the lock */
	if (rt_mutex_has_waiters(lock))
		top_waiter = rt_mutex_top_waiter(lock);
	rt_mutex_enqueue(lock, waiter);

	task->pi_blocked_on = waiter;

	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	if (!owner)
		return 0;

	raw_spin_lock_irqsave(&owner->pi_lock, flags);
	if (waiter == rt_mutex_top_waiter(lock)) {
		rt_mutex_dequeue_pi(owner, top_waiter);
		rt_mutex_enqueue_pi(owner, waiter);

		__rt_mutex_adjust_prio(owner);
		if (owner->pi_blocked_on)
			chain_walk = 1;
	} else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) {
		chain_walk = 1;
	}

	/* Store the lock on which owner is blocked or NULL */
	next_lock = task_blocked_on_lock(owner);

	raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
	/*
	 * Even if full deadlock detection is on, if the owner is not
	 * blocked itself, we can avoid finding this out in the chain
	 * walk.
	 */
	if (!chain_walk || !next_lock)
		return 0;

	/*
	 * The owner can't disappear while holding a lock,
	 * so the owner struct is protected by wait_lock.
	 * Gets dropped in rt_mutex_adjust_prio_chain()!
	 */
	get_task_struct(owner);

	raw_spin_unlock(&lock->wait_lock);

	res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock,
					 next_lock, waiter, task);

	raw_spin_lock(&lock->wait_lock);

	return res;
}

/*
 * Wake up the next waiter on the lock.
 *
 * Remove the top waiter from the current task's pi waiter list and
 * wake it up.
 *
 * Called with lock->wait_lock held.
 */
static void wakeup_next_waiter(struct rt_mutex *lock)
{
	struct rt_mutex_waiter *waiter;
	unsigned long flags;

	raw_spin_lock_irqsave(&current->pi_lock, flags);

	waiter = rt_mutex_top_waiter(lock);

	/*
	 * Remove it from current->pi_waiters. We do not adjust a
	 * possible priority boost right now. We execute wakeup in the
	 * boosted mode and go back to normal after releasing
	 * lock->wait_lock.
	 */
	rt_mutex_dequeue_pi(current, waiter);

	/*
	 * As we are waking up the top waiter, and the waiter stays
	 * queued on the lock until it gets the lock, this lock
	 * obviously has waiters. Just set the bit here and this has
	 * the added benefit of forcing all new tasks into the
	 * slow path making sure no task of lower priority than
	 * the top waiter can steal this lock.
	 */
	lock->owner = (void *) RT_MUTEX_HAS_WAITERS;

	raw_spin_unlock_irqrestore(&current->pi_lock, flags);

	/*
	 * It's safe to dereference waiter as it cannot go away as
	 * long as we hold lock->wait_lock. The waiter task needs to
	 * acquire it in order to dequeue the waiter.
	 */
	wake_up_process(waiter->task);
}

/*
 * Remove a waiter from a lock and give up
 *
 * Must be called with lock->wait_lock held, and only after
 * try_to_take_rt_mutex() has just failed.
 */
static void remove_waiter(struct rt_mutex *lock,
			  struct rt_mutex_waiter *waiter)
{
	int first = (waiter == rt_mutex_top_waiter(lock));
	struct task_struct *owner = rt_mutex_owner(lock);
	struct rt_mutex *next_lock = NULL;
	unsigned long flags;

	raw_spin_lock_irqsave(&current->pi_lock, flags);
	rt_mutex_dequeue(lock, waiter);
	current->pi_blocked_on = NULL;
	raw_spin_unlock_irqrestore(&current->pi_lock, flags);

	if (!owner)
		return;

	if (first) {

		raw_spin_lock_irqsave(&owner->pi_lock, flags);

		rt_mutex_dequeue_pi(owner, waiter);

		if (rt_mutex_has_waiters(lock)) {
			struct rt_mutex_waiter *next;

			next = rt_mutex_top_waiter(lock);
			rt_mutex_enqueue_pi(owner, next);
		}
		__rt_mutex_adjust_prio(owner);

		/* Store the lock on which owner is blocked or NULL */
		next_lock = task_blocked_on_lock(owner);

		raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
	}

	if (!next_lock)
		return;

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(owner);

	raw_spin_unlock(&lock->wait_lock);

	rt_mutex_adjust_prio_chain(owner, 0, lock, next_lock, NULL, current);

	raw_spin_lock(&lock->wait_lock);
}

/*
 * Recheck the pi chain, in case we got a priority setting
 *
 * Called from sched_setscheduler
 */
void rt_mutex_adjust_pi(struct task_struct *task)
{
	struct rt_mutex_waiter *waiter;
	struct rt_mutex *next_lock;
	unsigned long flags;

	raw_spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	if (!waiter || (waiter->prio == task->prio &&
			!dl_prio(task->prio))) {
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		return;
	}
	next_lock = waiter->lock;
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(task);

	rt_mutex_adjust_prio_chain(task, 0, NULL, next_lock, NULL, task);
}

/**
 * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
 * @lock:	 the rt_mutex to take
 * @state:	 the state the task should block in (TASK_INTERRUPTIBLE
 *		 or TASK_UNINTERRUPTIBLE)
 * @timeout:	 the pre-initialized and started timer, or NULL for none
 * @waiter:	 the pre-initialized rt_mutex_waiter
 *
 * lock->wait_lock must be held by the caller.
 */
static int __sched
__rt_mutex_slowlock(struct rt_mutex *lock, int state,
		    struct hrtimer_sleeper *timeout,
		    struct rt_mutex_waiter *waiter)
{
	int ret = 0;

	for (;;) {
		/* Try to acquire the lock: */
		if (try_to_take_rt_mutex(lock, current, waiter))
			break;

		/*
		 * TASK_INTERRUPTIBLE checks for signals and
		 * timeout. Ignored otherwise.
		 */
		if (unlikely(state == TASK_INTERRUPTIBLE)) {
			/* Signal pending? */
			if (signal_pending(current))
				ret = -EINTR;
			if (timeout && !timeout->task)
				ret = -ETIMEDOUT;
			if (ret)
				break;
		}

		raw_spin_unlock(&lock->wait_lock);

		debug_rt_mutex_print_deadlock(waiter);

		schedule_rt_mutex(lock);

		raw_spin_lock(&lock->wait_lock);
		set_current_state(state);
	}

	return ret;
}

static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
				     struct rt_mutex_waiter *w)
{
	/*
	 * If the result is not -EDEADLOCK or the caller requested
	 * deadlock detection, nothing to do here.
	 */
	if (res != -EDEADLOCK || detect_deadlock)
		return;

	/*
	 * Yell loudly and stop the task right here.
	 */
	rt_mutex_print_deadlock(w);
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
}

/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
		  struct hrtimer_sleeper *timeout,
		  int detect_deadlock)
{
	struct rt_mutex_waiter waiter;
	int ret = 0;

	debug_rt_mutex_init_waiter(&waiter);
	RB_CLEAR_NODE(&waiter.pi_tree_entry);
	RB_CLEAR_NODE(&waiter.tree_entry);

	raw_spin_lock(&lock->wait_lock);

	/* Try to acquire the lock again: */
	if (try_to_take_rt_mutex(lock, current, NULL)) {
		raw_spin_unlock(&lock->wait_lock);
		return 0;
	}

	set_current_state(state);

	/* Setup the timer, when timeout != NULL */
	if (unlikely(timeout)) {
		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
		if (!hrtimer_active(&timeout->timer))
			timeout->task = NULL;
	}

	ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock);

	if (likely(!ret))
		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);

	set_current_state(TASK_RUNNING);

	if (unlikely(ret)) {
		remove_waiter(lock, &waiter);
		rt_mutex_handle_deadlock(ret, detect_deadlock, &waiter);
	}

	/*
	 * try_to_take_rt_mutex() sets the waiter bit
	 * unconditionally. We might have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock(&lock->wait_lock);

	/* Remove pending timer: */
	if (unlikely(timeout))
		hrtimer_cancel(&timeout->timer);

	debug_rt_mutex_free_waiter(&waiter);

	return ret;
}

/*
 * Slow path try-lock function:
 */
static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
{
	int ret;

	/*
	 * If the lock already has an owner we fail to get the lock.
	 * This can be done without taking the @lock->wait_lock as
	 * it is only being read, and this is a trylock anyway.
	 */
	if (rt_mutex_owner(lock))
		return 0;

	/*
	 * The mutex currently has no owner. Lock the wait lock and
	 * try to acquire the lock.
	 */
	raw_spin_lock(&lock->wait_lock);

	ret = try_to_take_rt_mutex(lock, current, NULL);

	/*
	 * try_to_take_rt_mutex() sets the lock waiters bit
	 * unconditionally. Clean this up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock(&lock->wait_lock);

	return ret;
}

/*
 * Slow path to release a rt-mutex:
 */
static void __sched
rt_mutex_slowunlock(struct rt_mutex *lock)
{
	raw_spin_lock(&lock->wait_lock);

	debug_rt_mutex_unlock(lock);

	rt_mutex_deadlock_account_unlock(current);

	/*
	 * We must be careful here if the fast path is enabled. If we
	 * have no waiters queued we cannot set owner to NULL here
	 * because of:
	 *
	 * foo->lock->owner = NULL;
	 *			rtmutex_lock(foo->lock); <- fast path
	 *			free = atomic_dec_and_test(foo->refcnt);
	 *			rtmutex_unlock(foo->lock); <- fast path
	 *			if (free)
	 *				kfree(foo);
	 * raw_spin_unlock(foo->lock->wait_lock);
	 *
	 * So for the fastpath enabled kernel:
	 *
	 * Nothing can set the waiters bit as long as we hold
	 * lock->wait_lock. So we do the following sequence:
	 *
	 *	owner = rt_mutex_owner(lock);
	 *	clear_rt_mutex_waiters(lock);
	 *	raw_spin_unlock(&lock->wait_lock);
	 *	if (cmpxchg(&lock->owner, owner, 0) == owner)
	 *		return;
	 *	goto retry;
	 *
	 * The fastpath disabled variant is simple as all access to
	 * lock->owner is serialized by lock->wait_lock:
	 *
	 *	lock->owner = NULL;
	 *	raw_spin_unlock(&lock->wait_lock);
	 */
	while (!rt_mutex_has_waiters(lock)) {
		/* Drops lock->wait_lock ! */
		if (unlock_rt_mutex_safe(lock) == true)
			return;
		/* Relock the rtmutex and try again */
		raw_spin_lock(&lock->wait_lock);
	}

	/*
	 * The wakeup next waiter path does not suffer from the above
	 * race. See the comments there.
	 */
	wakeup_next_waiter(lock);

	raw_spin_unlock(&lock->wait_lock);

	/* Undo pi boosting if necessary: */
	rt_mutex_adjust_prio(current);
}

/*
 * debug aware fast / slowpath lock, trylock, unlock
 *
 * The atomic acquire/release ops are compiled away, when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
		  int detect_deadlock,
		  int (*slowfn)(struct rt_mutex *lock, int state,
				struct hrtimer_sleeper *timeout,
				int detect_deadlock))
{
	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 0;
	} else
		return slowfn(lock, state, NULL, detect_deadlock);
}

static inline int
rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
			struct hrtimer_sleeper *timeout, int detect_deadlock,
			int (*slowfn)(struct rt_mutex *lock, int state,
				      struct hrtimer_sleeper *timeout,
				      int detect_deadlock))
{
	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 0;
	} else
		return slowfn(lock, state, timeout, detect_deadlock);
}

static inline int
rt_mutex_fasttrylock(struct rt_mutex *lock,
		     int (*slowfn)(struct rt_mutex *lock))
{
	if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 1;
	}
	return slowfn(lock);
}

static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
		    void (*slowfn)(struct rt_mutex *lock))
{
	if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
		rt_mutex_deadlock_account_unlock(current);
	else
		slowfn(lock);
}

/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
	might_sleep();

	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);

/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptibly
 *
 * @lock:		the rt_mutex to be locked
 * @detect_deadlock:	deadlock detection on/off
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
					int detect_deadlock)
{
	might_sleep();

	return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
				 detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);

/**
 * rt_mutex_timed_lock - lock a rt_mutex interruptibly, with a timeout
 *			 structure provided by the caller
 *
 * @lock:		the rt_mutex to be locked
 * @timeout:		timeout structure or NULL (no timeout)
 * @detect_deadlock:	deadlock detection on/off
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 * -ETIMEDOUT	when the timeout expired
 * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
 */
int
rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
		    int detect_deadlock)
{
	might_sleep();

	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
				       detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);

/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock:	the rt_mutex to be locked
 *
 * Returns 1 on success and 0 on contention
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);

/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);

/**
 * rt_mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void rt_mutex_destroy(struct rt_mutex *lock)
{
	WARN_ON(rt_mutex_is_locked(lock));
#ifdef CONFIG_DEBUG_RT_MUTEXES
	lock->magic = NULL;
#endif
}

EXPORT_SYMBOL_GPL(rt_mutex_destroy);

/**
 * __rt_mutex_init - initialize the rt lock
 *
 * @lock: the rt lock to be initialized
 *
 * Initialize the rt lock to unlocked state.
 *
 * Initializing a locked rt lock is not allowed
 */
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
	lock->owner = NULL;
	raw_spin_lock_init(&lock->wait_lock);
	lock->waiters = RB_ROOT;
	lock->waiters_leftmost = NULL;

	debug_rt_mutex_init(lock, name);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);
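
/*
 * Illustrative sketch, not part of the original file: the usual ways to
 * get an initialized rt_mutex, assuming the DEFINE_RT_MUTEX() and
 * rt_mutex_init() helpers from <linux/rtmutex.h>. 'my_static_lock' and
 * 'struct my_obj' are hypothetical; the #if 0 guard keeps the example
 * out of the build.
 */
#if 0
static DEFINE_RT_MUTEX(my_static_lock);

struct my_obj {
	struct rt_mutex lock;
};

static void my_obj_setup(struct my_obj *obj)
{
	rt_mutex_init(&obj->lock);	/* wraps __rt_mutex_init() */
}
#endif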

/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *				proxy owner
 *
 * @lock:	the rt_mutex to be locked
 * @proxy_owner: the task to set as owner
 *
 * No locking. The caller has to do the serializing itself.
 * Special API call for PI-futex support
 */
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
				struct task_struct *proxy_owner)
{
	__rt_mutex_init(lock, NULL);
	debug_rt_mutex_proxy_lock(lock, proxy_owner);
	rt_mutex_set_owner(lock, proxy_owner);
	rt_mutex_deadlock_account_lock(lock, proxy_owner);
}

/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock:	the rt_mutex to be unlocked
 *
 * No locking. The caller has to do the serializing itself.
 * Special API call for PI-futex support
 */
void rt_mutex_proxy_unlock(struct rt_mutex *lock,
			   struct task_struct *proxy_owner)
{
	debug_rt_mutex_proxy_unlock(lock);
	rt_mutex_set_owner(lock, NULL);
	rt_mutex_deadlock_account_unlock(proxy_owner);
}

/**
 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:		the rt_mutex to take
 * @waiter:		the pre-initialized rt_mutex_waiter
 * @task:		the task to prepare
 * @detect_deadlock:	perform deadlock detection (1) or not (0)
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for FUTEX_REQUEUE_PI support.
 */
int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
			      struct rt_mutex_waiter *waiter,
			      struct task_struct *task, int detect_deadlock)
{
	int ret;

	raw_spin_lock(&lock->wait_lock);

	if (try_to_take_rt_mutex(lock, task, NULL)) {
		raw_spin_unlock(&lock->wait_lock);
		return 1;
	}

	/* We enforce deadlock detection for futexes */
	ret = task_blocks_on_rt_mutex(lock, waiter, task, 1);

	if (ret && !rt_mutex_owner(lock)) {
		/*
		 * Reset the return value. We might have
		 * returned with -EDEADLK and the owner
		 * released the lock while we were walking the
		 * pi chain. Let the waiter sort it out.
		 */
		ret = 0;
	}

	if (unlikely(ret))
		remove_waiter(lock, waiter);

	raw_spin_unlock(&lock->wait_lock);

	debug_rt_mutex_print_deadlock(waiter);

	return ret;
}

/**
 * rt_mutex_next_owner - return the next owner of the lock
 *
 * @lock: the rt lock to query
 *
 * Returns the next owner of the lock or NULL
 *
 * Caller has to serialize against other accessors to the lock
 * itself.
 *
 * Special API call for PI-futex support
 */
struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
{
	if (!rt_mutex_has_waiters(lock))
		return NULL;

	return rt_mutex_top_waiter(lock)->task;
}

1382 | |
1383 | /** | |
1384 | * rt_mutex_finish_proxy_lock() - Complete lock acquisition | |
1385 | * @lock: the rt_mutex we were woken on | |
1386 | * @to: the timeout, null if none. hrtimer should already have | |
1387 | * been started. | |
1388 | * @waiter: the pre-initialized rt_mutex_waiter | |
1389 | * @detect_deadlock: perform deadlock detection (1) or not (0) | |
1390 | * | |
1391 | * Complete the lock acquisition started our behalf by another thread. | |
1392 | * | |
1393 | * Returns: | |
1394 | * 0 - success | |
1395 | * <0 - error, one of -EINTR, -ETIMEDOUT, or -EDEADLK | |
1396 | * | |
1397 | * Special API call for PI-futex requeue support | |
1398 | */ | |
1399 | int rt_mutex_finish_proxy_lock(struct rt_mutex *lock, | |
1400 | struct hrtimer_sleeper *to, | |
1401 | struct rt_mutex_waiter *waiter, | |
1402 | int detect_deadlock) | |
1403 | { | |
1404 | int ret; | |
1405 | ||
d209d74d | 1406 | raw_spin_lock(&lock->wait_lock); |
8dac456a DH |
1407 | |
1408 | set_current_state(TASK_INTERRUPTIBLE); | |
1409 | ||
8161239a | 1410 | ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter); |
8dac456a DH |
1411 | |
1412 | set_current_state(TASK_RUNNING); | |
1413 | ||
8161239a | 1414 | if (unlikely(ret)) |
8dac456a DH |
1415 | remove_waiter(lock, waiter); |
1416 | ||
1417 | /* | |
1418 | * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might | |
1419 | * have to fix that up. | |
1420 | */ | |
1421 | fixup_rt_mutex_waiters(lock); | |
1422 | ||
d209d74d | 1423 | raw_spin_unlock(&lock->wait_lock); |
8dac456a | 1424 | |
8dac456a DH |
1425 | return ret; |
1426 | } |