/*
 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner.
 *
 * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
 * Copyright (C) 2006 Esben Nielsen
 *
 * See Documentation/rt-mutex-design.txt for details.
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/timer.h>

#include "rtmutex_common.h"

#ifdef CONFIG_DEBUG_RT_MUTEXES
# include "rtmutex-debug.h"
#else
# include "rtmutex.h"
#endif

/*
 * lock->owner state tracking:
 *
 * lock->owner holds the task_struct pointer of the owner. Bits 0 and 1
 * are used to keep track of the "owner is pending" and "lock has
 * waiters" state.
 *
 * owner        bit1    bit0
 * NULL         0       0       lock is free (fast acquire possible)
 * NULL         0       1       invalid state
 * NULL         1       0       Transitional state*
 * NULL         1       1       invalid state
 * taskpointer  0       0       lock is held (fast release possible)
 * taskpointer  0       1       task is pending owner
 * taskpointer  1       0       lock is held and has waiters
 * taskpointer  1       1       task is pending owner and lock has more waiters
 *
 * Pending ownership is assigned to the top (highest priority) waiter
 * of the lock when the lock is released. The thread is woken up and
 * can now take the lock. Until the lock is taken (bit 0 cleared), a
 * competing higher priority thread can steal the lock, which puts the
 * woken up thread back on the waiters list.
 *
 * The fast atomic compare-exchange based acquire and release is only
 * possible when bits 0 and 1 of lock->owner are 0.
 *
 * (*) There is a small window in which the owner can be NULL while the
 * "lock has waiters" bit is set. This can happen when grabbing the lock:
 * to prevent a cmpxchg of the owner from releasing the lock behind our
 * back, we have to set this bit before looking at the lock, hence this
 * is a transitional state.
 */

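/*
 * Example: with RT_MUTEX_OWNER_PENDING == 1 and RT_MUTEX_HAS_WAITERS == 2,
 * an owner pointer of (illustrative value) 0xc1d38000 encodes as:
 *
 *      0xc1d38000      held, no waiters (fast release possible)
 *      0xc1d38001      pending owner, no further waiters
 *      0xc1d38002      held, lock has waiters
 *      0xc1d38003      pending owner, more waiters queued
 *
 * task_struct pointers are always at least 4-byte aligned, which is
 * what frees the two low bits for this state tracking.
 */
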
static void
rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner,
                   unsigned long mask)
{
        unsigned long val = (unsigned long)owner | mask;

        if (rt_mutex_has_waiters(lock))
                val |= RT_MUTEX_HAS_WAITERS;

        lock->owner = (struct task_struct *)val;
}

static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
{
        lock->owner = (struct task_struct *)
                        ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}

static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
        if (!rt_mutex_has_waiters(lock))
                clear_rt_mutex_waiters(lock);
}

/*
 * We can speed up the acquire/release if the architecture supports
 * cmpxchg and if there's no debugging state to be set up.
 */
#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
# define rt_mutex_cmpxchg(l,c,n)        (cmpxchg(&l->owner, c, n) == c)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
        unsigned long owner, *p = (unsigned long *) &lock->owner;

        do {
                owner = *p;
        } while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
}
#else
# define rt_mutex_cmpxchg(l,c,n)        (0)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
        lock->owner = (struct task_struct *)
                        ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}
#endif

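/*
 * With the cmpxchg variant the uncontended paths reduce to single
 * atomic transitions on lock->owner, e.g.:
 *
 *      rt_mutex_cmpxchg(lock, NULL, current)   - fast acquire
 *      rt_mutex_cmpxchg(lock, current, NULL)   - fast release
 *
 * Both fail (and fall back to the slow path) as soon as bit 0 or
 * bit 1 of lock->owner is set.
 */
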
/*
 * Calculate task priority from the waiter list priority
 *
 * Return task->normal_prio when the waiter list is empty or when
 * the waiter is not allowed to do priority boosting
 */
int rt_mutex_getprio(struct task_struct *task)
{
        if (likely(!task_has_pi_waiters(task)))
                return task->normal_prio;

        return min(task_top_pi_waiter(task)->pi_list_entry.prio,
                   task->normal_prio);
}

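/*
 * Example: lower prio values mean higher priority. A SCHED_NORMAL task
 * with ->normal_prio == 120 whose top PI waiter has prio 54 gets an
 * effective prio of min(54, 120) == 54; once its waiter list empties,
 * it drops back to 120.
 */
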
/*
 * Adjust the priority of a task, after its pi_waiters list has been
 * modified.
 *
 * This can be both boosting and unboosting. task->pi_lock must be held.
 */
static void __rt_mutex_adjust_prio(struct task_struct *task)
{
        int prio = rt_mutex_getprio(task);

        if (task->prio != prio)
                rt_mutex_setprio(task, prio);
}

/*
 * Adjust task priority (undo boosting). Called from the exit path of
 * rt_mutex_slowunlock() and rt_mutex_slowlock().
 *
 * (Note: We do this outside of the protection of lock->wait_lock to
 * allow the lock to be taken while or before we readjust the priority
 * of the task. We do not use the spin_xx_mutex() variants here as we
 * are outside of the debug path.)
 */
static void rt_mutex_adjust_prio(struct task_struct *task)
{
        unsigned long flags;

        spin_lock_irqsave(&task->pi_lock, flags);
        __rt_mutex_adjust_prio(task);
        spin_unlock_irqrestore(&task->pi_lock, flags);
}

/*
 * Max number of times we'll walk the boosting chain:
 */
int max_lock_depth = 1024;

/*
 * Adjust the priority chain. Also used for deadlock detection.
 * Decreases the task's usage count by one and may thus free the task.
 * Returns 0 or -EDEADLK.
 */
static int rt_mutex_adjust_prio_chain(struct task_struct *task,
                                      int deadlock_detect,
                                      struct rt_mutex *orig_lock,
                                      struct rt_mutex_waiter *orig_waiter,
                                      struct task_struct *top_task)
{
        struct rt_mutex *lock;
        struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
        int detect_deadlock, ret = 0, depth = 0;
        unsigned long flags;

        detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
                                                         deadlock_detect);

        /*
         * The (de)boosting is a step by step approach with a lot of
         * pitfalls. We want this to be preemptible and we want to hold
         * a maximum of two locks per step. So we have to check
         * carefully whether things change under us.
         */
 again:
        if (++depth > max_lock_depth) {
                static int prev_max;

                /*
                 * Print this only once. If the admin changes the limit,
                 * print a new message when reaching the limit again.
                 */
                if (prev_max != max_lock_depth) {
                        prev_max = max_lock_depth;
                        printk(KERN_WARNING "Maximum lock depth %d reached "
                               "task: %s (%d)\n", max_lock_depth,
                               top_task->comm, top_task->pid);
                }
                put_task_struct(task);

                return deadlock_detect ? -EDEADLK : 0;
        }
 retry:
        /*
         * The task cannot go away, as we took a reference on it before!
         */
        spin_lock_irqsave(&task->pi_lock, flags);

        waiter = task->pi_blocked_on;
        /*
         * Check whether the end of the boosting chain has been
         * reached or the state of the chain has changed while we
         * dropped the locks.
         */
        if (!waiter || !waiter->task)
                goto out_unlock_pi;

        /*
         * Check the orig_waiter state. After we dropped the locks,
         * the previous owner of the lock might have released the lock
         * and made us the pending owner:
         */
        if (orig_waiter && !orig_waiter->task)
                goto out_unlock_pi;

        /*
         * Drop out when the task has no waiters. Note that top_waiter
         * can be NULL when we are in deboosting mode!
         */
        if (top_waiter && (!task_has_pi_waiters(task) ||
                           top_waiter != task_top_pi_waiter(task)))
                goto out_unlock_pi;

        /*
         * When deadlock detection is off, we check whether further
         * priority adjustment is necessary.
         */
        if (!detect_deadlock && waiter->list_entry.prio == task->prio)
                goto out_unlock_pi;

        lock = waiter->lock;
        if (!spin_trylock(&lock->wait_lock)) {
                spin_unlock_irqrestore(&task->pi_lock, flags);
                cpu_relax();
                goto retry;
        }

        /* Deadlock detection */
        if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
                debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
                spin_unlock(&lock->wait_lock);
                ret = deadlock_detect ? -EDEADLK : 0;
                goto out_unlock_pi;
        }

        top_waiter = rt_mutex_top_waiter(lock);

        /* Requeue the waiter */
        plist_del(&waiter->list_entry, &lock->wait_list);
        waiter->list_entry.prio = task->prio;
        plist_add(&waiter->list_entry, &lock->wait_list);

        /* Release the task */
        spin_unlock_irqrestore(&task->pi_lock, flags);
        put_task_struct(task);

        /* Grab the next task */
        task = rt_mutex_owner(lock);
        get_task_struct(task);
        spin_lock_irqsave(&task->pi_lock, flags);

        if (waiter == rt_mutex_top_waiter(lock)) {
                /* Boost the owner */
                plist_del(&top_waiter->pi_list_entry, &task->pi_waiters);
                waiter->pi_list_entry.prio = waiter->list_entry.prio;
                plist_add(&waiter->pi_list_entry, &task->pi_waiters);
                __rt_mutex_adjust_prio(task);

        } else if (top_waiter == waiter) {
                /* Deboost the owner */
                plist_del(&waiter->pi_list_entry, &task->pi_waiters);
                waiter = rt_mutex_top_waiter(lock);
                waiter->pi_list_entry.prio = waiter->list_entry.prio;
                plist_add(&waiter->pi_list_entry, &task->pi_waiters);
                __rt_mutex_adjust_prio(task);
        }

        spin_unlock_irqrestore(&task->pi_lock, flags);

        top_waiter = rt_mutex_top_waiter(lock);
        spin_unlock(&lock->wait_lock);

        if (!detect_deadlock && waiter != top_waiter)
                goto out_put_task;

        goto again;

 out_unlock_pi:
        spin_unlock_irqrestore(&task->pi_lock, flags);
 out_put_task:
        put_task_struct(task);

        return ret;
}

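/*
 * Illustration: T1 (prio 30) blocks on L1, owned by T2 (prio 50), which
 * is itself blocked on L2, owned by T3 (prio 70). Blocking boosts T2 to
 * prio 30 and, because T2->pi_blocked_on is set, triggers a chain walk
 * starting at T2: T2's waiter is requeued on L2 with the new priority
 * and T3 is boosted to prio 30 in turn. The walk ends at T3, which is
 * not blocked on anything.
 */
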
/*
 * Optimization: check if we can steal the lock from the
 * assigned pending owner [which might not have taken the
 * lock yet]:
 */
static inline int try_to_steal_lock(struct rt_mutex *lock)
{
        struct task_struct *pendowner = rt_mutex_owner(lock);
        struct rt_mutex_waiter *next;
        unsigned long flags;

        if (!rt_mutex_owner_pending(lock))
                return 0;

        if (pendowner == current)
                return 1;

        spin_lock_irqsave(&pendowner->pi_lock, flags);
        if (current->prio >= pendowner->prio) {
                spin_unlock_irqrestore(&pendowner->pi_lock, flags);
                return 0;
        }

        /*
         * Check if a waiter is enqueued on the pending owner's
         * pi_waiters list. Remove it and readjust the pending
         * owner's priority.
         */
        if (likely(!rt_mutex_has_waiters(lock))) {
                spin_unlock_irqrestore(&pendowner->pi_lock, flags);
                return 1;
        }

        /* No chain handling, the pending owner is not blocked on anything: */
        next = rt_mutex_top_waiter(lock);
        plist_del(&next->pi_list_entry, &pendowner->pi_waiters);
        __rt_mutex_adjust_prio(pendowner);
        spin_unlock_irqrestore(&pendowner->pi_lock, flags);

        /*
         * We are going to steal the lock and a waiter was
         * enqueued on the pending owner's pi_waiters queue. So
         * we have to enqueue this waiter into current's
         * pi_waiters list. This covers the case where current is
         * boosted because it holds another lock and gets unboosted
         * because the booster is interrupted, so we would delay a
         * waiter with higher priority than current->normal_prio.
         *
         * Note: in the rare case of a SCHED_OTHER task changing
         * its priority and thus stealing the lock, next->task
         * might be current:
         */
        if (likely(next->task != current)) {
                spin_lock_irqsave(&current->pi_lock, flags);
                plist_add(&next->pi_list_entry, &current->pi_waiters);
                __rt_mutex_adjust_prio(current);
                spin_unlock_irqrestore(&current->pi_lock, flags);
        }
        return 1;
}

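/*
 * Example: current (prio 40) finds the lock assigned to a pending owner
 * with prio 50. Since 40 < 50 (lower value = higher priority), current
 * steals the lock; the deprived pending owner, once it runs and finds
 * the lock taken, simply re-queues itself as a waiter.
 */
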
/*
 * Try to take an rt-mutex
 *
 * This fails
 * - when the lock has a real owner
 * - when a different pending owner exists and has higher priority than current
 *
 * Must be called with lock->wait_lock held.
 */
static int try_to_take_rt_mutex(struct rt_mutex *lock)
{
        /*
         * We have to be careful here if the atomic speedups are
         * enabled, such that, when
         *  - no other waiter is on the lock
         *  - the lock has been released since we did the cmpxchg
         * the lock can be released or taken while we are doing the
         * checks and marking the lock with RT_MUTEX_HAS_WAITERS.
         *
         * The atomic acquire/release aware variant of
         * mark_rt_mutex_waiters uses a cmpxchg loop. After setting
         * the WAITERS bit, the atomic release / acquire can not
         * happen anymore and lock->wait_lock protects us from the
         * non-atomic case.
         *
         * Note that this might set lock->owner =
         * RT_MUTEX_HAS_WAITERS in the case the lock is not contended
         * anymore. This is fixed up when we take the ownership.
         * This is the transitional state explained at the top of this file.
         */
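        /*
         * For example: CPU0 releases the lock with a fast-path cmpxchg
         * at the same time CPU1 executes this function. Because CPU1
         * sets RT_MUTEX_HAS_WAITERS first, any later fast-path cmpxchg
         * on CPU0 fails, forcing CPU0 into the slow path where
         * lock->wait_lock serializes the two CPUs.
         */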
        mark_rt_mutex_waiters(lock);

        if (rt_mutex_owner(lock) && !try_to_steal_lock(lock))
                return 0;

        /* We got the lock. */
        debug_rt_mutex_lock(lock);

        rt_mutex_set_owner(lock, current, 0);

        rt_mutex_deadlock_account_lock(lock, current);

        return 1;
}

/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate pi chain
 *
 * This must be called with lock->wait_lock held.
 */
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
                                   struct rt_mutex_waiter *waiter,
                                   int detect_deadlock)
{
        struct task_struct *owner = rt_mutex_owner(lock);
        struct rt_mutex_waiter *top_waiter = waiter;
        unsigned long flags;
        int chain_walk = 0, res;

        spin_lock_irqsave(&current->pi_lock, flags);
        __rt_mutex_adjust_prio(current);
        waiter->task = current;
        waiter->lock = lock;
        plist_node_init(&waiter->list_entry, current->prio);
        plist_node_init(&waiter->pi_list_entry, current->prio);

        /* Get the top priority waiter on the lock */
        if (rt_mutex_has_waiters(lock))
                top_waiter = rt_mutex_top_waiter(lock);
        plist_add(&waiter->list_entry, &lock->wait_list);

        current->pi_blocked_on = waiter;

        spin_unlock_irqrestore(&current->pi_lock, flags);

        if (waiter == rt_mutex_top_waiter(lock)) {
                spin_lock_irqsave(&owner->pi_lock, flags);
                plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
                plist_add(&waiter->pi_list_entry, &owner->pi_waiters);

                __rt_mutex_adjust_prio(owner);
                if (owner->pi_blocked_on)
                        chain_walk = 1;
                spin_unlock_irqrestore(&owner->pi_lock, flags);
        }
        else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
                chain_walk = 1;

        if (!chain_walk)
                return 0;

        /*
         * The owner can't disappear while holding a lock,
         * so the owner struct is protected by wait_lock.
         * The reference gets dropped in rt_mutex_adjust_prio_chain()!
         */
        get_task_struct(owner);

        spin_unlock(&lock->wait_lock);

        res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
                                         current);

        spin_lock(&lock->wait_lock);

        return res;
}

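/*
 * Note on chain_walk in task_blocks_on_rt_mutex(): the walk is only
 * needed when the new waiter affects other tasks - either it is the
 * new top waiter and the owner is itself blocked (so the boost must
 * be propagated), or deadlock detection was requested for this waiter.
 */
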
/*
 * Wake up the next waiter on the lock.
 *
 * Remove the top waiter from the current task's waiter list and from
 * the lock waiter list. Set it as pending owner. Then wake it up.
 *
 * Called with lock->wait_lock held.
 */
static void wakeup_next_waiter(struct rt_mutex *lock)
{
        struct rt_mutex_waiter *waiter;
        struct task_struct *pendowner;
        unsigned long flags;

        spin_lock_irqsave(&current->pi_lock, flags);

        waiter = rt_mutex_top_waiter(lock);
        plist_del(&waiter->list_entry, &lock->wait_list);

        /*
         * Remove it from current->pi_waiters. We do not adjust a
         * possible priority boost right now. We execute wakeup in the
         * boosted mode and go back to normal after releasing
         * lock->wait_lock.
         */
        plist_del(&waiter->pi_list_entry, &current->pi_waiters);
        pendowner = waiter->task;
        waiter->task = NULL;

        rt_mutex_set_owner(lock, pendowner, RT_MUTEX_OWNER_PENDING);

        spin_unlock_irqrestore(&current->pi_lock, flags);

        /*
         * Clear the pi_blocked_on variable and enqueue a possible
         * waiter into the pi_waiters list of the pending owner. This
         * ensures that, should the pending owner get unboosted, a
         * waiter with higher priority than pending-owner->normal_prio
         * blocked on it is still accounted for.
         */
        spin_lock_irqsave(&pendowner->pi_lock, flags);

        WARN_ON(!pendowner->pi_blocked_on);
        WARN_ON(pendowner->pi_blocked_on != waiter);
        WARN_ON(pendowner->pi_blocked_on->lock != lock);

        pendowner->pi_blocked_on = NULL;

        if (rt_mutex_has_waiters(lock)) {
                struct rt_mutex_waiter *next;

                next = rt_mutex_top_waiter(lock);
                plist_add(&next->pi_list_entry, &pendowner->pi_waiters);
        }
        spin_unlock_irqrestore(&pendowner->pi_lock, flags);

        wake_up_process(pendowner);
}

/*
 * Remove a waiter from a lock
 *
 * Must be called with lock->wait_lock held
 */
static void remove_waiter(struct rt_mutex *lock,
                          struct rt_mutex_waiter *waiter)
{
        int first = (waiter == rt_mutex_top_waiter(lock));
        struct task_struct *owner = rt_mutex_owner(lock);
        unsigned long flags;
        int chain_walk = 0;

        spin_lock_irqsave(&current->pi_lock, flags);
        plist_del(&waiter->list_entry, &lock->wait_list);
        waiter->task = NULL;
        current->pi_blocked_on = NULL;
        spin_unlock_irqrestore(&current->pi_lock, flags);

        if (first && owner != current) {

                spin_lock_irqsave(&owner->pi_lock, flags);

                plist_del(&waiter->pi_list_entry, &owner->pi_waiters);

                if (rt_mutex_has_waiters(lock)) {
                        struct rt_mutex_waiter *next;

                        next = rt_mutex_top_waiter(lock);
                        plist_add(&next->pi_list_entry, &owner->pi_waiters);
                }
                __rt_mutex_adjust_prio(owner);

                if (owner->pi_blocked_on)
                        chain_walk = 1;

                spin_unlock_irqrestore(&owner->pi_lock, flags);
        }

        WARN_ON(!plist_node_empty(&waiter->pi_list_entry));

        if (!chain_walk)
                return;

        /* The reference gets dropped in rt_mutex_adjust_prio_chain()! */
        get_task_struct(owner);

        spin_unlock(&lock->wait_lock);

        rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);

        spin_lock(&lock->wait_lock);
}

/*
 * Recheck the pi chain, in case our priority was changed.
 *
 * Called from sched_setscheduler
 */
void rt_mutex_adjust_pi(struct task_struct *task)
{
        struct rt_mutex_waiter *waiter;
        unsigned long flags;

        spin_lock_irqsave(&task->pi_lock, flags);

        waiter = task->pi_blocked_on;
        if (!waiter || waiter->list_entry.prio == task->prio) {
                spin_unlock_irqrestore(&task->pi_lock, flags);
                return;
        }

        spin_unlock_irqrestore(&task->pi_lock, flags);

        /* The reference gets dropped in rt_mutex_adjust_prio_chain()! */
        get_task_struct(task);
        rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
}

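/*
 * Example: a task blocked on an rt-mutex with a queued waiter prio of
 * 60 is raised to prio 20 via sched_setscheduler(). Its waiter still
 * carries prio 60, so the check above fires and a chain walk requeues
 * the waiter and propagates the new priority to the lock owner.
 */
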
/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
                  struct hrtimer_sleeper *timeout,
                  int detect_deadlock)
{
        struct rt_mutex_waiter waiter;
        int ret = 0;

        debug_rt_mutex_init_waiter(&waiter);
        waiter.task = NULL;

        spin_lock(&lock->wait_lock);

        /* Try to acquire the lock again: */
        if (try_to_take_rt_mutex(lock)) {
                spin_unlock(&lock->wait_lock);
                return 0;
        }

        set_current_state(state);

        /* Set up the timer, when timeout != NULL */
        if (unlikely(timeout))
                hrtimer_start(&timeout->timer, timeout->timer.expires,
                              HRTIMER_MODE_ABS);

        for (;;) {
                /* Try to acquire the lock: */
                if (try_to_take_rt_mutex(lock))
                        break;

                /*
                 * TASK_INTERRUPTIBLE checks for signals and
                 * timeout. Ignored otherwise.
                 */
                if (unlikely(state == TASK_INTERRUPTIBLE)) {
                        /* Signal pending? */
                        if (signal_pending(current))
                                ret = -EINTR;
                        if (timeout && !timeout->task)
                                ret = -ETIMEDOUT;
                        if (ret)
                                break;
                }

                /*
                 * waiter.task is NULL the first time we come here and
                 * when we have been woken up by the previous owner
                 * but the lock got stolen by a higher prio task.
                 */
                if (!waiter.task) {
                        ret = task_blocks_on_rt_mutex(lock, &waiter,
                                                      detect_deadlock);
                        /*
                         * If we got woken up by the owner then start
                         * the loop all over without going into schedule
                         * to try to get the lock now:
                         */
                        if (unlikely(!waiter.task)) {
                                /*
                                 * Reset the return value. We might
                                 * have returned with -EDEADLK and the
                                 * owner released the lock while we
                                 * were walking the pi chain.
                                 */
                                ret = 0;
                                continue;
                        }
                        if (unlikely(ret))
                                break;
                }

                spin_unlock(&lock->wait_lock);

                debug_rt_mutex_print_deadlock(&waiter);

                if (waiter.task)
                        schedule_rt_mutex(lock);

                spin_lock(&lock->wait_lock);
                set_current_state(state);
        }

        set_current_state(TASK_RUNNING);

        if (unlikely(waiter.task))
                remove_waiter(lock, &waiter);

        /*
         * try_to_take_rt_mutex() sets the waiter bit
         * unconditionally. We might have to fix that up.
         */
        fixup_rt_mutex_waiters(lock);

        spin_unlock(&lock->wait_lock);

        /* Remove pending timer: */
        if (unlikely(timeout))
                hrtimer_cancel(&timeout->timer);

        /*
         * Readjust priority, when we did not get the lock. We might
         * have been the pending owner and boosted. Since we did not
         * take the lock, the PI boost has to go.
         */
        if (unlikely(ret))
                rt_mutex_adjust_prio(current);

        debug_rt_mutex_free_waiter(&waiter);

        return ret;
}

/*
 * Slow path try-lock function:
 */
static inline int
rt_mutex_slowtrylock(struct rt_mutex *lock)
{
        int ret = 0;

        spin_lock(&lock->wait_lock);

        if (likely(rt_mutex_owner(lock) != current)) {

                ret = try_to_take_rt_mutex(lock);
                /*
                 * try_to_take_rt_mutex() sets the lock waiters
                 * bit unconditionally. Clean this up.
                 */
                fixup_rt_mutex_waiters(lock);
        }

        spin_unlock(&lock->wait_lock);

        return ret;
}

/*
 * Slow path to release a rt-mutex:
 */
static void __sched
rt_mutex_slowunlock(struct rt_mutex *lock)
{
        spin_lock(&lock->wait_lock);

        debug_rt_mutex_unlock(lock);

        rt_mutex_deadlock_account_unlock(current);

        if (!rt_mutex_has_waiters(lock)) {
                lock->owner = NULL;
                spin_unlock(&lock->wait_lock);
                return;
        }

        wakeup_next_waiter(lock);

        spin_unlock(&lock->wait_lock);

        /* Undo pi boosting if necessary: */
        rt_mutex_adjust_prio(current);
}

/*
 * Debug-aware fast/slow path lock, trylock and unlock.
 *
 * The atomic acquire/release ops are compiled away when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */
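/*
 * In the latter case rt_mutex_cmpxchg() is defined as (0), so the
 * compiler can eliminate the fast path entirely and each wrapper
 * below collapses into a direct call of its slow-path function.
 */
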
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
                  int detect_deadlock,
                  int (*slowfn)(struct rt_mutex *lock, int state,
                                struct hrtimer_sleeper *timeout,
                                int detect_deadlock))
{
        if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 0;
        } else
                return slowfn(lock, state, NULL, detect_deadlock);
}

static inline int
rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
                        struct hrtimer_sleeper *timeout, int detect_deadlock,
                        int (*slowfn)(struct rt_mutex *lock, int state,
                                      struct hrtimer_sleeper *timeout,
                                      int detect_deadlock))
{
        if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 0;
        } else
                return slowfn(lock, state, timeout, detect_deadlock);
}

static inline int
rt_mutex_fasttrylock(struct rt_mutex *lock,
                     int (*slowfn)(struct rt_mutex *lock))
{
        if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 1;
        }
        return slowfn(lock);
}

static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
                    void (*slowfn)(struct rt_mutex *lock))
{
        if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
                rt_mutex_deadlock_account_unlock(current);
        else
                slowfn(lock);
}

/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
        might_sleep();

        rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);

/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptibly
 *
 * @lock: the rt_mutex to be locked
 * @detect_deadlock: deadlock detection on/off
 *
 * Returns:
 *  0 on success
 * -EINTR when interrupted by a signal
 * -EDEADLK when the lock would deadlock (when deadlock detection is on)
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
                                        int detect_deadlock)
{
        might_sleep();

        return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
                                 detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);

/**
 * rt_mutex_timed_lock - lock a rt_mutex interruptibly, with a timeout
 *                       structure provided by the caller
 *
 * @lock: the rt_mutex to be locked
 * @timeout: timeout structure or NULL (no timeout)
 * @detect_deadlock: deadlock detection on/off
 *
 * Returns:
 *  0 on success
 * -EINTR when interrupted by a signal
 * -ETIMEDOUT when the timeout expired
 * -EDEADLK when the lock would deadlock (when deadlock detection is on)
 */
int
rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
                    int detect_deadlock)
{
        might_sleep();

        return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
                                       detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);

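/*
 * Illustrative caller-side sketch (not from this file; modelled on the
 * PI-futex code): the caller owns the hrtimer_sleeper setup and passes
 * an absolute expiry time.
 *
 *      struct hrtimer_sleeper timeout;
 *
 *      hrtimer_init(&timeout.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 *      hrtimer_init_sleeper(&timeout, current);
 *      timeout.timer.expires = expiry;         (absolute ktime_t)
 *      ret = rt_mutex_timed_lock(lock, &timeout, 0);
 */
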
/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 *
 * Returns 1 on success and 0 on contention
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
        return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);

/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
        rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);

/**
 * rt_mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void rt_mutex_destroy(struct rt_mutex *lock)
{
        WARN_ON(rt_mutex_is_locked(lock));
#ifdef CONFIG_DEBUG_RT_MUTEXES
        lock->magic = NULL;
#endif
}
EXPORT_SYMBOL_GPL(rt_mutex_destroy);

/**
 * __rt_mutex_init - initialize the rt lock
 *
 * @lock: the rt lock to be initialized
 *
 * Initialize the rt lock to unlocked state.
 *
 * Initializing a locked rt lock is not allowed.
 */
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
        lock->owner = NULL;
        spin_lock_init(&lock->wait_lock);
        plist_head_init(&lock->wait_list, &lock->wait_lock);

        debug_rt_mutex_init(lock, name);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);

/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf
 *                              of a proxy owner
 *
 * @lock: the rt_mutex to be locked
 * @proxy_owner: the task to set as owner
 *
 * No locking. The caller has to do the serializing itself.
 * Special API call for PI-futex support
 */
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
                                struct task_struct *proxy_owner)
{
        __rt_mutex_init(lock, NULL);
        debug_rt_mutex_proxy_lock(lock, proxy_owner);
        rt_mutex_set_owner(lock, proxy_owner, 0);
        rt_mutex_deadlock_account_lock(lock, proxy_owner);
}

/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock: the rt_mutex to be unlocked
 * @proxy_owner: the task that was the proxy owner
 *
 * No locking. The caller has to do the serializing itself.
 * Special API call for PI-futex support
 */
void rt_mutex_proxy_unlock(struct rt_mutex *lock,
                           struct task_struct *proxy_owner)
{
        debug_rt_mutex_proxy_unlock(lock);
        rt_mutex_set_owner(lock, NULL, 0);
        rt_mutex_deadlock_account_unlock(proxy_owner);
}

/**
 * rt_mutex_next_owner - return the next owner of the lock
 *
 * @lock: the rt lock to query
 *
 * Returns the next owner of the lock or NULL
 *
 * Caller has to serialize against other accessors to the lock
 * itself.
 *
 * Special API call for PI-futex support
 */
struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
{
        if (!rt_mutex_has_waiters(lock))
                return NULL;

        return rt_mutex_top_waiter(lock)->task;
}