/*
 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner.
 *
 * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
 * Copyright (C) 2006 Esben Nielsen
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/timer.h>

#include "rtmutex_common.h"

#ifdef CONFIG_DEBUG_RT_MUTEXES
# include "rtmutex-debug.h"
#else
# include "rtmutex.h"
#endif

/*
 * lock->owner state tracking:
 *
 * lock->owner holds the task_struct pointer of the owner. Bits 0 and 1
 * are used to keep track of the "owner is pending" and "lock has
 * waiters" state.
 *
 * owner	bit1	bit0
 * NULL		0	0	lock is free (fast acquire possible)
 * NULL		0	1	invalid state
 * NULL		1	0	Transitional state*
 * NULL		1	1	invalid state
 * taskpointer	0	0	lock is held (fast release possible)
 * taskpointer	0	1	task is pending owner
 * taskpointer	1	0	lock is held and has waiters
 * taskpointer	1	1	task is pending owner and lock has more waiters
 *
 * Pending ownership is assigned to the top (highest priority) waiter
 * of the lock when the lock is released. The thread is woken up and
 * can now take the lock. Until the lock is taken (bit 0 cleared), a
 * competing higher priority thread can steal the lock, which puts the
 * woken up thread back on the waiters list.
 *
 * The fast atomic compare-exchange based acquire and release is only
 * possible when bits 0 and 1 of lock->owner are both 0.
 *
 * (*) There is a small window where the owner can be NULL and the
 * "lock has waiters" bit is set. This can happen when grabbing the lock:
 * to prevent a cmpxchg of the owner releasing the lock, we need to set
 * this bit before looking at the lock, hence it is a transitional state.
 */
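
/*
 * Illustrative sketch (not part of this file's build): how the two
 * state bits are packed into the owner pointer. task_struct pointers
 * are at least 4-byte aligned, so bits 0 and 1 are free for flags.
 * The mask values mirror RT_MUTEX_OWNER_PENDING / RT_MUTEX_HAS_WAITERS
 * from rtmutex_common.h; the demo_* helpers are hypothetical.
 */
#if 0
#define DEMO_OWNER_PENDING	0x1UL
#define DEMO_HAS_WAITERS	0x2UL

static struct task_struct *demo_owner(unsigned long val)
{
	/* Strip the two flag bits to recover the task pointer. */
	return (struct task_struct *)(val & ~(DEMO_OWNER_PENDING |
					      DEMO_HAS_WAITERS));
}

static int demo_fast_path_possible(unsigned long val)
{
	/* The cmpxchg fast path works only when both flag bits are clear. */
	return !(val & (DEMO_OWNER_PENDING | DEMO_HAS_WAITERS));
}
#endif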

/*
 * Set the lock owner, preserving the "lock has waiters" bit:
 */
static void
rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner,
		   unsigned long mask)
{
	unsigned long val = (unsigned long)owner | mask;

	if (rt_mutex_has_waiters(lock))
		val |= RT_MUTEX_HAS_WAITERS;

	lock->owner = (struct task_struct *)val;
}

static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}

/*
 * Clear the "lock has waiters" bit if it is stale:
 */
static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
	if (!rt_mutex_has_waiters(lock))
		clear_rt_mutex_waiters(lock);
}

/*
 * We can speed up the acquire/release, if the architecture
 * supports cmpxchg and if there's no debugging state to be set up.
 */
#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
# define rt_mutex_cmpxchg(l,c,n)	(cmpxchg(&l->owner, c, n) == c)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	unsigned long owner, *p = (unsigned long *) &lock->owner;

	do {
		owner = *p;
	} while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
}
#else
# define rt_mutex_cmpxchg(l,c,n)	(0)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}
#endif
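
/*
 * Illustrative sketch (not compiled here): the fast path is a single
 * compare-and-exchange on lock->owner. Acquire succeeds only when the
 * lock is free and both state bits are clear (owner == NULL), release
 * only when there are no waiters (owner == current, bits clear). Any
 * set flag bit makes the cmpxchg fail and forces the slow path.
 */
#if 0
	/* fast acquire: NULL -> current; fails if owned or any bit set */
	if (rt_mutex_cmpxchg(lock, NULL, current))
		return;		/* got the lock without taking wait_lock */

	/* fast release: current -> NULL; fails if HAS_WAITERS is set */
	if (rt_mutex_cmpxchg(lock, current, NULL))
		return;		/* released without waking anyone */
#endif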

/*
 * Calculate task priority from the waiter list priority.
 *
 * Return task->normal_prio when the waiter list is empty or when
 * the waiter is not allowed to do priority boosting.
 */
int rt_mutex_getprio(struct task_struct *task)
{
	if (likely(!task_has_pi_waiters(task)))
		return task->normal_prio;

	return min(task_top_pi_waiter(task)->pi_list_entry.prio,
		   task->normal_prio);
}
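
/*
 * Worked example (hypothetical values): kernel priorities are inverted,
 * i.e. a lower number means a higher priority, so min() picks the more
 * urgent of the task's own priority and the top pi-waiter's priority.
 * A normal_prio of 30 with a top pi-waiter of prio 10 yields an
 * effective prio of min(10, 30) = 10 (boosted); with a top waiter at
 * prio 50 no boosting happens: min(50, 30) = 30.
 */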

/*
 * Adjust the priority of a task, after its pi_waiters got modified.
 *
 * This can be both boosting and unboosting. task->pi_lock must be held.
 */
static void __rt_mutex_adjust_prio(struct task_struct *task)
{
	int prio = rt_mutex_getprio(task);

	if (task->prio != prio)
		rt_mutex_setprio(task, prio);
}

/*
 * Adjust task priority (undo boosting). Called from the exit path of
 * rt_mutex_slowunlock() and rt_mutex_slowlock().
 *
 * (Note: We do this outside of the protection of lock->wait_lock to
 * allow the lock to be taken while or before we readjust the priority
 * of task. We do not use the spin_xx_mutex() variants here as we are
 * outside of the debug path.)
 */
static void rt_mutex_adjust_prio(struct task_struct *task)
{
	unsigned long flags;

	spin_lock_irqsave(&task->pi_lock, flags);
	__rt_mutex_adjust_prio(task);
	spin_unlock_irqrestore(&task->pi_lock, flags);
}

/*
 * Max number of times we'll walk the boosting chain:
 */
int max_lock_depth = 1024;

/*
 * Adjust the priority chain. Also used for deadlock detection.
 * Drops the task's reference count by one - may thus free the task.
 * Returns 0 or -EDEADLK.
 */
static int rt_mutex_adjust_prio_chain(task_t *task,
				      int deadlock_detect,
				      struct rt_mutex *orig_lock,
				      struct rt_mutex_waiter *orig_waiter
				      __IP_DECL__)
{
	struct rt_mutex *lock;
	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
	int detect_deadlock, ret = 0, depth = 0;
	unsigned long flags;

	detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
							 deadlock_detect);

	/*
	 * The (de)boosting is a step by step approach with a lot of
	 * pitfalls. We want this to be preemptible and we want to hold
	 * a maximum of two locks per step. So we have to check
	 * carefully whether things change under us.
	 */
again:
	if (++depth > max_lock_depth) {
		static int prev_max;

		/*
		 * Print this only once. If the admin changes the limit,
		 * print a new message when reaching the limit again.
		 */
		if (prev_max != max_lock_depth) {
			prev_max = max_lock_depth;
			printk(KERN_WARNING "Maximum lock depth %d reached "
			       "task: %s (%d)\n", max_lock_depth,
			       current->comm, current->pid);
		}
		put_task_struct(task);

		return deadlock_detect ? -EDEADLK : 0;
	}
retry:
	/*
	 * The task cannot go away, as we took a reference with
	 * get_task_struct() before!
	 */
	spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	/*
	 * Check whether the end of the boosting chain has been
	 * reached or the state of the chain has changed while we
	 * dropped the locks.
	 */
	if (!waiter || !waiter->task)
		goto out_unlock_pi;

	if (top_waiter && (!task_has_pi_waiters(task) ||
			   top_waiter != task_top_pi_waiter(task)))
		goto out_unlock_pi;

	/*
	 * When deadlock detection is off, we check whether further
	 * priority adjustment is necessary at all.
	 */
	if (!detect_deadlock && waiter->list_entry.prio == task->prio)
		goto out_unlock_pi;

	lock = waiter->lock;
	if (!spin_trylock(&lock->wait_lock)) {
		spin_unlock_irqrestore(&task->pi_lock, flags);
		cpu_relax();
		goto retry;
	}

	/* Deadlock detection */
	if (lock == orig_lock || rt_mutex_owner(lock) == current) {
		debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
		spin_unlock(&lock->wait_lock);
		ret = deadlock_detect ? -EDEADLK : 0;
		goto out_unlock_pi;
	}

	top_waiter = rt_mutex_top_waiter(lock);

	/* Requeue the waiter */
	plist_del(&waiter->list_entry, &lock->wait_list);
	waiter->list_entry.prio = task->prio;
	plist_add(&waiter->list_entry, &lock->wait_list);

	/* Release the task */
	spin_unlock_irqrestore(&task->pi_lock, flags);
	put_task_struct(task);

	/* Grab the next task */
	task = rt_mutex_owner(lock);
	spin_lock_irqsave(&task->pi_lock, flags);

	if (waiter == rt_mutex_top_waiter(lock)) {
		/* Boost the owner */
		plist_del(&top_waiter->pi_list_entry, &task->pi_waiters);
		waiter->pi_list_entry.prio = waiter->list_entry.prio;
		plist_add(&waiter->pi_list_entry, &task->pi_waiters);
		__rt_mutex_adjust_prio(task);

	} else if (top_waiter == waiter) {
		/* Deboost the owner */
		plist_del(&waiter->pi_list_entry, &task->pi_waiters);
		waiter = rt_mutex_top_waiter(lock);
		waiter->pi_list_entry.prio = waiter->list_entry.prio;
		plist_add(&waiter->pi_list_entry, &task->pi_waiters);
		__rt_mutex_adjust_prio(task);
	}

	get_task_struct(task);
	spin_unlock_irqrestore(&task->pi_lock, flags);

	top_waiter = rt_mutex_top_waiter(lock);
	spin_unlock(&lock->wait_lock);

	if (!detect_deadlock && waiter != top_waiter)
		goto out_put_task;

	goto again;

out_unlock_pi:
	spin_unlock_irqrestore(&task->pi_lock, flags);
out_put_task:
	put_task_struct(task);
	return ret;
}
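
/*
 * Illustrative scenario (hypothetical tasks and locks, not compiled
 * here): how the chain walk catches an ABBA deadlock when detection
 * is enabled.
 */
#if 0
	/* task A */
	rt_mutex_lock(&L1);
	ret = rt_mutex_lock_interruptible(&L2, 1);	/* blocks on L2 */

	/* task B (already owns L2) */
	ret = rt_mutex_lock_interruptible(&L1, 1);
	/*
	 * B blocks on L1; the chain walk follows L1's owner A to the
	 * lock A is blocked on (L2), finds that L2's owner is current
	 * (B itself) and returns -EDEADLK to task B.
	 */
#endif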

/*
 * Optimization: check if we can steal the lock from the
 * assigned pending owner [which might not have taken the
 * lock yet]:
 */
static inline int try_to_steal_lock(struct rt_mutex *lock)
{
	struct task_struct *pendowner = rt_mutex_owner(lock);
	struct rt_mutex_waiter *next;
	unsigned long flags;

	if (!rt_mutex_owner_pending(lock))
		return 0;

	if (pendowner == current)
		return 1;

	spin_lock_irqsave(&pendowner->pi_lock, flags);
	if (current->prio >= pendowner->prio) {
		spin_unlock_irqrestore(&pendowner->pi_lock, flags);
		return 0;
	}

	/*
	 * Check if a waiter is enqueued on the pending owner's
	 * pi_waiters list. Remove it and readjust the pending
	 * owner's priority.
	 */
	if (likely(!rt_mutex_has_waiters(lock))) {
		spin_unlock_irqrestore(&pendowner->pi_lock, flags);
		return 1;
	}

	/* No chain handling, the pending owner is not blocked on anything: */
	next = rt_mutex_top_waiter(lock);
	plist_del(&next->pi_list_entry, &pendowner->pi_waiters);
	__rt_mutex_adjust_prio(pendowner);
	spin_unlock_irqrestore(&pendowner->pi_lock, flags);

	/*
	 * We are going to steal the lock and a waiter was
	 * enqueued on the pending owner's pi_waiters queue. So
	 * we have to enqueue this waiter into the
	 * current->pi_waiters list. This covers the case where
	 * current is boosted because it holds another lock and
	 * gets unboosted because the booster is interrupted;
	 * otherwise we would delay a waiter with higher priority
	 * than current->normal_prio.
	 *
	 * Note: in the rare case of a SCHED_OTHER task changing
	 * its priority and thus stealing the lock, next->task
	 * might be current:
	 */
	if (likely(next->task != current)) {
		spin_lock_irqsave(&current->pi_lock, flags);
		plist_add(&next->pi_list_entry, &current->pi_waiters);
		__rt_mutex_adjust_prio(current);
		spin_unlock_irqrestore(&current->pi_lock, flags);
	}
	return 1;
}
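
/*
 * Worked example (hypothetical priorities): the pending owner was woken
 * at prio 20 but has not taken the lock yet. A task at prio 10 calling
 * try_to_take_rt_mutex() passes the check above (10 < 20) and steals
 * the lock; a task at prio 30 fails it (30 >= 20), keeps the pending
 * owner in place and enqueues itself as a waiter instead.
 */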

/*
 * Try to take an rt-mutex.
 *
 * This fails
 * - when the lock has a real owner
 * - when a different pending owner exists and has higher priority than current
 *
 * Must be called with lock->wait_lock held.
 */
static int try_to_take_rt_mutex(struct rt_mutex *lock __IP_DECL__)
{
	/*
	 * We have to be careful here if the atomic speedups are
	 * enabled, such that, when
	 * - no other waiter is on the lock
	 * - the lock has been released since we did the cmpxchg
	 * the lock can be released or taken while we are doing the
	 * checks and marking the lock with RT_MUTEX_HAS_WAITERS.
	 *
	 * The atomic acquire/release aware variant of
	 * mark_rt_mutex_waiters uses a cmpxchg loop. After setting
	 * the WAITERS bit, the atomic release / acquire can not
	 * happen anymore and lock->wait_lock protects us from the
	 * non-atomic case.
	 *
	 * Note that this might set lock->owner =
	 * RT_MUTEX_HAS_WAITERS in the case the lock is not contended
	 * anymore. This is fixed up when we take the ownership.
	 * This is the transitional state explained at the top of this file.
	 */
	mark_rt_mutex_waiters(lock);

	if (rt_mutex_owner(lock) && !try_to_steal_lock(lock))
		return 0;

	/* We got the lock. */
	debug_rt_mutex_lock(lock __IP__);

	rt_mutex_set_owner(lock, current, 0);

	rt_mutex_deadlock_account_lock(lock, current);

	return 1;
}

/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate pi chain.
 *
 * This must be called with lock->wait_lock held.
 */
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
				   struct rt_mutex_waiter *waiter,
				   int detect_deadlock
				   __IP_DECL__)
{
	struct rt_mutex_waiter *top_waiter = waiter;
	task_t *owner = rt_mutex_owner(lock);
	int boost = 0, res;
	unsigned long flags;

	spin_lock_irqsave(&current->pi_lock, flags);
	__rt_mutex_adjust_prio(current);
	waiter->task = current;
	waiter->lock = lock;
	plist_node_init(&waiter->list_entry, current->prio);
	plist_node_init(&waiter->pi_list_entry, current->prio);

	/* Get the top priority waiter on the lock */
	if (rt_mutex_has_waiters(lock))
		top_waiter = rt_mutex_top_waiter(lock);
	plist_add(&waiter->list_entry, &lock->wait_list);

	current->pi_blocked_on = waiter;

	spin_unlock_irqrestore(&current->pi_lock, flags);

	if (waiter == rt_mutex_top_waiter(lock)) {
		spin_lock_irqsave(&owner->pi_lock, flags);
		plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
		plist_add(&waiter->pi_list_entry, &owner->pi_waiters);

		__rt_mutex_adjust_prio(owner);
		if (owner->pi_blocked_on) {
			boost = 1;
			get_task_struct(owner);
		}
		spin_unlock_irqrestore(&owner->pi_lock, flags);
	} else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) {
		spin_lock_irqsave(&owner->pi_lock, flags);
		if (owner->pi_blocked_on) {
			boost = 1;
			get_task_struct(owner);
		}
		spin_unlock_irqrestore(&owner->pi_lock, flags);
	}
	if (!boost)
		return 0;

	spin_unlock(&lock->wait_lock);

	res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock,
					 waiter __IP__);

	spin_lock(&lock->wait_lock);

	return res;
}

/*
 * Wake up the next waiter on the lock.
 *
 * Remove the top waiter from the current task's waiter list and from
 * the lock waiter list. Set it as pending owner. Then wake it up.
 *
 * Called with lock->wait_lock held.
 */
static void wakeup_next_waiter(struct rt_mutex *lock)
{
	struct rt_mutex_waiter *waiter;
	struct task_struct *pendowner;
	unsigned long flags;

	spin_lock_irqsave(&current->pi_lock, flags);

	waiter = rt_mutex_top_waiter(lock);
	plist_del(&waiter->list_entry, &lock->wait_list);

	/*
	 * Remove it from current->pi_waiters. We do not adjust a
	 * possible priority boost right now. We execute wakeup in the
	 * boosted mode and go back to normal after releasing
	 * lock->wait_lock.
	 */
	plist_del(&waiter->pi_list_entry, &current->pi_waiters);
	pendowner = waiter->task;
	waiter->task = NULL;

	rt_mutex_set_owner(lock, pendowner, RT_MUTEX_OWNER_PENDING);

	spin_unlock_irqrestore(&current->pi_lock, flags);

	/*
	 * Clear the pi_blocked_on variable and enqueue a possible
	 * waiter into the pi_waiters list of the pending owner. This
	 * prevents a waiter with higher priority than
	 * pending-owner->normal_prio from being blocked on the
	 * unboosted (pending) owner in case the pending owner gets
	 * unboosted.
	 */
	spin_lock_irqsave(&pendowner->pi_lock, flags);

	WARN_ON(!pendowner->pi_blocked_on);
	WARN_ON(pendowner->pi_blocked_on != waiter);
	WARN_ON(pendowner->pi_blocked_on->lock != lock);

	pendowner->pi_blocked_on = NULL;

	if (rt_mutex_has_waiters(lock)) {
		struct rt_mutex_waiter *next;

		next = rt_mutex_top_waiter(lock);
		plist_add(&next->pi_list_entry, &pendowner->pi_waiters);
	}
	spin_unlock_irqrestore(&pendowner->pi_lock, flags);

	wake_up_process(pendowner);
}

/*
 * Remove a waiter from a lock.
 *
 * Must be called with lock->wait_lock held.
 */
static void remove_waiter(struct rt_mutex *lock,
			  struct rt_mutex_waiter *waiter __IP_DECL__)
{
	int first = (waiter == rt_mutex_top_waiter(lock));
	int boost = 0;
	task_t *owner = rt_mutex_owner(lock);
	unsigned long flags;

	spin_lock_irqsave(&current->pi_lock, flags);
	plist_del(&waiter->list_entry, &lock->wait_list);
	waiter->task = NULL;
	current->pi_blocked_on = NULL;
	spin_unlock_irqrestore(&current->pi_lock, flags);

	if (first && owner != current) {
		spin_lock_irqsave(&owner->pi_lock, flags);

		plist_del(&waiter->pi_list_entry, &owner->pi_waiters);

		if (rt_mutex_has_waiters(lock)) {
			struct rt_mutex_waiter *next;

			next = rt_mutex_top_waiter(lock);
			plist_add(&next->pi_list_entry, &owner->pi_waiters);
		}
		__rt_mutex_adjust_prio(owner);

		if (owner->pi_blocked_on) {
			boost = 1;
			get_task_struct(owner);
		}
		spin_unlock_irqrestore(&owner->pi_lock, flags);
	}

	WARN_ON(!plist_node_empty(&waiter->pi_list_entry));

	if (!boost)
		return;

	spin_unlock(&lock->wait_lock);

	rt_mutex_adjust_prio_chain(owner, 0, lock, NULL __IP__);

	spin_lock(&lock->wait_lock);
}

/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
		  struct hrtimer_sleeper *timeout,
		  int detect_deadlock __IP_DECL__)
{
	struct rt_mutex_waiter waiter;
	int ret = 0;

	debug_rt_mutex_init_waiter(&waiter);
	waiter.task = NULL;

	spin_lock(&lock->wait_lock);

	/* Try to acquire the lock again: */
	if (try_to_take_rt_mutex(lock __IP__)) {
		spin_unlock(&lock->wait_lock);
		return 0;
	}

	set_current_state(state);

	/* Set up the timer, when timeout != NULL */
	if (unlikely(timeout))
		hrtimer_start(&timeout->timer, timeout->timer.expires,
			      HRTIMER_ABS);

	for (;;) {
		/* Try to acquire the lock: */
		if (try_to_take_rt_mutex(lock __IP__))
			break;

		/*
		 * TASK_INTERRUPTIBLE checks for signals and
		 * timeout. Ignored otherwise.
		 */
		if (unlikely(state == TASK_INTERRUPTIBLE)) {
			/* Signal pending? */
			if (signal_pending(current))
				ret = -EINTR;
			if (timeout && !timeout->task)
				ret = -ETIMEDOUT;
			if (ret)
				break;
		}

		/*
		 * waiter.task is NULL the first time we come here and
		 * when we have been woken up by the previous owner
		 * but the lock got stolen by a higher prio task.
		 */
		if (!waiter.task) {
			ret = task_blocks_on_rt_mutex(lock, &waiter,
						      detect_deadlock __IP__);
			/*
			 * If we got woken up by the owner then start the
			 * loop all over without going into schedule to try
			 * to get the lock now:
			 */
			if (unlikely(!waiter.task))
				continue;

			if (unlikely(ret))
				break;
		}
		spin_unlock(&lock->wait_lock);

		debug_rt_mutex_print_deadlock(&waiter);

		if (waiter.task)
			schedule_rt_mutex(lock);

		spin_lock(&lock->wait_lock);
		set_current_state(state);
	}

	set_current_state(TASK_RUNNING);

	if (unlikely(waiter.task))
		remove_waiter(lock, &waiter __IP__);

	/*
	 * try_to_take_rt_mutex() sets the waiter bit
	 * unconditionally. We might have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	spin_unlock(&lock->wait_lock);

	/* Remove pending timer: */
	if (unlikely(timeout))
		hrtimer_cancel(&timeout->timer);

	/*
	 * Readjust priority, when we did not get the lock. We might
	 * have been the pending owner and boosted. Since we did not
	 * take the lock, the PI boost has to go.
	 */
	if (unlikely(ret))
		rt_mutex_adjust_prio(current);

	debug_rt_mutex_free_waiter(&waiter);

	return ret;
}

/*
 * Slow path try-lock function:
 */
static inline int
rt_mutex_slowtrylock(struct rt_mutex *lock __IP_DECL__)
{
	int ret = 0;

	spin_lock(&lock->wait_lock);

	if (likely(rt_mutex_owner(lock) != current)) {
		ret = try_to_take_rt_mutex(lock __IP__);
		/*
		 * try_to_take_rt_mutex() sets the lock waiters
		 * bit unconditionally. Clean this up.
		 */
		fixup_rt_mutex_waiters(lock);
	}

	spin_unlock(&lock->wait_lock);

	return ret;
}

/*
 * Slow path to release a rt-mutex:
 */
static void __sched
rt_mutex_slowunlock(struct rt_mutex *lock)
{
	spin_lock(&lock->wait_lock);

	debug_rt_mutex_unlock(lock);

	rt_mutex_deadlock_account_unlock(current);

	if (!rt_mutex_has_waiters(lock)) {
		lock->owner = NULL;
		spin_unlock(&lock->wait_lock);
		return;
	}

	wakeup_next_waiter(lock);

	spin_unlock(&lock->wait_lock);

	/* Undo pi boosting if necessary: */
	rt_mutex_adjust_prio(current);
}

/*
 * debug aware fast / slowpath lock, trylock and unlock.
 *
 * The atomic acquire/release ops are compiled away when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
		  int detect_deadlock,
		  int (*slowfn)(struct rt_mutex *lock, int state,
				struct hrtimer_sleeper *timeout,
				int detect_deadlock __IP_DECL__))
{
	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 0;
	} else
		return slowfn(lock, state, NULL, detect_deadlock __RET_IP__);
}

static inline int
rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
			struct hrtimer_sleeper *timeout, int detect_deadlock,
			int (*slowfn)(struct rt_mutex *lock, int state,
				      struct hrtimer_sleeper *timeout,
				      int detect_deadlock __IP_DECL__))
{
	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 0;
	} else
		return slowfn(lock, state, timeout, detect_deadlock __RET_IP__);
}

static inline int
rt_mutex_fasttrylock(struct rt_mutex *lock,
		     int (*slowfn)(struct rt_mutex *lock __IP_DECL__))
{
	if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 1;
	}
	return slowfn(lock __RET_IP__);
}

static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
		    void (*slowfn)(struct rt_mutex *lock))
{
	if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
		rt_mutex_deadlock_account_unlock(current);
	else
		slowfn(lock);
}

/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
	might_sleep();

	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);

/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
 *
 * @lock: the rt_mutex to be locked
 * @detect_deadlock: deadlock detection on/off
 *
 * Returns:
 *  0 on success
 * -EINTR when interrupted by a signal
 * -EDEADLK when the lock would deadlock (when deadlock detection is on)
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
					int detect_deadlock)
{
	might_sleep();

	return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
				 detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);

/**
 * rt_mutex_timed_lock - lock a rt_mutex interruptible, with a timeout;
 *			 the timeout structure is provided by the caller
 *
 * @lock: the rt_mutex to be locked
 * @timeout: timeout structure or NULL (no timeout)
 * @detect_deadlock: deadlock detection on/off
 *
 * Returns:
 *  0 on success
 * -EINTR when interrupted by a signal
 * -ETIMEDOUT when the timeout expired
 * -EDEADLK when the lock would deadlock (when deadlock detection is on)
 */
int
rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
		    int detect_deadlock)
{
	might_sleep();

	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
				       detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
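
/*
 * Illustrative usage sketch (hypothetical caller, not compiled here):
 * the caller owns the hrtimer_sleeper and arms it with an absolute
 * expiry; rt_mutex_slowlock() starts the timer and checks
 * timeout->task, which the sleeper wakeup clears when the timer fires.
 * Assumes the hrtimer_sleeper helpers of this kernel generation.
 */
#if 0
	struct hrtimer_sleeper timeout;
	int ret;

	hrtimer_init(&timeout.timer, CLOCK_MONOTONIC, HRTIMER_ABS);
	timeout.timer.expires = ktime_add_ns(timeout.timer.base->get_time(),
					     NSEC_PER_SEC);	/* 1s from now */
	hrtimer_init_sleeper(&timeout, current);

	ret = rt_mutex_timed_lock(lock, &timeout, 0);
	if (ret == -ETIMEDOUT) {
		/* did not get the lock within one second */
	}
#endif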

/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 *
 * Returns 1 on success and 0 on contention
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);

/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);
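
/*
 * Minimal usage sketch (hypothetical caller, not part of this file):
 * a statically initialized rt_mutex protecting some state. Assumes the
 * DEFINE_RT_MUTEX() helper from <linux/rtmutex.h>.
 */
#if 0
static DEFINE_RT_MUTEX(demo_lock);

static void demo(void)
{
	rt_mutex_lock(&demo_lock);
	/* critical section: boosted to a blocked waiter's prio if needed */
	rt_mutex_unlock(&demo_lock);

	if (rt_mutex_trylock(&demo_lock)) {
		/* got it without blocking */
		rt_mutex_unlock(&demo_lock);
	}
}
#endif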

/**
 * rt_mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void rt_mutex_destroy(struct rt_mutex *lock)
{
	WARN_ON(rt_mutex_is_locked(lock));
#ifdef CONFIG_DEBUG_RT_MUTEXES
	lock->magic = NULL;
#endif
}
EXPORT_SYMBOL_GPL(rt_mutex_destroy);

/**
 * __rt_mutex_init - initialize the rt lock
 *
 * @lock: the rt lock to be initialized
 * @name: the lock name used by the debug code
 *
 * Initialize the rt lock to unlocked state.
 *
 * Initializing a locked rt lock is not allowed.
 */
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
	lock->owner = NULL;
	spin_lock_init(&lock->wait_lock);
	plist_head_init(&lock->wait_list, &lock->wait_lock);

	debug_rt_mutex_init(lock, name);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);