/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

/***
 * mutex_init - initialize the mutex
 * @lock: the mutex to be initialized
 *
 * Initialize the mutex to unlocked state.
 *
 * It is not allowed to initialize an already locked mutex.
 */
void fastcall __mutex_init(struct mutex *lock, const char *name)
{
        atomic_set(&lock->count, 1);
        spin_lock_init(&lock->wait_lock);
        INIT_LIST_HEAD(&lock->wait_list);

        debug_mutex_init(lock, name);
}

EXPORT_SYMBOL(__mutex_init);

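/*
 * Usage sketch: a mutex is either defined statically with DEFINE_MUTEX()
 * or initialized at runtime with mutex_init() before first use; both
 * leave it unlocked. The names below ('example_lock', 'struct
 * example_dev', 'example_dev_setup') are hypothetical.
 */
#if 0   /* illustrative sketch, not compiled */
static DEFINE_MUTEX(example_lock);      /* statically defined, unlocked */

struct example_dev {
        struct mutex lock;
        int count;
};

static void example_dev_setup(struct example_dev *dev)
{
        mutex_init(&dev->lock);         /* must run before the first lock */
        dev->count = 0;
}
#endif
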
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void fastcall noinline __sched
__mutex_lock_slowpath(atomic_t *lock_count __IP_DECL__);

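/*
 * For reference, a decrement-based fastpath sketch in the spirit of the
 * generic <asm-generic/mutex-dec.h> variant (an assumption made for
 * illustration - the real fastpath is chosen per architecture): one
 * atomic op in the uncontended case, with the out-of-line slowpath
 * taken only when the count goes negative.
 */
#if 0   /* illustrative sketch, not compiled */
static inline void
__mutex_fastpath_lock_sketch(atomic_t *count,
                             fastcall void (*fail_fn)(atomic_t *))
{
        if (unlikely(atomic_dec_return(count) < 0))
                fail_fn(count);         /* contended: take the slowpath */
}
#endif
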
/***
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void fastcall __sched mutex_lock(struct mutex *lock)
{
        might_sleep();
        /*
         * The locking fastpath is the 1->0 transition from
         * 'unlocked' into 'locked' state.
         */
        __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
}

EXPORT_SYMBOL(mutex_lock);

static void fastcall noinline __sched
__mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__);

/***
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void fastcall __sched mutex_unlock(struct mutex *lock)
{
        /*
         * The unlocking fastpath is the 0->1 transition from 'locked'
         * into 'unlocked' state:
         */
        __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);

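/*
 * Usage sketch: a plain critical section. mutex_lock() may sleep, so
 * this pattern is only valid in process context. The names
 * ('example_lock', 'example_counter', 'example_inc') are hypothetical.
 */
#if 0   /* illustrative sketch, not compiled */
static DEFINE_MUTEX(example_lock);
static int example_counter;

static void example_inc(void)
{
        mutex_lock(&example_lock);      /* sleeps if contended */
        example_counter++;
        mutex_unlock(&example_lock);    /* must be the same task */
}
#endif
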
/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
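/*
 * Note on the ->count encoding used below: 1 means unlocked, 0 means
 * locked with no waiters, and a negative value means locked with
 * possible waiters. That is why waiters xchg() the count to -1, and
 * why the 0->1 unlock fastpath fails and falls into the slowpath
 * whenever anyone is queued.
 */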
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
{
        struct task_struct *task = current;
        struct mutex_waiter waiter;
        unsigned int old_val;
        unsigned long flags;

        debug_mutex_init_waiter(&waiter);

        spin_lock_mutex(&lock->wait_lock, flags);

        debug_mutex_add_waiter(lock, &waiter, task->thread_info, ip);

        /* add waiting tasks to the end of the waitqueue (FIFO): */
        list_add_tail(&waiter.list, &lock->wait_list);
        waiter.task = task;

        for (;;) {
                /*
                 * Let's try to take the lock again - this is needed even if
                 * we get here for the first time (shortly after failing to
                 * acquire the lock), to make sure that we get a wakeup once
                 * it's unlocked. Later on, if we sleep, this is the
                 * operation that gives us the lock. We xchg it to -1, so
                 * that when we release the lock, we properly wake up the
                 * other waiters:
                 */
                old_val = atomic_xchg(&lock->count, -1);
                if (old_val == 1)
                        break;

                /*
                 * got a signal? (This code gets eliminated in the
                 * TASK_UNINTERRUPTIBLE case.)
                 */
                if (unlikely(state == TASK_INTERRUPTIBLE &&
                             signal_pending(task))) {
                        mutex_remove_waiter(lock, &waiter, task->thread_info);
                        spin_unlock_mutex(&lock->wait_lock, flags);

                        debug_mutex_free_waiter(&waiter);
                        return -EINTR;
                }
                __set_task_state(task, state);

                /* didn't get the lock, go to sleep: */
                spin_unlock_mutex(&lock->wait_lock, flags);
                schedule();
                spin_lock_mutex(&lock->wait_lock, flags);
        }

        /* got the lock - rejoice! */
        mutex_remove_waiter(lock, &waiter, task->thread_info);
        debug_mutex_set_owner(lock, task->thread_info __IP__);

        /* set it to 0 if there are no waiters left: */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);

        spin_unlock_mutex(&lock->wait_lock, flags);

        debug_mutex_free_waiter(&waiter);

        DEBUG_WARN_ON(list_empty(&lock->held_list));
        DEBUG_WARN_ON(lock->owner != task->thread_info);

        return 0;
}

static void fastcall noinline __sched
__mutex_lock_slowpath(atomic_t *lock_count __IP_DECL__)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE __IP__);
}

/*
 * Release the lock, slowpath:
 */
static fastcall noinline void
__mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);
        unsigned long flags;

        DEBUG_WARN_ON(lock->owner != current_thread_info());

        spin_lock_mutex(&lock->wait_lock, flags);

        /*
         * Some architectures leave the lock unlocked in the fastpath failure
         * case, others need to leave it locked. In the latter case we have to
         * unlock it here.
         */
        if (__mutex_slowpath_needs_to_unlock())
                atomic_set(&lock->count, 1);

        debug_mutex_unlock(lock);

        if (!list_empty(&lock->wait_list)) {
                /* get the first entry from the wait-list: */
                struct mutex_waiter *waiter =
                                list_entry(lock->wait_list.next,
                                           struct mutex_waiter, list);

                debug_mutex_wake_waiter(lock, waiter);

                wake_up_process(waiter->task);
        }

        debug_mutex_clear_owner(lock);

        spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static int fastcall noinline __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__);

/***
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired, or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
{
        might_sleep();
        return __mutex_fastpath_lock_retval
                        (&lock->count, __mutex_lock_interruptible_slowpath);
}

EXPORT_SYMBOL(mutex_lock_interruptible);

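/*
 * Usage sketch: callers of mutex_lock_interruptible() must check the
 * return value and back out on -EINTR. 'example_lock' and 'example_op'
 * are hypothetical names.
 */
#if 0   /* illustrative sketch, not compiled */
static DEFINE_MUTEX(example_lock);

static int example_op(void)
{
        int ret = mutex_lock_interruptible(&example_lock);

        if (ret)
                return ret;             /* -EINTR: a signal arrived */
        /* ... critical section ... */
        mutex_unlock(&example_lock);
        return 0;
}
#endif
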
static int fastcall noinline __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE __IP__);
}

/*
 * Spinlock-based trylock: we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);
        unsigned long flags;
        int prev;

        spin_lock_mutex(&lock->wait_lock, flags);

        prev = atomic_xchg(&lock->count, -1);
        if (likely(prev == 1))
                debug_mutex_set_owner(lock, current_thread_info() __RET_IP__);
        /* Set it back to 0 if there are no waiters: */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);

        spin_unlock_mutex(&lock->wait_lock, flags);

        return prev == 1;
}

/***
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * its return values are the inverse of the down_trylock() return
 * values! Be careful about this when converting semaphore users
 * to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int fastcall mutex_trylock(struct mutex *lock)
{
        return __mutex_fastpath_trylock(&lock->count,
                                        __mutex_trylock_slowpath);
}

EXPORT_SYMBOL(mutex_trylock);
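
/*
 * Usage sketch: note the spin_trylock()-style return convention -
 * nonzero means the lock was acquired. 'example_lock' and
 * 'example_try_op' are hypothetical names.
 */
#if 0   /* illustrative sketch, not compiled */
static DEFINE_MUTEX(example_lock);

static int example_try_op(void)
{
        if (!mutex_trylock(&example_lock))
                return -EBUSY;          /* contended: did not sleep */
        /* ... critical section ... */
        mutex_unlock(&example_lock);
        return 0;
}
#endif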