kernel/futex.c
1 /*
2 * Fast Userspace Mutexes (which I call "Futexes!").
3 * (C) Rusty Russell, IBM 2002
4 *
5 * Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
6 * (C) Copyright 2003 Red Hat Inc, All Rights Reserved
7 *
8 * Removed page pinning, fix privately mapped COW pages and other cleanups
9 * (C) Copyright 2003, 2004 Jamie Lokier
10 *
11 * Robust futex support started by Ingo Molnar
12 * (C) Copyright 2006 Red Hat Inc, All Rights Reserved
13 * Thanks to Thomas Gleixner for suggestions, analysis and fixes.
14 *
15 * PI-futex support started by Ingo Molnar and Thomas Gleixner
16 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
17 * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
18 *
19 * PRIVATE futexes by Eric Dumazet
20 * Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
21 *
22 * Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
23 * Copyright (C) IBM Corporation, 2009
24 * Thanks to Thomas Gleixner for conceptual design and careful reviews.
25 *
26 * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
27 * enough at me, Linus for the original (flawed) idea, Matthew
28 * Kirkwood for proof-of-concept implementation.
29 *
30 * "The futexes are also cursed."
31 * "But they come in a choice of three flavours!"
32 *
33 * This program is free software; you can redistribute it and/or modify
34 * it under the terms of the GNU General Public License as published by
35 * the Free Software Foundation; either version 2 of the License, or
36 * (at your option) any later version.
37 *
38 * This program is distributed in the hope that it will be useful,
39 * but WITHOUT ANY WARRANTY; without even the implied warranty of
40 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
41 * GNU General Public License for more details.
42 *
43 * You should have received a copy of the GNU General Public License
44 * along with this program; if not, write to the Free Software
45 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
46 */
47 #include <linux/slab.h>
48 #include <linux/poll.h>
49 #include <linux/fs.h>
50 #include <linux/file.h>
51 #include <linux/jhash.h>
52 #include <linux/init.h>
53 #include <linux/futex.h>
54 #include <linux/mount.h>
55 #include <linux/pagemap.h>
56 #include <linux/syscalls.h>
57 #include <linux/signal.h>
58 #include <linux/export.h>
59 #include <linux/magic.h>
60 #include <linux/pid.h>
61 #include <linux/nsproxy.h>
62 #include <linux/ptrace.h>
63 #include <linux/sched/rt.h>
64 #include <linux/hugetlb.h>
65 #include <linux/freezer.h>
66 #include <linux/bootmem.h>
67 #include <linux/fault-inject.h>
68
69 #include <asm/futex.h>
70
71 #include "locking/rtmutex_common.h"
72
73 /*
74 * READ this before attempting to hack on futexes!
75 *
76 * Basic futex operation and ordering guarantees
77 * =============================================
78 *
79 * The waiter reads the futex value in user space and calls
80 * futex_wait(). This function computes the hash bucket and acquires
81 * the hash bucket lock. After that it reads the futex user space value
82 * again and verifies that the data has not changed. If it has not changed
83 * it enqueues itself into the hash bucket, releases the hash bucket lock
84 * and schedules.
85 *
86 * The waker side modifies the user space value of the futex and calls
87 * futex_wake(). This function computes the hash bucket and acquires the
88 * hash bucket lock. Then it looks for waiters on that futex in the hash
89 * bucket and wakes them.
90 *
91 * In futex wake up scenarios where no tasks are blocked on a futex, taking
 92 * the hb spinlock can be avoided and the syscall can simply return. In order for this
93 * optimization to work, ordering guarantees must exist so that the waiter
94 * being added to the list is acknowledged when the list is concurrently being
95 * checked by the waker, avoiding scenarios like the following:
96 *
97 * CPU 0 CPU 1
98 * val = *futex;
99 * sys_futex(WAIT, futex, val);
100 * futex_wait(futex, val);
101 * uval = *futex;
102 * *futex = newval;
103 * sys_futex(WAKE, futex);
104 * futex_wake(futex);
105 * if (queue_empty())
106 * return;
107 * if (uval == val)
108 * lock(hash_bucket(futex));
109 * queue();
110 * unlock(hash_bucket(futex));
111 * schedule();
112 *
113 * This would cause the waiter on CPU 0 to wait forever because it
114 * missed the transition of the user space value from val to newval
115 * and the waker did not find the waiter in the hash bucket queue.
116 *
117 * The correct serialization ensures that a waiter either observes
118 * the changed user space value before blocking or is woken by a
119 * concurrent waker:
120 *
121 * CPU 0 CPU 1
122 * val = *futex;
123 * sys_futex(WAIT, futex, val);
124 * futex_wait(futex, val);
125 *
126 * waiters++; (a)
127 * mb(); (A) <-- paired with -.
128 * |
129 * lock(hash_bucket(futex)); |
130 * |
131 * uval = *futex; |
132 * | *futex = newval;
133 * | sys_futex(WAKE, futex);
134 * | futex_wake(futex);
135 * |
136 * `-------> mb(); (B)
137 * if (uval == val)
138 * queue();
139 * unlock(hash_bucket(futex));
140 * schedule(); if (waiters)
141 * lock(hash_bucket(futex));
142 * else wake_waiters(futex);
143 * waiters--; (b) unlock(hash_bucket(futex));
144 *
145 * Where (A) orders the waiters increment and the futex value read through
146 * atomic operations (see hb_waiters_inc) and where (B) orders the write
147 * to futex and the waiters read -- this is done by the barriers for both
148 * shared and private futexes in get_futex_key_refs().
149 *
150 * This yields the following case (where X:=waiters, Y:=futex):
151 *
152 * X = Y = 0
153 *
154 * w[X]=1 w[Y]=1
155 * MB MB
156 * r[Y]=y r[X]=x
157 *
158 * Which guarantees that x==0 && y==0 is impossible; which translates back into
159 * the guarantee that we cannot both miss the futex variable change and the
160 * enqueue.
161 *
162 * Note that a new waiter is accounted for in (a) even when it is possible that
163 * the wait call returns an error, in which case we backtrack from it in (b).
164 * Refer to the comment in queue_lock().
165 *
166 * Similarly, in order to account for waiters being requeued to another
167 * address, we always increment the waiter count for the destination bucket
168 * before acquiring the lock, and decrement it again after releasing it -
169 * the code that actually moves the futex(es) between hash buckets
170 * (requeue_futex) does the additional required waiter count housekeeping.
171 * This is done in double_lock_hb() and double_unlock_hb(), respectively.
172 */
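
/*
 * A minimal userspace sketch of the wait/wake protocol described above;
 * illustrative only. It assumes the raw syscall(2) interface and a lock
 * encoding of 0 == unlocked, 1 == locked; the demo_* names are
 * hypothetical, not part of this file:
 */
#include <linux/futex.h>
#include <stdatomic.h>
#include <sys/syscall.h>
#include <unistd.h>

static atomic_int demo_futex;	/* the 32-bit futex word */

static void demo_lock(void)
{
	int expected = 0;

	/* Fast path: 0 -> 1 entirely in user space, no syscall. */
	while (!atomic_compare_exchange_strong(&demo_futex, &expected, 1)) {
		/*
		 * Slow path: FUTEX_WAIT blocks only if the word is
		 * still 1, which closes the race described above.
		 */
		syscall(SYS_futex, &demo_futex, FUTEX_WAIT, 1, NULL, NULL, 0);
		expected = 0;
	}
}

static void demo_unlock(void)
{
	atomic_store(&demo_futex, 0);	/* *futex = newval */
	syscall(SYS_futex, &demo_futex, FUTEX_WAKE, 1, NULL, NULL, 0);
}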
173
174 #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
175 int __read_mostly futex_cmpxchg_enabled;
176 #endif
177
178 /*
179 * Futex flags used to encode options to functions and preserve them across
180 * restarts.
181 */
182 #define FLAGS_SHARED 0x01
183 #define FLAGS_CLOCKRT 0x02
184 #define FLAGS_HAS_TIMEOUT 0x04
185
186 /*
187 * Priority Inheritance state:
188 */
189 struct futex_pi_state {
190 /*
191 * list of 'owned' pi_state instances - these have to be
192 * cleaned up in do_exit() if the task exits prematurely:
193 */
194 struct list_head list;
195
196 /*
197 * The PI object:
198 */
199 struct rt_mutex pi_mutex;
200
201 struct task_struct *owner;
202 atomic_t refcount;
203
204 union futex_key key;
205 };
206
207 /**
208 * struct futex_q - The hashed futex queue entry, one per waiting task
209 * @list: priority-sorted list of tasks waiting on this futex
210 * @task: the task waiting on the futex
211 * @lock_ptr: the hash bucket lock
212 * @key: the key the futex is hashed on
213 * @pi_state: optional priority inheritance state
214 * @rt_waiter: rt_waiter storage for use with requeue_pi
215 * @requeue_pi_key: the requeue_pi target futex key
216 * @bitset: bitset for the optional bitmasked wakeup
217 *
218 * We use this hashed waitqueue, instead of a normal wait_queue_t, so
219 * we can wake only the relevant ones (hashed queues may be shared).
220 *
221 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
222 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
223 * The order of wakeup is always to make the first condition true, then
224 * the second.
225 *
226 * PI futexes are typically woken before they are removed from the hash list via
227 * the rt_mutex code. See unqueue_me_pi().
228 */
229 struct futex_q {
230 struct plist_node list;
231
232 struct task_struct *task;
233 spinlock_t *lock_ptr;
234 union futex_key key;
235 struct futex_pi_state *pi_state;
236 struct rt_mutex_waiter *rt_waiter;
237 union futex_key *requeue_pi_key;
238 u32 bitset;
239 };
240
241 static const struct futex_q futex_q_init = {
242 /* list gets initialized in queue_me() */
243 .key = FUTEX_KEY_INIT,
244 .bitset = FUTEX_BITSET_MATCH_ANY
245 };
246
247 /*
248 * Hash buckets are shared by all the futex_keys that hash to the same
249 * location. Each key may have multiple futex_q structures, one for each task
250 * waiting on a futex.
251 */
252 struct futex_hash_bucket {
253 atomic_t waiters;
254 spinlock_t lock;
255 struct plist_head chain;
256 } ____cacheline_aligned_in_smp;
257
258 /*
259 * The base of the bucket array and its size are always used together
260 * (after initialization only in hash_futex()), so ensure that they
261 * reside in the same cacheline.
262 */
263 static struct {
264 struct futex_hash_bucket *queues;
265 unsigned long hashsize;
266 } __futex_data __read_mostly __aligned(2*sizeof(long));
267 #define futex_queues (__futex_data.queues)
268 #define futex_hashsize (__futex_data.hashsize)
269
270
271 /*
272 * Fault injections for futexes.
273 */
274 #ifdef CONFIG_FAIL_FUTEX
275
276 static struct {
277 struct fault_attr attr;
278
279 bool ignore_private;
280 } fail_futex = {
281 .attr = FAULT_ATTR_INITIALIZER,
282 .ignore_private = false,
283 };
284
285 static int __init setup_fail_futex(char *str)
286 {
287 return setup_fault_attr(&fail_futex.attr, str);
288 }
289 __setup("fail_futex=", setup_fail_futex);
290
291 static bool should_fail_futex(bool fshared)
292 {
293 if (fail_futex.ignore_private && !fshared)
294 return false;
295
296 return should_fail(&fail_futex.attr, 1);
297 }
298
299 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
300
301 static int __init fail_futex_debugfs(void)
302 {
303 umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
304 struct dentry *dir;
305
306 dir = fault_create_debugfs_attr("fail_futex", NULL,
307 &fail_futex.attr);
308 if (IS_ERR(dir))
309 return PTR_ERR(dir);
310
311 if (!debugfs_create_bool("ignore-private", mode, dir,
312 &fail_futex.ignore_private)) {
313 debugfs_remove_recursive(dir);
314 return -ENOMEM;
315 }
316
317 return 0;
318 }
319
320 late_initcall(fail_futex_debugfs);
321
322 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
323
324 #else
325 static inline bool should_fail_futex(bool fshared)
326 {
327 return false;
328 }
329 #endif /* CONFIG_FAIL_FUTEX */
330
331 static inline void futex_get_mm(union futex_key *key)
332 {
333 atomic_inc(&key->private.mm->mm_count);
334 /*
335 * Ensure futex_get_mm() implies a full barrier such that
336 * get_futex_key() implies a full barrier. This is relied upon
337 * as full barrier (B), see the ordering comment above.
338 */
339 smp_mb__after_atomic();
340 }
341
342 /*
343 * Reflects a new waiter being added to the waitqueue.
344 */
345 static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
346 {
347 #ifdef CONFIG_SMP
348 atomic_inc(&hb->waiters);
349 /*
350 * Full barrier (A), see the ordering comment above.
351 */
352 smp_mb__after_atomic();
353 #endif
354 }
355
356 /*
357 * Reflects a waiter being removed from the waitqueue by wakeup
358 * paths.
359 */
360 static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
361 {
362 #ifdef CONFIG_SMP
363 atomic_dec(&hb->waiters);
364 #endif
365 }
366
367 static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
368 {
369 #ifdef CONFIG_SMP
370 return atomic_read(&hb->waiters);
371 #else
372 return 1;
373 #endif
374 }
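
/*
 * The (A)/(B) pairing above is the classic store-buffering pattern: a
 * full barrier between each side's store and load guarantees that at
 * least one side observes the other's store. A standalone C11 sketch of
 * the same shape, illustrative only (X stands for hb->waiters, Y for
 * the futex word):
 */
#include <stdatomic.h>

static atomic_int X, Y;

static int waiter_side(void)	/* hb_waiters_inc() + futex value read */
{
	atomic_store_explicit(&X, 1, memory_order_relaxed);	/* w[X]=1 */
	atomic_thread_fence(memory_order_seq_cst);		/* MB (A) */
	return atomic_load_explicit(&Y, memory_order_relaxed);	/* r[Y] */
}

static int waker_side(void)	/* futex store + hb_waiters_pending() */
{
	atomic_store_explicit(&Y, 1, memory_order_relaxed);	/* w[Y]=1 */
	atomic_thread_fence(memory_order_seq_cst);		/* MB (B) */
	return atomic_load_explicit(&X, memory_order_relaxed);	/* r[X] */
}

/*
 * With both fences in place, waiter_side() and waker_side() cannot both
 * return 0, matching the x==0 && y==0 impossibility argued above.
 */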
375
376 /*
377 * We hash on the keys returned from get_futex_key (see below).
378 */
379 static struct futex_hash_bucket *hash_futex(union futex_key *key)
380 {
381 u32 hash = jhash2((u32*)&key->both.word,
382 (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
383 key->both.offset);
384 return &futex_queues[hash & (futex_hashsize - 1)];
385 }
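
/*
 * The mask in hash_futex() relies on futex_hashsize being a power of
 * two (the init code sizes it with roundup_pow_of_two()), so masking is
 * equivalent to a modulo reduction. A self-contained sketch of that
 * identity, illustrative only:
 */
#include <assert.h>

static void demo_mask_vs_mod(unsigned long hash)
{
	const unsigned long size = 256;	/* any power of two */

	assert((hash & (size - 1)) == (hash % size));
}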
386
387 /*
388 * Return 1 if two futex_keys are equal, 0 otherwise.
389 */
390 static inline int match_futex(union futex_key *key1, union futex_key *key2)
391 {
392 return (key1 && key2
393 && key1->both.word == key2->both.word
394 && key1->both.ptr == key2->both.ptr
395 && key1->both.offset == key2->both.offset);
396 }
397
398 /*
399 * Take a reference to the resource addressed by a key.
400 * Can be called while holding spinlocks.
401 *
402 */
403 static void get_futex_key_refs(union futex_key *key)
404 {
405 if (!key->both.ptr)
406 return;
407
408 switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
409 case FUT_OFF_INODE:
410 ihold(key->shared.inode); /* implies MB (B) */
411 break;
412 case FUT_OFF_MMSHARED:
413 futex_get_mm(key); /* implies MB (B) */
414 break;
415 default:
416 /*
417 * Private futexes do not hold a reference on an inode or
418 * mm, therefore the only reason for calling get_futex_key_refs()
419 * is the barrier needed for the lockless waiter check.
420 */
421 smp_mb(); /* explicit MB (B) */
422 }
423 }
424
425 /*
426 * Drop a reference to the resource addressed by a key.
427 * The hash bucket spinlock must not be held. This is
428 * a no-op for private futexes, see comment in the get
429 * counterpart.
430 */
431 static void drop_futex_key_refs(union futex_key *key)
432 {
433 if (!key->both.ptr) {
434 /* If we're here then we tried to put a key we failed to get */
435 WARN_ON_ONCE(1);
436 return;
437 }
438
439 switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
440 case FUT_OFF_INODE:
441 iput(key->shared.inode);
442 break;
443 case FUT_OFF_MMSHARED:
444 mmdrop(key->private.mm);
445 break;
446 }
447 }
448
449 /**
450 * get_futex_key() - Get parameters which are the keys for a futex
451 * @uaddr: virtual address of the futex
452 * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
453 * @key: address where result is stored.
454 * @rw: mapping needs to be read/write (values: VERIFY_READ,
455 * VERIFY_WRITE)
456 *
457 * Return: a negative error code or 0
458 *
459 * The key words are stored in *key on success.
460 *
461 * For shared mappings, it's (page->index, file_inode(vma->vm_file),
462 * offset_within_page). For private mappings, it's (uaddr, current->mm).
463 * We can usually work out the index without swapping in the page.
464 *
465 * lock_page() might sleep, the caller should not hold a spinlock.
466 */
467 static int
468 get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
469 {
470 unsigned long address = (unsigned long)uaddr;
471 struct mm_struct *mm = current->mm;
472 struct page *page, *page_head;
473 int err, ro = 0;
474
475 /*
476 * The futex address must be "naturally" aligned.
477 */
478 key->both.offset = address % PAGE_SIZE;
479 if (unlikely((address % sizeof(u32)) != 0))
480 return -EINVAL;
481 address -= key->both.offset;
482
483 if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
484 return -EFAULT;
485
486 if (unlikely(should_fail_futex(fshared)))
487 return -EFAULT;
488
489 /*
490 * PROCESS_PRIVATE futexes are fast.
491 * As the mm cannot disappear under us and the 'key' only needs
492 * virtual address, we don't even have to find the underlying vma.
493 * Note: we do have to check that 'uaddr' is a valid user address,
494 * but access_ok() should be faster than find_vma().
495 */
496 if (!fshared) {
497 key->private.mm = mm;
498 key->private.address = address;
499 get_futex_key_refs(key); /* implies MB (B) */
500 return 0;
501 }
502
503 again:
504 /* Ignore any VERIFY_READ mapping (futex common case) */
505 if (unlikely(should_fail_futex(fshared)))
506 return -EFAULT;
507
508 err = get_user_pages_fast(address, 1, 1, &page);
509 /*
510 * If write access is not required (eg. FUTEX_WAIT), try
511 * and get read-only access.
512 */
513 if (err == -EFAULT && rw == VERIFY_READ) {
514 err = get_user_pages_fast(address, 1, 0, &page);
515 ro = 1;
516 }
517 if (err < 0)
518 return err;
519 else
520 err = 0;
521
522 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
523 page_head = page;
524 if (unlikely(PageTail(page))) {
525 put_page(page);
526 /* serialize against __split_huge_page_splitting() */
527 local_irq_disable();
528 if (likely(__get_user_pages_fast(address, 1, !ro, &page) == 1)) {
529 page_head = compound_head(page);
530 /*
531 * page_head is a valid pointer but we must pin
532 * it before taking the PG_lock and/or
533 * PG_compound_lock. The moment we re-enable
534 * irqs __split_huge_page_splitting() can
535 * return and the head page can be freed from
536 * under us. We can't take the PG_lock and/or
537 * PG_compound_lock on a page that could be
538 * freed from under us.
539 */
540 if (page != page_head) {
541 get_page(page_head);
542 put_page(page);
543 }
544 local_irq_enable();
545 } else {
546 local_irq_enable();
547 goto again;
548 }
549 }
550 #else
551 page_head = compound_head(page);
552 if (page != page_head) {
553 get_page(page_head);
554 put_page(page);
555 }
556 #endif
557
558 lock_page(page_head);
559
560 /*
561 * If page_head->mapping is NULL, then it cannot be a PageAnon
562 * page; but it might be the ZERO_PAGE or in the gate area or
563 * in a special mapping (all cases which we are happy to fail);
564 * or it may have been a good file page when get_user_pages_fast
565 * found it, but truncated or holepunched or subjected to
566 * invalidate_complete_page2 before we got the page lock (also
567 * cases which we are happy to fail). And we hold a reference,
568 * so refcount care in invalidate_complete_page's remove_mapping
569 * prevents drop_caches from setting mapping to NULL beneath us.
570 *
571 * The case we do have to guard against is when memory pressure made
572 * shmem_writepage move it from filecache to swapcache beneath us:
573 * an unlikely race, but we do need to retry for page_head->mapping.
574 */
575 if (!page_head->mapping) {
576 int shmem_swizzled = PageSwapCache(page_head);
577 unlock_page(page_head);
578 put_page(page_head);
579 if (shmem_swizzled)
580 goto again;
581 return -EFAULT;
582 }
583
584 /*
585 * Private mappings are handled in a simple way.
586 *
587 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
588 * it's a read-only handle, it's expected that futexes attach to
589 * the object not the particular process.
590 */
591 if (PageAnon(page_head)) {
592 /*
593 * A RO anonymous page will never change and thus doesn't make
594 * sense for futex operations.
595 */
596 if (unlikely(should_fail_futex(fshared)) || ro) {
597 err = -EFAULT;
598 goto out;
599 }
600
601 key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
602 key->private.mm = mm;
603 key->private.address = address;
604 } else {
605 key->both.offset |= FUT_OFF_INODE; /* inode-based key */
606 key->shared.inode = page_head->mapping->host;
607 key->shared.pgoff = basepage_index(page);
608 }
609
610 get_futex_key_refs(key); /* implies MB (B) */
611
612 out:
613 unlock_page(page_head);
614 put_page(page_head);
615 return err;
616 }
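
/*
 * From user space, the PROCESS_PRIVATE fast path above is selected by
 * ORing FUTEX_PRIVATE_FLAG into the opcode, which glibc does for
 * process-local locks. A hedged sketch, assuming the raw syscall
 * interface; demo_wait_private() is a hypothetical name:
 */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static long demo_wait_private(int *uaddr, int val)
{
	/*
	 * FUTEX_WAIT_PRIVATE == FUTEX_WAIT | FUTEX_PRIVATE_FLAG: the
	 * kernel keys on (current->mm, address) and never has to look
	 * up the page or the vma.
	 */
	return syscall(SYS_futex, uaddr, FUTEX_WAIT_PRIVATE, val,
		       NULL, NULL, 0);
}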
617
618 static inline void put_futex_key(union futex_key *key)
619 {
620 drop_futex_key_refs(key);
621 }
622
623 /**
624 * fault_in_user_writeable() - Fault in user address and verify RW access
625 * @uaddr: pointer to faulting user space address
626 *
627 * Slow path to fixup the fault we just took in the atomic write
628 * access to @uaddr.
629 *
630 * We have no generic implementation of a non-destructive write to the
631 * user address. We know that we faulted in the atomic pagefault
632 * disabled section so we can as well avoid the #PF overhead by
633 * calling get_user_pages() right away.
634 */
635 static int fault_in_user_writeable(u32 __user *uaddr)
636 {
637 struct mm_struct *mm = current->mm;
638 int ret;
639
640 down_read(&mm->mmap_sem);
641 ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
642 FAULT_FLAG_WRITE);
643 up_read(&mm->mmap_sem);
644
645 return ret < 0 ? ret : 0;
646 }
647
648 /**
649 * futex_top_waiter() - Return the highest priority waiter on a futex
650 * @hb: the hash bucket the futex_q's reside in
651 * @key: the futex key (to distinguish it from other futex futex_q's)
652 *
653 * Must be called with the hb lock held.
654 */
655 static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
656 union futex_key *key)
657 {
658 struct futex_q *this;
659
660 plist_for_each_entry(this, &hb->chain, list) {
661 if (match_futex(&this->key, key))
662 return this;
663 }
664 return NULL;
665 }
666
667 static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
668 u32 uval, u32 newval)
669 {
670 int ret;
671
672 pagefault_disable();
673 ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
674 pagefault_enable();
675
676 return ret;
677 }
678
679 static int get_futex_value_locked(u32 *dest, u32 __user *from)
680 {
681 int ret;
682
683 pagefault_disable();
684 ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
685 pagefault_enable();
686
687 return ret ? -EFAULT : 0;
688 }
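
/*
 * Callers use the two _locked helpers above with pagefaults disabled,
 * typically while holding hb->lock; on -EFAULT they must drop the lock,
 * fault the page in writable and retry from the top. A condensed sketch
 * of that loop using names from this file (demo_* is hypothetical; see
 * futex_wake_op() for a real instance):
 */
static int demo_atomic_op_with_retry(u32 __user *uaddr, u32 uval, u32 newval,
				     struct futex_hash_bucket *hb)
{
	u32 curval;
	int ret;

retry:
	spin_lock(&hb->lock);
	ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
	spin_unlock(&hb->lock);

	if (ret == -EFAULT) {
		/* We cannot fault with hb->lock held; fix up and retry. */
		if (fault_in_user_writeable(uaddr))
			return -EFAULT;
		goto retry;
	}
	return ret;
}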
689
690
691 /*
692 * PI code:
693 */
694 static int refill_pi_state_cache(void)
695 {
696 struct futex_pi_state *pi_state;
697
698 if (likely(current->pi_state_cache))
699 return 0;
700
701 pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);
702
703 if (!pi_state)
704 return -ENOMEM;
705
706 INIT_LIST_HEAD(&pi_state->list);
707 /* pi_mutex gets initialized later */
708 pi_state->owner = NULL;
709 atomic_set(&pi_state->refcount, 1);
710 pi_state->key = FUTEX_KEY_INIT;
711
712 current->pi_state_cache = pi_state;
713
714 return 0;
715 }
716
717 static struct futex_pi_state * alloc_pi_state(void)
718 {
719 struct futex_pi_state *pi_state = current->pi_state_cache;
720
721 WARN_ON(!pi_state);
722 current->pi_state_cache = NULL;
723
724 return pi_state;
725 }
726
727 /*
728 * Drops a reference to the pi_state object and frees or caches it
729 * when the last reference is gone.
730 *
731 * Must be called with the hb lock held.
732 */
733 static void put_pi_state(struct futex_pi_state *pi_state)
734 {
735 if (!pi_state)
736 return;
737
738 if (!atomic_dec_and_test(&pi_state->refcount))
739 return;
740
741 /*
742 * If pi_state->owner is NULL, the owner is most probably dying
743 * and has cleaned up the pi_state already
744 */
745 if (pi_state->owner) {
746 raw_spin_lock_irq(&pi_state->owner->pi_lock);
747 list_del_init(&pi_state->list);
748 raw_spin_unlock_irq(&pi_state->owner->pi_lock);
749
750 rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
751 }
752
753 if (current->pi_state_cache)
754 kfree(pi_state);
755 else {
756 /*
757 * pi_state->list is already empty.
758 * clear pi_state->owner.
759 * refcount is at 0 - put it back to 1.
760 */
761 pi_state->owner = NULL;
762 atomic_set(&pi_state->refcount, 1);
763 current->pi_state_cache = pi_state;
764 }
765 }
766
767 /*
768 * Look up the task based on what TID userspace gave us.
769 * We don't trust it.
770 */
771 static struct task_struct * futex_find_get_task(pid_t pid)
772 {
773 struct task_struct *p;
774
775 rcu_read_lock();
776 p = find_task_by_vpid(pid);
777 if (p)
778 get_task_struct(p);
779
780 rcu_read_unlock();
781
782 return p;
783 }
784
785 /*
786 * This task is holding PI mutexes at exit time => bad.
787 * Kernel cleans up PI-state, but userspace is likely hosed.
788 * (Robust-futex cleanup is separate and might save the day for userspace.)
789 */
790 void exit_pi_state_list(struct task_struct *curr)
791 {
792 struct list_head *next, *head = &curr->pi_state_list;
793 struct futex_pi_state *pi_state;
794 struct futex_hash_bucket *hb;
795 union futex_key key = FUTEX_KEY_INIT;
796
797 if (!futex_cmpxchg_enabled)
798 return;
799 /*
800 * We are a ZOMBIE and nobody can enqueue itself on
801 * pi_state_list anymore, but we have to be careful
802 * versus waiters unqueueing themselves:
803 */
804 raw_spin_lock_irq(&curr->pi_lock);
805 while (!list_empty(head)) {
806
807 next = head->next;
808 pi_state = list_entry(next, struct futex_pi_state, list);
809 key = pi_state->key;
810 hb = hash_futex(&key);
811 raw_spin_unlock_irq(&curr->pi_lock);
812
813 spin_lock(&hb->lock);
814
815 raw_spin_lock_irq(&curr->pi_lock);
816 /*
817 * We dropped the pi-lock, so re-check whether this
818 * task still owns the PI-state:
819 */
820 if (head->next != next) {
821 spin_unlock(&hb->lock);
822 continue;
823 }
824
825 WARN_ON(pi_state->owner != curr);
826 WARN_ON(list_empty(&pi_state->list));
827 list_del_init(&pi_state->list);
828 pi_state->owner = NULL;
829 raw_spin_unlock_irq(&curr->pi_lock);
830
831 rt_mutex_unlock(&pi_state->pi_mutex);
832
833 spin_unlock(&hb->lock);
834
835 raw_spin_lock_irq(&curr->pi_lock);
836 }
837 raw_spin_unlock_irq(&curr->pi_lock);
838 }
839
840 /*
841 * We need to check the following states:
842 *
843 * Waiter | pi_state | pi->owner | uTID | uODIED | ?
844 *
845 * [1] NULL | --- | --- | 0 | 0/1 | Valid
846 * [2] NULL | --- | --- | >0 | 0/1 | Valid
847 *
848 * [3] Found | NULL | -- | Any | 0/1 | Invalid
849 *
850 * [4] Found | Found | NULL | 0 | 1 | Valid
851 * [5] Found | Found | NULL | >0 | 1 | Invalid
852 *
853 * [6] Found | Found | task | 0 | 1 | Valid
854 *
855 * [7] Found | Found | NULL | Any | 0 | Invalid
856 *
857 * [8] Found | Found | task | ==taskTID | 0/1 | Valid
858 * [9] Found | Found | task | 0 | 0 | Invalid
859 * [10] Found | Found | task | !=taskTID | 0/1 | Invalid
860 *
861 * [1] Indicates that the kernel can acquire the futex atomically. We
862 * came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
863 *
864 * [2] Valid, if TID does not belong to a kernel thread. If no matching
865 * thread is found then it indicates that the owner TID has died.
866 *
867 * [3] Invalid. The waiter is queued on a non PI futex
868 *
869 * [4] Valid state after exit_robust_list(), which sets the user space
870 * value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
871 *
872 * [5] The user space value got manipulated between exit_robust_list()
873 * and exit_pi_state_list()
874 *
875 * [6] Valid state after exit_pi_state_list() which sets the new owner in
876 * the pi_state but cannot access the user space value.
877 *
878 * [7] pi_state->owner can only be NULL when the OWNER_DIED bit is set.
879 *
880 * [8] Owner and user space value match
881 *
882 * [9] There is no transient state which sets the user space TID to 0
883 * except exit_robust_list(), but this is indicated by the
884 * FUTEX_OWNER_DIED bit. See [4]
885 *
886 * [10] There is no transient state which leaves owner and user space
887 * TID out of sync.
888 */
889
890 /*
891 * Validate that the existing waiter has a pi_state and sanity check
892 * the pi_state against the user space value. If correct, attach to
893 * it.
894 */
895 static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state,
896 struct futex_pi_state **ps)
897 {
898 pid_t pid = uval & FUTEX_TID_MASK;
899
900 /*
901 * Userspace might have messed up non-PI and PI futexes [3]
902 */
903 if (unlikely(!pi_state))
904 return -EINVAL;
905
906 WARN_ON(!atomic_read(&pi_state->refcount));
907
908 /*
909 * Handle the owner died case:
910 */
911 if (uval & FUTEX_OWNER_DIED) {
912 /*
913 * exit_pi_state_list sets owner to NULL and wakes the
914 * topmost waiter. The task which acquires the
915 * pi_state->rt_mutex will fixup owner.
916 */
917 if (!pi_state->owner) {
918 /*
919 * No pi state owner, but the user space TID
920 * is not 0. Inconsistent state. [5]
921 */
922 if (pid)
923 return -EINVAL;
924 /*
925 * Take a ref on the state and return success. [4]
926 */
927 goto out_state;
928 }
929
930 /*
931 * If TID is 0, then either the dying owner has not
932 * yet executed exit_pi_state_list() or some waiter
933 * acquired the rtmutex in the pi state, but did not
934 * yet fixup the TID in user space.
935 *
936 * Take a ref on the state and return success. [6]
937 */
938 if (!pid)
939 goto out_state;
940 } else {
941 /*
942 * If the owner died bit is not set, then the pi_state
943 * must have an owner. [7]
944 */
945 if (!pi_state->owner)
946 return -EINVAL;
947 }
948
949 /*
950 * Bail out if user space manipulated the futex value. If pi
951 * state exists then the owner TID must be the same as the
952 * user space TID. [9/10]
953 */
954 if (pid != task_pid_vnr(pi_state->owner))
955 return -EINVAL;
956 out_state:
957 atomic_inc(&pi_state->refcount);
958 *ps = pi_state;
959 return 0;
960 }
961
962 /*
963 * Lookup the task for the TID provided from user space and attach to
964 * it after doing proper sanity checks.
965 */
966 static int attach_to_pi_owner(u32 uval, union futex_key *key,
967 struct futex_pi_state **ps)
968 {
969 pid_t pid = uval & FUTEX_TID_MASK;
970 struct futex_pi_state *pi_state;
971 struct task_struct *p;
972
973 /*
974 * We are the first waiter - try to look up the real owner and attach
975 * the new pi_state to it, but bail out when TID = 0 [1]
976 */
977 if (!pid)
978 return -ESRCH;
979 p = futex_find_get_task(pid);
980 if (!p)
981 return -ESRCH;
982
983 if (unlikely(p->flags & PF_KTHREAD)) {
984 put_task_struct(p);
985 return -EPERM;
986 }
987
988 /*
989 * We need to look at the task state flags to figure out
990 * whether the task is exiting. To protect against the do_exit
991 * change of the task flags, we do this protected by
992 * p->pi_lock:
993 */
994 raw_spin_lock_irq(&p->pi_lock);
995 if (unlikely(p->flags & PF_EXITING)) {
996 /*
997 * The task is on the way out. When PF_EXITPIDONE is
998 * set, we know that the task has finished the
999 * cleanup:
1000 */
1001 int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;
1002
1003 raw_spin_unlock_irq(&p->pi_lock);
1004 put_task_struct(p);
1005 return ret;
1006 }
1007
1008 /*
1009 * No existing pi state. First waiter. [2]
1010 */
1011 pi_state = alloc_pi_state();
1012
1013 /*
1014 * Initialize the pi_mutex in locked state and make @p
1015 * the owner of it:
1016 */
1017 rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);
1018
1019 /* Store the key for possible exit cleanups: */
1020 pi_state->key = *key;
1021
1022 WARN_ON(!list_empty(&pi_state->list));
1023 list_add(&pi_state->list, &p->pi_state_list);
1024 pi_state->owner = p;
1025 raw_spin_unlock_irq(&p->pi_lock);
1026
1027 put_task_struct(p);
1028
1029 *ps = pi_state;
1030
1031 return 0;
1032 }
1033
1034 static int lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
1035 union futex_key *key, struct futex_pi_state **ps)
1036 {
1037 struct futex_q *match = futex_top_waiter(hb, key);
1038
1039 /*
1040 * If there is a waiter on that futex, validate it and
1041 * attach to the pi_state when the validation succeeds.
1042 */
1043 if (match)
1044 return attach_to_pi_state(uval, match->pi_state, ps);
1045
1046 /*
1047 * We are the first waiter - try to look up the owner based on
1048 * @uval and attach to it.
1049 */
1050 return attach_to_pi_owner(uval, key, ps);
1051 }
1052
1053 static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
1054 {
1055 u32 uninitialized_var(curval);
1056
1057 if (unlikely(should_fail_futex(true)))
1058 return -EFAULT;
1059
1060 if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
1061 return -EFAULT;
1062
1063 /* If the user space value changed, let the caller retry */
1064 return curval != uval ? -EAGAIN : 0;
1065 }
1066
1067 /**
1068 * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
1069 * @uaddr: the pi futex user address
1070 * @hb: the pi futex hash bucket
1071 * @key: the futex key associated with uaddr and hb
1072 * @ps: the pi_state pointer where we store the result of the
1073 * lookup
1074 * @task: the task to perform the atomic lock work for. This will
1075 * be "current" except in the case of requeue pi.
1076 * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
1077 *
1078 * Return:
1079 * 0 - ready to wait;
1080 * 1 - acquired the lock;
1081 * <0 - error
1082 *
1083 * The hb->lock and futex_key refs shall be held by the caller.
1084 */
1085 static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
1086 union futex_key *key,
1087 struct futex_pi_state **ps,
1088 struct task_struct *task, int set_waiters)
1089 {
1090 u32 uval, newval, vpid = task_pid_vnr(task);
1091 struct futex_q *match;
1092 int ret;
1093
1094 /*
1095 * Read the user space value first so we can validate a few
1096 * things before proceeding further.
1097 */
1098 if (get_futex_value_locked(&uval, uaddr))
1099 return -EFAULT;
1100
1101 if (unlikely(should_fail_futex(true)))
1102 return -EFAULT;
1103
1104 /*
1105 * Detect deadlocks.
1106 */
1107 if (unlikely((uval & FUTEX_TID_MASK) == vpid))
1108 return -EDEADLK;
1109
1110 if (unlikely(should_fail_futex(true)))
1111 return -EDEADLK;
1112
1113 /*
1114 * Lookup existing state first. If it exists, try to attach to
1115 * its pi_state.
1116 */
1117 match = futex_top_waiter(hb, key);
1118 if (match)
1119 return attach_to_pi_state(uval, match->pi_state, ps);
1120
1121 /*
1122 * No waiter and the user TID is 0. We are here because the
1123 * waiters bit or the owner died bit is set, or we were called
1124 * from requeue_cmp_pi, or because something else entered the
1125 * syscall.
1126 */
1127 if (!(uval & FUTEX_TID_MASK)) {
1128 /*
1129 * We take over the futex. No other waiters and the user space
1130 * TID is 0. We preserve the owner died bit.
1131 */
1132 newval = uval & FUTEX_OWNER_DIED;
1133 newval |= vpid;
1134
1135 /* The futex requeue_pi code can enforce the waiters bit */
1136 if (set_waiters)
1137 newval |= FUTEX_WAITERS;
1138
1139 ret = lock_pi_update_atomic(uaddr, uval, newval);
1140 /* If the take over worked, return 1 */
1141 return ret < 0 ? ret : 1;
1142 }
1143
1144 /*
1145 * First waiter. Set the waiters bit before attaching ourselves to
1146 * the owner. If owner tries to unlock, it will be forced into
1147 * the kernel and blocked on hb->lock.
1148 */
1149 newval = uval | FUTEX_WAITERS;
1150 ret = lock_pi_update_atomic(uaddr, uval, newval);
1151 if (ret)
1152 return ret;
1153 /*
1154 * If the update of the user space value succeeded, we try to
1155 * attach to the owner. If that fails, no harm done, we only
1156 * set the FUTEX_WAITERS bit in the user space variable.
1157 */
1158 return attach_to_pi_owner(uval, key, ps);
1159 }
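
/*
 * The userspace half of the PI protocol handled by the function above:
 * both uncontended paths are a single cmpxchg of the TID and only
 * contention enters the kernel. A hedged sketch, assuming the raw
 * syscall interface (see futex(2)); demo_* names are hypothetical:
 */
#include <linux/futex.h>
#include <stdatomic.h>
#include <sys/syscall.h>
#include <unistd.h>

static void demo_pi_lock(atomic_uint *f, unsigned int tid)
{
	unsigned int expected = 0;

	/* Uncontended: 0 -> TID entirely in user space. */
	if (!atomic_compare_exchange_strong(f, &expected, tid))
		/* Contended: the kernel sets FUTEX_WAITERS and boosts the owner. */
		syscall(SYS_futex, f, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
}

static void demo_pi_unlock(atomic_uint *f, unsigned int tid)
{
	unsigned int expected = tid;

	/* Uncontended: TID -> 0; with FUTEX_WAITERS set the cmpxchg fails. */
	if (!atomic_compare_exchange_strong(f, &expected, 0))
		syscall(SYS_futex, f, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
}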
1160
1161 /**
1162 * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
1163 * @q: The futex_q to unqueue
1164 *
1165 * The q->lock_ptr must not be NULL and must be held by the caller.
1166 */
1167 static void __unqueue_futex(struct futex_q *q)
1168 {
1169 struct futex_hash_bucket *hb;
1170
1171 if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr))
1172 || WARN_ON(plist_node_empty(&q->list)))
1173 return;
1174
1175 hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
1176 plist_del(&q->list, &hb->chain);
1177 hb_waiters_dec(hb);
1178 }
1179
1180 /*
1181 * The hash bucket lock must be held when this is called.
1182 * Afterwards, the futex_q must not be accessed. Callers
1183 * must ensure to later call wake_up_q() for the actual
1184 * wakeups to occur.
1185 */
1186 static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
1187 {
1188 struct task_struct *p = q->task;
1189
1190 if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
1191 return;
1192
1193 /*
1194 * Queue the task for later wakeup for after we've released
1195 * the hb->lock. wake_q_add() grabs reference to p.
1196 */
1197 wake_q_add(wake_q, p);
1198 __unqueue_futex(q);
1199 /*
1200 * The waiting task can free the futex_q as soon as
1201 * q->lock_ptr = NULL is written, without taking any locks. A
1202 * memory barrier is required here to prevent the following
1203 * store to lock_ptr from getting ahead of the plist_del.
1204 */
1205 smp_wmb();
1206 q->lock_ptr = NULL;
1207 }
1208
1209 static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
1210 struct futex_hash_bucket *hb)
1211 {
1212 struct task_struct *new_owner;
1213 struct futex_pi_state *pi_state = this->pi_state;
1214 u32 uninitialized_var(curval), newval;
1215 WAKE_Q(wake_q);
1216 bool deboost;
1217 int ret = 0;
1218
1219 if (!pi_state)
1220 return -EINVAL;
1221
1222 /*
1223 * If current does not own the pi_state then the futex is
1224 * inconsistent and user space fiddled with the futex value.
1225 */
1226 if (pi_state->owner != current)
1227 return -EINVAL;
1228
1229 raw_spin_lock(&pi_state->pi_mutex.wait_lock);
1230 new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
1231
1232 /*
1233 * It is possible that the next waiter (the one that brought
1234 * this owner to the kernel) timed out and is no longer
1235 * waiting on the lock.
1236 */
1237 if (!new_owner)
1238 new_owner = this->task;
1239
1240 /*
1241 * We pass it to the next owner. The WAITERS bit is always
1242 * kept enabled while there is PI state around. We clean up the
1243 * owner died bit, because we are the owner.
1244 */
1245 newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
1246
1247 if (unlikely(should_fail_futex(true)))
1248 ret = -EFAULT;
1249
1250 if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
1251 ret = -EFAULT;
1252 else if (curval != uval)
1253 ret = -EINVAL;
1254 if (ret) {
1255 raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
1256 return ret;
1257 }
1258
1259 raw_spin_lock_irq(&pi_state->owner->pi_lock);
1260 WARN_ON(list_empty(&pi_state->list));
1261 list_del_init(&pi_state->list);
1262 raw_spin_unlock_irq(&pi_state->owner->pi_lock);
1263
1264 raw_spin_lock_irq(&new_owner->pi_lock);
1265 WARN_ON(!list_empty(&pi_state->list));
1266 list_add(&pi_state->list, &new_owner->pi_state_list);
1267 pi_state->owner = new_owner;
1268 raw_spin_unlock_irq(&new_owner->pi_lock);
1269
1270 raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
1271
1272 deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
1273
1274 /*
1275 * First unlock HB so the waiter does not spin on it once it is woken
1276 * up. Second, wake up the waiter before the priority is adjusted. If we
1277 * deboost first (and lose our higher priority), then the task might get
1278 * scheduled away before the wake up can take place.
1279 */
1280 spin_unlock(&hb->lock);
1281 wake_up_q(&wake_q);
1282 if (deboost)
1283 rt_mutex_adjust_prio(current);
1284
1285 return 0;
1286 }
1287
1288 /*
1289 * Express the locking dependencies for lockdep:
1290 */
1291 static inline void
1292 double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
1293 {
1294 if (hb1 <= hb2) {
1295 spin_lock(&hb1->lock);
1296 if (hb1 < hb2)
1297 spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
1298 } else { /* hb1 > hb2 */
1299 spin_lock(&hb2->lock);
1300 spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
1301 }
1302 }
1303
1304 static inline void
1305 double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
1306 {
1307 spin_unlock(&hb1->lock);
1308 if (hb1 != hb2)
1309 spin_unlock(&hb2->lock);
1310 }
1311
1312 /*
1313 * Wake up waiters matching bitset queued on this futex (uaddr).
1314 */
1315 static int
1316 futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
1317 {
1318 struct futex_hash_bucket *hb;
1319 struct futex_q *this, *next;
1320 union futex_key key = FUTEX_KEY_INIT;
1321 int ret;
1322 WAKE_Q(wake_q);
1323
1324 if (!bitset)
1325 return -EINVAL;
1326
1327 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_READ);
1328 if (unlikely(ret != 0))
1329 goto out;
1330
1331 hb = hash_futex(&key);
1332
1333 /* Make sure we really have tasks to wake up */
1334 if (!hb_waiters_pending(hb))
1335 goto out_put_key;
1336
1337 spin_lock(&hb->lock);
1338
1339 plist_for_each_entry_safe(this, next, &hb->chain, list) {
1340 if (match_futex(&this->key, &key)) {
1341 if (this->pi_state || this->rt_waiter) {
1342 ret = -EINVAL;
1343 break;
1344 }
1345
1346 /* Check if one of the bits is set in both bitsets */
1347 if (!(this->bitset & bitset))
1348 continue;
1349
1350 mark_wake_futex(&wake_q, this);
1351 if (++ret >= nr_wake)
1352 break;
1353 }
1354 }
1355
1356 spin_unlock(&hb->lock);
1357 wake_up_q(&wake_q);
1358 out_put_key:
1359 put_futex_key(&key);
1360 out:
1361 return ret;
1362 }
1363
1364 /*
1365 * Wake up all waiters hashed on the physical page that is mapped
1366 * to this virtual address:
1367 */
1368 static int
1369 futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
1370 int nr_wake, int nr_wake2, int op)
1371 {
1372 union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
1373 struct futex_hash_bucket *hb1, *hb2;
1374 struct futex_q *this, *next;
1375 int ret, op_ret;
1376 WAKE_Q(wake_q);
1377
1378 retry:
1379 ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
1380 if (unlikely(ret != 0))
1381 goto out;
1382 ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
1383 if (unlikely(ret != 0))
1384 goto out_put_key1;
1385
1386 hb1 = hash_futex(&key1);
1387 hb2 = hash_futex(&key2);
1388
1389 retry_private:
1390 double_lock_hb(hb1, hb2);
1391 op_ret = futex_atomic_op_inuser(op, uaddr2);
1392 if (unlikely(op_ret < 0)) {
1393
1394 double_unlock_hb(hb1, hb2);
1395
1396 #ifndef CONFIG_MMU
1397 /*
1398 * we don't get EFAULT from MMU faults if we don't have an MMU,
1399 * but we might get them from range checking
1400 */
1401 ret = op_ret;
1402 goto out_put_keys;
1403 #endif
1404
1405 if (unlikely(op_ret != -EFAULT)) {
1406 ret = op_ret;
1407 goto out_put_keys;
1408 }
1409
1410 ret = fault_in_user_writeable(uaddr2);
1411 if (ret)
1412 goto out_put_keys;
1413
1414 if (!(flags & FLAGS_SHARED))
1415 goto retry_private;
1416
1417 put_futex_key(&key2);
1418 put_futex_key(&key1);
1419 goto retry;
1420 }
1421
1422 plist_for_each_entry_safe(this, next, &hb1->chain, list) {
1423 if (match_futex(&this->key, &key1)) {
1424 if (this->pi_state || this->rt_waiter) {
1425 ret = -EINVAL;
1426 goto out_unlock;
1427 }
1428 mark_wake_futex(&wake_q, this);
1429 if (++ret >= nr_wake)
1430 break;
1431 }
1432 }
1433
1434 if (op_ret > 0) {
1435 op_ret = 0;
1436 plist_for_each_entry_safe(this, next, &hb2->chain, list) {
1437 if (match_futex(&this->key, &key2)) {
1438 if (this->pi_state || this->rt_waiter) {
1439 ret = -EINVAL;
1440 goto out_unlock;
1441 }
1442 mark_wake_futex(&wake_q, this);
1443 if (++op_ret >= nr_wake2)
1444 break;
1445 }
1446 }
1447 ret += op_ret;
1448 }
1449
1450 out_unlock:
1451 double_unlock_hb(hb1, hb2);
1452 wake_up_q(&wake_q);
1453 out_put_keys:
1454 put_futex_key(&key2);
1455 out_put_key1:
1456 put_futex_key(&key1);
1457 out:
1458 return ret;
1459 }
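
/*
 * FUTEX_WAKE_OP from user space: val3 packs an atomic modification of
 * *uaddr2 plus a comparison against its old value which gates the
 * second wakeup. A hedged sketch, assuming the FUTEX_OP() encoding from
 * <linux/futex.h>; demo_wake_op() is a hypothetical name:
 */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static long demo_wake_op(int *uaddr1, int *uaddr2)
{
	/*
	 * Atomically set *uaddr2 to 0 and wake one waiter on uaddr1;
	 * if the old *uaddr2 was != 0, also wake one waiter on uaddr2.
	 * The fourth argument slot (normally the timeout) carries
	 * nr_wake2.
	 */
	return syscall(SYS_futex, uaddr1, FUTEX_WAKE_OP, 1, (void *)1UL,
		       uaddr2, FUTEX_OP(FUTEX_OP_SET, 0, FUTEX_OP_CMP_NE, 0));
}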
1460
1461 /**
1462 * requeue_futex() - Requeue a futex_q from one hb to another
1463 * @q: the futex_q to requeue
1464 * @hb1: the source hash_bucket
1465 * @hb2: the target hash_bucket
1466 * @key2: the new key for the requeued futex_q
1467 */
1468 static inline
1469 void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
1470 struct futex_hash_bucket *hb2, union futex_key *key2)
1471 {
1472
1473 /*
1474 * If key1 and key2 hash to the same bucket, no need to
1475 * requeue.
1476 */
1477 if (likely(&hb1->chain != &hb2->chain)) {
1478 plist_del(&q->list, &hb1->chain);
1479 hb_waiters_dec(hb1);
1480 plist_add(&q->list, &hb2->chain);
1481 hb_waiters_inc(hb2);
1482 q->lock_ptr = &hb2->lock;
1483 }
1484 get_futex_key_refs(key2);
1485 q->key = *key2;
1486 }
1487
1488 /**
1489 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
1490 * @q: the futex_q
1491 * @key: the key of the requeue target futex
1492 * @hb: the hash_bucket of the requeue target futex
1493 *
1494 * During futex_requeue, with requeue_pi=1, it is possible to acquire the
1495 * target futex if it is uncontended or via a lock steal. Set the futex_q key
1496 * to the requeue target futex so the waiter can detect the wakeup on the right
1497 * futex, but remove it from the hb and NULL the rt_waiter so it can detect
1498 * atomic lock acquisition. Set the q->lock_ptr to the requeue target hb->lock
1499 * to protect access to the pi_state to fixup the owner later. Must be called
1500 * with both q->lock_ptr and hb->lock held.
1501 */
1502 static inline
1503 void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
1504 struct futex_hash_bucket *hb)
1505 {
1506 get_futex_key_refs(key);
1507 q->key = *key;
1508
1509 __unqueue_futex(q);
1510
1511 WARN_ON(!q->rt_waiter);
1512 q->rt_waiter = NULL;
1513
1514 q->lock_ptr = &hb->lock;
1515
1516 wake_up_state(q->task, TASK_NORMAL);
1517 }
1518
1519 /**
1520 * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
1521 * @pifutex: the user address of the to futex
1522 * @hb1: the from futex hash bucket, must be locked by the caller
1523 * @hb2: the to futex hash bucket, must be locked by the caller
1524 * @key1: the from futex key
1525 * @key2: the to futex key
1526 * @ps: address to store the pi_state pointer
1527 * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
1528 *
1529 * Try and get the lock on behalf of the top waiter if we can do it atomically.
1530 * Wake the top waiter if we succeed. If the caller specified set_waiters,
1531 * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
1532 * hb1 and hb2 must be held by the caller.
1533 *
1534 * Return:
1535 * 0 - failed to acquire the lock atomically;
1536 * >0 - acquired the lock, return value is vpid of the top_waiter
1537 * <0 - error
1538 */
1539 static int futex_proxy_trylock_atomic(u32 __user *pifutex,
1540 struct futex_hash_bucket *hb1,
1541 struct futex_hash_bucket *hb2,
1542 union futex_key *key1, union futex_key *key2,
1543 struct futex_pi_state **ps, int set_waiters)
1544 {
1545 struct futex_q *top_waiter = NULL;
1546 u32 curval;
1547 int ret, vpid;
1548
1549 if (get_futex_value_locked(&curval, pifutex))
1550 return -EFAULT;
1551
1552 if (unlikely(should_fail_futex(true)))
1553 return -EFAULT;
1554
1555 /*
1556 * Find the top_waiter and determine if there are additional waiters.
1557 * If the caller intends to requeue more than 1 waiter to pifutex,
1558 * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
1559 * as we have means to handle the possible fault. If not, don't set
1560 * the bit unnecessarily as it will force the subsequent unlock to enter
1561 * the kernel.
1562 */
1563 top_waiter = futex_top_waiter(hb1, key1);
1564
1565 /* There are no waiters, nothing for us to do. */
1566 if (!top_waiter)
1567 return 0;
1568
1569 /* Ensure we requeue to the expected futex. */
1570 if (!match_futex(top_waiter->requeue_pi_key, key2))
1571 return -EINVAL;
1572
1573 /*
1574 * Try to take the lock for top_waiter. Set the FUTEX_WAITERS bit in
1575 * the contended case or if set_waiters is 1. The pi_state is returned
1576 * in ps in contended cases.
1577 */
1578 vpid = task_pid_vnr(top_waiter->task);
1579 ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
1580 set_waiters);
1581 if (ret == 1) {
1582 requeue_pi_wake_futex(top_waiter, key2, hb2);
1583 return vpid;
1584 }
1585 return ret;
1586 }
1587
1588 /**
1589 * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
1590 * @uaddr1: source futex user address
1591 * @flags: futex flags (FLAGS_SHARED, etc.)
1592 * @uaddr2: target futex user address
1593 * @nr_wake: number of waiters to wake (must be 1 for requeue_pi)
1594 * @nr_requeue: number of waiters to requeue (0-INT_MAX)
1595 * @cmpval: @uaddr1 expected value (or %NULL)
1596 * @requeue_pi: if we are attempting to requeue from a non-pi futex to a
1597 * pi futex (pi to pi requeue is not supported)
1598 *
1599 * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
1600 * uaddr2 atomically on behalf of the top waiter.
1601 *
1602 * Return:
1603 * >=0 - on success, the number of tasks requeued or woken;
1604 * <0 - on error
1605 */
1606 static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
1607 u32 __user *uaddr2, int nr_wake, int nr_requeue,
1608 u32 *cmpval, int requeue_pi)
1609 {
1610 union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
1611 int drop_count = 0, task_count = 0, ret;
1612 struct futex_pi_state *pi_state = NULL;
1613 struct futex_hash_bucket *hb1, *hb2;
1614 struct futex_q *this, *next;
1615 WAKE_Q(wake_q);
1616
1617 if (requeue_pi) {
1618 /*
1619 * Requeue PI only works on two distinct uaddrs. This
1620 * check is only valid for private futexes. See below.
1621 */
1622 if (uaddr1 == uaddr2)
1623 return -EINVAL;
1624
1625 /*
1626 * requeue_pi requires a pi_state, try to allocate it now
1627 * without any locks in case it fails.
1628 */
1629 if (refill_pi_state_cache())
1630 return -ENOMEM;
1631 /*
1632 * requeue_pi must wake as many tasks as it can, up to nr_wake
1633 * + nr_requeue, since it acquires the rt_mutex prior to
1634 * returning to userspace, so as to not leave the rt_mutex with
1635 * waiters and no owner. However, second and third wake-ups
1636 * cannot be predicted as they involve race conditions with the
1637 * first wake and a fault while looking up the pi_state. Both
1638 * pthread_cond_signal() and pthread_cond_broadcast() should
1639 * use nr_wake=1.
1640 */
1641 if (nr_wake != 1)
1642 return -EINVAL;
1643 }
1644
1645 retry:
1646 ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
1647 if (unlikely(ret != 0))
1648 goto out;
1649 ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
1650 requeue_pi ? VERIFY_WRITE : VERIFY_READ);
1651 if (unlikely(ret != 0))
1652 goto out_put_key1;
1653
1654 /*
1655 * The check above which compares uaddrs is not sufficient for
1656 * shared futexes. We need to compare the keys:
1657 */
1658 if (requeue_pi && match_futex(&key1, &key2)) {
1659 ret = -EINVAL;
1660 goto out_put_keys;
1661 }
1662
1663 hb1 = hash_futex(&key1);
1664 hb2 = hash_futex(&key2);
1665
1666 retry_private:
1667 hb_waiters_inc(hb2);
1668 double_lock_hb(hb1, hb2);
1669
1670 if (likely(cmpval != NULL)) {
1671 u32 curval;
1672
1673 ret = get_futex_value_locked(&curval, uaddr1);
1674
1675 if (unlikely(ret)) {
1676 double_unlock_hb(hb1, hb2);
1677 hb_waiters_dec(hb2);
1678
1679 ret = get_user(curval, uaddr1);
1680 if (ret)
1681 goto out_put_keys;
1682
1683 if (!(flags & FLAGS_SHARED))
1684 goto retry_private;
1685
1686 put_futex_key(&key2);
1687 put_futex_key(&key1);
1688 goto retry;
1689 }
1690 if (curval != *cmpval) {
1691 ret = -EAGAIN;
1692 goto out_unlock;
1693 }
1694 }
1695
1696 if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
1697 /*
1698 * Attempt to acquire uaddr2 and wake the top waiter. If we
1699 * intend to requeue waiters, force setting the FUTEX_WAITERS
1700 * bit. We force this here where we are able to easily handle
1701 * faults, rather than in the requeue loop below.
1702 */
1703 ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
1704 &key2, &pi_state, nr_requeue);
1705
1706 /*
1707 * At this point the top_waiter has either taken uaddr2 or is
1708 * waiting on it. If the former, then the pi_state will not
1709 * exist yet, look it up one more time to ensure we have a
1710 * reference to it. If the lock was taken, ret contains the
1711 * vpid of the top waiter task.
1712 * If the lock was not taken, we have pi_state and an initial
1713 * refcount on it. In case of an error we have nothing.
1714 */
1715 if (ret > 0) {
1716 WARN_ON(pi_state);
1717 drop_count++;
1718 task_count++;
1719 /*
1720 * If we acquired the lock, then the user space value
1721 * of uaddr2 should be vpid. It cannot be changed by
1722 * the top waiter as it is blocked on hb2 lock if it
1723 * tries to do so. If something fiddled with it behind
1724 * our back the pi state lookup might unearth it. So
1725 * we rather use the known value than rereading and
1726 * handing potential crap to lookup_pi_state.
1727 *
1728 * If that call succeeds then we have pi_state and an
1729 * initial refcount on it.
1730 */
1731 ret = lookup_pi_state(ret, hb2, &key2, &pi_state);
1732 }
1733
1734 switch (ret) {
1735 case 0:
1736 /* We hold a reference on the pi state. */
1737 break;
1738 case -EFAULT:
1739 put_pi_state(pi_state);
1740 pi_state = NULL;
1741 double_unlock_hb(hb1, hb2);
1742 hb_waiters_dec(hb2);
1743 put_futex_key(&key2);
1744 put_futex_key(&key1);
1745 ret = fault_in_user_writeable(uaddr2);
1746 if (!ret)
1747 goto retry;
1748 goto out;
1749 case -EAGAIN:
1750 /*
1751 * Two reasons for this:
1752 * - Owner is exiting and we just wait for the
1753 * exit to complete.
1754 * - The user space value changed.
1755 */
1756 put_pi_state(pi_state);
1757 pi_state = NULL;
1758 double_unlock_hb(hb1, hb2);
1759 hb_waiters_dec(hb2);
1760 put_futex_key(&key2);
1761 put_futex_key(&key1);
1762 cond_resched();
1763 goto retry;
1764 default:
1765 goto out_unlock;
1766 }
1767 }
1768
1769 plist_for_each_entry_safe(this, next, &hb1->chain, list) {
1770 if (task_count - nr_wake >= nr_requeue)
1771 break;
1772
1773 if (!match_futex(&this->key, &key1))
1774 continue;
1775
1776 /*
1777 * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
1778 * be paired with each other and no other futex ops.
1779 *
1780 * We should never be requeueing a futex_q with a pi_state,
1781 * which is awaiting a futex_unlock_pi().
1782 */
1783 if ((requeue_pi && !this->rt_waiter) ||
1784 (!requeue_pi && this->rt_waiter) ||
1785 this->pi_state) {
1786 ret = -EINVAL;
1787 break;
1788 }
1789
1790 /*
1791 * Wake nr_wake waiters. For requeue_pi, if we acquired the
1792 * lock, we already woke the top_waiter. If not, it will be
1793 * woken by futex_unlock_pi().
1794 */
1795 if (++task_count <= nr_wake && !requeue_pi) {
1796 mark_wake_futex(&wake_q, this);
1797 continue;
1798 }
1799
1800 /* Ensure we requeue to the expected futex for requeue_pi. */
1801 if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
1802 ret = -EINVAL;
1803 break;
1804 }
1805
1806 /*
1807 * Requeue nr_requeue waiters and possibly one more in the case
1808 * of requeue_pi if we couldn't acquire the lock atomically.
1809 */
1810 if (requeue_pi) {
1811 /*
1812 * Prepare the waiter to take the rt_mutex. Take a
1813 * refcount on the pi_state and store the pointer in
1814 * the futex_q object of the waiter.
1815 */
1816 atomic_inc(&pi_state->refcount);
1817 this->pi_state = pi_state;
1818 ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
1819 this->rt_waiter,
1820 this->task);
1821 if (ret == 1) {
1822 /*
1823 * We got the lock. We do neither drop the
1824 * refcount on pi_state nor clear
1825 * this->pi_state because the waiter needs the
1826 * pi_state for cleaning up the user space
1827 * value. It will drop the refcount after
1828 * doing so.
1829 */
1830 requeue_pi_wake_futex(this, &key2, hb2);
1831 drop_count++;
1832 continue;
1833 } else if (ret) {
1834 /*
1835 * rt_mutex_start_proxy_lock() detected a
1836 * potential deadlock when we tried to queue
1837 * that waiter. Drop the pi_state reference
1838 * which we took above and remove the pointer
1839 * to the state from the waiter's futex_q
1840 * object.
1841 */
1842 this->pi_state = NULL;
1843 put_pi_state(pi_state);
1844 goto out_unlock;
1845 }
1846 }
1847 requeue_futex(this, hb1, hb2, &key2);
1848 drop_count++;
1849 }
1850
1851 out_unlock:
1852 /*
1853 * We took an extra initial reference to the pi_state either
1854 * in futex_proxy_trylock_atomic() or in lookup_pi_state(). We
1855 * need to drop it here again.
1856 */
1857 put_pi_state(pi_state);
1858 double_unlock_hb(hb1, hb2);
1859 wake_up_q(&wake_q);
1860 hb_waiters_dec(hb2);
1861
1862 /*
1863 * drop_futex_key_refs() must be called outside the spinlocks. During
1864 * the requeue we moved futex_q's from the hash bucket at key1 to the
1865 * one at key2 and updated their key pointer. We no longer need to
1866 * hold the references to key1.
1867 */
1868 while (--drop_count >= 0)
1869 drop_futex_key_refs(&key1);
1870
1871 out_put_keys:
1872 put_futex_key(&key2);
1873 out_put_key1:
1874 put_futex_key(&key1);
1875 out:
1876 return ret ? ret : task_count;
1877 }
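
/*
 * The classic consumer of the requeue machinery is the condition
 * variable: a broadcast wakes one waiter and requeues the rest onto the
 * mutex's futex word, so they trickle out one per unlock instead of
 * thundering. A hedged userspace sketch of the non-PI variant, assuming
 * the raw syscall interface; demo_* names are hypothetical:
 */
#include <limits.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static long demo_cond_broadcast(int *cond_futex, int *mutex_futex, int curval)
{
	/*
	 * Wake 1 waiter on cond_futex and requeue up to INT_MAX others
	 * onto mutex_futex; fails with EAGAIN if *cond_futex no longer
	 * equals curval (the cmpval check above).
	 */
	return syscall(SYS_futex, cond_futex, FUTEX_CMP_REQUEUE, 1,
		       (void *)(unsigned long)INT_MAX, mutex_futex, curval);
}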
1878
1879 /* The key must be already stored in q->key. */
1880 static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
1881 __acquires(&hb->lock)
1882 {
1883 struct futex_hash_bucket *hb;
1884
1885 hb = hash_futex(&q->key);
1886
1887 /*
1888 * Increment the counter before taking the lock so that
1889 * a potential waker won't miss a task that is about to sleep and is
1890 * waiting for the spinlock. This is safe as all queue_lock()
1891 * users end up calling queue_me(). Similarly, for housekeeping,
1892 * decrement the counter at queue_unlock() when some error has
1893 * occurred and we don't end up adding the task to the list.
1894 */
1895 hb_waiters_inc(hb);
1896
1897 q->lock_ptr = &hb->lock;
1898
1899 spin_lock(&hb->lock); /* implies MB (A) */
1900 return hb;
1901 }
1902
1903 static inline void
1904 queue_unlock(struct futex_hash_bucket *hb)
1905 __releases(&hb->lock)
1906 {
1907 spin_unlock(&hb->lock);
1908 hb_waiters_dec(hb);
1909 }
1910
1911 /**
1912 * queue_me() - Enqueue the futex_q on the futex_hash_bucket
1913 * @q: The futex_q to enqueue
1914 * @hb: The destination hash bucket
1915 *
1916 * The hb->lock must be held by the caller, and is released here. A call to
1917 * queue_me() is typically paired with exactly one call to unqueue_me(). The
1918 * exceptions involve the PI related operations, which may use unqueue_me_pi()
1919 * or nothing if the unqueue is done as part of the wake process and the unqueue
1920 * state is implicit in the state of the woken task (see futex_wait_requeue_pi() for
1921 * an example).
1922 */
1923 static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
1924 __releases(&hb->lock)
1925 {
1926 int prio;
1927
1928 /*
1929 * The priority used to register this element is
1930 * - either the real thread-priority for the real-time threads
1931 * (i.e. threads with a priority lower than MAX_RT_PRIO)
1932 * - or MAX_RT_PRIO for non-RT threads.
1933 * Thus, all RT-threads are woken first in priority order, and
1934 * the others are woken last, in FIFO order.
1935 */
1936 prio = min(current->normal_prio, MAX_RT_PRIO);
1937
1938 plist_node_init(&q->list, prio);
1939 plist_add(&q->list, &hb->chain);
1940 q->task = current;
1941 spin_unlock(&hb->lock);
1942 }
1943
1944 /**
1945 * unqueue_me() - Remove the futex_q from its futex_hash_bucket
1946 * @q: The futex_q to unqueue
1947 *
1948 * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
1949 * be paired with exactly one earlier call to queue_me().
1950 *
1951 * Return:
1952 * 1 - if the futex_q was still queued (and we unqueued it);
1953 * 0 - if the futex_q was already removed by the waking thread
1954 */
1955 static int unqueue_me(struct futex_q *q)
1956 {
1957 spinlock_t *lock_ptr;
1958 int ret = 0;
1959
1960 /* In the common case we don't take the spinlock, which is nice. */
1961 retry:
1962 lock_ptr = q->lock_ptr;
1963 barrier();
1964 if (lock_ptr != NULL) {
1965 spin_lock(lock_ptr);
1966 /*
1967 * q->lock_ptr can change between reading it and
1968 * spin_lock(), causing us to take the wrong lock. This
1969 * corrects the race condition.
1970 *
1971 * Reasoning goes like this: if we have the wrong lock,
1972 * q->lock_ptr must have changed (maybe several times)
1973 * between reading it and the spin_lock(). It can
1974 * change again after the spin_lock() but only if it was
1975 * already changed before the spin_lock(). It cannot,
1976 * however, change back to the original value. Therefore
1977 * we can detect whether we acquired the correct lock.
1978 */
1979 if (unlikely(lock_ptr != q->lock_ptr)) {
1980 spin_unlock(lock_ptr);
1981 goto retry;
1982 }
1983 __unqueue_futex(q);
1984
1985 BUG_ON(q->pi_state);
1986
1987 spin_unlock(lock_ptr);
1988 ret = 1;
1989 }
1990
1991 drop_futex_key_refs(&q->key);
1992 return ret;
1993 }
1994
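/*
 * A user-space sketch of the same "read the lock pointer, lock it,
 * re-check" pattern that unqueue_me() uses above. All names here are
 * illustrative, not kernel APIs; this is just the idiom under POSIX
 * threads with the GCC __atomic builtins.
 */
#include <pthread.h>
#include <stddef.h>

struct unode {
	pthread_mutex_t *lock_ptr;	/* may be changed/NULLed by a waker */
};

static int unode_remove(struct unode *n)
{
	pthread_mutex_t *lock;

	for (;;) {
		lock = __atomic_load_n(&n->lock_ptr, __ATOMIC_ACQUIRE);
		if (!lock)
			return 0;	/* the other side already removed us */
		pthread_mutex_lock(lock);
		/* Revalidate: lock_ptr may have changed before we got the lock. */
		if (lock == __atomic_load_n(&n->lock_ptr, __ATOMIC_ACQUIRE))
			break;
		pthread_mutex_unlock(lock);	/* wrong lock, retry */
	}
	/* ... unlink n from the list protected by *lock here ... */
	pthread_mutex_unlock(lock);
	return 1;
}
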
1995 /*
1996 * PI futexes cannot be requeued and must remove themselves from the
1997 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
1998 * and dropped here.
1999 */
2000 static void unqueue_me_pi(struct futex_q *q)
2001 __releases(q->lock_ptr)
2002 {
2003 __unqueue_futex(q);
2004
2005 BUG_ON(!q->pi_state);
2006 put_pi_state(q->pi_state);
2007 q->pi_state = NULL;
2008
2009 spin_unlock(q->lock_ptr);
2010 }
2011
2012 /*
2013 * Fixup the pi_state owner with the new owner.
2014 *
2015 * Must be called with hash bucket lock held and mm->sem held for non
2016 * private futexes.
2017 */
2018 static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
2019 struct task_struct *newowner)
2020 {
2021 u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
2022 struct futex_pi_state *pi_state = q->pi_state;
2023 struct task_struct *oldowner = pi_state->owner;
2024 u32 uval, uninitialized_var(curval), newval;
2025 int ret;
2026
2027 /* Owner died? */
2028 if (!pi_state->owner)
2029 newtid |= FUTEX_OWNER_DIED;
2030
2031 /*
2032 * We are here either because we stole the rtmutex from the
2033 * previous highest priority waiter or we are the highest priority
2034 * waiter but failed to get the rtmutex the first time.
2035 * We have to replace the newowner TID in the user space variable.
2036 * This must be atomic as we have to preserve the owner died bit here.
2037 *
2038 * Note: We write the user space value _before_ changing the pi_state
2039 * because we can fault here. Imagine swapped out pages or a fork
2040 * that marked all the anonymous memory readonly for cow.
2041 *
2042 * Modifying pi_state _before_ the user space value would
2043 * leave the pi_state in an inconsistent state when we fault
2044 * here, because we need to drop the hash bucket lock to
2045 * handle the fault. This might be observed in the PID check
2046 * in lookup_pi_state.
2047 */
2048 retry:
2049 if (get_futex_value_locked(&uval, uaddr))
2050 goto handle_fault;
2051
2052 while (1) {
2053 newval = (uval & FUTEX_OWNER_DIED) | newtid;
2054
2055 if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
2056 goto handle_fault;
2057 if (curval == uval)
2058 break;
2059 uval = curval;
2060 }
2061
2062 /*
2063 * We fixed up user space. Now we need to fix the pi_state
2064 * itself.
2065 */
2066 if (pi_state->owner != NULL) {
2067 raw_spin_lock_irq(&pi_state->owner->pi_lock);
2068 WARN_ON(list_empty(&pi_state->list));
2069 list_del_init(&pi_state->list);
2070 raw_spin_unlock_irq(&pi_state->owner->pi_lock);
2071 }
2072
2073 pi_state->owner = newowner;
2074
2075 raw_spin_lock_irq(&newowner->pi_lock);
2076 WARN_ON(!list_empty(&pi_state->list));
2077 list_add(&pi_state->list, &newowner->pi_state_list);
2078 raw_spin_unlock_irq(&newowner->pi_lock);
2079 return 0;
2080
2081 /*
2082 * To handle the page fault we need to drop the hash bucket
2083 * lock here. That gives the other task (either the highest priority
2084 * waiter itself or the task which stole the rtmutex) the
2085 * chance to try the fixup of the pi_state. So once we are
2086 * back from handling the fault we need to check the pi_state
2087 * after reacquiring the hash bucket lock and before trying to
2088 * do another fixup. When the fixup has been done already we
2089 * simply return.
2090 */
2091 handle_fault:
2092 spin_unlock(q->lock_ptr);
2093
2094 ret = fault_in_user_writeable(uaddr);
2095
2096 spin_lock(q->lock_ptr);
2097
2098 /*
2099 * Check if someone else fixed it for us:
2100 */
2101 if (pi_state->owner != oldowner)
2102 return 0;
2103
2104 if (ret)
2105 return ret;
2106
2107 goto retry;
2108 }
2109
2110 static long futex_wait_restart(struct restart_block *restart);
2111
2112 /**
2113 * fixup_owner() - Post lock pi_state and corner case management
2114 * @uaddr: user address of the futex
2115 * @q: futex_q (contains pi_state and access to the rt_mutex)
2116 * @locked: if the attempt to take the rt_mutex succeeded (1) or not (0)
2117 *
2118 * After attempting to lock an rt_mutex, this function is called to cleanup
2119 * the pi_state owner as well as handle race conditions that may allow us to
2120 * acquire the lock. Must be called with the hb lock held.
2121 *
2122 * Return:
2123 * 1 - success, lock taken;
2124 * 0 - success, lock not taken;
2125 * <0 - on error (-EFAULT)
2126 */
2127 static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
2128 {
2129 struct task_struct *owner;
2130 int ret = 0;
2131
2132 if (locked) {
2133 /*
2134 * Got the lock. We might not be the anticipated owner if we
2135 * did a lock-steal - fix up the PI-state in that case:
2136 */
2137 if (q->pi_state->owner != current)
2138 ret = fixup_pi_state_owner(uaddr, q, current);
2139 goto out;
2140 }
2141
2142 /*
2143	 * Catch the rare case where the lock was released when we were on the
2144 * way back before we locked the hash bucket.
2145 */
2146 if (q->pi_state->owner == current) {
2147 /*
2148 * Try to get the rt_mutex now. This might fail as some other
2149		 * task acquired the rt_mutex after we removed ourselves from the
2150 * rt_mutex waiters list.
2151 */
2152 if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
2153 locked = 1;
2154 goto out;
2155 }
2156
2157 /*
2158 * pi_state is incorrect, some other task did a lock steal and
2159 * we returned due to timeout or signal without taking the
2160 * rt_mutex. Too late.
2161 */
2162 raw_spin_lock(&q->pi_state->pi_mutex.wait_lock);
2163 owner = rt_mutex_owner(&q->pi_state->pi_mutex);
2164 if (!owner)
2165 owner = rt_mutex_next_owner(&q->pi_state->pi_mutex);
2166 raw_spin_unlock(&q->pi_state->pi_mutex.wait_lock);
2167 ret = fixup_pi_state_owner(uaddr, q, owner);
2168 goto out;
2169 }
2170
2171 /*
2172 * Paranoia check. If we did not take the lock, then we should not be
2173 * the owner of the rt_mutex.
2174 */
2175 if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
2176 printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
2177 "pi-state %p\n", ret,
2178 q->pi_state->pi_mutex.owner,
2179 q->pi_state->owner);
2180
2181 out:
2182 return ret ? ret : locked;
2183 }
2184
2185 /**
2186 * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
2187 * @hb: the futex hash bucket, must be locked by the caller
2188 * @q: the futex_q to queue up on
2189 * @timeout: the prepared hrtimer_sleeper, or null for no timeout
2190 */
2191 static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
2192 struct hrtimer_sleeper *timeout)
2193 {
2194 /*
2195 * The task state is guaranteed to be set before another task can
2196 * wake it. set_current_state() is implemented using smp_store_mb() and
2197 * queue_me() calls spin_unlock() upon completion, both serializing
2198 * access to the hash list and forcing another memory barrier.
2199 */
2200 set_current_state(TASK_INTERRUPTIBLE);
2201 queue_me(q, hb);
2202
2203 /* Arm the timer */
2204 if (timeout)
2205 hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
2206
2207 /*
2208 * If we have been removed from the hash list, then another task
2209 * has tried to wake us, and we can skip the call to schedule().
2210 */
2211 if (likely(!plist_node_empty(&q->list))) {
2212 /*
2213 * If the timer has already expired, current will already be
2214 * flagged for rescheduling. Only call schedule if there
2215 * is no timeout, or if it has yet to expire.
2216 */
2217 if (!timeout || timeout->task)
2218 freezable_schedule();
2219 }
2220 __set_current_state(TASK_RUNNING);
2221 }
2222
2223 /**
2224 * futex_wait_setup() - Prepare to wait on a futex
2225 * @uaddr: the futex userspace address
2226 * @val: the expected value
2227 * @flags: futex flags (FLAGS_SHARED, etc.)
2228 * @q: the associated futex_q
2229 * @hb: storage for hash_bucket pointer to be returned to caller
2230 *
2231 * Setup the futex_q and locate the hash_bucket. Get the futex value and
2232 * compare it with the expected value. Handle atomic faults internally.
2233 * Return with the hb lock held and a q.key reference on success, and unlocked
2234 * with no q.key reference on failure.
2235 *
2236 * Return:
2237 * 0 - uaddr contains val and hb has been locked;
2238 * <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
2239 */
2240 static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
2241 struct futex_q *q, struct futex_hash_bucket **hb)
2242 {
2243 u32 uval;
2244 int ret;
2245
2246 /*
2247 * Access the page AFTER the hash-bucket is locked.
2248 * Order is important:
2249 *
2250 * Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
2251 * Userspace waker: if (cond(var)) { var = new; futex_wake(&var); }
2252 *
2253 * The basic logical guarantee of a futex is that it blocks ONLY
2254 * if cond(var) is known to be true at the time of blocking, for
2255 * any cond. If we locked the hash-bucket after testing *uaddr, that
2256 * would open a race condition where we could block indefinitely with
2257 * cond(var) false, which would violate the guarantee.
2258 *
2259 * On the other hand, we insert q and release the hash-bucket only
2260 * after testing *uaddr. This guarantees that futex_wait() will NOT
2261	 * absorb a wakeup if *uaddr does not match the desired value
2262 * while the syscall executes.
2263 */
2264 retry:
2265 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, VERIFY_READ);
2266 if (unlikely(ret != 0))
2267 return ret;
2268
2269 retry_private:
2270 *hb = queue_lock(q);
2271
2272 ret = get_futex_value_locked(&uval, uaddr);
2273
2274 if (ret) {
2275 queue_unlock(*hb);
2276
2277 ret = get_user(uval, uaddr);
2278 if (ret)
2279 goto out;
2280
2281 if (!(flags & FLAGS_SHARED))
2282 goto retry_private;
2283
2284 put_futex_key(&q->key);
2285 goto retry;
2286 }
2287
2288 if (uval != val) {
2289 queue_unlock(*hb);
2290 ret = -EWOULDBLOCK;
2291 }
2292
2293 out:
2294 if (ret)
2295 put_futex_key(&q->key);
2296 return ret;
2297 }
2298
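/*
 * A minimal user-space sketch of the waiter/waker protocol documented in
 * futex_wait_setup() above; assumes Linux with glibc's syscall(2) wrapper.
 * The variable and function names are illustrative.
 */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdatomic.h>

static atomic_uint var;			/* the 32-bit futex word */

static void wait_for_var(void)
{
	unsigned int val = atomic_load(&var);

	while (val == 0) {		/* cond(val): nothing to consume yet */
		/* Blocks only if var still equals val; returns EAGAIN otherwise. */
		syscall(SYS_futex, &var, FUTEX_WAIT, val, NULL, NULL, 0);
		val = atomic_load(&var);
	}
}

static void signal_var(void)
{
	atomic_store(&var, 1);		/* var = new */
	syscall(SYS_futex, &var, FUTEX_WAKE, 1, NULL, NULL, 0);
}
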
2299 static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
2300 ktime_t *abs_time, u32 bitset)
2301 {
2302 struct hrtimer_sleeper timeout, *to = NULL;
2303 struct restart_block *restart;
2304 struct futex_hash_bucket *hb;
2305 struct futex_q q = futex_q_init;
2306 int ret;
2307
2308 if (!bitset)
2309 return -EINVAL;
2310 q.bitset = bitset;
2311
2312 if (abs_time) {
2313 to = &timeout;
2314
2315 hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
2316 CLOCK_REALTIME : CLOCK_MONOTONIC,
2317 HRTIMER_MODE_ABS);
2318 hrtimer_init_sleeper(to, current);
2319 hrtimer_set_expires_range_ns(&to->timer, *abs_time,
2320 current->timer_slack_ns);
2321 }
2322
2323 retry:
2324 /*
2325 * Prepare to wait on uaddr. On success, holds hb lock and increments
2326 * q.key refs.
2327 */
2328 ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
2329 if (ret)
2330 goto out;
2331
2332 /* queue_me and wait for wakeup, timeout, or a signal. */
2333 futex_wait_queue_me(hb, &q, to);
2334
2335 /* If we were woken (and unqueued), we succeeded, whatever. */
2336 ret = 0;
2337 /* unqueue_me() drops q.key ref */
2338 if (!unqueue_me(&q))
2339 goto out;
2340 ret = -ETIMEDOUT;
2341 if (to && !to->task)
2342 goto out;
2343
2344 /*
2345 * We expect signal_pending(current), but we might be the
2346 * victim of a spurious wakeup as well.
2347 */
2348 if (!signal_pending(current))
2349 goto retry;
2350
2351 ret = -ERESTARTSYS;
2352 if (!abs_time)
2353 goto out;
2354
2355 restart = &current->restart_block;
2356 restart->fn = futex_wait_restart;
2357 restart->futex.uaddr = uaddr;
2358 restart->futex.val = val;
2359 restart->futex.time = abs_time->tv64;
2360 restart->futex.bitset = bitset;
2361 restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
2362
2363 ret = -ERESTART_RESTARTBLOCK;
2364
2365 out:
2366 if (to) {
2367 hrtimer_cancel(&to->timer);
2368 destroy_hrtimer_on_stack(&to->timer);
2369 }
2370 return ret;
2371 }
2372
2373
2374 static long futex_wait_restart(struct restart_block *restart)
2375 {
2376 u32 __user *uaddr = restart->futex.uaddr;
2377 ktime_t t, *tp = NULL;
2378
2379 if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
2380 t.tv64 = restart->futex.time;
2381 tp = &t;
2382 }
2383 restart->fn = do_no_restart_syscall;
2384
2385 return (long)futex_wait(uaddr, restart->futex.flags,
2386 restart->futex.val, tp, restart->futex.bitset);
2387 }
2388
2389
2390 /*
2391 * Userspace tried a 0 -> TID atomic transition of the futex value
2392 * and failed. The kernel side here does the whole locking operation:
2393 * if there are waiters then it will block, relying on rt-mutexes;
2394 * it does PI, etc. (Due to races the kernel might see
2395 * a 0 value of the futex too.)
2396 *
2397 * Also serves as the futex trylock_pi() operation, with the corresponding semantics.
2398 */
2399 static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
2400 ktime_t *time, int trylock)
2401 {
2402 struct hrtimer_sleeper timeout, *to = NULL;
2403 struct futex_hash_bucket *hb;
2404 struct futex_q q = futex_q_init;
2405 int res, ret;
2406
2407 if (refill_pi_state_cache())
2408 return -ENOMEM;
2409
2410 if (time) {
2411 to = &timeout;
2412 hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
2413 HRTIMER_MODE_ABS);
2414 hrtimer_init_sleeper(to, current);
2415 hrtimer_set_expires(&to->timer, *time);
2416 }
2417
2418 retry:
2419 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, VERIFY_WRITE);
2420 if (unlikely(ret != 0))
2421 goto out;
2422
2423 retry_private:
2424 hb = queue_lock(&q);
2425
2426 ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
2427 if (unlikely(ret)) {
2428 /*
2429 * Atomic work succeeded and we got the lock,
2430 * or failed. Either way, we do _not_ block.
2431 */
2432 switch (ret) {
2433 case 1:
2434 /* We got the lock. */
2435 ret = 0;
2436 goto out_unlock_put_key;
2437 case -EFAULT:
2438 goto uaddr_faulted;
2439 case -EAGAIN:
2440 /*
2441 * Two reasons for this:
2442 * - Task is exiting and we just wait for the
2443 * exit to complete.
2444 * - The user space value changed.
2445 */
2446 queue_unlock(hb);
2447 put_futex_key(&q.key);
2448 cond_resched();
2449 goto retry;
2450 default:
2451 goto out_unlock_put_key;
2452 }
2453 }
2454
2455 /*
2456 * Only actually queue now that the atomic ops are done:
2457 */
2458 queue_me(&q, hb);
2459
2460 WARN_ON(!q.pi_state);
2461 /*
2462 * Block on the PI mutex:
2463 */
2464 if (!trylock) {
2465 ret = rt_mutex_timed_futex_lock(&q.pi_state->pi_mutex, to);
2466 } else {
2467 ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
2468 /* Fixup the trylock return value: */
2469 ret = ret ? 0 : -EWOULDBLOCK;
2470 }
2471
2472 spin_lock(q.lock_ptr);
2473 /*
2474 * Fixup the pi_state owner and possibly acquire the lock if we
2475 * haven't already.
2476 */
2477 res = fixup_owner(uaddr, &q, !ret);
2478 /*
2479	 * If fixup_owner() returned an error, propagate that. If it acquired
2480 * the lock, clear our -ETIMEDOUT or -EINTR.
2481 */
2482 if (res)
2483 ret = (res < 0) ? res : 0;
2484
2485 /*
2486 * If fixup_owner() faulted and was unable to handle the fault, unlock
2487 * it and return the fault to userspace.
2488 */
2489 if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
2490 rt_mutex_unlock(&q.pi_state->pi_mutex);
2491
2492 /* Unqueue and drop the lock */
2493 unqueue_me_pi(&q);
2494
2495 goto out_put_key;
2496
2497 out_unlock_put_key:
2498 queue_unlock(hb);
2499
2500 out_put_key:
2501 put_futex_key(&q.key);
2502 out:
2503 if (to)
2504 destroy_hrtimer_on_stack(&to->timer);
2505 return ret != -EINTR ? ret : -ERESTARTNOINTR;
2506
2507 uaddr_faulted:
2508 queue_unlock(hb);
2509
2510 ret = fault_in_user_writeable(uaddr);
2511 if (ret)
2512 goto out_put_key;
2513
2514 if (!(flags & FLAGS_SHARED))
2515 goto retry_private;
2516
2517 put_futex_key(&q.key);
2518 goto retry;
2519 }
2520
2521 /*
2522 * Userspace attempted a TID -> 0 atomic transition, and failed.
2523 * This is the in-kernel slowpath: we look up the PI state (if any),
2524 * and do the rt-mutex unlock.
2525 */
2526 static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
2527 {
2528 u32 uninitialized_var(curval), uval, vpid = task_pid_vnr(current);
2529 union futex_key key = FUTEX_KEY_INIT;
2530 struct futex_hash_bucket *hb;
2531 struct futex_q *match;
2532 int ret;
2533
2534 retry:
2535 if (get_user(uval, uaddr))
2536 return -EFAULT;
2537 /*
2538 * We release only a lock we actually own:
2539 */
2540 if ((uval & FUTEX_TID_MASK) != vpid)
2541 return -EPERM;
2542
2543 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_WRITE);
2544 if (ret)
2545 return ret;
2546
2547 hb = hash_futex(&key);
2548 spin_lock(&hb->lock);
2549
2550 /*
2551 * Check waiters first. We do not trust user space values at
2552 * all and we at least want to know if user space fiddled
2553 * with the futex value instead of blindly unlocking.
2554 */
2555 match = futex_top_waiter(hb, &key);
2556 if (match) {
2557 ret = wake_futex_pi(uaddr, uval, match, hb);
2558 /*
2559 * In case of success wake_futex_pi dropped the hash
2560 * bucket lock.
2561 */
2562 if (!ret)
2563 goto out_putkey;
2564 /*
2565 * The atomic access to the futex value generated a
2566 * pagefault, so retry the user-access and the wakeup:
2567 */
2568 if (ret == -EFAULT)
2569 goto pi_faulted;
2570 /*
2571 * wake_futex_pi has detected invalid state. Tell user
2572 * space.
2573 */
2574 goto out_unlock;
2575 }
2576
2577 /*
2578 * We have no kernel internal state, i.e. no waiters in the
2579 * kernel. Waiters which are about to queue themselves are stuck
2580	 * on hb->lock. So we can safely ignore them. We neither
2581	 * preserve the WAITERS bit nor the OWNER_DIED one. We are the
2582 * owner.
2583 */
2584 if (cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))
2585 goto pi_faulted;
2586
2587 /*
2588 * If uval has changed, let user space handle it.
2589 */
2590 ret = (curval == uval) ? 0 : -EAGAIN;
2591
2592 out_unlock:
2593 spin_unlock(&hb->lock);
2594 out_putkey:
2595 put_futex_key(&key);
2596 return ret;
2597
2598 pi_faulted:
2599 spin_unlock(&hb->lock);
2600 put_futex_key(&key);
2601
2602 ret = fault_in_user_writeable(uaddr);
2603 if (!ret)
2604 goto retry;
2605
2606 return ret;
2607 }
2608
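/*
 * The user-space transitions that futex_lock_pi() and futex_unlock_pi()
 * back up: an uncontended 0 -> TID acquisition and TID -> 0 release done
 * with cmpxchg, entering the kernel only on contention. A minimal sketch,
 * assuming glibc's syscall(2) wrapper; it ignores signals, timeouts and
 * the robust-list interaction a real implementation must handle.
 */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdatomic.h>

static void pi_lock(atomic_uint *futex)
{
	unsigned int expected = 0;
	unsigned int tid = (unsigned int)syscall(SYS_gettid);

	/* Fast path: 0 -> TID without a syscall. */
	if (atomic_compare_exchange_strong(futex, &expected, tid))
		return;
	/* Slow path: the kernel queues us on the rt_mutex and does PI. */
	syscall(SYS_futex, futex, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
}

static void pi_unlock(atomic_uint *futex)
{
	unsigned int expected = (unsigned int)syscall(SYS_gettid);

	/* Fast path: TID -> 0; fails if FUTEX_WAITERS (or other bits) are set. */
	if (atomic_compare_exchange_strong(futex, &expected, 0))
		return;
	/* Slow path: let the kernel hand the lock to the top waiter. */
	syscall(SYS_futex, futex, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
}
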
2609 /**
2610 * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
2611 * @hb:		the hash_bucket futex_q was originally enqueued on
2612 * @q: the futex_q woken while waiting to be requeued
2613 * @key2: the futex_key of the requeue target futex
2614 * @timeout: the timeout associated with the wait (NULL if none)
2615 *
2616 * Detect if the task was woken on the initial futex as opposed to the requeue
2617 * target futex. If so, determine if it was a timeout or a signal that caused
2618 * the wakeup and return the appropriate error code to the caller. Must be
2619 * called with the hb lock held.
2620 *
2621 * Return:
2622 * 0 - no early wakeup detected;
2623 * <0 - -ETIMEDOUT or -ERESTARTNOINTR
2624 */
2625 static inline
2626 int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
2627 struct futex_q *q, union futex_key *key2,
2628 struct hrtimer_sleeper *timeout)
2629 {
2630 int ret = 0;
2631
2632 /*
2633 * With the hb lock held, we avoid races while we process the wakeup.
2634 * We only need to hold hb (and not hb2) to ensure atomicity as the
2635 * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
2636 * It can't be requeued from uaddr2 to something else since we don't
2637 * support a PI aware source futex for requeue.
2638 */
2639 if (!match_futex(&q->key, key2)) {
2640 WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
2641 /*
2642 * We were woken prior to requeue by a timeout or a signal.
2643 * Unqueue the futex_q and determine which it was.
2644 */
2645 plist_del(&q->list, &hb->chain);
2646 hb_waiters_dec(hb);
2647
2648 /* Handle spurious wakeups gracefully */
2649 ret = -EWOULDBLOCK;
2650 if (timeout && !timeout->task)
2651 ret = -ETIMEDOUT;
2652 else if (signal_pending(current))
2653 ret = -ERESTARTNOINTR;
2654 }
2655 return ret;
2656 }
2657
2658 /**
2659 * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
2660 * @uaddr: the futex we initially wait on (non-pi)
2661 * @flags: futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be
2662 * the same type, no requeueing from private to shared, etc.
2663 * @val: the expected value of uaddr
2664 * @abs_time: absolute timeout
2665 * @bitset: 32 bit wakeup bitset set by userspace, defaults to all
2666 * @uaddr2: the pi futex we will take prior to returning to user-space
2667 *
2668 * The caller will wait on uaddr and will be requeued by futex_requeue() to
2669 * uaddr2, which must be PI aware and distinct from uaddr. Normal wakeup will wake
2670 * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
2671 * userspace. This ensures the rt_mutex maintains an owner when it has waiters;
2672 * without one, the pi logic would not know which task to boost/deboost, if
2673 * there was a need to.
2674 *
2675 * We call schedule in futex_wait_queue_me() when we enqueue and return there
2676 * via the following:
2677 * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
2678 * 2) wakeup on uaddr2 after a requeue
2679 * 3) signal
2680 * 4) timeout
2681 *
2682 * If 3, cleanup and return -ERESTARTNOINTR.
2683 *
2684 * If 2, we may then block on trying to take the rt_mutex and return via:
2685 * 5) successful lock
2686 * 6) signal
2687 * 7) timeout
2688 * 8) other lock acquisition failure
2689 *
2690 * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
2691 *
2692 * If 4 or 7, we cleanup and return with -ETIMEDOUT.
2693 *
2694 * Return:
2695 * 0 - On success;
2696 * <0 - On error
2697 */
2698 static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
2699 u32 val, ktime_t *abs_time, u32 bitset,
2700 u32 __user *uaddr2)
2701 {
2702 struct hrtimer_sleeper timeout, *to = NULL;
2703 struct rt_mutex_waiter rt_waiter;
2704 struct rt_mutex *pi_mutex = NULL;
2705 struct futex_hash_bucket *hb;
2706 union futex_key key2 = FUTEX_KEY_INIT;
2707 struct futex_q q = futex_q_init;
2708 int res, ret;
2709
2710 if (uaddr == uaddr2)
2711 return -EINVAL;
2712
2713 if (!bitset)
2714 return -EINVAL;
2715
2716 if (abs_time) {
2717 to = &timeout;
2718 hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
2719 CLOCK_REALTIME : CLOCK_MONOTONIC,
2720 HRTIMER_MODE_ABS);
2721 hrtimer_init_sleeper(to, current);
2722 hrtimer_set_expires_range_ns(&to->timer, *abs_time,
2723 current->timer_slack_ns);
2724 }
2725
2726 /*
2727 * The waiter is allocated on our stack, manipulated by the requeue
2728 * code while we sleep on uaddr.
2729 */
2730 debug_rt_mutex_init_waiter(&rt_waiter);
2731 RB_CLEAR_NODE(&rt_waiter.pi_tree_entry);
2732 RB_CLEAR_NODE(&rt_waiter.tree_entry);
2733 rt_waiter.task = NULL;
2734
2735 ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
2736 if (unlikely(ret != 0))
2737 goto out;
2738
2739 q.bitset = bitset;
2740 q.rt_waiter = &rt_waiter;
2741 q.requeue_pi_key = &key2;
2742
2743 /*
2744 * Prepare to wait on uaddr. On success, increments q.key (key1) ref
2745 * count.
2746 */
2747 ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
2748 if (ret)
2749 goto out_key2;
2750
2751 /*
2752 * The check above which compares uaddrs is not sufficient for
2753 * shared futexes. We need to compare the keys:
2754 */
2755 if (match_futex(&q.key, &key2)) {
2756 queue_unlock(hb);
2757 ret = -EINVAL;
2758 goto out_put_keys;
2759 }
2760
2761 /* Queue the futex_q, drop the hb lock, wait for wakeup. */
2762 futex_wait_queue_me(hb, &q, to);
2763
2764 spin_lock(&hb->lock);
2765 ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
2766 spin_unlock(&hb->lock);
2767 if (ret)
2768 goto out_put_keys;
2769
2770 /*
2771 * In order for us to be here, we know our q.key == key2, and since
2772 * we took the hb->lock above, we also know that futex_requeue() has
2773 * completed and we no longer have to concern ourselves with a wakeup
2774 * race with the atomic proxy lock acquisition by the requeue code. The
2775 * futex_requeue dropped our key1 reference and incremented our key2
2776 * reference count.
2777 */
2778
2779 /* Check if the requeue code acquired the second futex for us. */
2780 if (!q.rt_waiter) {
2781 /*
2782 * Got the lock. We might not be the anticipated owner if we
2783 * did a lock-steal - fix up the PI-state in that case.
2784 */
2785 if (q.pi_state && (q.pi_state->owner != current)) {
2786 spin_lock(q.lock_ptr);
2787 ret = fixup_pi_state_owner(uaddr2, &q, current);
2788 /*
2789 * Drop the reference to the pi state which
2790 * the requeue_pi() code acquired for us.
2791 */
2792 put_pi_state(q.pi_state);
2793 spin_unlock(q.lock_ptr);
2794 }
2795 } else {
2796 /*
2797 * We have been woken up by futex_unlock_pi(), a timeout, or a
2798 * signal. futex_unlock_pi() will not destroy the lock_ptr nor
2799 * the pi_state.
2800 */
2801 WARN_ON(!q.pi_state);
2802 pi_mutex = &q.pi_state->pi_mutex;
2803 ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter);
2804 debug_rt_mutex_free_waiter(&rt_waiter);
2805
2806 spin_lock(q.lock_ptr);
2807 /*
2808 * Fixup the pi_state owner and possibly acquire the lock if we
2809 * haven't already.
2810 */
2811 res = fixup_owner(uaddr2, &q, !ret);
2812 /*
2813		 * If fixup_owner() returned an error, propagate that. If it
2814 * acquired the lock, clear -ETIMEDOUT or -EINTR.
2815 */
2816 if (res)
2817 ret = (res < 0) ? res : 0;
2818
2819 /* Unqueue and drop the lock. */
2820 unqueue_me_pi(&q);
2821 }
2822
2823 /*
2824 * If fixup_pi_state_owner() faulted and was unable to handle the
2825 * fault, unlock the rt_mutex and return the fault to userspace.
2826 */
2827 if (ret == -EFAULT) {
2828 if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
2829 rt_mutex_unlock(pi_mutex);
2830 } else if (ret == -EINTR) {
2831 /*
2832 * We've already been requeued, but cannot restart by calling
2833 * futex_lock_pi() directly. We could restart this syscall, but
2834 * it would detect that the user space "val" changed and return
2835 * -EWOULDBLOCK. Save the overhead of the restart and return
2836 * -EWOULDBLOCK directly.
2837 */
2838 ret = -EWOULDBLOCK;
2839 }
2840
2841 out_put_keys:
2842 put_futex_key(&q.key);
2843 out_key2:
2844 put_futex_key(&key2);
2845
2846 out:
2847 if (to) {
2848 hrtimer_cancel(&to->timer);
2849 destroy_hrtimer_on_stack(&to->timer);
2850 }
2851 return ret;
2852 }
2853
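/*
 * A sketch of the user-space pairing futex_wait_requeue_pi() is designed
 * for: a condition-variable wait that is requeued onto, and returns owning,
 * a PI futex. glibc's PI-aware pthread_cond_wait() uses this command pair
 * internally; the helpers below are illustrative, not the glibc code.
 */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <limits.h>

/* Wait on @cond (while it still holds @seq), wake up owning PI futex @mutex. */
static long cond_wait_pi(unsigned int *cond, unsigned int seq,
			 unsigned int *mutex)
{
	return syscall(SYS_futex, cond, FUTEX_WAIT_REQUEUE_PI,
		       seq, NULL /* no timeout */, mutex, 0);
}

/* Wake one waiter on @cond and requeue the rest onto @mutex
 * (nr_wake must be 1 for FUTEX_CMP_REQUEUE_PI). */
static long cond_broadcast_pi(unsigned int *cond, unsigned int seq,
			      unsigned int *mutex)
{
	return syscall(SYS_futex, cond, FUTEX_CMP_REQUEUE_PI,
		       1, INT_MAX, mutex, seq);
}
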
2854 /*
2855 * Support for robust futexes: the kernel cleans up held futexes at
2856 * thread exit time.
2857 *
2858 * Implementation: user-space maintains a per-thread list of locks it
2859 * is holding. Upon do_exit(), the kernel carefully walks this list,
2860 * and marks all locks that are owned by this thread with the
2861 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
2862 * always manipulated with the lock held, so the list is private and
2863 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
2864 * field, to allow the kernel to clean up if the thread dies after
2865 * acquiring the lock, but just before it could have added itself to
2866 * the list. There can only be one such pending lock.
2867 */
2868
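/*
 * What the user-space side of the scheme above looks like: each thread
 * registers one robust_list_head and the kernel walks it at exit. A
 * minimal sketch using the <linux/futex.h> ABI directly (the lock layout
 * is illustrative); real programs normally get this via glibc's robust
 * pthread mutexes, which register the list automatically.
 */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stddef.h>

struct robust_lock {
	struct robust_list list;	/* linkage the kernel walks at exit */
	unsigned int futex;		/* the futex word itself */
};

static struct robust_list_head head = {
	.list		 = { .next = &head.list },	/* empty circular list */
	.futex_offset	 = offsetof(struct robust_lock, futex) -
			   offsetof(struct robust_lock, list),
	.list_op_pending = NULL,
};

static void register_robust_list(void)
{
	syscall(SYS_set_robust_list, &head, sizeof(head));
}
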
2869 /**
2870 * sys_set_robust_list() - Set the robust-futex list head of a task
2871 * @head: pointer to the list-head
2872 * @len: length of the list-head, as userspace expects
2873 */
2874 SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
2875 size_t, len)
2876 {
2877 if (!futex_cmpxchg_enabled)
2878 return -ENOSYS;
2879 /*
2880 * The kernel knows only one size for now:
2881 */
2882 if (unlikely(len != sizeof(*head)))
2883 return -EINVAL;
2884
2885 current->robust_list = head;
2886
2887 return 0;
2888 }
2889
2890 /**
2891 * sys_get_robust_list() - Get the robust-futex list head of a task
2892 * @pid: pid of the process [zero for current task]
2893 * @head_ptr: pointer to a list-head pointer, the kernel fills it in
2894 * @len_ptr: pointer to a length field, the kernel fills in the header size
2895 */
2896 SYSCALL_DEFINE3(get_robust_list, int, pid,
2897 struct robust_list_head __user * __user *, head_ptr,
2898 size_t __user *, len_ptr)
2899 {
2900 struct robust_list_head __user *head;
2901 unsigned long ret;
2902 struct task_struct *p;
2903
2904 if (!futex_cmpxchg_enabled)
2905 return -ENOSYS;
2906
2907 rcu_read_lock();
2908
2909 ret = -ESRCH;
2910 if (!pid)
2911 p = current;
2912 else {
2913 p = find_task_by_vpid(pid);
2914 if (!p)
2915 goto err_unlock;
2916 }
2917
2918 ret = -EPERM;
2919 if (!ptrace_may_access(p, PTRACE_MODE_READ))
2920 goto err_unlock;
2921
2922 head = p->robust_list;
2923 rcu_read_unlock();
2924
2925 if (put_user(sizeof(*head), len_ptr))
2926 return -EFAULT;
2927 return put_user(head, head_ptr);
2928
2929 err_unlock:
2930 rcu_read_unlock();
2931
2932 return ret;
2933 }
2934
2935 /*
2936 * Process a futex-list entry, check whether it's owned by the
2937 * dying task, and do notification if so:
2938 */
2939 int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
2940 {
2941 u32 uval, uninitialized_var(nval), mval;
2942
2943 retry:
2944 if (get_user(uval, uaddr))
2945 return -1;
2946
2947 if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
2948 /*
2949 * Ok, this dying thread is truly holding a futex
2950 * of interest. Set the OWNER_DIED bit atomically
2951 * via cmpxchg, and if the value had FUTEX_WAITERS
2952 * set, wake up a waiter (if any). (We have to do a
2953 * futex_wake() even if OWNER_DIED is already set -
2954 * to handle the rare but possible case of recursive
2955 * thread-death.) The rest of the cleanup is done in
2956 * userspace.
2957 */
2958 mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
2959 /*
2960 * We are not holding a lock here, but we want to have
2961 * the pagefault_disable/enable() protection because
2962 * we want to handle the fault gracefully. If the
2963 * access fails we try to fault in the futex with R/W
2964 * verification via get_user_pages. get_user() above
2965 * does not guarantee R/W access. If that fails we
2966 * give up and leave the futex locked.
2967 */
2968 if (cmpxchg_futex_value_locked(&nval, uaddr, uval, mval)) {
2969 if (fault_in_user_writeable(uaddr))
2970 return -1;
2971 goto retry;
2972 }
2973 if (nval != uval)
2974 goto retry;
2975
2976 /*
2977 * Wake robust non-PI futexes here. The wakeup of
2978 * PI futexes happens in exit_pi_state():
2979 */
2980 if (!pi && (uval & FUTEX_WAITERS))
2981 futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
2982 }
2983 return 0;
2984 }
2985
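/*
 * How user space observes handle_futex_death(): the next locker of a
 * robust mutex whose owner died gets EOWNERDEAD and must repair the
 * protected state. A minimal sketch with POSIX robust mutexes (the mutex
 * is assumed to have been initialized with PTHREAD_MUTEX_ROBUST).
 */
#include <pthread.h>
#include <errno.h>

static int lock_robust(pthread_mutex_t *m)
{
	int err = pthread_mutex_lock(m);

	if (err == EOWNERDEAD) {
		/* The kernel set FUTEX_OWNER_DIED and woke us up. */
		/* ... repair the data the dead owner left behind ... */
		pthread_mutex_consistent(m);	/* mark it usable again */
		err = 0;
	}
	return err;
}
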
2986 /*
2987 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
2988 */
2989 static inline int fetch_robust_entry(struct robust_list __user **entry,
2990 struct robust_list __user * __user *head,
2991 unsigned int *pi)
2992 {
2993 unsigned long uentry;
2994
2995 if (get_user(uentry, (unsigned long __user *)head))
2996 return -EFAULT;
2997
2998 *entry = (void __user *)(uentry & ~1UL);
2999 *pi = uentry & 1;
3000
3001 return 0;
3002 }
3003
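/*
 * The matching user-space encoding: bit 0 of each robust-list pointer
 * marks the entry as a PI futex. An illustrative helper:
 */
static inline struct robust_list *encode_robust_entry(struct robust_list *entry,
						      int pi)
{
	return (struct robust_list *)((unsigned long)entry | (pi ? 1UL : 0UL));
}
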
3004 /*
3005 * Walk curr->robust_list (very carefully, it's a userspace list!)
3006 * and mark any locks found there dead, and notify any waiters.
3007 *
3008 * We silently return on any sign of a list-walking problem.
3009 */
3010 void exit_robust_list(struct task_struct *curr)
3011 {
3012 struct robust_list_head __user *head = curr->robust_list;
3013 struct robust_list __user *entry, *next_entry, *pending;
3014 unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
3015 unsigned int uninitialized_var(next_pi);
3016 unsigned long futex_offset;
3017 int rc;
3018
3019 if (!futex_cmpxchg_enabled)
3020 return;
3021
3022 /*
3023 * Fetch the list head (which was registered earlier, via
3024 * sys_set_robust_list()):
3025 */
3026 if (fetch_robust_entry(&entry, &head->list.next, &pi))
3027 return;
3028 /*
3029 * Fetch the relative futex offset:
3030 */
3031 if (get_user(futex_offset, &head->futex_offset))
3032 return;
3033 /*
3034 * Fetch any possibly pending lock-add first, and handle it
3035 * if it exists:
3036 */
3037 if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
3038 return;
3039
3040 next_entry = NULL; /* avoid warning with gcc */
3041 while (entry != &head->list) {
3042 /*
3043 * Fetch the next entry in the list before calling
3044 * handle_futex_death:
3045 */
3046 rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
3047 /*
3048 * A pending lock might already be on the list, so
3049 * don't process it twice:
3050 */
3051 if (entry != pending)
3052 if (handle_futex_death((void __user *)entry + futex_offset,
3053 curr, pi))
3054 return;
3055 if (rc)
3056 return;
3057 entry = next_entry;
3058 pi = next_pi;
3059 /*
3060 * Avoid excessively long or circular lists:
3061 */
3062 if (!--limit)
3063 break;
3064
3065 cond_resched();
3066 }
3067
3068 if (pending)
3069 handle_futex_death((void __user *)pending + futex_offset,
3070 curr, pip);
3071 }
3072
3073 long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
3074 u32 __user *uaddr2, u32 val2, u32 val3)
3075 {
3076 int cmd = op & FUTEX_CMD_MASK;
3077 unsigned int flags = 0;
3078
3079 if (!(op & FUTEX_PRIVATE_FLAG))
3080 flags |= FLAGS_SHARED;
3081
3082 if (op & FUTEX_CLOCK_REALTIME) {
3083 flags |= FLAGS_CLOCKRT;
3084 if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI)
3085 return -ENOSYS;
3086 }
3087
3088 switch (cmd) {
3089 case FUTEX_LOCK_PI:
3090 case FUTEX_UNLOCK_PI:
3091 case FUTEX_TRYLOCK_PI:
3092 case FUTEX_WAIT_REQUEUE_PI:
3093 case FUTEX_CMP_REQUEUE_PI:
3094 if (!futex_cmpxchg_enabled)
3095 return -ENOSYS;
3096 }
3097
3098 switch (cmd) {
3099 case FUTEX_WAIT:
3100 val3 = FUTEX_BITSET_MATCH_ANY;
3101 case FUTEX_WAIT_BITSET:
3102 return futex_wait(uaddr, flags, val, timeout, val3);
3103 case FUTEX_WAKE:
3104 val3 = FUTEX_BITSET_MATCH_ANY;
3105 case FUTEX_WAKE_BITSET:
3106 return futex_wake(uaddr, flags, val, val3);
3107 case FUTEX_REQUEUE:
3108 return futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);
3109 case FUTEX_CMP_REQUEUE:
3110 return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);
3111 case FUTEX_WAKE_OP:
3112 return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
3113 case FUTEX_LOCK_PI:
3114 return futex_lock_pi(uaddr, flags, timeout, 0);
3115 case FUTEX_UNLOCK_PI:
3116 return futex_unlock_pi(uaddr, flags);
3117 case FUTEX_TRYLOCK_PI:
3118 return futex_lock_pi(uaddr, flags, NULL, 1);
3119 case FUTEX_WAIT_REQUEUE_PI:
3120 val3 = FUTEX_BITSET_MATCH_ANY;
3121 return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
3122 uaddr2);
3123 case FUTEX_CMP_REQUEUE_PI:
3124 return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1);
3125 }
3126 return -ENOSYS;
3127 }
3128
3129
3130 SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
3131 struct timespec __user *, utime, u32 __user *, uaddr2,
3132 u32, val3)
3133 {
3134 struct timespec ts;
3135 ktime_t t, *tp = NULL;
3136 u32 val2 = 0;
3137 int cmd = op & FUTEX_CMD_MASK;
3138
3139 if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
3140 cmd == FUTEX_WAIT_BITSET ||
3141 cmd == FUTEX_WAIT_REQUEUE_PI)) {
3142 if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG))))
3143 return -EFAULT;
3144 if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
3145 return -EFAULT;
3146 if (!timespec_valid(&ts))
3147 return -EINVAL;
3148
3149 t = timespec_to_ktime(ts);
3150 if (cmd == FUTEX_WAIT)
3151 t = ktime_add_safe(ktime_get(), t);
3152 tp = &t;
3153 }
3154 /*
3155 * requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*.
3156 * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
3157 */
3158 if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
3159 cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
3160 val2 = (u32) (unsigned long) utime;
3161
3162 return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
3163 }
3164
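/*
 * The 'utime' overloading above, seen from user space: for the requeue
 * commands the fourth argument slot carries a waiter count rather than a
 * timespec pointer. A minimal sketch, assuming glibc's syscall(2) wrapper:
 */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <limits.h>

/* Wake one waiter on @from and requeue up to INT_MAX others onto @to,
 * but only if *@from still equals @expected. */
static long cmp_requeue(unsigned int *from, unsigned int *to,
			unsigned int expected)
{
	return syscall(SYS_futex, from, FUTEX_CMP_REQUEUE,
		       1 /* nr_wake */, INT_MAX /* nr_requeue */,
		       to, expected);
}
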
3165 static void __init futex_detect_cmpxchg(void)
3166 {
3167 #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
3168 u32 curval;
3169
3170 /*
3171 * This will fail and we want it. Some arch implementations do
3172 * runtime detection of the futex_atomic_cmpxchg_inatomic()
3173 * functionality. We want to know that before we call in any
3174 * of the complex code paths. Also we want to prevent
3175 * registration of robust lists in that case. NULL is
3176 * guaranteed to fault and we get -EFAULT on functional
3177 * implementation, the non-functional ones will return
3178 * -ENOSYS.
3179 */
3180 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
3181 futex_cmpxchg_enabled = 1;
3182 #endif
3183 }
3184
3185 static int __init futex_init(void)
3186 {
3187 unsigned int futex_shift;
3188 unsigned long i;
3189
3190 #if CONFIG_BASE_SMALL
3191 futex_hashsize = 16;
3192 #else
3193 futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
3194 #endif
3195
3196 futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues),
3197 futex_hashsize, 0,
3198 futex_hashsize < 256 ? HASH_SMALL : 0,
3199 &futex_shift, NULL,
3200 futex_hashsize, futex_hashsize);
3201 futex_hashsize = 1UL << futex_shift;
3202
3203 futex_detect_cmpxchg();
3204
3205 for (i = 0; i < futex_hashsize; i++) {
3206 atomic_set(&futex_queues[i].waiters, 0);
3207 plist_head_init(&futex_queues[i].chain);
3208 spin_lock_init(&futex_queues[i].lock);
3209 }
3210
3211 return 0;
3212 }
3213 __initcall(futex_init);