futex: Fix errors in nested key ref-counting
kernel/futex.c
/*
 * Fast Userspace Mutexes (which I call "Futexes!").
 * (C) Rusty Russell, IBM 2002
 *
 * Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 * (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 * Removed page pinning, fix privately mapped COW pages and other cleanups
 * (C) Copyright 2003, 2004 Jamie Lokier
 *
 * Robust futex support started by Ingo Molnar
 * (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 * Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
 * PI-futex support started by Ingo Molnar and Thomas Gleixner
 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 * PRIVATE futexes by Eric Dumazet
 * Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
 *
 * Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
 * Copyright (C) IBM Corporation, 2009
 * Thanks to Thomas Gleixner for conceptual design and careful reviews.
 *
 * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 * enough at me, Linus for the original (flawed) idea, Matthew
 * Kirkwood for proof-of-concept implementation.
 *
 * "The futexes are also cursed."
 * "But they come in a choice of three flavours!"
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/module.h>
#include <linux/magic.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>

#include <asm/futex.h>

#include "rtmutex_common.h"

int __read_mostly futex_cmpxchg_enabled;

#define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)

/*
 * Priority Inheritance state:
 */
struct futex_pi_state {
	/*
	 * list of 'owned' pi_state instances - these have to be
	 * cleaned up in do_exit() if the task exits prematurely:
	 */
	struct list_head list;

	/*
	 * The PI object:
	 */
	struct rt_mutex pi_mutex;

	struct task_struct *owner;
	atomic_t refcount;

	union futex_key key;
};

/**
 * struct futex_q - The hashed futex queue entry, one per waiting task
 * @task:		the task waiting on the futex
 * @lock_ptr:		the hash bucket lock
 * @key:		the key the futex is hashed on
 * @pi_state:		optional priority inheritance state
 * @rt_waiter:		rt_waiter storage for use with requeue_pi
 * @requeue_pi_key:	the requeue_pi target futex key
 * @bitset:		bitset for the optional bitmasked wakeup
 *
 * We use this hashed waitqueue, instead of a normal wait_queue_t, so
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * the second.
 *
 * PI futexes are typically woken before they are removed from the hash list via
 * the rt_mutex code. See unqueue_me_pi().
 */
struct futex_q {
	struct plist_node list;

	struct task_struct *task;
	spinlock_t *lock_ptr;
	union futex_key key;
	struct futex_pi_state *pi_state;
	struct rt_mutex_waiter *rt_waiter;
	union futex_key *requeue_pi_key;
	u32 bitset;
};
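
/*
 * Illustrative sketch (not part of the original file): the "woken"
 * condition described above, written out as a predicate. The helper
 * name is hypothetical:
 *
 *	static inline int futex_q_woken(struct futex_q *q)
 *	{
 *		return plist_node_empty(&q->list) || q->lock_ptr == NULL;
 *	}
 */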

/*
 * Hash buckets are shared by all the futex_keys that hash to the same
 * location. Each key may have multiple futex_q structures, one for each task
 * waiting on a futex.
 */
struct futex_hash_bucket {
	spinlock_t lock;
	struct plist_head chain;
};

static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];

/*
 * We hash on the keys returned from get_futex_key (see below).
 */
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
	u32 hash = jhash2((u32*)&key->both.word,
			  (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
			  key->both.offset);
	return &futex_queues[hash & ((1 << FUTEX_HASHBITS)-1)];
}
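
/*
 * Illustrative sketch (not part of the original file): tasks waiting
 * on the same futex derive identical keys and therefore land in the
 * same bucket, whose lock serializes all queue/wake operations on it:
 *
 *	union futex_key key = FUTEX_KEY_INIT;
 *	get_futex_key(uaddr, fshared, &key);
 *	struct futex_hash_bucket *hb = hash_futex(&key);
 *	spin_lock(&hb->lock);	// one lock per bucket, shared by all
 *				// keys that hash there
 */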

/*
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
	return (key1 && key2
		&& key1->both.word == key2->both.word
		&& key1->both.ptr == key2->both.ptr
		&& key1->both.offset == key2->both.offset);
}

/*
 * Take a reference to the resource addressed by a key.
 * Can be called while holding spinlocks.
 */
static void get_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr)
		return;

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		atomic_inc(&key->shared.inode->i_count);
		break;
	case FUT_OFF_MMSHARED:
		atomic_inc(&key->private.mm->mm_count);
		break;
	}
}

/*
 * Drop a reference to the resource addressed by a key.
 * The hash bucket spinlock must not be held.
 */
static void drop_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr) {
		/* If we're here then we tried to put a key we failed to get */
		WARN_ON_ONCE(1);
		return;
	}

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		iput(key->shared.inode);
		break;
	case FUT_OFF_MMSHARED:
		mmdrop(key->private.mm);
		break;
	}
}
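
/*
 * Sketch of the intended pairing discipline (an assumption drawn from
 * the two helpers above, not text from the original file): every
 * successful get_futex_key(), which takes a reference internally, is
 * balanced by exactly one put_futex_key():
 *
 *	ret = get_futex_key(uaddr, fshared, &key);	// pins inode or mm
 *	if (ret)
 *		return ret;
 *	...
 *	put_futex_key(fshared, &key);			// releases that pin
 */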

/**
 * get_futex_key() - Get parameters which are the keys for a futex
 * @uaddr:	virtual address of the futex
 * @fshared:	0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
 * @key:	address where result is stored.
 *
 * Returns a negative error code or 0
 * The key words are stored in *key on success.
 *
 * For shared mappings, it's (page->index, vma->vm_file->f_path.dentry->d_inode,
 * offset_within_page). For private mappings, it's (uaddr, current->mm).
 * We can usually work out the index without swapping in the page.
 *
 * lock_page() might sleep, the caller should not hold a spinlock.
 */
static int
get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
{
	unsigned long address = (unsigned long)uaddr;
	struct mm_struct *mm = current->mm;
	struct page *page;
	int err;

	/*
	 * The futex address must be "naturally" aligned.
	 */
	key->both.offset = address % PAGE_SIZE;
	if (unlikely((address % sizeof(u32)) != 0))
		return -EINVAL;
	address -= key->both.offset;

	/*
	 * PROCESS_PRIVATE futexes are fast.
	 * As the mm cannot disappear under us and the 'key' only needs
	 * the virtual address, we don't even have to find the underlying vma.
	 * Note: we do have to check that 'uaddr' is a valid user address,
	 * but access_ok() should be faster than find_vma().
	 */
	if (!fshared) {
		if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
			return -EFAULT;
		key->private.mm = mm;
		key->private.address = address;
		get_futex_key_refs(key);
		return 0;
	}

again:
	err = get_user_pages_fast(address, 1, 1, &page);
	if (err < 0)
		return err;

	page = compound_head(page);
	lock_page(page);
	if (!page->mapping) {
		unlock_page(page);
		put_page(page);
		goto again;
	}

	/*
	 * Private mappings are handled in a simple way.
	 *
	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
	 * it's a read-only handle, it's expected that futexes attach to
	 * the object not the particular process.
	 */
	if (PageAnon(page)) {
		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
		key->private.mm = mm;
		key->private.address = address;
	} else {
		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
		key->shared.inode = page->mapping->host;
		key->shared.pgoff = page->index;
	}

	get_futex_key_refs(key);

	unlock_page(page);
	put_page(page);
	return 0;
}

static inline
void put_futex_key(int fshared, union futex_key *key)
{
	drop_futex_key_refs(key);
}

/**
 * fault_in_user_writeable() - Fault in user address and verify RW access
 * @uaddr:	pointer to faulting user space address
 *
 * Slow path to fixup the fault we just took in the atomic write
 * access to @uaddr.
 *
 * We have no generic implementation of a non destructive write to the
 * user address. We know that we faulted in the atomic pagefault
 * disabled section so we can as well avoid the #PF overhead by
 * calling get_user_pages() right away.
 */
static int fault_in_user_writeable(u32 __user *uaddr)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(current, mm, (unsigned long)uaddr,
			     1, 1, 0, NULL, NULL);
	up_read(&mm->mmap_sem);

	return ret < 0 ? ret : 0;
}

/**
 * futex_top_waiter() - Return the highest priority waiter on a futex
 * @hb:		the hash bucket the futex_q's reside in
 * @key:	the futex key (to distinguish it from other futex futex_q's)
 *
 * Must be called with the hb lock held.
 */
static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
					union futex_key *key)
{
	struct futex_q *this;

	plist_for_each_entry(this, &hb->chain, list) {
		if (match_futex(&this->key, key))
			return this;
	}
	return NULL;
}

static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
{
	u32 curval;

	pagefault_disable();
	curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
	pagefault_enable();

	return curval;
}

static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
	int ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
	pagefault_enable();

	return ret ? -EFAULT : 0;
}


/*
 * PI code:
 */
static int refill_pi_state_cache(void)
{
	struct futex_pi_state *pi_state;

	if (likely(current->pi_state_cache))
		return 0;

	pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);

	if (!pi_state)
		return -ENOMEM;

	INIT_LIST_HEAD(&pi_state->list);
	/* pi_mutex gets initialized later */
	pi_state->owner = NULL;
	atomic_set(&pi_state->refcount, 1);
	pi_state->key = FUTEX_KEY_INIT;

	current->pi_state_cache = pi_state;

	return 0;
}

static struct futex_pi_state * alloc_pi_state(void)
{
	struct futex_pi_state *pi_state = current->pi_state_cache;

	WARN_ON(!pi_state);
	current->pi_state_cache = NULL;

	return pi_state;
}

static void free_pi_state(struct futex_pi_state *pi_state)
{
	if (!atomic_dec_and_test(&pi_state->refcount))
		return;

	/*
	 * If pi_state->owner is NULL, the owner is most probably dying
	 * and has cleaned up the pi_state already
	 */
	if (pi_state->owner) {
		raw_spin_lock_irq(&pi_state->owner->pi_lock);
		list_del_init(&pi_state->list);
		raw_spin_unlock_irq(&pi_state->owner->pi_lock);

		rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
	}

	if (current->pi_state_cache)
		kfree(pi_state);
	else {
		/*
		 * pi_state->list is already empty.
		 * clear pi_state->owner.
		 * refcount is at 0 - put it back to 1.
		 */
		pi_state->owner = NULL;
		atomic_set(&pi_state->refcount, 1);
		current->pi_state_cache = pi_state;
	}
}

/*
 * Look up the task based on what TID userspace gave us.
 * We don't trust it.
 */
static struct task_struct * futex_find_get_task(pid_t pid)
{
	struct task_struct *p;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p)
		get_task_struct(p);

	rcu_read_unlock();

	return p;
}

/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
void exit_pi_state_list(struct task_struct *curr)
{
	struct list_head *next, *head = &curr->pi_state_list;
	struct futex_pi_state *pi_state;
	struct futex_hash_bucket *hb;
	union futex_key key = FUTEX_KEY_INIT;

	if (!futex_cmpxchg_enabled)
		return;
	/*
	 * We are a ZOMBIE and nobody can enqueue itself on
	 * pi_state_list anymore, but we have to be careful
	 * versus waiters unqueueing themselves:
	 */
	raw_spin_lock_irq(&curr->pi_lock);
	while (!list_empty(head)) {

		next = head->next;
		pi_state = list_entry(next, struct futex_pi_state, list);
		key = pi_state->key;
		hb = hash_futex(&key);
		raw_spin_unlock_irq(&curr->pi_lock);

		spin_lock(&hb->lock);

		raw_spin_lock_irq(&curr->pi_lock);
		/*
		 * We dropped the pi-lock, so re-check whether this
		 * task still owns the PI-state:
		 */
		if (head->next != next) {
			spin_unlock(&hb->lock);
			continue;
		}

		WARN_ON(pi_state->owner != curr);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		pi_state->owner = NULL;
		raw_spin_unlock_irq(&curr->pi_lock);

		rt_mutex_unlock(&pi_state->pi_mutex);

		spin_unlock(&hb->lock);

		raw_spin_lock_irq(&curr->pi_lock);
	}
	raw_spin_unlock_irq(&curr->pi_lock);
}

static int
lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
		union futex_key *key, struct futex_pi_state **ps)
{
	struct futex_pi_state *pi_state = NULL;
	struct futex_q *this, *next;
	struct plist_head *head;
	struct task_struct *p;
	pid_t pid = uval & FUTEX_TID_MASK;

	head = &hb->chain;

	plist_for_each_entry_safe(this, next, head, list) {
		if (match_futex(&this->key, key)) {
			/*
			 * Another waiter already exists - bump up
			 * the refcount and return its pi_state:
			 */
			pi_state = this->pi_state;
			/*
			 * Userspace might have messed up non PI and PI futexes
			 */
			if (unlikely(!pi_state))
				return -EINVAL;

			WARN_ON(!atomic_read(&pi_state->refcount));

			/*
			 * When pi_state->owner is NULL then the owner died
			 * and another waiter is on the fly. pi_state->owner
			 * is fixed up by the task which acquires
			 * pi_state->rt_mutex.
			 *
			 * We do not check for pid == 0 which can happen when
			 * the owner died and robust_list_exit() cleared the
			 * TID.
			 */
			if (pid && pi_state->owner) {
				/*
				 * Bail out if user space manipulated the
				 * futex value.
				 */
				if (pid != task_pid_vnr(pi_state->owner))
					return -EINVAL;
			}

			atomic_inc(&pi_state->refcount);
			*ps = pi_state;

			return 0;
		}
	}

	/*
	 * We are the first waiter - try to look up the real owner and attach
	 * the new pi_state to it, but bail out when TID = 0
	 */
	if (!pid)
		return -ESRCH;
	p = futex_find_get_task(pid);
	if (!p)
		return -ESRCH;

	/*
	 * We need to look at the task state flags to figure out
	 * whether the task is exiting. To protect against the do_exit
	 * change of the task flags, we do this protected by
	 * p->pi_lock:
	 */
	raw_spin_lock_irq(&p->pi_lock);
	if (unlikely(p->flags & PF_EXITING)) {
		/*
		 * The task is on the way out. When PF_EXITPIDONE is
		 * set, we know that the task has finished the
		 * cleanup:
		 */
		int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;

		raw_spin_unlock_irq(&p->pi_lock);
		put_task_struct(p);
		return ret;
	}

	pi_state = alloc_pi_state();

	/*
	 * Initialize the pi_mutex in locked state and make 'p'
	 * the owner of it:
	 */
	rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);

	/* Store the key for possible exit cleanups: */
	pi_state->key = *key;

	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &p->pi_state_list);
	pi_state->owner = p;
	raw_spin_unlock_irq(&p->pi_lock);

	put_task_struct(p);

	*ps = pi_state;

	return 0;
}

/**
 * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
 * @uaddr:		the pi futex user address
 * @hb:			the pi futex hash bucket
 * @key:		the futex key associated with uaddr and hb
 * @ps:			the pi_state pointer where we store the result of the
 *			lookup
 * @task:		the task to perform the atomic lock work for. This will
 *			be "current" except in the case of requeue pi.
 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Returns:
 *  0 - ready to wait
 *  1 - acquired the lock
 * <0 - error
 *
 * The hb->lock and futex_key refs shall be held by the caller.
 */
static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
				union futex_key *key,
				struct futex_pi_state **ps,
				struct task_struct *task, int set_waiters)
{
	int lock_taken, ret, ownerdied = 0;
	u32 uval, newval, curval;

retry:
	ret = lock_taken = 0;

	/*
	 * To avoid races, we attempt to take the lock here again
	 * (by doing a 0 -> TID atomic cmpxchg), while holding all
	 * the locks. It will most likely not succeed.
	 */
	newval = task_pid_vnr(task);
	if (set_waiters)
		newval |= FUTEX_WAITERS;

	curval = cmpxchg_futex_value_locked(uaddr, 0, newval);

	if (unlikely(curval == -EFAULT))
		return -EFAULT;

	/*
	 * Detect deadlocks.
	 */
	if ((unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(task))))
		return -EDEADLK;

	/*
	 * Surprise - we got the lock. Just return to userspace:
	 */
	if (unlikely(!curval))
		return 1;

	uval = curval;

	/*
	 * Set the FUTEX_WAITERS flag, so the owner will know it has someone
	 * to wake at the next unlock.
	 */
	newval = curval | FUTEX_WAITERS;

	/*
	 * There are two cases, where a futex might have no owner (the
	 * owner TID is 0): OWNER_DIED. We take over the futex in this
	 * case. We also do an unconditional take over, when the owner
	 * of the futex died.
	 *
	 * This is safe as we are protected by the hash bucket lock !
	 */
	if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
		/* Keep the OWNER_DIED bit */
		newval = (curval & ~FUTEX_TID_MASK) | task_pid_vnr(task);
		ownerdied = 0;
		lock_taken = 1;
	}

	curval = cmpxchg_futex_value_locked(uaddr, uval, newval);

	if (unlikely(curval == -EFAULT))
		return -EFAULT;
	if (unlikely(curval != uval))
		goto retry;

	/*
	 * We took the lock due to owner died take over.
	 */
	if (unlikely(lock_taken))
		return 1;

	/*
	 * We don't have the lock. Look up the PI state (or create it if
	 * we are the first waiter):
	 */
	ret = lookup_pi_state(uval, hb, key, ps);

	if (unlikely(ret)) {
		switch (ret) {
		case -ESRCH:
			/*
			 * No owner found for this futex. Check if the
			 * OWNER_DIED bit is set to figure out whether
			 * this is a robust futex or not.
			 */
			if (get_futex_value_locked(&curval, uaddr))
				return -EFAULT;

			/*
			 * We simply start over in case of a robust
			 * futex. The code above will take the futex
			 * and return happy.
			 */
			if (curval & FUTEX_OWNER_DIED) {
				ownerdied = 1;
				goto retry;
			}
		default:
			break;
		}
	}

	return ret;
}

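/*
 * For orientation, the userspace fast path this function races with
 * (illustrative sketch of the PI futex protocol, not part of the
 * original file): an uncontended lock is taken entirely in user space
 * with a 0 -> TID cmpxchg, and the kernel is entered only on
 * contention:
 *
 *	if (cmpxchg(&futex_val, 0, gettid()) == 0)
 *		return;		// acquired without a syscall
 *	syscall(SYS_futex, &futex_val, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
 */
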
/*
 * The hash bucket lock must be held when this is called.
 * Afterwards, the futex_q must not be accessed.
 */
static void wake_futex(struct futex_q *q)
{
	struct task_struct *p = q->task;

	/*
	 * We set q->lock_ptr = NULL _before_ we wake up the task. If
	 * a non futex wake up happens on another CPU then the task
	 * might exit and p would dereference a non existing task
	 * struct. Prevent this by holding a reference on p across the
	 * wake up.
	 */
	get_task_struct(p);

	plist_del(&q->list, &q->list.plist);
	/*
	 * The waiting task can free the futex_q as soon as
	 * q->lock_ptr = NULL is written, without taking any locks. A
	 * memory barrier is required here to prevent the following
	 * store to lock_ptr from getting ahead of the plist_del.
	 */
	smp_wmb();
	q->lock_ptr = NULL;

	wake_up_state(p, TASK_NORMAL);
	put_task_struct(p);
}

static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
{
	struct task_struct *new_owner;
	struct futex_pi_state *pi_state = this->pi_state;
	u32 curval, newval;

	if (!pi_state)
		return -EINVAL;

	/*
	 * If current does not own the pi_state then the futex is
	 * inconsistent and user space fiddled with the futex value.
	 */
	if (pi_state->owner != current)
		return -EINVAL;

	raw_spin_lock(&pi_state->pi_mutex.wait_lock);
	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);

	/*
	 * This happens when we have stolen the lock and the original
	 * pending owner did not enqueue itself back on the rt_mutex.
	 * That's not a tragedy. We know this way that a lock waiter
	 * is on the fly. We make the futex_q waiter the pending owner.
	 */
	if (!new_owner)
		new_owner = this->task;

	/*
	 * We pass it to the next owner. (The WAITERS bit is always
	 * kept enabled while there is PI state around. We must also
	 * preserve the owner died bit.)
	 */
	if (!(uval & FUTEX_OWNER_DIED)) {
		int ret = 0;

		newval = FUTEX_WAITERS | task_pid_vnr(new_owner);

		curval = cmpxchg_futex_value_locked(uaddr, uval, newval);

		if (curval == -EFAULT)
			ret = -EFAULT;
		else if (curval != uval)
			ret = -EINVAL;
		if (ret) {
			raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
			return ret;
		}
	}

	raw_spin_lock_irq(&pi_state->owner->pi_lock);
	WARN_ON(list_empty(&pi_state->list));
	list_del_init(&pi_state->list);
	raw_spin_unlock_irq(&pi_state->owner->pi_lock);

	raw_spin_lock_irq(&new_owner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &new_owner->pi_state_list);
	pi_state->owner = new_owner;
	raw_spin_unlock_irq(&new_owner->pi_lock);

	raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
	rt_mutex_unlock(&pi_state->pi_mutex);

	return 0;
}

static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
{
	u32 oldval;

	/*
	 * There is no waiter, so we unlock the futex. The owner died
	 * bit does not have to be preserved here. We are the owner:
	 */
	oldval = cmpxchg_futex_value_locked(uaddr, uval, 0);

	if (oldval == -EFAULT)
		return oldval;
	if (oldval != uval)
		return -EAGAIN;

	return 0;
}

/*
 * Express the locking dependencies for lockdep:
 */
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	if (hb1 <= hb2) {
		spin_lock(&hb1->lock);
		if (hb1 < hb2)
			spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
	} else { /* hb1 > hb2 */
		spin_lock(&hb2->lock);
		spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
	}
}
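
/*
 * Why the address comparison above is enough (illustrative sketch,
 * assuming two CPUs operate on the same pair of buckets): both sides
 * acquire the lower-addressed bucket first, so no ABBA deadlock is
 * possible:
 *
 *	CPU0: double_lock_hb(hbA, hbB);	// locks min(hbA, hbB) first
 *	CPU1: double_lock_hb(hbB, hbA);	// same global order, just blocks
 */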

static inline void
double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	spin_unlock(&hb1->lock);
	if (hb1 != hb2)
		spin_unlock(&hb2->lock);
}

/*
 * Wake up waiters matching bitset queued on this futex (uaddr).
 */
static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
{
	struct futex_hash_bucket *hb;
	struct futex_q *this, *next;
	struct plist_head *head;
	union futex_key key = FUTEX_KEY_INIT;
	int ret;

	if (!bitset)
		return -EINVAL;

	ret = get_futex_key(uaddr, fshared, &key);
	if (unlikely(ret != 0))
		goto out;

	hb = hash_futex(&key);
	spin_lock(&hb->lock);
	head = &hb->chain;

	plist_for_each_entry_safe(this, next, head, list) {
		if (match_futex (&this->key, &key)) {
			if (this->pi_state || this->rt_waiter) {
				ret = -EINVAL;
				break;
			}

			/* Check if one of the bits is set in both bitsets */
			if (!(this->bitset & bitset))
				continue;

			wake_futex(this);
			if (++ret >= nr_wake)
				break;
		}
	}

	spin_unlock(&hb->lock);
	put_futex_key(fshared, &key);
out:
	return ret;
}
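
/*
 * Bitset matching sketch (illustrative, not part of the original
 * file): a waiter queued with bitset 0x1 is woken by any wake whose
 * mask intersects it:
 *
 *	futex_wait(uaddr, fshared, val, NULL, 0x1, 0);	// waiter
 *	futex_wake(uaddr, fshared, 1, 0x1 | 0x2);	// wakes it
 *	futex_wake(uaddr, fshared, 1, 0x4);		// skips it
 */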

/*
 * Wake up all waiters hashed on the physical page that is mapped
 * to this virtual address:
 */
static int
futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
	      int nr_wake, int nr_wake2, int op)
{
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
	struct futex_hash_bucket *hb1, *hb2;
	struct plist_head *head;
	struct futex_q *this, *next;
	int ret, op_ret;

retry:
	ret = get_futex_key(uaddr1, fshared, &key1);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, fshared, &key2);
	if (unlikely(ret != 0))
		goto out_put_key1;

	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);

retry_private:
	double_lock_hb(hb1, hb2);
	op_ret = futex_atomic_op_inuser(op, uaddr2);
	if (unlikely(op_ret < 0)) {

		double_unlock_hb(hb1, hb2);

#ifndef CONFIG_MMU
		/*
		 * we don't get EFAULT from MMU faults if we don't have an MMU,
		 * but we might get them from range checking
		 */
		ret = op_ret;
		goto out_put_keys;
#endif

		if (unlikely(op_ret != -EFAULT)) {
			ret = op_ret;
			goto out_put_keys;
		}

		ret = fault_in_user_writeable(uaddr2);
		if (ret)
			goto out_put_keys;

		if (!fshared)
			goto retry_private;

		put_futex_key(fshared, &key2);
		put_futex_key(fshared, &key1);
		goto retry;
	}

	head = &hb1->chain;

	plist_for_each_entry_safe(this, next, head, list) {
		if (match_futex (&this->key, &key1)) {
			wake_futex(this);
			if (++ret >= nr_wake)
				break;
		}
	}

	if (op_ret > 0) {
		head = &hb2->chain;

		op_ret = 0;
		plist_for_each_entry_safe(this, next, head, list) {
			if (match_futex (&this->key, &key2)) {
				wake_futex(this);
				if (++op_ret >= nr_wake2)
					break;
			}
		}
		ret += op_ret;
	}

	double_unlock_hb(hb1, hb2);
out_put_keys:
	put_futex_key(fshared, &key2);
out_put_key1:
	put_futex_key(fshared, &key1);
out:
	return ret;
}

/**
 * requeue_futex() - Requeue a futex_q from one hb to another
 * @q:		the futex_q to requeue
 * @hb1:	the source hash_bucket
 * @hb2:	the target hash_bucket
 * @key2:	the new key for the requeued futex_q
 */
static inline
void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
		   struct futex_hash_bucket *hb2, union futex_key *key2)
{

	/*
	 * If key1 and key2 hash to the same bucket, no need to
	 * requeue.
	 */
	if (likely(&hb1->chain != &hb2->chain)) {
		plist_del(&q->list, &hb1->chain);
		plist_add(&q->list, &hb2->chain);
		q->lock_ptr = &hb2->lock;
#ifdef CONFIG_DEBUG_PI_LIST
		q->list.plist.spinlock = &hb2->lock;
#endif
	}
	get_futex_key_refs(key2);
	q->key = *key2;
}

/**
 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
 * @q:		the futex_q
 * @key:	the key of the requeue target futex
 * @hb:		the hash_bucket of the requeue target futex
 *
 * During futex_requeue, with requeue_pi=1, it is possible to acquire the
 * target futex if it is uncontended or via a lock steal. Set the futex_q key
 * to the requeue target futex so the waiter can detect the wakeup on the right
 * futex, but remove it from the hb and NULL the rt_waiter so it can detect
 * atomic lock acquisition. Set the q->lock_ptr to the requeue target hb->lock
 * to protect access to the pi_state to fixup the owner later. Must be called
 * with both q->lock_ptr and hb->lock held.
 */
static inline
void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
			   struct futex_hash_bucket *hb)
{
	get_futex_key_refs(key);
	q->key = *key;

	WARN_ON(plist_node_empty(&q->list));
	plist_del(&q->list, &q->list.plist);

	WARN_ON(!q->rt_waiter);
	q->rt_waiter = NULL;

	q->lock_ptr = &hb->lock;
#ifdef CONFIG_DEBUG_PI_LIST
	q->list.plist.spinlock = &hb->lock;
#endif

	wake_up_state(q->task, TASK_NORMAL);
}

/**
 * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
 * @pifutex:		the user address of the to futex
 * @hb1:		the from futex hash bucket, must be locked by the caller
 * @hb2:		the to futex hash bucket, must be locked by the caller
 * @key1:		the from futex key
 * @key2:		the to futex key
 * @ps:			address to store the pi_state pointer
 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Try and get the lock on behalf of the top waiter if we can do it atomically.
 * Wake the top waiter if we succeed. If the caller specified set_waiters,
 * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
 * hb1 and hb2 must be held by the caller.
 *
 * Returns:
 *  0 - failed to acquire the lock atomically
 *  1 - acquired the lock
 * <0 - error
 */
static int futex_proxy_trylock_atomic(u32 __user *pifutex,
				 struct futex_hash_bucket *hb1,
				 struct futex_hash_bucket *hb2,
				 union futex_key *key1, union futex_key *key2,
				 struct futex_pi_state **ps, int set_waiters)
{
	struct futex_q *top_waiter = NULL;
	u32 curval;
	int ret;

	if (get_futex_value_locked(&curval, pifutex))
		return -EFAULT;

	/*
	 * Find the top_waiter and determine if there are additional waiters.
	 * If the caller intends to requeue more than 1 waiter to pifutex,
	 * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
	 * as we have means to handle the possible fault. If not, don't set
	 * the bit unnecessarily as it will force the subsequent unlock to
	 * enter the kernel.
	 */
	top_waiter = futex_top_waiter(hb1, key1);

	/* There are no waiters, nothing for us to do. */
	if (!top_waiter)
		return 0;

	/* Ensure we requeue to the expected futex. */
	if (!match_futex(top_waiter->requeue_pi_key, key2))
		return -EINVAL;

	/*
	 * Try to take the lock for top_waiter. Set the FUTEX_WAITERS bit in
	 * the contended case or if set_waiters is 1. The pi_state is returned
	 * in ps in contended cases.
	 */
	ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
				   set_waiters);
	if (ret == 1)
		requeue_pi_wake_futex(top_waiter, key2, hb2);

	return ret;
}

/**
 * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
 * @uaddr1:	source futex user address
 * @uaddr2:	target futex user address
 * @nr_wake:	number of waiters to wake (must be 1 for requeue_pi)
 * @nr_requeue:	number of waiters to requeue (0-INT_MAX)
 * @requeue_pi:	if we are attempting to requeue from a non-pi futex to a
 *		pi futex (pi to pi requeue is not supported)
 *
 * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
 * uaddr2 atomically on behalf of the top waiter.
 *
 * Returns:
 * >=0 - on success, the number of tasks requeued or woken
 *  <0 - on error
 */
static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
			 int nr_wake, int nr_requeue, u32 *cmpval,
			 int requeue_pi)
{
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
	int drop_count = 0, task_count = 0, ret;
	struct futex_pi_state *pi_state = NULL;
	struct futex_hash_bucket *hb1, *hb2;
	struct plist_head *head1;
	struct futex_q *this, *next;
	u32 curval2;

	if (requeue_pi) {
		/*
		 * requeue_pi requires a pi_state, try to allocate it now
		 * without any locks in case it fails.
		 */
		if (refill_pi_state_cache())
			return -ENOMEM;
		/*
		 * requeue_pi must wake as many tasks as it can, up to nr_wake
		 * + nr_requeue, since it acquires the rt_mutex prior to
		 * returning to userspace, so as to not leave the rt_mutex with
		 * waiters and no owner. However, second and third wake-ups
		 * cannot be predicted as they involve race conditions with the
		 * first wake and a fault while looking up the pi_state. Both
		 * pthread_cond_signal() and pthread_cond_broadcast() should
		 * use nr_wake=1.
		 */
		if (nr_wake != 1)
			return -EINVAL;
	}

retry:
	if (pi_state != NULL) {
		/*
		 * We will have to lookup the pi_state again, so free this one
		 * to keep the accounting correct.
		 */
		free_pi_state(pi_state);
		pi_state = NULL;
	}

	ret = get_futex_key(uaddr1, fshared, &key1);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, fshared, &key2);
	if (unlikely(ret != 0))
		goto out_put_key1;

	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);

retry_private:
	double_lock_hb(hb1, hb2);

	if (likely(cmpval != NULL)) {
		u32 curval;

		ret = get_futex_value_locked(&curval, uaddr1);

		if (unlikely(ret)) {
			double_unlock_hb(hb1, hb2);

			ret = get_user(curval, uaddr1);
			if (ret)
				goto out_put_keys;

			if (!fshared)
				goto retry_private;

			put_futex_key(fshared, &key2);
			put_futex_key(fshared, &key1);
			goto retry;
		}
		if (curval != *cmpval) {
			ret = -EAGAIN;
			goto out_unlock;
		}
	}

	if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
		/*
		 * Attempt to acquire uaddr2 and wake the top waiter. If we
		 * intend to requeue waiters, force setting the FUTEX_WAITERS
		 * bit. We force this here where we are able to easily handle
		 * faults rather than in the requeue loop below.
		 */
		ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
						 &key2, &pi_state, nr_requeue);

		/*
		 * At this point the top_waiter has either taken uaddr2 or is
		 * waiting on it. If the former, then the pi_state will not
		 * exist yet, look it up one more time to ensure we have a
		 * reference to it.
		 */
		if (ret == 1) {
			WARN_ON(pi_state);
			drop_count++;
			task_count++;
			ret = get_futex_value_locked(&curval2, uaddr2);
			if (!ret)
				ret = lookup_pi_state(curval2, hb2, &key2,
						      &pi_state);
		}

		switch (ret) {
		case 0:
			break;
		case -EFAULT:
			double_unlock_hb(hb1, hb2);
			put_futex_key(fshared, &key2);
			put_futex_key(fshared, &key1);
			ret = fault_in_user_writeable(uaddr2);
			if (!ret)
				goto retry;
			goto out;
		case -EAGAIN:
			/* The owner was exiting, try again. */
			double_unlock_hb(hb1, hb2);
			put_futex_key(fshared, &key2);
			put_futex_key(fshared, &key1);
			cond_resched();
			goto retry;
		default:
			goto out_unlock;
		}
	}

	head1 = &hb1->chain;
	plist_for_each_entry_safe(this, next, head1, list) {
		if (task_count - nr_wake >= nr_requeue)
			break;

		if (!match_futex(&this->key, &key1))
			continue;

		/*
		 * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
		 * be paired with each other and no other futex ops.
		 */
		if ((requeue_pi && !this->rt_waiter) ||
		    (!requeue_pi && this->rt_waiter)) {
			ret = -EINVAL;
			break;
		}

		/*
		 * Wake nr_wake waiters. For requeue_pi, if we acquired the
		 * lock, we already woke the top_waiter. If not, it will be
		 * woken by futex_unlock_pi().
		 */
		if (++task_count <= nr_wake && !requeue_pi) {
			wake_futex(this);
			continue;
		}

		/* Ensure we requeue to the expected futex for requeue_pi. */
		if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
			ret = -EINVAL;
			break;
		}

		/*
		 * Requeue nr_requeue waiters and possibly one more in the case
		 * of requeue_pi if we couldn't acquire the lock atomically.
		 */
		if (requeue_pi) {
			/* Prepare the waiter to take the rt_mutex. */
			atomic_inc(&pi_state->refcount);
			this->pi_state = pi_state;
			ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
							this->rt_waiter,
							this->task, 1);
			if (ret == 1) {
				/* We got the lock. */
				requeue_pi_wake_futex(this, &key2, hb2);
				drop_count++;
				continue;
			} else if (ret) {
				/* -EDEADLK */
				this->pi_state = NULL;
				free_pi_state(pi_state);
				goto out_unlock;
			}
		}
		requeue_futex(this, hb1, hb2, &key2);
		drop_count++;
	}

out_unlock:
	double_unlock_hb(hb1, hb2);

	/*
	 * drop_futex_key_refs() must be called outside the spinlocks. During
	 * the requeue we moved futex_q's from the hash bucket at key1 to the
	 * one at key2 and updated their key pointer. We no longer need to
	 * hold the references to key1.
	 */
	while (--drop_count >= 0)
		drop_futex_key_refs(&key1);

out_put_keys:
	put_futex_key(fshared, &key2);
out_put_key1:
	put_futex_key(fshared, &key1);
out:
	if (pi_state != NULL)
		free_pi_state(pi_state);
	return ret ? ret : task_count;
}
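
/*
 * Illustrative mapping (an assumption based on the comment above, not
 * part of the original file): a pthread_cond_broadcast()-style caller
 * wakes one waiter and requeues the remainder onto the mutex futex.
 * The cond/mutex field names here are hypothetical:
 *
 *	futex_requeue(&cond->futex, fshared, &mutex->futex,
 *		      1, INT_MAX, &cond->seq, 0);
 */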

/* The key must be already stored in q->key. */
static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
{
	struct futex_hash_bucket *hb;

	hb = hash_futex(&q->key);
	q->lock_ptr = &hb->lock;

	spin_lock(&hb->lock);
	return hb;
}

static inline void
queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
{
	spin_unlock(&hb->lock);
}

/**
 * queue_me() - Enqueue the futex_q on the futex_hash_bucket
 * @q:	The futex_q to enqueue
 * @hb:	The destination hash bucket
 *
 * The hb->lock must be held by the caller, and is released here. A call to
 * queue_me() is typically paired with exactly one call to unqueue_me(). The
 * exceptions involve the PI related operations, which may use unqueue_me_pi()
 * or nothing if the unqueue is done as part of the wake process and the unqueue
 * state is implicit in the state of the woken task (see futex_wait_requeue_pi()
 * for an example).
 */
static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
{
	int prio;

	/*
	 * The priority used to register this element is
	 * - either the real thread-priority for the real-time threads
	 * (i.e. threads with a priority lower than MAX_RT_PRIO)
	 * - or MAX_RT_PRIO for non-RT threads.
	 * Thus, all RT-threads are woken first in priority order, and
	 * the others are woken last, in FIFO order.
	 */
	prio = min(current->normal_prio, MAX_RT_PRIO);

	plist_node_init(&q->list, prio);
#ifdef CONFIG_DEBUG_PI_LIST
	q->list.plist.spinlock = &hb->lock;
#endif
	plist_add(&q->list, &hb->chain);
	q->task = current;
	spin_unlock(&hb->lock);
}

/**
 * unqueue_me() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
 * be paired with exactly one earlier call to queue_me().
 *
 * Returns:
 *   1 - if the futex_q was still queued (and we unqueued it)
 *   0 - if the futex_q was already removed by the waking thread
 */
static int unqueue_me(struct futex_q *q)
{
	spinlock_t *lock_ptr;
	int ret = 0;

	/* In the common case we don't take the spinlock, which is nice. */
retry:
	lock_ptr = q->lock_ptr;
	barrier();
	if (lock_ptr != NULL) {
		spin_lock(lock_ptr);
		/*
		 * q->lock_ptr can change between reading it and
		 * spin_lock(), causing us to take the wrong lock. This
		 * corrects the race condition.
		 *
		 * Reasoning goes like this: if we have the wrong lock,
		 * q->lock_ptr must have changed (maybe several times)
		 * between reading it and the spin_lock(). It can
		 * change again after the spin_lock() but only if it was
		 * already changed before the spin_lock(). It cannot,
		 * however, change back to the original value. Therefore
		 * we can detect whether we acquired the correct lock.
		 */
		if (unlikely(lock_ptr != q->lock_ptr)) {
			spin_unlock(lock_ptr);
			goto retry;
		}
		WARN_ON(plist_node_empty(&q->list));
		plist_del(&q->list, &q->list.plist);

		BUG_ON(q->pi_state);

		spin_unlock(lock_ptr);
		ret = 1;
	}

	drop_futex_key_refs(&q->key);
	return ret;
}

/*
 * PI futexes can not be requeued and must remove themselves from the
 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
 * and dropped here.
 */
static void unqueue_me_pi(struct futex_q *q)
{
	WARN_ON(plist_node_empty(&q->list));
	plist_del(&q->list, &q->list.plist);

	BUG_ON(!q->pi_state);
	free_pi_state(q->pi_state);
	q->pi_state = NULL;

	spin_unlock(q->lock_ptr);
}

/*
 * Fixup the pi_state owner with the new owner.
 *
 * Must be called with hash bucket lock held and mm->sem held for non
 * private futexes.
 */
static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
				struct task_struct *newowner, int fshared)
{
	u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
	struct futex_pi_state *pi_state = q->pi_state;
	struct task_struct *oldowner = pi_state->owner;
	u32 uval, curval, newval;
	int ret;

	/* Owner died? */
	if (!pi_state->owner)
		newtid |= FUTEX_OWNER_DIED;

	/*
	 * We are here either because we stole the rtmutex from the
	 * pending owner or we are the pending owner which failed to
	 * get the rtmutex. We have to replace the pending owner TID
	 * in the user space variable. This must be atomic as we have
	 * to preserve the owner died bit here.
	 *
	 * Note: We write the user space value _before_ changing the pi_state
	 * because we can fault here. Imagine swapped out pages or a fork
	 * that marked all the anonymous memory readonly for cow.
	 *
	 * Modifying pi_state _before_ the user space value would
	 * leave the pi_state in an inconsistent state when we fault
	 * here, because we need to drop the hash bucket lock to
	 * handle the fault. This might be observed in the PID check
	 * in lookup_pi_state.
	 */
retry:
	if (get_futex_value_locked(&uval, uaddr))
		goto handle_fault;

	while (1) {
		newval = (uval & FUTEX_OWNER_DIED) | newtid;

		curval = cmpxchg_futex_value_locked(uaddr, uval, newval);

		if (curval == -EFAULT)
			goto handle_fault;
		if (curval == uval)
			break;
		uval = curval;
	}

	/*
	 * We fixed up user space. Now we need to fix the pi_state
	 * itself.
	 */
	if (pi_state->owner != NULL) {
		raw_spin_lock_irq(&pi_state->owner->pi_lock);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		raw_spin_unlock_irq(&pi_state->owner->pi_lock);
	}

	pi_state->owner = newowner;

	raw_spin_lock_irq(&newowner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &newowner->pi_state_list);
	raw_spin_unlock_irq(&newowner->pi_lock);
	return 0;

	/*
	 * To handle the page fault we need to drop the hash bucket
	 * lock here. That gives the other task (either the pending
	 * owner itself or the task which stole the rtmutex) the
	 * chance to try the fixup of the pi_state. So once we are
	 * back from handling the fault we need to check the pi_state
	 * after reacquiring the hash bucket lock and before trying to
	 * do another fixup. When the fixup has been done already we
	 * simply return.
	 */
handle_fault:
	spin_unlock(q->lock_ptr);

	ret = fault_in_user_writeable(uaddr);

	spin_lock(q->lock_ptr);

	/*
	 * Check if someone else fixed it for us:
	 */
	if (pi_state->owner != oldowner)
		return 0;

	if (ret)
		return ret;

	goto retry;
}

/*
 * In case we must use restart_block to restart a futex_wait,
 * we encode the shared capability (and related state) in 'flags':
 */
#define FLAGS_SHARED		0x01
#define FLAGS_CLOCKRT		0x02
#define FLAGS_HAS_TIMEOUT	0x04

static long futex_wait_restart(struct restart_block *restart);

/**
 * fixup_owner() - Post lock pi_state and corner case management
 * @uaddr:	user address of the futex
 * @fshared:	whether the futex is shared (1) or not (0)
 * @q:		futex_q (contains pi_state and access to the rt_mutex)
 * @locked:	if the attempt to take the rt_mutex succeeded (1) or not (0)
 *
 * After attempting to lock an rt_mutex, this function is called to cleanup
 * the pi_state owner as well as handle race conditions that may allow us to
 * acquire the lock. Must be called with the hb lock held.
 *
 * Returns:
 *  1 - success, lock taken
 *  0 - success, lock not taken
 * <0 - on error (-EFAULT)
 */
static int fixup_owner(u32 __user *uaddr, int fshared, struct futex_q *q,
		       int locked)
{
	struct task_struct *owner;
	int ret = 0;

	if (locked) {
		/*
		 * Got the lock. We might not be the anticipated owner if we
		 * did a lock-steal - fix up the PI-state in that case:
		 */
		if (q->pi_state->owner != current)
			ret = fixup_pi_state_owner(uaddr, q, current, fshared);
		goto out;
	}

	/*
	 * Catch the rare case, where the lock was released when we were on the
	 * way back before we locked the hash bucket.
	 */
	if (q->pi_state->owner == current) {
		/*
		 * Try to get the rt_mutex now. This might fail as some other
		 * task acquired the rt_mutex after we removed ourself from the
		 * rt_mutex waiters list.
		 */
		if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
			locked = 1;
			goto out;
		}

		/*
		 * pi_state is incorrect, some other task did a lock steal and
		 * we returned due to timeout or signal without taking the
		 * rt_mutex. Too late. We can access the rt_mutex_owner without
		 * locking, as the other task is now blocked on the hash bucket
		 * lock. Fix the state up.
		 */
		owner = rt_mutex_owner(&q->pi_state->pi_mutex);
		ret = fixup_pi_state_owner(uaddr, q, owner, fshared);
		goto out;
	}

	/*
	 * Paranoia check. If we did not take the lock, then we should not be
	 * the owner, nor the pending owner, of the rt_mutex.
	 */
	if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
		printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
		       "pi-state %p\n", ret,
		       q->pi_state->pi_mutex.owner,
		       q->pi_state->owner);

out:
	return ret ? ret : locked;
}

/**
 * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
 * @hb:		the futex hash bucket, must be locked by the caller
 * @q:		the futex_q to queue up on
 * @timeout:	the prepared hrtimer_sleeper, or null for no timeout
 */
static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
				struct hrtimer_sleeper *timeout)
{
	/*
	 * The task state is guaranteed to be set before another task can
	 * wake it. set_current_state() is implemented using set_mb() and
	 * queue_me() calls spin_unlock() upon completion, both serializing
	 * access to the hash list and forcing another memory barrier.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	queue_me(q, hb);

	/* Arm the timer */
	if (timeout) {
		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
		if (!hrtimer_active(&timeout->timer))
			timeout->task = NULL;
	}

	/*
	 * If we have been removed from the hash list, then another task
	 * has tried to wake us, and we can skip the call to schedule().
	 */
	if (likely(!plist_node_empty(&q->list))) {
		/*
		 * If the timer has already expired, current will already be
		 * flagged for rescheduling. Only call schedule if there
		 * is no timeout, or if it has yet to expire.
		 */
		if (!timeout || timeout->task)
			schedule();
	}
	__set_current_state(TASK_RUNNING);
}

/**
 * futex_wait_setup() - Prepare to wait on a futex
 * @uaddr:	the futex userspace address
 * @val:	the expected value
 * @fshared:	whether the futex is shared (1) or not (0)
 * @q:		the associated futex_q
 * @hb:		storage for hash_bucket pointer to be returned to caller
 *
 * Setup the futex_q and locate the hash_bucket. Get the futex value and
 * compare it with the expected value. Handle atomic faults internally.
 * Return with the hb lock held and a q.key reference on success, and unlocked
 * with no q.key reference on failure.
 *
 * Returns:
 *  0 - uaddr contains val and hb has been locked
 * <1 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
 */
static int futex_wait_setup(u32 __user *uaddr, u32 val, int fshared,
			    struct futex_q *q, struct futex_hash_bucket **hb)
{
	u32 uval;
	int ret;

	/*
	 * Access the page AFTER the hash-bucket is locked.
	 * Order is important:
	 *
	 *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
	 *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
	 *
	 * The basic logical guarantee of a futex is that it blocks ONLY
	 * if cond(var) is known to be true at the time of blocking, for
	 * any cond. If we queued after testing *uaddr, that would open
	 * a race condition where we could block indefinitely with
	 * cond(var) false, which would violate the guarantee.
	 *
	 * A consequence is that futex_wait() can return zero and absorb
	 * a wakeup when *uaddr != val on entry to the syscall. This is
	 * rare, but normal.
	 */
retry:
	q->key = FUTEX_KEY_INIT;
	ret = get_futex_key(uaddr, fshared, &q->key);
	if (unlikely(ret != 0))
		return ret;

retry_private:
	*hb = queue_lock(q);

	ret = get_futex_value_locked(&uval, uaddr);

	if (ret) {
		queue_unlock(q, *hb);

		ret = get_user(uval, uaddr);
		if (ret)
			goto out;

		if (!fshared)
			goto retry_private;

		put_futex_key(fshared, &q->key);
		goto retry;
	}

	if (uval != val) {
		queue_unlock(q, *hb);
		ret = -EWOULDBLOCK;
	}

out:
	if (ret)
		put_futex_key(fshared, &q->key);
	return ret;
}
1782
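/*
 * A minimal userspace sketch (not part of this file) of the waiter/waker
 * ordering described in futex_wait_setup() above. The shared word
 * futex_var and the helpers are hypothetical; includes and error
 * handling are trimmed to the essentials.
 */
#if 0
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static int futex_var;		/* 0 = condition not yet true */

static long sys_futex(int *uaddr, int op, int val)
{
	return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
}

static void waiter(void)
{
	int val;

	/* Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val); */
	while ((val = __atomic_load_n(&futex_var, __ATOMIC_ACQUIRE)) == 0) {
		/*
		 * If the waker changes futex_var between our load and the
		 * kernel's locked re-read, FUTEX_WAIT returns -EWOULDBLOCK
		 * and we simply re-test the condition.
		 */
		sys_futex(&futex_var, FUTEX_WAIT, val);
	}
}

static void waker(void)
{
	/* Userspace waker: var = new; futex_wake(&var); */
	__atomic_store_n(&futex_var, 1, __ATOMIC_RELEASE);
	sys_futex(&futex_var, FUTEX_WAKE, 1);
}
#endif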
1783static int futex_wait(u32 __user *uaddr, int fshared,
1784 u32 val, ktime_t *abs_time, u32 bitset, int clockrt)
1785{
1786 struct hrtimer_sleeper timeout, *to = NULL;
f801073f
DH
1787 struct restart_block *restart;
1788 struct futex_hash_bucket *hb;
1789 struct futex_q q;
1790 int ret;
1791
1792 if (!bitset)
1793 return -EINVAL;
1794
1795 q.pi_state = NULL;
1796 q.bitset = bitset;
52400ba9 1797 q.rt_waiter = NULL;
84bc4af5 1798 q.requeue_pi_key = NULL;
f801073f
DH
1799
1800 if (abs_time) {
1801 to = &timeout;
1802
1803 hrtimer_init_on_stack(&to->timer, clockrt ? CLOCK_REALTIME :
1804 CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1805 hrtimer_init_sleeper(to, current);
1806 hrtimer_set_expires_range_ns(&to->timer, *abs_time,
1807 current->timer_slack_ns);
1808 }
1809
d58e6576 1810retry:
7ada876a
DH
1811 /*
1812 * Prepare to wait on uaddr. On success, holds hb lock and increments
1813 * q.key refs.
1814 */
f801073f
DH
1815 ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
1816 if (ret)
1817 goto out;
1818
ca5f9524 1819 /* queue_me and wait for wakeup, timeout, or a signal. */
f1a11e05 1820 futex_wait_queue_me(hb, &q, to);
1da177e4
LT
1821
1822 /* If we were woken (and unqueued), we succeeded, whatever. */
2fff78c7 1823 ret = 0;
7ada876a 1824 /* unqueue_me() drops q.key ref */
1da177e4 1825 if (!unqueue_me(&q))
7ada876a 1826 goto out;
2fff78c7 1827 ret = -ETIMEDOUT;
ca5f9524 1828 if (to && !to->task)
7ada876a 1829 goto out;
72c1bbf3 1830
e2970f2f 1831 /*
d58e6576
TG
1832 * We expect signal_pending(current), but we might be the
1833 * victim of a spurious wakeup as well.
e2970f2f 1834 */
7ada876a 1835 if (!signal_pending(current))
d58e6576 1836 goto retry;
d58e6576 1837
2fff78c7 1838 ret = -ERESTARTSYS;
c19384b5 1839 if (!abs_time)
7ada876a 1840 goto out;
1da177e4 1841
2fff78c7
PZ
1842 restart = &current_thread_info()->restart_block;
1843 restart->fn = futex_wait_restart;
1844 restart->futex.uaddr = (u32 *)uaddr;
1845 restart->futex.val = val;
1846 restart->futex.time = abs_time->tv64;
1847 restart->futex.bitset = bitset;
a72188d8 1848 restart->futex.flags = FLAGS_HAS_TIMEOUT;
2fff78c7
PZ
1849
1850 if (fshared)
1851 restart->futex.flags |= FLAGS_SHARED;
1852 if (clockrt)
1853 restart->futex.flags |= FLAGS_CLOCKRT;
42d35d48 1854
2fff78c7
PZ
1855 ret = -ERESTART_RESTARTBLOCK;
1856
42d35d48 1857out:
ca5f9524
DH
1858 if (to) {
1859 hrtimer_cancel(&to->timer);
1860 destroy_hrtimer_on_stack(&to->timer);
1861 }
c87e2837
IM
1862 return ret;
1863}
1864
72c1bbf3
NP
1865
1866static long futex_wait_restart(struct restart_block *restart)
1867{
ce6bd420 1868 u32 __user *uaddr = (u32 __user *)restart->futex.uaddr;
c2f9f201 1869 int fshared = 0;
a72188d8 1870 ktime_t t, *tp = NULL;
72c1bbf3 1871
a72188d8
DH
1872 if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
1873 t.tv64 = restart->futex.time;
1874 tp = &t;
1875 }
72c1bbf3 1876 restart->fn = do_no_restart_syscall;
ce6bd420 1877 if (restart->futex.flags & FLAGS_SHARED)
c2f9f201 1878 fshared = 1;
a72188d8 1879 return (long)futex_wait(uaddr, fshared, restart->futex.val, tp,
1acdac10
TG
1880 restart->futex.bitset,
1881 restart->futex.flags & FLAGS_CLOCKRT);
72c1bbf3
NP
1882}
1883
1884
c87e2837
IM
1885/*
1886 * Userspace tried a 0 -> TID atomic transition of the futex value
1887 * and failed. The kernel side here does the whole locking operation:
1888 * if there are waiters then it will block, it does PI, etc. (Due to
1889 * races the kernel might see a 0 value of the futex too.)
1890 */
c2f9f201 1891static int futex_lock_pi(u32 __user *uaddr, int fshared,
34f01cc1 1892 int detect, ktime_t *time, int trylock)
c87e2837 1893{
c5780e97 1894 struct hrtimer_sleeper timeout, *to = NULL;
c87e2837 1895 struct futex_hash_bucket *hb;
c87e2837 1896 struct futex_q q;
dd973998 1897 int res, ret;
c87e2837
IM
1898
1899 if (refill_pi_state_cache())
1900 return -ENOMEM;
1901
c19384b5 1902 if (time) {
c5780e97 1903 to = &timeout;
237fc6e7
TG
1904 hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
1905 HRTIMER_MODE_ABS);
c5780e97 1906 hrtimer_init_sleeper(to, current);
cc584b21 1907 hrtimer_set_expires(&to->timer, *time);
c5780e97
TG
1908 }
1909
c87e2837 1910 q.pi_state = NULL;
52400ba9 1911 q.rt_waiter = NULL;
84bc4af5 1912 q.requeue_pi_key = NULL;
42d35d48 1913retry:
38d47c1b 1914 q.key = FUTEX_KEY_INIT;
7485d0d3 1915 ret = get_futex_key(uaddr, fshared, &q.key);
c87e2837 1916 if (unlikely(ret != 0))
42d35d48 1917 goto out;
c87e2837 1918
e4dc5b7a 1919retry_private:
82af7aca 1920 hb = queue_lock(&q);
c87e2837 1921
bab5bc9e 1922 ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
c87e2837 1923 if (unlikely(ret)) {
778e9a9c 1924 switch (ret) {
1a52084d
DH
1925 case 1:
1926 /* We got the lock. */
1927 ret = 0;
1928 goto out_unlock_put_key;
1929 case -EFAULT:
1930 goto uaddr_faulted;
778e9a9c
AK
1931 case -EAGAIN:
1932 /*
1933 * Task is exiting and we just wait for the
1934 * exit to complete.
1935 */
1936 queue_unlock(&q, hb);
de87fcc1 1937 put_futex_key(fshared, &q.key);
778e9a9c
AK
1938 cond_resched();
1939 goto retry;
778e9a9c 1940 default:
42d35d48 1941 goto out_unlock_put_key;
c87e2837 1942 }
c87e2837
IM
1943 }
1944
1945 /*
1946 * Only actually queue now that the atomic ops are done:
1947 */
82af7aca 1948 queue_me(&q, hb);
c87e2837 1949
c87e2837
IM
1950 WARN_ON(!q.pi_state);
1951 /*
1952 * Block on the PI mutex:
1953 */
1954 if (!trylock)
1955 ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1);
1956 else {
1957 ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
1958 /* Fixup the trylock return value: */
1959 ret = ret ? 0 : -EWOULDBLOCK;
1960 }
1961
a99e4e41 1962 spin_lock(q.lock_ptr);
dd973998
DH
1963 /*
1964 * Fixup the pi_state owner and possibly acquire the lock if we
1965 * haven't already.
1966 */
1967 res = fixup_owner(uaddr, fshared, &q, !ret);
1968 /*
 1969	 * If fixup_owner() returned an error, propagate that.  If it acquired
1970 * the lock, clear our -ETIMEDOUT or -EINTR.
1971 */
1972 if (res)
1973 ret = (res < 0) ? res : 0;
c87e2837 1974
e8f6386c 1975 /*
dd973998
DH
1976 * If fixup_owner() faulted and was unable to handle the fault, unlock
1977 * it and return the fault to userspace.
e8f6386c
DH
1978 */
1979 if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
1980 rt_mutex_unlock(&q.pi_state->pi_mutex);
1981
778e9a9c
AK
1982 /* Unqueue and drop the lock */
1983 unqueue_me_pi(&q);
c87e2837 1984
5ecb01cf 1985 goto out_put_key;
c87e2837 1986
42d35d48 1987out_unlock_put_key:
c87e2837
IM
1988 queue_unlock(&q, hb);
1989
42d35d48 1990out_put_key:
38d47c1b 1991 put_futex_key(fshared, &q.key);
42d35d48 1992out:
237fc6e7
TG
1993 if (to)
1994 destroy_hrtimer_on_stack(&to->timer);
dd973998 1995 return ret != -EINTR ? ret : -ERESTARTNOINTR;
c87e2837 1996
42d35d48 1997uaddr_faulted:
778e9a9c
AK
1998 queue_unlock(&q, hb);
1999
d0725992 2000 ret = fault_in_user_writeable(uaddr);
e4dc5b7a
DH
2001 if (ret)
2002 goto out_put_key;
c87e2837 2003
e4dc5b7a
DH
2004 if (!fshared)
2005 goto retry_private;
2006
2007 put_futex_key(fshared, &q.key);
2008 goto retry;
c87e2837
IM
2009}
2010
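/*
 * A minimal userspace sketch (not part of this file) of the 0 -> TID
 * transition attempted before falling back to the futex_lock_pi()
 * slowpath above. pi_lock() and its futex word are hypothetical.
 */
#if 0
static void pi_lock(int *futex)
{
	int expected = 0;
	pid_t tid = syscall(SYS_gettid);

	/* Uncontended fast path: 0 -> TID without entering the kernel. */
	if (__atomic_compare_exchange_n(futex, &expected, tid, 0,
					__ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
		return;

	/* Contended: the kernel queues us, does PI boosting, etc. */
	syscall(SYS_futex, futex, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
}
#endif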
c87e2837
IM
2011/*
2012 * Userspace attempted a TID -> 0 atomic transition, and failed.
2013 * This is the in-kernel slowpath: we look up the PI state (if any),
2014 * and do the rt-mutex unlock.
2015 */
c2f9f201 2016static int futex_unlock_pi(u32 __user *uaddr, int fshared)
c87e2837
IM
2017{
2018 struct futex_hash_bucket *hb;
2019 struct futex_q *this, *next;
2020 u32 uval;
ec92d082 2021 struct plist_head *head;
38d47c1b 2022 union futex_key key = FUTEX_KEY_INIT;
e4dc5b7a 2023 int ret;
c87e2837
IM
2024
2025retry:
2026 if (get_user(uval, uaddr))
2027 return -EFAULT;
2028 /*
2029 * We release only a lock we actually own:
2030 */
b488893a 2031 if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
c87e2837 2032 return -EPERM;
c87e2837 2033
7485d0d3 2034 ret = get_futex_key(uaddr, fshared, &key);
c87e2837
IM
2035 if (unlikely(ret != 0))
2036 goto out;
2037
2038 hb = hash_futex(&key);
2039 spin_lock(&hb->lock);
2040
c87e2837
IM
2041 /*
2042 * To avoid races, try to do the TID -> 0 atomic transition
2043 * again. If it succeeds then we can return without waking
2044 * anyone else up:
2045 */
36cf3b5c 2046 if (!(uval & FUTEX_OWNER_DIED))
b488893a 2047 uval = cmpxchg_futex_value_locked(uaddr, task_pid_vnr(current), 0);
36cf3b5c 2048
c87e2837
IM
2049
2050 if (unlikely(uval == -EFAULT))
2051 goto pi_faulted;
2052 /*
2053 * Rare case: we managed to release the lock atomically,
2054 * no need to wake anyone else up:
2055 */
b488893a 2056 if (unlikely(uval == task_pid_vnr(current)))
c87e2837
IM
2057 goto out_unlock;
2058
2059 /*
2060 * Ok, other tasks may need to be woken up - check waiters
2061 * and do the wakeup if necessary:
2062 */
2063 head = &hb->chain;
2064
ec92d082 2065 plist_for_each_entry_safe(this, next, head, list) {
c87e2837
IM
 2066		if (!match_futex(&this->key, &key))
2067 continue;
2068 ret = wake_futex_pi(uaddr, uval, this);
2069 /*
2070 * The atomic access to the futex value
2071 * generated a pagefault, so retry the
2072 * user-access and the wakeup:
2073 */
2074 if (ret == -EFAULT)
2075 goto pi_faulted;
2076 goto out_unlock;
2077 }
2078 /*
2079 * No waiters - kernel unlocks the futex:
2080 */
e3f2ddea
IM
2081 if (!(uval & FUTEX_OWNER_DIED)) {
2082 ret = unlock_futex_pi(uaddr, uval);
2083 if (ret == -EFAULT)
2084 goto pi_faulted;
2085 }
c87e2837
IM
2086
2087out_unlock:
2088 spin_unlock(&hb->lock);
38d47c1b 2089 put_futex_key(fshared, &key);
c87e2837 2090
42d35d48 2091out:
c87e2837
IM
2092 return ret;
2093
2094pi_faulted:
778e9a9c 2095 spin_unlock(&hb->lock);
e4dc5b7a 2096 put_futex_key(fshared, &key);
c87e2837 2097
d0725992 2098 ret = fault_in_user_writeable(uaddr);
b5686363 2099 if (!ret)
c87e2837
IM
2100 goto retry;
2101
1da177e4
LT
2102 return ret;
2103}
2104
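/*
 * The matching unlock sketch (hypothetical, as with pi_lock() above):
 * userspace tries the TID -> 0 transition and enters the
 * futex_unlock_pi() slowpath above only when the kernel must wake or
 * hand off to a waiter.
 */
#if 0
static void pi_unlock(int *futex)
{
	int expected = syscall(SYS_gettid);

	/* Uncontended fast path: TID -> 0 without entering the kernel. */
	if (__atomic_compare_exchange_n(futex, &expected, 0, 0,
					__ATOMIC_RELEASE, __ATOMIC_RELAXED))
		return;

	/* FUTEX_WAITERS (or FUTEX_OWNER_DIED) is set: kernel unlock. */
	syscall(SYS_futex, futex, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
}
#endif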
52400ba9
DH
2105/**
2106 * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
 2107 * @hb:		the hash_bucket futex_q was originally enqueued on
2108 * @q: the futex_q woken while waiting to be requeued
2109 * @key2: the futex_key of the requeue target futex
2110 * @timeout: the timeout associated with the wait (NULL if none)
2111 *
2112 * Detect if the task was woken on the initial futex as opposed to the requeue
2113 * target futex. If so, determine if it was a timeout or a signal that caused
2114 * the wakeup and return the appropriate error code to the caller. Must be
2115 * called with the hb lock held.
2116 *
 2117 * Returns:
2118 * 0 - no early wakeup detected
1c840c14 2119 * <0 - -ETIMEDOUT or -ERESTARTNOINTR
52400ba9
DH
2120 */
2121static inline
2122int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
2123 struct futex_q *q, union futex_key *key2,
2124 struct hrtimer_sleeper *timeout)
2125{
2126 int ret = 0;
2127
2128 /*
2129 * With the hb lock held, we avoid races while we process the wakeup.
2130 * We only need to hold hb (and not hb2) to ensure atomicity as the
2131 * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
2132 * It can't be requeued from uaddr2 to something else since we don't
2133 * support a PI aware source futex for requeue.
2134 */
2135 if (!match_futex(&q->key, key2)) {
2136 WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
2137 /*
2138 * We were woken prior to requeue by a timeout or a signal.
2139 * Unqueue the futex_q and determine which it was.
2140 */
2141 plist_del(&q->list, &q->list.plist);
52400ba9 2142
d58e6576 2143 /* Handle spurious wakeups gracefully */
11df6ddd 2144 ret = -EWOULDBLOCK;
52400ba9
DH
2145 if (timeout && !timeout->task)
2146 ret = -ETIMEDOUT;
d58e6576 2147 else if (signal_pending(current))
1c840c14 2148 ret = -ERESTARTNOINTR;
52400ba9
DH
2149 }
2150 return ret;
2151}
2152
2153/**
2154 * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
56ec1607 2155 * @uaddr: the futex we initially wait on (non-pi)
52400ba9
DH
2156 * @fshared: whether the futexes are shared (1) or not (0). They must be
2157 * the same type, no requeueing from private to shared, etc.
2158 * @val: the expected value of uaddr
2159 * @abs_time: absolute timeout
56ec1607 2160 * @bitset: 32 bit wakeup bitset set by userspace, defaults to all
52400ba9
DH
2161 * @clockrt: whether to use CLOCK_REALTIME (1) or CLOCK_MONOTONIC (0)
2162 * @uaddr2: the pi futex we will take prior to returning to user-space
2163 *
2164 * The caller will wait on uaddr and will be requeued by futex_requeue() to
2165 * uaddr2 which must be PI aware. Normal wakeup will wake on uaddr2 and
2166 * complete the acquisition of the rt_mutex prior to returning to userspace.
2167 * This ensures the rt_mutex maintains an owner when it has waiters; without
2168 * one, the pi logic wouldn't know which task to boost/deboost, if there was a
2169 * need to.
2170 *
 2171 * We call schedule() in futex_wait_queue_me() when we enqueue and return there
2172 * via the following:
2173 * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
cc6db4e6
DH
2174 * 2) wakeup on uaddr2 after a requeue
2175 * 3) signal
2176 * 4) timeout
52400ba9 2177 *
cc6db4e6 2178 * If 3, cleanup and return -ERESTARTNOINTR.
52400ba9
DH
2179 *
2180 * If 2, we may then block on trying to take the rt_mutex and return via:
2181 * 5) successful lock
2182 * 6) signal
2183 * 7) timeout
2184 * 8) other lock acquisition failure
2185 *
cc6db4e6 2186 * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
52400ba9
DH
2187 *
2188 * If 4 or 7, we cleanup and return with -ETIMEDOUT.
2189 *
2190 * Returns:
2191 * 0 - On success
2192 * <0 - On error
2193 */
2194static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
2195 u32 val, ktime_t *abs_time, u32 bitset,
2196 int clockrt, u32 __user *uaddr2)
2197{
2198 struct hrtimer_sleeper timeout, *to = NULL;
2199 struct rt_mutex_waiter rt_waiter;
2200 struct rt_mutex *pi_mutex = NULL;
52400ba9
DH
2201 struct futex_hash_bucket *hb;
2202 union futex_key key2;
2203 struct futex_q q;
2204 int res, ret;
52400ba9
DH
2205
2206 if (!bitset)
2207 return -EINVAL;
2208
2209 if (abs_time) {
2210 to = &timeout;
2211 hrtimer_init_on_stack(&to->timer, clockrt ? CLOCK_REALTIME :
2212 CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
2213 hrtimer_init_sleeper(to, current);
2214 hrtimer_set_expires_range_ns(&to->timer, *abs_time,
2215 current->timer_slack_ns);
2216 }
2217
2218 /*
2219 * The waiter is allocated on our stack, manipulated by the requeue
2220 * code while we sleep on uaddr.
2221 */
2222 debug_rt_mutex_init_waiter(&rt_waiter);
2223 rt_waiter.task = NULL;
2224
52400ba9 2225 key2 = FUTEX_KEY_INIT;
7485d0d3 2226 ret = get_futex_key(uaddr2, fshared, &key2);
52400ba9
DH
2227 if (unlikely(ret != 0))
2228 goto out;
2229
84bc4af5
DH
2230 q.pi_state = NULL;
2231 q.bitset = bitset;
2232 q.rt_waiter = &rt_waiter;
2233 q.requeue_pi_key = &key2;
2234
7ada876a
DH
2235 /*
2236 * Prepare to wait on uaddr. On success, increments q.key (key1) ref
2237 * count.
2238 */
52400ba9 2239 ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
c8b15a70
TG
2240 if (ret)
2241 goto out_key2;
52400ba9
DH
2242
2243 /* Queue the futex_q, drop the hb lock, wait for wakeup. */
f1a11e05 2244 futex_wait_queue_me(hb, &q, to);
52400ba9
DH
2245
2246 spin_lock(&hb->lock);
2247 ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
2248 spin_unlock(&hb->lock);
2249 if (ret)
2250 goto out_put_keys;
2251
2252 /*
2253 * In order for us to be here, we know our q.key == key2, and since
2254 * we took the hb->lock above, we also know that futex_requeue() has
2255 * completed and we no longer have to concern ourselves with a wakeup
7ada876a
DH
 2256	 * race with the atomic proxy lock acquisition by the requeue code.
 2257	 * futex_requeue() dropped our key1 reference and incremented our key2
2258 * reference count.
52400ba9
DH
2259 */
2260
2261 /* Check if the requeue code acquired the second futex for us. */
2262 if (!q.rt_waiter) {
2263 /*
2264 * Got the lock. We might not be the anticipated owner if we
2265 * did a lock-steal - fix up the PI-state in that case.
2266 */
2267 if (q.pi_state && (q.pi_state->owner != current)) {
2268 spin_lock(q.lock_ptr);
2269 ret = fixup_pi_state_owner(uaddr2, &q, current,
2270 fshared);
2271 spin_unlock(q.lock_ptr);
2272 }
2273 } else {
2274 /*
2275 * We have been woken up by futex_unlock_pi(), a timeout, or a
2276 * signal. futex_unlock_pi() will not destroy the lock_ptr nor
2277 * the pi_state.
2278 */
 2279		WARN_ON(!q.pi_state);
2280 pi_mutex = &q.pi_state->pi_mutex;
2281 ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
2282 debug_rt_mutex_free_waiter(&rt_waiter);
2283
2284 spin_lock(q.lock_ptr);
2285 /*
2286 * Fixup the pi_state owner and possibly acquire the lock if we
2287 * haven't already.
2288 */
2289 res = fixup_owner(uaddr2, fshared, &q, !ret);
2290 /*
 2291		 * If fixup_owner() returned an error, propagate that.  If it
56ec1607 2292 * acquired the lock, clear -ETIMEDOUT or -EINTR.
52400ba9
DH
2293 */
2294 if (res)
2295 ret = (res < 0) ? res : 0;
2296
2297 /* Unqueue and drop the lock. */
2298 unqueue_me_pi(&q);
2299 }
2300
2301 /*
2302 * If fixup_pi_state_owner() faulted and was unable to handle the
2303 * fault, unlock the rt_mutex and return the fault to userspace.
2304 */
2305 if (ret == -EFAULT) {
2306 if (rt_mutex_owner(pi_mutex) == current)
2307 rt_mutex_unlock(pi_mutex);
2308 } else if (ret == -EINTR) {
52400ba9 2309 /*
cc6db4e6
DH
2310 * We've already been requeued, but cannot restart by calling
2311 * futex_lock_pi() directly. We could restart this syscall, but
2312 * it would detect that the user space "val" changed and return
2313 * -EWOULDBLOCK. Save the overhead of the restart and return
2314 * -EWOULDBLOCK directly.
52400ba9 2315 */
2070887f 2316 ret = -EWOULDBLOCK;
52400ba9
DH
2317 }
2318
2319out_put_keys:
2320 put_futex_key(fshared, &q.key);
c8b15a70 2321out_key2:
52400ba9
DH
2322 put_futex_key(fshared, &key2);
2323
2324out:
2325 if (to) {
2326 hrtimer_cancel(&to->timer);
2327 destroy_hrtimer_on_stack(&to->timer);
2328 }
2329 return ret;
2330}
2331
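/*
 * The condvar-style pairing futex_wait_requeue_pi() above exists for,
 * sketched for illustration (not part of this file). cond and mutex are
 * hypothetical futex words, pi_lock()/pi_unlock() are the sketches above
 * and INT_MAX comes from <limits.h>.
 */
#if 0
static void cond_wait(int *cond, int *mutex)
{
	int seq = __atomic_load_n(cond, __ATOMIC_ACQUIRE);

	pi_unlock(mutex);
	/*
	 * Sleep on cond; futex_requeue() later moves us onto mutex, and by
	 * the time this returns the PI mutex has been (or is then)
	 * acquired on our behalf.
	 */
	syscall(SYS_futex, cond, FUTEX_WAIT_REQUEUE_PI, seq, NULL, mutex, 0);
}

static void cond_broadcast(int *cond, int *mutex)
{
	int seq = __atomic_add_fetch(cond, 1, __ATOMIC_RELEASE);

	/* Wake one waiter, requeue up to INT_MAX others onto the PI mutex. */
	syscall(SYS_futex, cond, FUTEX_CMP_REQUEUE_PI, 1,
		(void *)(unsigned long)INT_MAX, mutex, seq);
}
#endif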
0771dfef
IM
2332/*
2333 * Support for robust futexes: the kernel cleans up held futexes at
2334 * thread exit time.
2335 *
2336 * Implementation: user-space maintains a per-thread list of locks it
2337 * is holding. Upon do_exit(), the kernel carefully walks this list,
2338 * and marks all locks that are owned by this thread with the
c87e2837 2339 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
0771dfef
IM
2340 * always manipulated with the lock held, so the list is private and
2341 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
2342 * field, to allow the kernel to clean up if the thread dies after
2343 * acquiring the lock, but just before it could have added itself to
2344 * the list. There can only be one such pending lock.
2345 */
2346
2347/**
d96ee56c
DH
2348 * sys_set_robust_list() - Set the robust-futex list head of a task
2349 * @head: pointer to the list-head
2350 * @len: length of the list-head, as userspace expects
0771dfef 2351 */
836f92ad
HC
2352SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
2353 size_t, len)
0771dfef 2354{
a0c1e907
TG
2355 if (!futex_cmpxchg_enabled)
2356 return -ENOSYS;
0771dfef
IM
2357 /*
2358 * The kernel knows only one size for now:
2359 */
2360 if (unlikely(len != sizeof(*head)))
2361 return -EINVAL;
2362
2363 current->robust_list = head;
2364
2365 return 0;
2366}
2367
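/*
 * A minimal userspace sketch (not part of this file) of what a thread
 * library does with the syscall above: register one robust list head per
 * thread, then link each robust lock into it around acquisition.
 */
#if 0
#include <linux/futex.h>	/* struct robust_list_head */

static struct robust_list_head robust_head;

static void register_robust_list(void)
{
	robust_head.list.next = &robust_head.list;	/* empty circular list */
	robust_head.futex_offset = 0;	/* futex word located at the entry */
	robust_head.list_op_pending = NULL;

	syscall(SYS_set_robust_list, &robust_head, sizeof(robust_head));
}
#endif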
2368/**
d96ee56c
DH
2369 * sys_get_robust_list() - Get the robust-futex list head of a task
2370 * @pid: pid of the process [zero for current task]
2371 * @head_ptr: pointer to a list-head pointer, the kernel fills it in
2372 * @len_ptr: pointer to a length field, the kernel fills in the header size
0771dfef 2373 */
836f92ad
HC
2374SYSCALL_DEFINE3(get_robust_list, int, pid,
2375 struct robust_list_head __user * __user *, head_ptr,
2376 size_t __user *, len_ptr)
0771dfef 2377{
ba46df98 2378 struct robust_list_head __user *head;
0771dfef 2379 unsigned long ret;
c69e8d9c 2380 const struct cred *cred = current_cred(), *pcred;
0771dfef 2381
a0c1e907
TG
2382 if (!futex_cmpxchg_enabled)
2383 return -ENOSYS;
2384
0771dfef
IM
2385 if (!pid)
2386 head = current->robust_list;
2387 else {
2388 struct task_struct *p;
2389
2390 ret = -ESRCH;
aaa2a97e 2391 rcu_read_lock();
228ebcbe 2392 p = find_task_by_vpid(pid);
0771dfef
IM
2393 if (!p)
2394 goto err_unlock;
2395 ret = -EPERM;
c69e8d9c
DH
2396 pcred = __task_cred(p);
2397 if (cred->euid != pcred->euid &&
2398 cred->euid != pcred->uid &&
76aac0e9 2399 !capable(CAP_SYS_PTRACE))
0771dfef
IM
2400 goto err_unlock;
2401 head = p->robust_list;
aaa2a97e 2402 rcu_read_unlock();
0771dfef
IM
2403 }
2404
2405 if (put_user(sizeof(*head), len_ptr))
2406 return -EFAULT;
2407 return put_user(head, head_ptr);
2408
2409err_unlock:
aaa2a97e 2410 rcu_read_unlock();
0771dfef
IM
2411
2412 return ret;
2413}
2414
2415/*
2416 * Process a futex-list entry, check whether it's owned by the
2417 * dying task, and do notification if so:
2418 */
e3f2ddea 2419int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
0771dfef 2420{
e3f2ddea 2421 u32 uval, nval, mval;
0771dfef 2422
8f17d3a5
IM
2423retry:
2424 if (get_user(uval, uaddr))
0771dfef
IM
2425 return -1;
2426
b488893a 2427 if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
0771dfef
IM
2428 /*
2429 * Ok, this dying thread is truly holding a futex
2430 * of interest. Set the OWNER_DIED bit atomically
2431 * via cmpxchg, and if the value had FUTEX_WAITERS
2432 * set, wake up a waiter (if any). (We have to do a
2433 * futex_wake() even if OWNER_DIED is already set -
2434 * to handle the rare but possible case of recursive
2435 * thread-death.) The rest of the cleanup is done in
2436 * userspace.
2437 */
e3f2ddea
IM
2438 mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
2439 nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval);
2440
c87e2837
IM
2441 if (nval == -EFAULT)
2442 return -1;
2443
2444 if (nval != uval)
8f17d3a5 2445 goto retry;
0771dfef 2446
e3f2ddea
IM
2447 /*
2448 * Wake robust non-PI futexes here. The wakeup of
2449 * PI futexes happens in exit_pi_state():
2450 */
36cf3b5c 2451 if (!pi && (uval & FUTEX_WAITERS))
c2f9f201 2452 futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
0771dfef
IM
2453 }
2454 return 0;
2455}
2456
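/*
 * A sketch (not part of this file) of what the next locker sees once
 * handle_futex_death() above has run: FUTEX_OWNER_DIED in the lock word,
 * which must be reported so the application can recover the protected
 * state. try_lock_tid() is a hypothetical 0 -> TID cmpxchg helper that
 * returns the prior value of the futex word.
 */
#if 0
static int robust_lock(int *futex)
{
	int prev = try_lock_tid(futex);	/* hypothetical acquisition */

	if (prev & FUTEX_OWNER_DIED) {
		/*
		 * The previous owner died mid-critical-section; the data
		 * may be inconsistent. Recover it, then clear the bit.
		 */
		return EOWNERDEAD;	/* as pthread robust mutexes report */
	}
	return 0;
}
#endif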
e3f2ddea
IM
2457/*
2458 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
2459 */
2460static inline int fetch_robust_entry(struct robust_list __user **entry,
ba46df98
AV
2461 struct robust_list __user * __user *head,
2462 int *pi)
e3f2ddea
IM
2463{
2464 unsigned long uentry;
2465
ba46df98 2466 if (get_user(uentry, (unsigned long __user *)head))
e3f2ddea
IM
2467 return -EFAULT;
2468
ba46df98 2469 *entry = (void __user *)(uentry & ~1UL);
e3f2ddea
IM
2470 *pi = uentry & 1;
2471
2472 return 0;
2473}
2474
0771dfef
IM
2475/*
2476 * Walk curr->robust_list (very carefully, it's a userspace list!)
2477 * and mark any locks found there dead, and notify any waiters.
2478 *
2479 * We silently return on any sign of list-walking problem.
2480 */
2481void exit_robust_list(struct task_struct *curr)
2482{
2483 struct robust_list_head __user *head = curr->robust_list;
9f96cb1e
MS
2484 struct robust_list __user *entry, *next_entry, *pending;
2485 unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
0771dfef 2486 unsigned long futex_offset;
9f96cb1e 2487 int rc;
0771dfef 2488
a0c1e907
TG
2489 if (!futex_cmpxchg_enabled)
2490 return;
2491
0771dfef
IM
2492 /*
2493 * Fetch the list head (which was registered earlier, via
2494 * sys_set_robust_list()):
2495 */
e3f2ddea 2496 if (fetch_robust_entry(&entry, &head->list.next, &pi))
0771dfef
IM
2497 return;
2498 /*
2499 * Fetch the relative futex offset:
2500 */
2501 if (get_user(futex_offset, &head->futex_offset))
2502 return;
2503 /*
2504 * Fetch any possibly pending lock-add first, and handle it
2505 * if it exists:
2506 */
e3f2ddea 2507 if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
0771dfef 2508 return;
e3f2ddea 2509
9f96cb1e 2510 next_entry = NULL; /* avoid warning with gcc */
0771dfef 2511 while (entry != &head->list) {
9f96cb1e
MS
2512 /*
2513 * Fetch the next entry in the list before calling
2514 * handle_futex_death:
2515 */
2516 rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
0771dfef
IM
2517 /*
2518 * A pending lock might already be on the list, so
c87e2837 2519 * don't process it twice:
0771dfef
IM
2520 */
2521 if (entry != pending)
ba46df98 2522 if (handle_futex_death((void __user *)entry + futex_offset,
e3f2ddea 2523 curr, pi))
0771dfef 2524 return;
9f96cb1e 2525 if (rc)
0771dfef 2526 return;
9f96cb1e
MS
2527 entry = next_entry;
2528 pi = next_pi;
0771dfef
IM
2529 /*
2530 * Avoid excessively long or circular lists:
2531 */
2532 if (!--limit)
2533 break;
2534
2535 cond_resched();
2536 }
9f96cb1e
MS
2537
2538 if (pending)
2539 handle_futex_death((void __user *)pending + futex_offset,
2540 curr, pip);
0771dfef
IM
2541}
2542
c19384b5 2543long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
e2970f2f 2544 u32 __user *uaddr2, u32 val2, u32 val3)
1da177e4 2545{
1acdac10 2546 int clockrt, ret = -ENOSYS;
34f01cc1 2547 int cmd = op & FUTEX_CMD_MASK;
c2f9f201 2548 int fshared = 0;
34f01cc1
ED
2549
2550 if (!(op & FUTEX_PRIVATE_FLAG))
c2f9f201 2551 fshared = 1;
1da177e4 2552
1acdac10 2553 clockrt = op & FUTEX_CLOCK_REALTIME;
52400ba9 2554 if (clockrt && cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI)
1acdac10 2555 return -ENOSYS;
1da177e4 2556
34f01cc1 2557 switch (cmd) {
1da177e4 2558 case FUTEX_WAIT:
cd689985
TG
2559 val3 = FUTEX_BITSET_MATCH_ANY;
2560 case FUTEX_WAIT_BITSET:
1acdac10 2561 ret = futex_wait(uaddr, fshared, val, timeout, val3, clockrt);
1da177e4
LT
2562 break;
2563 case FUTEX_WAKE:
cd689985
TG
2564 val3 = FUTEX_BITSET_MATCH_ANY;
2565 case FUTEX_WAKE_BITSET:
2566 ret = futex_wake(uaddr, fshared, val, val3);
1da177e4 2567 break;
1da177e4 2568 case FUTEX_REQUEUE:
52400ba9 2569 ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, NULL, 0);
1da177e4
LT
2570 break;
2571 case FUTEX_CMP_REQUEUE:
52400ba9
DH
2572 ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3,
2573 0);
1da177e4 2574 break;
4732efbe 2575 case FUTEX_WAKE_OP:
34f01cc1 2576 ret = futex_wake_op(uaddr, fshared, uaddr2, val, val2, val3);
4732efbe 2577 break;
c87e2837 2578 case FUTEX_LOCK_PI:
a0c1e907
TG
2579 if (futex_cmpxchg_enabled)
2580 ret = futex_lock_pi(uaddr, fshared, val, timeout, 0);
c87e2837
IM
2581 break;
2582 case FUTEX_UNLOCK_PI:
a0c1e907
TG
2583 if (futex_cmpxchg_enabled)
2584 ret = futex_unlock_pi(uaddr, fshared);
c87e2837
IM
2585 break;
2586 case FUTEX_TRYLOCK_PI:
a0c1e907
TG
2587 if (futex_cmpxchg_enabled)
2588 ret = futex_lock_pi(uaddr, fshared, 0, timeout, 1);
c87e2837 2589 break;
52400ba9
DH
2590 case FUTEX_WAIT_REQUEUE_PI:
2591 val3 = FUTEX_BITSET_MATCH_ANY;
2592 ret = futex_wait_requeue_pi(uaddr, fshared, val, timeout, val3,
2593 clockrt, uaddr2);
2594 break;
52400ba9
DH
2595 case FUTEX_CMP_REQUEUE_PI:
2596 ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3,
2597 1);
2598 break;
1da177e4
LT
2599 default:
2600 ret = -ENOSYS;
2601 }
2602 return ret;
2603}
2604
2605
17da2bd9
HC
2606SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
2607 struct timespec __user *, utime, u32 __user *, uaddr2,
2608 u32, val3)
1da177e4 2609{
c19384b5
PP
2610 struct timespec ts;
2611 ktime_t t, *tp = NULL;
e2970f2f 2612 u32 val2 = 0;
34f01cc1 2613 int cmd = op & FUTEX_CMD_MASK;
1da177e4 2614
cd689985 2615 if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
52400ba9
DH
2616 cmd == FUTEX_WAIT_BITSET ||
2617 cmd == FUTEX_WAIT_REQUEUE_PI)) {
c19384b5 2618 if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
1da177e4 2619 return -EFAULT;
c19384b5 2620 if (!timespec_valid(&ts))
9741ef96 2621 return -EINVAL;
c19384b5
PP
2622
2623 t = timespec_to_ktime(ts);
34f01cc1 2624 if (cmd == FUTEX_WAIT)
5a7780e7 2625 t = ktime_add_safe(ktime_get(), t);
c19384b5 2626 tp = &t;
1da177e4
LT
2627 }
2628 /*
52400ba9 2629 * requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*.
f54f0986 2630 * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
1da177e4 2631 */
f54f0986 2632 if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
ba9c22f2 2633 cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
e2970f2f 2634 val2 = (u32) (unsigned long) utime;
1da177e4 2635
c19384b5 2636 return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
1da177e4
LT
2637}
2638
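/*
 * Illustration (not part of this file) of the argument reuse decoded
 * above: for the requeue and wake-op commands the timeout slot of the
 * syscall carries a plain integer (val2). The futex words and 'expected'
 * value are hypothetical.
 */
#if 0
static void requeue_example(int *waiters1, int *waiters2, int expected)
{
	/* Wake one task from waiters1, move up to 128 more onto waiters2: */
	syscall(SYS_futex, waiters1, FUTEX_CMP_REQUEUE, 1,
		(struct timespec *)128UL, waiters2, expected);
}
#endif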
f6d107fb 2639static int __init futex_init(void)
1da177e4 2640{
a0c1e907 2641 u32 curval;
3e4ab747 2642 int i;
95362fa9 2643
a0c1e907
TG
2644 /*
2645 * This will fail and we want it. Some arch implementations do
2646 * runtime detection of the futex_atomic_cmpxchg_inatomic()
2647 * functionality. We want to know that before we call in any
2648 * of the complex code paths. Also we want to prevent
2649 * registration of robust lists in that case. NULL is
2650 * guaranteed to fault and we get -EFAULT on functional
2651 * implementation, the non functional ones will return
2652 * -ENOSYS.
2653 */
2654 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
2655 if (curval == -EFAULT)
2656 futex_cmpxchg_enabled = 1;
2657
3e4ab747
TG
2658 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
2659 plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
2660 spin_lock_init(&futex_queues[i].lock);
2661 }
2662
1da177e4
LT
2663 return 0;
2664}
f6d107fb 2665__initcall(futex_init);