projects
/
deliverable
/
linux.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
pid namespaces: changes to show virtual ids to user
[deliverable/linux.git]
/
kernel
/
futex.c
diff --git a/kernel/futex.c b/kernel/futex.c
index 3415e9ad1391adc6422c9324b8efdc1bbd6c7ce2..86b2600381b616db0d157ac9e172a3fc55315be4 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -52,6 +52,10 @@
 #include <linux/syscalls.h>
 #include <linux/signal.h>
 #include <linux/module.h>
+#include <linux/magic.h>
+#include <linux/pid.h>
+#include <linux/nsproxy.h>
+
 #include <asm/futex.h>
 #include "rtmutex_common.h"
@@ -292,7 +296,7 @@ EXPORT_SYMBOL_GPL(get_futex_key_refs);
  */
 void drop_futex_key_refs(union futex_key *key)
 {
-	if (key->both.ptr == 0)
+	if (!key->both.ptr)
 		return;
 	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
 	case FUT_OFF_INODE:
@@ -442,7 +446,8 @@ static struct task_struct * futex_find_get_task(pid_t pid)
 	struct task_struct *p;
 
 	rcu_read_lock();
-	p = find_task_by_pid(pid);
+	p = find_task_by_pid_ns(pid,
+			current->nsproxy->pid_ns);
 	if (!p || ((current->euid != p->euid) && (current->euid != p->uid)))
 		p = ERR_PTR(-ESRCH);
@@ -652,7 +657,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
 	if (!(uval & FUTEX_OWNER_DIED)) {
 		int ret = 0;
 
-		newval = FUTEX_WAITERS | new_owner->pid;
+		newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
 
 		curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
@@ -1045,7 +1050,7 @@ static int unqueue_me(struct futex_q *q)
 retry:
 	lock_ptr = q->lock_ptr;
 	barrier();
-	if (lock_ptr != 0) {
+	if (lock_ptr != NULL) {
 		spin_lock(lock_ptr);
 		/*
 		 * q->lock_ptr can change between reading it and
@@ -1105,7 +1110,7 @@ static void unqueue_me_pi(struct futex_q *q)
 static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
 				struct task_struct *curr)
 {
-	u32 newtid = curr->pid | FUTEX_WAITERS;
+	u32 newtid = task_pid_vnr(curr) | FUTEX_WAITERS;
 	struct futex_pi_state *pi_state = q->pi_state;
 	u32 uval, curval, newval;
 	int ret;
@@ -1367,7 +1372,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 	 * (by doing a 0 -> TID atomic cmpxchg), while holding all
 	 * the locks. It will most likely not succeed.
 	 */
-	newval = current->pid;
+	newval = task_pid_vnr(current);
 
 	curval = cmpxchg_futex_value_locked(uaddr, 0, newval);
@@ -1378,7 +1383,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 	 * Detect deadlocks. In case of REQUEUE_PI this is a valid
 	 * situation and we return success to user space.
 	 */
-	if (unlikely((curval & FUTEX_TID_MASK) == current->pid)) {
+	if (unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(current))) {
 		ret = -EDEADLK;
 		goto out_unlock_release_sem;
 	}
@@ -1407,7 +1412,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 	 */
 	if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
 		/* Keep the OWNER_DIED bit */
-		newval = (curval & ~FUTEX_TID_MASK) | current->pid;
+		newval = (curval & ~FUTEX_TID_MASK) | task_pid_vnr(current);
 
 		ownerdied = 0;
 		lock_taken = 1;
 	}
@@ -1586,7 +1591,7 @@ retry:
 	/*
 	 * We release only a lock we actually own:
 	 */
-	if ((uval & FUTEX_TID_MASK) != current->pid)
+	if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
 		return -EPERM;
 	/*
 	 * First take all the futex related locks:
@@ -1607,7 +1612,7 @@ retry_unlocked:
 	 * anyone else up:
 	 */
 	if (!(uval & FUTEX_OWNER_DIED))
-		uval = cmpxchg_futex_value_locked(uaddr, current->pid, 0);
+		uval = cmpxchg_futex_value_locked(uaddr, task_pid_vnr(current), 0);
 
 	if (unlikely(uval == -EFAULT))
@@ -1616,7 +1621,7 @@ retry_unlocked:
 	 * Rare case: we managed to release the lock atomically,
 	 * no need to wake anyone else up:
 	 */
-	if (unlikely(uval == current->pid))
+	if (unlikely(uval == task_pid_vnr(current)))
 		goto out_unlock;
 
 	/*
@@ -1670,6 +1675,7 @@ pi_faulted:
 			      attempt);
 	if (ret)
 		goto out;
+	uval = 0;
 	goto retry_unlocked;
 }
@@ -1852,7 +1858,8 @@ sys_get_robust_list(int pid, struct robust_list_head __user * __user *head_ptr,
 
 		ret = -ESRCH;
 		rcu_read_lock();
-		p = find_task_by_pid(pid);
+		p = find_task_by_pid_ns(pid,
+				current->nsproxy->pid_ns);
 		if (!p)
 			goto err_unlock;
 		ret = -EPERM;
@@ -1885,7 +1892,7 @@ retry:
 	if (get_user(uval, uaddr))
 		return -1;
 
-	if ((uval & FUTEX_TID_MASK) == curr->pid) {
+	if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
 		/*
 		 * Ok, this dying thread is truly holding a futex
 		 * of interest. Set the OWNER_DIED bit atomically
@@ -1942,9 +1949,10 @@ static inline int fetch_robust_entry(struct robust_list __user **entry,
 void exit_robust_list(struct task_struct *curr)
 {
 	struct robust_list_head __user *head = curr->robust_list;
-	struct robust_list __user *entry, *pending;
-	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
+	struct robust_list __user *entry, *next_entry, *pending;
+	unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
 	unsigned long futex_offset;
+	int rc;
 
 	/*
 	 * Fetch the list head (which was registered earlier, via
@@ -1964,11 +1972,13 @@ void exit_robust_list(struct task_struct *curr)
 	if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
 		return;
 
-	if (pending)
-		handle_futex_death((void __user *)pending + futex_offset,
-				   curr, pip);
-
+	next_entry = NULL;	/* avoid warning with gcc */
 	while (entry != &head->list) {
+		/*
+		 * Fetch the next entry in the list before calling
+		 * handle_futex_death:
+		 */
+		rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
 		/*
 		 * A pending lock might already be on the list, so
 		 * don't process it twice:
@@ -1977,11 +1987,10 @@ void exit_robust_list(struct task_struct *curr)
 			if (handle_futex_death((void __user *)entry + futex_offset,
 						curr, pi))
 				return;
-		/*
-		 * Fetch the next entry in the list:
-		 */
-		if (fetch_robust_entry(&entry, &entry->next, &pi))
+		if (rc)
 			return;
+		entry = next_entry;
+		pi = next_pi;
 		/*
 		 * Avoid excessively long or circular lists:
 		 */
@@ -1990,6 +1999,10 @@ void exit_robust_list(struct task_struct *curr)
 		cond_resched();
 	}
+
+	if (pending)
+		handle_futex_death((void __user *)pending + futex_offset,
+				   curr, pip);
 }
 
 long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
@@ -2073,7 +2086,7 @@ static int futexfs_get_sb(struct file_system_type *fs_type,
 			  int flags, const char *dev_name, void *data,
 			  struct vfsmount *mnt)
 {
-	return get_sb_pseudo(fs_type, "futex", NULL, 0xBAD1DEA, mnt);
+	return get_sb_pseudo(fs_type, "futex", NULL, FUTEXFS_SUPER_MAGIC, mnt);
 }
 
 static struct file_system_type futex_fs_type = {
This page took
0.02811 seconds
and
5
git commands to generate.