X-Git-Url: http://drtracing.org/?a=blobdiff_plain;f=fs%2Feventpoll.c;h=221086fef1743545c143a270bd1bc228cfb33561;hb=f3de272b821accbc8387211977c2de4f38468d05;hp=77b9953624f480e37d80fb13010fb3e1bc8b3c51;hpb=b981d8b3f5e008ff10d993be633ad00564fc22cd;p=deliverable%2Flinux.git

diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 77b9953624f4..221086fef174 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -257,25 +257,6 @@ static inline int ep_cmp_ffd(struct epoll_filefd *p1,
	        (p1->file < p2->file ? -1 : p1->fd - p2->fd));
 }
 
-/* Special initialization for the RB tree node to detect linkage */
-static inline void ep_rb_initnode(struct rb_node *n)
-{
-	rb_set_parent(n, n);
-}
-
-/* Removes a node from the RB tree and marks it for a fast is-linked check */
-static inline void ep_rb_erase(struct rb_node *n, struct rb_root *r)
-{
-	rb_erase(n, r);
-	rb_set_parent(n, n);
-}
-
-/* Fast check to verify that the item is linked to the main RB tree */
-static inline int ep_rb_linked(struct rb_node *n)
-{
-	return rb_parent(n) != n;
-}
-
 /* Tells us if the item is currently linked */
 static inline int ep_is_linked(struct list_head *p)
 {
@@ -283,13 +264,13 @@ static inline int ep_is_linked(struct list_head *p)
 }
 
 /* Get the "struct epitem" from a wait queue pointer */
-static inline struct epitem * ep_item_from_wait(wait_queue_t *p)
+static inline struct epitem *ep_item_from_wait(wait_queue_t *p)
 {
 	return container_of(p, struct eppoll_entry, wait)->base;
 }
 
 /* Get the "struct epitem" from an epoll queue wrapper */
-static inline struct epitem * ep_item_from_epqueue(poll_table *p)
+static inline struct epitem *ep_item_from_epqueue(poll_table *p)
 {
 	return container_of(p, struct ep_pqueue, pt)->epi;
 }
@@ -325,15 +306,14 @@ static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq)
 	int wake_nests = 0;
 	unsigned long flags;
 	struct task_struct *this_task = current;
-	struct list_head *lsthead = &psw->wake_task_list, *lnk;
+	struct list_head *lsthead = &psw->wake_task_list;
 	struct wake_task_node *tncur;
 	struct wake_task_node tnode;
 
 	spin_lock_irqsave(&psw->lock, flags);
 
 	/* Try to see if the current task is already inside this wakeup call */
-	list_for_each(lnk, lsthead) {
-		tncur = list_entry(lnk, struct wake_task_node, llink);
+	list_for_each_entry(tncur, lsthead, llink) {
 
 		if (tncur->wq == wq ||
 		    (tncur->task == this_task && ++wake_nests > EP_MAX_POLLWAKE_NESTS)) {
@@ -354,7 +334,7 @@ static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq)
 	spin_unlock_irqrestore(&psw->lock, flags);
 
 	/* Do really wake up now */
-	wake_up(wq);
+	wake_up_nested(wq, 1 + wake_nests);
 
 	/* Remove the current task from the list */
 	spin_lock_irqsave(&psw->lock, flags);
@@ -412,8 +392,7 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi)
 	list_del_init(&epi->fllink);
 	spin_unlock(&file->f_ep_lock);
 
-	if (ep_rb_linked(&epi->rbn))
-		ep_rb_erase(&epi->rbn, &ep->rbr);
+	rb_erase(&epi->rbn, &ep->rbr);
 
 	spin_lock_irqsave(&ep->lock, flags);
 	if (ep_is_linked(&epi->rdllink))
@@ -463,7 +442,7 @@ static void ep_free(struct eventpoll *ep)
 	 * holding "epmutex" we can be sure that no file cleanup code will hit
 	 * us during this operation. So we can avoid the lock on "ep->lock".
 	 */
-	while ((rbp = rb_first(&ep->rbr)) != 0) {
+	while ((rbp = rb_first(&ep->rbr)) != NULL) {
 		epi = rb_entry(rbp, struct epitem, rbn);
 		ep_remove(ep, epi);
 	}
@@ -657,8 +636,7 @@ is_linked:
 	 * wait list.
 	 */
 	if (waitqueue_active(&ep->wq))
-		__wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
-				 TASK_INTERRUPTIBLE);
+		wake_up_locked(&ep->wq);
 	if (waitqueue_active(&ep->poll_wait))
 		pwake++;
 
@@ -730,7 +708,6 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 		goto error_return;
 
 	/* Item initialization follow here ... */
-	ep_rb_initnode(&epi->rbn);
 	INIT_LIST_HEAD(&epi->rdllink);
 	INIT_LIST_HEAD(&epi->fllink);
 	INIT_LIST_HEAD(&epi->pwqlist);
@@ -781,7 +758,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 
 		/* Notify waiting tasks that events are available */
 		if (waitqueue_active(&ep->wq))
-			__wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE);
+			wake_up_locked(&ep->wq);
 		if (waitqueue_active(&ep->poll_wait))
 			pwake++;
 	}
@@ -855,8 +832,7 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
 
 		/* Notify waiting tasks that events are available */
 		if (waitqueue_active(&ep->wq))
-			__wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
-					 TASK_INTERRUPTIBLE);
+			wake_up_locked(&ep->wq);
 		if (waitqueue_active(&ep->poll_wait))
 			pwake++;
 	}
@@ -979,8 +955,7 @@ errxit:
 	 * wait list (delayed after we release the lock).
 	 */
 	if (waitqueue_active(&ep->wq))
-		__wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
-				 TASK_INTERRUPTIBLE);
+		wake_up_locked(&ep->wq);
 	if (waitqueue_active(&ep->poll_wait))
 		pwake++;
 }
@@ -1266,7 +1241,7 @@ error_return:
 	return error;
 }
 
-#ifdef TIF_RESTORE_SIGMASK
+#ifdef HAVE_SET_RESTORE_SIGMASK
 
 /*
 * Implement the event wait interface for the eventpoll file. It is the kernel
@@ -1304,7 +1279,7 @@ asmlinkage long sys_epoll_pwait(int epfd, struct epoll_event __user *events,
 		if (error == -EINTR) {
 			memcpy(&current->saved_sigmask, &sigsaved,
 			       sizeof(sigsaved));
-			set_thread_flag(TIF_RESTORE_SIGMASK);
+			set_restore_sigmask();
 		} else
 			sigprocmask(SIG_SETMASK, &sigsaved, NULL);
 	}
@@ -1312,7 +1287,7 @@ asmlinkage long sys_epoll_pwait(int epfd, struct epoll_event __user *events,
 	return error;
 }
 
-#endif /* #ifdef TIF_RESTORE_SIGMASK */
+#endif /* HAVE_SET_RESTORE_SIGMASK */
 
 static int __init eventpoll_init(void)
 {
@@ -1334,4 +1309,3 @@ static int __init eventpoll_init(void)
 	return 0;
 }
 fs_initcall(eventpoll_init);
-
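
Note on the list_for_each_entry() conversion in the ep_poll_safewake() hunk above: the old code walked raw struct list_head nodes and converted each one with list_entry() (a container_of() wrapper), while the new macro folds that pointer arithmetic into a typed cursor. What follows is a minimal userspace sketch of the idiom, not kernel code: the two list macros are simplified re-implementations written for this example (the real ones live in <linux/list.h> and differ in detail), and wake_task_node here is a stand-in type with a made-up task_id field.

#include <stdio.h>
#include <stddef.h>

struct list_head {
	struct list_head *next, *prev;
};

/* Simplified re-implementation of the kernel's container_of(). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Old idiom: iterate raw nodes; callers convert with list_entry(). */
#define list_for_each(pos, head) \
	for (pos = (head)->next; pos != (head); pos = pos->next)

/* New idiom: the cursor is already the containing type. */
#define list_for_each_entry(pos, head, member)                              \
	for (pos = container_of((head)->next, __typeof__(*pos), member);    \
	     &pos->member != (head);                                         \
	     pos = container_of(pos->member.next, __typeof__(*pos), member))

/* Stand-in for eventpoll's wake_task_node; task_id is invented. */
struct wake_task_node {
	struct list_head llink;
	int task_id;
};

int main(void)
{
	struct list_head lsthead = { &lsthead, &lsthead };
	struct wake_task_node a = { .task_id = 1 }, b = { .task_id = 2 };
	struct wake_task_node *tncur;

	/* Link lsthead -> a -> b -> lsthead by hand to keep the sketch short. */
	lsthead.next = &a.llink;  a.llink.prev = &lsthead;
	a.llink.next = &b.llink;  b.llink.prev = &a.llink;
	b.llink.next = &lsthead;  lsthead.prev = &b.llink;

	/*
	 * Before the diff, the walk needed a raw cursor plus an explicit
	 * conversion on every iteration:
	 *
	 *	struct list_head *lnk;
	 *	list_for_each(lnk, &lsthead)
	 *		tncur = container_of(lnk, struct wake_task_node, llink);
	 *
	 * After the diff, the cursor itself is typed:
	 */
	list_for_each_entry(tncur, &lsthead, llink)
		printf("task %d\n", tncur->task_id);

	return 0;
}

Compiled with gcc (it relies on the GNU __typeof__ extension, as the kernel does), this prints "task 1" and "task 2", visiting each entry through the typed cursor with no explicit list_entry() call in the loop body.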