/*
 * fs/eventpoll.c ( Efficient event polling implementation )
 * Copyright (C) 2001,...,2006 Davide Libenzi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Davide Libenzi <davidel@xmailserver.org>
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/rwsem.h>
#include <linux/rbtree.h>
#include <linux/wait.h>
#include <linux/eventpoll.h>
#include <linux/mount.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/anon_inodes.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/atomic.h>
#include <asm/semaphore.h>
/*
 * There are three levels of locking required by epoll :
 *
 * 1) epmutex (mutex)
 * 2) ep->sem (rw_semaphore)
 * 3) ep->lock (rw_lock)
 *
 * The acquire order is the one listed above, from 1 to 3.
 * We need a spinlock (ep->lock) because we manipulate objects
 * from inside the poll callback, which might be triggered from
 * a wake_up() that in turn might be called from IRQ context.
 * So we can't sleep inside the poll callback and hence we need
 * a spinlock. During the event transfer loop (from kernel to
 * user space) we could end up sleeping due to a copy_to_user(), so
 * we need a lock that will allow us to sleep. This lock is a
 * read-write semaphore (ep->sem). It is acquired in read mode during
 * the event transfer loop and in write mode during epoll_ctl(EPOLL_CTL_DEL)
 * and during eventpoll_release_file(). Then we also need a global
 * semaphore to serialize eventpoll_release_file() and ep_free().
 * This semaphore is acquired by ep_free() during the epoll file
 * cleanup path and it is also acquired by eventpoll_release_file()
 * if a file has been pushed inside an epoll set and is then
 * close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL).
 * It is possible to drop the "ep->sem" and to use the global
 * semaphore "epmutex" (together with "ep->lock") to have it working,
 * but having "ep->sem" will make the interface more scalable.
 * Events that require holding "epmutex" are very rare, while for
 * normal operations the epoll private "ep->sem" will guarantee
 * greater scalability.
 */
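
/*
 * A minimal sketch of the resulting nesting, assuming a path that
 * needed all three locks at once ( illustrative only; real paths
 * take only the subset they need ):
 *
 *	mutex_lock(&epmutex);
 *	down_write(&ep->sem);
 *	write_lock_irqsave(&ep->lock, flags);
 *	... critical section ...
 *	write_unlock_irqrestore(&ep->lock, flags);
 *	up_write(&ep->sem);
 *	mutex_unlock(&epmutex);
 */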
#if DEBUG_EPOLL > 0
#define DPRINTK(x) printk x
#define DNPRINTK(n, x) do { if ((n) <= DEBUG_EPOLL) printk x; } while (0)
#else /* #if DEBUG_EPOLL > 0 */
#define DPRINTK(x) (void) 0
#define DNPRINTK(n, x) (void) 0
#endif /* #if DEBUG_EPOLL > 0 */

#if DEBUG_EPI != 0
#define EPI_SLAB_DEBUG (SLAB_DEBUG_FREE | SLAB_RED_ZONE /* | SLAB_POISON */)
#else /* #if DEBUG_EPI != 0 */
#define EPI_SLAB_DEBUG 0
#endif /* #if DEBUG_EPI != 0 */

/* Epoll private bits inside the event mask */
#define EP_PRIVATE_BITS (EPOLLONESHOT | EPOLLET)

/* Maximum number of poll wake up nests we are allowing */
#define EP_MAX_POLLWAKE_NESTS 4

/* Maximum msec timeout value storable in a long int */
#define EP_MAX_MSTIMEO min(1000ULL * MAX_SCHEDULE_TIMEOUT / HZ, (LONG_MAX - 999ULL) / HZ)

#define EP_MAX_EVENTS (INT_MAX / sizeof(struct epoll_event))
struct epoll_filefd {
	struct file *file;
	int fd;
};

/*
 * Node that is linked into the "wake_task_list" member of the "struct poll_safewake".
 * It is used to keep track of all tasks that are currently inside the wake_up() code
 * to 1) short-circuit the one coming from the same task and same wait queue head
 * ( loop ) 2) allow a maximum number of epoll descriptors inclusion nesting
 * 3) let go the ones coming from other tasks.
 */
struct wake_task_node {
	struct list_head llink;
	struct task_struct *task;
	wait_queue_head_t *wq;
};
/*
 * This is used to implement the safe poll wake up, avoiding reentering
 * the poll callback from inside wake_up().
 */
struct poll_safewake {
	struct list_head wake_task_list;
	spinlock_t lock;
};
/*
 * This structure is stored inside the "private_data" member of the file
 * structure and represents the main data structure for the eventpoll
 * interface.
 */
struct eventpoll {
	/* Protects access to this structure */
	rwlock_t lock;

	/*
	 * This semaphore is used to ensure that files are not removed
	 * while epoll is using them. This is read-held during the event
	 * collection loop and it is write-held during the file cleanup
	 * path, the epoll file exit code and the ctl operations.
	 */
	struct rw_semaphore sem;

	/* Wait queue used by sys_epoll_wait() */
	wait_queue_head_t wq;

	/* Wait queue used by file->poll() */
	wait_queue_head_t poll_wait;

	/* List of ready file descriptors */
	struct list_head rdllist;

	/* RB-Tree root used to store monitored fd structs */
	struct rb_root rbr;
};
/* Wait structure used by the poll hooks */
struct eppoll_entry {
	/* List header used to link this structure to the "struct epitem" */
	struct list_head llink;

	/* The "base" pointer is set to the container "struct epitem" */
	void *base;

	/*
	 * Wait queue item that will be linked to the target file wait
	 * queue head.
	 */
	wait_queue_t wait;

	/* The wait queue head that linked the "wait" wait queue item */
	wait_queue_head_t *whead;
};
/*
 * Each file descriptor added to the eventpoll interface will
 * have an entry of this type linked to the "rbr" RB tree.
 */
struct epitem {
	/* RB-Tree node used to link this structure to the eventpoll rb-tree */
	struct rb_node rbn;

	/* List header used to link this structure to the eventpoll ready list */
	struct list_head rdllink;

	/* The file descriptor information this item refers to */
	struct epoll_filefd ffd;

	/* Number of active wait queues attached to poll operations */
	int nwait;

	/* List containing poll wait queues */
	struct list_head pwqlist;

	/* The "container" of this item */
	struct eventpoll *ep;

	/* The structure that describes the interested events and the source fd */
	struct epoll_event event;

	/*
	 * Used to keep track of the usage count of the structure. This avoids
	 * that the structure will disappear from underneath our processing.
	 */
	atomic_t usecnt;

	/* List header used to link this item to the "struct file" items list */
	struct list_head fllink;
};
/* Wrapper struct used by poll queueing */
struct ep_pqueue {
	poll_table pt;
	struct epitem *epi;
};

static void ep_poll_safewake_init(struct poll_safewake *psw);
static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq);
static int ep_alloc(struct eventpoll **pep);
static void ep_free(struct eventpoll *ep);
static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd);
static void ep_use_epitem(struct epitem *epi);
static void ep_release_epitem(struct epitem *epi);
static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
				 poll_table *pt);
static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi);
static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
		     struct file *tfile, int fd);
static int ep_modify(struct eventpoll *ep, struct epitem *epi,
		     struct epoll_event *event);
static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi);
static int ep_unlink(struct eventpoll *ep, struct epitem *epi);
static int ep_remove(struct eventpoll *ep, struct epitem *epi);
static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key);
static int ep_eventpoll_close(struct inode *inode, struct file *file);
static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait);
static int ep_send_events(struct eventpoll *ep, struct list_head *txlist,
			  struct epoll_event __user *events, int maxevents);
static int ep_events_transfer(struct eventpoll *ep,
			      struct epoll_event __user *events,
			      int maxevents);
static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
		   int maxevents, long timeout);
/*
 * This semaphore is used to serialize ep_free() and eventpoll_release_file().
 */
static struct mutex epmutex;

/* Safe wake up implementation */
static struct poll_safewake psw;

/* Slab cache used to allocate "struct epitem" */
static struct kmem_cache *epi_cache __read_mostly;

/* Slab cache used to allocate "struct eppoll_entry" */
static struct kmem_cache *pwq_cache __read_mostly;

/* File callbacks that implement the eventpoll file behaviour */
static const struct file_operations eventpoll_fops = {
	.release	= ep_eventpoll_close,
	.poll		= ep_eventpoll_poll
};
/* Fast test to see if the file is an eventpoll file */
static inline int is_file_epoll(struct file *f)
{
	return f->f_op == &eventpoll_fops;
}
/* Setup the structure that is used as key for the rb-tree */
static inline void ep_set_ffd(struct epoll_filefd *ffd,
			      struct file *file, int fd)
{
	ffd->file = file;
	ffd->fd = fd;
}
/* Compare rb-tree keys */
static inline int ep_cmp_ffd(struct epoll_filefd *p1,
			     struct epoll_filefd *p2)
{
	return (p1->file > p2->file ? +1 :
		(p1->file < p2->file ? -1 : p1->fd - p2->fd));
}
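
/*
 * Worked example of the comparator above ( illustrative ): two keys
 * that wrap the same "struct file" compare by descriptor number,
 * since p1->file == p2->file makes the result p1->fd - p2->fd; keys
 * for different files are ordered by their file pointer values alone,
 * so every ( file, fd ) pair gets a unique slot in the rb-tree.
 */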
/* Special initialization for the rb-tree node to detect linkage */
static inline void ep_rb_initnode(struct rb_node *n)
{
	rb_set_parent(n, n);
}

/* Removes a node from the rb-tree and marks it for a fast is-linked check */
static inline void ep_rb_erase(struct rb_node *n, struct rb_root *r)
{
	rb_erase(n, r);
	rb_set_parent(n, n);
}

/* Fast check to verify that the item is linked to the main rb-tree */
static inline int ep_rb_linked(struct rb_node *n)
{
	return rb_parent(n) != n;
}

/* Tells us if the item is currently linked */
static inline int ep_is_linked(struct list_head *p)
{
	return !list_empty(p);
}
/* Get the "struct epitem" from a wait queue pointer */
static inline struct epitem *ep_item_from_wait(wait_queue_t *p)
{
	return container_of(p, struct eppoll_entry, wait)->base;
}

/* Get the "struct epitem" from an epoll queue wrapper */
static inline struct epitem *ep_item_from_epqueue(poll_table *p)
{
	return container_of(p, struct ep_pqueue, pt)->epi;
}

/* Tells if the epoll_ctl(2) operation needs an event copy from userspace */
static inline int ep_op_has_event(int op)
{
	return op != EPOLL_CTL_DEL;
}
/* Initialize the poll safe wake up structure */
static void ep_poll_safewake_init(struct poll_safewake *psw)
{
	INIT_LIST_HEAD(&psw->wake_task_list);
	spin_lock_init(&psw->lock);
}
/*
 * Perform a safe wake up of the poll wait list. The problem is that
 * with the new callback'd wake up system, it is possible that the
 * poll callback is reentered from inside the call to wake_up() done
 * on the poll wait queue head. The rule is that we cannot reenter the
 * wake up code from the same task more than EP_MAX_POLLWAKE_NESTS times,
 * and we cannot reenter the same wait queue head at all. This enables
 * a hierarchy of epoll file descriptors no more than
 * EP_MAX_POLLWAKE_NESTS deep. We need the irq version of the spin lock
 * because this one gets called by the poll callback, which in turn is called
 * from inside a wake_up(), that might be called from irq context.
 */
static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq)
{
	int wake_nests = 0;
	unsigned long flags;
	struct task_struct *this_task = current;
	struct list_head *lsthead = &psw->wake_task_list, *lnk;
	struct wake_task_node *tncur;
	struct wake_task_node tnode;

	spin_lock_irqsave(&psw->lock, flags);

	/* Try to see if the current task is already inside this wakeup call */
	list_for_each(lnk, lsthead) {
		tncur = list_entry(lnk, struct wake_task_node, llink);

		if (tncur->wq == wq ||
		    (tncur->task == this_task && ++wake_nests > EP_MAX_POLLWAKE_NESTS)) {
			/*
			 * Oops ... loop detected or maximum nest level reached.
			 * We abort this wake by breaking the cycle itself.
			 */
			spin_unlock_irqrestore(&psw->lock, flags);
			return;
		}
	}

	/* Add the current task to the list */
	tnode.task = this_task;
	tnode.wq = wq;
	list_add(&tnode.llink, lsthead);

	spin_unlock_irqrestore(&psw->lock, flags);

	/* Do really wake up now */
	wake_up(wq);

	/* Remove the current task from the list */
	spin_lock_irqsave(&psw->lock, flags);
	list_del(&tnode.llink);
	spin_unlock_irqrestore(&psw->lock, flags);
}
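
/*
 * Illustrative userspace scenario ( an assumption for exposition, not
 * code from this file ) that exercises the nesting limit handled
 * above: one epoll file descriptor watched by another, so a wake up
 * on the inner one recurses into the outer one's poll wait queue:
 *
 *	int inner = epoll_create(1);
 *	int outer = epoll_create(1);
 *	struct epoll_event ev = { .events = EPOLLIN, .data.fd = inner };
 *
 *	epoll_ctl(outer, EPOLL_CTL_ADD, inner, &ev);
 *
 * Chains deeper than EP_MAX_POLLWAKE_NESTS, or cycles, are cut by
 * ep_poll_safewake() instead of recursing forever.
 */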
/*
 * This is called from eventpoll_release() to unlink files from the eventpoll
 * interface. We need this facility to correctly clean up files that are
 * closed without being removed from the eventpoll interface.
 */
void eventpoll_release_file(struct file *file)
{
	struct list_head *lsthead = &file->f_ep_links;
	struct eventpoll *ep;
	struct epitem *epi;

	/*
	 * We don't want to get "file->f_ep_lock" because it is not
	 * necessary. It is not necessary because we're in the "struct file"
	 * cleanup path, and this means that no one is using this file anymore.
	 * The only hit might come from ep_free() but holding the semaphore
	 * will correctly serialize the operation. We do need to acquire
	 * "ep->sem" after "epmutex" because ep_remove() requires it when called
	 * from anywhere but ep_free().
	 */
	mutex_lock(&epmutex);

	while (!list_empty(lsthead)) {
		epi = list_first_entry(lsthead, struct epitem, fllink);

		ep = epi->ep;
		list_del_init(&epi->fllink);
		down_write(&ep->sem);
		ep_remove(ep, epi);
		up_write(&ep->sem);
	}

	mutex_unlock(&epmutex);
}
/*
 * It opens an eventpoll file descriptor by suggesting a storage of "size"
 * file descriptors. The size parameter is just a hint about how to size
 * data structures. It won't prevent the user from storing more than "size"
 * file descriptors inside the epoll interface. It is the kernel part of
 * the userspace epoll_create(2).
 */
asmlinkage long sys_epoll_create(int size)
{
	int error, fd = -1;
	struct eventpoll *ep;
	struct inode *inode;
	struct file *file;

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d)\n",
		     current, size));

	/*
	 * Sanity check on the size parameter, and create the internal data
	 * structure ( "struct eventpoll" ).
	 */
	error = -EINVAL;
	if (size <= 0 || (error = ep_alloc(&ep)) != 0)
		goto eexit_1;

	/*
	 * Creates all the items needed to setup an eventpoll file. That is,
	 * a file structure, an inode and a free file descriptor.
	 */
	error = anon_inode_getfd(&fd, &inode, &file, "[eventpoll]",
				 &eventpoll_fops, ep);
	if (error)
		goto eexit_2;

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n",
		     current, size, fd));

	return fd;

eexit_2:
	ep_free(ep);
eexit_1:
	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n",
		     current, size, error));
	return error;
}
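
/*
 * Userspace usage sketch for the syscall above ( illustrative only,
 * assuming the C library wrapper ). The "size" argument is only a
 * hint, as described above:
 *
 *	#include <sys/epoll.h>
 *
 *	int epfd = epoll_create(64);
 *	if (epfd < 0)
 *		perror("epoll_create");
 */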
/*
 * The following function implements the controller interface for
 * the eventpoll file that enables the insertion/removal/change of
 * file descriptors inside the interest set. It represents
 * the kernel part of the user space epoll_ctl(2).
 */
asmlinkage long
sys_epoll_ctl(int epfd, int op, int fd, struct epoll_event __user *event)
{
	int error;
	struct file *file, *tfile;
	struct eventpoll *ep;
	struct epitem *epi;
	struct epoll_event epds;

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_ctl(%d, %d, %d, %p)\n",
		     current, epfd, op, fd, event));

	error = -EFAULT;
	if (ep_op_has_event(op) &&
	    copy_from_user(&epds, event, sizeof(struct epoll_event)))
		goto eexit_1;

	/* Get the "struct file *" for the eventpoll file */
	error = -EBADF;
	file = fget(epfd);
	if (!file)
		goto eexit_1;

	/* Get the "struct file *" for the target file */
	tfile = fget(fd);
	if (!tfile)
		goto eexit_2;

	/* The target file descriptor must support poll */
	error = -EPERM;
	if (!tfile->f_op || !tfile->f_op->poll)
		goto eexit_3;

	/*
	 * We have to check that the file structure underneath the file descriptor
	 * the user passed to us _is_ an eventpoll file. And also we do not permit
	 * adding an epoll file descriptor inside itself.
	 */
	error = -EINVAL;
	if (file == tfile || !is_file_epoll(file))
		goto eexit_3;

	/*
	 * At this point it is safe to assume that the "private_data" contains
	 * our own data structure.
	 */
	ep = file->private_data;

	down_write(&ep->sem);

	/* Try to lookup the file inside our RB tree */
	epi = ep_find(ep, tfile, fd);

	error = -EINVAL;
	switch (op) {
	case EPOLL_CTL_ADD:
		if (!epi) {
			epds.events |= POLLERR | POLLHUP;
			error = ep_insert(ep, &epds, tfile, fd);
		} else
			error = -EEXIST;
		break;
	case EPOLL_CTL_DEL:
		if (epi)
			error = ep_remove(ep, epi);
		else
			error = -ENOENT;
		break;
	case EPOLL_CTL_MOD:
		if (epi) {
			epds.events |= POLLERR | POLLHUP;
			error = ep_modify(ep, epi, &epds);
		} else
			error = -ENOENT;
		break;
	}

	/*
	 * The function ep_find() increments the usage count of the structure
	 * so, if this is not NULL, we need to release it.
	 */
	if (epi)
		ep_release_epitem(epi);

	up_write(&ep->sem);

eexit_3:
	fput(tfile);
eexit_2:
	fput(file);
eexit_1:
	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_ctl(%d, %d, %d, %p) = %d\n",
		     current, epfd, op, fd, event, error));

	return error;
}
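
/*
 * Userspace usage sketch for the controller interface above
 * ( illustrative only; "sock" is an assumed, already open socket
 * descriptor ):
 *
 *	struct epoll_event ev;
 *
 *	ev.events = EPOLLIN | EPOLLET;
 *	ev.data.fd = sock;
 *	if (epoll_ctl(epfd, EPOLL_CTL_ADD, sock, &ev) < 0)
 *		perror("epoll_ctl");
 */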
/*
 * Implement the event wait interface for the eventpoll file. It is the kernel
 * part of the user space epoll_wait(2).
 */
asmlinkage long sys_epoll_wait(int epfd, struct epoll_event __user *events,
			       int maxevents, int timeout)
{
	int error;
	struct file *file;
	struct eventpoll *ep;

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_wait(%d, %p, %d, %d)\n",
		     current, epfd, events, maxevents, timeout));

	/* The maximum number of events must be greater than zero */
	if (maxevents <= 0 || maxevents > EP_MAX_EVENTS)
		return -EINVAL;

	/* Verify that the area passed by the user is writeable */
	if (!access_ok(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event))) {
		error = -EFAULT;
		goto eexit_1;
	}

	/* Get the "struct file *" for the eventpoll file */
	error = -EBADF;
	file = fget(epfd);
	if (!file)
		goto eexit_1;

	/*
	 * We have to check that the file structure underneath the fd
	 * the user passed to us _is_ an eventpoll file.
	 */
	error = -EINVAL;
	if (!is_file_epoll(file))
		goto eexit_2;

	/*
	 * At this point it is safe to assume that the "private_data" contains
	 * our own data structure.
	 */
	ep = file->private_data;

	/* Time to fish for events ... */
	error = ep_poll(ep, events, maxevents, timeout);

eexit_2:
	fput(file);
eexit_1:
	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_wait(%d, %p, %d, %d) = %d\n",
		     current, epfd, events, maxevents, timeout, error));

	return error;
}
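
/*
 * Userspace usage sketch for the wait interface above ( illustrative
 * only; handle_fd() is a hypothetical handler, and 1000 is a one
 * second timeout in milliseconds ):
 *
 *	struct epoll_event evs[64];
 *	int i, n;
 *
 *	n = epoll_wait(epfd, evs, 64, 1000);
 *	for (i = 0; i < n; i++)
 *		handle_fd(evs[i].data.fd, evs[i].events);
 */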
#ifdef TIF_RESTORE_SIGMASK

/*
 * Implement the event wait interface for the eventpoll file. It is the kernel
 * part of the user space epoll_pwait(2).
 */
asmlinkage long sys_epoll_pwait(int epfd, struct epoll_event __user *events,
				int maxevents, int timeout, const sigset_t __user *sigmask,
				size_t sigsetsize)
{
	int error;
	sigset_t ksigmask, sigsaved;

	/*
	 * If the caller wants a certain signal mask to be set during the wait,
	 * we apply it here.
	 */
	if (sigmask) {
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;
		sigdelsetmask(&ksigmask, sigmask(SIGKILL) | sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	error = sys_epoll_wait(epfd, events, maxevents, timeout);

	/*
	 * If we changed the signal mask, we need to restore the original one.
	 * In case we've got a signal while waiting, we do not restore the
	 * signal mask yet, and we allow do_signal() to deliver the signal on
	 * the way back to userspace, before the signal mask is restored.
	 */
	if (sigmask) {
		if (error == -EINTR) {
			memcpy(&current->saved_sigmask, &sigsaved,
			       sizeof(sigsaved));
			set_thread_flag(TIF_RESTORE_SIGMASK);
		} else
			sigprocmask(SIG_SETMASK, &sigsaved, NULL);
	}

	return error;
}

#endif /* #ifdef TIF_RESTORE_SIGMASK */
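
/*
 * Userspace usage sketch for epoll_pwait(2) ( illustrative only ):
 * the caller atomically installs a mask that unblocks SIGUSR1 for
 * the duration of the wait, closing the race that a separate
 * sigprocmask() + epoll_wait() sequence would leave open:
 *
 *	sigset_t mask;
 *
 *	sigfillset(&mask);
 *	sigdelset(&mask, SIGUSR1);
 *	n = epoll_pwait(epfd, evs, 64, -1, &mask);
 */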
static int ep_alloc(struct eventpoll **pep)
{
	struct eventpoll *ep = kzalloc(sizeof(*ep), GFP_KERNEL);

	if (!ep)
		return -ENOMEM;

	rwlock_init(&ep->lock);
	init_rwsem(&ep->sem);
	init_waitqueue_head(&ep->wq);
	init_waitqueue_head(&ep->poll_wait);
	INIT_LIST_HEAD(&ep->rdllist);
	ep->rbr = RB_ROOT;

	*pep = ep;

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_alloc() ep=%p\n",
		     current, ep));
	return 0;
}
static void ep_free(struct eventpoll *ep)
{
	struct rb_node *rbp;
	struct epitem *epi;

	/* We need to release all tasks waiting for this file */
	if (waitqueue_active(&ep->poll_wait))
		ep_poll_safewake(&psw, &ep->poll_wait);

	/*
	 * We need to lock this because we could be hit by
	 * eventpoll_release_file() while we're freeing the "struct eventpoll".
	 * We do not need to hold "ep->sem" here because the epoll file
	 * is on the way to be removed and no one has references to it
	 * anymore. The only hit might come from eventpoll_release_file() but
	 * holding "epmutex" is sufficient here.
	 */
	mutex_lock(&epmutex);

	/*
	 * Walks through the whole tree by unregistering poll callbacks.
	 */
	for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
		epi = rb_entry(rbp, struct epitem, rbn);

		ep_unregister_pollwait(ep, epi);
	}

	/*
	 * Walks through the whole tree by freeing each "struct epitem". At this
	 * point we are sure no poll callbacks will be lingering around, and also by
	 * write-holding "sem" we can be sure that no file cleanup code will hit
	 * us during this operation. So we can avoid the lock on "ep->lock".
	 */
	while ((rbp = rb_first(&ep->rbr)) != 0) {
		epi = rb_entry(rbp, struct epitem, rbn);
		ep_remove(ep, epi);
	}

	mutex_unlock(&epmutex);
}
/*
 * Search the file inside the eventpoll tree. It adds a usage count to
 * the returned item, so the caller must call ep_release_epitem()
 * after finishing with the "struct epitem".
 */
static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
{
	int kcmp;
	unsigned long flags;
	struct rb_node *rbp;
	struct epitem *epi, *epir = NULL;
	struct epoll_filefd ffd;

	ep_set_ffd(&ffd, file, fd);
	read_lock_irqsave(&ep->lock, flags);
	for (rbp = ep->rbr.rb_node; rbp; ) {
		epi = rb_entry(rbp, struct epitem, rbn);
		kcmp = ep_cmp_ffd(&ffd, &epi->ffd);
		if (kcmp > 0)
			rbp = rbp->rb_right;
		else if (kcmp < 0)
			rbp = rbp->rb_left;
		else {
			ep_use_epitem(epi);
			epir = epi;
			break;
		}
	}
	read_unlock_irqrestore(&ep->lock, flags);

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_find(%p) -> %p\n",
		     current, file, epir));

	return epir;
}
/*
 * Increment the usage count of the "struct epitem", ensuring that the
 * user will have a valid pointer to reference.
 */
static void ep_use_epitem(struct epitem *epi)
{
	atomic_inc(&epi->usecnt);
}

/*
 * Decrement ( release ) the usage count by signaling that the user
 * has finished using the structure. It might lead to freeing the
 * structure itself if the count goes to zero.
 */
static void ep_release_epitem(struct epitem *epi)
{
	if (atomic_dec_and_test(&epi->usecnt))
		kmem_cache_free(epi_cache, epi);
}
/*
 * This is the callback that is used to add our wait queue to the
 * target file wakeup lists.
 */
static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
				 poll_table *pt)
{
	struct epitem *epi = ep_item_from_epqueue(pt);
	struct eppoll_entry *pwq;

	if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL))) {
		init_waitqueue_func_entry(&pwq->wait, ep_poll_callback);
		pwq->whead = whead;
		pwq->base = epi;
		add_wait_queue(whead, &pwq->wait);
		list_add_tail(&pwq->llink, &epi->pwqlist);
		epi->nwait++;
	} else {
		/* We have to signal that an error occurred */
		epi->nwait = -1;
	}
}
static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi)
{
	int kcmp;
	struct rb_node **p = &ep->rbr.rb_node, *parent = NULL;
	struct epitem *epic;

	while (*p) {
		parent = *p;
		epic = rb_entry(parent, struct epitem, rbn);
		kcmp = ep_cmp_ffd(&epi->ffd, &epic->ffd);
		if (kcmp > 0)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&epi->rbn, parent, p);
	rb_insert_color(&epi->rbn, &ep->rbr);
}
static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
		     struct file *tfile, int fd)
{
	int error, revents, pwake = 0;
	unsigned long flags;
	struct epitem *epi;
	struct ep_pqueue epq;

	error = -ENOMEM;
	if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL)))
		goto eexit_1;

	/* Item initialization follows here ... */
	ep_rb_initnode(&epi->rbn);
	INIT_LIST_HEAD(&epi->rdllink);
	INIT_LIST_HEAD(&epi->fllink);
	INIT_LIST_HEAD(&epi->pwqlist);
	epi->ep = ep;
	ep_set_ffd(&epi->ffd, tfile, fd);
	epi->event = *event;
	atomic_set(&epi->usecnt, 1);
	epi->nwait = 0;

	/* Initialize the poll table using the queue callback */
	epq.epi = epi;
	init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);

	/*
	 * Attach the item to the poll hooks and get current event bits.
	 * We can safely use the file* here because its usage count has
	 * been increased by the caller of this function.
	 */
	revents = tfile->f_op->poll(tfile, &epq.pt);

	/*
	 * We have to check if something went wrong during the poll wait queue
	 * install process. Namely an allocation for a wait queue failed due
	 * to high memory pressure.
	 */
	error = -ENOMEM;
	if (epi->nwait < 0)
		goto eexit_2;

	/* Add the current item to the list of active epoll hook for this file */
	spin_lock(&tfile->f_ep_lock);
	list_add_tail(&epi->fllink, &tfile->f_ep_links);
	spin_unlock(&tfile->f_ep_lock);

	/* We have to drop the new item inside our item list to keep track of it */
	write_lock_irqsave(&ep->lock, flags);

	/* Add the current item to the rb-tree */
	ep_rbtree_insert(ep, epi);

	/* If the file is already "ready" we drop it inside the ready list */
	if ((revents & event->events) && !ep_is_linked(&epi->rdllink)) {
		list_add_tail(&epi->rdllink, &ep->rdllist);

		/* Notify waiting tasks that events are available */
		if (waitqueue_active(&ep->wq))
			__wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE);
		if (waitqueue_active(&ep->poll_wait))
			pwake++;
	}

	write_unlock_irqrestore(&ep->lock, flags);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&psw, &ep->poll_wait);

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_insert(%p, %p, %d)\n",
		     current, ep, tfile, fd));

	return 0;

eexit_2:
	ep_unregister_pollwait(ep, epi);

	/*
	 * We need to do this because an event could have arrived on some
	 * allocated wait queue.
	 */
	write_lock_irqsave(&ep->lock, flags);
	if (ep_is_linked(&epi->rdllink))
		list_del_init(&epi->rdllink);
	write_unlock_irqrestore(&ep->lock, flags);

	kmem_cache_free(epi_cache, epi);
eexit_1:
	return error;
}
/*
 * Modify the interest event mask by dropping an event if the new mask
 * has a match in the current file status.
 */
static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_event *event)
{
	int pwake = 0;
	unsigned int revents;
	unsigned long flags;

	/*
	 * Set the new event interest mask before calling f_op->poll(), otherwise
	 * a potential race might occur. In fact if we do this operation inside
	 * the lock, an event might happen between the f_op->poll() call and the
	 * new event set registering.
	 */
	epi->event.events = event->events;

	/*
	 * Get current event bits. We can safely use the file* here because
	 * its usage count has been increased by the caller of this function.
	 */
	revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL);

	write_lock_irqsave(&ep->lock, flags);

	/* Copy the data member from inside the lock */
	epi->event.data = event->data;

	/*
	 * If the item is not linked to the RB tree it means that it's on its
	 * way toward the removal. Do nothing in this case.
	 */
	if (ep_rb_linked(&epi->rbn)) {
		/*
		 * If the item is "hot" and it is not registered inside the ready
		 * list, push it inside. If the item is not "hot" and it is currently
		 * registered inside the ready list, unlink it.
		 */
		if (revents & event->events) {
			if (!ep_is_linked(&epi->rdllink)) {
				list_add_tail(&epi->rdllink, &ep->rdllist);

				/* Notify waiting tasks that events are available */
				if (waitqueue_active(&ep->wq))
					__wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
							 TASK_INTERRUPTIBLE);
				if (waitqueue_active(&ep->poll_wait))
					pwake++;
			}
		}
	}

	write_unlock_irqrestore(&ep->lock, flags);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&psw, &ep->poll_wait);

	return 0;
}
/*
 * This function unregisters poll callbacks from the associated file descriptor.
 * Since this must be called without holding "ep->lock" the atomic exchange trick
 * will protect us from multiple unregisters.
 */
static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
{
	int nwait;
	struct list_head *lsthead = &epi->pwqlist;
	struct eppoll_entry *pwq;

	/* This is called without locks, so we need the atomic exchange */
	nwait = xchg(&epi->nwait, 0);

	if (nwait) {
		while (!list_empty(lsthead)) {
			pwq = list_first_entry(lsthead, struct eppoll_entry, llink);

			list_del_init(&pwq->llink);
			remove_wait_queue(pwq->whead, &pwq->wait);
			kmem_cache_free(pwq_cache, pwq);
		}
	}
}
/*
 * Unlink the "struct epitem" from all places it might have been hooked up.
 * This function must be called with write IRQ lock on "ep->lock".
 */
static int ep_unlink(struct eventpoll *ep, struct epitem *epi)
{
	int error;

	/*
	 * It can happen that this one is called for an item already unlinked.
	 * The check protects us from doing a double unlink ( crash ).
	 */
	error = -ENOENT;
	if (!ep_rb_linked(&epi->rbn))
		goto eexit_1;

	/*
	 * Clear the event mask for the unlinked item. This will avoid item
	 * notifications to be sent after the unlink operation from inside
	 * the kernel->userspace event transfer loop.
	 */
	epi->event.events = 0;

	/*
	 * At this point it is safe to do the job, unlink the item from our rb-tree.
	 * This operation together with the above check closes the door to
	 * double unlinks.
	 */
	ep_rb_erase(&epi->rbn, &ep->rbr);

	/*
	 * If the item we are going to remove is inside the ready file descriptors
	 * we want to remove it from this list to avoid stale events.
	 */
	if (ep_is_linked(&epi->rdllink))
		list_del_init(&epi->rdllink);

	error = 0;
eexit_1:

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_unlink(%p, %p) = %d\n",
		     current, ep, epi->ffd.file, error));

	return error;
}
/*
 * Removes a "struct epitem" from the eventpoll RB tree and deallocates
 * all the associated resources.
 */
static int ep_remove(struct eventpoll *ep, struct epitem *epi)
{
	int error;
	unsigned long flags;
	struct file *file = epi->ffd.file;

	/*
	 * Removes poll wait queue hooks. We _have_ to do this without holding
	 * the "ep->lock" otherwise a deadlock might occur. This is because of the
	 * sequence of the lock acquisition. Here we do "ep->lock" then the wait
	 * queue head lock when unregistering the wait queue. The wakeup callback
	 * will run by holding the wait queue head lock and will call our callback
	 * that will try to get "ep->lock".
	 */
	ep_unregister_pollwait(ep, epi);

	/* Remove the current item from the list of epoll hooks */
	spin_lock(&file->f_ep_lock);
	if (ep_is_linked(&epi->fllink))
		list_del_init(&epi->fllink);
	spin_unlock(&file->f_ep_lock);

	/* We need to acquire the write IRQ lock before calling ep_unlink() */
	write_lock_irqsave(&ep->lock, flags);

	/* Really unlink the item from the RB tree */
	error = ep_unlink(ep, epi);

	write_unlock_irqrestore(&ep->lock, flags);

	if (error)
		goto eexit_1;

	/* At this point it is safe to free the eventpoll item */
	ep_release_epitem(epi);

	error = 0;
eexit_1:
	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_remove(%p, %p) = %d\n",
		     current, ep, file, error));

	return error;
}
/*
 * This is the callback that is passed to the wait queue wakeup
 * mechanism. It is called by the stored file descriptors when they
 * have events to report.
 */
static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	int pwake = 0;
	unsigned long flags;
	struct epitem *epi = ep_item_from_wait(wait);
	struct eventpoll *ep = epi->ep;

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: poll_callback(%p) epi=%p ep=%p\n",
		     current, epi->ffd.file, epi, ep));

	write_lock_irqsave(&ep->lock, flags);

	/*
	 * If the event mask does not contain any poll(2) event, we consider the
	 * descriptor to be disabled. This condition is likely the effect of the
	 * EPOLLONESHOT bit that disables the descriptor when an event is received,
	 * until the next EPOLL_CTL_MOD will be issued.
	 */
	if (!(epi->event.events & ~EP_PRIVATE_BITS))
		goto is_disabled;

	/* If this file is already in the ready list we exit soon */
	if (ep_is_linked(&epi->rdllink))
		goto is_linked;

	list_add_tail(&epi->rdllink, &ep->rdllist);

is_linked:
	/*
	 * Wake up ( if active ) both the eventpoll wait list and the ->poll()
	 * wait list.
	 */
	if (waitqueue_active(&ep->wq))
		__wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
				 TASK_INTERRUPTIBLE);
	if (waitqueue_active(&ep->poll_wait))
		pwake++;

is_disabled:
	write_unlock_irqrestore(&ep->lock, flags);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&psw, &ep->poll_wait);

	return 1;
}
static int ep_eventpoll_close(struct inode *inode, struct file *file)
{
	struct eventpoll *ep = file->private_data;

	if (ep)
		ep_free(ep);

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: close() ep=%p\n", current, ep));
	return 0;
}
static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
{
	unsigned int pollflags = 0;
	unsigned long flags;
	struct eventpoll *ep = file->private_data;

	/* Insert inside our poll wait queue */
	poll_wait(file, &ep->poll_wait, wait);

	/* Check our condition */
	read_lock_irqsave(&ep->lock, flags);
	if (!list_empty(&ep->rdllist))
		pollflags = POLLIN | POLLRDNORM;
	read_unlock_irqrestore(&ep->lock, flags);

	return pollflags;
}
/*
 * This function is called without holding the "ep->lock" since the call to
 * __copy_to_user() might sleep, and also f_op->poll() might reenable the IRQ
 * because of the way poll() is traditionally implemented in Linux.
 */
static int ep_send_events(struct eventpoll *ep, struct list_head *txlist,
			  struct epoll_event __user *events, int maxevents)
{
	int eventcnt, error = -EFAULT, pwake = 0;
	unsigned int revents;
	unsigned long flags;
	struct epitem *epi;
	struct list_head injlist;

	INIT_LIST_HEAD(&injlist);

	/*
	 * We can loop without lock because this is a task private list.
	 * We just splice'd out the ep->rdllist in ep_events_transfer().
	 * Items cannot vanish during the loop because we are holding "sem" in
	 * read.
	 */
	for (eventcnt = 0; !list_empty(txlist) && eventcnt < maxevents;) {
		epi = list_first_entry(txlist, struct epitem, rdllink);
		prefetch(epi->rdllink.next);

		/*
		 * Get the ready file event set. We can safely use the file
		 * because we are holding the "sem" in read and this will
		 * guarantee that both the file and the item will not vanish.
		 */
		revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL);
		revents &= epi->event.events;

		/*
		 * If the event mask intersects the caller-requested one,
		 * deliver the event to userspace. Again, we are holding
		 * "sem" in read, so no operations coming from userspace
		 * can change the item.
		 */
		if (revents) {
			if (__put_user(revents,
				       &events[eventcnt].events) ||
			    __put_user(epi->event.data,
				       &events[eventcnt].data))
				goto errxit;
			if (epi->event.events & EPOLLONESHOT)
				epi->event.events &= EP_PRIVATE_BITS;
			eventcnt++;
		}

		/*
		 * This is tricky. We are holding the "sem" in read, and this
		 * means that the operations that can change the "linked" status
		 * of the epoll item (epi->rbn and epi->rdllink), cannot touch
		 * them. Also, since we are "linked" from a epi->rdllink POV
		 * (the item is linked to our transmission list we just
		 * spliced), the ep_poll_callback() cannot touch us either,
		 * because of the check present in there. Another parallel
		 * epoll_wait() will not get the same result set, since we
		 * spliced the ready list before. Note that list_del() still
		 * shows the item as linked to the test in ep_poll_callback().
		 */
		list_del(&epi->rdllink);
		if (!(epi->event.events & EPOLLET) &&
		    (revents & epi->event.events))
			list_add_tail(&epi->rdllink, &injlist);
		else {
			/*
			 * Be sure the item is totally detached before re-init
			 * the list_head. After INIT_LIST_HEAD() is committed,
			 * the ep_poll_callback() can requeue the item again,
			 * but we don't care since we are already past it.
			 */
			smp_mb();
			INIT_LIST_HEAD(&epi->rdllink);
		}
	}
	error = 0;

errxit:

	/*
	 * If the re-injection list or the txlist are not empty, re-splice
	 * them to the ready list and do proper wakeups.
	 */
	if (!list_empty(&injlist) || !list_empty(txlist)) {
		write_lock_irqsave(&ep->lock, flags);

		list_splice(txlist, &ep->rdllist);
		list_splice(&injlist, &ep->rdllist);
		/*
		 * Wake up ( if active ) both the eventpoll wait list and the ->poll()
		 * wait list.
		 */
		if (waitqueue_active(&ep->wq))
			__wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
					 TASK_INTERRUPTIBLE);
		if (waitqueue_active(&ep->poll_wait))
			pwake++;

		write_unlock_irqrestore(&ep->lock, flags);
	}

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&psw, &ep->poll_wait);

	return eventcnt == 0 ? error : eventcnt;
}
/*
 * Perform the transfer of events to user space.
 */
static int ep_events_transfer(struct eventpoll *ep,
			      struct epoll_event __user *events, int maxevents)
{
	int eventcnt;
	unsigned long flags;
	struct list_head txlist;

	INIT_LIST_HEAD(&txlist);

	/*
	 * We need to lock this because we could be hit by
	 * eventpoll_release_file() and epoll_ctl(EPOLL_CTL_DEL).
	 */
	down_read(&ep->sem);

	/*
	 * Steal the ready list, and re-init the original one to the
	 * empty list.
	 */
	write_lock_irqsave(&ep->lock, flags);
	list_splice(&ep->rdllist, &txlist);
	INIT_LIST_HEAD(&ep->rdllist);
	write_unlock_irqrestore(&ep->lock, flags);

	/* Build result set in userspace */
	eventcnt = ep_send_events(ep, &txlist, events, maxevents);

	up_read(&ep->sem);

	return eventcnt;
}
static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
		   int maxevents, long timeout)
{
	int res, eavail;
	unsigned long flags;
	long jtimeout;
	wait_queue_t wait;

	/*
	 * Calculate the timeout by checking for the "infinite" value ( -1 )
	 * and the overflow condition. The passed timeout is in milliseconds,
	 * that's why (t * HZ) / 1000.
	 */
	jtimeout = (timeout < 0 || timeout >= EP_MAX_MSTIMEO) ?
		MAX_SCHEDULE_TIMEOUT : (timeout * HZ + 999) / 1000;

retry:
	write_lock_irqsave(&ep->lock, flags);

	res = 0;
	if (list_empty(&ep->rdllist)) {
		/*
		 * We don't have any available event to return to the caller.
		 * We need to sleep here, and we will be woken up by
		 * ep_poll_callback() when events become available.
		 */
		init_waitqueue_entry(&wait, current);
		__add_wait_queue(&ep->wq, &wait);

		for (;;) {
			/*
			 * We don't want to sleep if the ep_poll_callback() sends us
			 * a wakeup in between. That's why we set the task state
			 * to TASK_INTERRUPTIBLE before doing the checks.
			 */
			set_current_state(TASK_INTERRUPTIBLE);
			if (!list_empty(&ep->rdllist) || !jtimeout)
				break;
			if (signal_pending(current)) {
				res = -EINTR;
				break;
			}

			write_unlock_irqrestore(&ep->lock, flags);
			jtimeout = schedule_timeout(jtimeout);
			write_lock_irqsave(&ep->lock, flags);
		}
		__remove_wait_queue(&ep->wq, &wait);

		set_current_state(TASK_RUNNING);
	}

	/* Is it worth trying to dig for events ? */
	eavail = !list_empty(&ep->rdllist);

	write_unlock_irqrestore(&ep->lock, flags);

	/*
	 * Try to transfer events to user space. In case we get 0 events and
	 * there's still timeout left over, we go trying again in search of
	 * more luck.
	 */
	if (!res && eavail &&
	    !(res = ep_events_transfer(ep, events, maxevents)) && jtimeout)
		goto retry;

	return res;
}
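
/*
 * Worked example of the jiffies conversion in ep_poll() above,
 * assuming HZ == 100: timeout == 250 ms gives
 * (250 * 100 + 999) / 1000 = 25 jiffies ( exactly 250 ms ), while
 * timeout == 255 ms gives (255 * 100 + 999) / 1000 = 26 jiffies
 * ( 260 ms ); the "+ 999" rounds partial jiffies up, so we never
 * sleep shorter than the caller requested.
 */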
static int __init eventpoll_init(void)
{
	mutex_init(&epmutex);

	/* Initialize the structure used to perform safe poll wait head wake ups */
	ep_poll_safewake_init(&psw);

	/* Allocates slab cache used to allocate "struct epitem" items */
	epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem),
				      0, SLAB_HWCACHE_ALIGN|EPI_SLAB_DEBUG|SLAB_PANIC,
				      NULL, NULL);

	/* Allocates slab cache used to allocate "struct eppoll_entry" */
	pwq_cache = kmem_cache_create("eventpoll_pwq",
				      sizeof(struct eppoll_entry), 0,
				      EPI_SLAB_DEBUG|SLAB_PANIC, NULL, NULL);

	return 0;
}
fs_initcall(eventpoll_init);