/*
 *  fs/eventpoll.c (Efficient event retrieval implementation)
 *  Copyright (C) 2001,...,2009	 Davide Libenzi
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  Davide Libenzi <davidel@xmailserver.org>
 *
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/rbtree.h>
#include <linux/wait.h>
#include <linux/eventpoll.h>
#include <linux/mount.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/anon_inodes.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/atomic.h>
/*
 * LOCKING:
 * There are three levels of locking required by epoll:
 *
 * 1) epmutex (mutex)
 * 2) ep->mtx (mutex)
 * 3) ep->lock (spinlock)
 *
 * The acquire order is the one listed above, from 1 to 3.
 * We need a spinlock (ep->lock) because we manipulate objects
 * from inside the poll callback, which might be triggered from
 * a wake_up() that in turn might be called from IRQ context.
 * So we can't sleep inside the poll callback and hence we need
 * a spinlock. During the event transfer loop (from kernel to
 * user space) we could end up sleeping due to a copy_to_user(), so
 * we need a lock that will allow us to sleep. This lock is a
 * mutex (ep->mtx). It is acquired during the event transfer loop,
 * during epoll_ctl(EPOLL_CTL_DEL) and during eventpoll_release_file().
 * Then we also need a global mutex to serialize eventpoll_release_file()
 * and ep_free().
 * This mutex is acquired by ep_free() during the epoll file
 * cleanup path and it is also acquired by eventpoll_release_file()
 * if a file has been pushed inside an epoll set and is then
 * close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL).
 * It is possible to drop the "ep->mtx" and to use the global
 * mutex "epmutex" (together with "ep->lock") to have it working,
 * but having "ep->mtx" makes the interface more scalable.
 * Events that require holding "epmutex" are very rare, while for
 * normal operations the epoll private "ep->mtx" will guarantee
 * better scalability.
 */
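/*
 * A minimal sketch of the acquire order described above (illustrative
 * only, not an actual call site in this file):
 *
 *	mutex_lock(&epmutex);			// 1) global mutex, rare paths only
 *	mutex_lock(&ep->mtx);			// 2) per-epoll mutex, can sleep
 *	spin_lock_irqsave(&ep->lock, flags);	// 3) innermost spinlock, IRQ safe
 *	...
 *	spin_unlock_irqrestore(&ep->lock, flags);
 *	mutex_unlock(&ep->mtx);
 *	mutex_unlock(&epmutex);
 */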
#define DEBUG_EPOLL 0

#if DEBUG_EPOLL > 0
#define DPRINTK(x) printk x
#define DNPRINTK(n, x) do { if ((n) <= DEBUG_EPOLL) printk x; } while (0)
#else /* #if DEBUG_EPOLL > 0 */
#define DPRINTK(x) (void) 0
#define DNPRINTK(n, x) (void) 0
#endif /* #if DEBUG_EPOLL > 0 */

#define DEBUG_EPI 0

#if DEBUG_EPI != 0
#define EPI_SLAB_DEBUG (SLAB_DEBUG_FREE | SLAB_RED_ZONE /* | SLAB_POISON */)
#else /* #if DEBUG_EPI != 0 */
#define EPI_SLAB_DEBUG 0
#endif /* #if DEBUG_EPI != 0 */
/* Epoll private bits inside the event mask */
#define EP_PRIVATE_BITS (EPOLLONESHOT | EPOLLET)

/* Maximum number of nesting allowed inside epoll sets */
#define EP_MAX_NESTS 4

/* Maximum msec timeout value storable in a long int */
#define EP_MAX_MSTIMEO min(1000ULL * MAX_SCHEDULE_TIMEOUT / HZ, (LONG_MAX - 999ULL) / HZ)

#define EP_MAX_EVENTS (INT_MAX / sizeof(struct epoll_event))

#define EP_UNACTIVE_PTR ((void *) -1L)

#define EP_ITEM_COST (sizeof(struct epitem) + sizeof(struct eppoll_entry))
struct epoll_filefd {
	struct file *file;
	int fd;
};

/*
 * Structure used to track possible nested calls, for too deep recursions
 * and loop cycles.
 */
struct nested_call_node {
	struct list_head llink;
	struct task_struct *task;
	void *cookie;
};
/*
 * This structure is used as collector for nested calls, to check for
 * maximum recursion depth and loop cycles.
 */
struct nested_calls {
	struct list_head tasks_call_list;
	spinlock_t lock;
};
/*
 * Each file descriptor added to the eventpoll interface will
 * have an entry of this type linked to the "rbr" RB tree.
 */
struct epitem {
	/* RB tree node used to link this structure to the eventpoll RB tree */
	struct rb_node rbn;

	/* List header used to link this structure to the eventpoll ready list */
	struct list_head rdllink;

	/*
	 * Works together with "struct eventpoll"->ovflist in keeping the
	 * single linked chain of items.
	 */
	struct epitem *next;

	/* The file descriptor information this item refers to */
	struct epoll_filefd ffd;

	/* Number of active wait queues attached to poll operations */
	int nwait;

	/* List containing poll wait queues */
	struct list_head pwqlist;

	/* The "container" of this item */
	struct eventpoll *ep;

	/* List header used to link this item to the "struct file" items list */
	struct list_head fllink;

	/* The structure that describes the interested events and the source fd */
	struct epoll_event event;
};

/*
 * This structure is stored inside the "private_data" member of the file
 * structure and represents the main data structure for the eventpoll
 * interface.
 */
struct eventpoll {
	/* Protect the access to this structure */
	spinlock_t lock;

	/*
	 * This mutex is used to ensure that files are not removed
	 * while epoll is using them. This is held during the event
	 * collection loop, the file cleanup path, the epoll file exit
	 * code and the ctl operations.
	 */
	struct mutex mtx;

	/* Wait queue used by sys_epoll_wait() */
	wait_queue_head_t wq;

	/* Wait queue used by file->poll() */
	wait_queue_head_t poll_wait;

	/* List of ready file descriptors */
	struct list_head rdllist;

	/* RB tree root used to store monitored fd structs */
	struct rb_root rbr;

	/*
	 * This is a single linked list that chains all the "struct epitem" that
	 * happened while transferring ready events to userspace w/out
	 * holding ->lock.
	 */
	struct epitem *ovflist;

	/* The user that created the eventpoll descriptor */
	struct user_struct *user;
};
/* Wait structure used by the poll hooks */
struct eppoll_entry {
	/* List header used to link this structure to the "struct epitem" */
	struct list_head llink;

	/* The "base" pointer is set to the container "struct epitem" */
	struct epitem *base;

	/*
	 * Wait queue item that will be linked to the target file wait
	 * queue head.
	 */
	wait_queue_t wait;

	/* The wait queue head that linked the "wait" wait queue item */
	wait_queue_head_t *whead;
};

/* Wrapper struct used by poll queueing */
struct ep_pqueue {
	poll_table pt;
	struct epitem *epi;
};

/* Used by the ep_send_events() function as callback private data */
struct ep_send_events_data {
	int maxevents;
	struct epoll_event __user *events;
};
/*
 * Configuration options available inside /proc/sys/fs/epoll/
 */
/* Maximum number of epoll watched descriptors, per user */
static int max_user_watches __read_mostly;

/*
 * This mutex is used to serialize ep_free() and eventpoll_release_file().
 */
static DEFINE_MUTEX(epmutex);

/* Used for safe wake up implementation */
static struct nested_calls poll_safewake_ncalls;

/* Used to call file's f_op->poll() under the nested calls boundaries */
static struct nested_calls poll_readywalk_ncalls;

/* Slab cache used to allocate "struct epitem" */
static struct kmem_cache *epi_cache __read_mostly;

/* Slab cache used to allocate "struct eppoll_entry" */
static struct kmem_cache *pwq_cache __read_mostly;

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int zero;

ctl_table epoll_table[] = {
	{
		.procname	= "max_user_watches",
		.data		= &max_user_watches,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.extra1		= &zero,
	},
	{ .ctl_name = 0 }
};
#endif /* CONFIG_SYSCTL */
/* Setup the structure that is used as key for the RB tree */
static inline void ep_set_ffd(struct epoll_filefd *ffd,
			      struct file *file, int fd)
{
	ffd->file = file;
	ffd->fd = fd;
}

/* Compare RB tree keys */
static inline int ep_cmp_ffd(struct epoll_filefd *p1,
			     struct epoll_filefd *p2)
{
	return (p1->file > p2->file ? +1 :
		(p1->file < p2->file ? -1 : p1->fd - p2->fd));
}
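/*
 * Illustrative ordering (hypothetical values): two watches on the same
 * struct file * compare by descriptor, so ep_cmp_ffd() on {f, 4} and
 * {f, 7} yields 4 - 7 = -3 (< 0), while watches on different files are
 * ordered by the file pointer value alone.
 */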
/* Tells us if the item is currently linked */
static inline int ep_is_linked(struct list_head *p)
{
	return !list_empty(p);
}

/* Get the "struct epitem" from a wait queue pointer */
static inline struct epitem *ep_item_from_wait(wait_queue_t *p)
{
	return container_of(p, struct eppoll_entry, wait)->base;
}

/* Get the "struct epitem" from an epoll queue wrapper */
static inline struct epitem *ep_item_from_epqueue(poll_table *p)
{
	return container_of(p, struct ep_pqueue, pt)->epi;
}

/* Tells if the epoll_ctl(2) operation needs an event copy from userspace */
static inline int ep_op_has_event(int op)
{
	return op != EPOLL_CTL_DEL;
}

/* Initialize the poll safe wake up structure */
static void ep_nested_calls_init(struct nested_calls *ncalls)
{
	INIT_LIST_HEAD(&ncalls->tasks_call_list);
	spin_lock_init(&ncalls->lock);
}
/**
 * ep_call_nested - Perform a bound (possibly) nested call, by checking
 *                  that the recursion limit is not exceeded, and that
 *                  the same nested call (by the meaning of same cookie) is
 *                  not already running.
 *
 * @ncalls: Pointer to the nested_calls structure to be used for this call.
 * @max_nests: Maximum number of allowed nesting calls.
 * @nproc: Nested call core function pointer.
 * @priv: Opaque data to be passed to the @nproc callback.
 * @cookie: Cookie to be used to identify this nested call.
 *
 * Returns: Returns the code returned by the @nproc callback, or -1 if
 *          the maximum recursion limit has been exceeded.
 */
static int ep_call_nested(struct nested_calls *ncalls, int max_nests,
			  int (*nproc)(void *, void *, int), void *priv,
			  void *cookie)
{
	int error, call_nests = 0;
	unsigned long flags;
	struct task_struct *this_task = current;
	struct list_head *lsthead = &ncalls->tasks_call_list;
	struct nested_call_node *tncur;
	struct nested_call_node tnode;

	spin_lock_irqsave(&ncalls->lock, flags);

	/*
	 * Try to see if the current task is already inside this wakeup call.
	 * We use a list here, since the population inside this set is always
	 * very much limited.
	 */
	list_for_each_entry(tncur, lsthead, llink) {
		if (tncur->task == this_task &&
		    (tncur->cookie == cookie || ++call_nests > max_nests)) {
			/*
			 * Oops ... loop detected or maximum nest level reached.
			 * We abort this wake by breaking the cycle itself.
			 */
			spin_unlock_irqrestore(&ncalls->lock, flags);

			return -1;
		}
	}

	/* Add the current task and cookie to the list */
	tnode.task = this_task;
	tnode.cookie = cookie;
	list_add(&tnode.llink, lsthead);

	spin_unlock_irqrestore(&ncalls->lock, flags);

	/* Call the nested function */
	error = (*nproc)(priv, cookie, call_nests);

	/* Remove the current task from the list */
	spin_lock_irqsave(&ncalls->lock, flags);
	list_del(&tnode.llink);
	spin_unlock_irqrestore(&ncalls->lock, flags);

	return error;
}
static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests)
{
	wake_up_nested((wait_queue_head_t *) cookie, 1 + call_nests);
	return 0;
}
/*
 * Perform a safe wake up of the poll wait list. The problem is that
 * with the new callback'd wake up system, it is possible that the
 * poll callback is reentered from inside the call to wake_up() done
 * on the poll wait queue head. The rule is that we cannot reenter the
 * wake up code from the same task more than EP_MAX_NESTS times,
 * and we cannot reenter the same wait queue head at all. This enables
 * a hierarchy of epoll file descriptors no more than EP_MAX_NESTS
 * levels deep.
 */
static void ep_poll_safewake(wait_queue_head_t *wq)
{
	ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS,
		       ep_poll_wakeup_proc, NULL, wq);
}
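/*
 * Illustrative userspace nesting (hypothetical descriptors) showing why
 * the wakeup depth must be bounded: an epoll set can itself be watched
 * by another epoll set.
 *
 *	int inner = epoll_create1(0);
 *	int outer = epoll_create1(0);
 *	struct epoll_event ev = { .events = EPOLLIN, .data.fd = inner };
 *
 *	epoll_ctl(outer, EPOLL_CTL_ADD, inner, &ev);
 *	// a wakeup on "inner" now propagates to "outer" through
 *	// ep_poll_safewake(), nesting at most EP_MAX_NESTS levels deep
 */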
/*
 * This function unregisters poll callbacks from the associated file
 * descriptor. Since this must be called without holding "ep->lock" the
 * atomic exchange trick will protect us from multiple unregistrations.
 */
static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
{
	int nwait;
	struct list_head *lsthead = &epi->pwqlist;
	struct eppoll_entry *pwq;

	/* This is called without locks, so we need the atomic exchange */
	nwait = xchg(&epi->nwait, 0);

	if (nwait) {
		while (!list_empty(lsthead)) {
			pwq = list_first_entry(lsthead, struct eppoll_entry, llink);

			list_del_init(&pwq->llink);
			remove_wait_queue(pwq->whead, &pwq->wait);
			kmem_cache_free(pwq_cache, pwq);
		}
	}
}
/**
 * ep_scan_ready_list - Scans the ready list in a way that makes it possible
 *                      for the scan code to call f_op->poll(). Also allows
 *                      for O(NumReady) performance.
 *
 * @ep: Pointer to the epoll private data structure.
 * @sproc: Pointer to the scan callback.
 * @priv: Private opaque data passed to the @sproc callback.
 *
 * Returns: The same integer error code returned by the @sproc callback.
 */
static int ep_scan_ready_list(struct eventpoll *ep,
			      int (*sproc)(struct eventpoll *,
					   struct list_head *, void *),
			      void *priv)
{
	int error, pwake = 0;
	unsigned long flags;
	struct epitem *epi, *nepi;
	struct list_head txlist;

	INIT_LIST_HEAD(&txlist);

	/*
	 * We need to lock this because we could be hit by
	 * eventpoll_release_file() and epoll_ctl(EPOLL_CTL_DEL).
	 */
	mutex_lock(&ep->mtx);

	/*
	 * Steal the ready list, and re-init the original one to the
	 * empty list. Also, set ep->ovflist to NULL so that events
	 * happening while looping w/out locks, are not lost. We cannot
	 * have the poll callback to queue directly on ep->rdllist,
	 * because we want the "sproc" callback to be able to do it
	 * in a lockless way.
	 */
	spin_lock_irqsave(&ep->lock, flags);
	list_splice(&ep->rdllist, &txlist);
	INIT_LIST_HEAD(&ep->rdllist);
	ep->ovflist = NULL;
	spin_unlock_irqrestore(&ep->lock, flags);

	/*
	 * Now call the callback function.
	 */
	error = (*sproc)(ep, &txlist, priv);

	spin_lock_irqsave(&ep->lock, flags);
	/*
	 * During the time we spent inside the "sproc" callback, some
	 * other events might have been queued by the poll callback.
	 * We re-insert them inside the main ready-list here.
	 */
	for (nepi = ep->ovflist; (epi = nepi) != NULL;
	     nepi = epi->next, epi->next = EP_UNACTIVE_PTR) {
		/*
		 * We need to check if the item is already in the list.
		 * During the "sproc" callback execution time, items are
		 * queued into ->ovflist but the "txlist" might already
		 * contain them, and the list_splice() below takes care of them.
		 */
		if (!ep_is_linked(&epi->rdllink))
			list_add_tail(&epi->rdllink, &ep->rdllist);
	}
	/*
	 * We need to set back ep->ovflist to EP_UNACTIVE_PTR, so that after
	 * releasing the lock, events will be queued in the normal way inside
	 * ep->rdllist.
	 */
	ep->ovflist = EP_UNACTIVE_PTR;

	/*
	 * Quickly re-inject items left on "txlist".
	 */
	list_splice(&txlist, &ep->rdllist);

	if (!list_empty(&ep->rdllist)) {
		/*
		 * Wake up (if active) both the eventpoll wait list and the ->poll()
		 * wait list (delayed after we release the lock).
		 */
		if (waitqueue_active(&ep->wq))
			wake_up_locked(&ep->wq);
		if (waitqueue_active(&ep->poll_wait))
			pwake++;
	}
	spin_unlock_irqrestore(&ep->lock, flags);

	mutex_unlock(&ep->mtx);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&ep->poll_wait);

	return error;
}
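/*
 * Summary of the ep->ovflist states used above (a restatement, not a
 * new mechanism): EP_UNACTIVE_PTR means no scan is running, so
 * ep_poll_callback() queues ready items directly on ep->rdllist; NULL
 * means a scan is in flight, so new events are chained on ep->ovflist
 * instead and re-injected into ep->rdllist once the scan completes.
 */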
/*
 * Removes a "struct epitem" from the eventpoll RB tree and deallocates
 * all the associated resources. Must be called with "mtx" held.
 */
static int ep_remove(struct eventpoll *ep, struct epitem *epi)
{
	unsigned long flags;
	struct file *file = epi->ffd.file;

	/*
	 * Removes poll wait queue hooks. We _have_ to do this without holding
	 * the "ep->lock" otherwise a deadlock might occur. This is because of
	 * the sequence of the lock acquisition. Here we do "ep->lock" then the
	 * wait queue head lock when unregistering the wait queue. The wakeup
	 * callback will run by holding the wait queue head lock and will call
	 * our callback that will try to get "ep->lock".
	 */
	ep_unregister_pollwait(ep, epi);

	/* Remove the current item from the list of epoll hooks */
	spin_lock(&file->f_lock);
	if (ep_is_linked(&epi->fllink))
		list_del_init(&epi->fllink);
	spin_unlock(&file->f_lock);

	rb_erase(&epi->rbn, &ep->rbr);

	spin_lock_irqsave(&ep->lock, flags);
	if (ep_is_linked(&epi->rdllink))
		list_del_init(&epi->rdllink);
	spin_unlock_irqrestore(&ep->lock, flags);

	/* At this point it is safe to free the eventpoll item */
	kmem_cache_free(epi_cache, epi);

	atomic_dec(&ep->user->epoll_watches);

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_remove(%p, %p)\n",
		     current, ep, file));

	return 0;
}
static void ep_free(struct eventpoll *ep)
{
	struct rb_node *rbp;
	struct epitem *epi;

	/* We need to release all tasks waiting for this file */
	if (waitqueue_active(&ep->poll_wait))
		ep_poll_safewake(&ep->poll_wait);

	/*
	 * We need to lock this because we could be hit by
	 * eventpoll_release_file() while we're freeing the "struct eventpoll".
	 * We do not need to hold "ep->mtx" here because the epoll file
	 * is on the way to be removed and no one has references to it
	 * anymore. The only hit might come from eventpoll_release_file() but
	 * holding "epmutex" is sufficient here.
	 */
	mutex_lock(&epmutex);

	/*
	 * Walks through the whole tree by unregistering poll callbacks.
	 */
	for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
		epi = rb_entry(rbp, struct epitem, rbn);

		ep_unregister_pollwait(ep, epi);
	}

	/*
	 * Walks through the whole tree by freeing each "struct epitem". At this
	 * point we are sure no poll callbacks will be lingering around, and also by
	 * holding "epmutex" we can be sure that no file cleanup code will hit
	 * us during this operation. So we can avoid the lock on "ep->lock".
	 */
	while ((rbp = rb_first(&ep->rbr)) != NULL) {
		epi = rb_entry(rbp, struct epitem, rbn);
		ep_remove(ep, epi);
	}

	mutex_unlock(&epmutex);
	mutex_destroy(&ep->mtx);
	free_uid(ep->user);
	kfree(ep);
}
static int ep_eventpoll_release(struct inode *inode, struct file *file)
{
	struct eventpoll *ep = file->private_data;

	if (ep)
		ep_free(ep);

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: close() ep=%p\n", current, ep));
	return 0;
}
static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
			       void *priv)
{
	struct epitem *epi, *tmp;

	list_for_each_entry_safe(epi, tmp, head, rdllink) {
		if (epi->ffd.file->f_op->poll(epi->ffd.file, NULL) &
		    epi->event.events)
			return POLLIN | POLLRDNORM;
		else {
			/*
			 * Item has been dropped into the ready list by the poll
			 * callback, but it's not actually ready, as far as the
			 * caller-requested events go. We can remove it here.
			 */
			list_del_init(&epi->rdllink);
		}
	}

	return 0;
}
static int ep_poll_readyevents_proc(void *priv, void *cookie, int call_nests)
{
	return ep_scan_ready_list(priv, ep_read_events_proc, NULL);
}
static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
{
	int pollflags;
	struct eventpoll *ep = file->private_data;

	/* Insert inside our poll wait queue */
	poll_wait(file, &ep->poll_wait, wait);

	/*
	 * Proceed to find out if wanted events are really available inside
	 * the ready list. This needs to be done under ep_call_nested()
	 * supervision, since the call to f_op->poll() done on listed files
	 * could re-enter here.
	 */
	pollflags = ep_call_nested(&poll_readywalk_ncalls, EP_MAX_NESTS,
				   ep_poll_readyevents_proc, ep, ep);

	return pollflags != -1 ? pollflags : 0;
}
/* File callbacks that implement the eventpoll file behaviour */
static const struct file_operations eventpoll_fops = {
	.release	= ep_eventpoll_release,
	.poll		= ep_eventpoll_poll
};

/* Fast test to see if the file is an eventpoll file */
static inline int is_file_epoll(struct file *f)
{
	return f->f_op == &eventpoll_fops;
}
/*
 * This is called from eventpoll_release() to unlink files from the eventpoll
 * interface. We need this facility to correctly clean up files that are
 * closed without being removed from the eventpoll interface.
 */
void eventpoll_release_file(struct file *file)
{
	struct list_head *lsthead = &file->f_ep_links;
	struct eventpoll *ep;
	struct epitem *epi;

	/*
	 * We don't want to get "file->f_lock" because it is not
	 * necessary. It is not necessary because we're in the "struct file"
	 * cleanup path, and this means that no one is using this file anymore.
	 * So, for example, epoll_ctl() cannot hit here since if we reach this
	 * point, the file counter already went to zero and fget() would fail.
	 * The only hit might come from ep_free() but holding the mutex
	 * will correctly serialize the operation. We do need to acquire
	 * "ep->mtx" after "epmutex" because ep_remove() requires it when called
	 * from anywhere but ep_free().
	 *
	 * Besides, ep_remove() acquires the lock, so we can't hold it here.
	 */
	mutex_lock(&epmutex);

	while (!list_empty(lsthead)) {
		epi = list_first_entry(lsthead, struct epitem, fllink);

		ep = epi->ep;
		list_del_init(&epi->fllink);
		mutex_lock(&ep->mtx);
		ep_remove(ep, epi);
		mutex_unlock(&ep->mtx);
	}

	mutex_unlock(&epmutex);
}
static int ep_alloc(struct eventpoll **pep)
{
	int error;
	struct user_struct *user;
	struct eventpoll *ep;

	user = get_current_user();
	error = -ENOMEM;
	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (unlikely(!ep))
		goto free_uid;

	spin_lock_init(&ep->lock);
	mutex_init(&ep->mtx);
	init_waitqueue_head(&ep->wq);
	init_waitqueue_head(&ep->poll_wait);
	INIT_LIST_HEAD(&ep->rdllist);
	ep->rbr = RB_ROOT;
	ep->ovflist = EP_UNACTIVE_PTR;
	ep->user = user;

	*pep = ep;

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_alloc() ep=%p\n",
		     current, ep));
	return 0;

free_uid:
	free_uid(user);
	return error;
}
765 * are protected by the "mtx" mutex, and ep_find() must be called with
768 static struct epitem
*ep_find(struct eventpoll
*ep
, struct file
*file
, int fd
)
772 struct epitem
*epi
, *epir
= NULL
;
773 struct epoll_filefd ffd
;
775 ep_set_ffd(&ffd
, file
, fd
);
776 for (rbp
= ep
->rbr
.rb_node
; rbp
; ) {
777 epi
= rb_entry(rbp
, struct epitem
, rbn
);
778 kcmp
= ep_cmp_ffd(&ffd
, &epi
->ffd
);
789 DNPRINTK(3, (KERN_INFO
"[%p] eventpoll: ep_find(%p) -> %p\n",
790 current
, file
, epir
));
/*
 * This is the callback that is passed to the wait queue wakeup
 * mechanism. It is called by the stored file descriptors when they
 * have events to report.
 */
static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	int pwake = 0;
	unsigned long flags;
	struct epitem *epi = ep_item_from_wait(wait);
	struct eventpoll *ep = epi->ep;

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: poll_callback(%p) epi=%p ep=%p\n",
		     current, epi->ffd.file, epi, ep));

	spin_lock_irqsave(&ep->lock, flags);

	/*
	 * If the event mask does not contain any poll(2) event, we consider the
	 * descriptor to be disabled. This condition is likely the effect of the
	 * EPOLLONESHOT bit that disables the descriptor when an event is received,
	 * until the next EPOLL_CTL_MOD will be issued.
	 */
	if (!(epi->event.events & ~EP_PRIVATE_BITS))
		goto out_unlock;

	/*
	 * If we are transferring events to userspace, we can hold no locks
	 * (because we're accessing user memory, and because of linux f_op->poll()
	 * semantics). All the events that happen during that period of time are
	 * chained in ep->ovflist and requeued later on.
	 */
	if (unlikely(ep->ovflist != EP_UNACTIVE_PTR)) {
		if (epi->next == EP_UNACTIVE_PTR) {
			epi->next = ep->ovflist;
			ep->ovflist = epi;
		}
		goto out_unlock;
	}

	/* If this file is already in the ready list we exit soon */
	if (!ep_is_linked(&epi->rdllink))
		list_add_tail(&epi->rdllink, &ep->rdllist);

	/*
	 * Wake up (if active) both the eventpoll wait list and the ->poll()
	 * wait list.
	 */
	if (waitqueue_active(&ep->wq))
		wake_up_locked(&ep->wq);
	if (waitqueue_active(&ep->poll_wait))
		pwake++;

out_unlock:
	spin_unlock_irqrestore(&ep->lock, flags);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&ep->poll_wait);

	return 1;
}
/*
 * This is the callback that is used to add our wait queue to the
 * target file wakeup lists.
 */
static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
				 poll_table *pt)
{
	struct epitem *epi = ep_item_from_epqueue(pt);
	struct eppoll_entry *pwq;

	if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL))) {
		init_waitqueue_func_entry(&pwq->wait, ep_poll_callback);
		pwq->whead = whead;
		pwq->base = epi;
		add_wait_queue(whead, &pwq->wait);
		list_add_tail(&pwq->llink, &epi->pwqlist);
		epi->nwait++;
	} else {
		/* We have to signal that an error occurred */
		epi->nwait = -1;
	}
}
static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi)
{
	int kcmp;
	struct rb_node **p = &ep->rbr.rb_node, *parent = NULL;
	struct epitem *epic;

	while (*p) {
		parent = *p;
		epic = rb_entry(parent, struct epitem, rbn);
		kcmp = ep_cmp_ffd(&epi->ffd, &epic->ffd);
		if (kcmp > 0)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&epi->rbn, parent, p);
	rb_insert_color(&epi->rbn, &ep->rbr);
}
/*
 * Must be called with "mtx" held.
 */
static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
		     struct file *tfile, int fd)
{
	int error, revents, pwake = 0;
	unsigned long flags;
	struct epitem *epi;
	struct ep_pqueue epq;

	if (unlikely(atomic_read(&ep->user->epoll_watches) >=
		     max_user_watches))
		return -ENOSPC;
	if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL)))
		return -ENOMEM;

	/* Item initialization follows here ... */
	INIT_LIST_HEAD(&epi->rdllink);
	INIT_LIST_HEAD(&epi->fllink);
	INIT_LIST_HEAD(&epi->pwqlist);
	epi->ep = ep;
	ep_set_ffd(&epi->ffd, tfile, fd);
	epi->event = *event;
	epi->nwait = 0;
	epi->next = EP_UNACTIVE_PTR;

	/* Initialize the poll table using the queue callback */
	epq.epi = epi;
	init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);

	/*
	 * Attach the item to the poll hooks and get current event bits.
	 * We can safely use the file* here because its usage count has
	 * been increased by the caller of this function. Note that after
	 * this operation completes, the poll callback can start hitting
	 * the new item.
	 */
	revents = tfile->f_op->poll(tfile, &epq.pt);

	/*
	 * We have to check if something went wrong during the poll wait queue
	 * install process. Namely an allocation for a wait queue failed due
	 * to high memory pressure.
	 */
	error = -ENOMEM;
	if (epi->nwait < 0)
		goto error_unregister;

	/* Add the current item to the list of active epoll hooks for this file */
	spin_lock(&tfile->f_lock);
	list_add_tail(&epi->fllink, &tfile->f_ep_links);
	spin_unlock(&tfile->f_lock);

	/*
	 * Add the current item to the RB tree. All RB tree operations are
	 * protected by "mtx", and ep_insert() is called with "mtx" held.
	 */
	ep_rbtree_insert(ep, epi);

	/* We have to drop the new item inside our item list to keep track of it */
	spin_lock_irqsave(&ep->lock, flags);

	/* If the file is already "ready" we drop it inside the ready list */
	if ((revents & event->events) && !ep_is_linked(&epi->rdllink)) {
		list_add_tail(&epi->rdllink, &ep->rdllist);

		/* Notify waiting tasks that events are available */
		if (waitqueue_active(&ep->wq))
			wake_up_locked(&ep->wq);
		if (waitqueue_active(&ep->poll_wait))
			pwake++;
	}

	spin_unlock_irqrestore(&ep->lock, flags);

	atomic_inc(&ep->user->epoll_watches);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&ep->poll_wait);

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_insert(%p, %p, %d)\n",
		     current, ep, tfile, fd));

	return 0;

error_unregister:
	ep_unregister_pollwait(ep, epi);

	/*
	 * We need to do this because an event could have arrived on some
	 * allocated wait queue. Note that we don't care about the ep->ovflist
	 * list, since that is used/cleaned only inside a section bound by "mtx".
	 * And ep_insert() is called with "mtx" held.
	 */
	spin_lock_irqsave(&ep->lock, flags);
	if (ep_is_linked(&epi->rdllink))
		list_del_init(&epi->rdllink);
	spin_unlock_irqrestore(&ep->lock, flags);

	kmem_cache_free(epi_cache, epi);

	return error;
}
/*
 * Modify the interest event mask by dropping an event if the new mask
 * has a match in the current file status. Must be called with "mtx" held.
 */
static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_event *event)
{
	int pwake = 0;
	unsigned int revents;
	unsigned long flags;

	/*
	 * Set the new event interest mask before calling f_op->poll(), otherwise
	 * a potential race might occur. In fact if we do this operation inside
	 * the lock, an event might happen between the f_op->poll() call and the
	 * new event set registering.
	 */
	epi->event.events = event->events;

	/*
	 * Get current event bits. We can safely use the file* here because
	 * its usage count has been increased by the caller of this function.
	 */
	revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL);

	spin_lock_irqsave(&ep->lock, flags);

	/* Copy the data member from inside the lock */
	epi->event.data = event->data;

	/*
	 * If the item is "hot" and it is not registered inside the ready
	 * list, push it inside.
	 */
	if (revents & event->events) {
		if (!ep_is_linked(&epi->rdllink)) {
			list_add_tail(&epi->rdllink, &ep->rdllist);

			/* Notify waiting tasks that events are available */
			if (waitqueue_active(&ep->wq))
				wake_up_locked(&ep->wq);
			if (waitqueue_active(&ep->poll_wait))
				pwake++;
		}
	}
	spin_unlock_irqrestore(&ep->lock, flags);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&ep->poll_wait);

	return 0;
}
static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head,
			       void *priv)
{
	struct ep_send_events_data *esed = priv;
	int eventcnt;
	unsigned int revents;
	struct epitem *epi;
	struct epoll_event __user *uevent;

	/*
	 * We can loop without lock because we are passed a task private list.
	 * Items cannot vanish during the loop because ep_scan_ready_list() is
	 * holding "mtx" during this call.
	 */
	for (eventcnt = 0, uevent = esed->events;
	     !list_empty(head) && eventcnt < esed->maxevents;) {
		epi = list_first_entry(head, struct epitem, rdllink);

		list_del_init(&epi->rdllink);

		revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL) &
			epi->event.events;

		/*
		 * If the event mask intersects the caller-requested one,
		 * deliver the event to userspace. Again, ep_scan_ready_list()
		 * is holding "mtx", so no operations coming from userspace
		 * can change the item.
		 */
		if (revents) {
			if (__put_user(revents, &uevent->events) ||
			    __put_user(epi->event.data, &uevent->data))
				return eventcnt ? eventcnt : -EFAULT;
			eventcnt++;
			uevent++;
			if (epi->event.events & EPOLLONESHOT)
				epi->event.events &= EP_PRIVATE_BITS;
			else if (!(epi->event.events & EPOLLET)) {
				/*
				 * If this file has been added with Level
				 * Trigger mode, we need to insert it back
				 * inside the ready list, so that the next
				 * call to epoll_wait() will check again the
				 * events availability. At this point, no one
				 * can insert into ep->rdllist besides us.
				 * The epoll_ctl() callers are locked out by
				 * ep_scan_ready_list() holding "mtx" and the
				 * poll callback will queue them in ep->ovflist.
				 */
				list_add_tail(&epi->rdllink, &ep->rdllist);
			}
		}
	}

	return eventcnt;
}
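/*
 * Userspace sketch (illustrative, not part of this file) of the
 * EPOLLONESHOT behaviour implemented above: once an event has been
 * delivered, the descriptor stays disabled until re-armed.
 *
 *	struct epoll_event ev = { .events = EPOLLIN | EPOLLONESHOT,
 *				  .data.fd = sock };	// "sock" is hypothetical
 *	epoll_ctl(epfd, EPOLL_CTL_ADD, sock, &ev);
 *	n = epoll_wait(epfd, ready, 16, -1);
 *	// consume the event, then re-arm the descriptor:
 *	epoll_ctl(epfd, EPOLL_CTL_MOD, sock, &ev);
 */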
static int ep_send_events(struct eventpoll *ep, struct epoll_event __user *events,
			  int maxevents)
{
	struct ep_send_events_data esed;

	esed.maxevents = maxevents;
	esed.events = events;

	return ep_scan_ready_list(ep, ep_send_events_proc, &esed);
}
static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
		   int maxevents, long timeout)
{
	int res, eavail;
	unsigned long flags;
	long jtimeout;
	wait_queue_t wait;

	/*
	 * Calculate the timeout by checking for the "infinite" value (-1)
	 * and the overflow condition. The passed timeout is in milliseconds,
	 * that's why (t * HZ) / 1000.
	 */
	jtimeout = (timeout < 0 || timeout >= EP_MAX_MSTIMEO) ?
		MAX_SCHEDULE_TIMEOUT : (timeout * HZ + 999) / 1000;
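	/*
	 * Worked example (illustrative, assuming HZ = 250): a caller
	 * timeout of 10 ms gives jtimeout = (10 * 250 + 999) / 1000 = 3
	 * jiffies; the "+ 999" rounds up so a positive timeout never
	 * truncates to zero jiffies.
	 */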
retry:
	spin_lock_irqsave(&ep->lock, flags);

	res = 0;
	if (list_empty(&ep->rdllist)) {
		/*
		 * We don't have any available event to return to the caller.
		 * We need to sleep here, and we will be woken up by
		 * ep_poll_callback() when events become available.
		 */
		init_waitqueue_entry(&wait, current);
		wait.flags |= WQ_FLAG_EXCLUSIVE;
		__add_wait_queue(&ep->wq, &wait);

		for (;;) {
			/*
			 * We don't want to sleep if the ep_poll_callback() sends us
			 * a wakeup in between. That's why we set the task state
			 * to TASK_INTERRUPTIBLE before doing the checks.
			 */
			set_current_state(TASK_INTERRUPTIBLE);
			if (!list_empty(&ep->rdllist) || !jtimeout)
				break;
			if (signal_pending(current)) {
				res = -EINTR;
				break;
			}

			spin_unlock_irqrestore(&ep->lock, flags);
			jtimeout = schedule_timeout(jtimeout);
			spin_lock_irqsave(&ep->lock, flags);
		}
		__remove_wait_queue(&ep->wq, &wait);

		set_current_state(TASK_RUNNING);
	}

	/* Is it worth trying to dig for events? */
	eavail = !list_empty(&ep->rdllist) || ep->ovflist != EP_UNACTIVE_PTR;

	spin_unlock_irqrestore(&ep->lock, flags);

	/*
	 * Try to transfer events to user space. In case we get 0 events and
	 * there's still timeout left over, we go trying again in search of
	 * more luck.
	 */
	if (!res && eavail &&
	    !(res = ep_send_events(ep, events, maxevents)) && jtimeout)
		goto retry;

	return res;
}
/*
 * Open an eventpoll file descriptor.
 */
SYSCALL_DEFINE1(epoll_create1, int, flags)
{
	int error;
	struct eventpoll *ep = NULL;

	/* Check the EPOLL_* constant for consistency. */
	BUILD_BUG_ON(EPOLL_CLOEXEC != O_CLOEXEC);

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d)\n",
		     current, flags));

	error = -EINVAL;
	if (flags & ~EPOLL_CLOEXEC)
		goto error_return;

	/*
	 * Create the internal data structure ("struct eventpoll").
	 */
	error = ep_alloc(&ep);
	if (error < 0)
		goto error_return;

	/*
	 * Creates all the items needed to setup an eventpoll file. That is,
	 * a file structure and a free file descriptor.
	 */
	error = anon_inode_getfd("[eventpoll]", &eventpoll_fops, ep,
				 flags & O_CLOEXEC);
	if (error < 0)
		ep_free(ep);

error_return:
	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n",
		     current, flags, error));

	return error;
}
SYSCALL_DEFINE1(epoll_create, int, size)
{
	if (size <= 0)
		return -EINVAL;

	return sys_epoll_create1(0);
}
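/*
 * Userspace sketch (illustrative only, not part of this file) tying the
 * three syscalls together; "listen_fd" and "handle" are hypothetical:
 *
 *	int i, n, epfd = epoll_create1(0);
 *	struct epoll_event ev = { .events = EPOLLIN, .data.fd = listen_fd };
 *	struct epoll_event ready[16];
 *
 *	epoll_ctl(epfd, EPOLL_CTL_ADD, listen_fd, &ev);
 *	n = epoll_wait(epfd, ready, 16, 1000);	// wait up to 1000 ms
 *	for (i = 0; i < n; i++)
 *		handle(ready[i].data.fd);
 */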
/*
 * The following function implements the controller interface for
 * the eventpoll file that enables the insertion/removal/change of
 * file descriptors inside the interest set.
 */
SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
		struct epoll_event __user *, event)
{
	int error;
	struct file *file, *tfile;
	struct eventpoll *ep;
	struct epitem *epi;
	struct epoll_event epds;

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_ctl(%d, %d, %d, %p)\n",
		     current, epfd, op, fd, event));

	error = -EFAULT;
	if (ep_op_has_event(op) &&
	    copy_from_user(&epds, event, sizeof(struct epoll_event)))
		goto error_return;

	/* Get the "struct file *" for the eventpoll file */
	error = -EBADF;
	file = fget(epfd);
	if (!file)
		goto error_return;

	/* Get the "struct file *" for the target file */
	tfile = fget(fd);
	if (!tfile)
		goto error_fput;

	/* The target file descriptor must support poll */
	error = -EPERM;
	if (!tfile->f_op || !tfile->f_op->poll)
		goto error_tgt_fput;

	/*
	 * We have to check that the file structure underneath the file descriptor
	 * the user passed to us _is_ an eventpoll file. And also we do not permit
	 * adding an epoll file descriptor inside itself.
	 */
	error = -EINVAL;
	if (file == tfile || !is_file_epoll(file))
		goto error_tgt_fput;

	/*
	 * At this point it is safe to assume that the "private_data" contains
	 * our own data structure.
	 */
	ep = file->private_data;

	mutex_lock(&ep->mtx);

	/*
	 * Try to lookup the file inside our RB tree. Since we grabbed "mtx"
	 * above, we can be sure to be able to use the item looked up by
	 * ep_find() till we release the mutex.
	 */
	epi = ep_find(ep, tfile, fd);

	error = -EINVAL;
	switch (op) {
	case EPOLL_CTL_ADD:
		if (!epi) {
			epds.events |= POLLERR | POLLHUP;
			error = ep_insert(ep, &epds, tfile, fd);
		} else
			error = -EEXIST;
		break;
	case EPOLL_CTL_DEL:
		if (epi)
			error = ep_remove(ep, epi);
		else
			error = -ENOENT;
		break;
	case EPOLL_CTL_MOD:
		if (epi) {
			epds.events |= POLLERR | POLLHUP;
			error = ep_modify(ep, epi, &epds);
		} else
			error = -ENOENT;
		break;
	}
	mutex_unlock(&ep->mtx);

error_tgt_fput:
	fput(tfile);
error_fput:
	fput(file);
error_return:

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_ctl(%d, %d, %d, %p) = %d\n",
		     current, epfd, op, fd, event, error));

	return error;
}
/*
 * Implement the event wait interface for the eventpoll file. It is the kernel
 * part of the user space epoll_wait(2).
 */
SYSCALL_DEFINE4(epoll_wait, int, epfd, struct epoll_event __user *, events,
		int, maxevents, int, timeout)
{
	int error;
	struct file *file;
	struct eventpoll *ep;

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_wait(%d, %p, %d, %d)\n",
		     current, epfd, events, maxevents, timeout));

	/* The maximum number of events must be greater than zero */
	if (maxevents <= 0 || maxevents > EP_MAX_EVENTS)
		return -EINVAL;

	/* Verify that the area passed by the user is writeable */
	if (!access_ok(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event))) {
		error = -EFAULT;
		goto error_return;
	}

	/* Get the "struct file *" for the eventpoll file */
	error = -EBADF;
	file = fget(epfd);
	if (!file)
		goto error_return;

	/*
	 * We have to check that the file structure underneath the fd
	 * the user passed to us _is_ an eventpoll file.
	 */
	error = -EINVAL;
	if (!is_file_epoll(file))
		goto error_fput;

	/*
	 * At this point it is safe to assume that the "private_data" contains
	 * our own data structure.
	 */
	ep = file->private_data;

	/* Time to fish for events ... */
	error = ep_poll(ep, events, maxevents, timeout);

error_fput:
	fput(file);
error_return:

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_wait(%d, %p, %d, %d) = %d\n",
		     current, epfd, events, maxevents, timeout, error));

	return error;
}
#ifdef HAVE_SET_RESTORE_SIGMASK

/*
 * Implement the event wait interface for the eventpoll file. It is the kernel
 * part of the user space epoll_pwait(2).
 */
SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events,
		int, maxevents, int, timeout, const sigset_t __user *, sigmask,
		size_t, sigsetsize)
{
	int error;
	sigset_t ksigmask, sigsaved;

	/*
	 * If the caller wants a certain signal mask to be set during the wait,
	 * we apply it here.
	 */
	if (sigmask) {
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;
		sigdelsetmask(&ksigmask, sigmask(SIGKILL) | sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	error = sys_epoll_wait(epfd, events, maxevents, timeout);

	/*
	 * If we changed the signal mask, we need to restore the original one.
	 * In case we've got a signal while waiting, we do not restore the
	 * signal mask yet, and we allow do_signal() to deliver the signal on
	 * the way back to userspace, before the signal mask is restored.
	 */
	if (sigmask) {
		if (error == -EINTR) {
			memcpy(&current->saved_sigmask, &sigsaved,
			       sizeof(sigsaved));
			set_restore_sigmask();
		} else
			sigprocmask(SIG_SETMASK, &sigsaved, NULL);
	}

	return error;
}

#endif /* HAVE_SET_RESTORE_SIGMASK */
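/*
 * Userspace sketch (illustrative) of what epoll_pwait(2) buys over a
 * separate sigprocmask() + epoll_wait() pair: the mask swap and the
 * wait happen atomically, so a signal cannot slip in between.
 *
 *	sigset_t mask;
 *	sigemptyset(&mask);	// unblock everything while waiting
 *	n = epoll_pwait(epfd, ready, 16, -1, &mask);
 */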
static int __init eventpoll_init(void)
{
	struct sysinfo si;

	si_meminfo(&si);
	/*
	 * Allows top 4% of low memory to be allocated for epoll watches
	 * (per user).
	 */
	max_user_watches = (((si.totalram - si.totalhigh) / 25) << PAGE_SHIFT) /
		EP_ITEM_COST;
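	/*
	 * Worked example (illustrative numbers): with 1 GiB of low memory,
	 * 4% is 2^30 / 25 ~= 43 MB; assuming EP_ITEM_COST works out to 200
	 * bytes (the real value is arch dependent), that allows roughly
	 * 215,000 watches per user.
	 */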
	/* Initialize the structure used to perform safe poll wait head wake ups */
	ep_nested_calls_init(&poll_safewake_ncalls);

	/* Initialize the structure used to perform file's f_op->poll() calls */
	ep_nested_calls_init(&poll_readywalk_ncalls);

	/* Allocates slab cache used to allocate "struct epitem" items */
	epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem),
			0, SLAB_HWCACHE_ALIGN|EPI_SLAB_DEBUG|SLAB_PANIC,
			NULL);

	/* Allocates slab cache used to allocate "struct eppoll_entry" */
	pwq_cache = kmem_cache_create("eventpoll_pwq",
			sizeof(struct eppoll_entry), 0,
			EPI_SLAB_DEBUG|SLAB_PANIC, NULL);

	return 0;
}
fs_initcall(eventpoll_init);