1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * fs/eventpoll.c (Efficient event retrieval implementation)
4 * Copyright (C) 2001,...,2009 Davide Libenzi
5 *
6 * Davide Libenzi <davidel@xmailserver.org>
7 */
8
9 #include <linux/init.h>
10 #include <linux/kernel.h>
11 #include <linux/sched/signal.h>
12 #include <linux/fs.h>
13 #include <linux/file.h>
14 #include <linux/signal.h>
15 #include <linux/errno.h>
16 #include <linux/mm.h>
17 #include <linux/slab.h>
18 #include <linux/poll.h>
19 #include <linux/string.h>
20 #include <linux/list.h>
21 #include <linux/hash.h>
22 #include <linux/spinlock.h>
23 #include <linux/syscalls.h>
24 #include <linux/rbtree.h>
25 #include <linux/wait.h>
26 #include <linux/eventpoll.h>
27 #include <linux/mount.h>
28 #include <linux/bitops.h>
29 #include <linux/mutex.h>
30 #include <linux/anon_inodes.h>
31 #include <linux/device.h>
32 #include <linux/uaccess.h>
33 #include <asm/io.h>
34 #include <asm/mman.h>
35 #include <linux/atomic.h>
36 #include <linux/proc_fs.h>
37 #include <linux/seq_file.h>
38 #include <linux/compat.h>
39 #include <linux/rculist.h>
40 #include <net/busy_poll.h>
41
42 /*
43 * LOCKING:
44 * There are three levels of locking required by epoll:
45 *
46 * 1) epmutex (mutex)
47 * 2) ep->mtx (mutex)
48 * 3) ep->lock (rwlock)
49 *
50 * The acquire order is the one listed above, from 1 to 3.
51 * We need a rwlock (ep->lock) because we manipulate objects
52 * from inside the poll callback, that might be triggered from
53 * a wake_up() that in turn might be called from IRQ context.
54 * So we can't sleep inside the poll callback and hence we need
55 * a spinlock. During the event transfer loop (from kernel to
56 * user space) we could end up sleeping due to a copy_to_user(), so
57 * we need a lock that will allow us to sleep. This lock is a
58 * mutex (ep->mtx). It is acquired during the event transfer loop,
59 * during epoll_ctl(EPOLL_CTL_DEL) and during eventpoll_release_file().
60 * Then we also need a global mutex to serialize eventpoll_release_file()
61 * and ep_free().
62 * This mutex is acquired by ep_free() during the epoll file
63 * cleanup path and it is also acquired by eventpoll_release_file()
64 * if a file has been pushed inside an epoll set and it is then
65 * close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL).
66 * It is also acquired when inserting an epoll fd onto another epoll
67 * fd. We do this so that we walk the epoll tree and ensure that this
68 * insertion does not create a cycle of epoll file descriptors, which
69 * could lead to deadlock. We need a global mutex to prevent two
70 * simultaneous inserts (A into B and B into A) from racing and
71 * constructing a cycle without either insert observing that it is
72 * going to do so.
73 * It is necessary to acquire multiple "ep->mtx"es at once in the
74 * case when one epoll fd is added to another. In this case, we
75 * always acquire the locks in the order of nesting (i.e. after
76 * epoll_ctl(e1, EPOLL_CTL_ADD, e2), e1->mtx will always be acquired
77 * before e2->mtx). Since we disallow cycles of epoll file
78 * descriptors, this ensures that the mutexes are well-ordered. In
79 * order to communicate this nesting to lockdep, when walking a tree
80 * of epoll file descriptors, we use the current recursion depth as
81 * the lockdep subkey.
82 * It would be possible to drop "ep->mtx" and use the global
83 * mutex "epmutex" (together with "ep->lock") instead,
84 * but having "ep->mtx" makes the interface more scalable.
85 * Events that require holding "epmutex" are very rare, while for
86 * normal operations the epoll private "ep->mtx" guarantees
87 * better scalability.
88 */
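/*
 * A minimal sketch of the nesting described above, assuming two epoll
 * descriptors created from user space:
 *
 *	e1 = epoll_create1(0);
 *	e2 = epoll_create1(0);
 *	epoll_ctl(e1, EPOLL_CTL_ADD, e2, &ev);
 *
 * While handling that EPOLL_CTL_ADD, e1->mtx is taken first and the tree
 * rooted at e2 is then walked with mutex_lock_nested(&ep->mtx, depth),
 * using the recursion depth as the lockdep subkey (see
 * ep_loop_check_proc() below).
 */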
89
90 /* Epoll private bits inside the event mask */
91 #define EP_PRIVATE_BITS (EPOLLWAKEUP | EPOLLONESHOT | EPOLLET | EPOLLEXCLUSIVE)
92
93 #define EPOLLINOUT_BITS (EPOLLIN | EPOLLOUT)
94
95 #define EPOLLEXCLUSIVE_OK_BITS (EPOLLINOUT_BITS | EPOLLERR | EPOLLHUP | \
96 EPOLLWAKEUP | EPOLLET | EPOLLEXCLUSIVE)
97
98 /* Maximum nesting depth allowed inside epoll sets */
99 #define EP_MAX_NESTS 4
100
101 #define EP_MAX_EVENTS (INT_MAX / sizeof(struct epoll_event))
102
103 #define EP_UNACTIVE_PTR ((void *) -1L)
104
105 #define EP_ITEM_COST (sizeof(struct epitem) + sizeof(struct eppoll_entry))
106
107 struct epoll_filefd {
108 struct file *file;
109 int fd;
110 } __packed;
111
112 /*
113 * Structure used to track possible nested calls, to catch too-deep
114 * recursion and loop cycles.
115 */
116 struct nested_call_node {
117 struct list_head llink;
118 void *cookie;
119 void *ctx;
120 };
121
122 /*
123 * This structure is used as a collector for nested calls, to check for
124 * maximum recursion depth and loop cycles.
125 */
126 struct nested_calls {
127 struct list_head tasks_call_list;
128 spinlock_t lock;
129 };
130
131 /*
132 * Each file descriptor added to the eventpoll interface will
133 * have an entry of this type linked to the "rbr" RB tree.
134 * Avoid increasing the size of this struct: there can be many thousands
135 * of these on a server and we do not want this to take another cache line.
136 */
137 struct epitem {
138 union {
139 /* RB tree node links this structure to the eventpoll RB tree */
140 struct rb_node rbn;
141 /* Used to free the struct epitem */
142 struct rcu_head rcu;
143 };
144
145 /* List header used to link this structure to the eventpoll ready list */
146 struct list_head rdllink;
147
148 /*
149 * Works together with "struct eventpoll"->ovflist in keeping the
150 * singly linked chain of items.
151 */
152 struct epitem *next;
153
154 /* The file descriptor information this item refers to */
155 struct epoll_filefd ffd;
156
157 /* Number of active wait queues attached to poll operations */
158 int nwait;
159
160 /* List containing poll wait queues */
161 struct list_head pwqlist;
162
163 /* The "container" of this item */
164 struct eventpoll *ep;
165
166 /* List header used to link this item to the "struct file" items list */
167 struct list_head fllink;
168
169 /* wakeup_source used when EPOLLWAKEUP is set */
170 struct wakeup_source __rcu *ws;
171
172 /* The structure that describes the interested events and the source fd */
173 struct epoll_event event;
174 };
175
176 /*
177 * This structure is stored inside the "private_data" member of the file
178 * structure and represents the main data structure for the eventpoll
179 * interface.
180 */
181 struct eventpoll {
182 /*
183 * This mutex is used to ensure that files are not removed
184 * while epoll is using them. This is held during the event
185 * collection loop, the file cleanup path, the epoll file exit
186 * code and the ctl operations.
187 */
188 struct mutex mtx;
189
190 /* Wait queue used by sys_epoll_wait() */
191 wait_queue_head_t wq;
192
193 /* Wait queue used by file->poll() */
194 wait_queue_head_t poll_wait;
195
196 /* List of ready file descriptors */
197 struct list_head rdllist;
198
199 /* Lock which protects rdllist and ovflist */
200 rwlock_t lock;
201
202 /* RB tree root used to store monitored fd structs */
203 struct rb_root_cached rbr;
204
205 /*
206 * This is a singly linked list that chains all the "struct epitem" that
207 * had events occur while ready events were being transferred to userspace
208 * without holding ->lock.
209 */
210 struct epitem *ovflist;
211
212 /* wakeup_source used when ep_scan_ready_list is running */
213 struct wakeup_source *ws;
214
215 /* The user that created the eventpoll descriptor */
216 struct user_struct *user;
217
218 struct file *file;
219
220 /* used to optimize loop detection check */
221 int visited;
222 struct list_head visited_list_link;
223
224 #ifdef CONFIG_NET_RX_BUSY_POLL
225 /* used to track busy poll napi_id */
226 unsigned int napi_id;
227 #endif
228 };
229
230 /* Wait structure used by the poll hooks */
231 struct eppoll_entry {
232 /* List header used to link this structure to the "struct epitem" */
233 struct list_head llink;
234
235 /* The "base" pointer is set to the container "struct epitem" */
236 struct epitem *base;
237
238 /*
239 * Wait queue item that will be linked to the target file wait
240 * queue head.
241 */
242 wait_queue_entry_t wait;
243
244 /* The wait queue head to which the "wait" wait queue item is linked */
245 wait_queue_head_t *whead;
246 };
247
248 /* Wrapper struct used by poll queueing */
249 struct ep_pqueue {
250 poll_table pt;
251 struct epitem *epi;
252 };
253
254 /* Used by the ep_send_events() function as callback private data */
255 struct ep_send_events_data {
256 int maxevents;
257 struct epoll_event __user *events;
258 int res;
259 };
260
261 /*
262 * Configuration options available inside /proc/sys/fs/epoll/
263 */
264 /* Maximum number of epoll watched descriptors, per user */
265 static long max_user_watches __read_mostly;
266
267 /*
268 * This mutex is used to serialize ep_free() and eventpoll_release_file().
269 */
270 static DEFINE_MUTEX(epmutex);
271
272 /* Used to check for epoll file descriptor inclusion loops */
273 static struct nested_calls poll_loop_ncalls;
274
275 /* Slab cache used to allocate "struct epitem" */
276 static struct kmem_cache *epi_cache __read_mostly;
277
278 /* Slab cache used to allocate "struct eppoll_entry" */
279 static struct kmem_cache *pwq_cache __read_mostly;
280
281 /* Visited nodes during ep_loop_check(), so we can unset them when we finish */
282 static LIST_HEAD(visited_list);
283
284 /*
285 * List of files with newly added links, where we may need to limit the number
286 * of emanating paths. Protected by the epmutex.
287 */
288 static LIST_HEAD(tfile_check_list);
289
290 #ifdef CONFIG_SYSCTL
291
292 #include <linux/sysctl.h>
293
294 static long long_zero;
295 static long long_max = LONG_MAX;
296
297 struct ctl_table epoll_table[] = {
298 {
299 .procname = "max_user_watches",
300 .data = &max_user_watches,
301 .maxlen = sizeof(max_user_watches),
302 .mode = 0644,
303 .proc_handler = proc_doulongvec_minmax,
304 .extra1 = &long_zero,
305 .extra2 = &long_max,
306 },
307 { }
308 };
309 #endif /* CONFIG_SYSCTL */
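/*
 * From user space the knob above shows up under /proc/sys/fs/epoll/, e.g.
 * (illustrative shell session; the default value depends on available RAM):
 *
 *	$ cat /proc/sys/fs/epoll/max_user_watches
 *	$ sysctl fs.epoll.max_user_watches
 */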
310
311 static const struct file_operations eventpoll_fops;
312
313 static inline int is_file_epoll(struct file *f)
314 {
315 return f->f_op == &eventpoll_fops;
316 }
317
318 /* Setup the structure that is used as key for the RB tree */
319 static inline void ep_set_ffd(struct epoll_filefd *ffd,
320 struct file *file, int fd)
321 {
322 ffd->file = file;
323 ffd->fd = fd;
324 }
325
326 /* Compare RB tree keys */
327 static inline int ep_cmp_ffd(struct epoll_filefd *p1,
328 struct epoll_filefd *p2)
329 {
330 return (p1->file > p2->file ? +1:
331 (p1->file < p2->file ? -1 : p1->fd - p2->fd));
332 }
333
334 /* Tells us if the item is currently linked */
335 static inline int ep_is_linked(struct epitem *epi)
336 {
337 return !list_empty(&epi->rdllink);
338 }
339
340 static inline struct eppoll_entry *ep_pwq_from_wait(wait_queue_entry_t *p)
341 {
342 return container_of(p, struct eppoll_entry, wait);
343 }
344
345 /* Get the "struct epitem" from a wait queue pointer */
346 static inline struct epitem *ep_item_from_wait(wait_queue_entry_t *p)
347 {
348 return container_of(p, struct eppoll_entry, wait)->base;
349 }
350
351 /* Get the "struct epitem" from an epoll queue wrapper */
352 static inline struct epitem *ep_item_from_epqueue(poll_table *p)
353 {
354 return container_of(p, struct ep_pqueue, pt)->epi;
355 }
356
357 /* Initialize the poll safe wake up structure */
358 static void ep_nested_calls_init(struct nested_calls *ncalls)
359 {
360 INIT_LIST_HEAD(&ncalls->tasks_call_list);
361 spin_lock_init(&ncalls->lock);
362 }
363
364 /**
365 * ep_events_available - Checks if ready events might be available.
366 *
367 * @ep: Pointer to the eventpoll context.
368 *
369 * Returns: a value different from zero if ready events are available,
370 * or zero otherwise.
371 */
372 static inline int ep_events_available(struct eventpoll *ep)
373 {
374 return !list_empty_careful(&ep->rdllist) ||
375 READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR;
376 }
377
378 #ifdef CONFIG_NET_RX_BUSY_POLL
379 static bool ep_busy_loop_end(void *p, unsigned long start_time)
380 {
381 struct eventpoll *ep = p;
382
383 return ep_events_available(ep) || busy_loop_timeout(start_time);
384 }
385
386 /*
387 * Busy poll if busy polling is globally enabled, a supporting socket was
388 * found and no events are available; the busy loop returns when
389 * need_resched() is set or ep_events_available() becomes true.
390 * We must do our busy polling with irqs enabled.
391 */
392 static void ep_busy_loop(struct eventpoll *ep, int nonblock)
393 {
394 unsigned int napi_id = READ_ONCE(ep->napi_id);
395
396 if ((napi_id >= MIN_NAPI_ID) && net_busy_loop_on())
397 napi_busy_loop(napi_id, nonblock ? NULL : ep_busy_loop_end, ep);
398 }
399
400 static inline void ep_reset_busy_poll_napi_id(struct eventpoll *ep)
401 {
402 if (ep->napi_id)
403 ep->napi_id = 0;
404 }
405
406 /*
407 * Set epoll busy poll NAPI ID from sk.
408 */
409 static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
410 {
411 struct eventpoll *ep;
412 unsigned int napi_id;
413 struct socket *sock;
414 struct sock *sk;
415 int err;
416
417 if (!net_busy_loop_on())
418 return;
419
420 sock = sock_from_file(epi->ffd.file, &err);
421 if (!sock)
422 return;
423
424 sk = sock->sk;
425 if (!sk)
426 return;
427
428 napi_id = READ_ONCE(sk->sk_napi_id);
429 ep = epi->ep;
430
431 /* Non-NAPI IDs can be rejected, and there is
432 * nothing to do if we already have
433 * this ID
434 */
435 if (napi_id < MIN_NAPI_ID || napi_id == ep->napi_id)
436 return;
437
438 /* record NAPI ID for use in next busy poll */
439 ep->napi_id = napi_id;
440 }
441
442 #else
443
444 static inline void ep_busy_loop(struct eventpoll *ep, int nonblock)
445 {
446 }
447
448 static inline void ep_reset_busy_poll_napi_id(struct eventpoll *ep)
449 {
450 }
451
452 static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
453 {
454 }
455
456 #endif /* CONFIG_NET_RX_BUSY_POLL */
457
458 /**
459 * ep_call_nested - Perform a bound (possibly) nested call, by checking
460 * that the recursion limit is not exceeded, and that
461 * the same nested call (identified by its cookie) is
462 * not re-entered.
463 *
464 * @ncalls: Pointer to the nested_calls structure to be used for this call.
465 * @nproc: Nested call core function pointer.
466 * @priv: Opaque data to be passed to the @nproc callback.
467 * @cookie: Cookie to be used to identify this nested call.
468 * @ctx: This instance context.
469 *
470 * Returns: the code returned by the @nproc callback, or -1 if
471 * the maximum recursion limit has been exceeded.
472 */
473 static int ep_call_nested(struct nested_calls *ncalls,
474 int (*nproc)(void *, void *, int), void *priv,
475 void *cookie, void *ctx)
476 {
477 int error, call_nests = 0;
478 unsigned long flags;
479 struct list_head *lsthead = &ncalls->tasks_call_list;
480 struct nested_call_node *tncur;
481 struct nested_call_node tnode;
482
483 spin_lock_irqsave(&ncalls->lock, flags);
484
485 /*
486 * Try to see if the current task is already inside this wakeup call.
487 * We use a list here, since the population inside this set is always
488 * very limited.
489 */
490 list_for_each_entry(tncur, lsthead, llink) {
491 if (tncur->ctx == ctx &&
492 (tncur->cookie == cookie || ++call_nests > EP_MAX_NESTS)) {
493 /*
494 * Oops ... loop detected or maximum nest level reached.
495 * We abort this wake by breaking the cycle itself.
496 */
497 error = -1;
498 goto out_unlock;
499 }
500 }
501
502 /* Add the current task and cookie to the list */
503 tnode.ctx = ctx;
504 tnode.cookie = cookie;
505 list_add(&tnode.llink, lsthead);
506
507 spin_unlock_irqrestore(&ncalls->lock, flags);
508
509 /* Call the nested function */
510 error = (*nproc)(priv, cookie, call_nests);
511
512 /* Remove the current task from the list */
513 spin_lock_irqsave(&ncalls->lock, flags);
514 list_del(&tnode.llink);
515 out_unlock:
516 spin_unlock_irqrestore(&ncalls->lock, flags);
517
518 return error;
519 }
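/*
 * A typical invocation, as used by the loop and reverse-path checks further
 * down in this file: the object being visited doubles as the cookie, and
 * "current" is passed as the per-task context, so the same task re-entering
 * on the same cookie is detected and aborted:
 *
 *	error = ep_call_nested(&poll_loop_ncalls, reverse_path_check_proc,
 *			       child_file, child_file, current);
 */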
520
521 /*
522 * As described in commit 0ccf831cb ("lockdep: annotate epoll"),
523 * the use of wait queues by epoll is done in a very controlled
524 * manner. Wake ups can nest inside each other, but are never done
525 * with the same locking. For example:
526 *
527 * dfd = socket(...);
528 * efd1 = epoll_create();
529 * efd2 = epoll_create();
530 * epoll_ctl(efd1, EPOLL_CTL_ADD, dfd, ...);
531 * epoll_ctl(efd2, EPOLL_CTL_ADD, efd1, ...);
532 *
533 * When a packet arrives at the device underneath "dfd", the net code will
534 * issue a wake_up() on its poll wake list. Epoll (efd1) has installed a
535 * callback wakeup entry on that queue, and the wake_up() performed by the
536 * "dfd" net code will end up in ep_poll_callback(). At this point epoll
537 * (efd1) notices that it may have some event ready, so it needs to wake up
538 * the waiters on its poll wait list (efd2). So it calls ep_poll_safewake()
539 * that ends up in another wake_up(), after having checked the
540 * recursion constraints. That is, no more than EP_MAX_POLLWAKE_NESTS, to
541 * avoid stack blasting.
542 *
543 * When CONFIG_DEBUG_LOCK_ALLOC is enabled, make sure lockdep can handle
544 * this special case of epoll.
545 */
546 #ifdef CONFIG_DEBUG_LOCK_ALLOC
547
548 static DEFINE_PER_CPU(int, wakeup_nest);
549
550 static void ep_poll_safewake(wait_queue_head_t *wq)
551 {
552 unsigned long flags;
553 int subclass;
554
555 local_irq_save(flags);
556 preempt_disable();
557 subclass = __this_cpu_read(wakeup_nest);
558 spin_lock_nested(&wq->lock, subclass + 1);
559 __this_cpu_inc(wakeup_nest);
560 wake_up_locked_poll(wq, POLLIN);
561 __this_cpu_dec(wakeup_nest);
562 spin_unlock(&wq->lock);
563 local_irq_restore(flags);
564 preempt_enable();
565 }
566
567 #else
568
569 static void ep_poll_safewake(wait_queue_head_t *wq)
570 {
571 wake_up_poll(wq, EPOLLIN);
572 }
573
574 #endif
575
576 static void ep_remove_wait_queue(struct eppoll_entry *pwq)
577 {
578 wait_queue_head_t *whead;
579
580 rcu_read_lock();
581 /*
582 * If it is cleared by POLLFREE, it should be rcu-safe.
583 * If we read NULL we need a barrier paired with
584 * smp_store_release() in ep_poll_callback(), otherwise
585 * we rely on whead->lock.
586 */
587 whead = smp_load_acquire(&pwq->whead);
588 if (whead)
589 remove_wait_queue(whead, &pwq->wait);
590 rcu_read_unlock();
591 }
592
593 /*
594 * This function unregisters poll callbacks from the associated file
595 * descriptor. Must be called with "mtx" held (or "epmutex" if called from
596 * ep_free).
597 */
598 static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
599 {
600 struct list_head *lsthead = &epi->pwqlist;
601 struct eppoll_entry *pwq;
602
603 while (!list_empty(lsthead)) {
604 pwq = list_first_entry(lsthead, struct eppoll_entry, llink);
605
606 list_del(&pwq->llink);
607 ep_remove_wait_queue(pwq);
608 kmem_cache_free(pwq_cache, pwq);
609 }
610 }
611
612 /* call only when ep->mtx is held */
613 static inline struct wakeup_source *ep_wakeup_source(struct epitem *epi)
614 {
615 return rcu_dereference_check(epi->ws, lockdep_is_held(&epi->ep->mtx));
616 }
617
618 /* call only when ep->mtx is held */
619 static inline void ep_pm_stay_awake(struct epitem *epi)
620 {
621 struct wakeup_source *ws = ep_wakeup_source(epi);
622
623 if (ws)
624 __pm_stay_awake(ws);
625 }
626
627 static inline bool ep_has_wakeup_source(struct epitem *epi)
628 {
629 return rcu_access_pointer(epi->ws) ? true : false;
630 }
631
632 /* call when ep->mtx cannot be held (ep_poll_callback) */
633 static inline void ep_pm_stay_awake_rcu(struct epitem *epi)
634 {
635 struct wakeup_source *ws;
636
637 rcu_read_lock();
638 ws = rcu_dereference(epi->ws);
639 if (ws)
640 __pm_stay_awake(ws);
641 rcu_read_unlock();
642 }
643
644 /**
645 * ep_scan_ready_list - Scans the ready list in a way that makes it possible
646 * for the scan code to call f_op->poll(). Also allows for
647 * O(NumReady) performance.
648 *
649 * @ep: Pointer to the epoll private data structure.
650 * @sproc: Pointer to the scan callback.
651 * @priv: Private opaque data passed to the @sproc callback.
652 * @depth: The current depth of recursive f_op->poll calls.
653 * @ep_locked: caller already holds ep->mtx
654 *
655 * Returns: The same integer error code returned by the @sproc callback.
656 */
657 static __poll_t ep_scan_ready_list(struct eventpoll *ep,
658 __poll_t (*sproc)(struct eventpoll *,
659 struct list_head *, void *),
660 void *priv, int depth, bool ep_locked)
661 {
662 __poll_t res;
663 struct epitem *epi, *nepi;
664 LIST_HEAD(txlist);
665
666 lockdep_assert_irqs_enabled();
667
668 /*
669 * We need to lock this because we could be hit by
670 * eventpoll_release_file() and epoll_ctl().
671 */
672
673 if (!ep_locked)
674 mutex_lock_nested(&ep->mtx, depth);
675
676 /*
677 * Steal the ready list, and re-init the original one to the
678 * empty list. Also, set ep->ovflist to NULL so that events
679 * happening while looping without locks are not lost. We cannot
680 * have the poll callback queue directly on ep->rdllist,
681 * because we want the "sproc" callback to be able to do it
682 * in a lockless way.
683 */
684 write_lock_irq(&ep->lock);
685 list_splice_init(&ep->rdllist, &txlist);
686 WRITE_ONCE(ep->ovflist, NULL);
687 write_unlock_irq(&ep->lock);
688
689 /*
690 * Now call the callback function.
691 */
692 res = (*sproc)(ep, &txlist, priv);
693
694 write_lock_irq(&ep->lock);
695 /*
696 * During the time we spent inside the "sproc" callback, some
697 * other events might have been queued by the poll callback.
698 * We re-insert them inside the main ready-list here.
699 */
700 for (nepi = READ_ONCE(ep->ovflist); (epi = nepi) != NULL;
701 nepi = epi->next, epi->next = EP_UNACTIVE_PTR) {
702 /*
703 * We need to check if the item is already in the list.
704 * During the "sproc" callback execution time, items are
705 * queued into ->ovflist but the "txlist" might already
706 * contain them, and the list_splice() below takes care of them.
707 */
708 if (!ep_is_linked(epi)) {
709 /*
710 * ->ovflist is LIFO, so we have to reverse it in order
711 * to keep it in FIFO order.
712 */
713 list_add(&epi->rdllink, &ep->rdllist);
714 ep_pm_stay_awake(epi);
715 }
716 }
717 /*
718 * We need to set back ep->ovflist to EP_UNACTIVE_PTR, so that after
719 * releasing the lock, events will be queued in the normal way inside
720 * ep->rdllist.
721 */
722 WRITE_ONCE(ep->ovflist, EP_UNACTIVE_PTR);
723
724 /*
725 * Quickly re-inject items left on "txlist".
726 */
727 list_splice(&txlist, &ep->rdllist);
728 __pm_relax(ep->ws);
729 write_unlock_irq(&ep->lock);
730
731 if (!ep_locked)
732 mutex_unlock(&ep->mtx);
733
734 return res;
735 }
736
737 static void epi_rcu_free(struct rcu_head *head)
738 {
739 struct epitem *epi = container_of(head, struct epitem, rcu);
740 kmem_cache_free(epi_cache, epi);
741 }
742
743 /*
744 * Removes a "struct epitem" from the eventpoll RB tree and deallocates
745 * all the associated resources. Must be called with "mtx" held.
746 */
747 static int ep_remove(struct eventpoll *ep, struct epitem *epi)
748 {
749 struct file *file = epi->ffd.file;
750
751 lockdep_assert_irqs_enabled();
752
753 /*
754 * Removes poll wait queue hooks.
755 */
756 ep_unregister_pollwait(ep, epi);
757
758 /* Remove the current item from the list of epoll hooks */
759 spin_lock(&file->f_lock);
760 list_del_rcu(&epi->fllink);
761 spin_unlock(&file->f_lock);
762
763 rb_erase_cached(&epi->rbn, &ep->rbr);
764
765 write_lock_irq(&ep->lock);
766 if (ep_is_linked(epi))
767 list_del_init(&epi->rdllink);
768 write_unlock_irq(&ep->lock);
769
770 wakeup_source_unregister(ep_wakeup_source(epi));
771 /*
772 * At this point it is safe to free the eventpoll item. Use the union
773 * field epi->rcu, since we are trying to minimize the size of
774 * 'struct epitem'. The 'rbn' field is no longer in use. Protected by
775 * ep->mtx. The rcu read side, reverse_path_check_proc(), does not make
776 * use of the rbn field.
777 */
778 call_rcu(&epi->rcu, epi_rcu_free);
779
780 atomic_long_dec(&ep->user->epoll_watches);
781
782 return 0;
783 }
784
785 static void ep_free(struct eventpoll *ep)
786 {
787 struct rb_node *rbp;
788 struct epitem *epi;
789
790 /* We need to release all tasks waiting on this file */
791 if (waitqueue_active(&ep->poll_wait))
792 ep_poll_safewake(&ep->poll_wait);
793
794 /*
795 * We need to lock this because we could be hit by
796 * eventpoll_release_file() while we're freeing the "struct eventpoll".
797 * We do not need to hold "ep->mtx" here because the epoll file
798 * is on the way to be removed and no one has references to it
799 * anymore. The only hit might come from eventpoll_release_file() but
800 * holding "epmutex" is sufficient here.
801 */
802 mutex_lock(&epmutex);
803
804 /*
805 * Walks through the whole tree by unregistering poll callbacks.
806 */
807 for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
808 epi = rb_entry(rbp, struct epitem, rbn);
809
810 ep_unregister_pollwait(ep, epi);
811 cond_resched();
812 }
813
814 /*
815 * Walks through the whole tree by freeing each "struct epitem". At this
816 * point we are sure no poll callbacks will be lingering around, and also by
817 * holding "epmutex" we can be sure that no file cleanup code will hit
818 * us during this operation. So we can avoid the lock on "ep->lock".
819 * We do not need to lock ep->mtx, either, we only do it to prevent
820 * a lockdep warning.
821 */
822 mutex_lock(&ep->mtx);
823 while ((rbp = rb_first_cached(&ep->rbr)) != NULL) {
824 epi = rb_entry(rbp, struct epitem, rbn);
825 ep_remove(ep, epi);
826 cond_resched();
827 }
828 mutex_unlock(&ep->mtx);
829
830 mutex_unlock(&epmutex);
831 mutex_destroy(&ep->mtx);
832 free_uid(ep->user);
833 wakeup_source_unregister(ep->ws);
834 kfree(ep);
835 }
836
837 static int ep_eventpoll_release(struct inode *inode, struct file *file)
838 {
839 struct eventpoll *ep = file->private_data;
840
841 if (ep)
842 ep_free(ep);
843
844 return 0;
845 }
846
847 static __poll_t ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
848 void *priv);
849 static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
850 poll_table *pt);
851
852 /*
853 * Differs from ep_eventpoll_poll() in that internal callers already have
854 * the ep->mtx so we need to start from depth=1, such that mutex_lock_nested()
855 * is correctly annotated.
856 */
857 static __poll_t ep_item_poll(const struct epitem *epi, poll_table *pt,
858 int depth)
859 {
860 struct eventpoll *ep;
861 bool locked;
862
863 pt->_key = epi->event.events;
864 if (!is_file_epoll(epi->ffd.file))
865 return vfs_poll(epi->ffd.file, pt) & epi->event.events;
866
867 ep = epi->ffd.file->private_data;
868 poll_wait(epi->ffd.file, &ep->poll_wait, pt);
869 locked = pt && (pt->_qproc == ep_ptable_queue_proc);
870
871 return ep_scan_ready_list(epi->ffd.file->private_data,
872 ep_read_events_proc, &depth, depth,
873 locked) & epi->event.events;
874 }
875
876 static __poll_t ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
877 void *priv)
878 {
879 struct epitem *epi, *tmp;
880 poll_table pt;
881 int depth = *(int *)priv;
882
883 init_poll_funcptr(&pt, NULL);
884 depth++;
885
886 list_for_each_entry_safe(epi, tmp, head, rdllink) {
887 if (ep_item_poll(epi, &pt, depth)) {
888 return EPOLLIN | EPOLLRDNORM;
889 } else {
890 /*
891 * Item has been dropped into the ready list by the poll
892 * callback, but it's not actually ready, as far as the
893 * caller-requested events go. We can remove it here.
894 */
895 __pm_relax(ep_wakeup_source(epi));
896 list_del_init(&epi->rdllink);
897 }
898 }
899
900 return 0;
901 }
902
903 static __poll_t ep_eventpoll_poll(struct file *file, poll_table *wait)
904 {
905 struct eventpoll *ep = file->private_data;
906 int depth = 0;
907
908 /* Insert inside our poll wait queue */
909 poll_wait(file, &ep->poll_wait, wait);
910
911 /*
912 * Proceed to find out if wanted events are really available inside
913 * the ready list.
914 */
915 return ep_scan_ready_list(ep, ep_read_events_proc,
916 &depth, depth, false);
917 }
918
919 #ifdef CONFIG_PROC_FS
920 static void ep_show_fdinfo(struct seq_file *m, struct file *f)
921 {
922 struct eventpoll *ep = f->private_data;
923 struct rb_node *rbp;
924
925 mutex_lock(&ep->mtx);
926 for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
927 struct epitem *epi = rb_entry(rbp, struct epitem, rbn);
928 struct inode *inode = file_inode(epi->ffd.file);
929
930 seq_printf(m, "tfd: %8d events: %8x data: %16llx "
931 " pos:%lli ino:%lx sdev:%x\n",
932 epi->ffd.fd, epi->event.events,
933 (long long)epi->event.data,
934 (long long)epi->ffd.file->f_pos,
935 inode->i_ino, inode->i_sb->s_dev);
936 if (seq_has_overflowed(m))
937 break;
938 }
939 mutex_unlock(&ep->mtx);
940 }
941 #endif
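/*
 * The callback above produces one line per watched descriptor in
 * /proc/<pid>/fdinfo/<epoll-fd>, e.g. (illustrative values):
 *
 *	tfd:        5 events:       19 data:              42  pos:0 ino:1c93 sdev:e
 */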
942
943 /* File callbacks that implement the eventpoll file behaviour */
944 static const struct file_operations eventpoll_fops = {
945 #ifdef CONFIG_PROC_FS
946 .show_fdinfo = ep_show_fdinfo,
947 #endif
948 .release = ep_eventpoll_release,
949 .poll = ep_eventpoll_poll,
950 .llseek = noop_llseek,
951 };
952
953 /*
954 * This is called from eventpoll_release() to unlink files from the eventpoll
955 * interface. We need this facility to correctly clean up files that are
956 * closed without being removed from the eventpoll interface.
957 */
958 void eventpoll_release_file(struct file *file)
959 {
960 struct eventpoll *ep;
961 struct epitem *epi, *next;
962
963 /*
964 * We don't want to get "file->f_lock" because it is not
965 * necessary. It is not necessary because we're in the "struct file"
966 * cleanup path, and this means that no one is using this file anymore.
967 * So, for example, epoll_ctl() cannot hit here since if we reach this
968 * point, the file counter already went to zero and fget() would fail.
969 * The only hit might come from ep_free(), but holding the mutex
970 * will correctly serialize the operation. We do need to acquire
971 * "ep->mtx" after "epmutex" because ep_remove() requires it when called
972 * from anywhere but ep_free().
973 *
974 * Besides, ep_remove() acquires the lock, so we can't hold it here.
975 */
976 mutex_lock(&epmutex);
977 list_for_each_entry_safe(epi, next, &file->f_ep_links, fllink) {
978 ep = epi->ep;
979 mutex_lock_nested(&ep->mtx, 0);
980 ep_remove(ep, epi);
981 mutex_unlock(&ep->mtx);
982 }
983 mutex_unlock(&epmutex);
984 }
985
986 static int ep_alloc(struct eventpoll **pep)
987 {
988 int error;
989 struct user_struct *user;
990 struct eventpoll *ep;
991
992 user = get_current_user();
993 error = -ENOMEM;
994 ep = kzalloc(sizeof(*ep), GFP_KERNEL);
995 if (unlikely(!ep))
996 goto free_uid;
997
998 mutex_init(&ep->mtx);
999 rwlock_init(&ep->lock);
1000 init_waitqueue_head(&ep->wq);
1001 init_waitqueue_head(&ep->poll_wait);
1002 INIT_LIST_HEAD(&ep->rdllist);
1003 ep->rbr = RB_ROOT_CACHED;
1004 ep->ovflist = EP_UNACTIVE_PTR;
1005 ep->user = user;
1006
1007 *pep = ep;
1008
1009 return 0;
1010
1011 free_uid:
1012 free_uid(user);
1013 return error;
1014 }
1015
1016 /*
1017 * Search the file inside the eventpoll tree. The RB tree operations
1018 * are protected by the "mtx" mutex, and ep_find() must be called with
1019 * "mtx" held.
1020 */
1021 static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
1022 {
1023 int kcmp;
1024 struct rb_node *rbp;
1025 struct epitem *epi, *epir = NULL;
1026 struct epoll_filefd ffd;
1027
1028 ep_set_ffd(&ffd, file, fd);
1029 for (rbp = ep->rbr.rb_root.rb_node; rbp; ) {
1030 epi = rb_entry(rbp, struct epitem, rbn);
1031 kcmp = ep_cmp_ffd(&ffd, &epi->ffd);
1032 if (kcmp > 0)
1033 rbp = rbp->rb_right;
1034 else if (kcmp < 0)
1035 rbp = rbp->rb_left;
1036 else {
1037 epir = epi;
1038 break;
1039 }
1040 }
1041
1042 return epir;
1043 }
1044
1045 #ifdef CONFIG_CHECKPOINT_RESTORE
1046 static struct epitem *ep_find_tfd(struct eventpoll *ep, int tfd, unsigned long toff)
1047 {
1048 struct rb_node *rbp;
1049 struct epitem *epi;
1050
1051 for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
1052 epi = rb_entry(rbp, struct epitem, rbn);
1053 if (epi->ffd.fd == tfd) {
1054 if (toff == 0)
1055 return epi;
1056 else
1057 toff--;
1058 }
1059 cond_resched();
1060 }
1061
1062 return NULL;
1063 }
1064
1065 struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd,
1066 unsigned long toff)
1067 {
1068 struct file *file_raw;
1069 struct eventpoll *ep;
1070 struct epitem *epi;
1071
1072 if (!is_file_epoll(file))
1073 return ERR_PTR(-EINVAL);
1074
1075 ep = file->private_data;
1076
1077 mutex_lock(&ep->mtx);
1078 epi = ep_find_tfd(ep, tfd, toff);
1079 if (epi)
1080 file_raw = epi->ffd.file;
1081 else
1082 file_raw = ERR_PTR(-ENOENT);
1083 mutex_unlock(&ep->mtx);
1084
1085 return file_raw;
1086 }
1087 #endif /* CONFIG_CHECKPOINT_RESTORE */
1088
1089 /**
1090 * Adds a new entry to the tail of the list in a lockless way, i.e.
1091 * multiple CPUs are allowed to call this function concurrently.
1092 *
1093 * Beware: it is necessary to prevent any other modifications of the
1094 * existing list until all changes are completed, in other words
1095 * concurrent list_add_tail_lockless() calls should be protected
1096 * with a read lock, where write lock acts as a barrier which
1097 * makes sure all list_add_tail_lockless() calls are fully
1098 * completed.
1099 *
1100 * Also, an element can be locklessly added to the list only in one
1101 * direction, i.e. either to the tail or to the head, otherwise
1102 * concurrent access will corrupt the list.
1103 *
1104 * Returns %false if the element has already been added to the list, %true
1105 * otherwise.
1106 */
1107 static inline bool list_add_tail_lockless(struct list_head *new,
1108 struct list_head *head)
1109 {
1110 struct list_head *prev;
1111
1112 /*
1113 * This is a simple 'new->next = head' operation, but cmpxchg()
1114 * is used in order to detect that the same element has just been
1115 * added to the list from another CPU: the winner observes
1116 * new->next == new.
1117 */
1118 if (cmpxchg(&new->next, new, head) != new)
1119 return false;
1120
1121 /*
1122 * Initially ->next of a new element must be updated with the head
1123 * (we are inserting to the tail) and only then pointers are atomically
1124 * exchanged. XCHG guarantees memory ordering, thus ->next should be
1125 * updated before pointers are actually swapped and pointers are
1126 * swapped before prev->next is updated.
1127 */
1128
1129 prev = xchg(&head->prev, new);
1130
1131 /*
1132 * It is safe to modify prev->next and new->prev, because a new element
1133 * is added only to the tail and new->next is updated before XCHG.
1134 */
1135
1136 prev->next = new;
1137 new->prev = prev;
1138
1139 return true;
1140 }
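/*
 * Sketch of the duplicate-add detection above, assuming two CPUs race to
 * queue the same (initialized, currently unlinked) element, so that
 * new->next == new on entry:
 *
 *	CPU0: cmpxchg(&new->next, new, head) == new   -> wins, proceeds to xchg()
 *	CPU1: cmpxchg(&new->next, new, head) != new   -> loses, returns false
 *
 * Only the winner swaps the list tail, so the element is linked exactly once.
 */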
1141
1142 /**
1143 * Chains a new epi entry to the tail of the ep->ovflist in a lockless way,
1144 * i.e. multiple CPUs are allowed to call this function concurrently.
1145 *
1146 * Returns %false if the epi element has already been chained, %true otherwise.
1147 */
1148 static inline bool chain_epi_lockless(struct epitem *epi)
1149 {
1150 struct eventpoll *ep = epi->ep;
1151
1152 /* Check that the same epi has not been just chained from another CPU */
1153 if (cmpxchg(&epi->next, EP_UNACTIVE_PTR, NULL) != EP_UNACTIVE_PTR)
1154 return false;
1155
1156 /* Atomically exchange tail */
1157 epi->next = xchg(&ep->ovflist, epi);
1158
1159 return true;
1160 }
1161
1162 /*
1163 * This is the callback that is passed to the wait queue wakeup
1164 * mechanism. It is called by the stored file descriptors when they
1165 * have events to report.
1166 *
1167 * This callback takes a read lock in order not to contend with concurrent
1168 * events from other file descriptors, thus all modifications to ->rdllist
1169 * or ->ovflist are lockless. Read lock is paired with the write lock from
1170 * ep_scan_ready_list(), which stops all list modifications and guarantees
1171 * that lists state is seen correctly.
1172 *
1173 * Another thing worth mentioning is that ep_poll_callback() can be called
1174 * concurrently for the same @epi from different CPUs if the poll table was
1175 * initialized with several wait queue entries. Plural wakeups from different
1176 * CPUs of a single wait queue are serialized by wq.lock, but the case when
1177 * multiple wait queues are used should be detected accordingly. This is
1178 * detected using a cmpxchg() operation.
1179 */
1180 static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
1181 {
1182 int pwake = 0;
1183 struct epitem *epi = ep_item_from_wait(wait);
1184 struct eventpoll *ep = epi->ep;
1185 __poll_t pollflags = key_to_poll(key);
1186 unsigned long flags;
1187 int ewake = 0;
1188
1189 read_lock_irqsave(&ep->lock, flags);
1190
1191 ep_set_busy_poll_napi_id(epi);
1192
1193 /*
1194 * If the event mask does not contain any poll(2) event, we consider the
1195 * descriptor to be disabled. This condition is likely the effect of the
1196 * EPOLLONESHOT bit that disables the descriptor when an event is received,
1197 * until the next EPOLL_CTL_MOD is issued.
1198 */
1199 if (!(epi->event.events & ~EP_PRIVATE_BITS))
1200 goto out_unlock;
1201
1202 /*
1203 * Check the events coming with the callback. At this stage, not
1204 * every device reports the events in the "key" parameter of the
1205 * callback. We need to be able to handle both cases here, hence the
1206 * test for "key" != NULL before the event match test.
1207 */
1208 if (pollflags && !(pollflags & epi->event.events))
1209 goto out_unlock;
1210
1211 /*
1212 * If we are transferring events to userspace, we can hold no locks
1213 * (because we're accessing user memory, and because of linux f_op->poll()
1214 * semantics). All the events that happen during that period of time are
1215 * chained in ep->ovflist and requeued later on.
1216 */
1217 if (READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR) {
1218 if (epi->next == EP_UNACTIVE_PTR &&
1219 chain_epi_lockless(epi))
1220 ep_pm_stay_awake_rcu(epi);
1221 goto out_unlock;
1222 }
1223
1224 /* If this file is already in the ready list we exit soon */
1225 if (!ep_is_linked(epi) &&
1226 list_add_tail_lockless(&epi->rdllink, &ep->rdllist)) {
1227 ep_pm_stay_awake_rcu(epi);
1228 }
1229
1230 /*
1231 * Wake up (if active) both the eventpoll wait list and the ->poll()
1232 * wait list.
1233 */
1234 if (waitqueue_active(&ep->wq)) {
1235 if ((epi->event.events & EPOLLEXCLUSIVE) &&
1236 !(pollflags & POLLFREE)) {
1237 switch (pollflags & EPOLLINOUT_BITS) {
1238 case EPOLLIN:
1239 if (epi->event.events & EPOLLIN)
1240 ewake = 1;
1241 break;
1242 case EPOLLOUT:
1243 if (epi->event.events & EPOLLOUT)
1244 ewake = 1;
1245 break;
1246 case 0:
1247 ewake = 1;
1248 break;
1249 }
1250 }
1251 wake_up(&ep->wq);
1252 }
1253 if (waitqueue_active(&ep->poll_wait))
1254 pwake++;
1255
1256 out_unlock:
1257 read_unlock_irqrestore(&ep->lock, flags);
1258
1259 /* We have to call this outside the lock */
1260 if (pwake)
1261 ep_poll_safewake(&ep->poll_wait);
1262
1263 if (!(epi->event.events & EPOLLEXCLUSIVE))
1264 ewake = 1;
1265
1266 if (pollflags & POLLFREE) {
1267 /*
1268 * If we race with ep_remove_wait_queue() it can miss
1269 * ->whead = NULL and do another remove_wait_queue() after
1270 * us, so we can't use __remove_wait_queue().
1271 */
1272 list_del_init(&wait->entry);
1273 /*
1274 * ->whead != NULL protects us from the race with ep_free()
1275 * or ep_remove(), ep_remove_wait_queue() takes whead->lock
1276 * held by the caller. Once we nullify it, nothing protects
1277 * ep/epi or even wait.
1278 */
1279 smp_store_release(&ep_pwq_from_wait(wait)->whead, NULL);
1280 }
1281
1282 return ewake;
1283 }
1284
1285 /*
1286 * This is the callback that is used to add our wait queue to the
1287 * target file wakeup lists.
1288 */
1289 static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
1290 poll_table *pt)
1291 {
1292 struct epitem *epi = ep_item_from_epqueue(pt);
1293 struct eppoll_entry *pwq;
1294
1295 if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL))) {
1296 init_waitqueue_func_entry(&pwq->wait, ep_poll_callback);
1297 pwq->whead = whead;
1298 pwq->base = epi;
1299 if (epi->event.events & EPOLLEXCLUSIVE)
1300 add_wait_queue_exclusive(whead, &pwq->wait);
1301 else
1302 add_wait_queue(whead, &pwq->wait);
1303 list_add_tail(&pwq->llink, &epi->pwqlist);
1304 epi->nwait++;
1305 } else {
1306 /* We have to signal that an error occurred */
1307 epi->nwait = -1;
1308 }
1309 }
1310
1311 static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi)
1312 {
1313 int kcmp;
1314 struct rb_node **p = &ep->rbr.rb_root.rb_node, *parent = NULL;
1315 struct epitem *epic;
1316 bool leftmost = true;
1317
1318 while (*p) {
1319 parent = *p;
1320 epic = rb_entry(parent, struct epitem, rbn);
1321 kcmp = ep_cmp_ffd(&epi->ffd, &epic->ffd);
1322 if (kcmp > 0) {
1323 p = &parent->rb_right;
1324 leftmost = false;
1325 } else
1326 p = &parent->rb_left;
1327 }
1328 rb_link_node(&epi->rbn, parent, p);
1329 rb_insert_color_cached(&epi->rbn, &ep->rbr, leftmost);
1330 }
1331
1332
1333
1334 #define PATH_ARR_SIZE 5
1335 /*
1336 * These are the numbers of paths of length 1 to 5 that we allow to emanate
1337 * from a single file of interest. For example, we allow 1000 paths of length
1338 * 1, to emanate from each file of interest. This essentially represents the
1339 * potential wakeup paths, which need to be limited in order to avoid massive
1340 * uncontrolled wakeup storms. The common use case should be a single ep which
1341 * is connected to n file sources. In this case each file source has 1 path
1342 * of length 1. Thus, the numbers below should be more than sufficient. These
1343 * path limits are enforced during an EPOLL_CTL_ADD operation, since a modify
1344 * and delete can't add additional paths. Protected by the epmutex.
1345 */
1346 static const int path_limits[PATH_ARR_SIZE] = { 1000, 500, 100, 50, 10 };
1347 static int path_count[PATH_ARR_SIZE];
1348
1349 static int path_count_inc(int nests)
1350 {
1351 /* Allow an arbitrary number of depth 1 paths */
1352 if (nests == 0)
1353 return 0;
1354
1355 if (++path_count[nests] > path_limits[nests])
1356 return -1;
1357 return 0;
1358 }
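/*
 * Example of the accounting above: a socket watched directly by any number of
 * epoll fds only creates depth-1 paths, which the nests == 0 case leaves
 * unlimited. If each of those epoll fds is in turn watched by another epoll
 * fd, every such chain is a depth-2 path back to the socket, and the ADD that
 * would create the 501st of them fails (100 allowed at depth 3, 50 at
 * depth 4, 10 at depth 5).
 */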
1359
1360 static void path_count_init(void)
1361 {
1362 int i;
1363
1364 for (i = 0; i < PATH_ARR_SIZE; i++)
1365 path_count[i] = 0;
1366 }
1367
1368 static int reverse_path_check_proc(void *priv, void *cookie, int call_nests)
1369 {
1370 int error = 0;
1371 struct file *file = priv;
1372 struct file *child_file;
1373 struct epitem *epi;
1374
1375 /* CTL_DEL can remove links here, but that can't increase our count */
1376 rcu_read_lock();
1377 list_for_each_entry_rcu(epi, &file->f_ep_links, fllink) {
1378 child_file = epi->ep->file;
1379 if (is_file_epoll(child_file)) {
1380 if (list_empty(&child_file->f_ep_links)) {
1381 if (path_count_inc(call_nests)) {
1382 error = -1;
1383 break;
1384 }
1385 } else {
1386 error = ep_call_nested(&poll_loop_ncalls,
1387 reverse_path_check_proc,
1388 child_file, child_file,
1389 current);
1390 }
1391 if (error != 0)
1392 break;
1393 } else {
1394 printk(KERN_ERR "reverse_path_check_proc: "
1395 "file is not an ep!\n");
1396 }
1397 }
1398 rcu_read_unlock();
1399 return error;
1400 }
1401
1402 /**
1403 * reverse_path_check - The tfile_check_list is a list of file *, which have
1404 * links that are proposed to be newly added. We need to
1405 * make sure that those added links don't add too many
1406 * paths such that we will spend all our time waking up
1407 * eventpoll objects.
1408 *
1409 * Returns: zero if the proposed links don't create too many paths,
1410 * -1 otherwise.
1411 */
1412 static int reverse_path_check(void)
1413 {
1414 int error = 0;
1415 struct file *current_file;
1416
1417 /* let's call this for all tfiles */
1418 list_for_each_entry(current_file, &tfile_check_list, f_tfile_llink) {
1419 path_count_init();
1420 error = ep_call_nested(&poll_loop_ncalls,
1421 reverse_path_check_proc, current_file,
1422 current_file, current);
1423 if (error)
1424 break;
1425 }
1426 return error;
1427 }
1428
1429 static int ep_create_wakeup_source(struct epitem *epi)
1430 {
1431 const char *name;
1432 struct wakeup_source *ws;
1433
1434 if (!epi->ep->ws) {
1435 epi->ep->ws = wakeup_source_register(NULL, "eventpoll");
1436 if (!epi->ep->ws)
1437 return -ENOMEM;
1438 }
1439
1440 name = epi->ffd.file->f_path.dentry->d_name.name;
1441 ws = wakeup_source_register(NULL, name);
1442
1443 if (!ws)
1444 return -ENOMEM;
1445 rcu_assign_pointer(epi->ws, ws);
1446
1447 return 0;
1448 }
1449
1450 /* rare code path, only used when EPOLL_CTL_MOD removes a wakeup source */
1451 static noinline void ep_destroy_wakeup_source(struct epitem *epi)
1452 {
1453 struct wakeup_source *ws = ep_wakeup_source(epi);
1454
1455 RCU_INIT_POINTER(epi->ws, NULL);
1456
1457 /*
1458 * wait for ep_pm_stay_awake_rcu to finish, synchronize_rcu is
1459 * used internally by wakeup_source_remove, too (called by
1460 * wakeup_source_unregister), so we cannot use call_rcu
1461 */
1462 synchronize_rcu();
1463 wakeup_source_unregister(ws);
1464 }
1465
1466 /*
1467 * Must be called with "mtx" held.
1468 */
1469 static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
1470 struct file *tfile, int fd, int full_check)
1471 {
1472 int error, pwake = 0;
1473 __poll_t revents;
1474 long user_watches;
1475 struct epitem *epi;
1476 struct ep_pqueue epq;
1477
1478 lockdep_assert_irqs_enabled();
1479
1480 user_watches = atomic_long_read(&ep->user->epoll_watches);
1481 if (unlikely(user_watches >= max_user_watches))
1482 return -ENOSPC;
1483 if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL)))
1484 return -ENOMEM;
1485
1486 /* Item initialization follow here ... */
1487 INIT_LIST_HEAD(&epi->rdllink);
1488 INIT_LIST_HEAD(&epi->fllink);
1489 INIT_LIST_HEAD(&epi->pwqlist);
1490 epi->ep = ep;
1491 ep_set_ffd(&epi->ffd, tfile, fd);
1492 epi->event = *event;
1493 epi->nwait = 0;
1494 epi->next = EP_UNACTIVE_PTR;
1495 if (epi->event.events & EPOLLWAKEUP) {
1496 error = ep_create_wakeup_source(epi);
1497 if (error)
1498 goto error_create_wakeup_source;
1499 } else {
1500 RCU_INIT_POINTER(epi->ws, NULL);
1501 }
1502
1503 /* Initialize the poll table using the queue callback */
1504 epq.epi = epi;
1505 init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);
1506
1507 /*
1508 * Attach the item to the poll hooks and get current event bits.
1509 * We can safely use the file* here because its usage count has
1510 * been increased by the caller of this function. Note that after
1511 * this operation completes, the poll callback can start hitting
1512 * the new item.
1513 */
1514 revents = ep_item_poll(epi, &epq.pt, 1);
1515
1516 /*
1517 * We have to check if something went wrong during the poll wait queue
1518 * install process. Namely, an allocation for a wait queue failed due
1519 * to high memory pressure.
1520 */
1521 error = -ENOMEM;
1522 if (epi->nwait < 0)
1523 goto error_unregister;
1524
1525 /* Add the current item to the list of active epoll hook for this file */
1526 spin_lock(&tfile->f_lock);
1527 list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links);
1528 spin_unlock(&tfile->f_lock);
1529
1530 /*
1531 * Add the current item to the RB tree. All RB tree operations are
1532 * protected by "mtx", and ep_insert() is called with "mtx" held.
1533 */
1534 ep_rbtree_insert(ep, epi);
1535
1536 /* now check if we've created too many backpaths */
1537 error = -EINVAL;
1538 if (full_check && reverse_path_check())
1539 goto error_remove_epi;
1540
1541 /* We have to drop the new item inside our item list to keep track of it */
1542 write_lock_irq(&ep->lock);
1543
1544 /* record NAPI ID of new item if present */
1545 ep_set_busy_poll_napi_id(epi);
1546
1547 /* If the file is already "ready" we drop it inside the ready list */
1548 if (revents && !ep_is_linked(epi)) {
1549 list_add_tail(&epi->rdllink, &ep->rdllist);
1550 ep_pm_stay_awake(epi);
1551
1552 /* Notify waiting tasks that events are available */
1553 if (waitqueue_active(&ep->wq))
1554 wake_up(&ep->wq);
1555 if (waitqueue_active(&ep->poll_wait))
1556 pwake++;
1557 }
1558
1559 write_unlock_irq(&ep->lock);
1560
1561 atomic_long_inc(&ep->user->epoll_watches);
1562
1563 /* We have to call this outside the lock */
1564 if (pwake)
1565 ep_poll_safewake(&ep->poll_wait);
1566
1567 return 0;
1568
1569 error_remove_epi:
1570 spin_lock(&tfile->f_lock);
1571 list_del_rcu(&epi->fllink);
1572 spin_unlock(&tfile->f_lock);
1573
1574 rb_erase_cached(&epi->rbn, &ep->rbr);
1575
1576 error_unregister:
1577 ep_unregister_pollwait(ep, epi);
1578
1579 /*
1580 * We need to do this because an event could have arrived on some
1581 * allocated wait queue. Note that we don't care about the ep->ovflist
1582 * list, since that is used/cleaned only inside a section bound by "mtx".
1583 * And ep_insert() is called with "mtx" held.
1584 */
1585 write_lock_irq(&ep->lock);
1586 if (ep_is_linked(epi))
1587 list_del_init(&epi->rdllink);
1588 write_unlock_irq(&ep->lock);
1589
1590 wakeup_source_unregister(ep_wakeup_source(epi));
1591
1592 error_create_wakeup_source:
1593 kmem_cache_free(epi_cache, epi);
1594
1595 return error;
1596 }
1597
1598 /*
1599 * Modify the interest event mask by dropping an event if the new mask
1600 * has a match in the current file status. Must be called with "mtx" held.
1601 */
1602 static int ep_modify(struct eventpoll *ep, struct epitem *epi,
1603 const struct epoll_event *event)
1604 {
1605 int pwake = 0;
1606 poll_table pt;
1607
1608 lockdep_assert_irqs_enabled();
1609
1610 init_poll_funcptr(&pt, NULL);
1611
1612 /*
1613 * Set the new event interest mask before calling f_op->poll();
1614 * otherwise we might miss an event that happens between the
1615 * f_op->poll() call and the new event set registering.
1616 */
1617 epi->event.events = event->events; /* need barrier below */
1618 epi->event.data = event->data; /* protected by mtx */
1619 if (epi->event.events & EPOLLWAKEUP) {
1620 if (!ep_has_wakeup_source(epi))
1621 ep_create_wakeup_source(epi);
1622 } else if (ep_has_wakeup_source(epi)) {
1623 ep_destroy_wakeup_source(epi);
1624 }
1625
1626 /*
1627 * The following barrier has two effects:
1628 *
1629 * 1) Flush epi changes above to other CPUs. This ensures
1630 * we do not miss events from ep_poll_callback if an
1631 * event occurs immediately after we call f_op->poll().
1632 * We need this because we did not take ep->lock while
1633 * changing epi above (but ep_poll_callback does take
1634 * ep->lock).
1635 *
1636 * 2) We also need to ensure we do not miss _past_ events
1637 * when calling f_op->poll(). This barrier also
1638 * pairs with the barrier in wq_has_sleeper (see
1639 * comments for wq_has_sleeper).
1640 *
1641 * This barrier will now guarantee ep_poll_callback or f_op->poll
1642 * (or both) will notice the readiness of an item.
1643 */
1644 smp_mb();
1645
1646 /*
1647 * Get current event bits. We can safely use the file* here because
1648 * its usage count has been increased by the caller of this function.
1649 * If the item is "hot" and it is not registered inside the ready
1650 * list, push it inside.
1651 */
1652 if (ep_item_poll(epi, &pt, 1)) {
1653 write_lock_irq(&ep->lock);
1654 if (!ep_is_linked(epi)) {
1655 list_add_tail(&epi->rdllink, &ep->rdllist);
1656 ep_pm_stay_awake(epi);
1657
1658 /* Notify waiting tasks that events are available */
1659 if (waitqueue_active(&ep->wq))
1660 wake_up(&ep->wq);
1661 if (waitqueue_active(&ep->poll_wait))
1662 pwake++;
1663 }
1664 write_unlock_irq(&ep->lock);
1665 }
1666
1667 /* We have to call this outside the lock */
1668 if (pwake)
1669 ep_poll_safewake(&ep->poll_wait);
1670
1671 return 0;
1672 }
1673
1674 static __poll_t ep_send_events_proc(struct eventpoll *ep, struct list_head *head,
1675 void *priv)
1676 {
1677 struct ep_send_events_data *esed = priv;
1678 __poll_t revents;
1679 struct epitem *epi, *tmp;
1680 struct epoll_event __user *uevent = esed->events;
1681 struct wakeup_source *ws;
1682 poll_table pt;
1683
1684 init_poll_funcptr(&pt, NULL);
1685 esed->res = 0;
1686
1687 /*
1688 * We can loop without lock because we are passed a task private list.
1689 * Items cannot vanish during the loop because ep_scan_ready_list() is
1690 * holding "mtx" during this call.
1691 */
1692 lockdep_assert_held(&ep->mtx);
1693
1694 list_for_each_entry_safe(epi, tmp, head, rdllink) {
1695 if (esed->res >= esed->maxevents)
1696 break;
1697
1698 /*
1699 * Activate ep->ws before deactivating epi->ws to prevent
1700 * triggering auto-suspend here (in case we reactivate epi->ws
1701 * below).
1702 *
1703 * This could be rearranged to delay the deactivation of epi->ws
1704 * instead, but then epi->ws would temporarily be out of sync
1705 * with ep_is_linked().
1706 */
1707 ws = ep_wakeup_source(epi);
1708 if (ws) {
1709 if (ws->active)
1710 __pm_stay_awake(ep->ws);
1711 __pm_relax(ws);
1712 }
1713
1714 list_del_init(&epi->rdllink);
1715
1716 /*
1717 * If the event mask intersects the caller-requested one,
1718 * deliver the event to userspace. Again, ep_scan_ready_list()
1719 * is holding ep->mtx, so no operations coming from userspace
1720 * can change the item.
1721 */
1722 revents = ep_item_poll(epi, &pt, 1);
1723 if (!revents)
1724 continue;
1725
1726 if (__put_user(revents, &uevent->events) ||
1727 __put_user(epi->event.data, &uevent->data)) {
1728 list_add(&epi->rdllink, head);
1729 ep_pm_stay_awake(epi);
1730 if (!esed->res)
1731 esed->res = -EFAULT;
1732 return 0;
1733 }
1734 esed->res++;
1735 uevent++;
1736 if (epi->event.events & EPOLLONESHOT)
1737 epi->event.events &= EP_PRIVATE_BITS;
1738 else if (!(epi->event.events & EPOLLET)) {
1739 /*
1740 * If this file has been added with Level
1741 * Trigger mode, we need to insert it back inside
1742 * the ready list, so that the next call to
1743 * epoll_wait() will check again the events
1744 * availability. At this point, no one can insert
1745 * into ep->rdllist besides us. The epoll_ctl()
1746 * callers are locked out by
1747 * ep_scan_ready_list() holding "mtx" and the
1748 * poll callback will queue them in ep->ovflist.
1749 */
1750 list_add_tail(&epi->rdllink, &ep->rdllist);
1751 ep_pm_stay_awake(epi);
1752 }
1753 }
1754
1755 return 0;
1756 }
1757
1758 static int ep_send_events(struct eventpoll *ep,
1759 struct epoll_event __user *events, int maxevents)
1760 {
1761 struct ep_send_events_data esed;
1762
1763 esed.maxevents = maxevents;
1764 esed.events = events;
1765
1766 ep_scan_ready_list(ep, ep_send_events_proc, &esed, 0, false);
1767 return esed.res;
1768 }
1769
1770 static inline struct timespec64 ep_set_mstimeout(long ms)
1771 {
1772 struct timespec64 now, ts = {
1773 .tv_sec = ms / MSEC_PER_SEC,
1774 .tv_nsec = NSEC_PER_MSEC * (ms % MSEC_PER_SEC),
1775 };
1776
1777 ktime_get_ts64(&now);
1778 return timespec64_add_safe(now, ts);
1779 }
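/*
 * Worked example of the conversion above: ep_set_mstimeout(2500) builds
 * ts = { .tv_sec = 2, .tv_nsec = 500000000 } and returns "now + ts" as an
 * absolute expiry time; timespec64_add_safe() saturates instead of
 * overflowing for very large timeouts.
 */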
1780
1781 /**
1782 * ep_poll - Retrieves ready events, and delivers them to the caller supplied
1783 * event buffer.
1784 *
1785 * @ep: Pointer to the eventpoll context.
1786 * @events: Pointer to the userspace buffer where the ready events should be
1787 * stored.
1788 * @maxevents: Size (in terms of number of events) of the caller event buffer.
1789 * @timeout: Maximum timeout for the ready events fetch operation, in
1790 * milliseconds. If the @timeout is zero, the function will not block,
1791 * while if the @timeout is less than zero, the function will block
1792 * until at least one event has been retrieved (or an error
1793 * occurred).
1794 *
1795 * Returns: the number of ready events which have been fetched, or an
1796 * error code in case of error.
1797 */
1798 static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
1799 int maxevents, long timeout)
1800 {
1801 int res = 0, eavail, timed_out = 0;
1802 u64 slack = 0;
1803 bool waiter = false;
1804 wait_queue_entry_t wait;
1805 ktime_t expires, *to = NULL;
1806
1807 lockdep_assert_irqs_enabled();
1808
1809 if (timeout > 0) {
1810 struct timespec64 end_time = ep_set_mstimeout(timeout);
1811
1812 slack = select_estimate_accuracy(&end_time);
1813 to = &expires;
1814 *to = timespec64_to_ktime(end_time);
1815 } else if (timeout == 0) {
1816 /*
1817 * Avoid the unnecessary trip to the wait queue loop, if the
1818 * caller specified a non-blocking operation. We still need the
1819 * lock because we could race with the irq callback and not see
1820 * an epi being added to the ready list, thus incorrectly
1821 * returning 0 back to userspace.
1822 */
1823 timed_out = 1;
1824
1825 write_lock_irq(&ep->lock);
1826 eavail = ep_events_available(ep);
1827 write_unlock_irq(&ep->lock);
1828
1829 goto send_events;
1830 }
1831
1832 fetch_events:
1833
1834 if (!ep_events_available(ep))
1835 ep_busy_loop(ep, timed_out);
1836
1837 eavail = ep_events_available(ep);
1838 if (eavail)
1839 goto send_events;
1840
1841 /*
1842 * Busy poll timed out. Drop NAPI ID for now; we can add
1843 * it back in when we have moved a socket with a valid NAPI
1844 * ID onto the ready list.
1845 */
1846 ep_reset_busy_poll_napi_id(ep);
1847
1848 /*
1849 * We don't have any available event to return to the caller. We need
1850 * to sleep here, and we will be woken by ep_poll_callback() when events
1851 * become available.
1852 */
1853 if (!waiter) {
1854 waiter = true;
1855 init_waitqueue_entry(&wait, current);
1856
1857 spin_lock_irq(&ep->wq.lock);
1858 __add_wait_queue_exclusive(&ep->wq, &wait);
1859 spin_unlock_irq(&ep->wq.lock);
1860 }
1861
1862 for (;;) {
1863 /*
1864 * We don't want to sleep if the ep_poll_callback() sends us
1865 * a wakeup in between. That's why we set the task state
1866 * to TASK_INTERRUPTIBLE before doing the checks.
1867 */
1868 set_current_state(TASK_INTERRUPTIBLE);
1869 /*
1870 * Always short-circuit for fatal signals to allow
1871 * threads to make a timely exit without the chance of
1872 * finding more events available and fetching
1873 * repeatedly.
1874 */
1875 if (fatal_signal_pending(current)) {
1876 res = -EINTR;
1877 break;
1878 }
1879
1880 eavail = ep_events_available(ep);
1881 if (eavail)
1882 break;
1883 if (signal_pending(current)) {
1884 res = -EINTR;
1885 break;
1886 }
1887
1888 if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS)) {
1889 timed_out = 1;
1890 break;
1891 }
1892 }
1893
1894 __set_current_state(TASK_RUNNING);
1895
1896 send_events:
1897 /*
1898 * Try to transfer events to user space. In case we get 0 events and
1899 * there's still timeout left over, we try again in search of
1900 * more luck.
1901 */
1902 if (!res && eavail &&
1903 !(res = ep_send_events(ep, events, maxevents)) && !timed_out)
1904 goto fetch_events;
1905
1906 if (waiter) {
1907 spin_lock_irq(&ep->wq.lock);
1908 __remove_wait_queue(&ep->wq, &wait);
1909 spin_unlock_irq(&ep->wq.lock);
1910 }
1911
1912 return res;
1913 }
1914
1915 /**
1916 * ep_loop_check_proc - Callback function to be passed to the @ep_call_nested()
1917 * API, to verify that adding an epoll file inside another
1918 * epoll structure does not violate the constraints, in
1919 * terms of closed loops, or too deep chains (which can
1920 * result in excessive stack usage).
1921 *
1922 * @priv: Pointer to the epoll file to be currently checked.
1923 * @cookie: Original cookie for this call. This is the top-of-the-chain epoll
1924 * data structure pointer.
1925 * @call_nests: Current depth of the @ep_call_nested() call stack.
1926 *
1927 * Returns: Returns zero if adding the epoll @file inside current epoll
1928 * structure @ep does not violate the constraints, or -1 otherwise.
1929 */
1930 static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
1931 {
1932 int error = 0;
1933 struct file *file = priv;
1934 struct eventpoll *ep = file->private_data;
1935 struct eventpoll *ep_tovisit;
1936 struct rb_node *rbp;
1937 struct epitem *epi;
1938
1939 mutex_lock_nested(&ep->mtx, call_nests + 1);
1940 ep->visited = 1;
1941 list_add(&ep->visited_list_link, &visited_list);
1942 for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
1943 epi = rb_entry(rbp, struct epitem, rbn);
1944 if (unlikely(is_file_epoll(epi->ffd.file))) {
1945 ep_tovisit = epi->ffd.file->private_data;
1946 if (ep_tovisit->visited)
1947 continue;
1948 error = ep_call_nested(&poll_loop_ncalls,
1949 ep_loop_check_proc, epi->ffd.file,
1950 ep_tovisit, current);
1951 if (error != 0)
1952 break;
1953 } else {
1954 /*
1955 * If we've reached a file that is not associated with
1956 * an ep, then we need to check if the newly added
1957 * links are going to add too many wakeup paths. We do
1958 * this by adding it to the tfile_check_list, if it's
1959 * not already there, and calling reverse_path_check()
1960 * during ep_insert().
1961 */
1962 if (list_empty(&epi->ffd.file->f_tfile_llink))
1963 list_add(&epi->ffd.file->f_tfile_llink,
1964 &tfile_check_list);
1965 }
1966 }
1967 mutex_unlock(&ep->mtx);
1968
1969 return error;
1970 }
1971
1972 /**
1973 * ep_loop_check - Performs a check to verify that adding an epoll file (@file)
1974 * inside another epoll file (represented by @ep) does not create
1975 * closed loops or too deep chains.
1976 *
1977 * @ep: Pointer to the epoll private data structure.
1978 * @file: Pointer to the epoll file to be checked.
1979 *
1980 * Returns: Returns zero if adding the epoll @file inside current epoll
1981 * structure @ep does not violate the constraints, or -1 otherwise.
1982 */
1983 static int ep_loop_check(struct eventpoll *ep, struct file *file)
1984 {
1985 int ret;
1986 struct eventpoll *ep_cur, *ep_next;
1987
1988 ret = ep_call_nested(&poll_loop_ncalls,
1989 ep_loop_check_proc, file, ep, current);
1990 /* clear visited list */
1991 list_for_each_entry_safe(ep_cur, ep_next, &visited_list,
1992 visited_list_link) {
1993 ep_cur->visited = 0;
1994 list_del(&ep_cur->visited_list_link);
1995 }
1996 return ret;
1997 }
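/*
 * Illustrative user-space sketch (editor's addition, not part of this file):
 * the cycle rejected by ep_loop_check() surfaces to user space as ELOOP on
 * the second of two mutually nesting EPOLL_CTL_ADD calls.
 */
#include <sys/epoll.h>
#include <errno.h>
#include <assert.h>

static void nesting_loop_demo(void)
{
        int a = epoll_create1(0);
        int b = epoll_create1(0);
        struct epoll_event ev = { .events = EPOLLIN };

        ev.data.fd = b;
        assert(epoll_ctl(a, EPOLL_CTL_ADD, b, &ev) == 0);   /* a watches b: allowed */

        ev.data.fd = a;
        assert(epoll_ctl(b, EPOLL_CTL_ADD, a, &ev) == -1);  /* b watches a: cycle */
        assert(errno == ELOOP);
}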
1998
1999 static void clear_tfile_check_list(void)
2000 {
2001 struct file *file;
2002
2003 /* first clear the tfile_check_list */
2004 while (!list_empty(&tfile_check_list)) {
2005 file = list_first_entry(&tfile_check_list, struct file,
2006 f_tfile_llink);
2007 list_del_init(&file->f_tfile_llink);
2008 }
2009 INIT_LIST_HEAD(&tfile_check_list);
2010 }
2011
2012 /*
2013 * Open an eventpoll file descriptor.
2014 */
2015 static int do_epoll_create(int flags)
2016 {
2017 int error, fd;
2018 struct eventpoll *ep = NULL;
2019 struct file *file;
2020
2021 /* Check the EPOLL_* constant for consistency. */
2022 BUILD_BUG_ON(EPOLL_CLOEXEC != O_CLOEXEC);
2023
2024 if (flags & ~EPOLL_CLOEXEC)
2025 return -EINVAL;
2026 /*
2027 * Create the internal data structure ("struct eventpoll").
2028 */
2029 error = ep_alloc(&ep);
2030 if (error < 0)
2031 return error;
2032 /*
2033 * Creates all the items needed to set up an eventpoll file. That is,
2034 * a file structure and a free file descriptor.
2035 */
2036 fd = get_unused_fd_flags(O_RDWR | (flags & O_CLOEXEC));
2037 if (fd < 0) {
2038 error = fd;
2039 goto out_free_ep;
2040 }
2041 file = anon_inode_getfile("[eventpoll]", &eventpoll_fops, ep,
2042 O_RDWR | (flags & O_CLOEXEC));
2043 if (IS_ERR(file)) {
2044 error = PTR_ERR(file);
2045 goto out_free_fd;
2046 }
2047 ep->file = file;
2048 fd_install(fd, file);
2049 return fd;
2050
2051 out_free_fd:
2052 put_unused_fd(fd);
2053 out_free_ep:
2054 ep_free(ep);
2055 return error;
2056 }
2057
2058 SYSCALL_DEFINE1(epoll_create1, int, flags)
2059 {
2060 return do_epoll_create(flags);
2061 }
2062
2063 SYSCALL_DEFINE1(epoll_create, int, size)
2064 {
2065 if (size <= 0)
2066 return -EINVAL;
2067
2068 return do_epoll_create(0);
2069 }
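/*
 * Illustrative user-space sketch (editor's addition, not part of this file):
 * both entry points funnel into do_epoll_create(); the legacy size argument
 * is only checked to be positive and is otherwise ignored.
 */
#include <sys/epoll.h>
#include <stdio.h>

static void create_demo(void)
{
        int ep1 = epoll_create1(EPOLL_CLOEXEC);  /* flags validated against EPOLL_CLOEXEC */
        int ep2 = epoll_create(1);               /* size <= 0 would yield -EINVAL */

        if (ep1 < 0 || ep2 < 0)
                perror("epoll_create");
}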
2070
2071 static inline int epoll_mutex_lock(struct mutex *mutex, int depth,
2072 bool nonblock)
2073 {
2074 if (!nonblock) {
2075 mutex_lock_nested(mutex, depth);
2076 return 0;
2077 }
2078 if (mutex_trylock(mutex))
2079 return 0;
2080 return -EAGAIN;
2081 }
2082
2083 int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
2084 bool nonblock)
2085 {
2086 int error;
2087 int full_check = 0;
2088 struct fd f, tf;
2089 struct eventpoll *ep;
2090 struct epitem *epi;
2091 struct eventpoll *tep = NULL;
2092
2093 error = -EBADF;
2094 f = fdget(epfd);
2095 if (!f.file)
2096 goto error_return;
2097
2098 /* Get the "struct file *" for the target file */
2099 tf = fdget(fd);
2100 if (!tf.file)
2101 goto error_fput;
2102
2103 /* The target file descriptor must support poll */
2104 error = -EPERM;
2105 if (!file_can_poll(tf.file))
2106 goto error_tgt_fput;
2107
2108 /* Check if EPOLLWAKEUP is allowed */
2109 if (ep_op_has_event(op))
2110 ep_take_care_of_epollwakeup(epds);
2111
2112 /*
2113 * We have to check that the file structure underneath the file descriptor
2114 * the user passed to us _is_ an eventpoll file. And also we do not permit
2115 * adding an epoll file descriptor inside itself.
2116 */
2117 error = -EINVAL;
2118 if (f.file == tf.file || !is_file_epoll(f.file))
2119 goto error_tgt_fput;
2120
2121 /*
2122 * epoll adds to the wakeup queue at EPOLL_CTL_ADD time only,
2123 * so EPOLLEXCLUSIVE is not allowed for an EPOLL_CTL_MOD operation.
2124 * Also, we do not currently support nested exclusive wakeups.
2125 */
2126 if (ep_op_has_event(op) && (epds->events & EPOLLEXCLUSIVE)) {
2127 if (op == EPOLL_CTL_MOD)
2128 goto error_tgt_fput;
2129 if (op == EPOLL_CTL_ADD && (is_file_epoll(tf.file) ||
2130 (epds->events & ~EPOLLEXCLUSIVE_OK_BITS)))
2131 goto error_tgt_fput;
2132 }
2133
2134 /*
2135 * At this point it is safe to assume that the "private_data" contains
2136 * our own data structure.
2137 */
2138 ep = f.file->private_data;
2139
2140 /*
2141 * When we insert an epoll file descriptor inside another epoll file
2142 * descriptor, there is the chance of creating closed loops, which are
2143 * better handled here than in more critical paths. While we are
2144 * checking for loops we also determine the list of files reachable
2145 * and hang them on the tfile_check_list, so we can check that we
2146 * haven't created too many possible wakeup paths.
2147 *
2148 * We do not need to take the global 'epmutex' on EPOLL_CTL_ADD when
2149 * the epoll file descriptor is attaching directly to a wakeup source,
2150 * unless the epoll file descriptor is nested. The purpose of taking the
2151 * 'epmutex' on add is to prevent complex topologies such as loops and
2152 * deep wakeup paths from forming in parallel through multiple
2153 * EPOLL_CTL_ADD operations.
2154 */
2155 error = epoll_mutex_lock(&ep->mtx, 0, nonblock);
2156 if (error)
2157 goto error_tgt_fput;
2158 if (op == EPOLL_CTL_ADD) {
2159 if (!list_empty(&f.file->f_ep_links) ||
2160 is_file_epoll(tf.file)) {
2161 mutex_unlock(&ep->mtx);
2162 error = epoll_mutex_lock(&epmutex, 0, nonblock);
2163 if (error)
2164 goto error_tgt_fput;
2165 full_check = 1;
2166 if (is_file_epoll(tf.file)) {
2167 error = -ELOOP;
2168 if (ep_loop_check(ep, tf.file) != 0) {
2169 clear_tfile_check_list();
2170 goto error_tgt_fput;
2171 }
2172 } else
2173 list_add(&tf.file->f_tfile_llink,
2174 &tfile_check_list);
2175 error = epoll_mutex_lock(&ep->mtx, 0, nonblock);
2176 if (error) {
2177 out_del:
2178 list_del(&tf.file->f_tfile_llink);
2179 goto error_tgt_fput;
2180 }
2181 if (is_file_epoll(tf.file)) {
2182 tep = tf.file->private_data;
2183 error = epoll_mutex_lock(&tep->mtx, 1, nonblock);
2184 if (error) {
2185 mutex_unlock(&ep->mtx);
2186 goto out_del;
2187 }
2188 }
2189 }
2190 }
2191
2192 /*
2193 * Try to look up the file inside our RB tree. Since we grabbed "mtx"
2194 * above, we can be sure to be able to use the item looked up by
2195 * ep_find() till we release the mutex.
2196 */
2197 epi = ep_find(ep, tf.file, fd);
2198
2199 error = -EINVAL;
2200 switch (op) {
2201 case EPOLL_CTL_ADD:
2202 if (!epi) {
2203 epds->events |= EPOLLERR | EPOLLHUP;
2204 error = ep_insert(ep, epds, tf.file, fd, full_check);
2205 } else
2206 error = -EEXIST;
2207 if (full_check)
2208 clear_tfile_check_list();
2209 break;
2210 case EPOLL_CTL_DEL:
2211 if (epi)
2212 error = ep_remove(ep, epi);
2213 else
2214 error = -ENOENT;
2215 break;
2216 case EPOLL_CTL_MOD:
2217 if (epi) {
2218 if (!(epi->event.events & EPOLLEXCLUSIVE)) {
2219 epds->events |= EPOLLERR | EPOLLHUP;
2220 error = ep_modify(ep, epi, epds);
2221 }
2222 } else
2223 error = -ENOENT;
2224 break;
2225 }
2226 if (tep != NULL)
2227 mutex_unlock(&tep->mtx);
2228 mutex_unlock(&ep->mtx);
2229
2230 error_tgt_fput:
2231 if (full_check)
2232 mutex_unlock(&epmutex);
2233
2234 fdput(tf);
2235 error_fput:
2236 fdput(f);
2237 error_return:
2238
2239 return error;
2240 }
2241
2242 /*
2243 * The following function implements the controller interface for
2244 * the eventpoll file that enables the insertion/removal/change of
2245 * file descriptors inside the interest set.
2246 */
2247 SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
2248 struct epoll_event __user *, event)
2249 {
2250 struct epoll_event epds;
2251
2252 if (ep_op_has_event(op) &&
2253 copy_from_user(&epds, event, sizeof(struct epoll_event)))
2254 return -EFAULT;
2255
2256 return do_epoll_ctl(epfd, op, fd, &epds, false);
2257 }
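/*
 * Illustrative user-space sketch (editor's addition, not part of this file):
 * registering interest in a descriptor. As the EPOLL_CTL_ADD/MOD cases above
 * show, EPOLLERR and EPOLLHUP are ORed in by the kernel whether or not the
 * caller asks for them.
 */
#include <sys/epoll.h>
#include <stdio.h>

static int watch_fd(int epfd, int fd)
{
        struct epoll_event ev = {
                .events = EPOLLIN,      /* level-triggered read readiness */
                .data.fd = fd,          /* opaque data echoed back by epoll_wait() */
        };

        if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev) < 0) {
                perror("EPOLL_CTL_ADD");        /* e.g. EEXIST if fd is already watched */
                return -1;
        }
        return 0;
}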
2258
2259 /*
2260 * Implement the event wait interface for the eventpoll file. It is the kernel
2261 * part of the user space epoll_wait(2).
2262 */
2263 static int do_epoll_wait(int epfd, struct epoll_event __user *events,
2264 int maxevents, int timeout)
2265 {
2266 int error;
2267 struct fd f;
2268 struct eventpoll *ep;
2269
2270 /* The maximum number of events must be greater than zero */
2271 if (maxevents <= 0 || maxevents > EP_MAX_EVENTS)
2272 return -EINVAL;
2273
2274 /* Verify that the area passed by the user is writeable */
2275 if (!access_ok(events, maxevents * sizeof(struct epoll_event)))
2276 return -EFAULT;
2277
2278 /* Get the "struct file *" for the eventpoll file */
2279 f = fdget(epfd);
2280 if (!f.file)
2281 return -EBADF;
2282
2283 /*
2284 * We have to check that the file structure underneath the fd
2285 * the user passed to us _is_ an eventpoll file.
2286 */
2287 error = -EINVAL;
2288 if (!is_file_epoll(f.file))
2289 goto error_fput;
2290
2291 /*
2292 * At this point it is safe to assume that the "private_data" contains
2293 * our own data structure.
2294 */
2295 ep = f.file->private_data;
2296
2297 /* Time to fish for events ... */
2298 error = ep_poll(ep, events, maxevents, timeout);
2299
2300 error_fput:
2301 fdput(f);
2302 return error;
2303 }
2304
2305 SYSCALL_DEFINE4(epoll_wait, int, epfd, struct epoll_event __user *, events,
2306 int, maxevents, int, timeout)
2307 {
2308 return do_epoll_wait(epfd, events, maxevents, timeout);
2309 }
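/*
 * Illustrative user-space sketch (editor's addition, not part of this file):
 * the timeout argument is handed straight to ep_poll(); 0 returns immediately
 * with whatever is ready, a negative value blocks until an event or a signal.
 */
#include <sys/epoll.h>
#include <stdio.h>

static void drain_events(int epfd)
{
        struct epoll_event events[64];
        int i, n;

        n = epoll_wait(epfd, events, 64, -1);   /* block until something is ready */
        for (i = 0; i < n; i++)
                printf("fd %d ready, events 0x%x\n",
                       events[i].data.fd, events[i].events);
}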
2310
2311 /*
2312 * Implement the event wait interface for the eventpoll file. It is the kernel
2313 * part of the user space epoll_pwait(2).
2314 */
2315 SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events,
2316 int, maxevents, int, timeout, const sigset_t __user *, sigmask,
2317 size_t, sigsetsize)
2318 {
2319 int error;
2320
2321 /*
2322 * If the caller wants a certain signal mask to be set during the wait,
2323 * we apply it here.
2324 */
2325 error = set_user_sigmask(sigmask, sigsetsize);
2326 if (error)
2327 return error;
2328
2329 error = do_epoll_wait(epfd, events, maxevents, timeout);
2330 restore_saved_sigmask_unless(error == -EINTR);
2331
2332 return error;
2333 }
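/*
 * Illustrative user-space sketch (editor's addition, not part of this file):
 * the mask passed to epoll_pwait() replaces the caller's signal mask only for
 * the duration of the wait, which the code above implements with
 * set_user_sigmask() and restore_saved_sigmask_unless(). A common pattern
 * keeps a signal blocked in normal operation and unblocks it atomically while
 * waiting.
 */
#include <sys/epoll.h>
#include <signal.h>

static int wait_allowing_signals(int epfd, struct epoll_event *evs, int max)
{
        sigset_t waitmask;

        sigemptyset(&waitmask);         /* nothing blocked while we wait */
        return epoll_pwait(epfd, evs, max, -1, &waitmask);
}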
2334
2335 #ifdef CONFIG_COMPAT
2336 COMPAT_SYSCALL_DEFINE6(epoll_pwait, int, epfd,
2337 struct epoll_event __user *, events,
2338 int, maxevents, int, timeout,
2339 const compat_sigset_t __user *, sigmask,
2340 compat_size_t, sigsetsize)
2341 {
2342 long err;
2343
2344 /*
2345 * If the caller wants a certain signal mask to be set during the wait,
2346 * we apply it here.
2347 */
2348 err = set_compat_user_sigmask(sigmask, sigsetsize);
2349 if (err)
2350 return err;
2351
2352 err = do_epoll_wait(epfd, events, maxevents, timeout);
2353 restore_saved_sigmask_unless(err == -EINTR);
2354
2355 return err;
2356 }
2357 #endif
2358
2359 static int __init eventpoll_init(void)
2360 {
2361 struct sysinfo si;
2362
2363 si_meminfo(&si);
2364 /*
2365 * Allows the top 4% of lowmem to be allocated for epoll watches (per user).
2366 */
2367 max_user_watches = (((si.totalram - si.totalhigh) / 25) << PAGE_SHIFT) /
2368 EP_ITEM_COST;
2369 BUG_ON(max_user_watches < 0);
2370
2371 /*
2372 * Initialize the structure used to perform epoll file descriptor
2373 * inclusion loops checks.
2374 */
2375 ep_nested_calls_init(&poll_loop_ncalls);
2376
2377 /*
2378 * We can have many thousands of epitems, so prevent this from
2379 * using an extra cache line on 64-bit (and smaller) CPUs
2380 */
2381 BUILD_BUG_ON(sizeof(void *) <= 8 && sizeof(struct epitem) > 128);
2382
2383 /* Allocates slab cache used to allocate "struct epitem" items */
2384 epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem),
2385 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, NULL);
2386
2387 /* Allocates slab cache used to allocate "struct eppoll_entry" */
2388 pwq_cache = kmem_cache_create("eventpoll_pwq",
2389 sizeof(struct eppoll_entry), 0, SLAB_PANIC|SLAB_ACCOUNT, NULL);
2390
2391 return 0;
2392 }
2393 fs_initcall(eventpoll_init);
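/*
 * Worked example (editor's addition, not part of this file): the
 * max_user_watches formula above reserves 4% of lowmem. Assuming 4 KiB pages,
 * 1 GiB of lowmem is 262144 pages; divided by 25 that is ~10485 pages,
 * i.e. ~41 MiB, which shifted to bytes and divided by EP_ITEM_COST gives the
 * per-user watch limit.
 */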