FreeBSD/Linux Kernel Cross Reference
sys/fs/eventpoll.c


    1 /*
    2  *  fs/eventpoll.c (Efficient event retrieval implementation)
    3  *  Copyright (C) 2001,...,2009  Davide Libenzi
    4  *
    5  *  This program is free software; you can redistribute it and/or modify
    6  *  it under the terms of the GNU General Public License as published by
    7  *  the Free Software Foundation; either version 2 of the License, or
    8  *  (at your option) any later version.
    9  *
   10  *  Davide Libenzi <davidel@xmailserver.org>
   11  *
   12  */
   13 
   14 #include <linux/init.h>
   15 #include <linux/kernel.h>
   16 #include <linux/sched.h>
   17 #include <linux/fs.h>
   18 #include <linux/file.h>
   19 #include <linux/signal.h>
   20 #include <linux/errno.h>
   21 #include <linux/mm.h>
   22 #include <linux/slab.h>
   23 #include <linux/poll.h>
   24 #include <linux/string.h>
   25 #include <linux/list.h>
   26 #include <linux/hash.h>
   27 #include <linux/spinlock.h>
   28 #include <linux/syscalls.h>
   29 #include <linux/rbtree.h>
   30 #include <linux/wait.h>
   31 #include <linux/eventpoll.h>
   32 #include <linux/mount.h>
   33 #include <linux/bitops.h>
   34 #include <linux/mutex.h>
   35 #include <linux/anon_inodes.h>
   36 #include <linux/device.h>
   37 #include <asm/uaccess.h>
   38 #include <asm/io.h>
   39 #include <asm/mman.h>
   40 #include <linux/atomic.h>
   41 #include <linux/proc_fs.h>
   42 #include <linux/seq_file.h>
   43 
   44 /*
   45  * LOCKING:
   46  * There are three levels of locking required by epoll:
   47  *
   48  * 1) epmutex (mutex)
   49  * 2) ep->mtx (mutex)
   50  * 3) ep->lock (spinlock)
   51  *
   52  * The acquire order is the one listed above, from 1 to 3.
   53  * We need a spinlock (ep->lock) because we manipulate objects
   54  * from inside the poll callback, that might be triggered from
   55  * a wake_up() that in turn might be called from IRQ context.
   56  * So we can't sleep inside the poll callback and hence we need
   57  * a spinlock. During the event transfer loop (from kernel to
   58  * user space) we could end up sleeping due to a copy_to_user(), so
   59  * we need a lock that will allow us to sleep. This lock is a
   60  * mutex (ep->mtx). It is acquired during the event transfer loop,
   61  * during epoll_ctl(EPOLL_CTL_DEL) and during eventpoll_release_file().
   62  * Then we also need a global mutex to serialize eventpoll_release_file()
   63  * and ep_free().
   64  * This mutex is acquired by ep_free() during the epoll file
   65  * cleanup path and it is also acquired by eventpoll_release_file()
   66  * if a file has been pushed inside an epoll set and it is then
   67  * close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL).
   68  * It is also acquired when inserting an epoll fd onto another epoll
   69  * fd. We do this so that we walk the epoll tree and ensure that this
   70  * insertion does not create a cycle of epoll file descriptors, which
   71  * could lead to deadlock. We need a global mutex to prevent two
   72  * simultaneous inserts (A into B and B into A) from racing and
   73  * constructing a cycle without either insert noticing that it is
   74  * about to do so.
   75  * It is necessary to acquire multiple "ep->mtx"es at once in the
   76  * case when one epoll fd is added to another. In this case, we
   77  * always acquire the locks in the order of nesting (i.e. after
   78  * epoll_ctl(e1, EPOLL_CTL_ADD, e2), e1->mtx will always be acquired
   79  * before e2->mtx). Since we disallow cycles of epoll file
   80  * descriptors, this ensures that the mutexes are well-ordered. In
   81  * order to communicate this nesting to lockdep, when walking a tree
   82  * of epoll file descriptors, we use the current recursion depth as
   83  * the lockdep subkey.
   84  * It is possible to drop the "ep->mtx" and to use the global
   85  * mutex "epmutex" (together with "ep->lock") to have it working,
   86  * but having "ep->mtx" will make the interface more scalable.
   87  * Events that require holding "epmutex" are very rare, while for
   88  * normal operations the epoll private "ep->mtx" will guarantee
   89  * a better scalability.
   90  */
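      /*
       * Illustrative userspace sketch (not part of this file) of the nested
       * epoll case the locking comment above describes: one epoll fd added to
       * another with EPOLL_CTL_ADD. Error handling is omitted.
       *
       *   #include <sys/epoll.h>
       *
       *   int e1 = epoll_create1(0);
       *   int e2 = epoll_create1(0);
       *   struct epoll_event ev = { .events = EPOLLIN, .data.fd = e2 };
       *
       *   // While servicing this call, e1->mtx is acquired before e2->mtx.
       *   epoll_ctl(e1, EPOLL_CTL_ADD, e2, &ev);
       *
       *   // The reverse insertion would close a cycle; epoll_ctl(2) rejects
       *   // it with ELOOP, which is what the cycle walk under epmutex detects.
       *   // epoll_ctl(e2, EPOLL_CTL_ADD, e1, &ev);
       */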
   91 
   92 /* Epoll private bits inside the event mask */
   93 #define EP_PRIVATE_BITS (EPOLLWAKEUP | EPOLLONESHOT | EPOLLET)
   94 
   95 /* Maximum number of nesting levels allowed inside epoll sets */
   96 #define EP_MAX_NESTS 4
   97 
   98 #define EP_MAX_EVENTS (INT_MAX / sizeof(struct epoll_event))
   99 
  100 #define EP_UNACTIVE_PTR ((void *) -1L)
  101 
  102 #define EP_ITEM_COST (sizeof(struct epitem) + sizeof(struct eppoll_entry))
  103 
  104 struct epoll_filefd {
  105         struct file *file;
  106         int fd;
  107 };
  108 
  109 /*
  110  * Structure used to track possible nested calls, to detect too-deep
  111  * recursion and loop cycles.
  112  */
  113 struct nested_call_node {
  114         struct list_head llink;
  115         void *cookie;
  116         void *ctx;
  117 };
  118 
  119 /*
  120  * This structure is used as a collector for nested calls, to check for
  121  * maximum recursion depth and loop cycles.
  122  */
  123 struct nested_calls {
  124         struct list_head tasks_call_list;
  125         spinlock_t lock;
  126 };
  127 
  128 /*
  129  * Each file descriptor added to the eventpoll interface will
  130  * have an entry of this type linked to the "rbr" RB tree.
  131  */
  132 struct epitem {
  133         /* RB tree node used to link this structure to the eventpoll RB tree */
  134         struct rb_node rbn;
  135 
  136         /* List header used to link this structure to the eventpoll ready list */
  137         struct list_head rdllink;
  138 
  139         /*
  140          * Works together with "struct eventpoll"->ovflist in keeping the
  141          * singly linked chain of items.
  142          */
  143         struct epitem *next;
  144 
  145         /* The file descriptor information this item refers to */
  146         struct epoll_filefd ffd;
  147 
  148         /* Number of active wait queues attached to poll operations */
  149         int nwait;
  150 
  151         /* List containing poll wait queues */
  152         struct list_head pwqlist;
  153 
  154         /* The "container" of this item */
  155         struct eventpoll *ep;
  156 
  157         /* List header used to link this item to the "struct file" items list */
  158         struct list_head fllink;
  159 
  160         /* wakeup_source used when EPOLLWAKEUP is set */
  161         struct wakeup_source *ws;
  162 
  163         /* The structure that describes the interested events and the source fd */
  164         struct epoll_event event;
  165 };
  166 
  167 /*
  168  * This structure is stored inside the "private_data" member of the file
  169  * structure and represents the main data structure for the eventpoll
  170  * interface.
  171  */
  172 struct eventpoll {
  173         /* Protect the access to this structure */
  174         spinlock_t lock;
  175 
  176         /*
  177          * This mutex is used to ensure that files are not removed
  178          * while epoll is using them. This is held during the event
  179          * collection loop, the file cleanup path, the epoll file exit
  180          * code and the ctl operations.
  181          */
  182         struct mutex mtx;
  183 
  184         /* Wait queue used by sys_epoll_wait() */
  185         wait_queue_head_t wq;
  186 
  187         /* Wait queue used by file->poll() */
  188         wait_queue_head_t poll_wait;
  189 
  190         /* List of ready file descriptors */
  191         struct list_head rdllist;
  192 
  193         /* RB tree root used to store monitored fd structs */
  194         struct rb_root rbr;
  195 
  196         /*
  197          * This is a singly linked list that chains all the "struct epitem" that
  198          * had events occur while we were transferring ready events to userspace
  199          * without holding ->lock.
  200          */
  201         struct epitem *ovflist;
  202 
  203         /* wakeup_source used when ep_scan_ready_list is running */
  204         struct wakeup_source *ws;
  205 
  206         /* The user that created the eventpoll descriptor */
  207         struct user_struct *user;
  208 
  209         struct file *file;
  210 
  211         /* used to optimize loop detection check */
  212         int visited;
  213         struct list_head visited_list_link;
  214 };
  215 
  216 /* Wait structure used by the poll hooks */
  217 struct eppoll_entry {
  218         /* List header used to link this structure to the "struct epitem" */
  219         struct list_head llink;
  220 
  221         /* The "base" pointer is set to the container "struct epitem" */
  222         struct epitem *base;
  223 
  224         /*
  225          * Wait queue item that will be linked to the target file wait
  226          * queue head.
  227          */
  228         wait_queue_t wait;
  229 
  230         /* The wait queue head to which the "wait" wait queue item is linked */
  231         wait_queue_head_t *whead;
  232 };
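      /*
       * Summary of how the structures above link together, as defined in this
       * file: "struct eventpoll" holds its watched "struct epitem"s in the
       * "rbr" RB tree (keyed by the epoll_filefd pair) and the ready ones on
       * "rdllist" through epitem->rdllink. Each epitem keeps one
       * "struct eppoll_entry" per registered wait queue on "pwqlist"; the
       * entry's "wait" item is linked into the target file's wait queue head
       * ("whead"), and its "base" pointer leads back to the owning epitem,
       * which is how the poll callback finds the item to put on the ready list.
       */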
  233 
  234 /* Wrapper struct used by poll queueing */
  235 struct ep_pqueue {
  236         poll_table pt;
  237         struct epitem *epi;
  238 };
  239 
  240 /* Used by the ep_send_events() function as callback private data */
  241 struct ep_send_events_data {
  242         int maxevents;
  243         struct epoll_event __user *events;
  244 };
  245 
  246 /*
  247  * Configuration options available inside /proc/sys/fs/epoll/
  248  */
  249 /* Maximum number of epoll watched descriptors, per user */
  250 static long max_user_watches __read_mostly;
  251 
  252 /*
  253  * This mutex is used to serialize ep_free() and eventpoll_release_file().
  254  */
  255 static DEFINE_MUTEX(epmutex);
  256 
  257 /* Used to check for epoll file descriptor inclusion loops */
  258 static struct nested_calls poll_loop_ncalls;
  259 
  260 /* Used for safe wake up implementation */
  261 static struct nested_calls poll_safewake_ncalls;
  262 
  263 /* Used to call file's f_op->poll() under the nested calls boundaries */
  264 static struct nested_calls poll_readywalk_ncalls;
  265 
  266 /* Slab cache used to allocate "struct epitem" */
  267 static struct kmem_cache *epi_cache __read_mostly;
  268 
  269 /* Slab cache used to allocate "struct eppoll_entry" */
  270 static struct kmem_cache *pwq_cache __read_mostly;
  271 
  272 /* Visited nodes during ep_loop_check(), so we can unset them when we finish */
  273 static LIST_HEAD(visited_list);
  274 
  275 /*
  276  * List of files with newly added links, where we may need to limit the number
  277  * of emanating paths. Protected by the epmutex.
  278  */
  279 static LIST_HEAD(tfile_check_list);
  280 
  281 #ifdef CONFIG_SYSCTL
  282 
  283 #include <linux/sysctl.h>
  284 
  285 static long zero;
  286 static long long_max = LONG_MAX;
  287 
  288 ctl_table epoll_table[] = {
  289         {
  290                 .procname       = "max_user_watches",
  291                 .data           = &max_user_watches,
  292                 .maxlen         = sizeof(max_user_watches),
  293                 .mode           = 0644,
  294                 .proc_handler   = proc_doulongvec_minmax,
  295                 .extra1         = &zero,
  296                 .extra2         = &long_max,
  297         },
  298         { }
  299 };
  300 #endif /* CONFIG_SYSCTL */
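      /*
       * The table above exposes the per-user watch limit as
       * /proc/sys/fs/epoll/max_user_watches, clamped between "zero" and
       * "long_max" by proc_doulongvec_minmax. A minimal userspace sketch
       * (not part of this file) for reading the current value:
       *
       *   #include <stdio.h>
       *
       *   long read_max_user_watches(void)
       *   {
       *           long val = -1;
       *           FILE *f = fopen("/proc/sys/fs/epoll/max_user_watches", "r");
       *
       *           if (f) {
       *                   if (fscanf(f, "%ld", &val) != 1)
       *                           val = -1;
       *                   fclose(f);
       *           }
       *           return val;
       *   }
       */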
  301 
  302 static const struct file_operations eventpoll_fops;
  303 
  304 static inline int is_file_epoll(struct file *f)
  305 {
  306         return f->f_op == &eventpoll_fops;
  307 }
  308 
  309 /* Setup the structure that is used as key for the RB tree */
  310 static inline void ep_set_ffd(struct epoll_filefd *ffd,
  311                               struct file *file, int fd)
  312 {
  313         ffd->file = file;
  314         ffd->fd = fd;
  315 }
  316 
  317 /* Compare RB tree keys */
  318 static inline int ep_cmp_ffd(struct epoll_filefd *p1,
  319                              struct epoll_filefd *p2)
  320 {
  321         return (p1->file > p2->file ? +1:
  322                 (p1->file < p2->file ? -1 : p1->fd - p2->fd));
  323 }
  324 
  325 /* Tells us if the item is currently linked */
  326 static inline int ep_is_linked(struct list_head *p)
  327 {
  328         return !list_empty(p);
  329 }
  330 
  331 static inline struct eppoll_entry *ep_pwq_from_wait(wait_queue_t *p)
  332 {
  333         return container_of(p, struct eppoll_entry, wait);
  334 }
  335 
  336 /* Get the "struct epitem" from a wait queue pointer */
  337 static inline struct epitem *ep_item_from_wait(wait_queue_t *p)
  338 {
  339         return container_of(p, struct eppoll_entry, wait)->base;
  340 }
  341 
  342 /* Get the "struct epitem" from an epoll queue wrapper */
  343 static inline struct epitem *ep_item_from_epqueue(poll_table *p)
  344 {
  345         return container_of(p, struct ep_pqueue, pt)->epi;
  346 }
  347 
  348 /* Tells if the epoll_ctl(2) operation needs an event copy from userspace */
  349 static inline int ep_op_has_event(int op)
  350 {
  351         return op != EPOLL_CTL_DEL;
  352 }
  353 
  354 /* Initialize the poll safe wake up structure */
  355 static void ep_nested_calls_init(struct nested_calls *ncalls)
  356 {
  357         INIT_LIST_HEAD(&ncalls->tasks_call_list);
  358         spin_lock_init(&ncalls->lock);
  359 }
  360 
  361 /**
  362  * ep_events_available - Checks if ready events might be available.
  363  *
  364  * @ep: Pointer to the eventpoll context.
  365  *
  366  * Returns: Returns a value different from zero if ready events are available,
  367  *          or zero otherwise.
  368  */
  369 static inline int ep_events_available(struct eventpoll *ep)
  370 {
  371         return !list_empty(&ep->rdllist) || ep->ovflist != EP_UNACTIVE_PTR;
  372 }
  373 
  374 /**
  375  * ep_call_nested - Perform a bound (possibly) nested call, by checking
  376  *                  that the recursion limit is not exceeded, and that
  377  *                  the same nested call (by the meaning of same cookie) is
  378  *                  the same nested call (identified by the same cookie) is
  379  *                  not re-entered.
  380  * @ncalls: Pointer to the nested_calls structure to be used for this call.
  381  * @max_nests: Maximum number of allowed nesting calls.
  382  * @nproc: Nested call core function pointer.
  383  * @priv: Opaque data to be passed to the @nproc callback.
  384  * @cookie: Cookie to be used to identify this nested call.
  385  * @ctx: This instance context.
  386  *
  387  * Returns: Returns the code returned by the @nproc callback, or -1 if
  388  *          the maximum recursion limit has been exceeded.
  389  */
  390 static int ep_call_nested(struct nested_calls *ncalls, int max_nests,
  391                           int (*nproc)(void *, void *, int), void *priv,
  392                           void *cookie, void *ctx)
  393 {
  394         int error, call_nests = 0;
  395         unsigned long flags;
  396         struct list_head *lsthead = &ncalls->tasks_call_list;
  397         struct nested_call_node *tncur;
  398         struct nested_call_node tnode;
  399 
  400         spin_lock_irqsave(&ncalls->lock, flags);
  401 
  402         /*
  403          * Try to see if the current task is already inside this wakeup call.
  404          * We use a list here, since the population inside this set is always
  405          * very limited.
  406          */
  407         list_for_each_entry(tncur, lsthead, llink) {
  408                 if (tncur->ctx == ctx &&
  409                     (tncur->cookie == cookie || ++call_nests > max_nests)) {
  410                         /*
  411                          * Oops ... loop detected or maximum nest level reached.
  412                          * We abort this wake by breaking the cycle itself.
  413                          */
  414                         error = -1;
  415                         goto out_unlock;
  416                 }
  417         }
  418 
  419         /* Add the current task and cookie to the list */
  420         tnode.ctx = ctx;
  421         tnode.cookie = cookie;
  422         list_add(&tnode.llink, lsthead);
  423 
  424         spin_unlock_irqrestore(&ncalls->lock, flags);
  425 
  426         /* Call the nested function */
  427         error = (*nproc)(priv, cookie, call_nests);
  428 
  429         /* Remove the current task from the list */
  430         spin_lock_irqsave(&ncalls->lock, flags);
  431         list_del(&tnode.llink);
  432 out_unlock:
  433         spin_unlock_irqrestore(&ncalls->lock, flags);
  434 
  435         return error;
  436 }
  437 
  438 /*
  439  * As described in commit 0ccf831cb ("lockdep: annotate epoll"),
  440  * the use of wait queues by epoll is done in a very controlled
  441  * manner. Wake ups can nest inside each other, but are never done
  442  * with the same locking. For example:
  443  *
  444  *   dfd = socket(...);
  445  *   efd1 = epoll_create();
  446  *   efd2 = epoll_create();
  447  *   epoll_ctl(efd1, EPOLL_CTL_ADD, dfd, ...);
  448  *   epoll_ctl(efd2, EPOLL_CTL_ADD, efd1, ...);
  449  *
  450  * When a packet arrives at the device underneath "dfd", the net code will
  451  * issue a wake_up() on its poll wake list. Epoll (efd1) has installed a
  452  * callback wakeup entry on that queue, and the wake_up() performed by the
  453  * "dfd" net code will end up in ep_poll_callback(). At this point epoll
  454  * (efd1) notices that it may have some event ready, so it needs to wake up
  455  * the waiters on its poll wait list (efd2). So it calls ep_poll_safewake()
  456  * that ends up in another wake_up(), after having checked the
  457  * recursion constraints. That is, no more than EP_MAX_POLLWAKE_NESTS, to
  458  * avoid stack blasting.
  459  *
  460  * When CONFIG_DEBUG_LOCK_ALLOC is enabled, make sure lockdep can handle
  461  * this special case of epoll.
  462  */
  463 #ifdef CONFIG_DEBUG_LOCK_ALLOC
  464 static inline void ep_wake_up_nested(wait_queue_head_t *wqueue,
  465                                      unsigned long events, int subclass)
  466 {
  467         unsigned long flags;
  468 
  469         spin_lock_irqsave_nested(&wqueue->lock, flags, subclass);
  470         wake_up_locked_poll(wqueue, events);
  471         spin_unlock_irqrestore(&wqueue->lock, flags);
  472 }
  473 #else
  474 static inline void ep_wake_up_nested(wait_queue_head_t *wqueue,
  475                                      unsigned long events, int subclass)
  476 {
  477         wake_up_poll(wqueue, events);
  478 }
  479 #endif
  480 
  481 static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests)
  482 {
  483         ep_wake_up_nested((wait_queue_head_t *) cookie, POLLIN,
  484                           1 + call_nests);
  485         return 0;
  486 }
  487 
  488 /*
  489  * Perform a safe wake up of the poll wait list. The problem is that
  490  * with the new callback'd wake up system, it is possible that the
  491  * poll callback is reentered from inside the call to wake_up() done
  492  * on the poll wait queue head. The rule is that we cannot reenter the
  493  * wake up code from the same task more than EP_MAX_NESTS times,
  494  * and we cannot reenter the same wait queue head at all. This will
  495  * enable to have a hierarchy of epoll file descriptor of no more than
  496  * EP_MAX_NESTS deep.
  497  */
  498 static void ep_poll_safewake(wait_queue_head_t *wq)
  499 {
  500         int this_cpu = get_cpu();
  501 
  502         ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS,
  503                        ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu);
  504 
  505         put_cpu();
  506 }
  507 
  508 static void ep_remove_wait_queue(struct eppoll_entry *pwq)
  509 {
  510         wait_queue_head_t *whead;
  511 
  512         rcu_read_lock();
  513         /* If it is cleared by POLLFREE, it should be rcu-safe */
  514         whead = rcu_dereference(pwq->whead);
  515         if (whead)
  516                 remove_wait_queue(whead, &pwq->wait);
  517         rcu_read_unlock();
  518 }
  519 
  520 /*
  521  * This function unregisters poll callbacks from the associated file
  522  * descriptor.  Must be called with "mtx" held (or "epmutex" if called from
  523  * ep_free).
  524  */
  525 static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
  526 {
  527         struct list_head *lsthead = &epi->pwqlist;
  528         struct eppoll_entry *pwq;
  529 
  530         while (!list_empty(lsthead)) {
  531                 pwq = list_first_entry(lsthead, struct eppoll_entry, llink);
  532 
  533                 list_del(&pwq->llink);
  534                 ep_remove_wait_queue(pwq);
  535                 kmem_cache_free(pwq_cache, pwq);
  536         }
  537 }
  538 
  539 /**
  540  * ep_scan_ready_list - Scans the ready list in a way that makes it possible for
  541  *                      the scan code to call f_op->poll(). Also allows for
  542  *                      O(NumReady) performance.
  543  *
  544  * @ep: Pointer to the epoll private data structure.
  545  * @sproc: Pointer to the scan callback.
  546  * @priv: Private opaque data passed to the @sproc callback.
  547  * @depth: The current depth of recursive f_op->poll calls.
  548  *
  549  * Returns: The same integer error code returned by the @sproc callback.
  550  */
  551 static int ep_scan_ready_list(struct eventpoll *ep,
  552                               int (*sproc)(struct eventpoll *,
  553                                            struct list_head *, void *),
  554                               void *priv,
  555                               int depth)
  556 {
  557         int error, pwake = 0;
  558         unsigned long flags;
  559         struct epitem *epi, *nepi;
  560         LIST_HEAD(txlist);
  561 
  562         /*
  563          * We need to lock this because we could be hit by
  564          * eventpoll_release_file() and epoll_ctl().
  565          */
  566         mutex_lock_nested(&ep->mtx, depth);
  567 
  568         /*
  569          * Steal the ready list, and re-init the original one to the
  570          * empty list. Also, set ep->ovflist to NULL so that events
  571          * happening while looping without locks are not lost. We cannot
  572          * allow the poll callback to queue directly on ep->rdllist,
  573          * because we want the "sproc" callback to be able to do it
  574          * in a lockless way.
  575          */
  576         spin_lock_irqsave(&ep->lock, flags);
  577         list_splice_init(&ep->rdllist, &txlist);
  578         ep->ovflist = NULL;
  579         spin_unlock_irqrestore(&ep->lock, flags);
  580 
  581         /*
  582          * Now call the callback function.
  583          */
  584         error = (*sproc)(ep, &txlist, priv);
  585 
  586         spin_lock_irqsave(&ep->lock, flags);
  587         /*
  588          * During the time we spent inside the "sproc" callback, some
  589          * other events might have been queued by the poll callback.
  590          * We re-insert them inside the main ready-list here.
  591          */
  592         for (nepi = ep->ovflist; (epi = nepi) != NULL;
  593              nepi = epi->next, epi->next = EP_UNACTIVE_PTR) {
  594                 /*
  595                  * We need to check if the item is already in the list.
  596                  * During the "sproc" callback execution time, items are
  597                  * queued into ->ovflist but the "txlist" might already
  598                  * contain them, and the list_splice() below takes care of them.
  599                  */
  600                 if (!ep_is_linked(&epi->rdllink)) {
  601                         list_add_tail(&epi->rdllink, &ep->rdllist);
  602                         __pm_stay_awake(epi->ws);
  603                 }
  604         }
  605         /*
  606          * We need to set back ep->ovflist to EP_UNACTIVE_PTR, so that after
  607          * releasing the lock, events will be queued in the normal way inside
  608          * ep->rdllist.
  609          */
  610         ep->ovflist = EP_UNACTIVE_PTR;
  611 
  612         /*
  613          * Quickly re-inject items left on "txlist".
  614          */
  615         list_splice(&txlist, &ep->rdllist);
  616         __pm_relax(ep->ws);
  617 
  618         if (!list_empty(&ep->rdllist)) {
  619                 /*
  620                  * Wake up (if active) both the eventpoll wait list and
  621                  * the ->poll() wait list (delayed after we release the lock).
  622                  */
  623                 if (waitqueue_active(&ep->wq))
  624                         wake_up_locked(&ep->wq);
  625                 if (waitqueue_active(&ep->poll_wait))
  626                         pwake++;
  627         }
  628         spin_unlock_irqrestore(&ep->lock, flags);
  629 
  630         mutex_unlock(&ep->mtx);
  631 
  632         /* We have to call this outside the lock */
  633         if (pwake)
  634                 ep_poll_safewake(&ep->poll_wait);
  635 
  636         return error;
  637 }
  638 
  639 /*
  640  * Removes a "struct epitem" from the eventpoll RB tree and deallocates
  641  * all the associated resources. Must be called with "mtx" held.
  642  */
  643 static int ep_remove(struct eventpoll *ep, struct epitem *epi)
  644 {
  645         unsigned long flags;
  646         struct file *file = epi->ffd.file;
  647 
  648         /*
  649          * Removes poll wait queue hooks. We _have_ to do this without holding
  650          * the "ep->lock", otherwise a deadlock might occur. This is because of
  651          * the lock acquisition order: here we take "ep->lock" and then the wait
  652          * queue head lock when unregistering the wait queue. The wakeup callback
  653          * runs holding the wait queue head lock and will call our callback,
  654          * which will try to take "ep->lock".
  655          */
  656         ep_unregister_pollwait(ep, epi);
  657 
  658         /* Remove the current item from the list of epoll hooks */
  659         spin_lock(&file->f_lock);
  660         if (ep_is_linked(&epi->fllink))
  661                 list_del_init(&epi->fllink);
  662         spin_unlock(&file->f_lock);
  663 
  664         rb_erase(&epi->rbn, &ep->rbr);
  665 
  666         spin_lock_irqsave(&ep->lock, flags);
  667         if (ep_is_linked(&epi->rdllink))
  668                 list_del_init(&epi->rdllink);
  669         spin_unlock_irqrestore(&ep->lock, flags);
  670 
  671         wakeup_source_unregister(epi->ws);
  672 
  673         /* At this point it is safe to free the eventpoll item */
  674         kmem_cache_free(epi_cache, epi);
  675 
  676         atomic_long_dec(&ep->user->epoll_watches);
  677 
  678         return 0;
  679 }
  680 
  681 static void ep_free(struct eventpoll *ep)
  682 {
  683         struct rb_node *rbp;
  684         struct epitem *epi;
  685 
  686         /* We need to release all tasks waiting on this file */
  687         if (waitqueue_active(&ep->poll_wait))
  688                 ep_poll_safewake(&ep->poll_wait);
  689 
  690         /*
  691          * We need to lock this because we could be hit by
  692          * eventpoll_release_file() while we're freeing the "struct eventpoll".
  693          * We do not need to hold "ep->mtx" here because the epoll file
  694          * is on its way to being removed and no one has references to it
  695          * anymore. The only hit might come from eventpoll_release_file() but
  696          * holding "epmutex" is sufficient here.
  697          */
  698         mutex_lock(&epmutex);
  699 
  700         /*
  701          * Walks through the whole tree, unregistering poll callbacks.
  702          */
  703         for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
  704                 epi = rb_entry(rbp, struct epitem, rbn);
  705 
  706                 ep_unregister_pollwait(ep, epi);
  707         }
  708 
  709         /*
  710          * Walks through the whole tree, freeing each "struct epitem". At this
  711          * point we are sure no poll callbacks will be lingering around, and also by
  712          * holding "epmutex" we can be sure that no file cleanup code will hit
  713          * us during this operation. So we can avoid the lock on "ep->lock".
  714          */
  715         while ((rbp = rb_first(&ep->rbr)) != NULL) {
  716                 epi = rb_entry(rbp, struct epitem, rbn);
  717                 ep_remove(ep, epi);
  718         }
  719 
  720         mutex_unlock(&epmutex);
  721         mutex_destroy(&ep->mtx);
  722         free_uid(ep->user);
  723         wakeup_source_unregister(ep->ws);
  724         kfree(ep);
  725 }
  726 
  727 static int ep_eventpoll_release(struct inode *inode, struct file *file)
  728 {
  729         struct eventpoll *ep = file->private_data;
  730 
  731         if (ep)
  732                 ep_free(ep);
  733 
  734         return 0;
  735 }
  736 
  737 static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
  738                                void *priv)
  739 {
  740         struct epitem *epi, *tmp;
  741         poll_table pt;
  742 
  743         init_poll_funcptr(&pt, NULL);
  744         list_for_each_entry_safe(epi, tmp, head, rdllink) {
  745                 pt._key = epi->event.events;
  746                 if (epi->ffd.file->f_op->poll(epi->ffd.file, &pt) &
  747                     epi->event.events)
  748                         return POLLIN | POLLRDNORM;
  749                 else {
  750                         /*
  751                          * Item has been dropped into the ready list by the poll
  752                          * callback, but it's not actually ready, as far as
  753                          * the caller-requested events go. We can remove it here.
  754                          */
  755                         __pm_relax(epi->ws);
  756                         list_del_init(&epi->rdllink);
  757                 }
  758         }
  759 
  760         return 0;
  761 }
  762 
  763 static int ep_poll_readyevents_proc(void *priv, void *cookie, int call_nests)
  764 {
  765         return ep_scan_ready_list(priv, ep_read_events_proc, NULL, call_nests + 1);
  766 }
  767 
  768 static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
  769 {
  770         int pollflags;
  771         struct eventpoll *ep = file->private_data;
  772 
  773         /* Insert inside our poll wait queue */
  774         poll_wait(file, &ep->poll_wait, wait);
  775 
  776         /*
  777          * Proceed to find out if wanted events are really available inside
  778          * the ready list. This needs to be done under ep_call_nested()
  779          * supervision, since the call to f_op->poll() done on listed files
  780          * could re-enter here.
  781          */
  782         pollflags = ep_call_nested(&poll_readywalk_ncalls, EP_MAX_NESTS,
  783                                    ep_poll_readyevents_proc, ep, ep, current);
  784 
  785         return pollflags != -1 ? pollflags : 0;
  786 }
  787 
  788 #ifdef CONFIG_PROC_FS
  789 static int ep_show_fdinfo(struct seq_file *m, struct file *f)
  790 {
  791         struct eventpoll *ep = f->private_data;
  792         struct rb_node *rbp;
  793         int ret = 0;
  794 
  795         mutex_lock(&ep->mtx);
  796         for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
  797                 struct epitem *epi = rb_entry(rbp, struct epitem, rbn);
  798 
  799                 ret = seq_printf(m, "tfd: %8d events: %8x data: %16llx\n",
  800                                  epi->ffd.fd, epi->event.events,
  801                                  (long long)epi->event.data);
  802                 if (ret)
  803                         break;
  804         }
  805         mutex_unlock(&ep->mtx);
  806 
  807         return ret;
  808 }
  809 #endif
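      /*
       * When CONFIG_PROC_FS is enabled, the routine above makes each watched
       * descriptor visible through /proc/<pid>/fdinfo/<epoll-fd>. Hypothetical
       * sample lines (values made up), following the seq_printf format string
       * "tfd: %8d events: %8x data: %16llx":
       *
       *   tfd:        5 events:        1 data:                0
       *   tfd:        9 events:        5 data:         deadbeef
       */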
  810 
  811 /* File callbacks that implement the eventpoll file behaviour */
  812 static const struct file_operations eventpoll_fops = {
  813 #ifdef CONFIG_PROC_FS
  814         .show_fdinfo    = ep_show_fdinfo,
  815 #endif
  816         .release        = ep_eventpoll_release,
  817         .poll           = ep_eventpoll_poll,
  818         .llseek         = noop_llseek,
  819 };
  820 
  821 /*
  822  * This is called from eventpoll_release() to unlink files from the eventpoll
  823  * interface. We need this facility to correctly clean up files that are
  824  * closed without having been removed from the eventpoll interface.
  825  */
  826 void eventpoll_release_file(struct file *file)
  827 {
  828         struct list_head *lsthead = &file->f_ep_links;
  829         struct eventpoll *ep;
  830         struct epitem *epi;
  831 
  832         /*
  833          * We don't want to get "file->f_lock" because it is not
  834          * necessary. It is not necessary because we're in the "struct file"
  835          * cleanup path, and this means that no one is using this file anymore.
  836          * So, for example, epoll_ctl() cannot hit here since if we reach this
  837          * point, the file counter already went to zero and fget() would fail.
  838          * The only hit might come from ep_free(), but holding the mutex
  839          * will correctly serialize the operation. We do need to acquire
  840          * "ep->mtx" after "epmutex" because ep_remove() requires it when called
  841          * from anywhere but ep_free().
  842          *
  843          * Besides, ep_remove() acquires the lock, so we can't hold it here.
  844          */
  845         mutex_lock(&epmutex);
  846 
  847         while (!list_empty(lsthead)) {
  848                 epi = list_first_entry(lsthead, struct epitem, fllink);
  849 
  850                 ep = epi->ep;
  851                 list_del_init(&epi->fllink);
  852                 mutex_lock_nested(&ep->mtx, 0);
  853                 ep_remove(ep, epi);
  854                 mutex_unlock(&ep->mtx);
  855         }
  856 
  857         mutex_unlock(&epmutex);
  858 }
  859 
  860 static int ep_alloc(struct eventpoll **pep)
  861 {
  862         int error;
  863         struct user_struct *user;
  864         struct eventpoll *ep;
  865 
  866         user = get_current_user();
  867         error = -ENOMEM;
  868         ep = kzalloc(sizeof(*ep), GFP_KERNEL);
  869         if (unlikely(!ep))
  870                 goto free_uid;
  871 
  872         spin_lock_init(&ep->lock);
  873         mutex_init(&ep->mtx);
  874         init_waitqueue_head(&ep->wq);
  875         init_waitqueue_head(&ep->poll_wait);
  876         INIT_LIST_HEAD(&ep->rdllist);
  877         ep->rbr = RB_ROOT;
  878         ep->ovflist = EP_UNACTIVE_PTR;
  879         ep->user = user;
  880 
  881         *pep = ep;
  882 
  883         return 0;
  884 
  885 free_uid:
  886         free_uid(user);
  887         return error;
  888 }
  889 
  890 /*
  891  * Search the file inside the eventpoll tree. The RB tree operations
  892  * are protected by the "mtx" mutex, and ep_find() must be called with
  893  * "mtx" held.
  894  */
  895 static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
  896 {
  897         int kcmp;
  898         struct rb_node *rbp;
  899         struct epitem *epi, *epir = NULL;
  900         struct epoll_filefd ffd;
  901 
  902         ep_set_ffd(&ffd, file, fd);
  903         for (rbp = ep->rbr.rb_node; rbp; ) {
  904                 epi = rb_entry(rbp, struct epitem, rbn);
  905                 kcmp = ep_cmp_ffd(&ffd, &epi->ffd);
  906                 if (kcmp > 0)
  907                         rbp = rbp->rb_right;
  908                 else if (kcmp < 0)
  909                         rbp = rbp->rb_left;
  910                 else {
  911                         epir = epi;
  912                         break;
  913                 }
  914         }
  915 
  916         return epir;
  917 }
  918 
  919 /*
  920  * This is the callback that is passed to the wait queue wakeup
  921  * mechanism. It is called by the stored file descriptors when they
  922  * have events to report.
  923  */
  924 static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key)
  925 {
  926         int pwake = 0;
  927         unsigned long flags;
  928         struct epitem *epi = ep_item_from_wait(wait);
  929         struct eventpoll *ep = epi->ep;
  930 
  931         if ((unsigned long)key & POLLFREE) {
  932                 ep_pwq_from_wait(wait)->whead = NULL;
  933                 /*
  934                  * whead = NULL above can race with ep_remove_wait_queue()
  935                  * which can do another remove_wait_queue() after us, so we
  936                  * can't use __remove_wait_queue(). whead->lock is held by
  937                  * the caller.
  938                  */
  939                 list_del_init(&wait->task_list);
  940         }
  941 
  942         spin_lock_irqsave(&ep->lock, flags);
  943 
  944         /*
  945          * If the event mask does not contain any poll(2) event, we consider the
  946          * descriptor to be disabled. This condition is likely the effect of the
  947          * EPOLLONESHOT bit that disables the descriptor when an event is received,
  948          * until the next EPOLL_CTL_MOD is issued.
  949          */
  950         if (!(epi->event.events & ~EP_PRIVATE_BITS))
  951                 goto out_unlock;
  952 
  953         /*
  954          * Check the events coming with the callback. At this stage, not
  955          * every device reports the events in the "key" parameter of the
  956          * callback. We need to be able to handle both cases here, hence the
  957          * test for "key" != NULL before the event match test.
  958          */
  959         if (key && !((unsigned long) key & epi->event.events))
  960                 goto out_unlock;
  961 
  962         /*
  963          * If we are transferring events to userspace, we can hold no locks
  964          * (because we're accessing user memory, and because of linux f_op->poll()
  965          * semantics). All the events that happen during that period of time are
  966          * chained in ep->ovflist and requeued later on.
  967          */
  968         if (unlikely(ep->ovflist != EP_UNACTIVE_PTR)) {
  969                 if (epi->next == EP_UNACTIVE_PTR) {
  970                         epi->next = ep->ovflist;
  971                         ep->ovflist = epi;
  972                         if (epi->ws) {
  973                                 /*
  974                                  * Activate ep->ws since epi->ws may get
  975                                  * deactivated at any time.
  976                                  */
  977                                 __pm_stay_awake(ep->ws);
  978                         }
  979 
  980                 }
  981                 goto out_unlock;
  982         }
  983 
  984         /* If this file is already in the ready list we exit soon */
  985         if (!ep_is_linked(&epi->rdllink)) {
  986                 list_add_tail(&epi->rdllink, &ep->rdllist);
  987                 __pm_stay_awake(epi->ws);
  988         }
  989 
  990         /*
  991          * Wake up ( if active ) both the eventpoll wait list and the ->poll()
  992          * wait list.
  993          */
  994         if (waitqueue_active(&ep->wq))
  995                 wake_up_locked(&ep->wq);
  996         if (waitqueue_active(&ep->poll_wait))
  997                 pwake++;
  998 
  999 out_unlock:
 1000         spin_unlock_irqrestore(&ep->lock, flags);
 1001 
 1002         /* We have to call this outside the lock */
 1003         if (pwake)
 1004                 ep_poll_safewake(&ep->poll_wait);
 1005 
 1006         return 1;
 1007 }
 1008 
 1009 /*
 1010  * This is the callback that is used to add our wait queue to the
 1011  * target file wakeup lists.
 1012  */
 1013 static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
 1014                                  poll_table *pt)
 1015 {
 1016         struct epitem *epi = ep_item_from_epqueue(pt);
 1017         struct eppoll_entry *pwq;
 1018 
 1019         if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL))) {
 1020                 init_waitqueue_func_entry(&pwq->wait, ep_poll_callback);
 1021                 pwq->whead = whead;
 1022                 pwq->base = epi;
 1023                 add_wait_queue(whead, &pwq->wait);
 1024                 list_add_tail(&pwq->llink, &epi->pwqlist);
 1025                 epi->nwait++;
 1026         } else {
 1027                 /* We have to signal that an error occurred */
 1028                 epi->nwait = -1;
 1029         }
 1030 }
 1031 
 1032 static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi)
 1033 {
 1034         int kcmp;
 1035         struct rb_node **p = &ep->rbr.rb_node, *parent = NULL;
 1036         struct epitem *epic;
 1037 
 1038         while (*p) {
 1039                 parent = *p;
 1040                 epic = rb_entry(parent, struct epitem, rbn);
 1041                 kcmp = ep_cmp_ffd(&epi->ffd, &epic->ffd);
 1042                 if (kcmp > 0)
 1043                         p = &parent->rb_right;
 1044                 else
 1045                         p = &parent->rb_left;
 1046         }
 1047         rb_link_node(&epi->rbn, parent, p);
 1048         rb_insert_color(&epi->rbn, &ep->rbr);
 1049 }
 1050 
 1051 
 1052 
 1053 #define PATH_ARR_SIZE 5
 1054 /*
 1055  * These are the numbers of paths of length 1 to 5 that we allow to emanate
 1056  * from a single file of interest. For example, we allow 1000 paths of length
 1057  * 1, to emanate from each file of interest. This essentially represents the
 1058  * potential wakeup paths, which need to be limited in order to avoid massive
 1059  * uncontrolled wakeup storms. The common use case should be a single ep which
 1060  * is connected to n file sources. In this case each file source has 1 path
 1061  * of length 1. Thus, the numbers below should be more than sufficient. These
 1062  * path limits are enforced during an EPOLL_CTL_ADD operation, since a modify
 1063  * and delete can't add additional paths. Protected by the epmutex.
 1064  */
 1065 static const int path_limits[PATH_ARR_SIZE] = { 1000, 500, 100, 50, 10 };
 1066 static int path_count[PATH_ARR_SIZE];
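      /*
       * Hypothetical worked example of the limits above: if a socket is
       * watched directly by several epoll fds, each of those is one path of
       * length 1 from the socket. If 600 of those epoll fds were themselves
       * added to further epoll fds, the socket would gain 600 wakeup paths of
       * length 2, exceeding the limit of 500 above, so reverse_path_check()
       * would fail and ep_insert() would reject the EPOLL_CTL_ADD with
       * -EINVAL.
       */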
 1067 
 1068 static int path_count_inc(int nests)
 1069 {
 1070         /* Allow an arbitrary number of depth 1 paths */
 1071         if (nests == 0)
 1072                 return 0;
 1073 
 1074         if (++path_count[nests] > path_limits[nests])
 1075                 return -1;
 1076         return 0;
 1077 }
 1078 
 1079 static void path_count_init(void)
 1080 {
 1081         int i;
 1082 
 1083         for (i = 0; i < PATH_ARR_SIZE; i++)
 1084                 path_count[i] = 0;
 1085 }
 1086 
 1087 static int reverse_path_check_proc(void *priv, void *cookie, int call_nests)
 1088 {
 1089         int error = 0;
 1090         struct file *file = priv;
 1091         struct file *child_file;
 1092         struct epitem *epi;
 1093 
 1094         list_for_each_entry(epi, &file->f_ep_links, fllink) {
 1095                 child_file = epi->ep->file;
 1096                 if (is_file_epoll(child_file)) {
 1097                         if (list_empty(&child_file->f_ep_links)) {
 1098                                 if (path_count_inc(call_nests)) {
 1099                                         error = -1;
 1100                                         break;
 1101                                 }
 1102                         } else {
 1103                                 error = ep_call_nested(&poll_loop_ncalls,
 1104                                                         EP_MAX_NESTS,
 1105                                                         reverse_path_check_proc,
 1106                                                         child_file, child_file,
 1107                                                         current);
 1108                         }
 1109                         if (error != 0)
 1110                                 break;
 1111                 } else {
 1112                         printk(KERN_ERR "reverse_path_check_proc: "
 1113                                 "file is not an ep!\n");
 1114                 }
 1115         }
 1116         return error;
 1117 }
 1118 
 1119 /**
 1120  * reverse_path_check - The tfile_check_list is a list of file *, which have
 1121  *                      links that are proposed to be newly added. We need to
 1122  *                      make sure that those added links don't add too many
 1123  *                      paths such that we will spend all our time waking up
 1124  *                      eventpoll objects.
 1125  *
 1126  * Returns: Returns zero if the proposed links don't create too many paths,
 1127  *          -1 otherwise.
 1128  */
 1129 static int reverse_path_check(void)
 1130 {
 1131         int error = 0;
 1132         struct file *current_file;
 1133 
 1134         /* let's call this for all tfiles */
 1135         list_for_each_entry(current_file, &tfile_check_list, f_tfile_llink) {
 1136                 path_count_init();
 1137                 error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
 1138                                         reverse_path_check_proc, current_file,
 1139                                         current_file, current);
 1140                 if (error)
 1141                         break;
 1142         }
 1143         return error;
 1144 }
 1145 
 1146 static int ep_create_wakeup_source(struct epitem *epi)
 1147 {
 1148         const char *name;
 1149 
 1150         if (!epi->ep->ws) {
 1151                 epi->ep->ws = wakeup_source_register("eventpoll");
 1152                 if (!epi->ep->ws)
 1153                         return -ENOMEM;
 1154         }
 1155 
 1156         name = epi->ffd.file->f_path.dentry->d_name.name;
 1157         epi->ws = wakeup_source_register(name);
 1158         if (!epi->ws)
 1159                 return -ENOMEM;
 1160 
 1161         return 0;
 1162 }
 1163 
 1164 static void ep_destroy_wakeup_source(struct epitem *epi)
 1165 {
 1166         wakeup_source_unregister(epi->ws);
 1167         epi->ws = NULL;
 1168 }
 1169 
 1170 /*
 1171  * Must be called with "mtx" held.
 1172  */
 1173 static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 1174                      struct file *tfile, int fd)
 1175 {
 1176         int error, revents, pwake = 0;
 1177         unsigned long flags;
 1178         long user_watches;
 1179         struct epitem *epi;
 1180         struct ep_pqueue epq;
 1181 
 1182         user_watches = atomic_long_read(&ep->user->epoll_watches);
 1183         if (unlikely(user_watches >= max_user_watches))
 1184                 return -ENOSPC;
 1185         if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL)))
 1186                 return -ENOMEM;
 1187 
 1188         /* Item initialization follows here ... */
 1189         INIT_LIST_HEAD(&epi->rdllink);
 1190         INIT_LIST_HEAD(&epi->fllink);
 1191         INIT_LIST_HEAD(&epi->pwqlist);
 1192         epi->ep = ep;
 1193         ep_set_ffd(&epi->ffd, tfile, fd);
 1194         epi->event = *event;
 1195         epi->nwait = 0;
 1196         epi->next = EP_UNACTIVE_PTR;
 1197         if (epi->event.events & EPOLLWAKEUP) {
 1198                 error = ep_create_wakeup_source(epi);
 1199                 if (error)
 1200                         goto error_create_wakeup_source;
 1201         } else {
 1202                 epi->ws = NULL;
 1203         }
 1204 
 1205         /* Initialize the poll table using the queue callback */
 1206         epq.epi = epi;
 1207         init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);
 1208         epq.pt._key = event->events;
 1209 
 1210         /*
 1211          * Attach the item to the poll hooks and get current event bits.
 1212          * We can safely use the file* here because its usage count has
 1213          * been increased by the caller of this function. Note that after
 1214          * this operation completes, the poll callback can start hitting
 1215          * the new item.
 1216          */
 1217         revents = tfile->f_op->poll(tfile, &epq.pt);
 1218 
 1219         /*
 1220          * We have to check if something went wrong during the poll wait queue
 1221          * install process, namely an allocation for a wait queue failing due
 1222          * to high memory pressure.
 1223          */
 1224         error = -ENOMEM;
 1225         if (epi->nwait < 0)
 1226                 goto error_unregister;
 1227 
 1228         /* Add the current item to the list of active epoll hooks for this file */
 1229         spin_lock(&tfile->f_lock);
 1230         list_add_tail(&epi->fllink, &tfile->f_ep_links);
 1231         spin_unlock(&tfile->f_lock);
 1232 
 1233         /*
 1234          * Add the current item to the RB tree. All RB tree operations are
 1235          * protected by "mtx", and ep_insert() is called with "mtx" held.
 1236          */
 1237         ep_rbtree_insert(ep, epi);
 1238 
 1239         /* now check if we've created too many backpaths */
 1240         error = -EINVAL;
 1241         if (reverse_path_check())
 1242                 goto error_remove_epi;
 1243 
 1244         /* We have to drop the new item inside our item list to keep track of it */
 1245         spin_lock_irqsave(&ep->lock, flags);
 1246 
 1247         /* If the file is already "ready" we drop it inside the ready list */
 1248         if ((revents & event->events) && !ep_is_linked(&epi->rdllink)) {
 1249                 list_add_tail(&epi->rdllink, &ep->rdllist);
 1250                 __pm_stay_awake(epi->ws);
 1251 
 1252                 /* Notify waiting tasks that events are available */
 1253                 if (waitqueue_active(&ep->wq))
 1254                         wake_up_locked(&ep->wq);
 1255                 if (waitqueue_active(&ep->poll_wait))
 1256                         pwake++;
 1257         }
 1258 
 1259         spin_unlock_irqrestore(&ep->lock, flags);
 1260 
 1261         atomic_long_inc(&ep->user->epoll_watches);
 1262 
 1263         /* We have to call this outside the lock */
 1264         if (pwake)
 1265                 ep_poll_safewake(&ep->poll_wait);
 1266 
 1267         return 0;
 1268 
 1269 error_remove_epi:
 1270         spin_lock(&tfile->f_lock);
 1271         if (ep_is_linked(&epi->fllink))
 1272                 list_del_init(&epi->fllink);
 1273         spin_unlock(&tfile->f_lock);
 1274 
 1275         rb_erase(&epi->rbn, &ep->rbr);
 1276 
 1277 error_unregister:
 1278         ep_unregister_pollwait(ep, epi);
 1279 
 1280         /*
 1281          * We need to do this because an event could have arrived on some
 1282          * allocated wait queue. Note that we don't care about the ep->ovflist
 1283          * list, since that is used/cleaned only inside a section bound by "mtx".
 1284          * And ep_insert() is called with "mtx" held.
 1285          */
 1286         spin_lock_irqsave(&ep->lock, flags);
 1287         if (ep_is_linked(&epi->rdllink))
 1288                 list_del_init(&epi->rdllink);
 1289         spin_unlock_irqrestore(&ep->lock, flags);
 1290 
 1291         wakeup_source_unregister(epi->ws);
 1292 
 1293 error_create_wakeup_source:
 1294         kmem_cache_free(epi_cache, epi);
 1295 
 1296         return error;
 1297 }
 1298 
 1299 /*
 1300  * Modify the interest event mask by dropping an event if the new mask
 1301  * has a match in the current file status. Must be called with "mtx" held.
 1302  */
 1303 static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_event *event)
 1304 {
 1305         int pwake = 0;
 1306         unsigned int revents;
 1307         poll_table pt;
 1308 
 1309         init_poll_funcptr(&pt, NULL);
 1310 
 1311         /*
 1312          * Set the new event interest mask before calling f_op->poll();
 1313          * otherwise we might miss an event that happens between the
 1314          * f_op->poll() call and the new event set registering.
 1315          */
 1316         epi->event.events = event->events; /* need barrier below */
 1317         pt._key = event->events;
 1318         epi->event.data = event->data; /* protected by mtx */
 1319         if (epi->event.events & EPOLLWAKEUP) {
 1320                 if (!epi->ws)
 1321                         ep_create_wakeup_source(epi);
 1322         } else if (epi->ws) {
 1323                 ep_destroy_wakeup_source(epi);
 1324         }
 1325 
 1326         /*
 1327          * The following barrier has two effects:
 1328          *
 1329          * 1) Flush epi changes above to other CPUs.  This ensures
 1330          *    we do not miss events from ep_poll_callback if an
 1331          *    event occurs immediately after we call f_op->poll().
 1332          *    We need this because we did not take ep->lock while
 1333          *    changing epi above (but ep_poll_callback does take
 1334          *    ep->lock).
 1335          *
 1336          * 2) We also need to ensure we do not miss _past_ events
 1337          *    when calling f_op->poll().  This barrier also
 1338          *    pairs with the barrier in wq_has_sleeper (see
 1339          *    comments for wq_has_sleeper).
 1340          *
 1341          * This barrier will now guarantee ep_poll_callback or f_op->poll
 1342          * (or both) will notice the readiness of an item.
 1343          */
 1344         smp_mb();
 1345 
 1346         /*
 1347          * Get current event bits. We can safely use the file* here because
 1348          * its usage count has been increased by the caller of this function.
 1349          */
 1350         revents = epi->ffd.file->f_op->poll(epi->ffd.file, &pt);
 1351 
 1352         /*
 1353          * If the item is "hot" and it is not registered inside the ready
 1354          * list, push it inside.
 1355          */
 1356         if (revents & event->events) {
 1357                 spin_lock_irq(&ep->lock);
 1358                 if (!ep_is_linked(&epi->rdllink)) {
 1359                         list_add_tail(&epi->rdllink, &ep->rdllist);
 1360                         __pm_stay_awake(epi->ws);
 1361 
 1362                         /* Notify waiting tasks that events are available */
 1363                         if (waitqueue_active(&ep->wq))
 1364                                 wake_up_locked(&ep->wq);
 1365                         if (waitqueue_active(&ep->poll_wait))
 1366                                 pwake++;
 1367                 }
 1368                 spin_unlock_irq(&ep->lock);
 1369         }
 1370 
 1371         /* We have to call this outside the lock */
 1372         if (pwake)
 1373                 ep_poll_safewake(&ep->poll_wait);
 1374 
 1375         return 0;
 1376 }
 1377 
 1378 static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head,
 1379                                void *priv)
 1380 {
 1381         struct ep_send_events_data *esed = priv;
 1382         int eventcnt;
 1383         unsigned int revents;
 1384         struct epitem *epi;
 1385         struct epoll_event __user *uevent;
 1386         poll_table pt;
 1387 
 1388         init_poll_funcptr(&pt, NULL);
 1389 
 1390         /*
 1391          * We can loop without lock because we are passed a task private list.
 1392          * Items cannot vanish during the loop because ep_scan_ready_list() is
 1393          * holding "mtx" during this call.
 1394          */
 1395         for (eventcnt = 0, uevent = esed->events;
 1396              !list_empty(head) && eventcnt < esed->maxevents;) {
 1397                 epi = list_first_entry(head, struct epitem, rdllink);
 1398 
 1399                 /*
 1400                  * Activate ep->ws before deactivating epi->ws to prevent
 1401                  * triggering auto-suspend here (in case we reactivate epi->ws
 1402                  * below).
 1403                  *
 1404                  * This could be rearranged to delay the deactivation of epi->ws
 1405                  * instead, but then epi->ws would temporarily be out of sync
 1406                  * with ep_is_linked().
 1407                  */
 1408                 if (epi->ws && epi->ws->active)
 1409                         __pm_stay_awake(ep->ws);
 1410                 __pm_relax(epi->ws);
 1411                 list_del_init(&epi->rdllink);
 1412 
 1413                 pt._key = epi->event.events;
 1414                 revents = epi->ffd.file->f_op->poll(epi->ffd.file, &pt) &
 1415                         epi->event.events;
 1416 
 1417                 /*
 1418                  * If the event mask intersects the caller-requested one,
 1419                  * deliver the event to userspace. Again, ep_scan_ready_list()
 1420                  * is holding "mtx", so no operations coming from userspace
 1421                  * can change the item.
 1422                  */
 1423                 if (revents) {
 1424                         if (__put_user(revents, &uevent->events) ||
 1425                             __put_user(epi->event.data, &uevent->data)) {
 1426                                 list_add(&epi->rdllink, head);
 1427                                 __pm_stay_awake(epi->ws);
 1428                                 return eventcnt ? eventcnt : -EFAULT;
 1429                         }
 1430                         eventcnt++;
 1431                         uevent++;
 1432                         if (epi->event.events & EPOLLONESHOT)
 1433                                 epi->event.events &= EP_PRIVATE_BITS;
 1434                         else if (!(epi->event.events & EPOLLET)) {
 1435                                 /*
 1436                  * If this file has been added in Level
 1437                  * Trigger mode, we need to insert it back into
 1438                  * the ready list, so that the next call to
 1439                  * epoll_wait() will check event availability
 1440                  * again. At this point, no one can insert
 1441                                  * into ep->rdllist besides us. The epoll_ctl()
 1442                                  * callers are locked out by
 1443                                  * ep_scan_ready_list() holding "mtx" and the
 1444                                  * poll callback will queue them in ep->ovflist.
 1445                                  */
 1446                                 list_add_tail(&epi->rdllink, &ep->rdllist);
 1447                                 __pm_stay_awake(epi->ws);
 1448                         }
 1449                 }
 1450         }
 1451 
 1452         return eventcnt;
 1453 }
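
/*
 * Editor's sketch (not part of fs/eventpoll.c): the tail of
 * ep_send_events_proc() above is what distinguishes the three delivery modes
 * as seen from userspace.  A level-triggered item is put back on ep->rdllist,
 * so a still-readable fd reappears on every call below; an EPOLLET item is
 * not re-queued here and is reported again only after ep_poll_callback()
 * queues it for new activity; an EPOLLONESHOT item is disarmed until re-armed
 * with EPOLL_CTL_MOD.  "epfd" is an assumed epoll instance.
 */
#include <stdio.h>
#include <sys/epoll.h>

static void drain_once(int epfd)
{
        struct epoll_event evs[8];
        int i, n;

        /* Non-blocking poll of the ready list (timeout == 0). */
        n = epoll_wait(epfd, evs, 8, 0);
        for (i = 0; i < n; i++)
                printf("cookie %p ready, events 0x%x\n",
                       evs[i].data.ptr, evs[i].events);
}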
 1454 
 1455 static int ep_send_events(struct eventpoll *ep,
 1456                           struct epoll_event __user *events, int maxevents)
 1457 {
 1458         struct ep_send_events_data esed;
 1459 
 1460         esed.maxevents = maxevents;
 1461         esed.events = events;
 1462 
 1463         return ep_scan_ready_list(ep, ep_send_events_proc, &esed, 0);
 1464 }
 1465 
 1466 static inline struct timespec ep_set_mstimeout(long ms)
 1467 {
 1468         struct timespec now, ts = {
 1469                 .tv_sec = ms / MSEC_PER_SEC,
 1470                 .tv_nsec = NSEC_PER_MSEC * (ms % MSEC_PER_SEC),
 1471         };
 1472 
 1473         ktime_get_ts(&now);
 1474         return timespec_add_safe(now, ts);
 1475 }
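
/*
 * Editor's sketch (not part of fs/eventpoll.c): ep_set_mstimeout() turns a
 * relative timeout in milliseconds into an absolute deadline, e.g. 2500 ms
 * becomes { .tv_sec = 2, .tv_nsec = 500000000 } added to the current time.
 * A rough userspace analogue, assuming CLOCK_MONOTONIC as the clock:
 */
#include <time.h>

static struct timespec ms_deadline(long ms)
{
        struct timespec now, ts;

        clock_gettime(CLOCK_MONOTONIC, &now);
        ts.tv_sec  = now.tv_sec + ms / 1000;
        ts.tv_nsec = now.tv_nsec + (ms % 1000) * 1000000L;
        if (ts.tv_nsec >= 1000000000L) {        /* normalise the carry */
                ts.tv_sec++;
                ts.tv_nsec -= 1000000000L;
        }
        return ts;
}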
 1476 
 1477 /**
 1478  * ep_poll - Retrieves ready events, and delivers them to the caller supplied
 1479  *           event buffer.
 1480  *
 1481  * @ep: Pointer to the eventpoll context.
 1482  * @events: Pointer to the userspace buffer where the ready events should be
 1483  *          stored.
 1484  * @maxevents: Size (in terms of number of events) of the caller event buffer.
 1485  * @timeout: Maximum timeout for the ready events fetch operation, in
 1486  *           milliseconds. If the @timeout is zero, the function will not block,
 1487  *           while if the @timeout is less than zero, the function will block
 1488  *           until at least one event has been retrieved (or an error
 1489  *           occurred).
 1490  *
 1491  * Returns: Returns the number of ready events which have been fetched, or an
 1492  *          error code, in case of error.
 1493  */
 1494 static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 1495                    int maxevents, long timeout)
 1496 {
 1497         int res = 0, eavail, timed_out = 0;
 1498         unsigned long flags;
 1499         long slack = 0;
 1500         wait_queue_t wait;
 1501         ktime_t expires, *to = NULL;
 1502 
 1503         if (timeout > 0) {
 1504                 struct timespec end_time = ep_set_mstimeout(timeout);
 1505 
 1506                 slack = select_estimate_accuracy(&end_time);
 1507                 to = &expires;
 1508                 *to = timespec_to_ktime(end_time);
 1509         } else if (timeout == 0) {
 1510                 /*
 1511                  * Avoid the unnecessary trip to the wait queue loop, if the
 1512                  * caller specified a non-blocking operation.
 1513                  */
 1514                 timed_out = 1;
 1515                 spin_lock_irqsave(&ep->lock, flags);
 1516                 goto check_events;
 1517         }
 1518 
 1519 fetch_events:
 1520         spin_lock_irqsave(&ep->lock, flags);
 1521 
 1522         if (!ep_events_available(ep)) {
 1523                 /*
 1524                  * We don't have any available event to return to the caller.
 1525                  * We need to sleep here, and we will be woken up by
 1526                  * ep_poll_callback() when events become available.
 1527                  */
 1528                 init_waitqueue_entry(&wait, current);
 1529                 __add_wait_queue_exclusive(&ep->wq, &wait);
 1530 
 1531                 for (;;) {
 1532                         /*
 1533                          * We don't want to sleep if ep_poll_callback() sends us
 1534                          * a wakeup in between. That's why we set the task state
 1535                          * to TASK_INTERRUPTIBLE before doing the checks.
 1536                          */
 1537                         set_current_state(TASK_INTERRUPTIBLE);
 1538                         if (ep_events_available(ep) || timed_out)
 1539                                 break;
 1540                         if (signal_pending(current)) {
 1541                                 res = -EINTR;
 1542                                 break;
 1543                         }
 1544 
 1545                         spin_unlock_irqrestore(&ep->lock, flags);
 1546                         if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS))
 1547                                 timed_out = 1;
 1548 
 1549                         spin_lock_irqsave(&ep->lock, flags);
 1550                 }
 1551                 __remove_wait_queue(&ep->wq, &wait);
 1552 
 1553                 set_current_state(TASK_RUNNING);
 1554         }
 1555 check_events:
 1556         /* Is it worth trying to dig for events? */
 1557         eavail = ep_events_available(ep);
 1558 
 1559         spin_unlock_irqrestore(&ep->lock, flags);
 1560 
 1561         /*
 1562          * Try to transfer events to user space. If we get 0 events and
 1563          * there is still timeout left over, we go back and try again in
 1564          * search of more luck.
 1565          */
 1566         if (!res && eavail &&
 1567             !(res = ep_send_events(ep, events, maxevents)) && !timed_out)
 1568                 goto fetch_events;
 1569 
 1570         return res;
 1571 }
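
/*
 * Editor's sketch (not part of fs/eventpoll.c): the three timeout cases
 * handled by ep_poll() above, as seen by an epoll_wait() caller.  "epfd",
 * "evs" and "maxevents" are assumed to exist.
 */
#include <sys/epoll.h>

static void timeout_examples(int epfd, struct epoll_event *evs, int maxevents)
{
        /* timeout == 0: check the ready list and return immediately. */
        int n_now   = epoll_wait(epfd, evs, maxevents, 0);

        /* timeout > 0: block up to ~100 ms, using the hrtimer deadline. */
        int n_soon  = epoll_wait(epfd, evs, maxevents, 100);

        /* timeout < 0: block until an event arrives or a signal (EINTR). */
        int n_block = epoll_wait(epfd, evs, maxevents, -1);

        (void)n_now; (void)n_soon; (void)n_block;
}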
 1572 
 1573 /**
 1574  * ep_loop_check_proc - Callback function to be passed to the @ep_call_nested()
 1575  *                      API, to verify that adding an epoll file inside another
 1576  *                      epoll structure does not violate the constraints, in
 1577  *                      terms of closed loops or too deep chains (which can
 1578  *                      result in excessive stack usage).
 1579  *
 1580  * @priv: Pointer to the epoll file to be currently checked.
 1581  * @cookie: Original cookie for this call. This is the top-of-the-chain epoll
 1582  *          data structure pointer.
 1583  * @call_nests: Current depth of the @ep_call_nested() call stack.
 1584  *
 1585  * Returns: Returns zero if adding the epoll @file inside current epoll
 1586  *          structure @ep does not violate the constraints, or -1 otherwise.
 1587  */
 1588 static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
 1589 {
 1590         int error = 0;
 1591         struct file *file = priv;
 1592         struct eventpoll *ep = file->private_data;
 1593         struct eventpoll *ep_tovisit;
 1594         struct rb_node *rbp;
 1595         struct epitem *epi;
 1596 
 1597         mutex_lock_nested(&ep->mtx, call_nests + 1);
 1598         ep->visited = 1;
 1599         list_add(&ep->visited_list_link, &visited_list);
 1600         for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
 1601                 epi = rb_entry(rbp, struct epitem, rbn);
 1602                 if (unlikely(is_file_epoll(epi->ffd.file))) {
 1603                         ep_tovisit = epi->ffd.file->private_data;
 1604                         if (ep_tovisit->visited)
 1605                                 continue;
 1606                         error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
 1607                                         ep_loop_check_proc, epi->ffd.file,
 1608                                         ep_tovisit, current);
 1609                         if (error != 0)
 1610                                 break;
 1611                 } else {
 1612                         /*
 1613                          * If we've reached a file that is not associated with
 1614                          * an ep, then we need to check if the newly added
 1615                          * links are going to add too many wakeup paths. We do
 1616                          * this by adding it to the tfile_check_list, if it's
 1617                          * not already there, and calling reverse_path_check()
 1618                          * during ep_insert().
 1619                          */
 1620                         if (list_empty(&epi->ffd.file->f_tfile_llink))
 1621                                 list_add(&epi->ffd.file->f_tfile_llink,
 1622                                          &tfile_check_list);
 1623                 }
 1624         }
 1625         mutex_unlock(&ep->mtx);
 1626 
 1627         return error;
 1628 }
 1629 
 1630 /**
 1631  * ep_loop_check - Performs a check to verify that adding an epoll file (@file)
 1632  *                 inside another epoll file (represented by @ep) does not create
 1633  *                 closed loops or too deep chains.
 1634  *
 1635  * @ep: Pointer to the epoll private data structure.
 1636  * @file: Pointer to the epoll file to be checked.
 1637  *
 1638  * Returns: Returns zero if adding the epoll @file inside current epoll
 1639  *          structure @ep does not violate the constraints, or -1 otherwise.
 1640  */
 1641 static int ep_loop_check(struct eventpoll *ep, struct file *file)
 1642 {
 1643         int ret;
 1644         struct eventpoll *ep_cur, *ep_next;
 1645 
 1646         ret = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
 1647                               ep_loop_check_proc, file, ep, current);
 1648         /* clear visited list */
 1649         list_for_each_entry_safe(ep_cur, ep_next, &visited_list,
 1650                                                         visited_list_link) {
 1651                 ep_cur->visited = 0;
 1652                 list_del(&ep_cur->visited_list_link);
 1653         }
 1654         return ret;
 1655 }
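
/*
 * Editor's sketch (not part of fs/eventpoll.c): the loop check above is what
 * turns a cycle of epoll instances into an ELOOP error at EPOLL_CTL_ADD time.
 * Error handling and fd cleanup are omitted for brevity.
 */
#include <assert.h>
#include <errno.h>
#include <sys/epoll.h>

static void demo_eloop(void)
{
        int a = epoll_create1(0);
        int b = epoll_create1(0);
        struct epoll_event ev = { .events = EPOLLIN };

        assert(a >= 0 && b >= 0);
        assert(epoll_ctl(a, EPOLL_CTL_ADD, b, &ev) == 0);   /* a watches b */
        /* b watching a would close the loop, so ep_loop_check() rejects it. */
        assert(epoll_ctl(b, EPOLL_CTL_ADD, a, &ev) == -1 && errno == ELOOP);
}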
 1656 
 1657 static void clear_tfile_check_list(void)
 1658 {
 1659         struct file *file;
 1660 
 1661         /* first clear the tfile_check_list */
 1662         while (!list_empty(&tfile_check_list)) {
 1663                 file = list_first_entry(&tfile_check_list, struct file,
 1664                                         f_tfile_llink);
 1665                 list_del_init(&file->f_tfile_llink);
 1666         }
 1667         INIT_LIST_HEAD(&tfile_check_list);
 1668 }
 1669 
 1670 /*
 1671  * Open an eventpoll file descriptor.
 1672  */
 1673 SYSCALL_DEFINE1(epoll_create1, int, flags)
 1674 {
 1675         int error, fd;
 1676         struct eventpoll *ep = NULL;
 1677         struct file *file;
 1678 
 1679         /* Check the EPOLL_* constant for consistency.  */
 1680         BUILD_BUG_ON(EPOLL_CLOEXEC != O_CLOEXEC);
 1681 
 1682         if (flags & ~EPOLL_CLOEXEC)
 1683                 return -EINVAL;
 1684         /*
 1685          * Create the internal data structure ("struct eventpoll").
 1686          */
 1687         error = ep_alloc(&ep);
 1688         if (error < 0)
 1689                 return error;
 1690         /*
 1691          * Creates all the items needed to set up an eventpoll file. That is,
 1692          * a file structure and a free file descriptor.
 1693          */
 1694         fd = get_unused_fd_flags(O_RDWR | (flags & O_CLOEXEC));
 1695         if (fd < 0) {
 1696                 error = fd;
 1697                 goto out_free_ep;
 1698         }
 1699         file = anon_inode_getfile("[eventpoll]", &eventpoll_fops, ep,
 1700                                  O_RDWR | (flags & O_CLOEXEC));
 1701         if (IS_ERR(file)) {
 1702                 error = PTR_ERR(file);
 1703                 goto out_free_fd;
 1704         }
 1705         ep->file = file;
 1706         fd_install(fd, file);
 1707         return fd;
 1708 
 1709 out_free_fd:
 1710         put_unused_fd(fd);
 1711 out_free_ep:
 1712         ep_free(ep);
 1713         return error;
 1714 }
 1715 
 1716 SYSCALL_DEFINE1(epoll_create, int, size)
 1717 {
 1718         if (size <= 0)
 1719                 return -EINVAL;
 1720 
 1721         return sys_epoll_create1(0);
 1722 }
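
/*
 * Editor's sketch (not part of fs/eventpoll.c): creating an epoll instance
 * from userspace.  EPOLL_CLOEXEC is the only flag epoll_create1() accepts
 * (checked above); the legacy epoll_create(size) only requires size > 0 and
 * otherwise ignores it, forwarding to epoll_create1(0).
 */
#include <sys/epoll.h>

static int make_epoll(void)
{
        int epfd = epoll_create1(EPOLL_CLOEXEC);  /* close-on-exec epoll fd */

        if (epfd < 0)
                return -1;      /* errno: EINVAL, EMFILE, ENFILE or ENOMEM */
        return epfd;            /* caller close()s it when done */
}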
 1723 
 1724 /*
 1725  * The following function implements the controller interface for
 1726  * the eventpoll file that enables the insertion/removal/change of
 1727  * file descriptors inside the interest set.
 1728  */
 1729 SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
 1730                 struct epoll_event __user *, event)
 1731 {
 1732         int error;
 1733         int did_lock_epmutex = 0;
 1734         struct file *file, *tfile;
 1735         struct eventpoll *ep;
 1736         struct epitem *epi;
 1737         struct epoll_event epds;
 1738 
 1739         error = -EFAULT;
 1740         if (ep_op_has_event(op) &&
 1741             copy_from_user(&epds, event, sizeof(struct epoll_event)))
 1742                 goto error_return;
 1743 
 1744         /* Get the "struct file *" for the eventpoll file */
 1745         error = -EBADF;
 1746         file = fget(epfd);
 1747         if (!file)
 1748                 goto error_return;
 1749 
 1750         /* Get the "struct file *" for the target file */
 1751         tfile = fget(fd);
 1752         if (!tfile)
 1753                 goto error_fput;
 1754 
 1755         /* The target file descriptor must support poll */
 1756         error = -EPERM;
 1757         if (!tfile->f_op || !tfile->f_op->poll)
 1758                 goto error_tgt_fput;
 1759 
 1760         /* Check if EPOLLWAKEUP is allowed */
 1761         if ((epds.events & EPOLLWAKEUP) && !capable(CAP_BLOCK_SUSPEND))
 1762                 epds.events &= ~EPOLLWAKEUP;
 1763 
 1764         /*
 1765          * We have to check that the file structure underneath the file descriptor
 1766          * the user passed to us _is_ an eventpoll file. And also we do not permit
 1767          * adding an epoll file descriptor inside itself.
 1768          */
 1769         error = -EINVAL;
 1770         if (file == tfile || !is_file_epoll(file))
 1771                 goto error_tgt_fput;
 1772 
 1773         /*
 1774          * At this point it is safe to assume that the "private_data" contains
 1775          * our own data structure.
 1776          */
 1777         ep = file->private_data;
 1778 
 1779         /*
 1780          * When we insert an epoll file descriptor, inside another epoll file
 1781          * descriptor, there is the chance of creating closed loops, which are
 1782          * better handled here than in more critical paths. While we are
 1783          * checking for loops we also determine the list of files reachable
 1784          * and hang them on the tfile_check_list, so we can check that we
 1785          * haven't created too many possible wakeup paths.
 1786          *
 1787          * We need to hold the epmutex across both ep_insert and ep_remove
 1788          * b/c we want to make sure we are looking at a coherent view of
 1789          * the epoll network.
 1790          */
 1791         if (op == EPOLL_CTL_ADD || op == EPOLL_CTL_DEL) {
 1792                 mutex_lock(&epmutex);
 1793                 did_lock_epmutex = 1;
 1794         }
 1795         if (op == EPOLL_CTL_ADD) {
 1796                 if (is_file_epoll(tfile)) {
 1797                         error = -ELOOP;
 1798                         if (ep_loop_check(ep, tfile) != 0) {
 1799                                 clear_tfile_check_list();
 1800                                 goto error_tgt_fput;
 1801                         }
 1802                 } else
 1803                         list_add(&tfile->f_tfile_llink, &tfile_check_list);
 1804         }
 1805 
 1806         mutex_lock_nested(&ep->mtx, 0);
 1807 
 1808         /*
 1809          * Try to look up the file inside our RB tree. Since we grabbed "mtx"
 1810          * above, we can be sure to be able to use the item looked up by
 1811          * ep_find() till we release the mutex.
 1812          */
 1813         epi = ep_find(ep, tfile, fd);
 1814 
 1815         error = -EINVAL;
 1816         switch (op) {
 1817         case EPOLL_CTL_ADD:
 1818                 if (!epi) {
 1819                         epds.events |= POLLERR | POLLHUP;
 1820                         error = ep_insert(ep, &epds, tfile, fd);
 1821                 } else
 1822                         error = -EEXIST;
 1823                 clear_tfile_check_list();
 1824                 break;
 1825         case EPOLL_CTL_DEL:
 1826                 if (epi)
 1827                         error = ep_remove(ep, epi);
 1828                 else
 1829                         error = -ENOENT;
 1830                 break;
 1831         case EPOLL_CTL_MOD:
 1832                 if (epi) {
 1833                         epds.events |= POLLERR | POLLHUP;
 1834                         error = ep_modify(ep, epi, &epds);
 1835                 } else
 1836                         error = -ENOENT;
 1837                 break;
 1838         }
 1839         mutex_unlock(&ep->mtx);
 1840 
 1841 error_tgt_fput:
 1842         if (did_lock_epmutex)
 1843                 mutex_unlock(&epmutex);
 1844 
 1845         fput(tfile);
 1846 error_fput:
 1847         fput(file);
 1848 error_return:
 1849 
 1850         return error;
 1851 }
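
/*
 * Editor's sketch (not part of fs/eventpoll.c): a typical epoll_ctl() caller
 * for the three operations handled above.  POLLERR and POLLHUP are OR-ed in
 * by the kernel for ADD and MOD, so they need not be requested, and a
 * descriptor whose file has no f_op->poll (e.g. a regular file on most
 * filesystems) fails with EPERM.  "epfd" and "fd" are assumed descriptors.
 */
#include <sys/epoll.h>

static int watch_fd(int epfd, int fd, int add)
{
        struct epoll_event ev = {
                .events  = EPOLLIN | EPOLLRDHUP,
                .data.fd = fd,
        };

        if (add)        /* fails with EEXIST if fd is already in the set */
                return epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev);
        /* fails with ENOENT if fd was never added (or was deleted) */
        return epoll_ctl(epfd, EPOLL_CTL_MOD, fd, &ev);
}

static int unwatch_fd(int epfd, int fd)
{
        /* The event pointer is not read for DEL (see ep_op_has_event()). */
        return epoll_ctl(epfd, EPOLL_CTL_DEL, fd, NULL);
}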
 1852 
 1853 /*
 1854  * Implement the event wait interface for the eventpoll file. It is the kernel
 1855  * part of the user space epoll_wait(2).
 1856  */
 1857 SYSCALL_DEFINE4(epoll_wait, int, epfd, struct epoll_event __user *, events,
 1858                 int, maxevents, int, timeout)
 1859 {
 1860         int error;
 1861         struct fd f;
 1862         struct eventpoll *ep;
 1863 
 1864         /* The maximum number of events must be greater than zero */
 1865         if (maxevents <= 0 || maxevents > EP_MAX_EVENTS)
 1866                 return -EINVAL;
 1867 
 1868         /* Verify that the area passed by the user is writeable */
 1869         if (!access_ok(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event)))
 1870                 return -EFAULT;
 1871 
 1872         /* Get the "struct file *" for the eventpoll file */
 1873         f = fdget(epfd);
 1874         if (!f.file)
 1875                 return -EBADF;
 1876 
 1877         /*
 1878          * We have to check that the file structure underneath the fd
 1879          * the user passed to us _is_ an eventpoll file.
 1880          */
 1881         error = -EINVAL;
 1882         if (!is_file_epoll(f.file))
 1883                 goto error_fput;
 1884 
 1885         /*
 1886          * At this point it is safe to assume that the "private_data" contains
 1887          * our own data structure.
 1888          */
 1889         ep = f.file->private_data;
 1890 
 1891         /* Time to fish for events ... */
 1892         error = ep_poll(ep, events, maxevents, timeout);
 1893 
 1894 error_fput:
 1895         fdput(f);
 1896         return error;
 1897 }
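
/*
 * Editor's sketch (not part of fs/eventpoll.c): the userspace side of the
 * epoll_wait() entry point above, which bounds maxevents by EP_MAX_EVENTS
 * and checks that the events buffer is writable before fishing for events.
 * "handle" is an assumed per-fd callback.
 */
#include <errno.h>
#include <sys/epoll.h>

static int event_loop(int epfd, void (*handle)(int fd, unsigned int events))
{
        struct epoll_event evs[64];

        for (;;) {
                int i, n = epoll_wait(epfd, evs, 64, -1);

                if (n < 0) {
                        if (errno == EINTR)     /* interrupted by a signal */
                                continue;
                        return -1;              /* EBADF, EFAULT or EINVAL */
                }
                for (i = 0; i < n; i++)
                        handle(evs[i].data.fd, evs[i].events);
        }
}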
 1898 
 1899 /*
 1900  * Implement the event wait interface for the eventpoll file. It is the kernel
 1901  * part of the user space epoll_pwait(2).
 1902  */
 1903 SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events,
 1904                 int, maxevents, int, timeout, const sigset_t __user *, sigmask,
 1905                 size_t, sigsetsize)
 1906 {
 1907         int error;
 1908         sigset_t ksigmask, sigsaved;
 1909 
 1910         /*
 1911          * If the caller wants a certain signal mask to be set during the wait,
 1912          * we apply it here.
 1913          */
 1914         if (sigmask) {
 1915                 if (sigsetsize != sizeof(sigset_t))
 1916                         return -EINVAL;
 1917                 if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
 1918                         return -EFAULT;
 1919                 sigdelsetmask(&ksigmask, sigmask(SIGKILL) | sigmask(SIGSTOP));
 1920                 sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
 1921         }
 1922 
 1923         error = sys_epoll_wait(epfd, events, maxevents, timeout);
 1924 
 1925         /*
 1926          * If we changed the signal mask, we need to restore the original one.
 1927          * In case we've got a signal while waiting, we do not restore the
 1928          * signal mask yet, and we allow do_signal() to deliver the signal on
 1929          * the way back to userspace, before the signal mask is restored.
 1930          */
 1931         if (sigmask) {
 1932                 if (error == -EINTR) {
 1933                         memcpy(&current->saved_sigmask, &sigsaved,
 1934                                sizeof(sigsaved));
 1935                         set_restore_sigmask();
 1936                 } else
 1937                         sigprocmask(SIG_SETMASK, &sigsaved, NULL);
 1938         }
 1939 
 1940         return error;
 1941 }
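
/*
 * Editor's sketch (not part of fs/eventpoll.c): epoll_pwait() exists to close
 * the race between unblocking a signal and entering epoll_wait(); the mask is
 * applied atomically around the wait, as implemented above.  SIGUSR1 is just
 * an example signal and is assumed to be blocked by the caller.
 */
#include <signal.h>
#include <sys/epoll.h>

static int wait_allowing_sigusr1(int epfd, struct epoll_event *evs, int max)
{
        sigset_t allow;

        /* Start from the current mask and unblock SIGUSR1 only for the wait. */
        sigprocmask(SIG_SETMASK, NULL, &allow);
        sigdelset(&allow, SIGUSR1);

        /* Returns -1/EINTR if SIGUSR1 (or another unblocked signal) arrives. */
        return epoll_pwait(epfd, evs, max, -1, &allow);
}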
 1942 
 1943 static int __init eventpoll_init(void)
 1944 {
 1945         struct sysinfo si;
 1946 
 1947         si_meminfo(&si);
 1948         /*
 1949          * Allow the top 4% of lowmem to be allocated for epoll watches (per user).
 1950          */
 1951         max_user_watches = (((si.totalram - si.totalhigh) / 25) << PAGE_SHIFT) /
 1952                 EP_ITEM_COST;
 1953         BUG_ON(max_user_watches < 0);
 1954 
 1955         /*
 1956          * Initialize the structure used to perform epoll file descriptor
 1957          * inclusion loops checks.
 1958          */
 1959         ep_nested_calls_init(&poll_loop_ncalls);
 1960 
 1961         /* Initialize the structure used to perform safe poll wait head wake ups */
 1962         ep_nested_calls_init(&poll_safewake_ncalls);
 1963 
 1964         /* Initialize the structure used to perform file's f_op->poll() calls */
 1965         ep_nested_calls_init(&poll_readywalk_ncalls);
 1966 
 1967         /* Allocates slab cache used to allocate "struct epitem" items */
 1968         epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem),
 1969                         0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
 1970 
 1971         /* Allocates slab cache used to allocate "struct eppoll_entry" */
 1972         pwq_cache = kmem_cache_create("eventpoll_pwq",
 1973                         sizeof(struct eppoll_entry), 0, SLAB_PANIC, NULL);
 1974 
 1975         return 0;
 1976 }
 1977 fs_initcall(eventpoll_init);
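
/*
 * Editor's sketch (not part of fs/eventpoll.c): the max_user_watches formula
 * above takes lowmem in pages (totalram - totalhigh), keeps 1/25th (~4%),
 * converts that to bytes, and divides by the per-watch cost.  The numbers
 * below are illustrative assumptions only; EP_ITEM_COST is roughly
 * sizeof(struct epitem) + sizeof(struct eppoll_entry), and the 128-byte
 * figure is a stand-in, not the real value.
 */
#include <stdio.h>

int main(void)
{
        unsigned long lowmem_pages = 262144;    /* e.g. 1 GiB of lowmem     */
        unsigned long page_shift   = 12;        /* 4 KiB pages              */
        unsigned long ep_item_cost = 128;       /* assumed per-watch bytes  */
        unsigned long max_watches  =
                ((lowmem_pages / 25) << page_shift) / ep_item_cost;

        /* With these assumptions: ((262144/25) << 12) / 128 = 335520. */
        printf("max_user_watches ~= %lu\n", max_watches);
        return 0;
}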
