FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_event.c


    1 /*-
    2  * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
    3  * Copyright 2004 John-Mark Gurney <jmg@FreeBSD.org>
    4  * Copyright (c) 2009 Apple, Inc.
    5  * All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice, this list of conditions and the following disclaimer.
   12  * 2. Redistributions in binary form must reproduce the above copyright
   13  *    notice, this list of conditions and the following disclaimer in the
   14  *    documentation and/or other materials provided with the distribution.
   15  *
   16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   26  * SUCH DAMAGE.
   27  */
   28 
   29 #include <sys/cdefs.h>
   30 __FBSDID("$FreeBSD: releng/11.0/sys/kern/kern_event.c 303216 2016-07-23 08:23:57Z kib $");
   31 
   32 #include "opt_ktrace.h"
   33 #include "opt_kqueue.h"
   34 
   35 #include <sys/param.h>
   36 #include <sys/systm.h>
   37 #include <sys/capsicum.h>
   38 #include <sys/kernel.h>
   39 #include <sys/lock.h>
   40 #include <sys/mutex.h>
   41 #include <sys/rwlock.h>
   42 #include <sys/proc.h>
   43 #include <sys/malloc.h>
   44 #include <sys/unistd.h>
   45 #include <sys/file.h>
   46 #include <sys/filedesc.h>
   47 #include <sys/filio.h>
   48 #include <sys/fcntl.h>
   49 #include <sys/kthread.h>
   50 #include <sys/selinfo.h>
   51 #include <sys/stdatomic.h>
   52 #include <sys/queue.h>
   53 #include <sys/event.h>
   54 #include <sys/eventvar.h>
   55 #include <sys/poll.h>
   56 #include <sys/protosw.h>
   57 #include <sys/resourcevar.h>
   58 #include <sys/sigio.h>
   59 #include <sys/signalvar.h>
   60 #include <sys/socket.h>
   61 #include <sys/socketvar.h>
   62 #include <sys/stat.h>
   63 #include <sys/sysctl.h>
   64 #include <sys/sysproto.h>
   65 #include <sys/syscallsubr.h>
   66 #include <sys/taskqueue.h>
   67 #include <sys/uio.h>
   68 #include <sys/user.h>
   69 #ifdef KTRACE
   70 #include <sys/ktrace.h>
   71 #endif
   72 
   73 #include <vm/uma.h>
   74 
   75 static MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");
   76 
   77 /*
    78  * This lock is used if multiple kq locks are required.  This possibly
    79  * should be made into a per-proc lock.
   80  */
   81 static struct mtx       kq_global;
   82 MTX_SYSINIT(kq_global, &kq_global, "kqueue order", MTX_DEF);
   83 #define KQ_GLOBAL_LOCK(lck, haslck)     do {    \
   84         if (!haslck)                            \
   85                 mtx_lock(lck);                  \
   86         haslck = 1;                             \
   87 } while (0)
   88 #define KQ_GLOBAL_UNLOCK(lck, haslck)   do {    \
   89         if (haslck)                             \
   90                 mtx_unlock(lck);                        \
   91         haslck = 0;                             \
   92 } while (0)
   93 
   94 TASKQUEUE_DEFINE_THREAD(kqueue_ctx);
   95 
   96 static int      kevent_copyout(void *arg, struct kevent *kevp, int count);
   97 static int      kevent_copyin(void *arg, struct kevent *kevp, int count);
   98 static int      kqueue_register(struct kqueue *kq, struct kevent *kev,
   99                     struct thread *td, int waitok);
  100 static int      kqueue_acquire(struct file *fp, struct kqueue **kqp);
  101 static void     kqueue_release(struct kqueue *kq, int locked);
  102 static void     kqueue_destroy(struct kqueue *kq);
  103 static void     kqueue_drain(struct kqueue *kq, struct thread *td);
  104 static int      kqueue_expand(struct kqueue *kq, struct filterops *fops,
  105                     uintptr_t ident, int waitok);
  106 static void     kqueue_task(void *arg, int pending);
  107 static int      kqueue_scan(struct kqueue *kq, int maxevents,
  108                     struct kevent_copyops *k_ops,
  109                     const struct timespec *timeout,
  110                     struct kevent *keva, struct thread *td);
  111 static void     kqueue_wakeup(struct kqueue *kq);
  112 static struct filterops *kqueue_fo_find(int filt);
  113 static void     kqueue_fo_release(int filt);
  114 
  115 static fo_ioctl_t       kqueue_ioctl;
  116 static fo_poll_t        kqueue_poll;
  117 static fo_kqfilter_t    kqueue_kqfilter;
  118 static fo_stat_t        kqueue_stat;
  119 static fo_close_t       kqueue_close;
  120 static fo_fill_kinfo_t  kqueue_fill_kinfo;
  121 
  122 static struct fileops kqueueops = {
  123         .fo_read = invfo_rdwr,
  124         .fo_write = invfo_rdwr,
  125         .fo_truncate = invfo_truncate,
  126         .fo_ioctl = kqueue_ioctl,
  127         .fo_poll = kqueue_poll,
  128         .fo_kqfilter = kqueue_kqfilter,
  129         .fo_stat = kqueue_stat,
  130         .fo_close = kqueue_close,
  131         .fo_chmod = invfo_chmod,
  132         .fo_chown = invfo_chown,
  133         .fo_sendfile = invfo_sendfile,
  134         .fo_fill_kinfo = kqueue_fill_kinfo,
  135 };
  136 
  137 static int      knote_attach(struct knote *kn, struct kqueue *kq);
  138 static void     knote_drop(struct knote *kn, struct thread *td);
  139 static void     knote_enqueue(struct knote *kn);
  140 static void     knote_dequeue(struct knote *kn);
  141 static void     knote_init(void);
  142 static struct   knote *knote_alloc(int waitok);
  143 static void     knote_free(struct knote *kn);
  144 
  145 static void     filt_kqdetach(struct knote *kn);
  146 static int      filt_kqueue(struct knote *kn, long hint);
  147 static int      filt_procattach(struct knote *kn);
  148 static void     filt_procdetach(struct knote *kn);
  149 static int      filt_proc(struct knote *kn, long hint);
  150 static int      filt_fileattach(struct knote *kn);
  151 static void     filt_timerexpire(void *knx);
  152 static int      filt_timerattach(struct knote *kn);
  153 static void     filt_timerdetach(struct knote *kn);
  154 static int      filt_timer(struct knote *kn, long hint);
  155 static int      filt_userattach(struct knote *kn);
  156 static void     filt_userdetach(struct knote *kn);
  157 static int      filt_user(struct knote *kn, long hint);
  158 static void     filt_usertouch(struct knote *kn, struct kevent *kev,
  159                     u_long type);
  160 
  161 static struct filterops file_filtops = {
  162         .f_isfd = 1,
  163         .f_attach = filt_fileattach,
  164 };
  165 static struct filterops kqread_filtops = {
  166         .f_isfd = 1,
  167         .f_detach = filt_kqdetach,
  168         .f_event = filt_kqueue,
  169 };
  170 /* XXX - move to kern_proc.c?  */
  171 static struct filterops proc_filtops = {
  172         .f_isfd = 0,
  173         .f_attach = filt_procattach,
  174         .f_detach = filt_procdetach,
  175         .f_event = filt_proc,
  176 };
  177 static struct filterops timer_filtops = {
  178         .f_isfd = 0,
  179         .f_attach = filt_timerattach,
  180         .f_detach = filt_timerdetach,
  181         .f_event = filt_timer,
  182 };
  183 static struct filterops user_filtops = {
  184         .f_attach = filt_userattach,
  185         .f_detach = filt_userdetach,
  186         .f_event = filt_user,
  187         .f_touch = filt_usertouch,
  188 };
  189 
  190 static uma_zone_t       knote_zone;
  191 static atomic_uint      kq_ncallouts = ATOMIC_VAR_INIT(0);
  192 static unsigned int     kq_calloutmax = 4 * 1024;
  193 SYSCTL_UINT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
  194     &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
  195 
  196 /* XXX - ensure not KN_INFLUX?? */
  197 #define KNOTE_ACTIVATE(kn, islock) do {                                 \
  198         if ((islock))                                                   \
  199                 mtx_assert(&(kn)->kn_kq->kq_lock, MA_OWNED);            \
  200         else                                                            \
  201                 KQ_LOCK((kn)->kn_kq);                                   \
  202         (kn)->kn_status |= KN_ACTIVE;                                   \
  203         if (((kn)->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)         \
  204                 knote_enqueue((kn));                                    \
  205         if (!(islock))                                                  \
  206                 KQ_UNLOCK((kn)->kn_kq);                                 \
  207 } while(0)
  208 #define KQ_LOCK(kq) do {                                                \
  209         mtx_lock(&(kq)->kq_lock);                                       \
  210 } while (0)
  211 #define KQ_FLUX_WAKEUP(kq) do {                                         \
  212         if (((kq)->kq_state & KQ_FLUXWAIT) == KQ_FLUXWAIT) {            \
  213                 (kq)->kq_state &= ~KQ_FLUXWAIT;                         \
  214                 wakeup((kq));                                           \
  215         }                                                               \
  216 } while (0)
  217 #define KQ_UNLOCK_FLUX(kq) do {                                         \
  218         KQ_FLUX_WAKEUP(kq);                                             \
  219         mtx_unlock(&(kq)->kq_lock);                                     \
  220 } while (0)
  221 #define KQ_UNLOCK(kq) do {                                              \
  222         mtx_unlock(&(kq)->kq_lock);                                     \
  223 } while (0)
  224 #define KQ_OWNED(kq) do {                                               \
  225         mtx_assert(&(kq)->kq_lock, MA_OWNED);                           \
  226 } while (0)
  227 #define KQ_NOTOWNED(kq) do {                                            \
  228         mtx_assert(&(kq)->kq_lock, MA_NOTOWNED);                        \
  229 } while (0)
  230 
  231 static struct knlist *
  232 kn_list_lock(struct knote *kn)
  233 {
  234         struct knlist *knl;
  235 
  236         knl = kn->kn_knlist;
  237         if (knl != NULL)
  238                 knl->kl_lock(knl->kl_lockarg);
  239         return (knl);
  240 }
  241 
  242 static void
  243 kn_list_unlock(struct knlist *knl)
  244 {
  245         bool do_free;
  246 
  247         if (knl == NULL)
  248                 return;
  249         do_free = knl->kl_autodestroy && knlist_empty(knl);
  250         knl->kl_unlock(knl->kl_lockarg);
  251         if (do_free) {
  252                 knlist_destroy(knl);
  253                 free(knl, M_KQUEUE);
  254         }
  255 }
  256 
  257 #define KNL_ASSERT_LOCK(knl, islocked) do {                             \
  258         if (islocked)                                                   \
  259                 KNL_ASSERT_LOCKED(knl);                         \
  260         else                                                            \
  261                 KNL_ASSERT_UNLOCKED(knl);                               \
  262 } while (0)
  263 #ifdef INVARIANTS
  264 #define KNL_ASSERT_LOCKED(knl) do {                                     \
  265         knl->kl_assert_locked((knl)->kl_lockarg);                       \
  266 } while (0)
  267 #define KNL_ASSERT_UNLOCKED(knl) do {                                   \
  268         knl->kl_assert_unlocked((knl)->kl_lockarg);                     \
  269 } while (0)
  270 #else /* !INVARIANTS */
  271 #define KNL_ASSERT_LOCKED(knl) do {} while(0)
  272 #define KNL_ASSERT_UNLOCKED(knl) do {} while (0)
  273 #endif /* INVARIANTS */
  274 
  275 #ifndef KN_HASHSIZE
  276 #define KN_HASHSIZE             64              /* XXX should be tunable */
  277 #endif
  278 
   279 #define KN_HASH(val, mask)      (((val) ^ ((val) >> 8)) & (mask))
  280 
  281 static int
  282 filt_nullattach(struct knote *kn)
  283 {
  284 
  285         return (ENXIO);
   286 }
  287 
  288 struct filterops null_filtops = {
  289         .f_isfd = 0,
  290         .f_attach = filt_nullattach,
  291 };
  292 
  293 /* XXX - make SYSINIT to add these, and move into respective modules. */
  294 extern struct filterops sig_filtops;
  295 extern struct filterops fs_filtops;
  296 
  297 /*
   298  * Table for all system-defined filters.
  299  */
  300 static struct mtx       filterops_lock;
  301 MTX_SYSINIT(kqueue_filterops, &filterops_lock, "protect sysfilt_ops",
  302         MTX_DEF);
  303 static struct {
  304         struct filterops *for_fop;
  305         int for_nolock;
  306         int for_refcnt;
  307 } sysfilt_ops[EVFILT_SYSCOUNT] = {
  308         { &file_filtops, 1 },                   /* EVFILT_READ */
  309         { &file_filtops, 1 },                   /* EVFILT_WRITE */
  310         { &null_filtops },                      /* EVFILT_AIO */
  311         { &file_filtops, 1 },                   /* EVFILT_VNODE */
  312         { &proc_filtops, 1 },                   /* EVFILT_PROC */
  313         { &sig_filtops, 1 },                    /* EVFILT_SIGNAL */
  314         { &timer_filtops, 1 },                  /* EVFILT_TIMER */
  315         { &file_filtops, 1 },                   /* EVFILT_PROCDESC */
  316         { &fs_filtops, 1 },                     /* EVFILT_FS */
  317         { &null_filtops },                      /* EVFILT_LIO */
  318         { &user_filtops, 1 },                   /* EVFILT_USER */
  319         { &null_filtops },                      /* EVFILT_SENDFILE */
  320 };
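
/*
 * System filters are negative constants (EVFILT_READ is -1,
 * EVFILT_WRITE is -2, and so on), so a filter number maps to its slot
 * in sysfilt_ops[] via one's complement: ~EVFILT_READ is 0,
 * ~EVFILT_WRITE is 1, etc.  kqueue_fo_find() and kqueue_fo_release()
 * below depend on this encoding.
 */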
  321 
  322 /*
  323  * Simple redirection for all cdevsw style objects to call their fo_kqfilter
  324  * method.
  325  */
  326 static int
  327 filt_fileattach(struct knote *kn)
  328 {
  329 
  330         return (fo_kqfilter(kn->kn_fp, kn));
  331 }
  332 
  333 /*ARGSUSED*/
  334 static int
  335 kqueue_kqfilter(struct file *fp, struct knote *kn)
  336 {
  337         struct kqueue *kq = kn->kn_fp->f_data;
  338 
  339         if (kn->kn_filter != EVFILT_READ)
  340                 return (EINVAL);
  341 
  342         kn->kn_status |= KN_KQUEUE;
  343         kn->kn_fop = &kqread_filtops;
  344         knlist_add(&kq->kq_sel.si_note, kn, 0);
  345 
  346         return (0);
  347 }
  348 
  349 static void
  350 filt_kqdetach(struct knote *kn)
  351 {
  352         struct kqueue *kq = kn->kn_fp->f_data;
  353 
  354         knlist_remove(&kq->kq_sel.si_note, kn, 0);
  355 }
  356 
  357 /*ARGSUSED*/
  358 static int
  359 filt_kqueue(struct knote *kn, long hint)
  360 {
  361         struct kqueue *kq = kn->kn_fp->f_data;
  362 
  363         kn->kn_data = kq->kq_count;
  364         return (kn->kn_data > 0);
  365 }
  366 
  367 /* XXX - move to kern_proc.c?  */
  368 static int
  369 filt_procattach(struct knote *kn)
  370 {
  371         struct proc *p;
  372         int error;
  373         bool exiting, immediate;
  374 
  375         exiting = immediate = false;
  376         p = pfind(kn->kn_id);
  377         if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
  378                 p = zpfind(kn->kn_id);
  379                 exiting = true;
  380         } else if (p != NULL && (p->p_flag & P_WEXIT)) {
  381                 exiting = true;
  382         }
  383 
  384         if (p == NULL)
  385                 return (ESRCH);
  386         if ((error = p_cansee(curthread, p))) {
  387                 PROC_UNLOCK(p);
  388                 return (error);
  389         }
  390 
  391         kn->kn_ptr.p_proc = p;
  392         kn->kn_flags |= EV_CLEAR;               /* automatically set */
  393 
  394         /*
  395          * Internal flag indicating registration done by kernel for the
  396          * purposes of getting a NOTE_CHILD notification.
  397          */
  398         if (kn->kn_flags & EV_FLAG2) {
  399                 kn->kn_flags &= ~EV_FLAG2;
  400                 kn->kn_data = kn->kn_sdata;             /* ppid */
  401                 kn->kn_fflags = NOTE_CHILD;
  402                 kn->kn_sfflags &= ~(NOTE_EXIT | NOTE_EXEC | NOTE_FORK);
  403                 immediate = true; /* Force immediate activation of child note. */
  404         }
  405         /*
  406          * Internal flag indicating registration done by kernel (for other than
  407          * NOTE_CHILD).
  408          */
  409         if (kn->kn_flags & EV_FLAG1) {
  410                 kn->kn_flags &= ~EV_FLAG1;
  411         }
  412 
  413         knlist_add(p->p_klist, kn, 1);
  414 
  415         /*
  416          * Immediately activate any child notes or, in the case of a zombie
  417          * target process, exit notes.  The latter is necessary to handle the
  418          * case where the target process, e.g. a child, dies before the kevent
  419          * is registered.
  420          */
  421         if (immediate || (exiting && filt_proc(kn, NOTE_EXIT)))
  422                 KNOTE_ACTIVATE(kn, 0);
  423 
  424         PROC_UNLOCK(p);
  425 
  426         return (0);
  427 }
  428 
  429 /*
  430  * The knote may be attached to a different process, which may exit,
  431  * leaving nothing for the knote to be attached to.  So when the process
  432  * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
  433  * it will be deleted when read out.  However, as part of the knote deletion,
  434  * this routine is called, so a check is needed to avoid actually performing
  435  * a detach, because the original process does not exist any more.
  436  */
  437 /* XXX - move to kern_proc.c?  */
  438 static void
  439 filt_procdetach(struct knote *kn)
  440 {
  441 
  442         knlist_remove(kn->kn_knlist, kn, 0);
  443         kn->kn_ptr.p_proc = NULL;
  444 }
  445 
  446 /* XXX - move to kern_proc.c?  */
  447 static int
  448 filt_proc(struct knote *kn, long hint)
  449 {
  450         struct proc *p;
  451         u_int event;
  452 
  453         p = kn->kn_ptr.p_proc;
  454         if (p == NULL) /* already activated, from attach filter */
  455                 return (0);
  456 
  457         /* Mask off extra data. */
  458         event = (u_int)hint & NOTE_PCTRLMASK;
  459 
  460         /* If the user is interested in this event, record it. */
  461         if (kn->kn_sfflags & event)
  462                 kn->kn_fflags |= event;
  463 
  464         /* Process is gone, so flag the event as finished. */
  465         if (event == NOTE_EXIT) {
  466                 kn->kn_flags |= EV_EOF | EV_ONESHOT;
  467                 kn->kn_ptr.p_proc = NULL;
  468                 if (kn->kn_fflags & NOTE_EXIT)
  469                         kn->kn_data = KW_EXITCODE(p->p_xexit, p->p_xsig);
  470                 if (kn->kn_fflags == 0)
  471                         kn->kn_flags |= EV_DROP;
  472                 return (1);
  473         }
  474 
  475         return (kn->kn_fflags != 0);
  476 }
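
/*
 * Userland sketch (illustrative only) of the EVFILT_PROC path above:
 * wait for a process to exit and recover its exit status, which
 * filt_proc() stores in the event's data field via KW_EXITCODE().
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, (uintptr_t)pid, EVFILT_PROC, EV_ADD | EV_ONESHOT,
 *	    NOTE_EXIT, 0, NULL);
 *	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
 *		err(1, "kevent register");
 *	if (kevent(kq, NULL, 0, &kev, 1, NULL) == 1)
 *		printf("pid %d exited, status %d\n", (int)kev.ident,
 *		    (int)kev.data);
 */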
  477 
   478 /*
   479  * Called when a process forks.  It mostly does the same as knote(),
   480  * activating all knotes registered to fire when the process forks.
   481  * Additionally, for each knote attached to the parent, check
   482  * whether the user wants to track the new process.  If so, attach
   483  * a new knote to the child, and immediately report an event with
   484  * the child's pid.
   485  */
  486 void
  487 knote_fork(struct knlist *list, int pid)
  488 {
  489         struct kqueue *kq;
  490         struct knote *kn;
  491         struct kevent kev;
  492         int error;
  493 
  494         if (list == NULL)
  495                 return;
  496         list->kl_lock(list->kl_lockarg);
  497 
  498         SLIST_FOREACH(kn, &list->kl_list, kn_selnext) {
  499                 kq = kn->kn_kq;
  500                 KQ_LOCK(kq);
  501                 if ((kn->kn_status & (KN_INFLUX | KN_SCAN)) == KN_INFLUX) {
  502                         KQ_UNLOCK(kq);
  503                         continue;
  504                 }
  505 
  506                 /*
  507                  * The same as knote(), activate the event.
  508                  */
  509                 if ((kn->kn_sfflags & NOTE_TRACK) == 0) {
  510                         kn->kn_status |= KN_HASKQLOCK;
  511                         if (kn->kn_fop->f_event(kn, NOTE_FORK))
  512                                 KNOTE_ACTIVATE(kn, 1);
  513                         kn->kn_status &= ~KN_HASKQLOCK;
  514                         KQ_UNLOCK(kq);
  515                         continue;
  516                 }
  517 
  518                 /*
  519                  * The NOTE_TRACK case. In addition to the activation
  520                  * of the event, we need to register new events to
  521                  * track the child. Drop the locks in preparation for
  522                  * the call to kqueue_register().
  523                  */
  524                 kn->kn_status |= KN_INFLUX;
  525                 KQ_UNLOCK(kq);
  526                 list->kl_unlock(list->kl_lockarg);
  527 
  528                 /*
  529                  * Activate existing knote and register tracking knotes with
  530                  * new process.
  531                  *
  532                  * First register a knote to get just the child notice. This
  533                  * must be a separate note from a potential NOTE_EXIT
  534                  * notification since both NOTE_CHILD and NOTE_EXIT are defined
  535                  * to use the data field (in conflicting ways).
  536                  */
  537                 kev.ident = pid;
  538                 kev.filter = kn->kn_filter;
  539                 kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_ONESHOT |
  540                     EV_FLAG2;
  541                 kev.fflags = kn->kn_sfflags;
  542                 kev.data = kn->kn_id;           /* parent */
  543                 kev.udata = kn->kn_kevent.udata;/* preserve udata */
  544                 error = kqueue_register(kq, &kev, NULL, 0);
  545                 if (error)
  546                         kn->kn_fflags |= NOTE_TRACKERR;
  547 
  548                 /*
  549                  * Then register another knote to track other potential events
  550                  * from the new process.
  551                  */
  552                 kev.ident = pid;
  553                 kev.filter = kn->kn_filter;
  554                 kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
  555                 kev.fflags = kn->kn_sfflags;
  556                 kev.data = kn->kn_id;           /* parent */
  557                 kev.udata = kn->kn_kevent.udata;/* preserve udata */
  558                 error = kqueue_register(kq, &kev, NULL, 0);
  559                 if (error)
  560                         kn->kn_fflags |= NOTE_TRACKERR;
  561                 if (kn->kn_fop->f_event(kn, NOTE_FORK))
  562                         KNOTE_ACTIVATE(kn, 0);
  563                 KQ_LOCK(kq);
  564                 kn->kn_status &= ~KN_INFLUX;
  565                 KQ_UNLOCK_FLUX(kq);
  566                 list->kl_lock(list->kl_lockarg);
  567         }
  568         list->kl_unlock(list->kl_lockarg);
  569 }
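
/*
 * Userland sketch (illustrative only) of a NOTE_TRACK registration
 * that exercises knote_fork() above: when the traced process forks,
 * the kernel attaches knotes to the child and delivers a NOTE_CHILD
 * event whose data field carries the parent's pid.
 *
 *	EV_SET(&kev, (uintptr_t)pid, EVFILT_PROC, EV_ADD,
 *	    NOTE_TRACK | NOTE_FORK | NOTE_EXIT, 0, NULL);
 *	(void)kevent(kq, &kev, 1, NULL, 0, NULL);
 */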
  570 
  571 /*
  572  * XXX: EVFILT_TIMER should perhaps live in kern_time.c beside the
  573  * interval timer support code.
  574  */
  575 
  576 #define NOTE_TIMER_PRECMASK     (NOTE_SECONDS|NOTE_MSECONDS|NOTE_USECONDS| \
  577                                 NOTE_NSECONDS)
  578 
  579 static sbintime_t
  580 timer2sbintime(intptr_t data, int flags)
  581 {
  582 
  583         /*
  584          * Macros for converting to the fractional second portion of an
  585          * sbintime_t using 64bit multiplication to improve precision.
  586          */
  587 #define NS_TO_SBT(ns) (((ns) * (((uint64_t)1 << 63) / 500000000)) >> 32)
  588 #define US_TO_SBT(us) (((us) * (((uint64_t)1 << 63) / 500000)) >> 32)
  589 #define MS_TO_SBT(ms) (((ms) * (((uint64_t)1 << 63) / 500)) >> 32)
  590         switch (flags & NOTE_TIMER_PRECMASK) {
  591         case NOTE_SECONDS:
  592 #ifdef __LP64__
  593                 if (data > (SBT_MAX / SBT_1S))
  594                         return SBT_MAX;
  595 #endif
  596                 return ((sbintime_t)data << 32);
  597         case NOTE_MSECONDS: /* FALLTHROUGH */
  598         case 0:
  599                 if (data >= 1000) {
  600                         int64_t secs = data / 1000;
  601 #ifdef __LP64__
  602                         if (secs > (SBT_MAX / SBT_1S))
  603                                 return SBT_MAX;
  604 #endif
  605                         return (secs << 32 | MS_TO_SBT(data % 1000));
  606                 }
  607                 return MS_TO_SBT(data);
  608         case NOTE_USECONDS:
  609                 if (data >= 1000000) {
  610                         int64_t secs = data / 1000000;
  611 #ifdef __LP64__
  612                         if (secs > (SBT_MAX / SBT_1S))
  613                                 return SBT_MAX;
  614 #endif
  615                         return (secs << 32 | US_TO_SBT(data % 1000000));
  616                 }
  617                 return US_TO_SBT(data);
  618         case NOTE_NSECONDS:
  619                 if (data >= 1000000000) {
  620                         int64_t secs = data / 1000000000;
  621 #ifdef __LP64__
  622                         if (secs > (SBT_MAX / SBT_1S))
  623                                 return SBT_MAX;
  624 #endif
   625                         return (secs << 32 | NS_TO_SBT(data % 1000000000));
  626                 }
  627                 return NS_TO_SBT(data);
  628         default:
  629                 break;
  630         }
  631         return (-1);
  632 }
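
/*
 * Worked example of the conversion above: with data = 500 and
 * NOTE_MSECONDS,
 *
 *	MS_TO_SBT(500) = (500 * ((1 << 63) / 500)) >> 32
 *	              ~= (1 << 63) >> 32 = 1 << 31,
 *
 * i.e. half of the 2^32 fractional-second units in an sbintime_t,
 * which is 0.5s; truncation in the precomputed quotient costs less
 * than one unit of 2^-32 s.
 */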
  633 
  634 static void
  635 filt_timerexpire(void *knx)
  636 {
  637         struct callout *calloutp;
  638         struct knote *kn;
  639 
  640         kn = knx;
  641         kn->kn_data++;
  642         KNOTE_ACTIVATE(kn, 0);  /* XXX - handle locking */
  643 
  644         if ((kn->kn_flags & EV_ONESHOT) != EV_ONESHOT) {
  645                 calloutp = (struct callout *)kn->kn_hook;
  646                 *kn->kn_ptr.p_nexttime += timer2sbintime(kn->kn_sdata, 
  647                     kn->kn_sfflags);
  648                 callout_reset_sbt_on(calloutp, *kn->kn_ptr.p_nexttime, 0,
  649                     filt_timerexpire, kn, PCPU_GET(cpuid), C_ABSOLUTE);
  650         }
  651 }
  652 
  653 /*
   654  * data contains the amount of time to sleep
  655  */
  656 static int
  657 filt_timerattach(struct knote *kn)
  658 {
  659         struct callout *calloutp;
  660         sbintime_t to;
  661         unsigned int ncallouts;
  662 
  663         if ((intptr_t)kn->kn_sdata < 0)
  664                 return (EINVAL);
  665         if ((intptr_t)kn->kn_sdata == 0 && (kn->kn_flags & EV_ONESHOT) == 0)
  666                 kn->kn_sdata = 1;
   667         /* Only precision units are supported in flags so far */
  668         if (kn->kn_sfflags & ~NOTE_TIMER_PRECMASK)
  669                 return (EINVAL);
  670 
  671         to = timer2sbintime(kn->kn_sdata, kn->kn_sfflags);
  672         if (to < 0)
  673                 return (EINVAL);
  674 
  675         ncallouts = atomic_load_explicit(&kq_ncallouts, memory_order_relaxed);
  676         do {
  677                 if (ncallouts >= kq_calloutmax)
  678                         return (ENOMEM);
  679         } while (!atomic_compare_exchange_weak_explicit(&kq_ncallouts,
  680             &ncallouts, ncallouts + 1, memory_order_relaxed,
  681             memory_order_relaxed));
  682 
  683         kn->kn_flags |= EV_CLEAR;               /* automatically set */
  684         kn->kn_status &= ~KN_DETACHED;          /* knlist_add clears it */
  685         kn->kn_ptr.p_nexttime = malloc(sizeof(sbintime_t), M_KQUEUE, M_WAITOK);
  686         calloutp = malloc(sizeof(*calloutp), M_KQUEUE, M_WAITOK);
  687         callout_init(calloutp, 1);
  688         kn->kn_hook = calloutp;
  689         *kn->kn_ptr.p_nexttime = to + sbinuptime();
  690         callout_reset_sbt_on(calloutp, *kn->kn_ptr.p_nexttime, 0,
  691             filt_timerexpire, kn, PCPU_GET(cpuid), C_ABSOLUTE);
  692 
  693         return (0);
  694 }
  695 
  696 static void
  697 filt_timerdetach(struct knote *kn)
  698 {
  699         struct callout *calloutp;
  700         unsigned int old;
  701 
  702         calloutp = (struct callout *)kn->kn_hook;
  703         callout_drain(calloutp);
  704         free(calloutp, M_KQUEUE);
  705         free(kn->kn_ptr.p_nexttime, M_KQUEUE);
  706         old = atomic_fetch_sub_explicit(&kq_ncallouts, 1, memory_order_relaxed);
  707         KASSERT(old > 0, ("Number of callouts cannot become negative"));
  708         kn->kn_status |= KN_DETACHED;   /* knlist_remove sets it */
  709 }
  710 
  711 static int
  712 filt_timer(struct knote *kn, long hint)
  713 {
  714 
  715         return (kn->kn_data != 0);
  716 }
  717 
  718 static int
  719 filt_userattach(struct knote *kn)
  720 {
  721 
  722         /* 
  723          * EVFILT_USER knotes are not attached to anything in the kernel.
  724          */ 
  725         kn->kn_hook = NULL;
  726         if (kn->kn_fflags & NOTE_TRIGGER)
  727                 kn->kn_hookid = 1;
  728         else
  729                 kn->kn_hookid = 0;
  730         return (0);
  731 }
  732 
  733 static void
  734 filt_userdetach(__unused struct knote *kn)
  735 {
  736 
  737         /*
  738          * EVFILT_USER knotes are not attached to anything in the kernel.
  739          */
  740 }
  741 
  742 static int
  743 filt_user(struct knote *kn, __unused long hint)
  744 {
  745 
  746         return (kn->kn_hookid);
  747 }
  748 
  749 static void
  750 filt_usertouch(struct knote *kn, struct kevent *kev, u_long type)
  751 {
  752         u_int ffctrl;
  753 
  754         switch (type) {
  755         case EVENT_REGISTER:
  756                 if (kev->fflags & NOTE_TRIGGER)
  757                         kn->kn_hookid = 1;
  758 
  759                 ffctrl = kev->fflags & NOTE_FFCTRLMASK;
  760                 kev->fflags &= NOTE_FFLAGSMASK;
  761                 switch (ffctrl) {
  762                 case NOTE_FFNOP:
  763                         break;
  764 
  765                 case NOTE_FFAND:
  766                         kn->kn_sfflags &= kev->fflags;
  767                         break;
  768 
  769                 case NOTE_FFOR:
  770                         kn->kn_sfflags |= kev->fflags;
  771                         break;
  772 
  773                 case NOTE_FFCOPY:
  774                         kn->kn_sfflags = kev->fflags;
  775                         break;
  776 
  777                 default:
  778                         /* XXX Return error? */
  779                         break;
  780                 }
  781                 kn->kn_sdata = kev->data;
  782                 if (kev->flags & EV_CLEAR) {
  783                         kn->kn_hookid = 0;
  784                         kn->kn_data = 0;
  785                         kn->kn_fflags = 0;
  786                 }
  787                 break;
  788 
  789         case EVENT_PROCESS:
  790                 *kev = kn->kn_kevent;
  791                 kev->fflags = kn->kn_sfflags;
  792                 kev->data = kn->kn_sdata;
  793                 if (kn->kn_flags & EV_CLEAR) {
  794                         kn->kn_hookid = 0;
  795                         kn->kn_data = 0;
  796                         kn->kn_fflags = 0;
  797                 }
  798                 break;
  799 
  800         default:
  801                 panic("filt_usertouch() - invalid type (%ld)", type);
  802                 break;
  803         }
  804 }
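
/*
 * Userland sketch (illustrative only): EVFILT_USER as an in-process
 * wakeup channel.  One thread registers the event; another triggers
 * it with NOTE_TRIGGER, which filt_usertouch() turns into
 * kn_hookid = 1 so filt_user() reports the knote as active.
 *
 *	EV_SET(&kev, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	(void)kevent(kq, &kev, 1, NULL, 0, NULL);
 *	...
 *	EV_SET(&kev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
 *	(void)kevent(kq, &kev, 1, NULL, 0, NULL);
 */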
  805 
  806 int
  807 sys_kqueue(struct thread *td, struct kqueue_args *uap)
  808 {
  809 
  810         return (kern_kqueue(td, 0, NULL));
  811 }
  812 
  813 static void
  814 kqueue_init(struct kqueue *kq)
  815 {
  816 
  817         mtx_init(&kq->kq_lock, "kqueue", NULL, MTX_DEF | MTX_DUPOK);
  818         TAILQ_INIT(&kq->kq_head);
  819         knlist_init_mtx(&kq->kq_sel.si_note, &kq->kq_lock);
  820         TASK_INIT(&kq->kq_task, 0, kqueue_task, kq);
  821 }
  822 
  823 int
  824 kern_kqueue(struct thread *td, int flags, struct filecaps *fcaps)
  825 {
  826         struct filedesc *fdp;
  827         struct kqueue *kq;
  828         struct file *fp;
  829         struct ucred *cred;
  830         int fd, error;
  831 
  832         fdp = td->td_proc->p_fd;
  833         cred = td->td_ucred;
  834         if (!chgkqcnt(cred->cr_ruidinfo, 1, lim_cur(td, RLIMIT_KQUEUES)))
  835                 return (ENOMEM);
  836 
  837         error = falloc_caps(td, &fp, &fd, flags, fcaps);
  838         if (error != 0) {
  839                 chgkqcnt(cred->cr_ruidinfo, -1, 0);
  840                 return (error);
  841         }
  842 
  843         /* An extra reference on `fp' has been held for us by falloc(). */
  844         kq = malloc(sizeof *kq, M_KQUEUE, M_WAITOK | M_ZERO);
  845         kqueue_init(kq);
  846         kq->kq_fdp = fdp;
  847         kq->kq_cred = crhold(cred);
  848 
  849         FILEDESC_XLOCK(fdp);
  850         TAILQ_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_list);
  851         FILEDESC_XUNLOCK(fdp);
  852 
  853         finit(fp, FREAD | FWRITE, DTYPE_KQUEUE, kq, &kqueueops);
  854         fdrop(fp, td);
  855 
  856         td->td_retval[0] = fd;
  857         return (0);
  858 }
  859 
  860 #ifndef _SYS_SYSPROTO_H_
  861 struct kevent_args {
  862         int     fd;
  863         const struct kevent *changelist;
  864         int     nchanges;
  865         struct  kevent *eventlist;
  866         int     nevents;
  867         const struct timespec *timeout;
  868 };
  869 #endif
  870 int
  871 sys_kevent(struct thread *td, struct kevent_args *uap)
  872 {
  873         struct timespec ts, *tsp;
  874         struct kevent_copyops k_ops = { uap,
  875                                         kevent_copyout,
  876                                         kevent_copyin};
  877         int error;
  878 #ifdef KTRACE
  879         struct uio ktruio;
  880         struct iovec ktriov;
  881         struct uio *ktruioin = NULL;
  882         struct uio *ktruioout = NULL;
  883 #endif
  884 
  885         if (uap->timeout != NULL) {
  886                 error = copyin(uap->timeout, &ts, sizeof(ts));
  887                 if (error)
  888                         return (error);
  889                 tsp = &ts;
  890         } else
  891                 tsp = NULL;
  892 
  893 #ifdef KTRACE
  894         if (KTRPOINT(td, KTR_GENIO)) {
  895                 ktriov.iov_base = uap->changelist;
  896                 ktriov.iov_len = uap->nchanges * sizeof(struct kevent);
  897                 ktruio = (struct uio){ .uio_iov = &ktriov, .uio_iovcnt = 1,
  898                     .uio_segflg = UIO_USERSPACE, .uio_rw = UIO_READ,
  899                     .uio_td = td };
  900                 ktruioin = cloneuio(&ktruio);
  901                 ktriov.iov_base = uap->eventlist;
  902                 ktriov.iov_len = uap->nevents * sizeof(struct kevent);
  903                 ktruioout = cloneuio(&ktruio);
  904         }
  905 #endif
  906 
  907         error = kern_kevent(td, uap->fd, uap->nchanges, uap->nevents,
  908             &k_ops, tsp);
  909 
  910 #ifdef KTRACE
  911         if (ktruioin != NULL) {
  912                 ktruioin->uio_resid = uap->nchanges * sizeof(struct kevent);
  913                 ktrgenio(uap->fd, UIO_WRITE, ktruioin, 0);
  914                 ktruioout->uio_resid = td->td_retval[0] * sizeof(struct kevent);
  915                 ktrgenio(uap->fd, UIO_READ, ktruioout, error);
  916         }
  917 #endif
  918 
  919         return (error);
  920 }
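
/*
 * Userland sketch (illustrative only; handle_read() is a hypothetical
 * callback): the classic kevent(2) loop served by this syscall.
 * Interest is registered once via the changelist; each later call
 * blocks for events, subject to the timeout.
 *
 *	struct kevent chg, ev;
 *	struct timespec ts = { .tv_sec = 1 };
 *
 *	EV_SET(&chg, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	(void)kevent(kq, &chg, 1, NULL, 0, NULL);
 *	for (;;) {
 *		int n = kevent(kq, NULL, 0, &ev, 1, &ts);
 *		if (n > 0)
 *			handle_read((int)ev.ident, (size_t)ev.data);
 *	}
 */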
  921 
  922 /*
  923  * Copy 'count' items into the destination list pointed to by uap->eventlist.
  924  */
  925 static int
  926 kevent_copyout(void *arg, struct kevent *kevp, int count)
  927 {
  928         struct kevent_args *uap;
  929         int error;
  930 
  931         KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
  932         uap = (struct kevent_args *)arg;
  933 
  934         error = copyout(kevp, uap->eventlist, count * sizeof *kevp);
  935         if (error == 0)
  936                 uap->eventlist += count;
  937         return (error);
  938 }
  939 
  940 /*
  941  * Copy 'count' items from the list pointed to by uap->changelist.
  942  */
  943 static int
  944 kevent_copyin(void *arg, struct kevent *kevp, int count)
  945 {
  946         struct kevent_args *uap;
  947         int error;
  948 
  949         KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
  950         uap = (struct kevent_args *)arg;
  951 
  952         error = copyin(uap->changelist, kevp, count * sizeof *kevp);
  953         if (error == 0)
  954                 uap->changelist += count;
  955         return (error);
  956 }
  957 
  958 int
  959 kern_kevent(struct thread *td, int fd, int nchanges, int nevents,
  960     struct kevent_copyops *k_ops, const struct timespec *timeout)
  961 {
  962         cap_rights_t rights;
  963         struct file *fp;
  964         int error;
  965 
  966         cap_rights_init(&rights);
  967         if (nchanges > 0)
  968                 cap_rights_set(&rights, CAP_KQUEUE_CHANGE);
  969         if (nevents > 0)
  970                 cap_rights_set(&rights, CAP_KQUEUE_EVENT);
  971         error = fget(td, fd, &rights, &fp);
  972         if (error != 0)
  973                 return (error);
  974 
  975         error = kern_kevent_fp(td, fp, nchanges, nevents, k_ops, timeout);
  976         fdrop(fp, td);
  977 
  978         return (error);
  979 }
  980 
  981 static int
  982 kqueue_kevent(struct kqueue *kq, struct thread *td, int nchanges, int nevents,
  983     struct kevent_copyops *k_ops, const struct timespec *timeout)
  984 {
  985         struct kevent keva[KQ_NEVENTS];
  986         struct kevent *kevp, *changes;
  987         int i, n, nerrors, error;
  988 
  989         nerrors = 0;
  990         while (nchanges > 0) {
  991                 n = nchanges > KQ_NEVENTS ? KQ_NEVENTS : nchanges;
  992                 error = k_ops->k_copyin(k_ops->arg, keva, n);
  993                 if (error)
  994                         return (error);
  995                 changes = keva;
  996                 for (i = 0; i < n; i++) {
  997                         kevp = &changes[i];
  998                         if (!kevp->filter)
  999                                 continue;
 1000                         kevp->flags &= ~EV_SYSFLAGS;
 1001                         error = kqueue_register(kq, kevp, td, 1);
 1002                         if (error || (kevp->flags & EV_RECEIPT)) {
 1003                                 if (nevents == 0)
 1004                                         return (error);
 1005                                 kevp->flags = EV_ERROR;
 1006                                 kevp->data = error;
 1007                                 (void)k_ops->k_copyout(k_ops->arg, kevp, 1);
 1008                                 nevents--;
 1009                                 nerrors++;
 1010                         }
 1011                 }
 1012                 nchanges -= n;
 1013         }
 1014         if (nerrors) {
 1015                 td->td_retval[0] = nerrors;
 1016                 return (0);
 1017         }
 1018 
 1019         return (kqueue_scan(kq, nevents, k_ops, timeout, keva, td));
 1020 }
 1021 
 1022 int
 1023 kern_kevent_fp(struct thread *td, struct file *fp, int nchanges, int nevents,
 1024     struct kevent_copyops *k_ops, const struct timespec *timeout)
 1025 {
 1026         struct kqueue *kq;
 1027         int error;
 1028 
 1029         error = kqueue_acquire(fp, &kq);
 1030         if (error != 0)
 1031                 return (error);
 1032         error = kqueue_kevent(kq, td, nchanges, nevents, k_ops, timeout);
 1033         kqueue_release(kq, 0);
 1034         return (error);
 1035 }
 1036 
 1037 /*
 1038  * Performs a kevent() call on a temporarily created kqueue. This can be
 1039  * used to perform one-shot polling, similar to poll() and select().
 1040  */
 1041 int
 1042 kern_kevent_anonymous(struct thread *td, int nevents,
 1043     struct kevent_copyops *k_ops)
 1044 {
 1045         struct kqueue kq = {};
 1046         int error;
 1047 
 1048         kqueue_init(&kq);
 1049         kq.kq_refcnt = 1;
 1050         error = kqueue_kevent(&kq, td, nevents, nevents, k_ops, NULL);
 1051         kqueue_drain(&kq, td);
 1052         kqueue_destroy(&kq);
 1053         return (error);
 1054 }
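
/*
 * In-kernel sketch (illustrative only; anon_copyin/anon_copyout are
 * hypothetical helpers): a caller supplies kevent_copyops that move
 * events with memcpy() instead of copyin()/copyout(), then polls once
 * through the anonymous kqueue.
 *
 *	static int
 *	anon_copyin(void *arg, struct kevent *kevp, int count)
 *	{
 *		memcpy(kevp, arg, count * sizeof(*kevp));
 *		return (0);
 *	}
 *	...
 *	struct kevent_copyops k_ops = { kevs, anon_copyout, anon_copyin };
 *	error = kern_kevent_anonymous(td, nevents, &k_ops);
 */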
 1055 
 1056 int
 1057 kqueue_add_filteropts(int filt, struct filterops *filtops)
 1058 {
 1059         int error;
 1060 
 1061         error = 0;
 1062         if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) {
 1063                 printf(
 1064 "trying to add a filterop that is out of range: %d is beyond %d\n",
 1065                     ~filt, EVFILT_SYSCOUNT);
 1066                 return EINVAL;
 1067         }
 1068         mtx_lock(&filterops_lock);
 1069         if (sysfilt_ops[~filt].for_fop != &null_filtops &&
 1070             sysfilt_ops[~filt].for_fop != NULL)
 1071                 error = EEXIST;
 1072         else {
 1073                 sysfilt_ops[~filt].for_fop = filtops;
 1074                 sysfilt_ops[~filt].for_refcnt = 0;
 1075         }
 1076         mtx_unlock(&filterops_lock);
 1077 
 1078         return (error);
 1079 }
 1080 
 1081 int
 1082 kqueue_del_filteropts(int filt)
 1083 {
 1084         int error;
 1085 
 1086         error = 0;
 1087         if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
 1088                 return EINVAL;
 1089 
 1090         mtx_lock(&filterops_lock);
 1091         if (sysfilt_ops[~filt].for_fop == &null_filtops ||
 1092             sysfilt_ops[~filt].for_fop == NULL)
 1093                 error = EINVAL;
 1094         else if (sysfilt_ops[~filt].for_refcnt != 0)
 1095                 error = EBUSY;
 1096         else {
 1097                 sysfilt_ops[~filt].for_fop = &null_filtops;
 1098                 sysfilt_ops[~filt].for_refcnt = 0;
 1099         }
 1100         mtx_unlock(&filterops_lock);
 1101 
 1102         return error;
 1103 }
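
/*
 * Sketch (illustrative, patterned on the in-tree AIO code) of how a
 * subsystem claims one of the null_filtops slots above at load time
 * and releases it again on unload:
 *
 *	kqueue_add_filteropts(EVFILT_LIO, &lio_filtops);
 *	...
 *	kqueue_del_filteropts(EVFILT_LIO);
 */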
 1104 
 1105 static struct filterops *
 1106 kqueue_fo_find(int filt)
 1107 {
 1108 
 1109         if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
 1110                 return NULL;
 1111 
 1112         if (sysfilt_ops[~filt].for_nolock)
 1113                 return sysfilt_ops[~filt].for_fop;
 1114 
 1115         mtx_lock(&filterops_lock);
 1116         sysfilt_ops[~filt].for_refcnt++;
 1117         if (sysfilt_ops[~filt].for_fop == NULL)
 1118                 sysfilt_ops[~filt].for_fop = &null_filtops;
 1119         mtx_unlock(&filterops_lock);
 1120 
 1121         return sysfilt_ops[~filt].for_fop;
 1122 }
 1123 
 1124 static void
 1125 kqueue_fo_release(int filt)
 1126 {
 1127 
 1128         if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
 1129                 return;
 1130 
 1131         if (sysfilt_ops[~filt].for_nolock)
 1132                 return;
 1133 
 1134         mtx_lock(&filterops_lock);
 1135         KASSERT(sysfilt_ops[~filt].for_refcnt > 0,
 1136             ("filter object refcount not valid on release"));
 1137         sysfilt_ops[~filt].for_refcnt--;
 1138         mtx_unlock(&filterops_lock);
 1139 }
 1140 
 1141 /*
  1142  * A ref to kq (obtained via kqueue_acquire) must be held.  waitok
  1143  * controls whether memory allocation may sleep; make sure it is 0 if
  1144  * you hold any mutexes.
 1145  */
 1146 static int
 1147 kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td, int waitok)
 1148 {
 1149         struct filterops *fops;
 1150         struct file *fp;
 1151         struct knote *kn, *tkn;
 1152         struct knlist *knl;
 1153         cap_rights_t rights;
 1154         int error, filt, event;
 1155         int haskqglobal, filedesc_unlock;
 1156 
 1157         if ((kev->flags & (EV_ENABLE | EV_DISABLE)) == (EV_ENABLE | EV_DISABLE))
 1158                 return (EINVAL);
 1159 
 1160         fp = NULL;
 1161         kn = NULL;
 1162         knl = NULL;
 1163         error = 0;
 1164         haskqglobal = 0;
 1165         filedesc_unlock = 0;
 1166 
 1167         filt = kev->filter;
 1168         fops = kqueue_fo_find(filt);
 1169         if (fops == NULL)
 1170                 return EINVAL;
 1171 
 1172         if (kev->flags & EV_ADD) {
 1173                 /*
 1174                  * Prevent waiting with locks.  Non-sleepable
 1175                  * allocation failures are handled in the loop, only
 1176                  * if the spare knote appears to be actually required.
 1177                  */
 1178                 tkn = knote_alloc(waitok);
 1179         } else {
 1180                 tkn = NULL;
 1181         }
 1182 
 1183 findkn:
 1184         if (fops->f_isfd) {
 1185                 KASSERT(td != NULL, ("td is NULL"));
 1186                 if (kev->ident > INT_MAX)
 1187                         error = EBADF;
 1188                 else
 1189                         error = fget(td, kev->ident,
 1190                             cap_rights_init(&rights, CAP_EVENT), &fp);
 1191                 if (error)
 1192                         goto done;
 1193 
 1194                 if ((kev->flags & EV_ADD) == EV_ADD && kqueue_expand(kq, fops,
 1195                     kev->ident, 0) != 0) {
 1196                         /* try again */
 1197                         fdrop(fp, td);
 1198                         fp = NULL;
 1199                         error = kqueue_expand(kq, fops, kev->ident, waitok);
 1200                         if (error)
 1201                                 goto done;
 1202                         goto findkn;
 1203                 }
 1204 
 1205                 if (fp->f_type == DTYPE_KQUEUE) {
 1206                         /*
 1207                          * If we add some intelligence about what we are doing,
 1208                          * we should be able to support events on ourselves.
 1209                          * We need to know when we are doing this to prevent
 1210                          * getting both the knlist lock and the kq lock since
 1211                          * they are the same thing.
 1212                          */
 1213                         if (fp->f_data == kq) {
 1214                                 error = EINVAL;
 1215                                 goto done;
 1216                         }
 1217 
 1218                         /*
 1219                          * Pre-lock the filedesc before the global
 1220                          * lock mutex, see the comment in
 1221                          * kqueue_close().
 1222                          */
 1223                         FILEDESC_XLOCK(td->td_proc->p_fd);
 1224                         filedesc_unlock = 1;
 1225                         KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
 1226                 }
 1227 
 1228                 KQ_LOCK(kq);
 1229                 if (kev->ident < kq->kq_knlistsize) {
 1230                         SLIST_FOREACH(kn, &kq->kq_knlist[kev->ident], kn_link)
 1231                                 if (kev->filter == kn->kn_filter)
 1232                                         break;
 1233                 }
 1234         } else {
 1235                 if ((kev->flags & EV_ADD) == EV_ADD)
 1236                         kqueue_expand(kq, fops, kev->ident, waitok);
 1237 
 1238                 KQ_LOCK(kq);
 1239 
 1240                 /*
 1241                  * If possible, find an existing knote to use for this kevent.
 1242                  */
 1243                 if (kev->filter == EVFILT_PROC &&
 1244                     (kev->flags & (EV_FLAG1 | EV_FLAG2)) != 0) {
 1245                         /* This is an internal creation of a process tracking
 1246                          * note. Don't attempt to coalesce this with an
 1247                          * existing note.
 1248                          */
 1249                         ;                       
 1250                 } else if (kq->kq_knhashmask != 0) {
 1251                         struct klist *list;
 1252 
 1253                         list = &kq->kq_knhash[
 1254                             KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
 1255                         SLIST_FOREACH(kn, list, kn_link)
 1256                                 if (kev->ident == kn->kn_id &&
 1257                                     kev->filter == kn->kn_filter)
 1258                                         break;
 1259                 }
 1260         }
 1261 
 1262         /* knote is in the process of changing, wait for it to stabilize. */
 1263         if (kn != NULL && (kn->kn_status & KN_INFLUX) == KN_INFLUX) {
 1264                 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
 1265                 if (filedesc_unlock) {
 1266                         FILEDESC_XUNLOCK(td->td_proc->p_fd);
 1267                         filedesc_unlock = 0;
 1268                 }
 1269                 kq->kq_state |= KQ_FLUXWAIT;
 1270                 msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqflxwt", 0);
 1271                 if (fp != NULL) {
 1272                         fdrop(fp, td);
 1273                         fp = NULL;
 1274                 }
 1275                 goto findkn;
 1276         }
 1277 
 1278         /*
 1279          * kn now contains the matching knote, or NULL if no match
 1280          */
 1281         if (kn == NULL) {
 1282                 if (kev->flags & EV_ADD) {
 1283                         kn = tkn;
 1284                         tkn = NULL;
 1285                         if (kn == NULL) {
 1286                                 KQ_UNLOCK(kq);
 1287                                 error = ENOMEM;
 1288                                 goto done;
 1289                         }
 1290                         kn->kn_fp = fp;
 1291                         kn->kn_kq = kq;
 1292                         kn->kn_fop = fops;
 1293                         /*
 1294                          * apply reference counts to knote structure, and
 1295                          * do not release it at the end of this routine.
 1296                          */
 1297                         fops = NULL;
 1298                         fp = NULL;
 1299 
 1300                         kn->kn_sfflags = kev->fflags;
 1301                         kn->kn_sdata = kev->data;
 1302                         kev->fflags = 0;
 1303                         kev->data = 0;
 1304                         kn->kn_kevent = *kev;
 1305                         kn->kn_kevent.flags &= ~(EV_ADD | EV_DELETE |
 1306                             EV_ENABLE | EV_DISABLE | EV_FORCEONESHOT);
 1307                         kn->kn_status = KN_INFLUX|KN_DETACHED;
 1308 
 1309                         error = knote_attach(kn, kq);
 1310                         KQ_UNLOCK(kq);
 1311                         if (error != 0) {
 1312                                 tkn = kn;
 1313                                 goto done;
 1314                         }
 1315 
 1316                         if ((error = kn->kn_fop->f_attach(kn)) != 0) {
 1317                                 knote_drop(kn, td);
 1318                                 goto done;
 1319                         }
 1320                         knl = kn_list_lock(kn);
 1321                         goto done_ev_add;
 1322                 } else {
 1323                         /* No matching knote and the EV_ADD flag is not set. */
 1324                         KQ_UNLOCK(kq);
 1325                         error = ENOENT;
 1326                         goto done;
 1327                 }
 1328         }
 1329         
 1330         if (kev->flags & EV_DELETE) {
 1331                 kn->kn_status |= KN_INFLUX;
 1332                 KQ_UNLOCK(kq);
 1333                 if (!(kn->kn_status & KN_DETACHED))
 1334                         kn->kn_fop->f_detach(kn);
 1335                 knote_drop(kn, td);
 1336                 goto done;
 1337         }
 1338 
 1339         if (kev->flags & EV_FORCEONESHOT) {
 1340                 kn->kn_flags |= EV_ONESHOT;
 1341                 KNOTE_ACTIVATE(kn, 1);
 1342         }
 1343 
 1344         /*
 1345          * The user may change some filter values after the initial EV_ADD,
 1346          * but doing so will not reset any filter which has already been
 1347          * triggered.
 1348          */
 1349         kn->kn_status |= KN_INFLUX | KN_SCAN;
 1350         KQ_UNLOCK(kq);
 1351         knl = kn_list_lock(kn);
 1352         kn->kn_kevent.udata = kev->udata;
 1353         if (!fops->f_isfd && fops->f_touch != NULL) {
 1354                 fops->f_touch(kn, kev, EVENT_REGISTER);
 1355         } else {
 1356                 kn->kn_sfflags = kev->fflags;
 1357                 kn->kn_sdata = kev->data;
 1358         }
 1359 
  1360         /*
  1361          * We can get here with kn->kn_knlist == NULL.  This can happen when
  1362          * the initial attach event decides that the event is "completed"
  1363          * already, e.g. when filt_procattach is called on a zombie process.
  1364          * It will call filt_proc, which will remove the knote from the list
  1365          * and NULL out kn_knlist.
  1366          */
 1367 done_ev_add:
 1368         if ((kev->flags & EV_ENABLE) != 0)
 1369                 kn->kn_status &= ~KN_DISABLED;
 1370         else if ((kev->flags & EV_DISABLE) != 0)
 1371                 kn->kn_status |= KN_DISABLED;
 1372 
 1373         if ((kn->kn_status & KN_DISABLED) == 0)
 1374                 event = kn->kn_fop->f_event(kn, 0);
 1375         else
 1376                 event = 0;
 1377 
 1378         KQ_LOCK(kq);
 1379         if (event)
 1380                 kn->kn_status |= KN_ACTIVE;
 1381         if ((kn->kn_status & (KN_ACTIVE | KN_DISABLED | KN_QUEUED)) ==
 1382             KN_ACTIVE)
 1383                 knote_enqueue(kn);
 1384         kn->kn_status &= ~(KN_INFLUX | KN_SCAN);
 1385         kn_list_unlock(knl);
 1386         KQ_UNLOCK_FLUX(kq);
 1387 
 1388 done:
 1389         KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
 1390         if (filedesc_unlock)
 1391                 FILEDESC_XUNLOCK(td->td_proc->p_fd);
 1392         if (fp != NULL)
 1393                 fdrop(fp, td);
 1394         knote_free(tkn);
 1395         if (fops != NULL)
 1396                 kqueue_fo_release(filt);
 1397         return (error);
 1398 }
 1399 
 1400 static int
 1401 kqueue_acquire(struct file *fp, struct kqueue **kqp)
 1402 {
 1403         int error;
 1404         struct kqueue *kq;
 1405 
 1406         error = 0;
 1407 
 1408         kq = fp->f_data;
 1409         if (fp->f_type != DTYPE_KQUEUE || kq == NULL)
 1410                 return (EBADF);
 1411         *kqp = kq;
 1412         KQ_LOCK(kq);
 1413         if ((kq->kq_state & KQ_CLOSING) == KQ_CLOSING) {
 1414                 KQ_UNLOCK(kq);
 1415                 return (EBADF);
 1416         }
 1417         kq->kq_refcnt++;
 1418         KQ_UNLOCK(kq);
 1419 
 1420         return error;
 1421 }
 1422 
 1423 static void
 1424 kqueue_release(struct kqueue *kq, int locked)
 1425 {
 1426         if (locked)
 1427                 KQ_OWNED(kq);
 1428         else
 1429                 KQ_LOCK(kq);
 1430         kq->kq_refcnt--;
 1431         if (kq->kq_refcnt == 1)
 1432                 wakeup(&kq->kq_refcnt);
 1433         if (!locked)
 1434                 KQ_UNLOCK(kq);
 1435 }
 1436 
 1437 static void
 1438 kqueue_schedtask(struct kqueue *kq)
 1439 {
 1440 
 1441         KQ_OWNED(kq);
 1442         KASSERT(((kq->kq_state & KQ_TASKDRAIN) != KQ_TASKDRAIN),
 1443             ("scheduling kqueue task while draining"));
 1444 
 1445         if ((kq->kq_state & KQ_TASKSCHED) != KQ_TASKSCHED) {
 1446                 taskqueue_enqueue(taskqueue_kqueue_ctx, &kq->kq_task);
 1447                 kq->kq_state |= KQ_TASKSCHED;
 1448         }
 1449 }
 1450 
 1451 /*
 1452  * Expand the kq to make sure we have storage for fops/ident pair.
 1453  *
 1454  * Return 0 on success (or no work necessary), return errno on failure.
 1455  *
 1456  * Not calling hashinit with the proper waitok malloc flag should be
 1457  * safe: if kqueue_register is called from a non-fd context, there
 1458  * should usually be no locks held.
 1459  */
 1460 static int
 1461 kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
 1462         int waitok)
 1463 {
 1464         struct klist *list, *tmp_knhash, *to_free;
 1465         u_long tmp_knhashmask;
 1466         int size;
 1467         int fd;
 1468         int mflag = waitok ? M_WAITOK : M_NOWAIT;
 1469 
 1470         KQ_NOTOWNED(kq);
 1471 
 1472         to_free = NULL;
 1473         if (fops->f_isfd) {
 1474                 fd = ident;
 1475                 if (kq->kq_knlistsize <= fd) {
 1476                         size = kq->kq_knlistsize;
 1477                         while (size <= fd)
 1478                                 size += KQEXTENT;
 1479                         list = malloc(size * sizeof(*list), M_KQUEUE, mflag);
 1480                         if (list == NULL)
 1481                                 return (ENOMEM);
 1482                         KQ_LOCK(kq);
 1483                         if (kq->kq_knlistsize > fd) {
 1484                                 to_free = list;
 1485                                 list = NULL;
 1486                         } else {
 1487                                 if (kq->kq_knlist != NULL) {
 1488                                         bcopy(kq->kq_knlist, list,
 1489                                             kq->kq_knlistsize * sizeof(*list));
 1490                                         to_free = kq->kq_knlist;
 1491                                         kq->kq_knlist = NULL;
 1492                                 }
 1493                                 bzero((caddr_t)list +
 1494                                     kq->kq_knlistsize * sizeof(*list),
 1495                                     (size - kq->kq_knlistsize) * sizeof(*list));
 1496                                 kq->kq_knlistsize = size;
 1497                                 kq->kq_knlist = list;
 1498                         }
 1499                         KQ_UNLOCK(kq);
 1500                 }
 1501         } else {
 1502                 if (kq->kq_knhashmask == 0) {
 1503                         tmp_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
 1504                             &tmp_knhashmask);
 1505                         if (tmp_knhash == NULL)
 1506                                 return (ENOMEM);
 1507                         KQ_LOCK(kq);
 1508                         if (kq->kq_knhashmask == 0) {
 1509                                 kq->kq_knhash = tmp_knhash;
 1510                                 kq->kq_knhashmask = tmp_knhashmask;
 1511                         } else {
 1512                                 to_free = tmp_knhash;
 1513                         }
 1514                         KQ_UNLOCK(kq);
 1515                 }
 1516         }
 1517         free(to_free, M_KQUEUE);
 1518 
 1519         KQ_NOTOWNED(kq);
 1520         return (0);
 1521 }
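      /*
       * A worked example of the fd-array growth above (assuming KQEXTENT is
       * 256, as defined in sys/sys/eventvar.h): registering fd 600 against an
       * empty kqueue grows kq_knlistsize 0 -> 256 -> 512 -> 768, i.e. the
       * array extends linearly in KQEXTENT-sized steps rather than doubling.
       */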
 1522 
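      /*
       * Taskqueue callback scheduled by kqueue_schedtask(): deliver the
       * pending notification to whatever is watching this kqueue through
       * kq_sel.si_note (another kqueue, or poll/select), then wake a drain
       * waiter if kqueue_drain() is blocked on KQ_TASKDRAIN.
       */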
 1523 static void
 1524 kqueue_task(void *arg, int pending)
 1525 {
 1526         struct kqueue *kq;
 1527         int haskqglobal;
 1528 
 1529         haskqglobal = 0;
 1530         kq = arg;
 1531 
 1532         KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
 1533         KQ_LOCK(kq);
 1534 
 1535         KNOTE_LOCKED(&kq->kq_sel.si_note, 0);
 1536 
 1537         kq->kq_state &= ~KQ_TASKSCHED;
 1538         if ((kq->kq_state & KQ_TASKDRAIN) == KQ_TASKDRAIN) {
 1539                 wakeup(&kq->kq_state);
 1540         }
 1541         KQ_UNLOCK(kq);
 1542         KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
 1543 }
 1544 
 1545 /*
 1546  * Scan, update kn_data (if not ONESHOT), and copyout triggered events.
 1547  * We treat KN_MARKER knotes as if they are INFLUX.
 1548  */
 1549 static int
 1550 kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops,
 1551     const struct timespec *tsp, struct kevent *keva, struct thread *td)
 1552 {
 1553         struct kevent *kevp;
 1554         struct knote *kn, *marker;
 1555         struct knlist *knl;
 1556         sbintime_t asbt, rsbt;
 1557         int count, error, haskqglobal, influx, nkev, touch;
 1558 
 1559         count = maxevents;
 1560         nkev = 0;
 1561         error = 0;
 1562         haskqglobal = 0;
 1563 
 1564         if (maxevents == 0)
 1565                 goto done_nl;
 1566 
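              /*
               * Convert the caller's relative timespec into an absolute
               * sbintime_t deadline: a NULL tsp (or an interval too large to
               * represent) means sleep indefinitely (asbt = 0), a zero tsp
               * means poll without blocking (asbt = -1), and anything else
               * becomes "now + interval", with rsbt >> tc_precexp kept as the
               * precision hint for msleep_sbt().
               */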
 1567         rsbt = 0;
 1568         if (tsp != NULL) {
 1569                 if (tsp->tv_sec < 0 || tsp->tv_nsec < 0 ||
 1570                     tsp->tv_nsec >= 1000000000) {
 1571                         error = EINVAL;
 1572                         goto done_nl;
 1573                 }
 1574                 if (timespecisset(tsp)) {
 1575                         if (tsp->tv_sec <= INT32_MAX) {
 1576                                 rsbt = tstosbt(*tsp);
 1577                                 if (TIMESEL(&asbt, rsbt))
 1578                                         asbt += tc_tick_sbt;
 1579                                 if (asbt <= SBT_MAX - rsbt)
 1580                                         asbt += rsbt;
 1581                                 else
 1582                                         asbt = 0;
 1583                                 rsbt >>= tc_precexp;
 1584                         } else
 1585                                 asbt = 0;
 1586                 } else
 1587                         asbt = -1;
 1588         } else
 1589                 asbt = 0;
 1590         marker = knote_alloc(1);
 1591         marker->kn_status = KN_MARKER;
 1592         KQ_LOCK(kq);
 1593 
 1594 retry:
 1595         kevp = keva;
 1596         if (kq->kq_count == 0) {
 1597                 if (asbt == -1) {
 1598                         error = EWOULDBLOCK;
 1599                 } else {
 1600                         kq->kq_state |= KQ_SLEEP;
 1601                         error = msleep_sbt(kq, &kq->kq_lock, PSOCK | PCATCH,
 1602                             "kqread", asbt, rsbt, C_ABSOLUTE);
 1603                 }
 1604                 if (error == 0)
 1605                         goto retry;
 1606                 /* don't restart after signals... */
 1607                 if (error == ERESTART)
 1608                         error = EINTR;
 1609                 else if (error == EWOULDBLOCK)
 1610                         error = 0;
 1611                 goto done;
 1612         }
 1613 
 1614         TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe);
 1615         influx = 0;
 1616         while (count) {
 1617                 KQ_OWNED(kq);
 1618                 kn = TAILQ_FIRST(&kq->kq_head);
 1619 
 1620                 if ((kn->kn_status == KN_MARKER && kn != marker) ||
 1621                     (kn->kn_status & KN_INFLUX) == KN_INFLUX) {
 1622                         if (influx) {
 1623                                 influx = 0;
 1624                                 KQ_FLUX_WAKEUP(kq);
 1625                         }
 1626                         kq->kq_state |= KQ_FLUXWAIT;
 1627                         error = msleep(kq, &kq->kq_lock, PSOCK,
 1628                             "kqflxwt", 0);
 1629                         continue;
 1630                 }
 1631 
 1632                 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
 1633                 if ((kn->kn_status & KN_DISABLED) == KN_DISABLED) {
 1634                         kn->kn_status &= ~KN_QUEUED;
 1635                         kq->kq_count--;
 1636                         continue;
 1637                 }
 1638                 if (kn == marker) {
 1639                         KQ_FLUX_WAKEUP(kq);
 1640                         if (count == maxevents)
 1641                                 goto retry;
 1642                         goto done;
 1643                 }
 1644                 KASSERT((kn->kn_status & KN_INFLUX) == 0,
 1645             ("KN_INFLUX set when not supposed to be"));
 1646 
 1647                 if ((kn->kn_flags & EV_DROP) == EV_DROP) {
 1648                         kn->kn_status &= ~KN_QUEUED;
 1649                         kn->kn_status |= KN_INFLUX;
 1650                         kq->kq_count--;
 1651                         KQ_UNLOCK(kq);
 1652                         /*
 1653                          * We don't need to lock the list since we've marked
 1654                          * it _INFLUX.
 1655                          */
 1656                         if (!(kn->kn_status & KN_DETACHED))
 1657                                 kn->kn_fop->f_detach(kn);
 1658                         knote_drop(kn, td);
 1659                         KQ_LOCK(kq);
 1660                         continue;
 1661                 } else if ((kn->kn_flags & EV_ONESHOT) == EV_ONESHOT) {
 1662                         kn->kn_status &= ~KN_QUEUED;
 1663                         kn->kn_status |= KN_INFLUX;
 1664                         kq->kq_count--;
 1665                         KQ_UNLOCK(kq);
 1666                         /*
 1667                          * We don't need to lock the list since we've marked
 1668                          * it _INFLUX.
 1669                          */
 1670                         *kevp = kn->kn_kevent;
 1671                         if (!(kn->kn_status & KN_DETACHED))
 1672                                 kn->kn_fop->f_detach(kn);
 1673                         knote_drop(kn, td);
 1674                         KQ_LOCK(kq);
 1675                         kn = NULL;
 1676                 } else {
 1677                         kn->kn_status |= KN_INFLUX | KN_SCAN;
 1678                         KQ_UNLOCK(kq);
 1679                         if ((kn->kn_status & KN_KQUEUE) == KN_KQUEUE)
 1680                                 KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
 1681                         knl = kn_list_lock(kn);
 1682                         if (kn->kn_fop->f_event(kn, 0) == 0) {
 1683                                 KQ_LOCK(kq);
 1684                                 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
 1685                                 kn->kn_status &=
 1686                                     ~(KN_QUEUED | KN_ACTIVE | KN_INFLUX |
 1687                                     KN_SCAN);
 1688                                 kq->kq_count--;
 1689                                 kn_list_unlock(knl);
 1690                                 influx = 1;
 1691                                 continue;
 1692                         }
 1693                         touch = (!kn->kn_fop->f_isfd &&
 1694                             kn->kn_fop->f_touch != NULL);
 1695                         if (touch)
 1696                                 kn->kn_fop->f_touch(kn, kevp, EVENT_PROCESS);
 1697                         else
 1698                                 *kevp = kn->kn_kevent;
 1699                         KQ_LOCK(kq);
 1700                         KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
 1701                         if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) {
 1702                                 /*
 1703                                  * Manually clear knotes that weren't
 1704                                  * 'touch'ed.
 1705                                  */
 1706                                 if (touch == 0 && kn->kn_flags & EV_CLEAR) {
 1707                                         kn->kn_data = 0;
 1708                                         kn->kn_fflags = 0;
 1709                                 }
 1710                                 if (kn->kn_flags & EV_DISPATCH)
 1711                                         kn->kn_status |= KN_DISABLED;
 1712                                 kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
 1713                                 kq->kq_count--;
 1714                         } else
 1715                                 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
 1716                         
 1717                         kn->kn_status &= ~(KN_INFLUX | KN_SCAN);
 1718                         kn_list_unlock(knl);
 1719                         influx = 1;
 1720                 }
 1721 
 1722                 /* we are returning a copy to the user */
 1723                 kevp++;
 1724                 nkev++;
 1725                 count--;
 1726 
 1727                 if (nkev == KQ_NEVENTS) {
 1728                         influx = 0;
 1729                         KQ_UNLOCK_FLUX(kq);
 1730                         error = k_ops->k_copyout(k_ops->arg, keva, nkev);
 1731                         nkev = 0;
 1732                         kevp = keva;
 1733                         KQ_LOCK(kq);
 1734                         if (error)
 1735                                 break;
 1736                 }
 1737         }
 1738         TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe);
 1739 done:
 1740         KQ_OWNED(kq);
 1741         KQ_UNLOCK_FLUX(kq);
 1742         knote_free(marker);
 1743 done_nl:
 1744         KQ_NOTOWNED(kq);
 1745         if (nkev != 0)
 1746                 error = k_ops->k_copyout(k_ops->arg, keva, nkev);
 1747         td->td_retval[0] = maxevents - count;
 1748         return (error);
 1749 }
 1750 
 1751 /*ARGSUSED*/
 1752 static int
 1753 kqueue_ioctl(struct file *fp, u_long cmd, void *data,
 1754         struct ucred *active_cred, struct thread *td)
 1755 {
 1756         /*
 1757          * Enabling sigio causes two major problems:
 1758          * 1) infinite recursion:
 1759          * Synopsis: kevent is being used to track signals and has FIOASYNC
 1760          * set.  On receipt of a signal this will cause a kqueue to recurse
 1761          * into itself over and over.  Sending the sigio causes the kqueue
 1762          * to become ready, which in turn posts sigio again, forever.
 1763          * Solution: this can be solved by setting a flag in the kqueue that
 1764          * we have a SIGIO in progress.
 1765          * 2) locking problems:
 1766          * Synopsis: Kqueue is a leaf subsystem, but adding signalling puts
 1767          * us above the proc and pgrp locks.
 1768          * Solution: Post a signal using an async mechanism, being sure to
 1769          * record a generation count in the delivery so that we do not deliver
 1770          * a signal to the wrong process.
 1771          *
 1772          * Note, these two mechanisms are somewhat mutually exclusive!
 1773          */
 1774 #if 0
 1775         struct kqueue *kq;
 1776 
 1777         kq = fp->f_data;
 1778         switch (cmd) {
 1779         case FIOASYNC:
 1780                 if (*(int *)data) {
 1781                         kq->kq_state |= KQ_ASYNC;
 1782                 } else {
 1783                         kq->kq_state &= ~KQ_ASYNC;
 1784                 }
 1785                 return (0);
 1786 
 1787         case FIOSETOWN:
 1788                 return (fsetown(*(int *)data, &kq->kq_sigio));
 1789 
 1790         case FIOGETOWN:
 1791                 *(int *)data = fgetown(&kq->kq_sigio);
 1792                 return (0);
 1793         }
 1794 #endif
 1795 
 1796         return (ENOTTY);
 1797 }
 1798 
 1799 /*ARGSUSED*/
 1800 static int
 1801 kqueue_poll(struct file *fp, int events, struct ucred *active_cred,
 1802         struct thread *td)
 1803 {
 1804         struct kqueue *kq;
 1805         int revents = 0;
 1806         int error;
 1807 
 1808         if ((error = kqueue_acquire(fp, &kq)))
 1809                 return (POLLERR);
 1810 
 1811         KQ_LOCK(kq);
 1812         if (events & (POLLIN | POLLRDNORM)) {
 1813                 if (kq->kq_count) {
 1814                         revents |= events & (POLLIN | POLLRDNORM);
 1815                 } else {
 1816                         selrecord(td, &kq->kq_sel);
 1817                         if (SEL_WAITING(&kq->kq_sel))
 1818                                 kq->kq_state |= KQ_SEL;
 1819                 }
 1820         }
 1821         kqueue_release(kq, 1);
 1822         KQ_UNLOCK(kq);
 1823         return (revents);
 1824 }
 1825 
 1826 /*ARGSUSED*/
 1827 static int
 1828 kqueue_stat(struct file *fp, struct stat *st, struct ucred *active_cred,
 1829         struct thread *td)
 1830 {
 1831 
 1832         bzero((void *)st, sizeof *st);
 1833         /*
 1834          * We no longer return kq_count because the unlocked value is useless.
 1835          * If you spent all this time getting the count, why not spend your
 1836          * syscall better by calling kevent?
 1837          *
 1838          * XXX - This is needed for libc_r.
 1839          */
 1840         st->st_mode = S_IFIFO;
 1841         return (0);
 1842 }
 1843 
 1844 static void
 1845 kqueue_drain(struct kqueue *kq, struct thread *td)
 1846 {
 1847         struct knote *kn;
 1848         int i;
 1849 
 1850         KQ_LOCK(kq);
 1851 
 1852         KASSERT((kq->kq_state & KQ_CLOSING) != KQ_CLOSING,
 1853             ("kqueue already closing"));
 1854         kq->kq_state |= KQ_CLOSING;
 1855         if (kq->kq_refcnt > 1)
 1856                 msleep(&kq->kq_refcnt, &kq->kq_lock, PSOCK, "kqclose", 0);
 1857 
 1858         KASSERT(kq->kq_refcnt == 1, ("other refs are out there!"));
 1859 
 1860         KASSERT(knlist_empty(&kq->kq_sel.si_note),
 1861             ("kqueue's knlist not empty"));
 1862 
 1863         for (i = 0; i < kq->kq_knlistsize; i++) {
 1864                 while ((kn = SLIST_FIRST(&kq->kq_knlist[i])) != NULL) {
 1865                         if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) {
 1866                                 kq->kq_state |= KQ_FLUXWAIT;
 1867                                 msleep(kq, &kq->kq_lock, PSOCK, "kqclo1", 0);
 1868                                 continue;
 1869                         }
 1870                         kn->kn_status |= KN_INFLUX;
 1871                         KQ_UNLOCK(kq);
 1872                         if (!(kn->kn_status & KN_DETACHED))
 1873                                 kn->kn_fop->f_detach(kn);
 1874                         knote_drop(kn, td);
 1875                         KQ_LOCK(kq);
 1876                 }
 1877         }
 1878         if (kq->kq_knhashmask != 0) {
 1879                 for (i = 0; i <= kq->kq_knhashmask; i++) {
 1880                         while ((kn = SLIST_FIRST(&kq->kq_knhash[i])) != NULL) {
 1881                                 if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) {
 1882                                         kq->kq_state |= KQ_FLUXWAIT;
 1883                                         msleep(kq, &kq->kq_lock, PSOCK,
 1884                                                "kqclo2", 0);
 1885                                         continue;
 1886                                 }
 1887                                 kn->kn_status |= KN_INFLUX;
 1888                                 KQ_UNLOCK(kq);
 1889                                 if (!(kn->kn_status & KN_DETACHED))
 1890                                         kn->kn_fop->f_detach(kn);
 1891                                 knote_drop(kn, td);
 1892                                 KQ_LOCK(kq);
 1893                         }
 1894                 }
 1895         }
 1896 
 1897         if ((kq->kq_state & KQ_TASKSCHED) == KQ_TASKSCHED) {
 1898                 kq->kq_state |= KQ_TASKDRAIN;
 1899                 msleep(&kq->kq_state, &kq->kq_lock, PSOCK, "kqtqdr", 0);
 1900         }
 1901 
 1902         if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
 1903                 selwakeuppri(&kq->kq_sel, PSOCK);
 1904                 if (!SEL_WAITING(&kq->kq_sel))
 1905                         kq->kq_state &= ~KQ_SEL;
 1906         }
 1907 
 1908         KQ_UNLOCK(kq);
 1909 }
 1910 
 1911 static void
 1912 kqueue_destroy(struct kqueue *kq)
 1913 {
 1914 
 1915         KASSERT(kq->kq_fdp == NULL,
 1916             ("kqueue still attached to a file descriptor"));
 1917         seldrain(&kq->kq_sel);
 1918         knlist_destroy(&kq->kq_sel.si_note);
 1919         mtx_destroy(&kq->kq_lock);
 1920 
 1921         if (kq->kq_knhash != NULL)
 1922                 free(kq->kq_knhash, M_KQUEUE);
 1923         if (kq->kq_knlist != NULL)
 1924                 free(kq->kq_knlist, M_KQUEUE);
 1925 
 1926         funsetown(&kq->kq_sigio);
 1927 }
 1928 
 1929 /*ARGSUSED*/
 1930 static int
 1931 kqueue_close(struct file *fp, struct thread *td)
 1932 {
 1933         struct kqueue *kq = fp->f_data;
 1934         struct filedesc *fdp;
 1935         int error;
 1936         int filedesc_unlock;
 1937 
 1938         if ((error = kqueue_acquire(fp, &kq)))
 1939                 return (error);
 1940         kqueue_drain(kq, td);
 1941 
 1942         /*
 1943          * We could be called due to the knote_drop() doing fdrop(),
 1944          * called from kqueue_register().  In this case the global
 1945          * lock is owned, and filedesc sx is locked before, to not
 1946          * take the sleepable lock after non-sleepable.
 1947          */
 1948         fdp = kq->kq_fdp;
 1949         kq->kq_fdp = NULL;
 1950         if (!sx_xlocked(FILEDESC_LOCK(fdp))) {
 1951                 FILEDESC_XLOCK(fdp);
 1952                 filedesc_unlock = 1;
 1953         } else
 1954                 filedesc_unlock = 0;
 1955         TAILQ_REMOVE(&fdp->fd_kqlist, kq, kq_list);
 1956         if (filedesc_unlock)
 1957                 FILEDESC_XUNLOCK(fdp);
 1958 
 1959         kqueue_destroy(kq);
 1960         chgkqcnt(kq->kq_cred->cr_ruidinfo, -1, 0);
 1961         crfree(kq->kq_cred);
 1962         free(kq, M_KQUEUE);
 1963         fp->f_data = NULL;
 1964 
 1965         return (0);
 1966 }
 1967 
 1968 static int
 1969 kqueue_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
 1970 {
 1971 
 1972         kif->kf_type = KF_TYPE_KQUEUE;
 1973         return (0);
 1974 }
 1975 
 1976 static void
 1977 kqueue_wakeup(struct kqueue *kq)
 1978 {
 1979         KQ_OWNED(kq);
 1980 
 1981         if ((kq->kq_state & KQ_SLEEP) == KQ_SLEEP) {
 1982                 kq->kq_state &= ~KQ_SLEEP;
 1983                 wakeup(kq);
 1984         }
 1985         if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
 1986                 selwakeuppri(&kq->kq_sel, PSOCK);
 1987                 if (!SEL_WAITING(&kq->kq_sel))
 1988                         kq->kq_state &= ~KQ_SEL;
 1989         }
 1990         if (!knlist_empty(&kq->kq_sel.si_note))
 1991                 kqueue_schedtask(kq);
 1992         if ((kq->kq_state & KQ_ASYNC) == KQ_ASYNC) {
 1993                 pgsigio(&kq->kq_sigio, SIGIO, 0);
 1994         }
 1995 }
 1996 
 1997 /*
 1998  * Walk down a list of knotes, activating them if their event has triggered.
 1999  *
 2000  * There is a possibility to optimize in the case of one kq watching another.
 2001  * Instead of scheduling a task to wake it up, you could pass enough state
 2002  * down the chain to wake up the parent kqueue directly.  Make this code
 2003  * functional first.
 2004  */
 2005 void
 2006 knote(struct knlist *list, long hint, int lockflags)
 2007 {
 2008         struct kqueue *kq;
 2009         struct knote *kn, *tkn;
 2010         int error;
 2011 
 2012         if (list == NULL)
 2013                 return;
 2014 
 2015         KNL_ASSERT_LOCK(list, lockflags & KNF_LISTLOCKED);
 2016 
 2017         if ((lockflags & KNF_LISTLOCKED) == 0)
 2018                 list->kl_lock(list->kl_lockarg); 
 2019 
 2020         /*
 2021          * If we unlock the list lock (and set KN_INFLUX), we can
 2022          * eliminate the kqueue scheduling, but this will introduce
 2023          * four lock/unlock cycles for each knote to test.  Also, a
 2024          * marker would be needed to keep the iteration position, since
 2025          * filters or other threads could remove events.
 2026          */
 2027         SLIST_FOREACH_SAFE(kn, &list->kl_list, kn_selnext, tkn) {
 2028                 kq = kn->kn_kq;
 2029                 KQ_LOCK(kq);
 2030                 if ((kn->kn_status & (KN_INFLUX | KN_SCAN)) == KN_INFLUX) {
 2031                         /*
 2032                          * Do not process the influx notes, except for
 2033                          * the influx coming from the kq unlock in the
 2034                          * kqueue_scan().  In the latter case, we do
 2035                          * not interfere with the scan, since the code
 2036                          * fragment in kqueue_scan() locks the knlist,
 2037                          * and cannot proceed until we have finished.
 2038                          */
 2039                         KQ_UNLOCK(kq);
 2040                 } else if ((lockflags & KNF_NOKQLOCK) != 0) {
 2041                         kn->kn_status |= KN_INFLUX;
 2042                         KQ_UNLOCK(kq);
 2043                         error = kn->kn_fop->f_event(kn, hint);
 2044                         KQ_LOCK(kq);
 2045                         kn->kn_status &= ~KN_INFLUX;
 2046                         if (error)
 2047                                 KNOTE_ACTIVATE(kn, 1);
 2048                         KQ_UNLOCK_FLUX(kq);
 2049                 } else {
 2050                         kn->kn_status |= KN_HASKQLOCK;
 2051                         if (kn->kn_fop->f_event(kn, hint))
 2052                                 KNOTE_ACTIVATE(kn, 1);
 2053                         kn->kn_status &= ~KN_HASKQLOCK;
 2054                         KQ_UNLOCK(kq);
 2055                 }
 2056         }
 2057         if ((lockflags & KNF_LISTLOCKED) == 0)
 2058                 list->kl_unlock(list->kl_lockarg); 
 2059 }
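      /*
       * Illustrative driver-side sketch (hypothetical "foo" softc, not part
       * of this file): an event source that registered its knlist with
       * knlist_init_mtx() posts events while holding that same mutex, so
       * knote() runs with KNF_LISTLOCKED and does not relock the list:
       *
       *	mtx_lock(&sc->sc_mtx);
       *	sc->sc_avail += n;
       *	KNOTE_LOCKED(&sc->sc_note, 0);
       *	mtx_unlock(&sc->sc_mtx);
       */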
 2060 
 2061 /*
 2062  * add a knote to a knlist
 2063  */
 2064 void
 2065 knlist_add(struct knlist *knl, struct knote *kn, int islocked)
 2066 {
 2067         KNL_ASSERT_LOCK(knl, islocked);
 2068         KQ_NOTOWNED(kn->kn_kq);
 2069         KASSERT((kn->kn_status & (KN_INFLUX|KN_DETACHED)) ==
 2070             (KN_INFLUX|KN_DETACHED), ("knote not KN_INFLUX and KN_DETACHED"));
 2071         if (!islocked)
 2072                 knl->kl_lock(knl->kl_lockarg);
 2073         SLIST_INSERT_HEAD(&knl->kl_list, kn, kn_selnext);
 2074         if (!islocked)
 2075                 knl->kl_unlock(knl->kl_lockarg);
 2076         KQ_LOCK(kn->kn_kq);
 2077         kn->kn_knlist = knl;
 2078         kn->kn_status &= ~KN_DETACHED;
 2079         KQ_UNLOCK(kn->kn_kq);
 2080 }
 2081 
 2082 static void
 2083 knlist_remove_kq(struct knlist *knl, struct knote *kn, int knlislocked,
 2084     int kqislocked)
 2085 {
 2086         KASSERT(!(!!kqislocked && !knlislocked), ("kq locked w/o knl locked"));
 2087         KNL_ASSERT_LOCK(knl, knlislocked);
 2088         mtx_assert(&kn->kn_kq->kq_lock, kqislocked ? MA_OWNED : MA_NOTOWNED);
 2089         if (!kqislocked)
 2090                 KASSERT((kn->kn_status & (KN_INFLUX|KN_DETACHED)) == KN_INFLUX,
 2091     ("knlist_remove called w/o knote being KN_INFLUX or already removed"));
 2092         if (!knlislocked)
 2093                 knl->kl_lock(knl->kl_lockarg);
 2094         SLIST_REMOVE(&knl->kl_list, kn, knote, kn_selnext);
 2095         kn->kn_knlist = NULL;
 2096         if (!knlislocked)
 2097                 kn_list_unlock(knl);
 2098         if (!kqislocked)
 2099                 KQ_LOCK(kn->kn_kq);
 2100         kn->kn_status |= KN_DETACHED;
 2101         if (!kqislocked)
 2102                 KQ_UNLOCK(kn->kn_kq);
 2103 }
 2104 
 2105 /*
 2106  * remove knote from the specified knlist
 2107  */
 2108 void
 2109 knlist_remove(struct knlist *knl, struct knote *kn, int islocked)
 2110 {
 2111 
 2112         knlist_remove_kq(knl, kn, islocked, 0);
 2113 }
 2114 
 2115 int
 2116 knlist_empty(struct knlist *knl)
 2117 {
 2118 
 2119         KNL_ASSERT_LOCKED(knl);
 2120         return (SLIST_EMPTY(&knl->kl_list));
 2121 }
 2122 
 2123 static struct mtx       knlist_lock;
 2124 MTX_SYSINIT(knlist_lock, &knlist_lock, "knlist lock for lockless objects",
 2125         MTX_DEF);
 2126 static void knlist_mtx_lock(void *arg);
 2127 static void knlist_mtx_unlock(void *arg);
 2128 
 2129 static void
 2130 knlist_mtx_lock(void *arg)
 2131 {
 2132 
 2133         mtx_lock((struct mtx *)arg);
 2134 }
 2135 
 2136 static void
 2137 knlist_mtx_unlock(void *arg)
 2138 {
 2139 
 2140         mtx_unlock((struct mtx *)arg);
 2141 }
 2142 
 2143 static void
 2144 knlist_mtx_assert_locked(void *arg)
 2145 {
 2146 
 2147         mtx_assert((struct mtx *)arg, MA_OWNED);
 2148 }
 2149 
 2150 static void
 2151 knlist_mtx_assert_unlocked(void *arg)
 2152 {
 2153 
 2154         mtx_assert((struct mtx *)arg, MA_NOTOWNED);
 2155 }
 2156 
 2157 static void
 2158 knlist_rw_rlock(void *arg)
 2159 {
 2160 
 2161         rw_rlock((struct rwlock *)arg);
 2162 }
 2163 
 2164 static void
 2165 knlist_rw_runlock(void *arg)
 2166 {
 2167 
 2168         rw_runlock((struct rwlock *)arg);
 2169 }
 2170 
 2171 static void
 2172 knlist_rw_assert_locked(void *arg)
 2173 {
 2174 
 2175         rw_assert((struct rwlock *)arg, RA_LOCKED);
 2176 }
 2177 
 2178 static void
 2179 knlist_rw_assert_unlocked(void *arg)
 2180 {
 2181 
 2182         rw_assert((struct rwlock *)arg, RA_UNLOCKED);
 2183 }
 2184 
 2185 void
 2186 knlist_init(struct knlist *knl, void *lock, void (*kl_lock)(void *),
 2187     void (*kl_unlock)(void *),
 2188     void (*kl_assert_locked)(void *), void (*kl_assert_unlocked)(void *))
 2189 {
 2190 
 2191         if (lock == NULL)
 2192                 knl->kl_lockarg = &knlist_lock;
 2193         else
 2194                 knl->kl_lockarg = lock;
 2195 
 2196         if (kl_lock == NULL)
 2197                 knl->kl_lock = knlist_mtx_lock;
 2198         else
 2199                 knl->kl_lock = kl_lock;
 2200         if (kl_unlock == NULL)
 2201                 knl->kl_unlock = knlist_mtx_unlock;
 2202         else
 2203                 knl->kl_unlock = kl_unlock;
 2204         if (kl_assert_locked == NULL)
 2205                 knl->kl_assert_locked = knlist_mtx_assert_locked;
 2206         else
 2207                 knl->kl_assert_locked = kl_assert_locked;
 2208         if (kl_assert_unlocked == NULL)
 2209                 knl->kl_assert_unlocked = knlist_mtx_assert_unlocked;
 2210         else
 2211                 knl->kl_assert_unlocked = kl_assert_unlocked;
 2212 
 2213         knl->kl_autodestroy = 0;
 2214         SLIST_INIT(&knl->kl_list);
 2215 }
 2216 
 2217 void
 2218 knlist_init_mtx(struct knlist *knl, struct mtx *lock)
 2219 {
 2220 
 2221         knlist_init(knl, lock, NULL, NULL, NULL, NULL);
 2222 }
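      /*
       * Illustrative usage sketch (hypothetical softc fields, not part of
       * this file): a driver typically protects its knlist with the mutex
       * that already guards the rest of its state, so its filter callbacks
       * run under that lock:
       *
       *	mtx_init(&sc->sc_mtx, "foo", NULL, MTX_DEF);
       *	knlist_init_mtx(&sc->sc_note, &sc->sc_mtx);
       */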
 2223 
 2224 struct knlist *
 2225 knlist_alloc(struct mtx *lock)
 2226 {
 2227         struct knlist *knl;
 2228 
 2229         knl = malloc(sizeof(struct knlist), M_KQUEUE, M_WAITOK);
 2230         knlist_init_mtx(knl, lock);
 2231         return (knl);
 2232 }
 2233 
 2234 void
 2235 knlist_init_rw_reader(struct knlist *knl, struct rwlock *lock)
 2236 {
 2237 
 2238         knlist_init(knl, lock, knlist_rw_rlock, knlist_rw_runlock,
 2239             knlist_rw_assert_locked, knlist_rw_assert_unlocked);
 2240 }
 2241 
 2242 void
 2243 knlist_destroy(struct knlist *knl)
 2244 {
 2245 
 2246 #ifdef INVARIANTS
 2247         /*
 2248          * if we run across this error, we need to find the offending
 2249          * driver and have it call knlist_clear or knlist_delete.
 2250          */
 2251         if (!SLIST_EMPTY(&knl->kl_list))
 2252                 printf("WARNING: destroying knlist w/ knotes on it!\n");
 2253 #endif
 2254 
 2255         knl->kl_lockarg = knl->kl_lock = knl->kl_unlock = NULL;
 2256         SLIST_INIT(&knl->kl_list);
 2257 }
 2258 
 2259 void
 2260 knlist_detach(struct knlist *knl)
 2261 {
 2262 
 2263         KNL_ASSERT_LOCKED(knl);
 2264         knl->kl_autodestroy = 1;
 2265         if (knlist_empty(knl)) {
 2266                 knlist_destroy(knl);
 2267                 free(knl, M_KQUEUE);
 2268         }
 2269 }
 2270 
 2271 /*
 2272  * Even if we are locked, we may need to drop the lock to allow any influx
 2273  * knotes time to "settle".
 2274  */
 2275 void
 2276 knlist_cleardel(struct knlist *knl, struct thread *td, int islocked, int killkn)
 2277 {
 2278         struct knote *kn, *kn2;
 2279         struct kqueue *kq;
 2280 
 2281         KASSERT(!knl->kl_autodestroy, ("cleardel for autodestroy %p", knl));
 2282         if (islocked)
 2283                 KNL_ASSERT_LOCKED(knl);
 2284         else {
 2285                 KNL_ASSERT_UNLOCKED(knl);
 2286 again:          /* need to reacquire lock since we have dropped it */
 2287                 knl->kl_lock(knl->kl_lockarg);
 2288         }
 2289 
 2290         SLIST_FOREACH_SAFE(kn, &knl->kl_list, kn_selnext, kn2) {
 2291                 kq = kn->kn_kq;
 2292                 KQ_LOCK(kq);
 2293                 if ((kn->kn_status & KN_INFLUX)) {
 2294                         KQ_UNLOCK(kq);
 2295                         continue;
 2296                 }
 2297                 knlist_remove_kq(knl, kn, 1, 1);
 2298                 if (killkn) {
 2299                         kn->kn_status |= KN_INFLUX | KN_DETACHED;
 2300                         KQ_UNLOCK(kq);
 2301                         knote_drop(kn, td);
 2302                 } else {
 2303                         /* Make sure cleared knotes disappear soon */
 2304                         kn->kn_flags |= (EV_EOF | EV_ONESHOT);
 2305                         KQ_UNLOCK(kq);
 2306                 }
 2307                 kq = NULL;
 2308         }
 2309 
 2310         if (!SLIST_EMPTY(&knl->kl_list)) {
 2311                 /* there are still KN_INFLUX remaining */
 2312                 kn = SLIST_FIRST(&knl->kl_list);
 2313                 kq = kn->kn_kq;
 2314                 KQ_LOCK(kq);
 2315                 KASSERT(kn->kn_status & KN_INFLUX,
 2316                     ("knote removed w/o list lock"));
 2317                 knl->kl_unlock(knl->kl_lockarg);
 2318                 kq->kq_state |= KQ_FLUXWAIT;
 2319                 msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqkclr", 0);
 2320                 kq = NULL;
 2321                 goto again;
 2322         }
 2323 
 2324         if (islocked)
 2325                 KNL_ASSERT_LOCKED(knl);
 2326         else {
 2327                 knl->kl_unlock(knl->kl_lockarg);
 2328                 KNL_ASSERT_UNLOCKED(knl);
 2329         }
 2330 }
 2331 
 2332 /*
 2333  * Remove all knotes referencing a specified fd.  This must be called
 2334  * with the FILEDESC lock held, which prevents a race where a new fd
 2335  * comes along, occupies the entry, and we attach a knote to it.
 2336  */
 2337 void
 2338 knote_fdclose(struct thread *td, int fd)
 2339 {
 2340         struct filedesc *fdp = td->td_proc->p_fd;
 2341         struct kqueue *kq;
 2342         struct knote *kn;
 2343         int influx;
 2344 
 2345         FILEDESC_XLOCK_ASSERT(fdp);
 2346 
 2347         /*
 2348          * We shouldn't have to worry about new kevents appearing on fd
 2349          * since filedesc is locked.
 2350          */
 2351         TAILQ_FOREACH(kq, &fdp->fd_kqlist, kq_list) {
 2352                 KQ_LOCK(kq);
 2353 
 2354 again:
 2355                 influx = 0;
 2356                 while (kq->kq_knlistsize > fd &&
 2357                     (kn = SLIST_FIRST(&kq->kq_knlist[fd])) != NULL) {
 2358                         if (kn->kn_status & KN_INFLUX) {
 2359                                 /* someone else might be waiting on our knote */
 2360                                 if (influx)
 2361                                         wakeup(kq);
 2362                                 kq->kq_state |= KQ_FLUXWAIT;
 2363                                 msleep(kq, &kq->kq_lock, PSOCK, "kqflxwt", 0);
 2364                                 goto again;
 2365                         }
 2366                         kn->kn_status |= KN_INFLUX;
 2367                         KQ_UNLOCK(kq);
 2368                         if (!(kn->kn_status & KN_DETACHED))
 2369                                 kn->kn_fop->f_detach(kn);
 2370                         knote_drop(kn, td);
 2371                         influx = 1;
 2372                         KQ_LOCK(kq);
 2373                 }
 2374                 KQ_UNLOCK_FLUX(kq);
 2375         }
 2376 }
 2377 
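      /*
       * Link an INFLUX knote into the kqueue's lookup structure: the per-fd
       * kq_knlist array for fd-backed filters, or a kq_knhash bucket
       * otherwise.  kqueue_expand() must already have sized the storage,
       * hence the ENOMEM returns below.
       */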
 2378 static int
 2379 knote_attach(struct knote *kn, struct kqueue *kq)
 2380 {
 2381         struct klist *list;
 2382 
 2383         KASSERT(kn->kn_status & KN_INFLUX, ("knote not marked INFLUX"));
 2384         KQ_OWNED(kq);
 2385 
 2386         if (kn->kn_fop->f_isfd) {
 2387                 if (kn->kn_id >= kq->kq_knlistsize)
 2388                         return (ENOMEM);
 2389                 list = &kq->kq_knlist[kn->kn_id];
 2390         } else {
 2391                 if (kq->kq_knhash == NULL)
 2392                         return (ENOMEM);
 2393                 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
 2394         }
 2395 
 2396         SLIST_INSERT_HEAD(list, kn, kn_link);
 2397 
 2398         return (0);
 2399 }
 2400 
 2401 /*
 2402  * The knote must already have been detached using the f_detach method.
 2403  * No lock needs to be held; it is assumed that the KN_INFLUX flag is
 2404  * set to prevent concurrent removal.
 2405  */
 2406 static void
 2407 knote_drop(struct knote *kn, struct thread *td)
 2408 {
 2409         struct kqueue *kq;
 2410         struct klist *list;
 2411 
 2412         kq = kn->kn_kq;
 2413 
 2414         KQ_NOTOWNED(kq);
 2415         KASSERT((kn->kn_status & KN_INFLUX) == KN_INFLUX,
 2416             ("knote_drop called without KN_INFLUX set in kn_status"));
 2417 
 2418         KQ_LOCK(kq);
 2419         if (kn->kn_fop->f_isfd)
 2420                 list = &kq->kq_knlist[kn->kn_id];
 2421         else
 2422                 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
 2423 
 2424         if (!SLIST_EMPTY(list))
 2425                 SLIST_REMOVE(list, kn, knote, kn_link);
 2426         if (kn->kn_status & KN_QUEUED)
 2427                 knote_dequeue(kn);
 2428         KQ_UNLOCK_FLUX(kq);
 2429 
 2430         if (kn->kn_fop->f_isfd) {
 2431                 fdrop(kn->kn_fp, td);
 2432                 kn->kn_fp = NULL;
 2433         }
 2434         kqueue_fo_release(kn->kn_kevent.filter);
 2435         kn->kn_fop = NULL;
 2436         knote_free(kn);
 2437 }
 2438 
 2439 static void
 2440 knote_enqueue(struct knote *kn)
 2441 {
 2442         struct kqueue *kq = kn->kn_kq;
 2443 
 2444         KQ_OWNED(kn->kn_kq);
 2445         KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));
 2446 
 2447         TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
 2448         kn->kn_status |= KN_QUEUED;
 2449         kq->kq_count++;
 2450         kqueue_wakeup(kq);
 2451 }
 2452 
 2453 static void
 2454 knote_dequeue(struct knote *kn)
 2455 {
 2456         struct kqueue *kq = kn->kn_kq;
 2457 
 2458         KQ_OWNED(kn->kn_kq);
 2459         KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
 2460 
 2461         TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
 2462         kn->kn_status &= ~KN_QUEUED;
 2463         kq->kq_count--;
 2464 }
 2465 
 2466 static void
 2467 knote_init(void)
 2468 {
 2469 
 2470         knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL,
 2471             NULL, NULL, UMA_ALIGN_PTR, 0);
 2472 }
 2473 SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL);
 2474 
 2475 static struct knote *
 2476 knote_alloc(int waitok)
 2477 {
 2478 
 2479         return (uma_zalloc(knote_zone, (waitok ? M_WAITOK : M_NOWAIT) |
 2480             M_ZERO));
 2481 }
 2482 
 2483 static void
 2484 knote_free(struct knote *kn)
 2485 {
 2486 
 2487         uma_zfree(knote_zone, kn);
 2488 }
 2489 
 2490 /*
 2491  * Register the kev w/ the kq specified by fd.
 2492  */
 2493 int 
 2494 kqfd_register(int fd, struct kevent *kev, struct thread *td, int waitok)
 2495 {
 2496         struct kqueue *kq;
 2497         struct file *fp;
 2498         cap_rights_t rights;
 2499         int error;
 2500 
 2501         error = fget(td, fd, cap_rights_init(&rights, CAP_KQUEUE_CHANGE), &fp);
 2502         if (error != 0)
 2503                 return (error);
 2504         if ((error = kqueue_acquire(fp, &kq)) != 0)
 2505                 goto noacquire;
 2506 
 2507         error = kqueue_register(kq, kev, td, waitok);
 2508 
 2509         kqueue_release(kq, 0);
 2510 
 2511 noacquire:
 2512         fdrop(fp, td);
 2513 
 2514         return (error);
 2515 }
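      /*
       * Illustrative in-kernel usage sketch (hedged; "job" and "kqfd" are
       * hypothetical): a subsystem handed a kqueue fd by userland can post
       * an event to it via kqfd_register():
       *
       *	struct kevent kev;
       *
       *	EV_SET(&kev, (uintptr_t)job, EVFILT_AIO, EV_ADD | EV_ONESHOT,
       *	    0, 0, job);
       *	error = kqfd_register(kqfd, &kev, td, 1);
       */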
