FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_event.c

    1 /*      $NetBSD: kern_event.c,v 1.22 2005/02/26 21:34:55 perry Exp $    */
    2 /*-
    3  * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
    4  * All rights reserved.
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions
    8  * are met:
    9  * 1. Redistributions of source code must retain the above copyright
   10  *    notice, this list of conditions and the following disclaimer.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  *
   15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   25  * SUCH DAMAGE.
   26  *
   27  * $FreeBSD: src/sys/kern/kern_event.c,v 1.27 2001/07/05 17:10:44 rwatson Exp $
   28  */
   29 
   30 #include <sys/cdefs.h>
   31 __KERNEL_RCSID(0, "$NetBSD: kern_event.c,v 1.22 2005/02/26 21:34:55 perry Exp $");
   32 
   33 #include <sys/param.h>
   34 #include <sys/systm.h>
   35 #include <sys/kernel.h>
   36 #include <sys/proc.h>
   37 #include <sys/malloc.h>
   38 #include <sys/unistd.h>
   39 #include <sys/file.h>
   40 #include <sys/fcntl.h>
   41 #include <sys/select.h>
   42 #include <sys/queue.h>
   43 #include <sys/event.h>
   44 #include <sys/eventvar.h>
   45 #include <sys/poll.h>
   46 #include <sys/pool.h>
   47 #include <sys/protosw.h>
   48 #include <sys/socket.h>
   49 #include <sys/socketvar.h>
   50 #include <sys/stat.h>
   51 #include <sys/uio.h>
   52 #include <sys/mount.h>
   53 #include <sys/filedesc.h>
   54 #include <sys/sa.h>
   55 #include <sys/syscallargs.h>
   56 
   57 static int      kqueue_scan(struct file *fp, size_t maxevents,
   58                     struct kevent *ulistp, const struct timespec *timeout,
   59                     struct proc *p, register_t *retval);
   60 static void     kqueue_wakeup(struct kqueue *kq);
   61 
   62 static int      kqueue_read(struct file *fp, off_t *offset, struct uio *uio,
   63                     struct ucred *cred, int flags);
   64 static int      kqueue_write(struct file *fp, off_t *offset, struct uio *uio,
   65                     struct ucred *cred, int flags);
   66 static int      kqueue_ioctl(struct file *fp, u_long com, void *data,
   67                     struct proc *p);
   68 static int      kqueue_fcntl(struct file *fp, u_int com, void *data,
   69                     struct proc *p);
   70 static int      kqueue_poll(struct file *fp, int events, struct proc *p);
   71 static int      kqueue_kqfilter(struct file *fp, struct knote *kn);
   72 static int      kqueue_stat(struct file *fp, struct stat *sp, struct proc *p);
   73 static int      kqueue_close(struct file *fp, struct proc *p);
   74 
   75 static const struct fileops kqueueops = {
   76         kqueue_read, kqueue_write, kqueue_ioctl, kqueue_fcntl, kqueue_poll,
   77         kqueue_stat, kqueue_close, kqueue_kqfilter
   78 };
   79 
   80 static void     knote_attach(struct knote *kn, struct filedesc *fdp);
   81 static void     knote_drop(struct knote *kn, struct proc *p,
   82                     struct filedesc *fdp);
   83 static void     knote_enqueue(struct knote *kn);
   84 static void     knote_dequeue(struct knote *kn);
   85 
   86 static void     filt_kqdetach(struct knote *kn);
   87 static int      filt_kqueue(struct knote *kn, long hint);
   88 static int      filt_procattach(struct knote *kn);
   89 static void     filt_procdetach(struct knote *kn);
   90 static int      filt_proc(struct knote *kn, long hint);
   91 static int      filt_fileattach(struct knote *kn);
   92 static void     filt_timerexpire(void *knx);
   93 static int      filt_timerattach(struct knote *kn);
   94 static void     filt_timerdetach(struct knote *kn);
   95 static int      filt_timer(struct knote *kn, long hint);
   96 
   97 static const struct filterops kqread_filtops =
   98         { 1, NULL, filt_kqdetach, filt_kqueue };
   99 static const struct filterops proc_filtops =
  100         { 0, filt_procattach, filt_procdetach, filt_proc };
  101 static const struct filterops file_filtops =
  102         { 1, filt_fileattach, NULL, NULL };
  103 static struct filterops timer_filtops =
  104         { 0, filt_timerattach, filt_timerdetach, filt_timer };
  105 
  106 POOL_INIT(kqueue_pool, sizeof(struct kqueue), 0, 0, 0, "kqueuepl", NULL);
  107 POOL_INIT(knote_pool, sizeof(struct knote), 0, 0, 0, "knotepl", NULL);
  108 static int      kq_ncallouts = 0;
  109 static int      kq_calloutmax = (4 * 1024);
  110 
  111 MALLOC_DEFINE(M_KEVENT, "kevent", "kevents/knotes");
  112 
  113 #define KNOTE_ACTIVATE(kn)                                              \
  114 do {                                                                    \
  115         kn->kn_status |= KN_ACTIVE;                                     \
  116         if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)           \
  117                 knote_enqueue(kn);                                      \
  118 } while(0)
  119 
  120 #define KN_HASHSIZE             64              /* XXX should be tunable */
  121 #define KN_HASH(val, mask)      (((val) ^ ((val) >> 8)) & (mask))
  122 
  123 extern const struct filterops sig_filtops;
  124 
  125 /*
  126  * Table for all system-defined filters.
  127  * These should be listed in the numeric order of the EVFILT_* defines.
  128  * If filtops is NULL, the filter isn't implemented in NetBSD.
  129  * End of list is when name is NULL.
  130  */
  131 struct kfilter {
  132         const char       *name;         /* name of filter */
  133         uint32_t          filter;       /* id of filter */
  134         const struct filterops *filtops;/* operations for filter */
  135 };
  136 
  137                 /* System defined filters */
  138 static const struct kfilter sys_kfilters[] = {
  139         { "EVFILT_READ",        EVFILT_READ,    &file_filtops },
  140         { "EVFILT_WRITE",       EVFILT_WRITE,   &file_filtops },
  141         { "EVFILT_AIO",         EVFILT_AIO,     NULL },
  142         { "EVFILT_VNODE",       EVFILT_VNODE,   &file_filtops },
  143         { "EVFILT_PROC",        EVFILT_PROC,    &proc_filtops },
  144         { "EVFILT_SIGNAL",      EVFILT_SIGNAL,  &sig_filtops },
  145         { "EVFILT_TIMER",       EVFILT_TIMER,   &timer_filtops },
  146         { NULL,                 0,              NULL }, /* end of list */
  147 };
  148 
  149                 /* User defined kfilters */
  150 static struct kfilter   *user_kfilters;         /* array */
  151 static int              user_kfilterc;          /* current offset */
  152 static int              user_kfiltermaxc;       /* max size so far */
  153 
  154 /*
  155  * Find kfilter entry by name, or NULL if not found.
  156  */
  157 static const struct kfilter *
  158 kfilter_byname_sys(const char *name)
  159 {
  160         int i;
  161 
  162         for (i = 0; sys_kfilters[i].name != NULL; i++) {
  163                 if (strcmp(name, sys_kfilters[i].name) == 0)
  164                         return (&sys_kfilters[i]);
  165         }
  166         return (NULL);
  167 }
  168 
  169 static struct kfilter *
  170 kfilter_byname_user(const char *name)
  171 {
  172         int i;
  173 
  174         /* user_kfilters[] could be NULL if no filters were registered */
  175         if (!user_kfilters)
  176                 return (NULL);
  177 
  178         for (i = 0; user_kfilters[i].name != NULL; i++) {
  179                 if (user_kfilters[i].name[0] != '\0' &&
  180                     strcmp(name, user_kfilters[i].name) == 0)
  181                         return (&user_kfilters[i]);
  182         }
  183         return (NULL);
  184 }
  185 
  186 static const struct kfilter *
  187 kfilter_byname(const char *name)
  188 {
  189         const struct kfilter *kfilter;
  190 
  191         if ((kfilter = kfilter_byname_sys(name)) != NULL)
  192                 return (kfilter);
  193 
  194         return (kfilter_byname_user(name));
  195 }
  196 
  197 /*
  198  * Find kfilter entry by filter id, or NULL if not found.
  199  * Assumes entries are indexed in filter id order, for speed.
  200  */
  201 static const struct kfilter *
  202 kfilter_byfilter(uint32_t filter)
  203 {
  204         const struct kfilter *kfilter;
  205 
  206         if (filter < EVFILT_SYSCOUNT)   /* it's a system filter */
  207                 kfilter = &sys_kfilters[filter];
  208         else if (user_kfilters != NULL &&
  209             filter < EVFILT_SYSCOUNT + user_kfilterc)
  210                                         /* it's a user filter */
  211                 kfilter = &user_kfilters[filter - EVFILT_SYSCOUNT];
  212         else
  213                 return (NULL);          /* out of range */
  214         KASSERT(kfilter->filter == filter);     /* sanity check! */
  215         return (kfilter);
  216 }
  217 
  218 /*
  219  * Register a new kfilter. Stores the entry in user_kfilters.
  220  * Returns 0 if operation succeeded, or an appropriate errno(2) otherwise.
  221  * If retfilter != NULL, the new filterid is returned in it.
  222  */
  223 int
  224 kfilter_register(const char *name, const struct filterops *filtops,
  225     int *retfilter)
  226 {
  227         struct kfilter *kfilter;
  228         void *space;
  229         int len;
  230 
  231         if (name == NULL || name[0] == '\0' || filtops == NULL)
  232                 return (EINVAL);        /* invalid args */
  233         if (kfilter_byname(name) != NULL)
  234                 return (EEXIST);        /* already exists */
  235         if (user_kfilterc > 0xffffffff - EVFILT_SYSCOUNT)
  236                 return (EINVAL);        /* too many */
  237 
  238         /* check if need to grow user_kfilters */
  239         if (user_kfilterc + 1 > user_kfiltermaxc) {
  240                 /*
  241                  * Grow in KFILTER_EXTENT chunks. Use malloc(9), because we
  242                  * want to traverse user_kfilters as an array.
  243                  */
  244                 user_kfiltermaxc += KFILTER_EXTENT;
  245                 kfilter = malloc(user_kfiltermaxc * sizeof(struct kfilter),
  246                     M_KEVENT, M_WAITOK);
  247 
  248                 /* copy existing user_kfilters */
  249                 if (user_kfilters != NULL)
  250                         memcpy((caddr_t)kfilter, (caddr_t)user_kfilters,
  251                             user_kfilterc * sizeof(struct kfilter));
  252                                         /* zero new sections */
  253                 memset((caddr_t)kfilter +
  254                     user_kfilterc * sizeof(struct kfilter), 0,
  255                     (user_kfiltermaxc - user_kfilterc) *
  256                     sizeof(struct kfilter));
  257                                         /* switch to new kfilter */
  258                 if (user_kfilters != NULL)
  259                         free(user_kfilters, M_KEVENT);
  260                 user_kfilters = kfilter;
  261         }
  262         len = strlen(name) + 1;         /* copy name */
  263         space = malloc(len, M_KEVENT, M_WAITOK);
  264         memcpy(space, name, len);
  265         user_kfilters[user_kfilterc].name = space;
  266 
  267         user_kfilters[user_kfilterc].filter = user_kfilterc + EVFILT_SYSCOUNT;
  268 
  269         len = sizeof(struct filterops); /* copy filtops */
  270         space = malloc(len, M_KEVENT, M_WAITOK);
  271         memcpy(space, filtops, len);
  272         user_kfilters[user_kfilterc].filtops = space;
  273 
  274         if (retfilter != NULL)
  275                 *retfilter = user_kfilters[user_kfilterc].filter;
  276         user_kfilterc++;                /* finally, increment count */
  277         return (0);
  278 }
  279 
  280 /*
  281  * Unregister a kfilter previously registered with kfilter_register.
  282  * This retains the filter id, but clears the name and frees filtops (filter
  283  * operations), so that the number isn't reused during a boot.
  284  * Returns 0 if operation succeeded, or an appropriate errno(2) otherwise.
  285  */
  286 int
  287 kfilter_unregister(const char *name)
  288 {
  289         struct kfilter *kfilter;
  290 
  291         if (name == NULL || name[0] == '\0')
  292                 return (EINVAL);        /* invalid name */
  293 
  294         if (kfilter_byname_sys(name) != NULL)
  295                 return (EINVAL);        /* can't detach system filters */
  296 
  297         kfilter = kfilter_byname_user(name);
  298         if (kfilter == NULL)            /* not found */
  299                 return (ENOENT);
  300 
  301         if (kfilter->name[0] != '\0') {
  302                 /* XXX Cast away const (but we know it's safe). */
  303                 free((void *) kfilter->name, M_KEVENT);
  304                 kfilter->name = "";     /* mark as `not implemented' */
  305         }
  306         if (kfilter->filtops != NULL) {
  307                 /* XXX Cast away const (but we know it's safe). */
  308                 free((void *) kfilter->filtops, M_KEVENT);
  309                 kfilter->filtops = NULL; /* mark as `not implemented' */
  310         }
  311         return (0);
  312 }
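
A rough sketch of how third-party kernel code (a driver or loadable module) might use the two routines above: it registers a hypothetical "EVFILT_EXAMPLE" filter at module load and unregisters it at unload. Everything named example_* is invented for illustration and is not part of this file; the filterops initializer follows the { f_isfd, f_attach, f_detach, f_event } layout used above.

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/event.h>

    static int      example_attach(struct knote *kn);
    static void     example_detach(struct knote *kn);
    static int      example_event(struct knote *kn, long hint);

    static const struct filterops example_filtops =
            { 0, example_attach, example_detach, example_event };

    static int      example_filter_id;

    static int
    example_attach(struct knote *kn)
    {

            return (0);             /* nothing to hook up in this sketch */
    }

    static void
    example_detach(struct knote *kn)
    {

            /* nothing to tear down */
    }

    static int
    example_event(struct knote *kn, long hint)
    {

            kn->kn_data = 0;
            return (1);             /* always report "ready" */
    }

    void
    example_modload(void)
    {
            int error;

            /* Make "EVFILT_EXAMPLE" usable via the id stored by the call. */
            error = kfilter_register("EVFILT_EXAMPLE", &example_filtops,
                &example_filter_id);
            if (error)
                    printf("kfilter_register failed: %d\n", error);
    }

    void
    example_modunload(void)
    {

            /* Frees the copied name and filtops; the id stays reserved. */
            (void)kfilter_unregister("EVFILT_EXAMPLE");
    }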
  313 
  314 
  315 /*
  316  * Filter attach method for EVFILT_READ and EVFILT_WRITE on normal file
  317  * descriptors. Calls struct fileops kqfilter method for given file descriptor.
  318  */
  319 static int
  320 filt_fileattach(struct knote *kn)
  321 {
  322         struct file *fp;
  323 
  324         fp = kn->kn_fp;
  325         return ((*fp->f_ops->fo_kqfilter)(fp, kn));
  326 }
  327 
  328 /*
  329  * Filter detach method for EVFILT_READ on kqueue descriptor.
  330  */
  331 static void
  332 filt_kqdetach(struct knote *kn)
  333 {
  334         struct kqueue *kq;
  335 
  336         kq = (struct kqueue *)kn->kn_fp->f_data;
  337         SLIST_REMOVE(&kq->kq_sel.sel_klist, kn, knote, kn_selnext);
  338 }
  339 
  340 /*
  341  * Filter event method for EVFILT_READ on kqueue descriptor.
  342  */
  343 /*ARGSUSED*/
  344 static int
  345 filt_kqueue(struct knote *kn, long hint)
  346 {
  347         struct kqueue *kq;
  348 
  349         kq = (struct kqueue *)kn->kn_fp->f_data;
  350         kn->kn_data = kq->kq_count;
  351         return (kn->kn_data > 0);
  352 }
  353 
  354 /*
  355  * Filter attach method for EVFILT_PROC.
  356  */
  357 static int
  358 filt_procattach(struct knote *kn)
  359 {
  360         struct proc *p;
  361 
  362         p = pfind(kn->kn_id);
  363         if (p == NULL)
  364                 return (ESRCH);
  365 
  366         /*
  367          * Fail if it's not owned by you, or the last exec gave us
  368          * setuid/setgid privs (unless you're root).
  369          */
  370         if ((p->p_cred->p_ruid != curproc->p_cred->p_ruid ||
  371                 (p->p_flag & P_SUGID))
  372             && suser(curproc->p_ucred, &curproc->p_acflag) != 0)
  373                 return (EACCES);
  374 
  375         kn->kn_ptr.p_proc = p;
  376         kn->kn_flags |= EV_CLEAR;       /* automatically set */
  377 
  378         /*
  379          * internal flag indicating registration done by kernel
  380          */
  381         if (kn->kn_flags & EV_FLAG1) {
  382                 kn->kn_data = kn->kn_sdata;     /* ppid */
  383                 kn->kn_fflags = NOTE_CHILD;
  384                 kn->kn_flags &= ~EV_FLAG1;
  385         }
  386 
  387         /* XXXSMP lock the process? */
  388         SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);
  389 
  390         return (0);
  391 }
  392 
  393 /*
  394  * Filter detach method for EVFILT_PROC.
  395  *
  396  * The knote may be attached to a different process, which may exit,
  397  * leaving nothing for the knote to be attached to.  So when the process
  398  * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
  399  * it will be deleted when read out.  However, as part of the knote deletion,
  400  * this routine is called, so a check is needed to avoid actually performing
  401  * a detach, because the original process might not exist any more.
  402  */
  403 static void
  404 filt_procdetach(struct knote *kn)
  405 {
  406         struct proc *p;
  407 
  408         if (kn->kn_status & KN_DETACHED)
  409                 return;
  410 
  411         p = kn->kn_ptr.p_proc;
  412         KASSERT(p->p_stat == SZOMB || pfind(kn->kn_id) == p);
  413 
  414         /* XXXSMP lock the process? */
  415         SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
  416 }
  417 
  418 /*
  419  * Filter event method for EVFILT_PROC.
  420  */
  421 static int
  422 filt_proc(struct knote *kn, long hint)
  423 {
  424         u_int event;
  425 
  426         /*
  427          * mask off extra data
  428          */
  429         event = (u_int)hint & NOTE_PCTRLMASK;
  430 
  431         /*
  432          * if the user is interested in this event, record it.
  433          */
  434         if (kn->kn_sfflags & event)
  435                 kn->kn_fflags |= event;
  436 
  437         /*
  438          * process is gone, so flag the event as finished.
  439          */
  440         if (event == NOTE_EXIT) {
  441                 /*
  442                  * Detach the knote from the watched process and mark
  443                  * it as such. We can't leave this to kqueue_scan(),
  444                  * since the process might not exist by then. And we
  445                  * have to do it now, since psignal's KNOTE() is also
  446                  * called for zombies, so we might end up reading freed
  447                  * memory if the kevent has already been picked up and
  448                  * the knote g/c'ed by then.
  449                  */
  450                 kn->kn_fop->f_detach(kn);
  451                 kn->kn_status |= KN_DETACHED;
  452 
  453                 /* Mark as ONESHOT, so that the knote is g/c'ed when read */
  454                 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
  455                 return (1);
  456         }
  457 
  458         /*
  459          * process forked, and user wants to track the new process,
  460          * so attach a new knote to it, and immediately report an
  461          * event with the parent's pid.
  462          */
  463         if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
  464                 struct kevent kev;
  465                 int error;
  466 
  467                 /*
  468                  * register knote with new process.
  469                  */
  470                 kev.ident = hint & NOTE_PDATAMASK;      /* pid */
  471                 kev.filter = kn->kn_filter;
  472                 kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
  473                 kev.fflags = kn->kn_sfflags;
  474                 kev.data = kn->kn_id;                   /* parent */
  475                 kev.udata = kn->kn_kevent.udata;        /* preserve udata */
  476                 error = kqueue_register(kn->kn_kq, &kev, NULL);
  477                 if (error)
  478                         kn->kn_fflags |= NOTE_TRACKERR;
  479         }
  480 
  481         return (kn->kn_fflags != 0);
  482 }
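
From userspace, the EVFILT_PROC paths above are exercised by a registration like the following sketch (illustrative only, not part of this file). NOTE_EXIT reports the watched process exiting, and NOTE_FORK plus NOTE_TRACK asks the fork handling above to attach a knote to each new child automatically.

    #include <sys/types.h>
    #include <sys/event.h>

    /* Assumes kq is an existing kqueue descriptor and pid a process we own. */
    static int
    watch_process(int kq, pid_t pid)
    {
            struct kevent kev;

            EV_SET(&kev, pid, EVFILT_PROC, EV_ADD | EV_ENABLE,
                NOTE_EXIT | NOTE_FORK | NOTE_TRACK, 0, 0);
            /* nevents == 0: register only; failures come back via errno. */
            return (kevent(kq, &kev, 1, NULL, 0, NULL));
    }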
  483 
  484 static void
  485 filt_timerexpire(void *knx)
  486 {
  487         struct knote *kn = knx;
  488         int tticks;
  489 
  490         kn->kn_data++;
  491         KNOTE_ACTIVATE(kn);
  492 
  493         if ((kn->kn_flags & EV_ONESHOT) == 0) {
  494                 tticks = mstohz(kn->kn_sdata);
  495                 callout_schedule((struct callout *)kn->kn_hook, tticks);
  496         }
  497 }
  498 
  499 /*
  500  * data contains amount of time to sleep, in milliseconds
  501  */
  502 static int
  503 filt_timerattach(struct knote *kn)
  504 {
  505         struct callout *calloutp;
  506         int tticks;
  507 
  508         if (kq_ncallouts >= kq_calloutmax)
  509                 return (ENOMEM);
  510         kq_ncallouts++;
  511 
  512         tticks = mstohz(kn->kn_sdata);
  513 
  514         /* if the supplied value is under our resolution, use 1 tick */
  515         if (tticks == 0) {
  516                 if (kn->kn_sdata == 0)
  517                         return (EINVAL);
  518                 tticks = 1;
  519         }
  520 
  521         kn->kn_flags |= EV_CLEAR;               /* automatically set */
  522         MALLOC(calloutp, struct callout *, sizeof(*calloutp),
  523             M_KEVENT, 0);
  524         callout_init(calloutp);
  525         callout_reset(calloutp, tticks, filt_timerexpire, kn);
  526         kn->kn_hook = calloutp;
  527 
  528         return (0);
  529 }
  530 
  531 static void
  532 filt_timerdetach(struct knote *kn)
  533 {
  534         struct callout *calloutp;
  535 
  536         calloutp = (struct callout *)kn->kn_hook;
  537         callout_stop(calloutp);
  538         FREE(calloutp, M_KEVENT);
  539         kq_ncallouts--;
  540 }
  541 
  542 static int
  543 filt_timer(struct knote *kn, long hint)
  544 {
  545         return (kn->kn_data != 0);
  546 }
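
Since filt_timerattach() interprets kn_sdata as milliseconds, a userspace caller can request a periodic 500 ms event along these lines (a sketch, not part of this file; the timer ident value 1 is arbitrary):

    #include <sys/event.h>

    /* Assumes kq is an existing kqueue descriptor. */
    static int
    add_periodic_timer(int kq)
    {
            struct kevent kev;

            /* data is the period in milliseconds; EV_CLEAR is set for us. */
            EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD | EV_ENABLE, 0, 500, 0);
            return (kevent(kq, &kev, 1, NULL, 0, NULL));
    }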
  547 
  548 /*
  549  * filt_seltrue:
  550  *
  551  *      This filter "event" routine simulates seltrue().
  552  */
  553 int
  554 filt_seltrue(struct knote *kn, long hint)
  555 {
  556 
  557         /*
  558          * We don't know how much data can be read/written,
  559          * but we know that it *can* be.  This is about as
  560  * good as select/poll does.
  561          */
  562         kn->kn_data = 0;
  563         return (1);
  564 }
  565 
  566 /*
  567  * This provides a full kqfilter entry for device switch tables, which
  568  * has the same effect as a filter using filt_seltrue() as its filter method.
  569  */
  570 static void
  571 filt_seltruedetach(struct knote *kn)
  572 {
  573         /* Nothing to do */
  574 }
  575 
  576 static const struct filterops seltrue_filtops =
  577         { 1, NULL, filt_seltruedetach, filt_seltrue };
  578 
  579 int
  580 seltrue_kqfilter(dev_t dev, struct knote *kn)
  581 {
  582         switch (kn->kn_filter) {
  583         case EVFILT_READ:
  584         case EVFILT_WRITE:
  585                 kn->kn_fop = &seltrue_filtops;
  586                 break;
  587         default:
  588                 return (1);
  589         }
  590 
  591         /* Nothing more to do */
  592         return (0);
  593 }
  594 
  595 /*
  596  * kqueue(2) system call.
  597  */
  598 int
  599 sys_kqueue(struct lwp *l, void *v, register_t *retval)
  600 {
  601         struct filedesc *fdp;
  602         struct kqueue   *kq;
  603         struct file     *fp;
  604         struct proc     *p;
  605         int             fd, error;
  606 
  607         p = l->l_proc;
  608         fdp = p->p_fd;
  609         error = falloc(p, &fp, &fd);    /* setup a new file descriptor */
  610         if (error)
  611                 return (error);
  612         fp->f_flag = FREAD | FWRITE;
  613         fp->f_type = DTYPE_KQUEUE;
  614         fp->f_ops = &kqueueops;
  615         kq = pool_get(&kqueue_pool, PR_WAITOK);
  616         memset((char *)kq, 0, sizeof(struct kqueue));
  617         simple_lock_init(&kq->kq_lock);
  618         TAILQ_INIT(&kq->kq_head);
  619         fp->f_data = (caddr_t)kq;       /* store the kqueue with the fp */
  620         *retval = fd;
  621         if (fdp->fd_knlistsize < 0)
  622                 fdp->fd_knlistsize = 0; /* this process has a kq */
  623         kq->kq_fdp = fdp;
  624         FILE_SET_MATURE(fp);
  625         FILE_UNUSE(fp, p);              /* falloc() does FILE_USE() */
  626         return (error);
  627 }
  628 
  629 /*
  630  * kevent(2) system call.
  631  */
  632 int
  633 sys_kevent(struct lwp *l, void *v, register_t *retval)
  634 {
  635         struct sys_kevent_args /* {
  636                 syscallarg(int) fd;
  637                 syscallarg(const struct kevent *) changelist;
  638                 syscallarg(size_t) nchanges;
  639                 syscallarg(struct kevent *) eventlist;
  640                 syscallarg(size_t) nevents;
  641                 syscallarg(const struct timespec *) timeout;
  642         } */ *uap = v;
  643         struct kevent   *kevp;
  644         struct kqueue   *kq;
  645         struct file     *fp;
  646         struct timespec ts;
  647         struct proc     *p;
  648         size_t          i, n;
  649         int             nerrors, error;
  650 
  651         p = l->l_proc;
  652         /* check that we're dealing with a kq */
  653         fp = fd_getfile(p->p_fd, SCARG(uap, fd));
  654         if (fp == NULL)
  655                 return (EBADF);
  656 
  657         if (fp->f_type != DTYPE_KQUEUE) {
  658                 simple_unlock(&fp->f_slock);
  659                 return (EBADF);
  660         }
  661 
  662         FILE_USE(fp);
  663 
  664         if (SCARG(uap, timeout) != NULL) {
  665                 error = copyin(SCARG(uap, timeout), &ts, sizeof(ts));
  666                 if (error)
  667                         goto done;
  668                 SCARG(uap, timeout) = &ts;
  669         }
  670 
  671         kq = (struct kqueue *)fp->f_data;
  672         nerrors = 0;
  673 
  674         /* traverse list of events to register */
  675         while (SCARG(uap, nchanges) > 0) {
  676                 /* copyin a maximum of KQ_NEVENTS at each pass */
  677                 n = MIN(SCARG(uap, nchanges), KQ_NEVENTS);
  678                 error = copyin(SCARG(uap, changelist), kq->kq_kev,
  679                     n * sizeof(struct kevent));
  680                 if (error)
  681                         goto done;
  682                 for (i = 0; i < n; i++) {
  683                         kevp = &kq->kq_kev[i];
  684                         kevp->flags &= ~EV_SYSFLAGS;
  685                         /* register each knote */
  686                         error = kqueue_register(kq, kevp, p);
  687                         if (error) {
  688                                 if (SCARG(uap, nevents) != 0) {
  689                                         kevp->flags = EV_ERROR;
  690                                         kevp->data = error;
  691                                         error = copyout((caddr_t)kevp,
  692                                             (caddr_t)SCARG(uap, eventlist),
  693                                             sizeof(*kevp));
  694                                         if (error)
  695                                                 goto done;
  696                                         SCARG(uap, eventlist)++;
  697                                         SCARG(uap, nevents)--;
  698                                         nerrors++;
  699                                 } else {
  700                                         goto done;
  701                                 }
  702                         }
  703                 }
  704                 SCARG(uap, nchanges) -= n;      /* update the results */
  705                 SCARG(uap, changelist) += n;
  706         }
  707         if (nerrors) {
  708                 *retval = nerrors;
  709                 error = 0;
  710                 goto done;
  711         }
  712 
  713         /* actually scan through the events */
  714         error = kqueue_scan(fp, SCARG(uap, nevents), SCARG(uap, eventlist),
  715             SCARG(uap, timeout), p, retval);
  716  done:
  717         FILE_UNUSE(fp, p);
  718         return (error);
  719 }
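
To make the flow of sys_kqueue() and sys_kevent() concrete, here is a minimal userspace sketch of the corresponding kqueue(2)/kevent(2) calls. It is illustrative only and not part of this file; error handling is abbreviated and fd is assumed to be an open descriptor.

    #include <sys/types.h>
    #include <sys/event.h>
    #include <sys/time.h>
    #include <err.h>

    static void
    wait_for_readable(int fd)
    {
            struct kevent change, event;
            struct timespec timeout = { 5, 0 };     /* give up after 5 s */
            int kq, n;

            if ((kq = kqueue()) == -1)
                    err(1, "kqueue");

            /* One changelist entry: watch fd for readability. */
            EV_SET(&change, fd, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, 0);

            /* Apply the change and wait for at most one event. */
            n = kevent(kq, &change, 1, &event, 1, &timeout);
            if (n == -1)
                    err(1, "kevent");
            else if (n == 0)
                    warnx("timed out");
            else
                    warnx("%ld byte(s) ready on fd %ld",
                        (long)event.data, (long)event.ident);
    }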
  720 
  721 /*
  722  * Register a given kevent kev onto the kqueue
  723  */
  724 int
  725 kqueue_register(struct kqueue *kq, struct kevent *kev, struct proc *p)
  726 {
  727         const struct kfilter *kfilter;
  728         struct filedesc *fdp;
  729         struct file     *fp;
  730         struct knote    *kn;
  731         int             s, error;
  732 
  733         fdp = kq->kq_fdp;
  734         fp = NULL;
  735         kn = NULL;
  736         error = 0;
  737         kfilter = kfilter_byfilter(kev->filter);
  738         if (kfilter == NULL || kfilter->filtops == NULL) {
  739                 /* filter not found or not implemented */
  740                 return (EINVAL);
  741         }
  742 
  743         /* search if knote already exists */
  744         if (kfilter->filtops->f_isfd) {
  745                 /* monitoring a file descriptor */
  746                 if ((fp = fd_getfile(fdp, kev->ident)) == NULL)
  747                         return (EBADF); /* validate descriptor */
  748                 FILE_USE(fp);
  749 
  750                 if (kev->ident < fdp->fd_knlistsize) {
  751                         SLIST_FOREACH(kn, &fdp->fd_knlist[kev->ident], kn_link)
  752                                 if (kq == kn->kn_kq &&
  753                                     kev->filter == kn->kn_filter)
  754                                         break;
  755                 }
  756         } else {
  757                 /*
  758                  * not monitoring a file descriptor, so
  759                  * lookup knotes in internal hash table
  760                  */
  761                 if (fdp->fd_knhashmask != 0) {
  762                         struct klist *list;
  763 
  764                         list = &fdp->fd_knhash[
  765                             KN_HASH((u_long)kev->ident, fdp->fd_knhashmask)];
  766                         SLIST_FOREACH(kn, list, kn_link)
  767                                 if (kev->ident == kn->kn_id &&
  768                                     kq == kn->kn_kq &&
  769                                     kev->filter == kn->kn_filter)
  770                                         break;
  771                 }
  772         }
  773 
  774         if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
  775                 error = ENOENT;         /* filter not found */
  776                 goto done;
  777         }
  778 
  779         /*
  780          * kn now contains the matching knote, or NULL if no match
  781          */
  782         if (kev->flags & EV_ADD) {
  783                 /* add knote */
  784 
  785                 if (kn == NULL) {
  786                         /* create new knote */
  787                         kn = pool_get(&knote_pool, PR_WAITOK);
  788                         if (kn == NULL) {
  789                                 error = ENOMEM;
  790                                 goto done;
  791                         }
  792                         kn->kn_fp = fp;
  793                         kn->kn_kq = kq;
  794                         kn->kn_fop = kfilter->filtops;
  795 
  796                         /*
  797                          * apply reference count to knote structure, and
  798                          * do not release it at the end of this routine.
  799                          */
  800                         fp = NULL;
  801 
  802                         kn->kn_sfflags = kev->fflags;
  803                         kn->kn_sdata = kev->data;
  804                         kev->fflags = 0;
  805                         kev->data = 0;
  806                         kn->kn_kevent = *kev;
  807 
  808                         knote_attach(kn, fdp);
  809                         if ((error = kfilter->filtops->f_attach(kn)) != 0) {
  810                                 knote_drop(kn, p, fdp);
  811                                 goto done;
  812                         }
  813                 } else {
  814                         /* modify existing knote */
  815 
  816                         /*
  817                          * The user may change some filter values after the
  818                          * initial EV_ADD, but doing so will not reset any
  819                          * filter which have already been triggered.
  820                          */
  821                         kn->kn_sfflags = kev->fflags;
  822                         kn->kn_sdata = kev->data;
  823                         kn->kn_kevent.udata = kev->udata;
  824                 }
  825 
  826                 s = splsched();
  827                 if (kn->kn_fop->f_event(kn, 0))
  828                         KNOTE_ACTIVATE(kn);
  829                 splx(s);
  830 
  831         } else if (kev->flags & EV_DELETE) {    /* delete knote */
  832                 kn->kn_fop->f_detach(kn);
  833                 knote_drop(kn, p, fdp);
  834                 goto done;
  835         }
  836 
  837         /* disable knote */
  838         if ((kev->flags & EV_DISABLE) &&
  839             ((kn->kn_status & KN_DISABLED) == 0)) {
  840                 s = splsched();
  841                 kn->kn_status |= KN_DISABLED;
  842                 splx(s);
  843         }
  844 
  845         /* enable knote */
  846         if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
  847                 s = splsched();
  848                 kn->kn_status &= ~KN_DISABLED;
  849                 if ((kn->kn_status & KN_ACTIVE) &&
  850                     ((kn->kn_status & KN_QUEUED) == 0))
  851                         knote_enqueue(kn);
  852                 splx(s);
  853         }
  854 
  855  done:
  856         if (fp != NULL)
  857                 FILE_UNUSE(fp, p);
  858         return (error);
  859 }
  860 
  861 /*
  862  * Scan through the list of events on fp (for a maximum of maxevents),
  863  * returning the results into ulistp. The timeout is determined by tsp:
  864  * if NULL, wait indefinitely; if zero-valued, perform a poll; otherwise
  865  * wait as appropriate.
  866  */
  867 static int
  868 kqueue_scan(struct file *fp, size_t maxevents, struct kevent *ulistp,
  869         const struct timespec *tsp, struct proc *p, register_t *retval)
  870 {
  871         struct kqueue   *kq;
  872         struct kevent   *kevp;
  873         struct timeval  atv;
  874         struct knote    *kn, *marker=NULL;
  875         size_t          count, nkev;
  876         int             s, timeout, error;
  877 
  878         kq = (struct kqueue *)fp->f_data;
  879         count = maxevents;
  880         nkev = error = 0;
  881         if (count == 0)
  882                 goto done;
  883 
  884         if (tsp) {                              /* timeout supplied */
  885                 TIMESPEC_TO_TIMEVAL(&atv, tsp);
  886                 if (itimerfix(&atv)) {
  887                         error = EINVAL;
  888                         goto done;
  889                 }
  890                 s = splclock();
  891                 timeradd(&atv, &time, &atv);    /* calc. time to wait until */
  892                 splx(s);
  893                 timeout = hzto(&atv);
  894                 if (timeout <= 0)
  895                         timeout = -1;           /* do poll */
  896         } else {
  897                 /* no timeout, wait forever */
  898                 timeout = 0;
  899         }
  900 
  901         MALLOC(marker, struct knote *, sizeof(*marker), M_KEVENT, M_WAITOK);
  902         memset(marker, 0, sizeof(*marker));
  903 
  904         goto start;
  905 
  906  retry:
  907         if (tsp) {
  908                 /*
  909                  * We have to recalculate the timeout on every retry.
  910                  */
  911                 timeout = hzto(&atv);
  912                 if (timeout <= 0)
  913                         goto done;
  914         }
  915 
  916  start:
  917         kevp = kq->kq_kev;
  918         s = splsched();
  919         simple_lock(&kq->kq_lock);
  920         if (kq->kq_count == 0) {
  921                 if (timeout < 0) {
  922                         error = EWOULDBLOCK;
  923                         simple_unlock(&kq->kq_lock);
  924                 } else {
  925                         kq->kq_state |= KQ_SLEEP;
  926                         error = ltsleep(kq, PSOCK | PCATCH | PNORELOCK,
  927                                         "kqread", timeout, &kq->kq_lock);
  928                 }
  929                 splx(s);
  930                 if (error == 0)
  931                         goto retry;
  932                 /* don't restart after signals... */
  933                 if (error == ERESTART)
  934                         error = EINTR;
  935                 else if (error == EWOULDBLOCK)
  936                         error = 0;
  937                 goto done;
  938         }
  939 
  940         /* mark end of knote list */
  941         TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe);
  942         simple_unlock(&kq->kq_lock);
  943 
  944         while (count) {                         /* while user wants data ... */
  945                 simple_lock(&kq->kq_lock);
  946                 kn = TAILQ_FIRST(&kq->kq_head); /* get next knote */
  947                 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
  948                 if (kn == marker) {             /* if it's our marker, stop */
  949                         /* What if it's someone else's marker? */
  950                         simple_unlock(&kq->kq_lock);
  951                         splx(s);
  952                         if (count == maxevents)
  953                                 goto retry;
  954                         goto done;
  955                 }
  956                 kq->kq_count--;
  957                 simple_unlock(&kq->kq_lock);
  958 
  959                 if (kn->kn_status & KN_DISABLED) {
  960                         /* don't want disabled events */
  961                         kn->kn_status &= ~KN_QUEUED;
  962                         continue;
  963                 }
  964                 if ((kn->kn_flags & EV_ONESHOT) == 0 &&
  965                     kn->kn_fop->f_event(kn, 0) == 0) {
  966                         /*
  967                          * non-ONESHOT event that hasn't
  968                          * triggered again, so de-queue.
  969                          */
  970                         kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
  971                         continue;
  972                 }
  973                 *kevp = kn->kn_kevent;
  974                 kevp++;
  975                 nkev++;
  976                 if (kn->kn_flags & EV_ONESHOT) {
  977                         /* delete ONESHOT events after retrieval */
  978                         kn->kn_status &= ~KN_QUEUED;
  979                         splx(s);
  980                         kn->kn_fop->f_detach(kn);
  981                         knote_drop(kn, p, p->p_fd);
  982                         s = splsched();
  983                 } else if (kn->kn_flags & EV_CLEAR) {
  984                         /* clear state after retrieval */
  985                         kn->kn_data = 0;
  986                         kn->kn_fflags = 0;
  987                         kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
  988                 } else {
  989                         /* add event back on list */
  990                         simple_lock(&kq->kq_lock);
  991                         TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
  992                         kq->kq_count++;
  993                         simple_unlock(&kq->kq_lock);
  994                 }
  995                 count--;
  996                 if (nkev == KQ_NEVENTS) {
  997                         /* do copyouts in KQ_NEVENTS chunks */
  998                         splx(s);
  999                         error = copyout((caddr_t)&kq->kq_kev, (caddr_t)ulistp,
 1000                             sizeof(struct kevent) * nkev);
 1001                         ulistp += nkev;
 1002                         nkev = 0;
 1003                         kevp = kq->kq_kev;
 1004                         s = splsched();
 1005                         if (error)
 1006                                 break;
 1007                 }
 1008         }
 1009 
 1010         /* remove marker */
 1011         simple_lock(&kq->kq_lock);
 1012         TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe);
 1013         simple_unlock(&kq->kq_lock);
 1014         splx(s);
 1015  done:
 1016         if (marker)
 1017                 FREE(marker, M_KEVENT);
 1018 
 1019         if (nkev != 0) {
 1020                 /* copyout remaining events */
 1021                 error = copyout((caddr_t)&kq->kq_kev, (caddr_t)ulistp,
 1022                     sizeof(struct kevent) * nkev);
 1023         }
 1024         *retval = maxevents - count;
 1025 
 1026         return (error);
 1027 }
 1028 
 1029 /*
 1030  * struct fileops read method for a kqueue descriptor.
 1031  * Not implemented.
 1032  * XXX: This could be expanded to call kqueue_scan, if desired.
 1033  */
 1034 /*ARGSUSED*/
 1035 static int
 1036 kqueue_read(struct file *fp, off_t *offset, struct uio *uio,
 1037         struct ucred *cred, int flags)
 1038 {
 1039 
 1040         return (ENXIO);
 1041 }
 1042 
 1043 /*
 1044  * struct fileops write method for a kqueue descriptor.
 1045  * Not implemented.
 1046  */
 1047 /*ARGSUSED*/
 1048 static int
 1049 kqueue_write(struct file *fp, off_t *offset, struct uio *uio,
 1050         struct ucred *cred, int flags)
 1051 {
 1052 
 1053         return (ENXIO);
 1054 }
 1055 
 1056 /*
 1057  * struct fileops ioctl method for a kqueue descriptor.
 1058  *
 1059  * Two ioctls are currently supported. They both use struct kfilter_mapping:
 1060  *      KFILTER_BYFILTER        find the name for a filter, and return the
 1061  *                              result in name, which is of size len.
 1062  *      KFILTER_BYNAME          find the filter for a name. len is ignored.
 1063  */
 1064 /*ARGSUSED*/
 1065 static int
 1066 kqueue_ioctl(struct file *fp, u_long com, void *data, struct proc *p)
 1067 {
 1068         struct kfilter_mapping  *km;
 1069         const struct kfilter    *kfilter;
 1070         char                    *name;
 1071         int                     error;
 1072 
 1073         km = (struct kfilter_mapping *)data;
 1074         error = 0;
 1075 
 1076         switch (com) {
 1077         case KFILTER_BYFILTER:  /* convert filter -> name */
 1078                 kfilter = kfilter_byfilter(km->filter);
 1079                 if (kfilter != NULL)
 1080                         error = copyoutstr(kfilter->name, km->name, km->len,
 1081                             NULL);
 1082                 else
 1083                         error = ENOENT;
 1084                 break;
 1085 
 1086         case KFILTER_BYNAME:    /* convert name -> filter */
 1087                 MALLOC(name, char *, KFILTER_MAXNAME, M_KEVENT, M_WAITOK);
 1088                 error = copyinstr(km->name, name, KFILTER_MAXNAME, NULL);
 1089                 if (error) {
 1090                         FREE(name, M_KEVENT);
 1091                         break;
 1092                 }
 1093                 kfilter = kfilter_byname(name);
 1094                 if (kfilter != NULL)
 1095                         km->filter = kfilter->filter;
 1096                 else
 1097                         error = ENOENT;
 1098                 FREE(name, M_KEVENT);
 1099                 break;
 1100 
 1101         default:
 1102                 error = ENOTTY;
 1103 
 1104         }
 1105         return (error);
 1106 }
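
A userspace consumer could map a filter name to its numeric id with the KFILTER_BYNAME ioctl roughly as follows (a hedged sketch, not part of this file; struct kfilter_mapping and the ioctl numbers come from <sys/event.h>):

    #include <sys/types.h>
    #include <sys/event.h>
    #include <sys/ioctl.h>
    #include <stdint.h>
    #include <string.h>

    /* Assumes kq is an existing kqueue descriptor. */
    static int
    filter_id_by_name(int kq, const char *name, uint32_t *idp)
    {
            struct kfilter_mapping km;

            memset(&km, 0, sizeof(km));
            km.name = (char *)name;         /* input: the name to look up */
            if (ioctl(kq, KFILTER_BYNAME, &km) == -1)
                    return (-1);
            *idp = km.filter;               /* output: the matching filter id */
            return (0);
    }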
 1107 
 1108 /*
 1109  * struct fileops fcntl method for a kqueue descriptor.
 1110  * Not implemented.
 1111  */
 1112 /*ARGSUSED*/
 1113 static int
 1114 kqueue_fcntl(struct file *fp, u_int com, void *data, struct proc *p)
 1115 {
 1116 
 1117         return (ENOTTY);
 1118 }
 1119 
 1120 /*
 1121  * struct fileops poll method for a kqueue descriptor.
 1122  * Determine if kqueue has events pending.
 1123  */
 1124 static int
 1125 kqueue_poll(struct file *fp, int events, struct proc *p)
 1126 {
 1127         struct kqueue   *kq;
 1128         int             revents;
 1129 
 1130         kq = (struct kqueue *)fp->f_data;
 1131         revents = 0;
 1132         if (events & (POLLIN | POLLRDNORM)) {
 1133                 if (kq->kq_count) {
 1134                         revents |= events & (POLLIN | POLLRDNORM);
 1135                 } else {
 1136                         selrecord(p, &kq->kq_sel);
 1137                 }
 1138         }
 1139         return (revents);
 1140 }
 1141 
 1142 /*
 1143  * struct fileops stat method for a kqueue descriptor.
 1144  * Returns dummy info, with st_size being number of events pending.
 1145  */
 1146 static int
 1147 kqueue_stat(struct file *fp, struct stat *st, struct proc *p)
 1148 {
 1149         struct kqueue   *kq;
 1150 
 1151         kq = (struct kqueue *)fp->f_data;
 1152         memset((void *)st, 0, sizeof(*st));
 1153         st->st_size = kq->kq_count;
 1154         st->st_blksize = sizeof(struct kevent);
 1155         st->st_mode = S_IFIFO;
 1156         return (0);
 1157 }
 1158 
 1159 /*
 1160  * struct fileops close method for a kqueue descriptor.
 1161  * Cleans up kqueue.
 1162  */
 1163 static int
 1164 kqueue_close(struct file *fp, struct proc *p)
 1165 {
 1166         struct kqueue   *kq;
 1167         struct filedesc *fdp;
 1168         struct knote    **knp, *kn, *kn0;
 1169         int             i;
 1170 
 1171         kq = (struct kqueue *)fp->f_data;
 1172         fdp = p->p_fd;
 1173         for (i = 0; i < fdp->fd_knlistsize; i++) {
 1174                 knp = &SLIST_FIRST(&fdp->fd_knlist[i]);
 1175                 kn = *knp;
 1176                 while (kn != NULL) {
 1177                         kn0 = SLIST_NEXT(kn, kn_link);
 1178                         if (kq == kn->kn_kq) {
 1179                                 kn->kn_fop->f_detach(kn);
 1180                                 FILE_UNUSE(kn->kn_fp, p);
 1181                                 pool_put(&knote_pool, kn);
 1182                                 *knp = kn0;
 1183                         } else {
 1184                                 knp = &SLIST_NEXT(kn, kn_link);
 1185                         }
 1186                         kn = kn0;
 1187                 }
 1188         }
 1189         if (fdp->fd_knhashmask != 0) {
 1190                 for (i = 0; i < fdp->fd_knhashmask + 1; i++) {
 1191                         knp = &SLIST_FIRST(&fdp->fd_knhash[i]);
 1192                         kn = *knp;
 1193                         while (kn != NULL) {
 1194                                 kn0 = SLIST_NEXT(kn, kn_link);
 1195                                 if (kq == kn->kn_kq) {
 1196                                         kn->kn_fop->f_detach(kn);
 1197                                         /* XXX non-fd release of kn->kn_ptr */
 1198                                         pool_put(&knote_pool, kn);
 1199                                         *knp = kn0;
 1200                                 } else {
 1201                                         knp = &SLIST_NEXT(kn, kn_link);
 1202                                 }
 1203                                 kn = kn0;
 1204                         }
 1205                 }
 1206         }
 1207         pool_put(&kqueue_pool, kq);
 1208         fp->f_data = NULL;
 1209 
 1210         return (0);
 1211 }
 1212 
 1213 /*
 1214  * wakeup a kqueue
 1215  */
 1216 static void
 1217 kqueue_wakeup(struct kqueue *kq)
 1218 {
 1219         int s;
 1220 
 1221         s = splsched();
 1222         simple_lock(&kq->kq_lock);
 1223         if (kq->kq_state & KQ_SLEEP) {          /* if currently sleeping ...  */
 1224                 kq->kq_state &= ~KQ_SLEEP;
 1225                 wakeup(kq);                     /* ... wakeup */
 1226         }
 1227 
 1228         /* Notify select/poll and kevent. */
 1229         selnotify(&kq->kq_sel, 0);
 1230         simple_unlock(&kq->kq_lock);
 1231         splx(s);
 1232 }
 1233 
 1234 /*
 1235  * struct fileops kqfilter method for a kqueue descriptor.
 1236  * Event triggered when monitored kqueue changes.
 1237  */
 1238 /*ARGSUSED*/
 1239 static int
 1240 kqueue_kqfilter(struct file *fp, struct knote *kn)
 1241 {
 1242         struct kqueue *kq;
 1243 
 1244         KASSERT(fp == kn->kn_fp);
 1245         kq = (struct kqueue *)kn->kn_fp->f_data;
 1246         if (kn->kn_filter != EVFILT_READ)
 1247                 return (1);
 1248         kn->kn_fop = &kqread_filtops;
 1249         SLIST_INSERT_HEAD(&kq->kq_sel.sel_klist, kn, kn_selnext);
 1250         return (0);
 1251 }
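
Because a kqueue descriptor exposes this EVFILT_READ filter, one kqueue can monitor another: kn_data then reflects kq_count, the number of events pending on the inner queue. A userspace sketch (illustrative only):

    #include <sys/event.h>

    /* Assumes outer and inner are both kqueue descriptors. */
    static int
    watch_kqueue(int outer, int inner)
    {
            struct kevent kev;

            EV_SET(&kev, inner, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, 0);
            return (kevent(outer, &kev, 1, NULL, 0, NULL));
    }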
 1252 
 1253 
 1254 /*
 1255  * Walk down a list of knotes, activating them if their event has triggered.
 1256  */
 1257 void
 1258 knote(struct klist *list, long hint)
 1259 {
 1260         struct knote *kn;
 1261 
 1262         SLIST_FOREACH(kn, list, kn_selnext)
 1263                 if (kn->kn_fop->f_event(kn, hint))
 1264                         KNOTE_ACTIVATE(kn);
 1265 }
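
Drivers usually reach knote() indirectly: when new data arrives they call selnotify() on their selinfo (exactly as kqueue_wakeup() does above), which wakes select/poll sleepers and runs the attached filters through this routine. A hedged sketch of that pattern; the example_softc structure and receive path are invented for illustration:

    #include <sys/param.h>
    #include <sys/select.h>
    #include <sys/event.h>

    struct example_softc {
            struct selinfo  sc_rsel;        /* select/poll/kqueue consumers */
            /* ... device receive state ... */
    };

    static void
    exampledev_rxintr(struct example_softc *sc)
    {

            /* ... queue the newly arrived data on the softc ... */

            /* Wake select/poll waiters and activate any attached knotes. */
            selnotify(&sc->sc_rsel, 0);
    }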
 1266 
 1267 /*
 1268  * Remove all knotes from a specified klist
 1269  */
 1270 void
 1271 knote_remove(struct proc *p, struct klist *list)
 1272 {
 1273         struct knote *kn;
 1274 
 1275         while ((kn = SLIST_FIRST(list)) != NULL) {
 1276                 kn->kn_fop->f_detach(kn);
 1277                 knote_drop(kn, p, p->p_fd);
 1278         }
 1279 }
 1280 
 1281 /*
 1282  * Remove all knotes referencing a specified fd
 1283  */
 1284 void
 1285 knote_fdclose(struct proc *p, int fd)
 1286 {
 1287         struct filedesc *fdp;
 1288         struct klist    *list;
 1289 
 1290         fdp = p->p_fd;
 1291         list = &fdp->fd_knlist[fd];
 1292         knote_remove(p, list);
 1293 }
 1294 
 1295 /*
 1296  * Attach a new knote to a file descriptor
 1297  */
 1298 static void
 1299 knote_attach(struct knote *kn, struct filedesc *fdp)
 1300 {
 1301         struct klist    *list;
 1302         int             size;
 1303 
 1304         if (! kn->kn_fop->f_isfd) {
 1305                 /* if knote is not on an fd, store on internal hash table */
 1306                 if (fdp->fd_knhashmask == 0)
 1307                         fdp->fd_knhash = hashinit(KN_HASHSIZE, HASH_LIST,
 1308                             M_KEVENT, M_WAITOK, &fdp->fd_knhashmask);
 1309                 list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
 1310                 goto done;
 1311         }
 1312 
 1313         /*
 1314          * otherwise, knote is on an fd.
 1315          * knotes are stored in fd_knlist indexed by kn->kn_id.
 1316          */
 1317         if (fdp->fd_knlistsize <= kn->kn_id) {
 1318                 /* expand list, it's too small */
 1319                 size = fdp->fd_knlistsize;
 1320                 while (size <= kn->kn_id) {
 1321                         /* grow in KQ_EXTENT chunks */
 1322                         size += KQ_EXTENT;
 1323                 }
 1324                 list = malloc(size * sizeof(struct klist *), M_KEVENT,M_WAITOK);
 1325                 if (fdp->fd_knlist) {
 1326                         /* copy existing knlist */
 1327                         memcpy((caddr_t)list, (caddr_t)fdp->fd_knlist,
 1328                             fdp->fd_knlistsize * sizeof(struct klist *));
 1329                 }
 1330                 /*
 1331                  * Zero new memory. Stylistically, SLIST_INIT() should be
 1332                  * used here, but that does the same thing as the memset() anyway.
 1333                  */
 1334                 memset(&list[fdp->fd_knlistsize], 0,
 1335                     (size - fdp->fd_knlistsize) * sizeof(struct klist *));
 1336 
 1337                 /* switch to new knlist */
 1338                 if (fdp->fd_knlist != NULL)
 1339                         free(fdp->fd_knlist, M_KEVENT);
 1340                 fdp->fd_knlistsize = size;
 1341                 fdp->fd_knlist = list;
 1342         }
 1343 
 1344         /* get list head for this fd */
 1345         list = &fdp->fd_knlist[kn->kn_id];
 1346  done:
 1347         /* add new knote */
 1348         SLIST_INSERT_HEAD(list, kn, kn_link);
 1349         kn->kn_status = 0;
 1350 }
 1351 
 1352 /*
 1353  * Drop knote.
 1354  * Should be called at spl == 0, since we don't want to hold spl
 1355  * while calling FILE_UNUSE and free.
 1356  */
 1357 static void
 1358 knote_drop(struct knote *kn, struct proc *p, struct filedesc *fdp)
 1359 {
 1360         struct klist    *list;
 1361 
 1362         if (kn->kn_fop->f_isfd)
 1363                 list = &fdp->fd_knlist[kn->kn_id];
 1364         else
 1365                 list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
 1366 
 1367         SLIST_REMOVE(list, kn, knote, kn_link);
 1368         if (kn->kn_status & KN_QUEUED)
 1369                 knote_dequeue(kn);
 1370         if (kn->kn_fop->f_isfd)
 1371                 FILE_UNUSE(kn->kn_fp, p);
 1372         pool_put(&knote_pool, kn);
 1373 }
 1374 
 1375 
 1376 /*
 1377  * Queue new event for knote.
 1378  */
 1379 static void
 1380 knote_enqueue(struct knote *kn)
 1381 {
 1382         struct kqueue   *kq;
 1383         int             s;
 1384 
 1385         kq = kn->kn_kq;
 1386         KASSERT((kn->kn_status & KN_QUEUED) == 0);
 1387 
 1388         s = splsched();
 1389         simple_lock(&kq->kq_lock);
 1390         TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
 1391         kn->kn_status |= KN_QUEUED;
 1392         kq->kq_count++;
 1393         simple_unlock(&kq->kq_lock);
 1394         splx(s);
 1395         kqueue_wakeup(kq);
 1396 }
 1397 
 1398 /*
 1399  * Dequeue event for knote.
 1400  */
 1401 static void
 1402 knote_dequeue(struct knote *kn)
 1403 {
 1404         struct kqueue   *kq;
 1405         int             s;
 1406 
 1407         KASSERT(kn->kn_status & KN_QUEUED);
 1408         kq = kn->kn_kq;
 1409 
 1410         s = splsched();
 1411         simple_lock(&kq->kq_lock);
 1412         TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
 1413         kn->kn_status &= ~KN_QUEUED;
 1414         kq->kq_count--;
 1415         simple_unlock(&kq->kq_lock);
 1416         splx(s);
 1417 }

This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.