FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_event.c


/*	$OpenBSD: kern_event.c,v 1.194 2022/11/09 22:25:36 claudio Exp $	*/

/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_event.c,v 1.22 2001/02/23 20:32:42 jlemon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/pledge.h>
#include <sys/malloc.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/fcntl.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/ktrace.h>
#include <sys/pool.h>
#include <sys/stat.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/time.h>
#include <sys/timeout.h>
#include <sys/vnode.h>
#include <sys/wait.h>

#ifdef DIAGNOSTIC
#define KLIST_ASSERT_LOCKED(kl) do {                                    \
        if ((kl)->kl_ops != NULL)                                       \
                (kl)->kl_ops->klo_assertlk((kl)->kl_arg);               \
        else                                                            \
                KERNEL_ASSERT_LOCKED();                                 \
} while (0)
#else
#define KLIST_ASSERT_LOCKED(kl) ((void)(kl))
#endif

struct  kqueue *kqueue_alloc(struct filedesc *);
void    kqueue_terminate(struct proc *p, struct kqueue *);
void    KQREF(struct kqueue *);
void    KQRELE(struct kqueue *);

void    kqueue_purge(struct proc *, struct kqueue *);
int     kqueue_sleep(struct kqueue *, struct timespec *);

int     kqueue_read(struct file *, struct uio *, int);
int     kqueue_write(struct file *, struct uio *, int);
int     kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
                    struct proc *p);
int     kqueue_kqfilter(struct file *fp, struct knote *kn);
int     kqueue_stat(struct file *fp, struct stat *st, struct proc *p);
int     kqueue_close(struct file *fp, struct proc *p);
void    kqueue_wakeup(struct kqueue *kq);

#ifdef KQUEUE_DEBUG
void    kqueue_do_check(struct kqueue *kq, const char *func, int line);
#define kqueue_check(kq)        kqueue_do_check((kq), __func__, __LINE__)
#else
#define kqueue_check(kq)        do {} while (0)
#endif

static int      filter_attach(struct knote *kn);
static void     filter_detach(struct knote *kn);
static int      filter_event(struct knote *kn, long hint);
static int      filter_modify(struct kevent *kev, struct knote *kn);
static int      filter_process(struct knote *kn, struct kevent *kev);
static void     kqueue_expand_hash(struct kqueue *kq);
static void     kqueue_expand_list(struct kqueue *kq, int fd);
static void     kqueue_task(void *);
static int      klist_lock(struct klist *);
static void     klist_unlock(struct klist *, int);

const struct fileops kqueueops = {
        .fo_read        = kqueue_read,
        .fo_write       = kqueue_write,
        .fo_ioctl       = kqueue_ioctl,
        .fo_kqfilter    = kqueue_kqfilter,
        .fo_stat        = kqueue_stat,
        .fo_close       = kqueue_close
};

void    knote_attach(struct knote *kn);
void    knote_detach(struct knote *kn);
void    knote_drop(struct knote *kn, struct proc *p);
void    knote_enqueue(struct knote *kn);
void    knote_dequeue(struct knote *kn);
int     knote_acquire(struct knote *kn, struct klist *, int);
void    knote_release(struct knote *kn);
void    knote_activate(struct knote *kn);
void    knote_remove(struct proc *p, struct kqueue *kq, struct knlist **plist,
            int idx, int purge);

void    filt_kqdetach(struct knote *kn);
int     filt_kqueue(struct knote *kn, long hint);
int     filt_kqueuemodify(struct kevent *kev, struct knote *kn);
int     filt_kqueueprocess(struct knote *kn, struct kevent *kev);
int     filt_kqueue_common(struct knote *kn, struct kqueue *kq);
int     filt_procattach(struct knote *kn);
void    filt_procdetach(struct knote *kn);
int     filt_proc(struct knote *kn, long hint);
int     filt_fileattach(struct knote *kn);
void    filt_timerexpire(void *knx);
int     filt_timerattach(struct knote *kn);
void    filt_timerdetach(struct knote *kn);
int     filt_timermodify(struct kevent *kev, struct knote *kn);
int     filt_timerprocess(struct knote *kn, struct kevent *kev);
void    filt_seltruedetach(struct knote *kn);

const struct filterops kqread_filtops = {
        .f_flags        = FILTEROP_ISFD | FILTEROP_MPSAFE,
        .f_attach       = NULL,
        .f_detach       = filt_kqdetach,
        .f_event        = filt_kqueue,
        .f_modify       = filt_kqueuemodify,
        .f_process      = filt_kqueueprocess,
};

const struct filterops proc_filtops = {
        .f_flags        = 0,
        .f_attach       = filt_procattach,
        .f_detach       = filt_procdetach,
        .f_event        = filt_proc,
};

const struct filterops file_filtops = {
        .f_flags        = FILTEROP_ISFD | FILTEROP_MPSAFE,
        .f_attach       = filt_fileattach,
        .f_detach       = NULL,
        .f_event        = NULL,
};

const struct filterops timer_filtops = {
        .f_flags        = 0,
        .f_attach       = filt_timerattach,
        .f_detach       = filt_timerdetach,
        .f_event        = NULL,
        .f_modify       = filt_timermodify,
        .f_process      = filt_timerprocess,
};

struct  pool knote_pool;
struct  pool kqueue_pool;
struct  mutex kqueue_klist_lock = MUTEX_INITIALIZER(IPL_MPFLOOR);
int kq_ntimeouts = 0;
int kq_timeoutmax = (4 * 1024);

#define KN_HASH(val, mask)      (((val) ^ (val >> 8)) & (mask))

/*
 * Table for all system-defined filters.
 */
const struct filterops *const sysfilt_ops[] = {
        &file_filtops,                  /* EVFILT_READ */
        &file_filtops,                  /* EVFILT_WRITE */
        NULL, /*&aio_filtops,*/         /* EVFILT_AIO */
        &file_filtops,                  /* EVFILT_VNODE */
        &proc_filtops,                  /* EVFILT_PROC */
        &sig_filtops,                   /* EVFILT_SIGNAL */
        &timer_filtops,                 /* EVFILT_TIMER */
        &file_filtops,                  /* EVFILT_DEVICE */
        &file_filtops,                  /* EVFILT_EXCEPT */
};

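/*
 * Illustration (not part of this file): the EVFILT_* constants are small
 * negative integers, and kqueue_register() below indexes this table with
 * ~kev->filter, so EVFILT_READ (-1) maps to slot 0, EVFILT_WRITE (-2) to
 * slot 1, and so on.  A minimal userland sketch of that mapping, assuming
 * only <sys/event.h> and a C99 compiler:
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>
#include <stdio.h>

int
main(void)
{
	/* ~(-1) == 0: EVFILT_READ selects the first sysfilt_ops slot. */
	printf("EVFILT_READ  -> slot %d\n", ~EVFILT_READ);
	printf("EVFILT_TIMER -> slot %d\n", ~EVFILT_TIMER);
	return 0;
}
#endif
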
void
KQREF(struct kqueue *kq)
{
        refcnt_take(&kq->kq_refcnt);
}

void
KQRELE(struct kqueue *kq)
{
        struct filedesc *fdp;

        if (refcnt_rele(&kq->kq_refcnt) == 0)
                return;

        fdp = kq->kq_fdp;
        if (rw_status(&fdp->fd_lock) == RW_WRITE) {
                LIST_REMOVE(kq, kq_next);
        } else {
                fdplock(fdp);
                LIST_REMOVE(kq, kq_next);
                fdpunlock(fdp);
        }

        KASSERT(TAILQ_EMPTY(&kq->kq_head));
        KASSERT(kq->kq_nknotes == 0);

        free(kq->kq_knlist, M_KEVENT, kq->kq_knlistsize *
            sizeof(struct knlist));
        hashfree(kq->kq_knhash, KN_HASHSIZE, M_KEVENT);
        klist_free(&kq->kq_klist);
        pool_put(&kqueue_pool, kq);
}

void
kqueue_init(void)
{
        pool_init(&kqueue_pool, sizeof(struct kqueue), 0, IPL_MPFLOOR,
            PR_WAITOK, "kqueuepl", NULL);
        pool_init(&knote_pool, sizeof(struct knote), 0, IPL_MPFLOOR,
            PR_WAITOK, "knotepl", NULL);
}

void
kqueue_init_percpu(void)
{
        pool_cache_init(&knote_pool);
}

int
filt_fileattach(struct knote *kn)
{
        struct file *fp = kn->kn_fp;

        return fp->f_ops->fo_kqfilter(fp, kn);
}

int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
        struct kqueue *kq = kn->kn_fp->f_data;

        if (kn->kn_filter != EVFILT_READ)
                return (EINVAL);

        kn->kn_fop = &kqread_filtops;
        klist_insert(&kq->kq_klist, kn);
        return (0);
}

void
filt_kqdetach(struct knote *kn)
{
        struct kqueue *kq = kn->kn_fp->f_data;

        klist_remove(&kq->kq_klist, kn);
}

int
filt_kqueue_common(struct knote *kn, struct kqueue *kq)
{
        MUTEX_ASSERT_LOCKED(&kq->kq_lock);

        kn->kn_data = kq->kq_count;

        return (kn->kn_data > 0);
}

int
filt_kqueue(struct knote *kn, long hint)
{
        struct kqueue *kq = kn->kn_fp->f_data;
        int active;

        mtx_enter(&kq->kq_lock);
        active = filt_kqueue_common(kn, kq);
        mtx_leave(&kq->kq_lock);

        return (active);
}

int
filt_kqueuemodify(struct kevent *kev, struct knote *kn)
{
        struct kqueue *kq = kn->kn_fp->f_data;
        int active;

        mtx_enter(&kq->kq_lock);
        knote_assign(kev, kn);
        active = filt_kqueue_common(kn, kq);
        mtx_leave(&kq->kq_lock);

        return (active);
}

int
filt_kqueueprocess(struct knote *kn, struct kevent *kev)
{
        struct kqueue *kq = kn->kn_fp->f_data;
        int active;

        mtx_enter(&kq->kq_lock);
        if (kev != NULL && (kn->kn_flags & EV_ONESHOT))
                active = 1;
        else
                active = filt_kqueue_common(kn, kq);
        if (active)
                knote_submit(kn, kev);
        mtx_leave(&kq->kq_lock);

        return (active);
}

int
filt_procattach(struct knote *kn)
{
        struct process *pr;
        int s;

        if ((curproc->p_p->ps_flags & PS_PLEDGE) &&
            (curproc->p_p->ps_pledge & PLEDGE_PROC) == 0)
                return pledge_fail(curproc, EPERM, PLEDGE_PROC);

        if (kn->kn_id > PID_MAX)
                return ESRCH;

        pr = prfind(kn->kn_id);
        if (pr == NULL)
                return (ESRCH);

        /* exiting processes can't be specified */
        if (pr->ps_flags & PS_EXITING)
                return (ESRCH);

        kn->kn_ptr.p_process = pr;
        kn->kn_flags |= EV_CLEAR;               /* automatically set */

        /*
         * internal flag indicating registration done by kernel
         */
        if (kn->kn_flags & EV_FLAG1) {
                kn->kn_data = kn->kn_sdata;             /* ppid */
                kn->kn_fflags = NOTE_CHILD;
                kn->kn_flags &= ~EV_FLAG1;
        }

        s = splhigh();
        klist_insert_locked(&pr->ps_klist, kn);
        splx(s);

        return (0);
}

/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
void
filt_procdetach(struct knote *kn)
{
        struct kqueue *kq = kn->kn_kq;
        struct process *pr = kn->kn_ptr.p_process;
        int s, status;

        mtx_enter(&kq->kq_lock);
        status = kn->kn_status;
        mtx_leave(&kq->kq_lock);

        if (status & KN_DETACHED)
                return;

        s = splhigh();
        klist_remove_locked(&pr->ps_klist, kn);
        splx(s);
}

int
filt_proc(struct knote *kn, long hint)
{
        struct kqueue *kq = kn->kn_kq;
        u_int event;

        /*
         * mask off extra data
         */
        event = (u_int)hint & NOTE_PCTRLMASK;

        /*
         * if the user is interested in this event, record it.
         */
        if (kn->kn_sfflags & event)
                kn->kn_fflags |= event;

        /*
         * process is gone, so flag the event as finished and remove it
         * from the process's klist
         */
        if (event == NOTE_EXIT) {
                struct process *pr = kn->kn_ptr.p_process;
                int s;

                mtx_enter(&kq->kq_lock);
                kn->kn_status |= KN_DETACHED;
                mtx_leave(&kq->kq_lock);

                s = splhigh();
                kn->kn_flags |= (EV_EOF | EV_ONESHOT);
                kn->kn_data = W_EXITCODE(pr->ps_xexit, pr->ps_xsig);
                klist_remove_locked(&pr->ps_klist, kn);
                splx(s);
                return (1);
        }

        /*
         * process forked, and user wants to track the new process,
         * so attach a new knote to it, and immediately report an
         * event with the parent's pid.
         */
        if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
                struct kevent kev;
                int error;

                /*
                 * register knote with new process.
                 */
                memset(&kev, 0, sizeof(kev));
                kev.ident = hint & NOTE_PDATAMASK;      /* pid */
                kev.filter = kn->kn_filter;
                kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
                kev.fflags = kn->kn_sfflags;
                kev.data = kn->kn_id;                   /* parent */
                kev.udata = kn->kn_udata;               /* preserve udata */
                error = kqueue_register(kq, &kev, 0, NULL);
                if (error)
                        kn->kn_fflags |= NOTE_TRACKERR;
        }

        return (kn->kn_fflags != 0);
}

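/*
 * Illustration (not part of this file): a minimal userland sketch of the
 * EVFILT_PROC filter handled above.  NOTE_EXIT reports a wait(2)-style
 * status in kev.data, matching the W_EXITCODE() encoding in filt_proc();
 * NOTE_TRACK asks the kernel to attach knotes to future children, as
 * implemented for NOTE_FORK above.
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <err.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct kevent kev;
	pid_t pid;
	int kq;

	if ((kq = kqueue()) == -1)
		err(1, "kqueue");
	if ((pid = fork()) == 0) {
		sleep(1);
		_exit(7);
	}
	/* Watch the child for exit; also track any processes it forks. */
	EV_SET(&kev, pid, EVFILT_PROC, EV_ADD, NOTE_EXIT | NOTE_TRACK,
	    0, NULL);
	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
		err(1, "kevent: register");
	if (kevent(kq, NULL, 0, &kev, 1, NULL) == -1)
		err(1, "kevent: wait");
	if (kev.fflags & NOTE_EXIT)
		printf("pid %ld exited, status %d\n", (long)kev.ident,
		    WEXITSTATUS((int)kev.data));
	return 0;
}
#endif
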
static void
filt_timer_timeout_add(struct knote *kn)
{
        struct timeval tv;
        struct timeout *to = kn->kn_hook;
        int tticks;

        tv.tv_sec = kn->kn_sdata / 1000;
        tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
        tticks = tvtohz(&tv);
        /* Remove extra tick from tvtohz() if timeout has fired before. */
        if (timeout_triggered(to))
                tticks--;
        timeout_add(to, (tticks > 0) ? tticks : 1);
}

void
filt_timerexpire(void *knx)
{
        struct knote *kn = knx;
        struct kqueue *kq = kn->kn_kq;

        kn->kn_data++;
        mtx_enter(&kq->kq_lock);
        knote_activate(kn);
        mtx_leave(&kq->kq_lock);

        if ((kn->kn_flags & EV_ONESHOT) == 0)
                filt_timer_timeout_add(kn);
}


/*
 * data contains the amount of time to sleep, in milliseconds
 */
int
filt_timerattach(struct knote *kn)
{
        struct timeout *to;

        if (kq_ntimeouts > kq_timeoutmax)
                return (ENOMEM);
        kq_ntimeouts++;

        kn->kn_flags |= EV_CLEAR;       /* automatically set */
        to = malloc(sizeof(*to), M_KEVENT, M_WAITOK);
        timeout_set(to, filt_timerexpire, kn);
        kn->kn_hook = to;
        filt_timer_timeout_add(kn);

        return (0);
}

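/*
 * Illustration (not part of this file): a minimal userland sketch of the
 * timer filter set up above.  The event's data field carries the period
 * in milliseconds, EV_CLEAR is implied, and each collected event reports
 * the number of expirations (kn_data) since the last read.
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <err.h>
#include <stdio.h>

int
main(void)
{
	struct kevent kev;
	int i, kq;

	if ((kq = kqueue()) == -1)
		err(1, "kqueue");
	/* ident 1 is an arbitrary timer identifier chosen by the caller. */
	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD, 0, 500 /* ms */, NULL);
	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
		err(1, "kevent: register");
	for (i = 0; i < 3; i++) {
		if (kevent(kq, NULL, 0, &kev, 1, NULL) == -1)
			err(1, "kevent: wait");
		printf("timer fired, %lld expirations\n",
		    (long long)kev.data);
	}
	return 0;
}
#endif
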
void
filt_timerdetach(struct knote *kn)
{
        struct timeout *to;

        to = (struct timeout *)kn->kn_hook;
        timeout_del_barrier(to);
        free(to, M_KEVENT, sizeof(*to));
        kq_ntimeouts--;
}

int
filt_timermodify(struct kevent *kev, struct knote *kn)
{
        struct kqueue *kq = kn->kn_kq;
        struct timeout *to = kn->kn_hook;

        /* Reset the timer. Any pending events are discarded. */

        timeout_del_barrier(to);

        mtx_enter(&kq->kq_lock);
        if (kn->kn_status & KN_QUEUED)
                knote_dequeue(kn);
        kn->kn_status &= ~KN_ACTIVE;
        mtx_leave(&kq->kq_lock);

        kn->kn_data = 0;
        knote_assign(kev, kn);
        /* Reinit timeout to invoke tick adjustment again. */
        timeout_set(to, filt_timerexpire, kn);
        filt_timer_timeout_add(kn);

        return (0);
}

int
filt_timerprocess(struct knote *kn, struct kevent *kev)
{
        int active, s;

        s = splsoftclock();
        active = (kn->kn_data != 0);
        if (active)
                knote_submit(kn, kev);
        splx(s);

        return (active);
}


/*
 * filt_seltrue:
 *
 *      This filter "event" routine simulates seltrue().
 */
int
filt_seltrue(struct knote *kn, long hint)
{

        /*
         * We don't know how much data can be read/written,
         * but we know that it *can* be.  This is about as
         * good as select/poll does as well.
         */
        kn->kn_data = 0;
        return (1);
}

int
filt_seltruemodify(struct kevent *kev, struct knote *kn)
{
        knote_assign(kev, kn);
        return (kn->kn_fop->f_event(kn, 0));
}

int
filt_seltrueprocess(struct knote *kn, struct kevent *kev)
{
        int active;

        active = kn->kn_fop->f_event(kn, 0);
        if (active)
                knote_submit(kn, kev);
        return (active);
}

/*
 * This provides a full kqfilter entry for device switch tables, which
 * has the same effect as a filter using filt_seltrue() as its event
 * method.
 */
void
filt_seltruedetach(struct knote *kn)
{
        /* Nothing to do */
}

const struct filterops seltrue_filtops = {
        .f_flags        = FILTEROP_ISFD | FILTEROP_MPSAFE,
        .f_attach       = NULL,
        .f_detach       = filt_seltruedetach,
        .f_event        = filt_seltrue,
        .f_modify       = filt_seltruemodify,
        .f_process      = filt_seltrueprocess,
};

int
seltrue_kqfilter(dev_t dev, struct knote *kn)
{
        switch (kn->kn_filter) {
        case EVFILT_READ:
        case EVFILT_WRITE:
                kn->kn_fop = &seltrue_filtops;
                break;
        default:
                return (EINVAL);
        }

        /* Nothing more to do */
        return (0);
}

static int
filt_dead(struct knote *kn, long hint)
{
        if (kn->kn_filter == EVFILT_EXCEPT) {
                /*
                 * Do not deliver event because there is no out-of-band data.
                 * However, let HUP condition pass for poll(2).
                 */
                if ((kn->kn_flags & __EV_POLL) == 0) {
                        kn->kn_flags |= EV_DISABLE;
                        return (0);
                }
        }

        kn->kn_flags |= (EV_EOF | EV_ONESHOT);
        if (kn->kn_flags & __EV_POLL)
                kn->kn_flags |= __EV_HUP;
        kn->kn_data = 0;
        return (1);
}

static void
filt_deaddetach(struct knote *kn)
{
        /* Nothing to do */
}

const struct filterops dead_filtops = {
        .f_flags        = FILTEROP_ISFD | FILTEROP_MPSAFE,
        .f_attach       = NULL,
        .f_detach       = filt_deaddetach,
        .f_event        = filt_dead,
        .f_modify       = filt_seltruemodify,
        .f_process      = filt_seltrueprocess,
};

static int
filt_badfd(struct knote *kn, long hint)
{
        kn->kn_flags |= (EV_ERROR | EV_ONESHOT);
        kn->kn_data = EBADF;
        return (1);
}

/* For use with kqpoll. */
const struct filterops badfd_filtops = {
        .f_flags        = FILTEROP_ISFD | FILTEROP_MPSAFE,
        .f_attach       = NULL,
        .f_detach       = filt_deaddetach,
        .f_event        = filt_badfd,
        .f_modify       = filt_seltruemodify,
        .f_process      = filt_seltrueprocess,
};

static int
filter_attach(struct knote *kn)
{
        int error;

        if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
                error = kn->kn_fop->f_attach(kn);
        } else {
                KERNEL_LOCK();
                error = kn->kn_fop->f_attach(kn);
                KERNEL_UNLOCK();
        }
        return (error);
}

static void
filter_detach(struct knote *kn)
{
        if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
                kn->kn_fop->f_detach(kn);
        } else {
                KERNEL_LOCK();
                kn->kn_fop->f_detach(kn);
                KERNEL_UNLOCK();
        }
}

static int
filter_event(struct knote *kn, long hint)
{
        if ((kn->kn_fop->f_flags & FILTEROP_MPSAFE) == 0)
                KERNEL_ASSERT_LOCKED();

        return (kn->kn_fop->f_event(kn, hint));
}

static int
filter_modify(struct kevent *kev, struct knote *kn)
{
        int active, s;

        if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
                active = kn->kn_fop->f_modify(kev, kn);
        } else {
                KERNEL_LOCK();
                if (kn->kn_fop->f_modify != NULL) {
                        active = kn->kn_fop->f_modify(kev, kn);
                } else {
                        s = splhigh();
                        active = knote_modify(kev, kn);
                        splx(s);
                }
                KERNEL_UNLOCK();
        }
        return (active);
}

static int
filter_process(struct knote *kn, struct kevent *kev)
{
        int active, s;

        if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
                active = kn->kn_fop->f_process(kn, kev);
        } else {
                KERNEL_LOCK();
                if (kn->kn_fop->f_process != NULL) {
                        active = kn->kn_fop->f_process(kn, kev);
                } else {
                        s = splhigh();
                        active = knote_process(kn, kev);
                        splx(s);
                }
                KERNEL_UNLOCK();
        }
        return (active);
}

/*
 * Initialize the current thread for poll/select system call.
 * num indicates the number of serials that the system call may use.
 * After this function, the valid range of serials is
 * p_kq_serial <= x < p_kq_serial + num.
 */
void
kqpoll_init(unsigned int num)
{
        struct proc *p = curproc;
        struct filedesc *fdp;

        if (p->p_kq == NULL) {
                p->p_kq = kqueue_alloc(p->p_fd);
                p->p_kq_serial = arc4random();
                fdp = p->p_fd;
                fdplock(fdp);
                LIST_INSERT_HEAD(&fdp->fd_kqlist, p->p_kq, kq_next);
                fdpunlock(fdp);
        }

        if (p->p_kq_serial + num < p->p_kq_serial) {
                /* Serial is about to wrap. Clear all attached knotes. */
                kqueue_purge(p, p->p_kq);
                p->p_kq_serial = 0;
        }
}

/*
 * Finish poll/select system call.
 * num must have the same value that was used with kqpoll_init().
 */
void
kqpoll_done(unsigned int num)
{
        struct proc *p = curproc;
        struct kqueue *kq = p->p_kq;

        KASSERT(p->p_kq != NULL);
        KASSERT(p->p_kq_serial + num >= p->p_kq_serial);

        p->p_kq_serial += num;

        /*
         * Because of the kn_pollid key, a thread can in principle allocate
         * up to O(maxfiles^2) knotes by calling poll(2) repeatedly
         * with suitably varying pollfd arrays.
         * Prevent such a large allocation by clearing knotes eagerly
         * if there are too many of them.
         *
         * A small multiple of kq_knlistsize should give enough margin
         * that eager clearing is infrequent, or does not happen at all,
         * with normal programs.
         * A single pollfd entry can use up to three knotes.
         * Typically there is no significant overlap of fd and events
         * between different entries in the pollfd array.
         */
        if (kq->kq_nknotes > 4 * kq->kq_knlistsize)
                kqueue_purge(p, kq);
}

void
kqpoll_exit(void)
{
        struct proc *p = curproc;

        if (p->p_kq == NULL)
                return;

        kqueue_purge(p, p->p_kq);
        kqueue_terminate(p, p->p_kq);
        KASSERT(p->p_kq->kq_refcnt.r_refs == 1);
        KQRELE(p->p_kq);
        p->p_kq = NULL;
}

struct kqueue *
kqueue_alloc(struct filedesc *fdp)
{
        struct kqueue *kq;

        kq = pool_get(&kqueue_pool, PR_WAITOK | PR_ZERO);
        refcnt_init(&kq->kq_refcnt);
        kq->kq_fdp = fdp;
        TAILQ_INIT(&kq->kq_head);
        mtx_init(&kq->kq_lock, IPL_HIGH);
        task_set(&kq->kq_task, kqueue_task, kq);
        klist_init_mutex(&kq->kq_klist, &kqueue_klist_lock);

        return (kq);
}

int
sys_kqueue(struct proc *p, void *v, register_t *retval)
{
        struct filedesc *fdp = p->p_fd;
        struct kqueue *kq;
        struct file *fp;
        int fd, error;

        kq = kqueue_alloc(fdp);

        fdplock(fdp);
        error = falloc(p, &fp, &fd);
        if (error)
                goto out;
        fp->f_flag = FREAD | FWRITE;
        fp->f_type = DTYPE_KQUEUE;
        fp->f_ops = &kqueueops;
        fp->f_data = kq;
        *retval = fd;
        LIST_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_next);
        kq = NULL;
        fdinsert(fdp, fd, 0, fp);
        FRELE(fp, p);
out:
        fdpunlock(fdp);
        if (kq != NULL)
                pool_put(&kqueue_pool, kq);
        return (error);
}

int
sys_kevent(struct proc *p, void *v, register_t *retval)
{
        struct kqueue_scan_state scan;
        struct filedesc* fdp = p->p_fd;
        struct sys_kevent_args /* {
                syscallarg(int) fd;
                syscallarg(const struct kevent *) changelist;
                syscallarg(int) nchanges;
                syscallarg(struct kevent *) eventlist;
                syscallarg(int) nevents;
                syscallarg(const struct timespec *) timeout;
        } */ *uap = v;
        struct kevent *kevp;
        struct kqueue *kq;
        struct file *fp;
        struct timespec ts;
        struct timespec *tsp = NULL;
        int i, n, nerrors, error;
        int ready, total;
        struct kevent kev[KQ_NEVENTS];

        if ((fp = fd_getfile(fdp, SCARG(uap, fd))) == NULL)
                return (EBADF);

        if (fp->f_type != DTYPE_KQUEUE) {
                error = EBADF;
                goto done;
        }

        if (SCARG(uap, timeout) != NULL) {
                error = copyin(SCARG(uap, timeout), &ts, sizeof(ts));
                if (error)
                        goto done;
#ifdef KTRACE
                if (KTRPOINT(p, KTR_STRUCT))
                        ktrreltimespec(p, &ts);
#endif
                if (ts.tv_sec < 0 || !timespecisvalid(&ts)) {
                        error = EINVAL;
                        goto done;
                }
                tsp = &ts;
        }

        kq = fp->f_data;
        nerrors = 0;

        while ((n = SCARG(uap, nchanges)) > 0) {
                if (n > nitems(kev))
                        n = nitems(kev);
                error = copyin(SCARG(uap, changelist), kev,
                    n * sizeof(struct kevent));
                if (error)
                        goto done;
#ifdef KTRACE
                if (KTRPOINT(p, KTR_STRUCT))
                        ktrevent(p, kev, n);
#endif
                for (i = 0; i < n; i++) {
                        kevp = &kev[i];
                        kevp->flags &= ~EV_SYSFLAGS;
                        error = kqueue_register(kq, kevp, 0, p);
                        if (error || (kevp->flags & EV_RECEIPT)) {
                                if (SCARG(uap, nevents) != 0) {
                                        kevp->flags = EV_ERROR;
                                        kevp->data = error;
                                        copyout(kevp, SCARG(uap, eventlist),
                                            sizeof(*kevp));
                                        SCARG(uap, eventlist)++;
                                        SCARG(uap, nevents)--;
                                        nerrors++;
                                } else {
                                        goto done;
                                }
                        }
                }
                SCARG(uap, nchanges) -= n;
                SCARG(uap, changelist) += n;
        }
        if (nerrors) {
                *retval = nerrors;
                error = 0;
                goto done;
        }

        kqueue_scan_setup(&scan, kq);
        FRELE(fp, p);
        /*
         * Collect as many events as we can.  The timeout on successive
         * loops is disabled (kqueue_scan() becomes non-blocking).
         */
        total = 0;
        error = 0;
        while ((n = SCARG(uap, nevents) - total) > 0) {
                if (n > nitems(kev))
                        n = nitems(kev);
                ready = kqueue_scan(&scan, n, kev, tsp, p, &error);
                if (ready == 0)
                        break;
                error = copyout(kev, SCARG(uap, eventlist) + total,
                    sizeof(struct kevent) * ready);
#ifdef KTRACE
                if (KTRPOINT(p, KTR_STRUCT))
                        ktrevent(p, kev, ready);
#endif
                total += ready;
                if (error || ready < n)
                        break;
        }
        kqueue_scan_finish(&scan);
        *retval = total;
        return (error);

 done:
        FRELE(fp, p);
        return (error);
}

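/*
 * Illustration (not part of this file): a minimal userland round trip
 * through sys_kqueue() and sys_kevent() above, registering one read
 * filter on a pipe and collecting the event with a timeout.
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <err.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct kevent change, event;
	struct timespec ts = { 5, 0 };	/* five-second timeout */
	int fds[2], kq, n;

	if (pipe(fds) == -1)
		err(1, "pipe");
	if ((kq = kqueue()) == -1)
		err(1, "kqueue");
	EV_SET(&change, fds[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
	if (write(fds[1], "x", 1) != 1)
		err(1, "write");
	/* The changelist and eventlist may be passed in a single call. */
	n = kevent(kq, &change, 1, &event, 1, &ts);
	if (n == -1)
		err(1, "kevent");
	if (n > 0)
		printf("fd %ld readable, %lld bytes pending\n",
		    (long)event.ident, (long long)event.data);
	return 0;
}
#endif
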
#ifdef KQUEUE_DEBUG
void
kqueue_do_check(struct kqueue *kq, const char *func, int line)
{
        struct knote *kn;
        int count = 0, nmarker = 0;

        MUTEX_ASSERT_LOCKED(&kq->kq_lock);

        TAILQ_FOREACH(kn, &kq->kq_head, kn_tqe) {
                if (kn->kn_filter == EVFILT_MARKER) {
                        if ((kn->kn_status & KN_QUEUED) != 0)
                                panic("%s:%d: kq=%p kn=%p marker QUEUED",
                                    func, line, kq, kn);
                        nmarker++;
                } else {
                        if ((kn->kn_status & KN_ACTIVE) == 0)
                                panic("%s:%d: kq=%p kn=%p knote !ACTIVE",
                                    func, line, kq, kn);
                        if ((kn->kn_status & KN_QUEUED) == 0)
                                panic("%s:%d: kq=%p kn=%p knote !QUEUED",
                                    func, line, kq, kn);
                        if (kn->kn_kq != kq)
                                panic("%s:%d: kq=%p kn=%p kn_kq=%p != kq",
                                    func, line, kq, kn, kn->kn_kq);
                        count++;
                        if (count > kq->kq_count)
                                goto bad;
                }
        }
        if (count != kq->kq_count) {
bad:
                panic("%s:%d: kq=%p kq_count=%d count=%d nmarker=%d",
                    func, line, kq, kq->kq_count, count, nmarker);
        }
}
#endif

int
kqueue_register(struct kqueue *kq, struct kevent *kev, unsigned int pollid,
    struct proc *p)
{
        struct filedesc *fdp = kq->kq_fdp;
        const struct filterops *fops = NULL;
        struct file *fp = NULL;
        struct knote *kn = NULL, *newkn = NULL;
        struct knlist *list = NULL;
        int active, error = 0;

        KASSERT(pollid == 0 || (p != NULL && p->p_kq == kq));

        if (kev->filter < 0) {
                if (kev->filter + EVFILT_SYSCOUNT < 0)
                        return (EINVAL);
                fops = sysfilt_ops[~kev->filter];       /* to 0-base index */
        }

        if (fops == NULL) {
                /*
                 * XXX
                 * filter attach routine is responsible for ensuring that
                 * the identifier can be attached to it.
                 */
                return (EINVAL);
        }

        if (fops->f_flags & FILTEROP_ISFD) {
                /* validate descriptor */
                if (kev->ident > INT_MAX)
                        return (EBADF);
        }

        if (kev->flags & EV_ADD)
                newkn = pool_get(&knote_pool, PR_WAITOK | PR_ZERO);

again:
        if (fops->f_flags & FILTEROP_ISFD) {
                if ((fp = fd_getfile(fdp, kev->ident)) == NULL) {
                        error = EBADF;
                        goto done;
                }
                mtx_enter(&kq->kq_lock);
                if (kev->flags & EV_ADD)
                        kqueue_expand_list(kq, kev->ident);
                if (kev->ident < kq->kq_knlistsize)
                        list = &kq->kq_knlist[kev->ident];
        } else {
                mtx_enter(&kq->kq_lock);
                if (kev->flags & EV_ADD)
                        kqueue_expand_hash(kq);
                if (kq->kq_knhashmask != 0) {
                        list = &kq->kq_knhash[
                            KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
                }
        }
        if (list != NULL) {
                SLIST_FOREACH(kn, list, kn_link) {
                        if (kev->filter == kn->kn_filter &&
                            kev->ident == kn->kn_id &&
                            pollid == kn->kn_pollid) {
                                if (!knote_acquire(kn, NULL, 0)) {
                                        /* knote_acquire() has released
                                         * kq_lock. */
                                        if (fp != NULL) {
                                                FRELE(fp, p);
                                                fp = NULL;
                                        }
                                        goto again;
                                }
                                break;
                        }
                }
        }
        KASSERT(kn == NULL || (kn->kn_status & KN_PROCESSING) != 0);

        if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
                mtx_leave(&kq->kq_lock);
                error = ENOENT;
                goto done;
        }

        /*
         * kn now contains the matching knote, or NULL if no match.
         */
        if (kev->flags & EV_ADD) {
                if (kn == NULL) {
                        kn = newkn;
                        newkn = NULL;
                        kn->kn_status = KN_PROCESSING;
                        kn->kn_fp = fp;
                        kn->kn_kq = kq;
                        kn->kn_fop = fops;

                        /*
                         * apply reference count to knote structure, and
                         * do not release it at the end of this routine.
                         */
                        fp = NULL;

                        kn->kn_sfflags = kev->fflags;
                        kn->kn_sdata = kev->data;
                        kev->fflags = 0;
                        kev->data = 0;
                        kn->kn_kevent = *kev;
                        kn->kn_pollid = pollid;

                        knote_attach(kn);
                        mtx_leave(&kq->kq_lock);

                        error = filter_attach(kn);
                        if (error != 0) {
                                knote_drop(kn, p);
                                goto done;
                        }

                        /*
                         * If this is a file descriptor filter, check if
                         * fd was closed while the knote was being added.
                         * knote_fdclose() has missed kn if the function
                         * ran before kn appeared in kq_knlist.
                         */
                        if ((fops->f_flags & FILTEROP_ISFD) &&
                            fd_checkclosed(fdp, kev->ident, kn->kn_fp)) {
                                /*
                                 * Drop the knote silently without error
                                 * because another thread might already have
                                 * seen it. This corresponds to the insert
                                 * happening in full before the close.
                                 */
                                filter_detach(kn);
                                knote_drop(kn, p);
                                goto done;
                        }

                        /* Check if there is a pending event. */
                        active = filter_process(kn, NULL);
                        mtx_enter(&kq->kq_lock);
                        if (active)
                                knote_activate(kn);
                } else if (kn->kn_fop == &badfd_filtops) {
                        /*
                         * Nothing expects this badfd knote any longer.
                         * Drop it to make room for the new knote and retry.
                         */
                        KASSERT(kq == p->p_kq);
                        mtx_leave(&kq->kq_lock);
                        filter_detach(kn);
                        knote_drop(kn, p);

                        KASSERT(fp != NULL);
                        FRELE(fp, p);
                        fp = NULL;

                        goto again;
                } else {
                        /*
                         * The user may change some filter values after the
                         * initial EV_ADD, but doing so will not reset any
                         * filters which have already been triggered.
                         */
                        mtx_leave(&kq->kq_lock);
                        active = filter_modify(kev, kn);
                        mtx_enter(&kq->kq_lock);
                        if (active)
                                knote_activate(kn);
                        if (kev->flags & EV_ERROR) {
                                error = kev->data;
                                goto release;
                        }
                }
        } else if (kev->flags & EV_DELETE) {
                mtx_leave(&kq->kq_lock);
                filter_detach(kn);
                knote_drop(kn, p);
                goto done;
        }

        if ((kev->flags & EV_DISABLE) && ((kn->kn_status & KN_DISABLED) == 0))
                kn->kn_status |= KN_DISABLED;

        if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
                kn->kn_status &= ~KN_DISABLED;
                mtx_leave(&kq->kq_lock);
                /* Check if there is a pending event. */
                active = filter_process(kn, NULL);
                mtx_enter(&kq->kq_lock);
                if (active)
                        knote_activate(kn);
        }

release:
        knote_release(kn);
        mtx_leave(&kq->kq_lock);
done:
        if (fp != NULL)
                FRELE(fp, p);
        if (newkn != NULL)
                pool_put(&knote_pool, newkn);
        return (error);
}

int
kqueue_sleep(struct kqueue *kq, struct timespec *tsp)
{
        struct timespec elapsed, start, stop;
        uint64_t nsecs;
        int error;

        MUTEX_ASSERT_LOCKED(&kq->kq_lock);

        if (tsp != NULL) {
                getnanouptime(&start);
                nsecs = MIN(TIMESPEC_TO_NSEC(tsp), MAXTSLP);
        } else
                nsecs = INFSLP;
        error = msleep_nsec(kq, &kq->kq_lock, PSOCK | PCATCH | PNORELOCK,
            "kqread", nsecs);
        if (tsp != NULL) {
                getnanouptime(&stop);
                timespecsub(&stop, &start, &elapsed);
                timespecsub(tsp, &elapsed, tsp);
                if (tsp->tv_sec < 0)
                        timespecclear(tsp);
        }

        return (error);
}

/*
 * Scan the kqueue, blocking if necessary until the target time is reached.
 * If tsp is NULL we block indefinitely.  If tsp->tv_sec and tsp->tv_nsec
 * are both 0 we do not block at all.
 */
int
kqueue_scan(struct kqueue_scan_state *scan, int maxevents,
    struct kevent *kevp, struct timespec *tsp, struct proc *p, int *errorp)
{
        struct kqueue *kq = scan->kqs_kq;
        struct knote *kn;
        int error = 0, nkev = 0;
        int reinserted;

        if (maxevents == 0)
                goto done;
retry:
        KASSERT(nkev == 0);

        error = 0;
        reinserted = 0;

        mtx_enter(&kq->kq_lock);

        if (kq->kq_state & KQ_DYING) {
                mtx_leave(&kq->kq_lock);
                error = EBADF;
                goto done;
        }

        if (kq->kq_count == 0) {
                /*
                 * Successive loops are only necessary if there are more
                 * ready events to gather, so they don't need to block.
                 */
                if ((tsp != NULL && !timespecisset(tsp)) ||
                    scan->kqs_nevent != 0) {
                        mtx_leave(&kq->kq_lock);
                        error = 0;
                        goto done;
                }
                kq->kq_state |= KQ_SLEEP;
                error = kqueue_sleep(kq, tsp);
                /* kqueue_sleep() has released kq_lock. */
                if (error == 0 || error == EWOULDBLOCK)
                        goto retry;
                /* don't restart after signals... */
                if (error == ERESTART)
                        error = EINTR;
                goto done;
        }

        /*
         * Put the end marker in the queue to limit the scan to the events
         * that are currently active.  This prevents events from being
         * recollected if they reactivate during scan.
         *
         * If a partial scan has been performed already but no events have
         * been collected, reposition the end marker to make any new events
         * reachable.
         */
        if (!scan->kqs_queued) {
                TAILQ_INSERT_TAIL(&kq->kq_head, &scan->kqs_end, kn_tqe);
                scan->kqs_queued = 1;
        } else if (scan->kqs_nevent == 0) {
                TAILQ_REMOVE(&kq->kq_head, &scan->kqs_end, kn_tqe);
                TAILQ_INSERT_TAIL(&kq->kq_head, &scan->kqs_end, kn_tqe);
        }

        TAILQ_INSERT_HEAD(&kq->kq_head, &scan->kqs_start, kn_tqe);
        while (nkev < maxevents) {
                kn = TAILQ_NEXT(&scan->kqs_start, kn_tqe);
                if (kn->kn_filter == EVFILT_MARKER) {
                        if (kn == &scan->kqs_end)
                                break;

                        /* Move start marker past another thread's marker. */
                        TAILQ_REMOVE(&kq->kq_head, &scan->kqs_start, kn_tqe);
                        TAILQ_INSERT_AFTER(&kq->kq_head, kn, &scan->kqs_start,
                            kn_tqe);
                        continue;
                }

                if (!knote_acquire(kn, NULL, 0)) {
                        /* knote_acquire() has released kq_lock. */
                        mtx_enter(&kq->kq_lock);
                        continue;
                }

                kqueue_check(kq);
                TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
                kn->kn_status &= ~KN_QUEUED;
                kq->kq_count--;
                kqueue_check(kq);

                if (kn->kn_status & KN_DISABLED) {
                        knote_release(kn);
                        continue;
                }

                mtx_leave(&kq->kq_lock);

                /* Drop expired kqpoll knotes. */
                if (p->p_kq == kq &&
                    p->p_kq_serial > (unsigned long)kn->kn_udata) {
                        filter_detach(kn);
                        knote_drop(kn, p);
                        mtx_enter(&kq->kq_lock);
                        continue;
                }

                /*
                 * Invalidate knotes whose vnodes have been revoked.
                 * This is a workaround; it is tricky to clear existing
                 * knotes and prevent new ones from being registered
                 * with the current revocation mechanism.
                 */
                if ((kn->kn_fop->f_flags & FILTEROP_ISFD) &&
                    kn->kn_fp != NULL &&
                    kn->kn_fp->f_type == DTYPE_VNODE) {
                        struct vnode *vp = kn->kn_fp->f_data;

                        if (__predict_false(vp->v_op == &dead_vops &&
                            kn->kn_fop != &dead_filtops)) {
                                filter_detach(kn);
                                kn->kn_fop = &dead_filtops;

                                /*
                                 * Check if the event should be delivered.
                                 * Use f_event directly because this is
                                 * a special situation.
                                 */
                                if (kn->kn_fop->f_event(kn, 0) == 0) {
                                        filter_detach(kn);
                                        knote_drop(kn, p);
                                        mtx_enter(&kq->kq_lock);
                                        continue;
                                }
                        }
                }

                memset(kevp, 0, sizeof(*kevp));
                if (filter_process(kn, kevp) == 0) {
                        mtx_enter(&kq->kq_lock);
                        if ((kn->kn_status & KN_QUEUED) == 0)
                                kn->kn_status &= ~KN_ACTIVE;
                        knote_release(kn);
                        kqueue_check(kq);
                        continue;
                }

                /*
                 * Post-event action on the note
                 */
                if (kevp->flags & EV_ONESHOT) {
                        filter_detach(kn);
                        knote_drop(kn, p);
                        mtx_enter(&kq->kq_lock);
                } else if (kevp->flags & (EV_CLEAR | EV_DISPATCH)) {
                        mtx_enter(&kq->kq_lock);
                        if (kevp->flags & EV_DISPATCH)
                                kn->kn_status |= KN_DISABLED;
                        if ((kn->kn_status & KN_QUEUED) == 0)
                                kn->kn_status &= ~KN_ACTIVE;
                        knote_release(kn);
                } else {
                        mtx_enter(&kq->kq_lock);
                        if ((kn->kn_status & KN_QUEUED) == 0) {
                                kqueue_check(kq);
                                kq->kq_count++;
                                kn->kn_status |= KN_QUEUED;
                                TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
                                /* Wakeup is done after loop. */
                                reinserted = 1;
                        }
                        knote_release(kn);
                }
                kqueue_check(kq);

                kevp++;
                nkev++;
                scan->kqs_nevent++;
        }
        TAILQ_REMOVE(&kq->kq_head, &scan->kqs_start, kn_tqe);
        if (reinserted && kq->kq_count != 0)
                kqueue_wakeup(kq);
        mtx_leave(&kq->kq_lock);
        if (scan->kqs_nevent == 0)
                goto retry;
done:
        *errorp = error;
        return (nkev);
}

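/*
 * Illustration (not part of this file): the start/end marker technique
 * used by kqueue_scan() above, reduced to a plain TAILQ in userland.
 * Markers let a scanner drop the lock between elements without losing
 * its place, and bound the scan to the elements present when it began;
 * entries appended after the end marker are not visited.
 */
#if 0
#include <sys/queue.h>
#include <stdio.h>

struct node {
	TAILQ_ENTRY(node) link;
	int marker;		/* 1 for scan markers, 0 for real data */
	int value;
};

TAILQ_HEAD(, node) head = TAILQ_HEAD_INITIALIZER(head);

int
main(void)
{
	struct node data[3] = {
		{ .value = 1 }, { .value = 2 }, { .value = 3 }
	};
	struct node start = { .marker = 1 }, end = { .marker = 1 };
	struct node late = { .value = 99 };
	struct node *n;
	int i;

	for (i = 0; i < 3; i++)
		TAILQ_INSERT_TAIL(&head, &data[i], link);
	/* Bracket the current contents of the queue. */
	TAILQ_INSERT_HEAD(&head, &start, link);
	TAILQ_INSERT_TAIL(&head, &end, link);
	/* An element arriving mid-scan lands beyond the end marker. */
	TAILQ_INSERT_TAIL(&head, &late, link);

	while ((n = TAILQ_NEXT(&start, link)) != &end) {
		if (!n->marker)
			printf("visit %d\n", n->value);
		/* Step the start marker past the node just examined. */
		TAILQ_REMOVE(&head, &start, link);
		TAILQ_INSERT_AFTER(&head, n, &start, link);
	}
	TAILQ_REMOVE(&head, &start, link);
	TAILQ_REMOVE(&head, &end, link);
	return 0;	/* prints 1, 2, 3; never 99 */
}
#endif
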
 1462 void
 1463 kqueue_scan_setup(struct kqueue_scan_state *scan, struct kqueue *kq)
 1464 {
 1465         memset(scan, 0, sizeof(*scan));
 1466 
 1467         KQREF(kq);
 1468         scan->kqs_kq = kq;
 1469         scan->kqs_start.kn_filter = EVFILT_MARKER;
 1470         scan->kqs_start.kn_status = KN_PROCESSING;
 1471         scan->kqs_end.kn_filter = EVFILT_MARKER;
 1472         scan->kqs_end.kn_status = KN_PROCESSING;
 1473 }
 1474 
 1475 void
 1476 kqueue_scan_finish(struct kqueue_scan_state *scan)
 1477 {
 1478         struct kqueue *kq = scan->kqs_kq;
 1479 
 1480         KASSERT(scan->kqs_start.kn_filter == EVFILT_MARKER);
 1481         KASSERT(scan->kqs_start.kn_status == KN_PROCESSING);
 1482         KASSERT(scan->kqs_end.kn_filter == EVFILT_MARKER);
 1483         KASSERT(scan->kqs_end.kn_status == KN_PROCESSING);
 1484 
 1485         if (scan->kqs_queued) {
 1486                 scan->kqs_queued = 0;
 1487                 mtx_enter(&kq->kq_lock);
 1488                 TAILQ_REMOVE(&kq->kq_head, &scan->kqs_end, kn_tqe);
 1489                 mtx_leave(&kq->kq_lock);
 1490         }
 1491         KQRELE(kq);
 1492 }
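
/*
 * Example (sketch, not from this file): how a caller drives a scan.
 * The setup/scan/finish sequence keeps the kqs_start/kqs_end markers
 * valid across retries; sys_kevent() follows this pattern.  The
 * function name, the buffer size and the assumed kqueue_scan()
 * signature are illustrative.
 */
static int
example_collect(struct kqueue *kq, struct timespec *tsp, struct proc *p)
{
        struct kqueue_scan_state scan;
        struct kevent kev[4];
        int error = 0, nkev;

        kqueue_scan_setup(&scan, kq);
        nkev = kqueue_scan(&scan, nitems(kev), kev, tsp, p, &error);
        kqueue_scan_finish(&scan);
        return (nkev > 0 ? nkev : error);
}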
 1493 
 1494 /*
 1495  * XXX
 1496  * This could be expanded to call kqueue_scan, if desired.
 1497  */
 1498 int
 1499 kqueue_read(struct file *fp, struct uio *uio, int fflags)
 1500 {
 1501         return (ENXIO);
 1502 }
 1503 
 1504 int
 1505 kqueue_write(struct file *fp, struct uio *uio, int fflags)
 1506 {
 1507         return (ENXIO);
 1508 }
 1509 
 1510 int
 1511 kqueue_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
 1512 {
 1513         return (ENOTTY);
 1514 }
 1515 
 1516 int
 1517 kqueue_stat(struct file *fp, struct stat *st, struct proc *p)
 1518 {
 1519         struct kqueue *kq = fp->f_data;
 1520 
 1521         memset(st, 0, sizeof(*st));
 1522         st->st_size = kq->kq_count;     /* unlocked read */
 1523         st->st_blksize = sizeof(struct kevent);
 1524         st->st_mode = S_IFIFO;
 1525         return (0);
 1526 }
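
/*
 * Example (sketch, userland): kqueue_stat() reports the pending event
 * count in st_size and types the descriptor S_IFIFO, so a process can
 * inspect its own kqueue with fstat(2).
 */
#include <sys/types.h>
#include <sys/event.h>
#include <sys/stat.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
        struct stat st;
        int kq = kqueue();

        if (kq == -1 || fstat(kq, &st) == -1)
                return (1);
        printf("pending kevents: %lld\n", (long long)st.st_size);
        close(kq);
        return (0);
}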
 1527 
 1528 void
 1529 kqueue_purge(struct proc *p, struct kqueue *kq)
 1530 {
 1531         int i;
 1532 
 1533         mtx_enter(&kq->kq_lock);
 1534         for (i = 0; i < kq->kq_knlistsize; i++)
 1535                 knote_remove(p, kq, &kq->kq_knlist, i, 1);
 1536         if (kq->kq_knhashmask != 0) {
 1537                 for (i = 0; i < kq->kq_knhashmask + 1; i++)
 1538                         knote_remove(p, kq, &kq->kq_knhash, i, 1);
 1539         }
 1540         mtx_leave(&kq->kq_lock);
 1541 }
 1542 
 1543 void
 1544 kqueue_terminate(struct proc *p, struct kqueue *kq)
 1545 {
 1546         struct knote *kn;
 1547         int state;
 1548 
 1549         mtx_enter(&kq->kq_lock);
 1550 
 1551         /*
 1552          * Any remaining entries should be scan markers.
 1553          * They are removed when the ongoing scans finish.
 1554          */
 1555         KASSERT(kq->kq_count == 0);
 1556         TAILQ_FOREACH(kn, &kq->kq_head, kn_tqe)
 1557                 KASSERT(kn->kn_filter == EVFILT_MARKER);
 1558 
 1559         kq->kq_state |= KQ_DYING;
 1560         state = kq->kq_state;
 1561         kqueue_wakeup(kq);
 1562         mtx_leave(&kq->kq_lock);
 1563 
 1564         /*
 1565          * Any knotes that were attached to this kqueue were deleted
 1566          * by knote_fdclose() when this kqueue's file descriptor was closed.
 1567          */
 1568         KASSERT(klist_empty(&kq->kq_klist));
 1569         if (state & KQ_TASK)
 1570                 taskq_del_barrier(systqmp, &kq->kq_task);
 1571 }
 1572 
 1573 int
 1574 kqueue_close(struct file *fp, struct proc *p)
 1575 {
 1576         struct kqueue *kq = fp->f_data;
 1577 
 1578         fp->f_data = NULL;
 1579 
 1580         kqueue_purge(p, kq);
 1581         kqueue_terminate(p, kq);
 1582 
 1583         KQRELE(kq);
 1584 
 1585         return (0);
 1586 }
 1587 
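/*
 * Activate the knotes of any kqueue that monitors this kqueue.  This
 * runs from task context because kqueue_wakeup() is called with
 * kq_lock held and must not recurse into the klist directly.
 */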
 1588 static void
 1589 kqueue_task(void *arg)
 1590 {
 1591         struct kqueue *kq = arg;
 1592 
 1593         mtx_enter(&kqueue_klist_lock);
 1594         KNOTE(&kq->kq_klist, 0);
 1595         mtx_leave(&kqueue_klist_lock);
 1596 }
 1597 
 1598 void
 1599 kqueue_wakeup(struct kqueue *kq)
 1600 {
 1601         MUTEX_ASSERT_LOCKED(&kq->kq_lock);
 1602 
 1603         if (kq->kq_state & KQ_SLEEP) {
 1604                 kq->kq_state &= ~KQ_SLEEP;
 1605                 wakeup(kq);
 1606         }
 1607         if (!klist_empty(&kq->kq_klist)) {
 1608                 /* Defer activation to avoid recursion. */
 1609                 kq->kq_state |= KQ_TASK;
 1610                 task_add(systqmp, &kq->kq_task);
 1611         }
 1612 }
 1613 
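/*
 * Ensure the kqueue has a hash table for non-fd knotes.  The table is
 * allocated with kq_lock dropped; if another thread installs a table
 * in the meantime, the local allocation is discarded.
 */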
 1614 static void
 1615 kqueue_expand_hash(struct kqueue *kq)
 1616 {
 1617         struct knlist *hash;
 1618         u_long hashmask;
 1619 
 1620         MUTEX_ASSERT_LOCKED(&kq->kq_lock);
 1621 
 1622         if (kq->kq_knhashmask == 0) {
 1623                 mtx_leave(&kq->kq_lock);
 1624                 hash = hashinit(KN_HASHSIZE, M_KEVENT, M_WAITOK, &hashmask);
 1625                 mtx_enter(&kq->kq_lock);
 1626                 if (kq->kq_knhashmask == 0) {
 1627                         kq->kq_knhash = hash;
 1628                         kq->kq_knhashmask = hashmask;
 1629                 } else {
 1630                         /* Another thread has allocated the hash. */
 1631                         mtx_leave(&kq->kq_lock);
 1632                         hashfree(hash, KN_HASHSIZE, M_KEVENT);
 1633                         mtx_enter(&kq->kq_lock);
 1634                 }
 1635         }
 1636 }
 1637 
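/*
 * Grow the per-fd knote list array to cover the given fd, using the
 * same drop-lock/allocate/recheck pattern as kqueue_expand_hash().
 */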
 1638 static void
 1639 kqueue_expand_list(struct kqueue *kq, int fd)
 1640 {
 1641         struct knlist *list, *olist;
 1642         int size, osize;
 1643 
 1644         MUTEX_ASSERT_LOCKED(&kq->kq_lock);
 1645 
 1646         if (kq->kq_knlistsize <= fd) {
 1647                 size = kq->kq_knlistsize;
 1648                 mtx_leave(&kq->kq_lock);
 1649                 while (size <= fd)
 1650                         size += KQEXTENT;
 1651                 list = mallocarray(size, sizeof(*list), M_KEVENT, M_WAITOK);
 1652                 mtx_enter(&kq->kq_lock);
 1653                 if (kq->kq_knlistsize <= fd) {
 1654                         memcpy(list, kq->kq_knlist,
 1655                             kq->kq_knlistsize * sizeof(*list));
 1656                         memset(&list[kq->kq_knlistsize], 0,
 1657                             (size - kq->kq_knlistsize) * sizeof(*list));
 1658                         olist = kq->kq_knlist;
 1659                         osize = kq->kq_knlistsize;
 1660                         kq->kq_knlist = list;
 1661                         kq->kq_knlistsize = size;
 1662                         mtx_leave(&kq->kq_lock);
 1663                         free(olist, M_KEVENT, osize * sizeof(*list));
 1664                         mtx_enter(&kq->kq_lock);
 1665                 } else {
 1666                         /* Another thread has expanded the list. */
 1667                         mtx_leave(&kq->kq_lock);
 1668                         free(list, M_KEVENT, size * sizeof(*list));
 1669                         mtx_enter(&kq->kq_lock);
 1670                 }
 1671         }
 1672 }
 1673 
 1674 /*
 1675  * Acquire a knote, return non-zero on success, 0 on failure.
 1676  *
 1677  * If we cannot acquire the knote we sleep and return 0.  The knote
 1678  * may be stale on return in this case and the caller must restart
 1679  * whatever loop they are in.
 1680  *
 1681  * If we are about to sleep and klist is non-NULL, the list is unlocked
 1682  * before sleep and remains unlocked on return.
 1683  */
 1684 int
 1685 knote_acquire(struct knote *kn, struct klist *klist, int ls)
 1686 {
 1687         struct kqueue *kq = kn->kn_kq;
 1688 
 1689         MUTEX_ASSERT_LOCKED(&kq->kq_lock);
 1690         KASSERT(kn->kn_filter != EVFILT_MARKER);
 1691 
 1692         if (kn->kn_status & KN_PROCESSING) {
 1693                 kn->kn_status |= KN_WAITING;
 1694                 if (klist != NULL) {
 1695                         mtx_leave(&kq->kq_lock);
 1696                         klist_unlock(klist, ls);
 1697                         /* XXX Timeout resolves potential loss of wakeup. */
 1698                         tsleep_nsec(kn, 0, "kqepts", SEC_TO_NSEC(1));
 1699                 } else {
 1700                         msleep_nsec(kn, &kq->kq_lock, PNORELOCK, "kqepts",
 1701                             SEC_TO_NSEC(1));
 1702                 }
 1703                 /* knote may be stale now */
 1704                 return (0);
 1705         }
 1706         kn->kn_status |= KN_PROCESSING;
 1707         return (1);
 1708 }
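
/*
 * Example (sketch, not from this file): the acquire/restart contract
 * in action, distilled from knote_remove() below.  Because the knote
 * may be stale after a failed acquire, the list head is re-read
 * instead of continuing from kn.
 */
static void
example_drain(struct kqueue *kq, struct knlist *list, struct proc *p)
{
        struct knote *kn;

        mtx_enter(&kq->kq_lock);
        while ((kn = SLIST_FIRST(list)) != NULL) {
                if (!knote_acquire(kn, NULL, 0)) {
                        /* kq_lock was released; kn may be stale. */
                        mtx_enter(&kq->kq_lock);
                        continue;
                }
                mtx_leave(&kq->kq_lock);
                filter_detach(kn);
                knote_drop(kn, p);      /* unlinks kn from the list */
                mtx_enter(&kq->kq_lock);
        }
        mtx_leave(&kq->kq_lock);
}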
 1709 
 1710 /*
 1711  * Release an acquired knote, clearing KN_PROCESSING.
 1712  */
 1713 void
 1714 knote_release(struct knote *kn)
 1715 {
 1716         MUTEX_ASSERT_LOCKED(&kn->kn_kq->kq_lock);
 1717         KASSERT(kn->kn_filter != EVFILT_MARKER);
 1718         KASSERT(kn->kn_status & KN_PROCESSING);
 1719 
 1720         if (kn->kn_status & KN_WAITING) {
 1721                 kn->kn_status &= ~KN_WAITING;
 1722                 wakeup(kn);
 1723         }
 1724         kn->kn_status &= ~KN_PROCESSING;
 1725         /* kn should not be accessed anymore */
 1726 }
 1727 
 1728 /*
 1729  * activate one knote.
 1730  */
 1731 void
 1732 knote_activate(struct knote *kn)
 1733 {
 1734         MUTEX_ASSERT_LOCKED(&kn->kn_kq->kq_lock);
 1735 
 1736         kn->kn_status |= KN_ACTIVE;
 1737         if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)
 1738                 knote_enqueue(kn);
 1739 }
 1740 
 1741 /*
 1742  * walk down a list of knotes, activating them if their event has triggered.
 1743  */
 1744 void
 1745 knote(struct klist *list, long hint)
 1746 {
 1747         struct knote *kn, *kn0;
 1748         struct kqueue *kq;
 1749 
 1750         KLIST_ASSERT_LOCKED(list);
 1751 
 1752         SLIST_FOREACH_SAFE(kn, &list->kl_list, kn_selnext, kn0) {
 1753                 if (filter_event(kn, hint)) {
 1754                         kq = kn->kn_kq;
 1755                         mtx_enter(&kq->kq_lock);
 1756                         knote_activate(kn);
 1757                         mtx_leave(&kq->kq_lock);
 1758                 }
 1759         }
 1760 }
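
/*
 * Example (sketch, not from this file): an event source activates its
 * watchers by calling KNOTE() with the klist lock held, just as
 * kqueue_task() does above.  The softc and its fields are hypothetical
 * driver state; sc_mtx is registered with klist_init_mutex() (see the
 * sketch near the end of this file).
 */
struct example_softc {
        struct mutex    sc_mtx;
        struct klist    sc_klist;
};

void
example_post_event(struct example_softc *sc)
{
        mtx_enter(&sc->sc_mtx);
        KNOTE(&sc->sc_klist, 0);        /* runs knote() on the list */
        mtx_leave(&sc->sc_mtx);
}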
 1761 
 1762 /*
 1763  * remove all knotes from a specified knlist
 1764  */
 1765 void
 1766 knote_remove(struct proc *p, struct kqueue *kq, struct knlist **plist, int idx,
 1767     int purge)
 1768 {
 1769         struct knote *kn;
 1770 
 1771         MUTEX_ASSERT_LOCKED(&kq->kq_lock);
 1772 
 1773         /* Always fetch array pointer as another thread can resize kq_knlist. */
 1774         while ((kn = SLIST_FIRST(*plist + idx)) != NULL) {
 1775                 KASSERT(kn->kn_kq == kq);
 1776 
 1777                 if (!purge) {
 1778                         /* Skip pending badfd knotes. */
 1779                         while (kn->kn_fop == &badfd_filtops) {
 1780                                 kn = SLIST_NEXT(kn, kn_link);
 1781                                 if (kn == NULL)
 1782                                         return;
 1783                                 KASSERT(kn->kn_kq == kq);
 1784                         }
 1785                 }
 1786 
 1787                 if (!knote_acquire(kn, NULL, 0)) {
 1788                         /* knote_acquire() has released kq_lock. */
 1789                         mtx_enter(&kq->kq_lock);
 1790                         continue;
 1791                 }
 1792                 mtx_leave(&kq->kq_lock);
 1793                 filter_detach(kn);
 1794 
 1795                 /*
 1796                  * Notify poll(2) and select(2) when a monitored
 1797                  * file descriptor is closed.
 1798                  *
 1799                  * This reuses the original knote for delivering the
 1800                  * notification so as to avoid allocating memory.
 1801                  */
 1802                 if (!purge && (kn->kn_flags & (__EV_POLL | __EV_SELECT)) &&
 1803                     !(p->p_kq == kq &&
 1804                       p->p_kq_serial > (unsigned long)kn->kn_udata) &&
 1805                     kn->kn_fop != &badfd_filtops) {
 1806                         KASSERT(kn->kn_fop->f_flags & FILTEROP_ISFD);
 1807                         FRELE(kn->kn_fp, p);
 1808                         kn->kn_fp = NULL;
 1809 
 1810                         kn->kn_fop = &badfd_filtops;
 1811                         filter_event(kn, 0);
 1812                         mtx_enter(&kq->kq_lock);
 1813                         knote_activate(kn);
 1814                         knote_release(kn);
 1815                         continue;
 1816                 }
 1817 
 1818                 knote_drop(kn, p);
 1819                 mtx_enter(&kq->kq_lock);
 1820         }
 1821 }
 1822 
 1823 /*
 1824  * remove all knotes referencing a specified fd
 1825  */
 1826 void
 1827 knote_fdclose(struct proc *p, int fd)
 1828 {
 1829         struct filedesc *fdp = p->p_p->ps_fd;
 1830         struct kqueue *kq;
 1831 
 1832         /*
 1833          * fdplock can be ignored if the file descriptor table is being freed
 1834          * because no other thread can access the fdp.
 1835          */
 1836         if (fdp->fd_refcnt != 0)
 1837                 fdpassertlocked(fdp);
 1838 
 1839         LIST_FOREACH(kq, &fdp->fd_kqlist, kq_next) {
 1840                 mtx_enter(&kq->kq_lock);
 1841                 if (fd < kq->kq_knlistsize)
 1842                         knote_remove(p, kq, &kq->kq_knlist, fd, 0);
 1843                 mtx_leave(&kq->kq_lock);
 1844         }
 1845 }
 1846 
 1847 /*
 1848  * handle a process exiting, including the triggering of NOTE_EXIT notes
 1849  * XXX this could be more efficient, doing a single pass down the klist
 1850  */
 1851 void
 1852 knote_processexit(struct process *pr)
 1853 {
 1854         KERNEL_ASSERT_LOCKED();
 1855 
 1856         KNOTE(&pr->ps_klist, NOTE_EXIT);
 1857 
 1858         /* remove other knotes hanging off the process */
 1859         klist_invalidate(&pr->ps_klist);
 1860 }
 1861 
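/*
 * Link a new knote into the kqueue's per-fd list or, for non-fd
 * filters, into its hash table; knote_detach() below undoes this.
 */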
 1862 void
 1863 knote_attach(struct knote *kn)
 1864 {
 1865         struct kqueue *kq = kn->kn_kq;
 1866         struct knlist *list;
 1867 
 1868         MUTEX_ASSERT_LOCKED(&kq->kq_lock);
 1869         KASSERT(kn->kn_status & KN_PROCESSING);
 1870 
 1871         if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
 1872                 KASSERT(kq->kq_knlistsize > kn->kn_id);
 1873                 list = &kq->kq_knlist[kn->kn_id];
 1874         } else {
 1875                 KASSERT(kq->kq_knhashmask != 0);
 1876                 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
 1877         }
 1878         SLIST_INSERT_HEAD(list, kn, kn_link);
 1879         kq->kq_nknotes++;
 1880 }
 1881 
 1882 void
 1883 knote_detach(struct knote *kn)
 1884 {
 1885         struct kqueue *kq = kn->kn_kq;
 1886         struct knlist *list;
 1887 
 1888         MUTEX_ASSERT_LOCKED(&kq->kq_lock);
 1889         KASSERT(kn->kn_status & KN_PROCESSING);
 1890 
 1891         kq->kq_nknotes--;
 1892         if (kn->kn_fop->f_flags & FILTEROP_ISFD)
 1893                 list = &kq->kq_knlist[kn->kn_id];
 1894         else
 1895                 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
 1896         SLIST_REMOVE(list, kn, knote, kn_link);
 1897 }
 1898 
 1899 /*
 1900  * should be called at spl == 0, since we don't want to hold spl
 1901  * while calling FRELE and pool_put.
 1902  */
 1903 void
 1904 knote_drop(struct knote *kn, struct proc *p)
 1905 {
 1906         struct kqueue *kq = kn->kn_kq;
 1907 
 1908         KASSERT(kn->kn_filter != EVFILT_MARKER);
 1909 
 1910         mtx_enter(&kq->kq_lock);
 1911         knote_detach(kn);
 1912         if (kn->kn_status & KN_QUEUED)
 1913                 knote_dequeue(kn);
 1914         if (kn->kn_status & KN_WAITING) {
 1915                 kn->kn_status &= ~KN_WAITING;
 1916                 wakeup(kn);
 1917         }
 1918         mtx_leave(&kq->kq_lock);
 1919 
 1920         if ((kn->kn_fop->f_flags & FILTEROP_ISFD) && kn->kn_fp != NULL)
 1921                 FRELE(kn->kn_fp, p);
 1922         pool_put(&knote_pool, kn);
 1923 }
 1924 
 1925 
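/*
 * Append the knote to the kqueue's queue of pending events and wake
 * any sleeping scanner; knote_dequeue() below is the inverse.
 */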
 1926 void
 1927 knote_enqueue(struct knote *kn)
 1928 {
 1929         struct kqueue *kq = kn->kn_kq;
 1930 
 1931         MUTEX_ASSERT_LOCKED(&kq->kq_lock);
 1932         KASSERT(kn->kn_filter != EVFILT_MARKER);
 1933         KASSERT((kn->kn_status & KN_QUEUED) == 0);
 1934 
 1935         kqueue_check(kq);
 1936         TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
 1937         kn->kn_status |= KN_QUEUED;
 1938         kq->kq_count++;
 1939         kqueue_check(kq);
 1940         kqueue_wakeup(kq);
 1941 }
 1942 
 1943 void
 1944 knote_dequeue(struct knote *kn)
 1945 {
 1946         struct kqueue *kq = kn->kn_kq;
 1947 
 1948         MUTEX_ASSERT_LOCKED(&kq->kq_lock);
 1949         KASSERT(kn->kn_filter != EVFILT_MARKER);
 1950         KASSERT(kn->kn_status & KN_QUEUED);
 1951 
 1952         kqueue_check(kq);
 1953         TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
 1954         kn->kn_status &= ~KN_QUEUED;
 1955         kq->kq_count--;
 1956         kqueue_check(kq);
 1957 }
 1958 
 1959 /*
 1960  * Assign parameters to the knote.
 1961  *
 1962  * The knote's object lock must be held.
 1963  */
 1964 void
 1965 knote_assign(const struct kevent *kev, struct knote *kn)
 1966 {
 1967         if ((kn->kn_fop->f_flags & FILTEROP_MPSAFE) == 0)
 1968                 KERNEL_ASSERT_LOCKED();
 1969 
 1970         kn->kn_sfflags = kev->fflags;
 1971         kn->kn_sdata = kev->data;
 1972         kn->kn_udata = kev->udata;
 1973 }
 1974 
 1975 /*
 1976  * Submit the knote's event for delivery.
 1977  *
 1978  * The knote's object lock must be held.
 1979  */
 1980 void
 1981 knote_submit(struct knote *kn, struct kevent *kev)
 1982 {
 1983         if ((kn->kn_fop->f_flags & FILTEROP_MPSAFE) == 0)
 1984                 KERNEL_ASSERT_LOCKED();
 1985 
 1986         if (kev != NULL) {
 1987                 *kev = kn->kn_kevent;
 1988                 if (kn->kn_flags & EV_CLEAR) {
 1989                         kn->kn_fflags = 0;
 1990                         kn->kn_data = 0;
 1991                 }
 1992         }
 1993 }
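
/*
 * Example (sketch, not from this file): an MP-safe filter can build
 * its f_modify and f_process callbacks from knote_assign() and
 * knote_submit(), holding its own object lock around the event test.
 * example_mtx, example_check() and the assumed callback signatures
 * are illustrative.
 */
extern struct mutex example_mtx;
extern int example_check(struct knote *);

int
example_modify(struct kevent *kev, struct knote *kn)
{
        int active;

        mtx_enter(&example_mtx);
        knote_assign(kev, kn);
        active = example_check(kn);
        mtx_leave(&example_mtx);
        return (active);
}

int
example_process(struct knote *kn, struct kevent *kev)
{
        int active;

        mtx_enter(&example_mtx);
        active = example_check(kn);
        knote_submit(kn, kev);
        mtx_leave(&example_mtx);
        return (active);
}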
 1994 
 1995 void
 1996 klist_init(struct klist *klist, const struct klistops *ops, void *arg)
 1997 {
 1998         SLIST_INIT(&klist->kl_list);
 1999         klist->kl_ops = ops;
 2000         klist->kl_arg = arg;
 2001 }
 2002 
 2003 void
 2004 klist_free(struct klist *klist)
 2005 {
 2006         KASSERT(SLIST_EMPTY(&klist->kl_list));
 2007 }
 2008 
 2009 void
 2010 klist_insert(struct klist *klist, struct knote *kn)
 2011 {
 2012         int ls;
 2013 
 2014         ls = klist_lock(klist);
 2015         SLIST_INSERT_HEAD(&klist->kl_list, kn, kn_selnext);
 2016         klist_unlock(klist, ls);
 2017 }
 2018 
 2019 void
 2020 klist_insert_locked(struct klist *klist, struct knote *kn)
 2021 {
 2022         KLIST_ASSERT_LOCKED(klist);
 2023 
 2024         SLIST_INSERT_HEAD(&klist->kl_list, kn, kn_selnext);
 2025 }
 2026 
 2027 void
 2028 klist_remove(struct klist *klist, struct knote *kn)
 2029 {
 2030         int ls;
 2031 
 2032         ls = klist_lock(klist);
 2033         SLIST_REMOVE(&klist->kl_list, kn, knote, kn_selnext);
 2034         klist_unlock(klist, ls);
 2035 }
 2036 
 2037 void
 2038 klist_remove_locked(struct klist *klist, struct knote *kn)
 2039 {
 2040         KLIST_ASSERT_LOCKED(klist);
 2041 
 2042         SLIST_REMOVE(&klist->kl_list, kn, knote, kn_selnext);
 2043 }
 2044 
 2045 /*
 2046  * Detach all knotes from klist. The knotes are rewired to indicate EOF.
 2047  *
 2048  * The caller of this function must not hold any locks that can block
 2049  * filterops callbacks that run with KN_PROCESSING.
 2050  * Otherwise this function might deadlock.
 2051  */
 2052 void
 2053 klist_invalidate(struct klist *list)
 2054 {
 2055         struct knote *kn;
 2056         struct kqueue *kq;
 2057         struct proc *p = curproc;
 2058         int ls;
 2059 
 2060         NET_ASSERT_UNLOCKED();
 2061 
 2062         ls = klist_lock(list);
 2063         while ((kn = SLIST_FIRST(&list->kl_list)) != NULL) {
 2064                 kq = kn->kn_kq;
 2065                 mtx_enter(&kq->kq_lock);
 2066                 if (!knote_acquire(kn, list, ls)) {
 2067                         /* knote_acquire() has released kq_lock
 2068                          * and klist lock. */
 2069                         ls = klist_lock(list);
 2070                         continue;
 2071                 }
 2072                 mtx_leave(&kq->kq_lock);
 2073                 klist_unlock(list, ls);
 2074                 filter_detach(kn);
 2075                 if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
 2076                         kn->kn_fop = &dead_filtops;
 2077                         filter_event(kn, 0);
 2078                         mtx_enter(&kq->kq_lock);
 2079                         knote_activate(kn);
 2080                         knote_release(kn);
 2081                         mtx_leave(&kq->kq_lock);
 2082                 } else {
 2083                         knote_drop(kn, p);
 2084                 }
 2085                 ls = klist_lock(list);
 2086         }
 2087         klist_unlock(list, ls);
 2088 }
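
/*
 * Example (sketch, not from this file): teardown of an event source,
 * reusing the hypothetical example_softc from the knote() sketch
 * above.  klist_invalidate() must run before the object is freed and
 * without holding locks the filterops callbacks can take;
 * klist_free() then asserts that no knotes remain.
 */
void
example_detach(struct example_softc *sc)
{
        klist_invalidate(&sc->sc_klist);
        klist_free(&sc->sc_klist);
}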
 2089 
 2090 static int
 2091 klist_lock(struct klist *list)
 2092 {
 2093         int ls = 0;
 2094 
 2095         if (list->kl_ops != NULL) {
 2096                 ls = list->kl_ops->klo_lock(list->kl_arg);
 2097         } else {
 2098                 KERNEL_LOCK();
 2099                 ls = splhigh();
 2100         }
 2101         return ls;
 2102 }
 2103 
 2104 static void
 2105 klist_unlock(struct klist *list, int ls)
 2106 {
 2107         if (list->kl_ops != NULL) {
 2108                 list->kl_ops->klo_unlock(list->kl_arg, ls);
 2109         } else {
 2110                 splx(ls);
 2111                 KERNEL_UNLOCK();
 2112         }
 2113 }
 2114 
 2115 static void
 2116 klist_mutex_assertlk(void *arg)
 2117 {
 2118         struct mutex *mtx = arg;
 2119 
 2120         (void)mtx;
 2121 
 2122         MUTEX_ASSERT_LOCKED(mtx);
 2123 }
 2124 
 2125 static int
 2126 klist_mutex_lock(void *arg)
 2127 {
 2128         struct mutex *mtx = arg;
 2129 
 2130         mtx_enter(mtx);
 2131         return 0;
 2132 }
 2133 
 2134 static void
 2135 klist_mutex_unlock(void *arg, int s)
 2136 {
 2137         struct mutex *mtx = arg;
 2138 
 2139         mtx_leave(mtx);
 2140 }
 2141 
 2142 static const struct klistops mutex_klistops = {
 2143         .klo_assertlk   = klist_mutex_assertlk,
 2144         .klo_lock       = klist_mutex_lock,
 2145         .klo_unlock     = klist_mutex_unlock,
 2146 };
 2147 
 2148 void
 2149 klist_init_mutex(struct klist *klist, struct mutex *mtx)
 2150 {
 2151         klist_init(klist, &mutex_klistops, mtx);
 2152 }
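
/*
 * Example (sketch, not from this file): registering a driver mutex at
 * attach time, again using the hypothetical example_softc.  After
 * this, klist_lock() takes sc_mtx instead of the kernel-lock/splhigh
 * fallback.
 */
void
example_attach(struct example_softc *sc)
{
        mtx_init(&sc->sc_mtx, IPL_MPFLOOR);
        klist_init_mutex(&sc->sc_klist, &sc->sc_mtx);
}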
 2153 
 2154 static void
 2155 klist_rwlock_assertlk(void *arg)
 2156 {
 2157         struct rwlock *rwl = arg;
 2158 
 2159         (void)rwl;
 2160 
 2161         rw_assert_wrlock(rwl);
 2162 }
 2163 
 2164 static int
 2165 klist_rwlock_lock(void *arg)
 2166 {
 2167         struct rwlock *rwl = arg;
 2168 
 2169         rw_enter_write(rwl);
 2170         return 0;
 2171 }
 2172 
 2173 static void
 2174 klist_rwlock_unlock(void *arg, int s)
 2175 {
 2176         struct rwlock *rwl = arg;
 2177 
 2178         rw_exit_write(rwl);
 2179 }
 2180 
 2181 static const struct klistops rwlock_klistops = {
 2182         .klo_assertlk   = klist_rwlock_assertlk,
 2183         .klo_lock       = klist_rwlock_lock,
 2184         .klo_unlock     = klist_rwlock_unlock,
 2185 };
 2186 
 2187 void
 2188 klist_init_rwlock(struct klist *klist, struct rwlock *rwl)
 2189 {
 2190         klist_init(klist, &rwlock_klistops, rwl);
 2191 }
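
/*
 * Example (sketch, not from this file): the rwlock variant is
 * symmetric; klist_lock() takes the write lock, so filter callbacks
 * may assume exclusive access.  The structure and names are
 * hypothetical.
 */
struct example_obj {
        struct rwlock   eo_lock;
        struct klist    eo_klist;
};

void
example_obj_init(struct example_obj *eo)
{
        rw_init(&eo->eo_lock, "exlock");
        klist_init_rwlock(&eo->eo_klist, &eo->eo_lock);
}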
