FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_event.c

/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * Copyright 2004 John-Mark Gurney <jmg@FreeBSD.org>
 * Copyright (c) 2009 Apple, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.1/sys/kern/kern_event.c 264368 2014-04-12 14:08:53Z kib $");

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/capability.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/kthread.h>
#include <sys/selinfo.h>
#include <sys/stdatomic.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/poll.h>
#include <sys/protosw.h>
#include <sys/sigio.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/syscallsubr.h>
#include <sys/taskqueue.h>
#include <sys/uio.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/uma.h>

static MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");

/*
 * This lock is used if multiple kq locks are required.  This possibly
 * should be made into a per proc lock.
 */
static struct mtx       kq_global;
MTX_SYSINIT(kq_global, &kq_global, "kqueue order", MTX_DEF);
#define KQ_GLOBAL_LOCK(lck, haslck)     do {    \
        if (!haslck)                            \
                mtx_lock(lck);                  \
        haslck = 1;                             \
} while (0)
#define KQ_GLOBAL_UNLOCK(lck, haslck)   do {    \
        if (haslck)                             \
                mtx_unlock(lck);                \
        haslck = 0;                             \
} while (0)
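
/*
 * KQ_GLOBAL_LOCK/KQ_GLOBAL_UNLOCK take the lock pointer together with a
 * per-call "haslck" flag, so a code path that may pass this point more
 * than once (e.g. kqueue_register() looping back to findkn) acquires
 * kq_global only once and releases it exactly once on the way out.
 */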

TASKQUEUE_DEFINE_THREAD(kqueue);

static int      kevent_copyout(void *arg, struct kevent *kevp, int count);
static int      kevent_copyin(void *arg, struct kevent *kevp, int count);
static int      kqueue_register(struct kqueue *kq, struct kevent *kev,
                    struct thread *td, int waitok);
static int      kqueue_acquire(struct file *fp, struct kqueue **kqp);
static void     kqueue_release(struct kqueue *kq, int locked);
static int      kqueue_expand(struct kqueue *kq, struct filterops *fops,
                    uintptr_t ident, int waitok);
static void     kqueue_task(void *arg, int pending);
static int      kqueue_scan(struct kqueue *kq, int maxevents,
                    struct kevent_copyops *k_ops,
                    const struct timespec *timeout,
                    struct kevent *keva, struct thread *td);
static void     kqueue_wakeup(struct kqueue *kq);
static struct filterops *kqueue_fo_find(int filt);
static void     kqueue_fo_release(int filt);

static fo_rdwr_t        kqueue_read;
static fo_rdwr_t        kqueue_write;
static fo_truncate_t    kqueue_truncate;
static fo_ioctl_t       kqueue_ioctl;
static fo_poll_t        kqueue_poll;
static fo_kqfilter_t    kqueue_kqfilter;
static fo_stat_t        kqueue_stat;
static fo_close_t       kqueue_close;

static struct fileops kqueueops = {
        .fo_read = kqueue_read,
        .fo_write = kqueue_write,
        .fo_truncate = kqueue_truncate,
        .fo_ioctl = kqueue_ioctl,
        .fo_poll = kqueue_poll,
        .fo_kqfilter = kqueue_kqfilter,
        .fo_stat = kqueue_stat,
        .fo_close = kqueue_close,
        .fo_chmod = invfo_chmod,
        .fo_chown = invfo_chown,
        .fo_sendfile = invfo_sendfile,
};

static int      knote_attach(struct knote *kn, struct kqueue *kq);
static void     knote_drop(struct knote *kn, struct thread *td);
static void     knote_enqueue(struct knote *kn);
static void     knote_dequeue(struct knote *kn);
static void     knote_init(void);
static struct   knote *knote_alloc(int waitok);
static void     knote_free(struct knote *kn);

static void     filt_kqdetach(struct knote *kn);
static int      filt_kqueue(struct knote *kn, long hint);
static int      filt_procattach(struct knote *kn);
static void     filt_procdetach(struct knote *kn);
static int      filt_proc(struct knote *kn, long hint);
static int      filt_fileattach(struct knote *kn);
static void     filt_timerexpire(void *knx);
static int      filt_timerattach(struct knote *kn);
static void     filt_timerdetach(struct knote *kn);
static int      filt_timer(struct knote *kn, long hint);
static int      filt_userattach(struct knote *kn);
static void     filt_userdetach(struct knote *kn);
static int      filt_user(struct knote *kn, long hint);
static void     filt_usertouch(struct knote *kn, struct kevent *kev,
                    u_long type);

static struct filterops file_filtops = {
        .f_isfd = 1,
        .f_attach = filt_fileattach,
};
static struct filterops kqread_filtops = {
        .f_isfd = 1,
        .f_detach = filt_kqdetach,
        .f_event = filt_kqueue,
};
/* XXX - move to kern_proc.c?  */
static struct filterops proc_filtops = {
        .f_isfd = 0,
        .f_attach = filt_procattach,
        .f_detach = filt_procdetach,
        .f_event = filt_proc,
};
static struct filterops timer_filtops = {
        .f_isfd = 0,
        .f_attach = filt_timerattach,
        .f_detach = filt_timerdetach,
        .f_event = filt_timer,
};
static struct filterops user_filtops = {
        .f_attach = filt_userattach,
        .f_detach = filt_userdetach,
        .f_event = filt_user,
        .f_touch = filt_usertouch,
};

static uma_zone_t       knote_zone;
static atomic_uint      kq_ncallouts = ATOMIC_VAR_INIT(0);
static unsigned int     kq_calloutmax = 4 * 1024;
SYSCTL_UINT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
    &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");

/* XXX - ensure not KN_INFLUX?? */
#define KNOTE_ACTIVATE(kn, islock) do {                                 \
        if ((islock))                                                   \
                mtx_assert(&(kn)->kn_kq->kq_lock, MA_OWNED);            \
        else                                                            \
                KQ_LOCK((kn)->kn_kq);                                   \
        (kn)->kn_status |= KN_ACTIVE;                                   \
        if (((kn)->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)         \
                knote_enqueue((kn));                                    \
        if (!(islock))                                                  \
                KQ_UNLOCK((kn)->kn_kq);                                 \
} while(0)
#define KQ_LOCK(kq) do {                                                \
        mtx_lock(&(kq)->kq_lock);                                       \
} while (0)
#define KQ_FLUX_WAKEUP(kq) do {                                         \
        if (((kq)->kq_state & KQ_FLUXWAIT) == KQ_FLUXWAIT) {            \
                (kq)->kq_state &= ~KQ_FLUXWAIT;                         \
                wakeup((kq));                                           \
        }                                                               \
} while (0)
#define KQ_UNLOCK_FLUX(kq) do {                                         \
        KQ_FLUX_WAKEUP(kq);                                             \
        mtx_unlock(&(kq)->kq_lock);                                     \
} while (0)
#define KQ_UNLOCK(kq) do {                                              \
        mtx_unlock(&(kq)->kq_lock);                                     \
} while (0)
#define KQ_OWNED(kq) do {                                               \
        mtx_assert(&(kq)->kq_lock, MA_OWNED);                           \
} while (0)
#define KQ_NOTOWNED(kq) do {                                            \
        mtx_assert(&(kq)->kq_lock, MA_NOTOWNED);                        \
} while (0)
#define KN_LIST_LOCK(kn) do {                                           \
        if (kn->kn_knlist != NULL)                                      \
                kn->kn_knlist->kl_lock(kn->kn_knlist->kl_lockarg);      \
} while (0)
#define KN_LIST_UNLOCK(kn) do {                                         \
        if (kn->kn_knlist != NULL)                                      \
                kn->kn_knlist->kl_unlock(kn->kn_knlist->kl_lockarg);    \
} while (0)
#define KNL_ASSERT_LOCK(knl, islocked) do {                             \
        if (islocked)                                                   \
                KNL_ASSERT_LOCKED(knl);                                 \
        else                                                            \
                KNL_ASSERT_UNLOCKED(knl);                               \
} while (0)
#ifdef INVARIANTS
#define KNL_ASSERT_LOCKED(knl) do {                                     \
        knl->kl_assert_locked((knl)->kl_lockarg);                       \
} while (0)
#define KNL_ASSERT_UNLOCKED(knl) do {                                   \
        knl->kl_assert_unlocked((knl)->kl_lockarg);                     \
} while (0)
#else /* !INVARIANTS */
#define KNL_ASSERT_LOCKED(knl) do {} while(0)
#define KNL_ASSERT_UNLOCKED(knl) do {} while (0)
#endif /* INVARIANTS */

#define KN_HASHSIZE             64              /* XXX should be tunable */
#define KN_HASH(val, mask)      (((val) ^ (val >> 8)) & (mask))
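
/*
 * Example: for a non-fd knote, kqueue_register() below picks the bucket
 * kq->kq_knhash[KN_HASH((u_long)kev->ident, kq->kq_knhashmask)], mixing
 * bits 8..15 of the identifier into the low bits before masking.
 */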

static int
filt_nullattach(struct knote *kn)
{

        return (ENXIO);
};

struct filterops null_filtops = {
        .f_isfd = 0,
        .f_attach = filt_nullattach,
};

/* XXX - make SYSINIT to add these, and move into respective modules. */
extern struct filterops sig_filtops;
extern struct filterops fs_filtops;

/*
 * Table for all system-defined filters.
 */
static struct mtx       filterops_lock;
MTX_SYSINIT(kqueue_filterops, &filterops_lock, "protect sysfilt_ops",
        MTX_DEF);
static struct {
        struct filterops *for_fop;
        int for_refcnt;
} sysfilt_ops[EVFILT_SYSCOUNT] = {
        { &file_filtops },                      /* EVFILT_READ */
        { &file_filtops },                      /* EVFILT_WRITE */
        { &null_filtops },                      /* EVFILT_AIO */
        { &file_filtops },                      /* EVFILT_VNODE */
        { &proc_filtops },                      /* EVFILT_PROC */
        { &sig_filtops },                       /* EVFILT_SIGNAL */
        { &timer_filtops },                     /* EVFILT_TIMER */
        { &null_filtops },                      /* former EVFILT_NETDEV */
        { &fs_filtops },                        /* EVFILT_FS */
        { &null_filtops },                      /* EVFILT_LIO */
        { &user_filtops },                      /* EVFILT_USER */
};
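
/*
 * Filter numbers are small negative integers (EVFILT_READ is -1,
 * EVFILT_WRITE is -2, and so on), so the one's complement ~filt is used
 * as the 0-based index into sysfilt_ops[] by kqueue_add_filteropts() and
 * the other routines below.
 */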

/*
 * Simple redirection for all cdevsw style objects to call their fo_kqfilter
 * method.
 */
static int
filt_fileattach(struct knote *kn)
{

        return (fo_kqfilter(kn->kn_fp, kn));
}

/*ARGSUSED*/
static int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
        struct kqueue *kq = kn->kn_fp->f_data;

        if (kn->kn_filter != EVFILT_READ)
                return (EINVAL);

        kn->kn_status |= KN_KQUEUE;
        kn->kn_fop = &kqread_filtops;
        knlist_add(&kq->kq_sel.si_note, kn, 0);

        return (0);
}

static void
filt_kqdetach(struct knote *kn)
{
        struct kqueue *kq = kn->kn_fp->f_data;

        knlist_remove(&kq->kq_sel.si_note, kn, 0);
}

/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, long hint)
{
        struct kqueue *kq = kn->kn_fp->f_data;

        kn->kn_data = kq->kq_count;
        return (kn->kn_data > 0);
}

/* XXX - move to kern_proc.c?  */
static int
filt_procattach(struct knote *kn)
{
        struct proc *p;
        int immediate;
        int error;

        immediate = 0;
        p = pfind(kn->kn_id);
        if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
                p = zpfind(kn->kn_id);
                immediate = 1;
        } else if (p != NULL && (p->p_flag & P_WEXIT)) {
                immediate = 1;
        }

        if (p == NULL)
                return (ESRCH);
        if ((error = p_cansee(curthread, p))) {
                PROC_UNLOCK(p);
                return (error);
        }

        kn->kn_ptr.p_proc = p;
        kn->kn_flags |= EV_CLEAR;               /* automatically set */

        /*
         * internal flag indicating registration done by kernel
         */
        if (kn->kn_flags & EV_FLAG1) {
                kn->kn_data = kn->kn_sdata;             /* ppid */
                kn->kn_fflags = NOTE_CHILD;
                kn->kn_flags &= ~EV_FLAG1;
        }

        if (immediate == 0)
                knlist_add(&p->p_klist, kn, 1);

        /*
         * Immediately activate any exit notes if the target process is a
         * zombie.  This is necessary to handle the case where the target
         * process, e.g. a child, dies before the kevent is registered.
         */
        if (immediate && filt_proc(kn, NOTE_EXIT))
                KNOTE_ACTIVATE(kn, 0);

        PROC_UNLOCK(p);

        return (0);
}

/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
/* XXX - move to kern_proc.c?  */
static void
filt_procdetach(struct knote *kn)
{
        struct proc *p;

        p = kn->kn_ptr.p_proc;
        knlist_remove(&p->p_klist, kn, 0);
        kn->kn_ptr.p_proc = NULL;
}

/* XXX - move to kern_proc.c?  */
static int
filt_proc(struct knote *kn, long hint)
{
        struct proc *p = kn->kn_ptr.p_proc;
        u_int event;

        /*
         * mask off extra data
         */
        event = (u_int)hint & NOTE_PCTRLMASK;

        /*
         * if the user is interested in this event, record it.
         */
        if (kn->kn_sfflags & event)
                kn->kn_fflags |= event;

        /*
         * process is gone, so flag the event as finished.
         */
        if (event == NOTE_EXIT) {
                if (!(kn->kn_status & KN_DETACHED))
                        knlist_remove_inevent(&p->p_klist, kn);
                kn->kn_flags |= (EV_EOF | EV_ONESHOT);
                kn->kn_ptr.p_proc = NULL;
                if (kn->kn_fflags & NOTE_EXIT)
                        kn->kn_data = p->p_xstat;
                if (kn->kn_fflags == 0)
                        kn->kn_flags |= EV_DROP;
                return (1);
        }

        return (kn->kn_fflags != 0);
}

/*
 * Called when a process forks.  It mostly does the same as knote(),
 * activating all knotes registered to fire when the process forks.
 * Additionally, for each knote attached to the parent, check whether
 * the user wants to track the new process.  If so, attach a new knote
 * to the child and immediately report an event with the child's pid.
 */
void
knote_fork(struct knlist *list, int pid)
{
        struct kqueue *kq;
        struct knote *kn;
        struct kevent kev;
        int error;

        if (list == NULL)
                return;
        list->kl_lock(list->kl_lockarg);

        SLIST_FOREACH(kn, &list->kl_list, kn_selnext) {
                if ((kn->kn_status & KN_INFLUX) == KN_INFLUX)
                        continue;
                kq = kn->kn_kq;
                KQ_LOCK(kq);
                if ((kn->kn_status & (KN_INFLUX | KN_SCAN)) == KN_INFLUX) {
                        KQ_UNLOCK(kq);
                        continue;
                }

                /*
                 * The same as knote(), activate the event.
                 */
                if ((kn->kn_sfflags & NOTE_TRACK) == 0) {
                        kn->kn_status |= KN_HASKQLOCK;
                        if (kn->kn_fop->f_event(kn, NOTE_FORK))
                                KNOTE_ACTIVATE(kn, 1);
                        kn->kn_status &= ~KN_HASKQLOCK;
                        KQ_UNLOCK(kq);
                        continue;
                }

                /*
                 * The NOTE_TRACK case. In addition to the activation
                 * of the event, we need to register new event to
                 * track the child. Drop the locks in preparation for
                 * the call to kqueue_register().
                 */
                kn->kn_status |= KN_INFLUX;
                KQ_UNLOCK(kq);
                list->kl_unlock(list->kl_lockarg);

                /*
                 * Activate existing knote and register a knote with
                 * new process.
                 */
                kev.ident = pid;
                kev.filter = kn->kn_filter;
                kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
                kev.fflags = kn->kn_sfflags;
                kev.data = kn->kn_id;           /* parent */
                kev.udata = kn->kn_kevent.udata;/* preserve udata */
                error = kqueue_register(kq, &kev, NULL, 0);
                if (error)
                        kn->kn_fflags |= NOTE_TRACKERR;
                if (kn->kn_fop->f_event(kn, NOTE_FORK))
                        KNOTE_ACTIVATE(kn, 0);
                KQ_LOCK(kq);
                kn->kn_status &= ~KN_INFLUX;
                KQ_UNLOCK_FLUX(kq);
                list->kl_lock(list->kl_lockarg);
        }
        list->kl_unlock(list->kl_lockarg);
}

/*
 * XXX: EVFILT_TIMER should perhaps live in kern_time.c beside the
 * interval timer support code.
 */
static __inline sbintime_t
timer2sbintime(intptr_t data)
{

        return (SBT_1MS * data);
}
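
/*
 * The period in kn_sdata is interpreted as milliseconds, so e.g.
 * data == 1000 yields roughly one second in sbintime_t units.  The
 * multiplication can overflow for huge values; filt_timerattach()
 * catches that by rejecting a negative result.
 */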

static void
filt_timerexpire(void *knx)
{
        struct callout *calloutp;
        struct knote *kn;

        kn = knx;
        kn->kn_data++;
        KNOTE_ACTIVATE(kn, 0);  /* XXX - handle locking */

        if ((kn->kn_flags & EV_ONESHOT) != EV_ONESHOT) {
                calloutp = (struct callout *)kn->kn_hook;
                callout_reset_sbt_on(calloutp,
                    timer2sbintime(kn->kn_sdata), 0 /* 1ms? */,
                    filt_timerexpire, kn, PCPU_GET(cpuid), 0);
        }
}

/*
 * data contains amount of time to sleep, in milliseconds
 */
static int
filt_timerattach(struct knote *kn)
{
        struct callout *calloutp;
        sbintime_t to;
        unsigned int ncallouts;

        if ((intptr_t)kn->kn_sdata < 0)
                return (EINVAL);
        if ((intptr_t)kn->kn_sdata == 0 && (kn->kn_flags & EV_ONESHOT) == 0)
                kn->kn_sdata = 1;
        to = timer2sbintime(kn->kn_sdata);
        if (to < 0)
                return (EINVAL);

        ncallouts = atomic_load_explicit(&kq_ncallouts, memory_order_relaxed);
        do {
                if (ncallouts >= kq_calloutmax)
                        return (ENOMEM);
        } while (!atomic_compare_exchange_weak_explicit(&kq_ncallouts,
            &ncallouts, ncallouts + 1, memory_order_relaxed,
            memory_order_relaxed));

        kn->kn_flags |= EV_CLEAR;               /* automatically set */
        kn->kn_status &= ~KN_DETACHED;          /* knlist_add clears it */
        calloutp = malloc(sizeof(*calloutp), M_KQUEUE, M_WAITOK);
        callout_init(calloutp, CALLOUT_MPSAFE);
        kn->kn_hook = calloutp;
        callout_reset_sbt_on(calloutp, to, 0 /* 1ms? */,
            filt_timerexpire, kn, PCPU_GET(cpuid), 0);

        return (0);
}

static void
filt_timerdetach(struct knote *kn)
{
        struct callout *calloutp;
        unsigned int old;

        calloutp = (struct callout *)kn->kn_hook;
        callout_drain(calloutp);
        free(calloutp, M_KQUEUE);
        old = atomic_fetch_sub_explicit(&kq_ncallouts, 1, memory_order_relaxed);
        KASSERT(old > 0, ("Number of callouts cannot become negative"));
        kn->kn_status |= KN_DETACHED;   /* knlist_remove sets it */
}

static int
filt_timer(struct knote *kn, long hint)
{

        return (kn->kn_data != 0);
}

static int
filt_userattach(struct knote *kn)
{

        /*
         * EVFILT_USER knotes are not attached to anything in the kernel.
         */
        kn->kn_hook = NULL;
        if (kn->kn_fflags & NOTE_TRIGGER)
                kn->kn_hookid = 1;
        else
                kn->kn_hookid = 0;
        return (0);
}

static void
filt_userdetach(__unused struct knote *kn)
{

        /*
         * EVFILT_USER knotes are not attached to anything in the kernel.
         */
}

static int
filt_user(struct knote *kn, __unused long hint)
{

        return (kn->kn_hookid);
}

static void
filt_usertouch(struct knote *kn, struct kevent *kev, u_long type)
{
        u_int ffctrl;

        switch (type) {
        case EVENT_REGISTER:
                if (kev->fflags & NOTE_TRIGGER)
                        kn->kn_hookid = 1;

                ffctrl = kev->fflags & NOTE_FFCTRLMASK;
                kev->fflags &= NOTE_FFLAGSMASK;
                switch (ffctrl) {
                case NOTE_FFNOP:
                        break;

                case NOTE_FFAND:
                        kn->kn_sfflags &= kev->fflags;
                        break;

                case NOTE_FFOR:
                        kn->kn_sfflags |= kev->fflags;
                        break;

                case NOTE_FFCOPY:
                        kn->kn_sfflags = kev->fflags;
                        break;

                default:
                        /* XXX Return error? */
                        break;
                }
                kn->kn_sdata = kev->data;
                if (kev->flags & EV_CLEAR) {
                        kn->kn_hookid = 0;
                        kn->kn_data = 0;
                        kn->kn_fflags = 0;
                }
                break;

        case EVENT_PROCESS:
                *kev = kn->kn_kevent;
                kev->fflags = kn->kn_sfflags;
                kev->data = kn->kn_sdata;
                if (kn->kn_flags & EV_CLEAR) {
                        kn->kn_hookid = 0;
                        kn->kn_data = 0;
                        kn->kn_fflags = 0;
                }
                break;

        default:
                panic("filt_usertouch() - invalid type (%ld)", type);
                break;
        }
}
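
/*
 * Illustrative userland sketch (not part of this file) of driving the
 * EVFILT_USER machinery above: the event is registered once with EV_ADD
 * and later fired by a second kevent() call carrying NOTE_TRIGGER:
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 *	EV_SET(&kev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 */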

int
sys_kqueue(struct thread *td, struct kqueue_args *uap)
{
        struct filedesc *fdp;
        struct kqueue *kq;
        struct file *fp;
        int fd, error;

        fdp = td->td_proc->p_fd;
        error = falloc(td, &fp, &fd, 0);
        if (error)
                goto done2;

        /* An extra reference on `fp' has been held for us by falloc(). */
        kq = malloc(sizeof *kq, M_KQUEUE, M_WAITOK | M_ZERO);
        mtx_init(&kq->kq_lock, "kqueue", NULL, MTX_DEF|MTX_DUPOK);
        TAILQ_INIT(&kq->kq_head);
        kq->kq_fdp = fdp;
        knlist_init_mtx(&kq->kq_sel.si_note, &kq->kq_lock);
        TASK_INIT(&kq->kq_task, 0, kqueue_task, kq);

        FILEDESC_XLOCK(fdp);
        TAILQ_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_list);
        FILEDESC_XUNLOCK(fdp);

        finit(fp, FREAD | FWRITE, DTYPE_KQUEUE, kq, &kqueueops);
        fdrop(fp, td);

        td->td_retval[0] = fd;
done2:
        return (error);
}
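
/*
 * Illustrative userland sketch (not part of this file) of the syscall
 * pair implemented here and in sys_kevent() below: create a queue, watch
 * a descriptor fd for readability, then collect one event:
 *
 *	struct kevent change, ev;
 *	int kq = kqueue();
 *
 *	EV_SET(&change, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &change, 1, NULL, 0, NULL);
 *	kevent(kq, NULL, 0, &ev, 1, NULL);
 */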

#ifndef _SYS_SYSPROTO_H_
struct kevent_args {
        int     fd;
        const struct kevent *changelist;
        int     nchanges;
        struct  kevent *eventlist;
        int     nevents;
        const struct timespec *timeout;
};
#endif
int
sys_kevent(struct thread *td, struct kevent_args *uap)
{
        struct timespec ts, *tsp;
        struct kevent_copyops k_ops = { uap,
                                        kevent_copyout,
                                        kevent_copyin};
        int error;
#ifdef KTRACE
        struct uio ktruio;
        struct iovec ktriov;
        struct uio *ktruioin = NULL;
        struct uio *ktruioout = NULL;
#endif

        if (uap->timeout != NULL) {
                error = copyin(uap->timeout, &ts, sizeof(ts));
                if (error)
                        return (error);
                tsp = &ts;
        } else
                tsp = NULL;

#ifdef KTRACE
        if (KTRPOINT(td, KTR_GENIO)) {
                ktriov.iov_base = uap->changelist;
                ktriov.iov_len = uap->nchanges * sizeof(struct kevent);
                ktruio = (struct uio){ .uio_iov = &ktriov, .uio_iovcnt = 1,
                    .uio_segflg = UIO_USERSPACE, .uio_rw = UIO_READ,
                    .uio_td = td };
                ktruioin = cloneuio(&ktruio);
                ktriov.iov_base = uap->eventlist;
                ktriov.iov_len = uap->nevents * sizeof(struct kevent);
                ktruioout = cloneuio(&ktruio);
        }
#endif

        error = kern_kevent(td, uap->fd, uap->nchanges, uap->nevents,
            &k_ops, tsp);

#ifdef KTRACE
        if (ktruioin != NULL) {
                ktruioin->uio_resid = uap->nchanges * sizeof(struct kevent);
                ktrgenio(uap->fd, UIO_WRITE, ktruioin, 0);
                ktruioout->uio_resid = td->td_retval[0] * sizeof(struct kevent);
                ktrgenio(uap->fd, UIO_READ, ktruioout, error);
        }
#endif

        return (error);
}

/*
 * Copy 'count' items into the destination list pointed to by uap->eventlist.
 */
static int
kevent_copyout(void *arg, struct kevent *kevp, int count)
{
        struct kevent_args *uap;
        int error;

        KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
        uap = (struct kevent_args *)arg;

        error = copyout(kevp, uap->eventlist, count * sizeof *kevp);
        if (error == 0)
                uap->eventlist += count;
        return (error);
}

/*
 * Copy 'count' items from the list pointed to by uap->changelist.
 */
static int
kevent_copyin(void *arg, struct kevent *kevp, int count)
{
        struct kevent_args *uap;
        int error;

        KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
        uap = (struct kevent_args *)arg;

        error = copyin(uap->changelist, kevp, count * sizeof *kevp);
        if (error == 0)
                uap->changelist += count;
        return (error);
}
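
/*
 * kevent_copyout() and kevent_copyin() above are only the native-ABI
 * instances of the kevent_copyops hooks; kern_kevent() itself never
 * touches user memory directly, so other consumers (a 32-bit compat
 * layer, for example) can supply their own copy routines.
 */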

int
kern_kevent(struct thread *td, int fd, int nchanges, int nevents,
    struct kevent_copyops *k_ops, const struct timespec *timeout)
{
        struct kevent keva[KQ_NEVENTS];
        struct kevent *kevp, *changes;
        struct kqueue *kq;
        struct file *fp;
        cap_rights_t rights;
        int i, n, nerrors, error;

        cap_rights_init(&rights);
        if (nchanges > 0)
                cap_rights_set(&rights, CAP_KQUEUE_CHANGE);
        if (nevents > 0)
                cap_rights_set(&rights, CAP_KQUEUE_EVENT);
        error = fget(td, fd, &rights, &fp);
        if (error != 0)
                return (error);

        error = kqueue_acquire(fp, &kq);
        if (error != 0)
                goto done_norel;

        nerrors = 0;

        while (nchanges > 0) {
                n = nchanges > KQ_NEVENTS ? KQ_NEVENTS : nchanges;
                error = k_ops->k_copyin(k_ops->arg, keva, n);
                if (error)
                        goto done;
                changes = keva;
                for (i = 0; i < n; i++) {
                        kevp = &changes[i];
                        if (!kevp->filter)
                                continue;
                        kevp->flags &= ~EV_SYSFLAGS;
                        error = kqueue_register(kq, kevp, td, 1);
                        if (error || (kevp->flags & EV_RECEIPT)) {
                                if (nevents != 0) {
                                        kevp->flags = EV_ERROR;
                                        kevp->data = error;
                                        (void) k_ops->k_copyout(k_ops->arg,
                                            kevp, 1);
                                        nevents--;
                                        nerrors++;
                                } else {
                                        goto done;
                                }
                        }
                }
                nchanges -= n;
        }
        if (nerrors) {
                td->td_retval[0] = nerrors;
                error = 0;
                goto done;
        }

        error = kqueue_scan(kq, nevents, k_ops, timeout, keva, td);
done:
        kqueue_release(kq, 0);
done_norel:
        fdrop(fp, td);
        return (error);
}

int
kqueue_add_filteropts(int filt, struct filterops *filtops)
{
        int error;

        error = 0;
        if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) {
                printf(
"trying to add a filterop that is out of range: %d is beyond %d\n",
                    ~filt, EVFILT_SYSCOUNT);
                return EINVAL;
        }
        mtx_lock(&filterops_lock);
        if (sysfilt_ops[~filt].for_fop != &null_filtops &&
            sysfilt_ops[~filt].for_fop != NULL)
                error = EEXIST;
        else {
                sysfilt_ops[~filt].for_fop = filtops;
                sysfilt_ops[~filt].for_refcnt = 0;
        }
        mtx_unlock(&filterops_lock);

        return (error);
}

int
kqueue_del_filteropts(int filt)
{
        int error;

        error = 0;
        if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
                return EINVAL;

        mtx_lock(&filterops_lock);
        if (sysfilt_ops[~filt].for_fop == &null_filtops ||
            sysfilt_ops[~filt].for_fop == NULL)
                error = EINVAL;
        else if (sysfilt_ops[~filt].for_refcnt != 0)
                error = EBUSY;
        else {
                sysfilt_ops[~filt].for_fop = &null_filtops;
                sysfilt_ops[~filt].for_refcnt = 0;
        }
        mtx_unlock(&filterops_lock);

        return error;
}

static struct filterops *
kqueue_fo_find(int filt)
{

        if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
                return NULL;

        mtx_lock(&filterops_lock);
        sysfilt_ops[~filt].for_refcnt++;
        if (sysfilt_ops[~filt].for_fop == NULL)
                sysfilt_ops[~filt].for_fop = &null_filtops;
        mtx_unlock(&filterops_lock);

        return sysfilt_ops[~filt].for_fop;
}

static void
kqueue_fo_release(int filt)
{

        if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
                return;

        mtx_lock(&filterops_lock);
        KASSERT(sysfilt_ops[~filt].for_refcnt > 0,
            ("filter object refcount not valid on release"));
        sysfilt_ops[~filt].for_refcnt--;
        mtx_unlock(&filterops_lock);
}

/*
 * A reference to kq (obtained via kqueue_acquire) must be held.  waitok
 * controls whether memory allocations may sleep; make sure it is 0 if
 * any mutexes are held.
 */
static int
kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td, int waitok)
{
        struct filterops *fops;
        struct file *fp;
        struct knote *kn, *tkn;
        cap_rights_t rights;
        int error, filt, event;
        int haskqglobal, filedesc_unlock;

        fp = NULL;
        kn = NULL;
        error = 0;
        haskqglobal = 0;
        filedesc_unlock = 0;

        filt = kev->filter;
        fops = kqueue_fo_find(filt);
        if (fops == NULL)
                return EINVAL;

        tkn = knote_alloc(waitok);              /* prevent waiting with locks */

findkn:
        if (fops->f_isfd) {
                KASSERT(td != NULL, ("td is NULL"));
                error = fget(td, kev->ident,
                    cap_rights_init(&rights, CAP_EVENT), &fp);
                if (error)
                        goto done;

                if ((kev->flags & EV_ADD) == EV_ADD && kqueue_expand(kq, fops,
                    kev->ident, 0) != 0) {
                        /* try again */
                        fdrop(fp, td);
                        fp = NULL;
                        error = kqueue_expand(kq, fops, kev->ident, waitok);
                        if (error)
                                goto done;
                        goto findkn;
                }

                if (fp->f_type == DTYPE_KQUEUE) {
                        /*
                         * If we add some intelligence about what we are doing,
                         * we should be able to support events on ourselves.
                         * We need to know when we are doing this to prevent
                         * getting both the knlist lock and the kq lock, since
                         * they are the same thing.
                         */
                        if (fp->f_data == kq) {
                                error = EINVAL;
                                goto done;
                        }

                        /*
                         * Pre-lock the filedesc before the global
                         * lock mutex, see the comment in
                         * kqueue_close().
                         */
                        FILEDESC_XLOCK(td->td_proc->p_fd);
                        filedesc_unlock = 1;
                        KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
                }

                KQ_LOCK(kq);
                if (kev->ident < kq->kq_knlistsize) {
                        SLIST_FOREACH(kn, &kq->kq_knlist[kev->ident], kn_link)
                                if (kev->filter == kn->kn_filter)
                                        break;
                }
        } else {
                if ((kev->flags & EV_ADD) == EV_ADD)
                        kqueue_expand(kq, fops, kev->ident, waitok);

                KQ_LOCK(kq);
                if (kq->kq_knhashmask != 0) {
                        struct klist *list;

                        list = &kq->kq_knhash[
                            KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
                        SLIST_FOREACH(kn, list, kn_link)
                                if (kev->ident == kn->kn_id &&
                                    kev->filter == kn->kn_filter)
                                        break;
                }
        }

        /* The knote is in the process of changing; wait for it to stabilize. */
        if (kn != NULL && (kn->kn_status & KN_INFLUX) == KN_INFLUX) {
                KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
                if (filedesc_unlock) {
                        FILEDESC_XUNLOCK(td->td_proc->p_fd);
                        filedesc_unlock = 0;
                }
                kq->kq_state |= KQ_FLUXWAIT;
                msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqflxwt", 0);
                if (fp != NULL) {
                        fdrop(fp, td);
                        fp = NULL;
                }
                goto findkn;
        }

        /*
         * kn now contains the matching knote, or NULL if no match
         */
        if (kn == NULL) {
                if (kev->flags & EV_ADD) {
                        kn = tkn;
                        tkn = NULL;
                        if (kn == NULL) {
                                KQ_UNLOCK(kq);
                                error = ENOMEM;
                                goto done;
                        }
                        kn->kn_fp = fp;
                        kn->kn_kq = kq;
                        kn->kn_fop = fops;
                        /*
                         * apply reference counts to knote structure, and
                         * do not release it at the end of this routine.
                         */
                        fops = NULL;
                        fp = NULL;

                        kn->kn_sfflags = kev->fflags;
                        kn->kn_sdata = kev->data;
                        kev->fflags = 0;
                        kev->data = 0;
                        kn->kn_kevent = *kev;
                        kn->kn_kevent.flags &= ~(EV_ADD | EV_DELETE |
                            EV_ENABLE | EV_DISABLE);
                        kn->kn_status = KN_INFLUX|KN_DETACHED;

                        error = knote_attach(kn, kq);
                        KQ_UNLOCK(kq);
                        if (error != 0) {
                                tkn = kn;
                                goto done;
                        }

                        if ((error = kn->kn_fop->f_attach(kn)) != 0) {
                                knote_drop(kn, td);
                                goto done;
                        }
                        KN_LIST_LOCK(kn);
                        goto done_ev_add;
                } else {
                        /* No matching knote and the EV_ADD flag is not set. */
                        KQ_UNLOCK(kq);
                        error = ENOENT;
                        goto done;
                }
        }

        if (kev->flags & EV_DELETE) {
                kn->kn_status |= KN_INFLUX;
                KQ_UNLOCK(kq);
                if (!(kn->kn_status & KN_DETACHED))
                        kn->kn_fop->f_detach(kn);
                knote_drop(kn, td);
                goto done;
        }

        /*
         * The user may change some filter values after the initial EV_ADD,
         * but doing so will not reset any filter which has already been
         * triggered.
         */
        kn->kn_status |= KN_INFLUX | KN_SCAN;
        KQ_UNLOCK(kq);
        KN_LIST_LOCK(kn);
        kn->kn_kevent.udata = kev->udata;
        if (!fops->f_isfd && fops->f_touch != NULL) {
                fops->f_touch(kn, kev, EVENT_REGISTER);
        } else {
                kn->kn_sfflags = kev->fflags;
                kn->kn_sdata = kev->data;
        }

        /*
         * We can get here with kn->kn_knlist == NULL.  This can happen when
         * the initial attach event decides that the event is "completed"
         * already, e.g. when filt_procattach is called on a zombie process.
         * It will call filt_proc, which will remove the knote from the list
         * and NULL out kn_knlist.
         */
done_ev_add:
        event = kn->kn_fop->f_event(kn, 0);
        KQ_LOCK(kq);
        if (event)
                KNOTE_ACTIVATE(kn, 1);
        kn->kn_status &= ~(KN_INFLUX | KN_SCAN);
        KN_LIST_UNLOCK(kn);

        if ((kev->flags & EV_DISABLE) &&
            ((kn->kn_status & KN_DISABLED) == 0)) {
                kn->kn_status |= KN_DISABLED;
        }

        if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
                kn->kn_status &= ~KN_DISABLED;
                if ((kn->kn_status & KN_ACTIVE) &&
                    ((kn->kn_status & KN_QUEUED) == 0))
                        knote_enqueue(kn);
        }
        KQ_UNLOCK_FLUX(kq);

done:
        KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
        if (filedesc_unlock)
                FILEDESC_XUNLOCK(td->td_proc->p_fd);
        if (fp != NULL)
                fdrop(fp, td);
        if (tkn != NULL)
                knote_free(tkn);
        if (fops != NULL)
                kqueue_fo_release(filt);
        return (error);
}

static int
kqueue_acquire(struct file *fp, struct kqueue **kqp)
{
        int error;
        struct kqueue *kq;

        error = 0;

        kq = fp->f_data;
        if (fp->f_type != DTYPE_KQUEUE || kq == NULL)
                return (EBADF);
        *kqp = kq;
        KQ_LOCK(kq);
        if ((kq->kq_state & KQ_CLOSING) == KQ_CLOSING) {
                KQ_UNLOCK(kq);
                return (EBADF);
        }
        kq->kq_refcnt++;
        KQ_UNLOCK(kq);

        return error;
}

static void
kqueue_release(struct kqueue *kq, int locked)
{
        if (locked)
                KQ_OWNED(kq);
        else
                KQ_LOCK(kq);
        kq->kq_refcnt--;
        if (kq->kq_refcnt == 1)
                wakeup(&kq->kq_refcnt);
        if (!locked)
                KQ_UNLOCK(kq);
}
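
/*
 * A count of 1 means only the file's own reference remains, so the
 * wakeup on kq_refcnt unblocks kqueue_close(), which waits for
 * transient kqueue_acquire() references to drain before tearing the
 * queue down.
 */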

static void
kqueue_schedtask(struct kqueue *kq)
{

        KQ_OWNED(kq);
        KASSERT(((kq->kq_state & KQ_TASKDRAIN) != KQ_TASKDRAIN),
            ("scheduling kqueue task while draining"));

        if ((kq->kq_state & KQ_TASKSCHED) != KQ_TASKSCHED) {
                taskqueue_enqueue(taskqueue_kqueue, &kq->kq_task);
                kq->kq_state |= KQ_TASKSCHED;
        }
}

/*
 * Expand the kq to make sure we have storage for the fops/ident pair.
 *
 * Return 0 on success (or no work necessary), or errno on failure.
 *
 * Not calling hashinit w/ waitok (the proper malloc flag) should be safe.
 * If kqueue_register is called from a non-fd context, there usually are,
 * and should be, no locks held.
 */
static int
kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
        int waitok)
{
        struct klist *list, *tmp_knhash, *to_free;
        u_long tmp_knhashmask;
        int size;
        int fd;
        int mflag = waitok ? M_WAITOK : M_NOWAIT;

        KQ_NOTOWNED(kq);

        to_free = NULL;
        if (fops->f_isfd) {
                fd = ident;
                if (kq->kq_knlistsize <= fd) {
                        size = kq->kq_knlistsize;
                        while (size <= fd)
                                size += KQEXTENT;
                        list = malloc(size * sizeof(*list), M_KQUEUE, mflag);
                        if (list == NULL)
                                return ENOMEM;
                        KQ_LOCK(kq);
                        if (kq->kq_knlistsize > fd) {
                                to_free = list;
                                list = NULL;
                        } else {
                                if (kq->kq_knlist != NULL) {
                                        bcopy(kq->kq_knlist, list,
                                            kq->kq_knlistsize * sizeof(*list));
                                        to_free = kq->kq_knlist;
                                        kq->kq_knlist = NULL;
                                }
                                bzero((caddr_t)list +
                                    kq->kq_knlistsize * sizeof(*list),
                                    (size - kq->kq_knlistsize) * sizeof(*list));
                                kq->kq_knlistsize = size;
                                kq->kq_knlist = list;
                        }
                        KQ_UNLOCK(kq);
                }
        } else {
                if (kq->kq_knhashmask == 0) {
                        tmp_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
                            &tmp_knhashmask);
                        if (tmp_knhash == NULL)
                                return ENOMEM;
                        KQ_LOCK(kq);
                        if (kq->kq_knhashmask == 0) {
                                kq->kq_knhash = tmp_knhash;
                                kq->kq_knhashmask = tmp_knhashmask;
                        } else {
                                to_free = tmp_knhash;
                        }
                        KQ_UNLOCK(kq);
                }
        }
        free(to_free, M_KQUEUE);

        KQ_NOTOWNED(kq);
        return 0;
}
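
/*
 * Note the allocate-then-recheck pattern above: the new table is
 * allocated without the kq lock held, the size is re-tested under
 * KQ_LOCK(), and whichever copy lost the race is handed to free()
 * via to_free.  This keeps potentially-sleeping allocations out of
 * the locked region.
 */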

static void
kqueue_task(void *arg, int pending)
{
        struct kqueue *kq;
        int haskqglobal;

        haskqglobal = 0;
        kq = arg;

        KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
        KQ_LOCK(kq);

        KNOTE_LOCKED(&kq->kq_sel.si_note, 0);

        kq->kq_state &= ~KQ_TASKSCHED;
        if ((kq->kq_state & KQ_TASKDRAIN) == KQ_TASKDRAIN) {
                wakeup(&kq->kq_state);
        }
        KQ_UNLOCK(kq);
        KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
}
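
/*
 * kqueue_task() runs from the kqueue taskqueue thread, scheduled by
 * kqueue_schedtask().  Deferring the KNOTE on kq_sel.si_note to a task
 * lets kq_global be taken first, establishing a fixed order for the two
 * kq locks involved when one kqueue is watched by another.
 */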
 1344 
 1345 /*
 1346  * Scan, update kn_data (if not ONESHOT), and copyout triggered events.
 1347  * We treat KN_MARKER knotes as if they are INFLUX.
 1348  */
 1349 static int
 1350 kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops,
 1351     const struct timespec *tsp, struct kevent *keva, struct thread *td)
 1352 {
 1353         struct kevent *kevp;
 1354         struct knote *kn, *marker;
 1355         sbintime_t asbt, rsbt;
 1356         int count, error, haskqglobal, influx, nkev, touch;
 1357 
 1358         count = maxevents;
 1359         nkev = 0;
 1360         error = 0;
 1361         haskqglobal = 0;
 1362 
 1363         if (maxevents == 0)
 1364                 goto done_nl;
 1365 
 1366         rsbt = 0;
 1367         if (tsp != NULL) {
 1368                 if (tsp->tv_sec < 0 || tsp->tv_nsec < 0 ||
 1369                     tsp->tv_nsec >= 1000000000) {
 1370                         error = EINVAL;
 1371                         goto done_nl;
 1372                 }
 1373                 if (timespecisset(tsp)) {
 1374                         if (tsp->tv_sec <= INT32_MAX) {
 1375                                 rsbt = tstosbt(*tsp);
 1376                                 if (TIMESEL(&asbt, rsbt))
 1377                                         asbt += tc_tick_sbt;
 1378                                 if (asbt <= INT64_MAX - rsbt)
 1379                                         asbt += rsbt;
 1380                                 else
 1381                                         asbt = 0;
 1382                                 rsbt >>= tc_precexp;
 1383                         } else
 1384                                 asbt = 0;
 1385                 } else
 1386                         asbt = -1;
 1387         } else
 1388                 asbt = 0;
 1389         marker = knote_alloc(1);
 1390         if (marker == NULL) {
 1391                 error = ENOMEM;
 1392                 goto done_nl;
 1393         }
 1394         marker->kn_status = KN_MARKER;
 1395         KQ_LOCK(kq);
 1396 
 1397 retry:
 1398         kevp = keva;
 1399         if (kq->kq_count == 0) {
 1400                 if (asbt == -1) {
 1401                         error = EWOULDBLOCK;
 1402                 } else {
 1403                         kq->kq_state |= KQ_SLEEP;
 1404                         error = msleep_sbt(kq, &kq->kq_lock, PSOCK | PCATCH,
 1405                             "kqread", asbt, rsbt, C_ABSOLUTE);
 1406                 }
 1407                 if (error == 0)
 1408                         goto retry;
 1409                 /* don't restart after signals... */
 1410                 if (error == ERESTART)
 1411                         error = EINTR;
 1412                 else if (error == EWOULDBLOCK)
 1413                         error = 0;
 1414                 goto done;
 1415         }
 1416 
 1417         TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe);
 1418         influx = 0;
 1419         while (count) {
 1420                 KQ_OWNED(kq);
 1421                 kn = TAILQ_FIRST(&kq->kq_head);
 1422 
 1423                 if ((kn->kn_status == KN_MARKER && kn != marker) ||
 1424                     (kn->kn_status & KN_INFLUX) == KN_INFLUX) {
 1425                         if (influx) {
 1426                                 influx = 0;
 1427                                 KQ_FLUX_WAKEUP(kq);
 1428                         }
 1429                         kq->kq_state |= KQ_FLUXWAIT;
 1430                         error = msleep(kq, &kq->kq_lock, PSOCK,
 1431                             "kqflxwt", 0);
 1432                         continue;
 1433                 }
 1434 
 1435                 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
 1436                 if ((kn->kn_status & KN_DISABLED) == KN_DISABLED) {
 1437                         kn->kn_status &= ~KN_QUEUED;
 1438                         kq->kq_count--;
 1439                         continue;
 1440                 }
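                      /*
                       * Descriptive note: reaching the marker means every
                       * knote queued when this pass began has been examined.
                       * If nothing was collected, retry, since the wakeup
                       * implies new activity; otherwise return what we have.
                       */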
 1441                 if (kn == marker) {
 1442                         KQ_FLUX_WAKEUP(kq);
 1443                         if (count == maxevents)
 1444                                 goto retry;
 1445                         goto done;
 1446                 }
 1447                 KASSERT((kn->kn_status & KN_INFLUX) == 0,
 1448                     ("KN_INFLUX set when not supposed to be"));
 1449 
 1450                 if ((kn->kn_flags & EV_DROP) == EV_DROP) {
 1451                         kn->kn_status &= ~KN_QUEUED;
 1452                         kn->kn_status |= KN_INFLUX;
 1453                         kq->kq_count--;
 1454                         KQ_UNLOCK(kq);
 1455                         /*
 1456                          * We don't need to lock the list since we've marked
 1457                          * it _INFLUX.
 1458                          */
 1459                         if (!(kn->kn_status & KN_DETACHED))
 1460                                 kn->kn_fop->f_detach(kn);
 1461                         knote_drop(kn, td);
 1462                         KQ_LOCK(kq);
 1463                         continue;
 1464                 } else if ((kn->kn_flags & EV_ONESHOT) == EV_ONESHOT) {
 1465                         kn->kn_status &= ~KN_QUEUED;
 1466                         kn->kn_status |= KN_INFLUX;
 1467                         kq->kq_count--;
 1468                         KQ_UNLOCK(kq);
 1469                         /*
 1470                          * We don't need to lock the list since we've marked
 1471                          * it _INFLUX.
 1472                          */
 1473                         *kevp = kn->kn_kevent;
 1474                         if (!(kn->kn_status & KN_DETACHED))
 1475                                 kn->kn_fop->f_detach(kn);
 1476                         knote_drop(kn, td);
 1477                         KQ_LOCK(kq);
 1478                         kn = NULL;
 1479                 } else {
 1480                         kn->kn_status |= KN_INFLUX | KN_SCAN;
 1481                         KQ_UNLOCK(kq);
 1482                         if ((kn->kn_status & KN_KQUEUE) == KN_KQUEUE)
 1483                                 KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
 1484                         KN_LIST_LOCK(kn);
 1485                         if (kn->kn_fop->f_event(kn, 0) == 0) {
 1486                                 KQ_LOCK(kq);
 1487                                 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
 1488                                 kn->kn_status &=
 1489                                     ~(KN_QUEUED | KN_ACTIVE | KN_INFLUX |
 1490                                     KN_SCAN);
 1491                                 kq->kq_count--;
 1492                                 KN_LIST_UNLOCK(kn);
 1493                                 influx = 1;
 1494                                 continue;
 1495                         }
 1496                         touch = (!kn->kn_fop->f_isfd &&
 1497                             kn->kn_fop->f_touch != NULL);
 1498                         if (touch)
 1499                                 kn->kn_fop->f_touch(kn, kevp, EVENT_PROCESS);
 1500                         else
 1501                                 *kevp = kn->kn_kevent;
 1502                         KQ_LOCK(kq);
 1503                         KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
 1504                         if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) {
 1505                                 /* 
 1506                          * Manually clear knotes that weren't
 1507                                  * 'touch'ed.
 1508                                  */
 1509                                 if (touch == 0 && kn->kn_flags & EV_CLEAR) {
 1510                                         kn->kn_data = 0;
 1511                                         kn->kn_fflags = 0;
 1512                                 }
 1513                                 if (kn->kn_flags & EV_DISPATCH)
 1514                                         kn->kn_status |= KN_DISABLED;
 1515                                 kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
 1516                                 kq->kq_count--;
 1517                         } else
 1518                                 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
 1519                         
 1520                         kn->kn_status &= ~(KN_INFLUX | KN_SCAN);
 1521                         KN_LIST_UNLOCK(kn);
 1522                         influx = 1;
 1523                 }
 1524 
 1525                 /* we are returning a copy to the user */
 1526                 kevp++;
 1527                 nkev++;
 1528                 count--;
 1529 
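                      /*
                       * Descriptive note: results are copied out in batches
                       * of KQ_NEVENTS, dropping the kq lock around the
                       * potentially faulting k_copyout callback.
                       */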
 1530                 if (nkev == KQ_NEVENTS) {
 1531                         influx = 0;
 1532                         KQ_UNLOCK_FLUX(kq);
 1533                         error = k_ops->k_copyout(k_ops->arg, keva, nkev);
 1534                         nkev = 0;
 1535                         kevp = keva;
 1536                         KQ_LOCK(kq);
 1537                         if (error)
 1538                                 break;
 1539                 }
 1540         }
 1541         TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe);
 1542 done:
 1543         KQ_OWNED(kq);
 1544         KQ_UNLOCK_FLUX(kq);
 1545         knote_free(marker);
 1546 done_nl:
 1547         KQ_NOTOWNED(kq);
 1548         if (nkev != 0)
 1549                 error = k_ops->k_copyout(k_ops->arg, keva, nkev);
 1550         td->td_retval[0] = maxevents - count;
 1551         return (error);
 1552 }
 1553 
 1554 /*
 1555  * XXX
 1556  * This could be expanded to call kqueue_scan, if desired.
 1557  */
 1558 /*ARGSUSED*/
 1559 static int
 1560 kqueue_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
 1561         int flags, struct thread *td)
 1562 {
 1563         return (ENXIO);
 1564 }
 1565 
 1566 /*ARGSUSED*/
 1567 static int
 1568 kqueue_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
 1569          int flags, struct thread *td)
 1570 {
 1571         return (ENXIO);
 1572 }
 1573 
 1574 /*ARGSUSED*/
 1575 static int
 1576 kqueue_truncate(struct file *fp, off_t length, struct ucred *active_cred,
 1577         struct thread *td)
 1578 {
 1579 
 1580         return (EINVAL);
 1581 }
 1582 
 1583 /*ARGSUSED*/
 1584 static int
 1585 kqueue_ioctl(struct file *fp, u_long cmd, void *data,
 1586         struct ucred *active_cred, struct thread *td)
 1587 {
 1588         /*
 1589          * Enabling sigio causes two major problems:
 1590          * 1) infinite recursion:
 1591          * Synopsis: kevent is being used to track signals and has FIOASYNC
 1592          * set.  On receipt of a signal this will cause a kqueue to recurse
 1593          * into itself over and over.  Sending the sigio causes the kqueue
 1594          * to become ready, which in turn posts sigio again, forever.
 1595          * Solution: this can be solved by setting a flag in the kqueue that
 1596          * we have a SIGIO in progress.
 1597          * 2) locking problems:
 1598          * Synopsis: Kqueue is a leaf subsystem, but adding signalling puts
 1599          * us above the proc and pgrp locks.
 1600          * Solution: Post a signal using an async mechanism, being sure to
 1601          * record a generation count in the delivery so that we do not deliver
 1602          * a signal to the wrong process.
 1603          *
 1604          * Note, these two mechanisms are somewhat mutually exclusive!
 1605          */
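              /*
               * Illustrative example of problem 1 (not from the original
               * source): a kqueue with FIOASYNC set that also watches
               * EVFILT_SIGNAL for SIGIO.  Posting SIGIO activates the
               * signal knote, which makes the kqueue readable, which posts
               * SIGIO again, and so on without bound.
               */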
 1606 #if 0
 1607         struct kqueue *kq;
 1608 
 1609         kq = fp->f_data;
 1610         switch (cmd) {
 1611         case FIOASYNC:
 1612                 if (*(int *)data) {
 1613                         kq->kq_state |= KQ_ASYNC;
 1614                 } else {
 1615                         kq->kq_state &= ~KQ_ASYNC;
 1616                 }
 1617                 return (0);
 1618 
 1619         case FIOSETOWN:
 1620                 return (fsetown(*(int *)data, &kq->kq_sigio));
 1621 
 1622         case FIOGETOWN:
 1623                 *(int *)data = fgetown(&kq->kq_sigio);
 1624                 return (0);
 1625         }
 1626 #endif
 1627 
 1628         return (ENOTTY);
 1629 }
 1630 
 1631 /*ARGSUSED*/
 1632 static int
 1633 kqueue_poll(struct file *fp, int events, struct ucred *active_cred,
 1634         struct thread *td)
 1635 {
 1636         struct kqueue *kq;
 1637         int revents = 0;
 1638         int error;
 1639 
 1640         if ((error = kqueue_acquire(fp, &kq)))
 1641                 return POLLERR;
 1642 
 1643         KQ_LOCK(kq);
 1644         if (events & (POLLIN | POLLRDNORM)) {
 1645                 if (kq->kq_count) {
 1646                         revents |= events & (POLLIN | POLLRDNORM);
 1647                 } else {
 1648                         selrecord(td, &kq->kq_sel);
 1649                         if (SEL_WAITING(&kq->kq_sel))
 1650                                 kq->kq_state |= KQ_SEL;
 1651                 }
 1652         }
 1653         kqueue_release(kq, 1);
 1654         KQ_UNLOCK(kq);
 1655         return (revents);
 1656 }
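      /*
       * Illustrative sketch (not part of this file): a kqueue descriptor
       * can itself be selected or polled for readability, which is the
       * path the function above services.
       *
       *        struct pollfd pfd = { kq, POLLIN, 0 };
       *        int r = poll(&pfd, 1, -1);
       *
       * A positive r with (pfd.revents & POLLIN) set means a subsequent
       * kevent(2) call on kq should find pending events.
       */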
 1657 
 1658 /*ARGSUSED*/
 1659 static int
 1660 kqueue_stat(struct file *fp, struct stat *st, struct ucred *active_cred,
 1661         struct thread *td)
 1662 {
 1663 
 1664         bzero((void *)st, sizeof *st);
 1665         /*
 1666          * We no longer return kq_count because the unlocked value is useless.
 1667          * If you spent all this time getting the count, why not spend your
 1668          * syscall better by calling kevent?
 1669          *
 1670          * XXX - This is needed for libc_r.
 1671          */
 1672         st->st_mode = S_IFIFO;
 1673         return (0);
 1674 }
 1675 
 1676 /*ARGSUSED*/
 1677 static int
 1678 kqueue_close(struct file *fp, struct thread *td)
 1679 {
 1680         struct kqueue *kq = fp->f_data;
 1681         struct filedesc *fdp;
 1682         struct knote *kn;
 1683         int i;
 1684         int error;
 1685         int filedesc_unlock;
 1686 
 1687         if ((error = kqueue_acquire(fp, &kq)))
 1688                 return error;
 1689 
 1690         filedesc_unlock = 0;
 1691         KQ_LOCK(kq);
 1692 
 1693         KASSERT((kq->kq_state & KQ_CLOSING) != KQ_CLOSING,
 1694             ("kqueue already closing"));
 1695         kq->kq_state |= KQ_CLOSING;
 1696         if (kq->kq_refcnt > 1)
 1697                 msleep(&kq->kq_refcnt, &kq->kq_lock, PSOCK, "kqclose", 0);
 1698 
 1699         KASSERT(kq->kq_refcnt == 1, ("other refs are out there!"));
 1700         fdp = kq->kq_fdp;
 1701 
 1702         KASSERT(knlist_empty(&kq->kq_sel.si_note),
 1703             ("kqueue's knlist not empty"));
 1704 
 1705         for (i = 0; i < kq->kq_knlistsize; i++) {
 1706                 while ((kn = SLIST_FIRST(&kq->kq_knlist[i])) != NULL) {
 1707                         if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) {
 1708                                 kq->kq_state |= KQ_FLUXWAIT;
 1709                                 msleep(kq, &kq->kq_lock, PSOCK, "kqclo1", 0);
 1710                                 continue;
 1711                         }
 1712                         kn->kn_status |= KN_INFLUX;
 1713                         KQ_UNLOCK(kq);
 1714                         if (!(kn->kn_status & KN_DETACHED))
 1715                                 kn->kn_fop->f_detach(kn);
 1716                         knote_drop(kn, td);
 1717                         KQ_LOCK(kq);
 1718                 }
 1719         }
 1720         if (kq->kq_knhashmask != 0) {
 1721                 for (i = 0; i <= kq->kq_knhashmask; i++) {
 1722                         while ((kn = SLIST_FIRST(&kq->kq_knhash[i])) != NULL) {
 1723                                 if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) {
 1724                                         kq->kq_state |= KQ_FLUXWAIT;
 1725                                         msleep(kq, &kq->kq_lock, PSOCK,
 1726                                                "kqclo2", 0);
 1727                                         continue;
 1728                                 }
 1729                                 kn->kn_status |= KN_INFLUX;
 1730                                 KQ_UNLOCK(kq);
 1731                                 if (!(kn->kn_status & KN_DETACHED))
 1732                                         kn->kn_fop->f_detach(kn);
 1733                                 knote_drop(kn, td);
 1734                                 KQ_LOCK(kq);
 1735                         }
 1736                 }
 1737         }
 1738 
 1739         if ((kq->kq_state & KQ_TASKSCHED) == KQ_TASKSCHED) {
 1740                 kq->kq_state |= KQ_TASKDRAIN;
 1741                 msleep(&kq->kq_state, &kq->kq_lock, PSOCK, "kqtqdr", 0);
 1742         }
 1743 
 1744         if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
 1745                 selwakeuppri(&kq->kq_sel, PSOCK);
 1746                 if (!SEL_WAITING(&kq->kq_sel))
 1747                         kq->kq_state &= ~KQ_SEL;
 1748         }
 1749 
 1750         KQ_UNLOCK(kq);
 1751 
 1752         /*
 1753          * We could be called due to the knote_drop() doing fdrop(),
 1754          * called from kqueue_register().  In this case the global
 1755          * lock is owned and the filedesc sx was locked beforehand, so
 1756          * that the sleepable lock is never taken after a non-sleepable one.
 1757          */
 1758         if (!sx_xlocked(FILEDESC_LOCK(fdp))) {
 1759                 FILEDESC_XLOCK(fdp);
 1760                 filedesc_unlock = 1;
 1761         } else
 1762                 filedesc_unlock = 0;
 1763         TAILQ_REMOVE(&fdp->fd_kqlist, kq, kq_list);
 1764         if (filedesc_unlock)
 1765                 FILEDESC_XUNLOCK(fdp);
 1766 
 1767         seldrain(&kq->kq_sel);
 1768         knlist_destroy(&kq->kq_sel.si_note);
 1769         mtx_destroy(&kq->kq_lock);
 1770         kq->kq_fdp = NULL;
 1771 
 1772         if (kq->kq_knhash != NULL)
 1773                 free(kq->kq_knhash, M_KQUEUE);
 1774         if (kq->kq_knlist != NULL)
 1775                 free(kq->kq_knlist, M_KQUEUE);
 1776 
 1777         funsetown(&kq->kq_sigio);
 1778         free(kq, M_KQUEUE);
 1779         fp->f_data = NULL;
 1780 
 1781         return (0);
 1782 }
 1783 
 1784 static void
 1785 kqueue_wakeup(struct kqueue *kq)
 1786 {
 1787         KQ_OWNED(kq);
 1788 
 1789         if ((kq->kq_state & KQ_SLEEP) == KQ_SLEEP) {
 1790                 kq->kq_state &= ~KQ_SLEEP;
 1791                 wakeup(kq);
 1792         }
 1793         if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
 1794                 selwakeuppri(&kq->kq_sel, PSOCK);
 1795                 if (!SEL_WAITING(&kq->kq_sel))
 1796                         kq->kq_state &= ~KQ_SEL;
 1797         }
 1798         if (!knlist_empty(&kq->kq_sel.si_note))
 1799                 kqueue_schedtask(kq);
 1800         if ((kq->kq_state & KQ_ASYNC) == KQ_ASYNC) {
 1801                 pgsigio(&kq->kq_sigio, SIGIO, 0);
 1802         }
 1803 }
 1804 
 1805 /*
 1806  * Walk down a list of knotes, activating them if their event has triggered.
 1807  *
 1808  * There is a possibility to optimize in the case of one kq watching another.
 1809  * Instead of scheduling a task to wake it up, you could pass enough state
 1810  * down the chain to wake up the parent kqueue.  Make this code functional
 1811  * first.
 1812  */
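      /*
       * Illustrative sketch (not part of this file): event sources
       * normally reach this function through the KNOTE_LOCKED() and
       * KNOTE_UNLOCKED() wrappers from sys/event.h.  Here "sc" is a
       * hypothetical driver softc whose lock already protects the knlist:
       *
       *        KNOTE_LOCKED(&sc->sc_rsel.si_note, 0);
       */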
 1813 void
 1814 knote(struct knlist *list, long hint, int lockflags)
 1815 {
 1816         struct kqueue *kq;
 1817         struct knote *kn;
 1818         int error;
 1819 
 1820         if (list == NULL)
 1821                 return;
 1822 
 1823         KNL_ASSERT_LOCK(list, lockflags & KNF_LISTLOCKED);
 1824 
 1825         if ((lockflags & KNF_LISTLOCKED) == 0)
 1826                 list->kl_lock(list->kl_lockarg); 
 1827 
 1828         /*
 1829          * If we unlock the list lock (and set KN_INFLUX), we can
 1830          * eliminate the kqueue scheduling, but this will introduce four
 1831          * lock/unlock operations for each knote to test.  If we do,
 1832          * continue to use SLIST_FOREACH: SLIST_FOREACH_SAFE is not safe
 1833          * in our case, as it is only safe when you want to remove the
 1834          * current item, which we are not doing.
 1835          */
 1836         SLIST_FOREACH(kn, &list->kl_list, kn_selnext) {
 1837                 kq = kn->kn_kq;
 1838                 KQ_LOCK(kq);
 1839                 if ((kn->kn_status & (KN_INFLUX | KN_SCAN)) == KN_INFLUX) {
 1840                         /*
 1841                          * Do not process the influx notes, except for
 1842                          * the influx coming from the kq unlock in the
 1843                          * kqueue_scan().  In the latter case, we do
 1844                          * not interfere with the scan, since the code
 1845                          * fragment in kqueue_scan() locks the knlist,
 1846                          * and cannot proceed until we have finished.
 1847                          */
 1848                         KQ_UNLOCK(kq);
 1849                 } else if ((lockflags & KNF_NOKQLOCK) != 0) {
 1850                         kn->kn_status |= KN_INFLUX;
 1851                         KQ_UNLOCK(kq);
 1852                         error = kn->kn_fop->f_event(kn, hint);
 1853                         KQ_LOCK(kq);
 1854                         kn->kn_status &= ~KN_INFLUX;
 1855                         if (error)
 1856                                 KNOTE_ACTIVATE(kn, 1);
 1857                         KQ_UNLOCK_FLUX(kq);
 1858                 } else {
 1859                         kn->kn_status |= KN_HASKQLOCK;
 1860                         if (kn->kn_fop->f_event(kn, hint))
 1861                                 KNOTE_ACTIVATE(kn, 1);
 1862                         kn->kn_status &= ~KN_HASKQLOCK;
 1863                         KQ_UNLOCK(kq);
 1864                 }
 1865         }
 1866         if ((lockflags & KNF_LISTLOCKED) == 0)
 1867                 list->kl_unlock(list->kl_lockarg); 
 1868 }
 1869 
 1870 /*
 1871  * add a knote to a knlist
 1872  */
 1873 void
 1874 knlist_add(struct knlist *knl, struct knote *kn, int islocked)
 1875 {
 1876         KNL_ASSERT_LOCK(knl, islocked);
 1877         KQ_NOTOWNED(kn->kn_kq);
 1878         KASSERT((kn->kn_status & (KN_INFLUX|KN_DETACHED)) ==
 1879             (KN_INFLUX|KN_DETACHED), ("knote not KN_INFLUX and KN_DETACHED"));
 1880         if (!islocked)
 1881                 knl->kl_lock(knl->kl_lockarg);
 1882         SLIST_INSERT_HEAD(&knl->kl_list, kn, kn_selnext);
 1883         if (!islocked)
 1884                 knl->kl_unlock(knl->kl_lockarg);
 1885         KQ_LOCK(kn->kn_kq);
 1886         kn->kn_knlist = knl;
 1887         kn->kn_status &= ~KN_DETACHED;
 1888         KQ_UNLOCK(kn->kn_kq);
 1889 }
 1890 
 1891 static void
 1892 knlist_remove_kq(struct knlist *knl, struct knote *kn, int knlislocked, int kqislocked)
 1893 {
 1894         KASSERT(!(!!kqislocked && !knlislocked), ("kq locked w/o knl locked"));
 1895         KNL_ASSERT_LOCK(knl, knlislocked);
 1896         mtx_assert(&kn->kn_kq->kq_lock, kqislocked ? MA_OWNED : MA_NOTOWNED);
 1897         if (!kqislocked)
 1898                 KASSERT((kn->kn_status & (KN_INFLUX|KN_DETACHED)) == KN_INFLUX,
 1899     ("knlist_remove called w/o knote being KN_INFLUX or already removed"));
 1900         if (!knlislocked)
 1901                 knl->kl_lock(knl->kl_lockarg);
 1902         SLIST_REMOVE(&knl->kl_list, kn, knote, kn_selnext);
 1903         kn->kn_knlist = NULL;
 1904         if (!knlislocked)
 1905                 knl->kl_unlock(knl->kl_lockarg);
 1906         if (!kqislocked)
 1907                 KQ_LOCK(kn->kn_kq);
 1908         kn->kn_status |= KN_DETACHED;
 1909         if (!kqislocked)
 1910                 KQ_UNLOCK(kn->kn_kq);
 1911 }
 1912 
 1913 /*
 1914  * remove knote from the specified knlist
 1915  */
 1916 void
 1917 knlist_remove(struct knlist *knl, struct knote *kn, int islocked)
 1918 {
 1919 
 1920         knlist_remove_kq(knl, kn, islocked, 0);
 1921 }
 1922 
 1923 /*
 1924  * remove knote from the specified knlist while in f_event handler.
 1925  */
 1926 void
 1927 knlist_remove_inevent(struct knlist *knl, struct knote *kn)
 1928 {
 1929 
 1930         knlist_remove_kq(knl, kn, 1,
 1931             (kn->kn_status & KN_HASKQLOCK) == KN_HASKQLOCK);
 1932 }
 1933 
 1934 int
 1935 knlist_empty(struct knlist *knl)
 1936 {
 1937 
 1938         KNL_ASSERT_LOCKED(knl);
 1939         return SLIST_EMPTY(&knl->kl_list);
 1940 }
 1941 
 1942 static struct mtx       knlist_lock;
 1943 MTX_SYSINIT(knlist_lock, &knlist_lock, "knlist lock for lockless objects",
 1944         MTX_DEF);
 1945 static void knlist_mtx_lock(void *arg);
 1946 static void knlist_mtx_unlock(void *arg);
 1947 
 1948 static void
 1949 knlist_mtx_lock(void *arg)
 1950 {
 1951 
 1952         mtx_lock((struct mtx *)arg);
 1953 }
 1954 
 1955 static void
 1956 knlist_mtx_unlock(void *arg)
 1957 {
 1958 
 1959         mtx_unlock((struct mtx *)arg);
 1960 }
 1961 
 1962 static void
 1963 knlist_mtx_assert_locked(void *arg)
 1964 {
 1965 
 1966         mtx_assert((struct mtx *)arg, MA_OWNED);
 1967 }
 1968 
 1969 static void
 1970 knlist_mtx_assert_unlocked(void *arg)
 1971 {
 1972 
 1973         mtx_assert((struct mtx *)arg, MA_NOTOWNED);
 1974 }
 1975 
 1976 static void
 1977 knlist_rw_rlock(void *arg)
 1978 {
 1979 
 1980         rw_rlock((struct rwlock *)arg);
 1981 }
 1982 
 1983 static void
 1984 knlist_rw_runlock(void *arg)
 1985 {
 1986 
 1987         rw_runlock((struct rwlock *)arg);
 1988 }
 1989 
 1990 static void
 1991 knlist_rw_assert_locked(void *arg)
 1992 {
 1993 
 1994         rw_assert((struct rwlock *)arg, RA_LOCKED);
 1995 }
 1996 
 1997 static void
 1998 knlist_rw_assert_unlocked(void *arg)
 1999 {
 2000 
 2001         rw_assert((struct rwlock *)arg, RA_UNLOCKED);
 2002 }
 2003 
 2004 void
 2005 knlist_init(struct knlist *knl, void *lock, void (*kl_lock)(void *),
 2006     void (*kl_unlock)(void *),
 2007     void (*kl_assert_locked)(void *), void (*kl_assert_unlocked)(void *))
 2008 {
 2009 
 2010         if (lock == NULL)
 2011                 knl->kl_lockarg = &knlist_lock;
 2012         else
 2013                 knl->kl_lockarg = lock;
 2014 
 2015         if (kl_lock == NULL)
 2016                 knl->kl_lock = knlist_mtx_lock;
 2017         else
 2018                 knl->kl_lock = kl_lock;
 2019         if (kl_unlock == NULL)
 2020                 knl->kl_unlock = knlist_mtx_unlock;
 2021         else
 2022                 knl->kl_unlock = kl_unlock;
 2023         if (kl_assert_locked == NULL)
 2024                 knl->kl_assert_locked = knlist_mtx_assert_locked;
 2025         else
 2026                 knl->kl_assert_locked = kl_assert_locked;
 2027         if (kl_assert_unlocked == NULL)
 2028                 knl->kl_assert_unlocked = knlist_mtx_assert_unlocked;
 2029         else
 2030                 knl->kl_assert_unlocked = kl_assert_unlocked;
 2031 
 2032         SLIST_INIT(&knl->kl_list);
 2033 }
 2034 
 2035 void
 2036 knlist_init_mtx(struct knlist *knl, struct mtx *lock)
 2037 {
 2038 
 2039         knlist_init(knl, lock, NULL, NULL, NULL, NULL);
 2040 }
 2041 
 2042 void
 2043 knlist_init_rw_reader(struct knlist *knl, struct rwlock *lock)
 2044 {
 2045 
 2046         knlist_init(knl, lock, knlist_rw_rlock, knlist_rw_runlock,
 2047             knlist_rw_assert_locked, knlist_rw_assert_unlocked);
 2048 }
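      /*
       * Illustrative sketch (not part of this file): a consumer typically
       * backs its knlist with the mutex that already protects the watched
       * object, so that f_event handlers run under that lock ("sc" is
       * again a hypothetical softc):
       *
       *        knlist_init_mtx(&sc->sc_rsel.si_note, &sc->sc_mtx);
       */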
 2049 
 2050 void
 2051 knlist_destroy(struct knlist *knl)
 2052 {
 2053 
 2054 #ifdef INVARIANTS
 2055         /*
 2056          * if we run across this error, we need to find the offending
 2057          * driver and have it call knlist_clear or knlist_delete.
 2058          */
 2059         if (!SLIST_EMPTY(&knl->kl_list))
 2060                 printf("WARNING: destroying knlist w/ knotes on it!\n");
 2061 #endif
 2062 
 2063         knl->kl_lockarg = knl->kl_lock = knl->kl_unlock = NULL;
 2064         SLIST_INIT(&knl->kl_list);
 2065 }
 2066 
 2067 /*
 2068  * Even if we are locked, we may need to drop the lock to allow any influx
 2069  * knotes time to "settle".
 2070  */
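      /*
       * Callers normally reach this through the knlist_clear() and
       * knlist_delete() wrappers in sys/event.h, which pass killkn == 0
       * (surviving knotes are marked EV_EOF | EV_ONESHOT) and killkn == 1
       * (knotes are dropped outright), respectively.
       */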
 2071 void
 2072 knlist_cleardel(struct knlist *knl, struct thread *td, int islocked, int killkn)
 2073 {
 2074         struct knote *kn, *kn2;
 2075         struct kqueue *kq;
 2076 
 2077         if (islocked)
 2078                 KNL_ASSERT_LOCKED(knl);
 2079         else {
 2080                 KNL_ASSERT_UNLOCKED(knl);
 2081 again:          /* need to reacquire lock since we have dropped it */
 2082                 knl->kl_lock(knl->kl_lockarg);
 2083         }
 2084 
 2085         SLIST_FOREACH_SAFE(kn, &knl->kl_list, kn_selnext, kn2) {
 2086                 kq = kn->kn_kq;
 2087                 KQ_LOCK(kq);
 2088                 if ((kn->kn_status & KN_INFLUX)) {
 2089                         KQ_UNLOCK(kq);
 2090                         continue;
 2091                 }
 2092                 knlist_remove_kq(knl, kn, 1, 1);
 2093                 if (killkn) {
 2094                         kn->kn_status |= KN_INFLUX | KN_DETACHED;
 2095                         KQ_UNLOCK(kq);
 2096                         knote_drop(kn, td);
 2097                 } else {
 2098                         /* Make sure cleared knotes disappear soon */
 2099                         kn->kn_flags |= (EV_EOF | EV_ONESHOT);
 2100                         KQ_UNLOCK(kq);
 2101                 }
 2102                 kq = NULL;
 2103         }
 2104 
 2105         if (!SLIST_EMPTY(&knl->kl_list)) {
 2106                 /* there are still KN_INFLUX knotes remaining */
 2107                 kn = SLIST_FIRST(&knl->kl_list);
 2108                 kq = kn->kn_kq;
 2109                 KQ_LOCK(kq);
 2110                 KASSERT(kn->kn_status & KN_INFLUX,
 2111                     ("knote removed w/o list lock"));
 2112                 knl->kl_unlock(knl->kl_lockarg);
 2113                 kq->kq_state |= KQ_FLUXWAIT;
 2114                 msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqkclr", 0);
 2115                 kq = NULL;
 2116                 goto again;
 2117         }
 2118 
 2119         if (islocked)
 2120                 KNL_ASSERT_LOCKED(knl);
 2121         else {
 2122                 knl->kl_unlock(knl->kl_lockarg);
 2123                 KNL_ASSERT_UNLOCKED(knl);
 2124         }
 2125 }
 2126 
 2127 /*
 2128  * Remove all knotes referencing a specified fd.  Must be called with the
 2129  * FILEDESC lock held.  This prevents a race where a new fd comes along
 2130  * and occupies the entry and we attach a knote to the fd.
 2131  */
 2132 void
 2133 knote_fdclose(struct thread *td, int fd)
 2134 {
 2135         struct filedesc *fdp = td->td_proc->p_fd;
 2136         struct kqueue *kq;
 2137         struct knote *kn;
 2138         int influx;
 2139 
 2140         FILEDESC_XLOCK_ASSERT(fdp);
 2141 
 2142         /*
 2143          * We shouldn't have to worry about new kevents appearing on fd
 2144          * since filedesc is locked.
 2145          */
 2146         TAILQ_FOREACH(kq, &fdp->fd_kqlist, kq_list) {
 2147                 KQ_LOCK(kq);
 2148 
 2149 again:
 2150                 influx = 0;
 2151                 while (kq->kq_knlistsize > fd &&
 2152                     (kn = SLIST_FIRST(&kq->kq_knlist[fd])) != NULL) {
 2153                         if (kn->kn_status & KN_INFLUX) {
 2154                                 /* someone else might be waiting on our knote */
 2155                                 if (influx)
 2156                                         wakeup(kq);
 2157                                 kq->kq_state |= KQ_FLUXWAIT;
 2158                                 msleep(kq, &kq->kq_lock, PSOCK, "kqflxwt", 0);
 2159                                 goto again;
 2160                         }
 2161                         kn->kn_status |= KN_INFLUX;
 2162                         KQ_UNLOCK(kq);
 2163                         if (!(kn->kn_status & KN_DETACHED))
 2164                                 kn->kn_fop->f_detach(kn);
 2165                         knote_drop(kn, td);
 2166                         influx = 1;
 2167                         KQ_LOCK(kq);
 2168                 }
 2169                 KQ_UNLOCK_FLUX(kq);
 2170         }
 2171 }
 2172 
 2173 static int
 2174 knote_attach(struct knote *kn, struct kqueue *kq)
 2175 {
 2176         struct klist *list;
 2177 
 2178         KASSERT(kn->kn_status & KN_INFLUX, ("knote not marked INFLUX"));
 2179         KQ_OWNED(kq);
 2180 
 2181         if (kn->kn_fop->f_isfd) {
 2182                 if (kn->kn_id >= kq->kq_knlistsize)
 2183                         return ENOMEM;
 2184                 list = &kq->kq_knlist[kn->kn_id];
 2185         } else {
 2186                 if (kq->kq_knhash == NULL)
 2187                         return ENOMEM;
 2188                 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
 2189         }
 2190 
 2191         SLIST_INSERT_HEAD(list, kn, kn_link);
 2192 
 2193         return 0;
 2194 }
 2195 
 2196 /*
 2197  * The knote must already have been detached using the f_detach method.
 2198  * No lock needs to be held; it is assumed that the KN_INFLUX flag is set
 2199  * to prevent concurrent removal.
 2200  */
 2201 static void
 2202 knote_drop(struct knote *kn, struct thread *td)
 2203 {
 2204         struct kqueue *kq;
 2205         struct klist *list;
 2206 
 2207         kq = kn->kn_kq;
 2208 
 2209         KQ_NOTOWNED(kq);
 2210         KASSERT((kn->kn_status & KN_INFLUX) == KN_INFLUX,
 2211             ("knote_drop called without KN_INFLUX set in kn_status"));
 2212 
 2213         KQ_LOCK(kq);
 2214         if (kn->kn_fop->f_isfd)
 2215                 list = &kq->kq_knlist[kn->kn_id];
 2216         else
 2217                 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
 2218 
 2219         if (!SLIST_EMPTY(list))
 2220                 SLIST_REMOVE(list, kn, knote, kn_link);
 2221         if (kn->kn_status & KN_QUEUED)
 2222                 knote_dequeue(kn);
 2223         KQ_UNLOCK_FLUX(kq);
 2224 
 2225         if (kn->kn_fop->f_isfd) {
 2226                 fdrop(kn->kn_fp, td);
 2227                 kn->kn_fp = NULL;
 2228         }
 2229         kqueue_fo_release(kn->kn_kevent.filter);
 2230         kn->kn_fop = NULL;
 2231         knote_free(kn);
 2232 }
 2233 
 2234 static void
 2235 knote_enqueue(struct knote *kn)
 2236 {
 2237         struct kqueue *kq = kn->kn_kq;
 2238 
 2239         KQ_OWNED(kn->kn_kq);
 2240         KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));
 2241 
 2242         TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
 2243         kn->kn_status |= KN_QUEUED;
 2244         kq->kq_count++;
 2245         kqueue_wakeup(kq);
 2246 }
 2247 
 2248 static void
 2249 knote_dequeue(struct knote *kn)
 2250 {
 2251         struct kqueue *kq = kn->kn_kq;
 2252 
 2253         KQ_OWNED(kn->kn_kq);
 2254         KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
 2255 
 2256         TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
 2257         kn->kn_status &= ~KN_QUEUED;
 2258         kq->kq_count--;
 2259 }
 2260 
 2261 static void
 2262 knote_init(void)
 2263 {
 2264 
 2265         knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL,
 2266             NULL, NULL, UMA_ALIGN_PTR, 0);
 2267 }
 2268 SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL);
 2269 
 2270 static struct knote *
 2271 knote_alloc(int waitok)
 2272 {
 2273         return ((struct knote *)uma_zalloc(knote_zone,
 2274             (waitok ? M_WAITOK : M_NOWAIT)|M_ZERO));
 2275 }
 2276 
 2277 static void
 2278 knote_free(struct knote *kn)
 2279 {
 2280         if (kn != NULL)
 2281                 uma_zfree(knote_zone, kn);
 2282 }
 2283 
 2284 /*
 2285  * Register the kev w/ the kq specified by fd.
 2286  */
 2287 int 
 2288 kqfd_register(int fd, struct kevent *kev, struct thread *td, int waitok)
 2289 {
 2290         struct kqueue *kq;
 2291         struct file *fp;
 2292         cap_rights_t rights;
 2293         int error;
 2294 
 2295         error = fget(td, fd, cap_rights_init(&rights, CAP_KQUEUE_CHANGE), &fp);
 2296         if (error != 0)
 2297                 return (error);
 2298         if ((error = kqueue_acquire(fp, &kq)) != 0)
 2299                 goto noacquire;
 2300 
 2301         error = kqueue_register(kq, kev, td, waitok);
 2302 
 2303         kqueue_release(kq, 0);
 2304 
 2305 noacquire:
 2306         fdrop(fp, td);
 2307 
 2308         return error;
 2309 }
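      /*
       * Illustrative sketch (not part of this file): an in-kernel caller
       * would build the kevent with EV_SET() before handing it over; the
       * EVFILT_USER filter, "fd", and "ident" are hypothetical example
       * values.
       *
       *        struct kevent kev;
       *
       *        EV_SET(&kev, ident, EVFILT_USER, EV_ADD, 0, 0, NULL);
       *        error = kqfd_register(fd, &kev, td, 1);
       */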
