FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_event.c

    1 /*-
    2  * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
    3  * Copyright 2004 John-Mark Gurney <jmg@FreeBSD.org>
    4  * All rights reserved.
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions
    8  * are met:
    9  * 1. Redistributions of source code must retain the above copyright
   10  *    notice, this list of conditions and the following disclaimer.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  *
   15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   25  * SUCH DAMAGE.
   26  */
   27 
   28 #include <sys/cdefs.h>
   29 __FBSDID("$FreeBSD: releng/8.0/sys/kern/kern_event.c 195148 2009-06-28 21:49:43Z stas $");
   30 
   31 #include "opt_ktrace.h"
   32 
   33 #include <sys/param.h>
   34 #include <sys/systm.h>
   35 #include <sys/kernel.h>
   36 #include <sys/lock.h>
   37 #include <sys/mutex.h>
   38 #include <sys/proc.h>
   39 #include <sys/malloc.h>
   40 #include <sys/unistd.h>
   41 #include <sys/file.h>
   42 #include <sys/filedesc.h>
   43 #include <sys/filio.h>
   44 #include <sys/fcntl.h>
   45 #include <sys/kthread.h>
   46 #include <sys/selinfo.h>
   47 #include <sys/queue.h>
   48 #include <sys/event.h>
   49 #include <sys/eventvar.h>
   50 #include <sys/poll.h>
   51 #include <sys/protosw.h>
   52 #include <sys/sigio.h>
   53 #include <sys/signalvar.h>
   54 #include <sys/socket.h>
   55 #include <sys/socketvar.h>
   56 #include <sys/stat.h>
   57 #include <sys/sysctl.h>
   58 #include <sys/sysproto.h>
   59 #include <sys/syscallsubr.h>
   60 #include <sys/taskqueue.h>
   61 #include <sys/uio.h>
   62 #ifdef KTRACE
   63 #include <sys/ktrace.h>
   64 #endif
   65 
   66 #include <vm/uma.h>
   67 
   68 static MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");
   69 
   70 /*
   71  * This lock is used if multiple kq locks are required.  This possibly
   72  * should be made into a per proc lock.
   73  */
   74 static struct mtx       kq_global;
   75 MTX_SYSINIT(kq_global, &kq_global, "kqueue order", MTX_DEF);
   76 #define KQ_GLOBAL_LOCK(lck, haslck)     do {    \
   77         if (!haslck)                            \
   78                 mtx_lock(lck);                  \
   79         haslck = 1;                             \
   80 } while (0)
   81 #define KQ_GLOBAL_UNLOCK(lck, haslck)   do {    \
   82         if (haslck)                             \
   83                 mtx_unlock(lck);                        \
   84         haslck = 0;                             \
   85 } while (0)
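
/*
 * Usage sketch (editorial, mirroring kqueue_register() and kqueue_task()
 * below): callers track ownership in a local flag so a retry loop takes
 * the global lock at most once and releases it only when actually held:
 *
 *      int haskqglobal = 0;
 *      ...
 *      KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
 *      ...
 *      KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
 */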
   86 
   87 TASKQUEUE_DEFINE_THREAD(kqueue);
   88 
   89 static int      kevent_copyout(void *arg, struct kevent *kevp, int count);
   90 static int      kevent_copyin(void *arg, struct kevent *kevp, int count);
   91 static int      kqueue_register(struct kqueue *kq, struct kevent *kev,
   92                     struct thread *td, int waitok);
   93 static int      kqueue_acquire(struct file *fp, struct kqueue **kqp);
   94 static void     kqueue_release(struct kqueue *kq, int locked);
   95 static int      kqueue_expand(struct kqueue *kq, struct filterops *fops,
   96                     uintptr_t ident, int waitok);
   97 static void     kqueue_task(void *arg, int pending);
   98 static int      kqueue_scan(struct kqueue *kq, int maxevents,
   99                     struct kevent_copyops *k_ops,
  100                     const struct timespec *timeout,
  101                     struct kevent *keva, struct thread *td);
  102 static void     kqueue_wakeup(struct kqueue *kq);
  103 static struct filterops *kqueue_fo_find(int filt);
  104 static void     kqueue_fo_release(int filt);
  105 
  106 static fo_rdwr_t        kqueue_read;
  107 static fo_rdwr_t        kqueue_write;
  108 static fo_truncate_t    kqueue_truncate;
  109 static fo_ioctl_t       kqueue_ioctl;
  110 static fo_poll_t        kqueue_poll;
  111 static fo_kqfilter_t    kqueue_kqfilter;
  112 static fo_stat_t        kqueue_stat;
  113 static fo_close_t       kqueue_close;
  114 
  115 static struct fileops kqueueops = {
  116         .fo_read = kqueue_read,
  117         .fo_write = kqueue_write,
  118         .fo_truncate = kqueue_truncate,
  119         .fo_ioctl = kqueue_ioctl,
  120         .fo_poll = kqueue_poll,
  121         .fo_kqfilter = kqueue_kqfilter,
  122         .fo_stat = kqueue_stat,
  123         .fo_close = kqueue_close,
  124 };
  125 
  126 static int      knote_attach(struct knote *kn, struct kqueue *kq);
  127 static void     knote_drop(struct knote *kn, struct thread *td);
  128 static void     knote_enqueue(struct knote *kn);
  129 static void     knote_dequeue(struct knote *kn);
  130 static void     knote_init(void);
  131 static struct   knote *knote_alloc(int waitok);
  132 static void     knote_free(struct knote *kn);
  133 
  134 static void     filt_kqdetach(struct knote *kn);
  135 static int      filt_kqueue(struct knote *kn, long hint);
  136 static int      filt_procattach(struct knote *kn);
  137 static void     filt_procdetach(struct knote *kn);
  138 static int      filt_proc(struct knote *kn, long hint);
  139 static int      filt_fileattach(struct knote *kn);
  140 static void     filt_timerexpire(void *knx);
  141 static int      filt_timerattach(struct knote *kn);
  142 static void     filt_timerdetach(struct knote *kn);
  143 static int      filt_timer(struct knote *kn, long hint);
  144 
  145 static struct filterops file_filtops =
  146         { 1, filt_fileattach, NULL, NULL };
  147 static struct filterops kqread_filtops =
  148         { 1, NULL, filt_kqdetach, filt_kqueue };
  149 /* XXX - move to kern_proc.c?  */
  150 static struct filterops proc_filtops =
  151         { 0, filt_procattach, filt_procdetach, filt_proc };
  152 static struct filterops timer_filtops =
  153         { 0, filt_timerattach, filt_timerdetach, filt_timer };
  154 
  155 static uma_zone_t       knote_zone;
  156 static int              kq_ncallouts = 0;
  157 static int              kq_calloutmax = (4 * 1024);
  158 SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
  159     &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
  160 
  161 /* XXX - ensure not KN_INFLUX?? */
  162 #define KNOTE_ACTIVATE(kn, islock) do {                                 \
  163         if ((islock))                                                   \
  164                 mtx_assert(&(kn)->kn_kq->kq_lock, MA_OWNED);            \
  165         else                                                            \
  166                 KQ_LOCK((kn)->kn_kq);                                   \
  167         (kn)->kn_status |= KN_ACTIVE;                                   \
  168         if (((kn)->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)         \
  169                 knote_enqueue((kn));                                    \
  170         if (!(islock))                                                  \
  171                 KQ_UNLOCK((kn)->kn_kq);                                 \
  172 } while(0)
  173 #define KQ_LOCK(kq) do {                                                \
  174         mtx_lock(&(kq)->kq_lock);                                       \
  175 } while (0)
  176 #define KQ_FLUX_WAKEUP(kq) do {                                         \
  177         if (((kq)->kq_state & KQ_FLUXWAIT) == KQ_FLUXWAIT) {            \
  178                 (kq)->kq_state &= ~KQ_FLUXWAIT;                         \
  179                 wakeup((kq));                                           \
  180         }                                                               \
  181 } while (0)
  182 #define KQ_UNLOCK_FLUX(kq) do {                                         \
  183         KQ_FLUX_WAKEUP(kq);                                             \
  184         mtx_unlock(&(kq)->kq_lock);                                     \
  185 } while (0)
  186 #define KQ_UNLOCK(kq) do {                                              \
  187         mtx_unlock(&(kq)->kq_lock);                                     \
  188 } while (0)
  189 #define KQ_OWNED(kq) do {                                               \
  190         mtx_assert(&(kq)->kq_lock, MA_OWNED);                           \
  191 } while (0)
  192 #define KQ_NOTOWNED(kq) do {                                            \
  193         mtx_assert(&(kq)->kq_lock, MA_NOTOWNED);                        \
  194 } while (0)
  195 #define KN_LIST_LOCK(kn) do {                                           \
  196         if (kn->kn_knlist != NULL)                                      \
  197                 kn->kn_knlist->kl_lock(kn->kn_knlist->kl_lockarg);      \
  198 } while (0)
  199 #define KN_LIST_UNLOCK(kn) do {                                         \
  200         if (kn->kn_knlist != NULL)                                      \
  201                 kn->kn_knlist->kl_unlock(kn->kn_knlist->kl_lockarg);    \
  202 } while (0)
  203 #define KNL_ASSERT_LOCK(knl, islocked) do {                             \
  204         if (islocked)                                                   \
  205                 KNL_ASSERT_LOCKED(knl);                         \
  206         else                                                            \
  207                 KNL_ASSERT_UNLOCKED(knl);                               \
  208 } while (0)
  209 #ifdef INVARIANTS
  210 #define KNL_ASSERT_LOCKED(knl) do {                                     \
  211         knl->kl_assert_locked((knl)->kl_lockarg);                       \
  212 } while (0)
  213 #define KNL_ASSERT_UNLOCKED(knl) do {                                   \
  214         knl->kl_assert_unlocked((knl)->kl_lockarg);                     \
  215 } while (0)
  216 #else /* !INVARIANTS */
  217 #define KNL_ASSERT_LOCKED(knl) do {} while(0)
  218 #define KNL_ASSERT_UNLOCKED(knl) do {} while (0)
  219 #endif /* INVARIANTS */
  220 
  221 #define KN_HASHSIZE             64              /* XXX should be tunable */
   222 #define KN_HASH(val, mask)      (((val) ^ ((val) >> 8)) & (mask))
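
/*
 * Worked example (editorial): with val = 0x1234 and mask = KN_HASHSIZE - 1
 * = 63, KN_HASH computes (0x1234 ^ 0x12) & 0x3f = 0x1226 & 0x3f = 0x26,
 * i.e. bucket 38, folding the second byte into the low bits before masking.
 */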
  223 
  224 static int
  225 filt_nullattach(struct knote *kn)
  226 {
  227 
  228         return (ENXIO);
   229 }
  230 
  231 struct filterops null_filtops =
  232         { 0, filt_nullattach, NULL, NULL };
  233 
  234 /* XXX - make SYSINIT to add these, and move into respective modules. */
  235 extern struct filterops sig_filtops;
  236 extern struct filterops fs_filtops;
  237 
  238 /*
   239  * Table for all system-defined filters.
  240  */
  241 static struct mtx       filterops_lock;
  242 MTX_SYSINIT(kqueue_filterops, &filterops_lock, "protect sysfilt_ops",
  243         MTX_DEF);
  244 static struct {
  245         struct filterops *for_fop;
  246         int for_refcnt;
  247 } sysfilt_ops[EVFILT_SYSCOUNT] = {
  248         { &file_filtops },                      /* EVFILT_READ */
  249         { &file_filtops },                      /* EVFILT_WRITE */
  250         { &null_filtops },                      /* EVFILT_AIO */
  251         { &file_filtops },                      /* EVFILT_VNODE */
  252         { &proc_filtops },                      /* EVFILT_PROC */
  253         { &sig_filtops },                       /* EVFILT_SIGNAL */
  254         { &timer_filtops },                     /* EVFILT_TIMER */
  255         { &file_filtops },                      /* EVFILT_NETDEV */
  256         { &fs_filtops },                        /* EVFILT_FS */
  257         { &null_filtops },                      /* EVFILT_LIO */
  258 };
  259 
  260 /*
  261  * Simple redirection for all cdevsw style objects to call their fo_kqfilter
  262  * method.
  263  */
  264 static int
  265 filt_fileattach(struct knote *kn)
  266 {
  267 
  268         return (fo_kqfilter(kn->kn_fp, kn));
  269 }
  270 
  271 /*ARGSUSED*/
  272 static int
  273 kqueue_kqfilter(struct file *fp, struct knote *kn)
  274 {
  275         struct kqueue *kq = kn->kn_fp->f_data;
  276 
  277         if (kn->kn_filter != EVFILT_READ)
  278                 return (EINVAL);
  279 
  280         kn->kn_status |= KN_KQUEUE;
  281         kn->kn_fop = &kqread_filtops;
  282         knlist_add(&kq->kq_sel.si_note, kn, 0);
  283 
  284         return (0);
  285 }
  286 
  287 static void
  288 filt_kqdetach(struct knote *kn)
  289 {
  290         struct kqueue *kq = kn->kn_fp->f_data;
  291 
  292         knlist_remove(&kq->kq_sel.si_note, kn, 0);
  293 }
  294 
  295 /*ARGSUSED*/
  296 static int
  297 filt_kqueue(struct knote *kn, long hint)
  298 {
  299         struct kqueue *kq = kn->kn_fp->f_data;
  300 
  301         kn->kn_data = kq->kq_count;
  302         return (kn->kn_data > 0);
  303 }
  304 
  305 /* XXX - move to kern_proc.c?  */
  306 static int
  307 filt_procattach(struct knote *kn)
  308 {
  309         struct proc *p;
  310         int immediate;
  311         int error;
  312 
  313         immediate = 0;
  314         p = pfind(kn->kn_id);
  315         if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
  316                 p = zpfind(kn->kn_id);
  317                 immediate = 1;
  318         } else if (p != NULL && (p->p_flag & P_WEXIT)) {
  319                 immediate = 1;
  320         }
  321 
  322         if (p == NULL)
  323                 return (ESRCH);
  324         if ((error = p_cansee(curthread, p)))
  325                 return (error);
  326 
  327         kn->kn_ptr.p_proc = p;
  328         kn->kn_flags |= EV_CLEAR;               /* automatically set */
  329 
  330         /*
  331          * internal flag indicating registration done by kernel
  332          */
  333         if (kn->kn_flags & EV_FLAG1) {
  334                 kn->kn_data = kn->kn_sdata;             /* ppid */
  335                 kn->kn_fflags = NOTE_CHILD;
  336                 kn->kn_flags &= ~EV_FLAG1;
  337         }
  338 
  339         if (immediate == 0)
  340                 knlist_add(&p->p_klist, kn, 1);
  341 
  342         /*
  343          * Immediately activate any exit notes if the target process is a
  344          * zombie.  This is necessary to handle the case where the target
  345          * process, e.g. a child, dies before the kevent is registered.
  346          */
  347         if (immediate && filt_proc(kn, NOTE_EXIT))
  348                 KNOTE_ACTIVATE(kn, 0);
  349 
  350         PROC_UNLOCK(p);
  351 
  352         return (0);
  353 }
  354 
  355 /*
  356  * The knote may be attached to a different process, which may exit,
  357  * leaving nothing for the knote to be attached to.  So when the process
  358  * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
  359  * it will be deleted when read out.  However, as part of the knote deletion,
  360  * this routine is called, so a check is needed to avoid actually performing
  361  * a detach, because the original process does not exist any more.
  362  */
  363 /* XXX - move to kern_proc.c?  */
  364 static void
  365 filt_procdetach(struct knote *kn)
  366 {
  367         struct proc *p;
  368 
  369         p = kn->kn_ptr.p_proc;
  370         knlist_remove(&p->p_klist, kn, 0);
  371         kn->kn_ptr.p_proc = NULL;
  372 }
  373 
  374 /* XXX - move to kern_proc.c?  */
  375 static int
  376 filt_proc(struct knote *kn, long hint)
  377 {
  378         struct proc *p = kn->kn_ptr.p_proc;
  379         u_int event;
  380 
  381         /*
  382          * mask off extra data
  383          */
  384         event = (u_int)hint & NOTE_PCTRLMASK;
  385 
  386         /*
  387          * if the user is interested in this event, record it.
  388          */
  389         if (kn->kn_sfflags & event)
  390                 kn->kn_fflags |= event;
  391 
  392         /*
  393          * process is gone, so flag the event as finished.
  394          */
  395         if (event == NOTE_EXIT) {
  396                 if (!(kn->kn_status & KN_DETACHED))
  397                         knlist_remove_inevent(&p->p_klist, kn);
  398                 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
  399                 kn->kn_data = p->p_xstat;
  400                 kn->kn_ptr.p_proc = NULL;
  401                 return (1);
  402         }
  403 
  404         return (kn->kn_fflags != 0);
  405 }
  406 
   407 /*
   408  * Called when a process forks.  It mostly does the same as knote(),
   409  * activating all knotes registered to be activated when the process
   410  * forks.  Additionally, for each knote attached to the parent, check
   411  * whether the user wants to track the new process.  If so, attach a
   412  * new knote to the child and immediately report an event with the
   413  * child's pid.
   414  */
  415 void
  416 knote_fork(struct knlist *list, int pid)
  417 {
  418         struct kqueue *kq;
  419         struct knote *kn;
  420         struct kevent kev;
  421         int error;
  422 
  423         if (list == NULL)
  424                 return;
  425         list->kl_lock(list->kl_lockarg);
  426 
  427         SLIST_FOREACH(kn, &list->kl_list, kn_selnext) {
  428                 if ((kn->kn_status & KN_INFLUX) == KN_INFLUX)
  429                         continue;
  430                 kq = kn->kn_kq;
  431                 KQ_LOCK(kq);
  432                 if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) {
  433                         KQ_UNLOCK(kq);
  434                         continue;
  435                 }
  436 
  437                 /*
  438                  * The same as knote(), activate the event.
  439                  */
  440                 if ((kn->kn_sfflags & NOTE_TRACK) == 0) {
  441                         kn->kn_status |= KN_HASKQLOCK;
  442                         if (kn->kn_fop->f_event(kn, NOTE_FORK | pid))
  443                                 KNOTE_ACTIVATE(kn, 1);
  444                         kn->kn_status &= ~KN_HASKQLOCK;
  445                         KQ_UNLOCK(kq);
  446                         continue;
  447                 }
  448 
  449                 /*
  450                  * The NOTE_TRACK case. In addition to the activation
  451                  * of the event, we need to register new event to
  452                  * track the child. Drop the locks in preparation for
  453                  * the call to kqueue_register().
  454                  */
  455                 kn->kn_status |= KN_INFLUX;
  456                 KQ_UNLOCK(kq);
  457                 list->kl_unlock(list->kl_lockarg);
  458 
  459                 /*
  460                  * Activate existing knote and register a knote with
  461                  * new process.
  462                  */
  463                 kev.ident = pid;
  464                 kev.filter = kn->kn_filter;
  465                 kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
  466                 kev.fflags = kn->kn_sfflags;
  467                 kev.data = kn->kn_id;           /* parent */
  468                 kev.udata = kn->kn_kevent.udata;/* preserve udata */
  469                 error = kqueue_register(kq, &kev, NULL, 0);
  470                 if (kn->kn_fop->f_event(kn, NOTE_FORK | pid))
  471                         KNOTE_ACTIVATE(kn, 0);
  472                 if (error)
  473                         kn->kn_fflags |= NOTE_TRACKERR;
  474                 KQ_LOCK(kq);
  475                 kn->kn_status &= ~KN_INFLUX;
  476                 KQ_UNLOCK_FLUX(kq);
  477                 list->kl_lock(list->kl_lockarg);
  478         }
  479         list->kl_unlock(list->kl_lockarg);
  480 }
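
/*
 * Userland sketch (editorial, not part of this file): the fork tracking
 * above is driven by registering an EVFILT_PROC event with NOTE_TRACK;
 * children are then reported with NOTE_CHILD and the parent's pid in the
 * event data.  Error handling omitted; "pid" is assumed to be a valid
 * process id and "kq" a kqueue descriptor:
 *
 *      struct kevent kev;
 *
 *      EV_SET(&kev, pid, EVFILT_PROC, EV_ADD | EV_ENABLE,
 *          NOTE_EXIT | NOTE_FORK | NOTE_TRACK, 0, NULL);
 *      (void)kevent(kq, &kev, 1, NULL, 0, NULL);
 */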
  481 
  482 static int
  483 timertoticks(intptr_t data)
  484 {
  485         struct timeval tv;
  486         int tticks;
  487 
  488         tv.tv_sec = data / 1000;
  489         tv.tv_usec = (data % 1000) * 1000;
  490         tticks = tvtohz(&tv);
  491 
  492         return tticks;
  493 }
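
/*
 * Worked example (editorial): data = 1500 (milliseconds) splits into
 * tv_sec = 1 and tv_usec = 500000; assuming hz = 1000, tvtohz() then
 * returns roughly 1500 ticks (tvtohz rounds up and accounts for the
 * current partial tick).
 */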
  494 
  495 /* XXX - move to kern_timeout.c? */
  496 static void
  497 filt_timerexpire(void *knx)
  498 {
  499         struct knote *kn = knx;
  500         struct callout *calloutp;
  501 
  502         kn->kn_data++;
  503         KNOTE_ACTIVATE(kn, 0);  /* XXX - handle locking */
  504 
  505         if ((kn->kn_flags & EV_ONESHOT) != EV_ONESHOT) {
  506                 calloutp = (struct callout *)kn->kn_hook;
  507                 callout_reset_curcpu(calloutp, timertoticks(kn->kn_sdata),
  508                     filt_timerexpire, kn);
  509         }
  510 }
  511 
  512 /*
  513  * data contains amount of time to sleep, in milliseconds
  514  */
  515 /* XXX - move to kern_timeout.c? */
  516 static int
  517 filt_timerattach(struct knote *kn)
  518 {
  519         struct callout *calloutp;
  520 
  521         atomic_add_int(&kq_ncallouts, 1);
  522 
  523         if (kq_ncallouts >= kq_calloutmax) {
  524                 atomic_add_int(&kq_ncallouts, -1);
  525                 return (ENOMEM);
  526         }
  527 
  528         kn->kn_flags |= EV_CLEAR;               /* automatically set */
  529         kn->kn_status &= ~KN_DETACHED;          /* knlist_add usually sets it */
  530         calloutp = malloc(sizeof(*calloutp), M_KQUEUE, M_WAITOK);
  531         callout_init(calloutp, CALLOUT_MPSAFE);
  532         kn->kn_hook = calloutp;
  533         callout_reset_curcpu(calloutp, timertoticks(kn->kn_sdata),
  534             filt_timerexpire, kn);
  535 
  536         return (0);
  537 }
  538 
  539 /* XXX - move to kern_timeout.c? */
  540 static void
  541 filt_timerdetach(struct knote *kn)
  542 {
  543         struct callout *calloutp;
  544 
  545         calloutp = (struct callout *)kn->kn_hook;
  546         callout_drain(calloutp);
  547         free(calloutp, M_KQUEUE);
  548         atomic_add_int(&kq_ncallouts, -1);
  549         kn->kn_status |= KN_DETACHED;   /* knlist_remove usually clears it */
  550 }
  551 
  552 /* XXX - move to kern_timeout.c? */
  553 static int
  554 filt_timer(struct knote *kn, long hint)
  555 {
  556 
  557         return (kn->kn_data != 0);
  558 }
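
/*
 * Userland sketch (editorial): given the milliseconds convention above, a
 * half-second periodic timer is registered as follows ("kq" is assumed to
 * be a kqueue descriptor).  EV_CLEAR is set automatically by the attach
 * routine, and the callout re-arms itself unless EV_ONESHOT was given:
 *
 *      struct kevent kev;
 *
 *      EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD | EV_ENABLE, 0, 500, NULL);
 *      (void)kevent(kq, &kev, 1, NULL, 0, NULL);
 */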
  559 
  560 int
  561 kqueue(struct thread *td, struct kqueue_args *uap)
  562 {
  563         struct filedesc *fdp;
  564         struct kqueue *kq;
  565         struct file *fp;
  566         int fd, error;
  567 
  568         fdp = td->td_proc->p_fd;
  569         error = falloc(td, &fp, &fd);
  570         if (error)
  571                 goto done2;
  572 
   573         /* An extra reference on `fp' has been held for us by falloc(). */
  574         kq = malloc(sizeof *kq, M_KQUEUE, M_WAITOK | M_ZERO);
  575         mtx_init(&kq->kq_lock, "kqueue", NULL, MTX_DEF|MTX_DUPOK);
  576         TAILQ_INIT(&kq->kq_head);
  577         kq->kq_fdp = fdp;
  578         knlist_init_mtx(&kq->kq_sel.si_note, &kq->kq_lock);
  579         TASK_INIT(&kq->kq_task, 0, kqueue_task, kq);
  580 
  581         FILEDESC_XLOCK(fdp);
  582         SLIST_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_list);
  583         FILEDESC_XUNLOCK(fdp);
  584 
  585         finit(fp, FREAD | FWRITE, DTYPE_KQUEUE, kq, &kqueueops);
  586         fdrop(fp, td);
  587 
  588         td->td_retval[0] = fd;
  589 done2:
  590         return (error);
  591 }
  592 
  593 #ifndef _SYS_SYSPROTO_H_
  594 struct kevent_args {
  595         int     fd;
  596         const struct kevent *changelist;
  597         int     nchanges;
  598         struct  kevent *eventlist;
  599         int     nevents;
  600         const struct timespec *timeout;
  601 };
  602 #endif
  603 int
  604 kevent(struct thread *td, struct kevent_args *uap)
  605 {
  606         struct timespec ts, *tsp;
  607         struct kevent_copyops k_ops = { uap,
  608                                         kevent_copyout,
  609                                         kevent_copyin};
  610         int error;
  611 #ifdef KTRACE
  612         struct uio ktruio;
  613         struct iovec ktriov;
  614         struct uio *ktruioin = NULL;
  615         struct uio *ktruioout = NULL;
  616 #endif
  617 
  618         if (uap->timeout != NULL) {
  619                 error = copyin(uap->timeout, &ts, sizeof(ts));
  620                 if (error)
  621                         return (error);
  622                 tsp = &ts;
  623         } else
  624                 tsp = NULL;
  625 
  626 #ifdef KTRACE
  627         if (KTRPOINT(td, KTR_GENIO)) {
  628                 ktriov.iov_base = uap->changelist;
  629                 ktriov.iov_len = uap->nchanges * sizeof(struct kevent);
  630                 ktruio = (struct uio){ .uio_iov = &ktriov, .uio_iovcnt = 1,
  631                     .uio_segflg = UIO_USERSPACE, .uio_rw = UIO_READ,
  632                     .uio_td = td };
  633                 ktruioin = cloneuio(&ktruio);
  634                 ktriov.iov_base = uap->eventlist;
  635                 ktriov.iov_len = uap->nevents * sizeof(struct kevent);
  636                 ktruioout = cloneuio(&ktruio);
  637         }
  638 #endif
  639 
  640         error = kern_kevent(td, uap->fd, uap->nchanges, uap->nevents,
  641             &k_ops, tsp);
  642 
  643 #ifdef KTRACE
  644         if (ktruioin != NULL) {
  645                 ktruioin->uio_resid = uap->nchanges * sizeof(struct kevent);
  646                 ktrgenio(uap->fd, UIO_WRITE, ktruioin, 0);
  647                 ktruioout->uio_resid = td->td_retval[0] * sizeof(struct kevent);
  648                 ktrgenio(uap->fd, UIO_READ, ktruioout, error);
  649         }
  650 #endif
  651 
  652         return (error);
  653 }
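
/*
 * Userland sketch (editorial): the argument layout above maps directly to
 * the kevent(2) call; here one change is submitted and up to one triggered
 * event is collected, with a 5-second timeout ("fd" is assumed to be an
 * open descriptor of interest):
 *
 *      struct kevent change, event;
 *      struct timespec ts = { 5, 0 };
 *      int kq = kqueue(), n;
 *
 *      EV_SET(&change, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *      n = kevent(kq, &change, 1, &event, 1, &ts);
 */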
  654 
  655 /*
  656  * Copy 'count' items into the destination list pointed to by uap->eventlist.
  657  */
  658 static int
  659 kevent_copyout(void *arg, struct kevent *kevp, int count)
  660 {
  661         struct kevent_args *uap;
  662         int error;
  663 
  664         KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
  665         uap = (struct kevent_args *)arg;
  666 
  667         error = copyout(kevp, uap->eventlist, count * sizeof *kevp);
  668         if (error == 0)
  669                 uap->eventlist += count;
  670         return (error);
  671 }
  672 
  673 /*
  674  * Copy 'count' items from the list pointed to by uap->changelist.
  675  */
  676 static int
  677 kevent_copyin(void *arg, struct kevent *kevp, int count)
  678 {
  679         struct kevent_args *uap;
  680         int error;
  681 
  682         KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
  683         uap = (struct kevent_args *)arg;
  684 
  685         error = copyin(uap->changelist, kevp, count * sizeof *kevp);
  686         if (error == 0)
  687                 uap->changelist += count;
  688         return (error);
  689 }
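
/*
 * Editorial sketch: kern_kevent() only touches the changelist and
 * eventlist through these hooks, so an in-kernel consumer can feed it
 * kevents from kernel memory by supplying its own kevent_copyops (the
 * "my_*" names below are hypothetical):
 *
 *      static struct kevent_copyops my_ops = { &my_arg,
 *                                              my_copyout,
 *                                              my_copyin };
 *
 *      error = kern_kevent(td, fd, nchanges, nevents, &my_ops, tsp);
 */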
  690 
  691 int
  692 kern_kevent(struct thread *td, int fd, int nchanges, int nevents,
  693     struct kevent_copyops *k_ops, const struct timespec *timeout)
  694 {
  695         struct kevent keva[KQ_NEVENTS];
  696         struct kevent *kevp, *changes;
  697         struct kqueue *kq;
  698         struct file *fp;
  699         int i, n, nerrors, error;
  700 
  701         if ((error = fget(td, fd, &fp)) != 0)
  702                 return (error);
  703         if ((error = kqueue_acquire(fp, &kq)) != 0)
  704                 goto done_norel;
  705 
  706         nerrors = 0;
  707 
  708         while (nchanges > 0) {
  709                 n = nchanges > KQ_NEVENTS ? KQ_NEVENTS : nchanges;
  710                 error = k_ops->k_copyin(k_ops->arg, keva, n);
  711                 if (error)
  712                         goto done;
  713                 changes = keva;
  714                 for (i = 0; i < n; i++) {
  715                         kevp = &changes[i];
  716                         if (!kevp->filter)
  717                                 continue;
  718                         kevp->flags &= ~EV_SYSFLAGS;
  719                         error = kqueue_register(kq, kevp, td, 1);
  720                         if (error) {
  721                                 if (nevents != 0) {
  722                                         kevp->flags = EV_ERROR;
  723                                         kevp->data = error;
  724                                         (void) k_ops->k_copyout(k_ops->arg,
  725                                             kevp, 1);
  726                                         nevents--;
  727                                         nerrors++;
  728                                 } else {
  729                                         goto done;
  730                                 }
  731                         }
  732                 }
  733                 nchanges -= n;
  734         }
  735         if (nerrors) {
  736                 td->td_retval[0] = nerrors;
  737                 error = 0;
  738                 goto done;
  739         }
  740 
  741         error = kqueue_scan(kq, nevents, k_ops, timeout, keva, td);
  742 done:
  743         kqueue_release(kq, 0);
  744 done_norel:
  745         fdrop(fp, td);
  746         return (error);
  747 }
  748 
  749 int
  750 kqueue_add_filteropts(int filt, struct filterops *filtops)
  751 {
   752         int error = 0;
  753 
  754         if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) {
  755                 printf(
  756 "trying to add a filterop that is out of range: %d is beyond %d\n",
  757                     ~filt, EVFILT_SYSCOUNT);
  758                 return EINVAL;
  759         }
  760         mtx_lock(&filterops_lock);
  761         if (sysfilt_ops[~filt].for_fop != &null_filtops &&
  762             sysfilt_ops[~filt].for_fop != NULL)
  763                 error = EEXIST;
  764         else {
  765                 sysfilt_ops[~filt].for_fop = filtops;
  766                 sysfilt_ops[~filt].for_refcnt = 0;
  767         }
  768         mtx_unlock(&filterops_lock);
  769 
   770         return (error);
  771 }
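
/*
 * Worked example (editorial): system filters are small negative numbers,
 * so ~filt is the sysfilt_ops[] index: EVFILT_READ is -1 and ~(-1) == 0,
 * EVFILT_WRITE is -2 and ~(-2) == 1, and so on through EVFILT_SYSCOUNT
 * entries.
 */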
  772 
  773 int
  774 kqueue_del_filteropts(int filt)
  775 {
  776         int error;
  777 
  778         error = 0;
  779         if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
  780                 return EINVAL;
  781 
  782         mtx_lock(&filterops_lock);
  783         if (sysfilt_ops[~filt].for_fop == &null_filtops ||
  784             sysfilt_ops[~filt].for_fop == NULL)
  785                 error = EINVAL;
  786         else if (sysfilt_ops[~filt].for_refcnt != 0)
  787                 error = EBUSY;
  788         else {
  789                 sysfilt_ops[~filt].for_fop = &null_filtops;
  790                 sysfilt_ops[~filt].for_refcnt = 0;
  791         }
  792         mtx_unlock(&filterops_lock);
  793 
  794         return error;
  795 }
  796 
  797 static struct filterops *
  798 kqueue_fo_find(int filt)
  799 {
  800 
  801         if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
  802                 return NULL;
  803 
  804         mtx_lock(&filterops_lock);
  805         sysfilt_ops[~filt].for_refcnt++;
  806         if (sysfilt_ops[~filt].for_fop == NULL)
  807                 sysfilt_ops[~filt].for_fop = &null_filtops;
  808         mtx_unlock(&filterops_lock);
  809 
  810         return sysfilt_ops[~filt].for_fop;
  811 }
  812 
  813 static void
  814 kqueue_fo_release(int filt)
  815 {
  816 
  817         if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
  818                 return;
  819 
  820         mtx_lock(&filterops_lock);
  821         KASSERT(sysfilt_ops[~filt].for_refcnt > 0,
  822             ("filter object refcount not valid on release"));
  823         sysfilt_ops[~filt].for_refcnt--;
  824         mtx_unlock(&filterops_lock);
  825 }
  826 
  827 /*
   828  * A ref to kq (obtained via kqueue_acquire) must be held.  waitok
   829  * determines whether memory allocation may wait; make sure it is 0 if
   830  * you hold any mutexes.
  831  */
  832 static int
  833 kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td, int waitok)
  834 {
  835         struct filterops *fops;
  836         struct file *fp;
  837         struct knote *kn, *tkn;
  838         int error, filt, event;
  839         int haskqglobal;
  840 
  841         fp = NULL;
  842         kn = NULL;
  843         error = 0;
  844         haskqglobal = 0;
  845 
  846         filt = kev->filter;
  847         fops = kqueue_fo_find(filt);
  848         if (fops == NULL)
  849                 return EINVAL;
  850 
  851         tkn = knote_alloc(waitok);              /* prevent waiting with locks */
  852 
  853 findkn:
  854         if (fops->f_isfd) {
  855                 KASSERT(td != NULL, ("td is NULL"));
  856                 error = fget(td, kev->ident, &fp);
  857                 if (error)
  858                         goto done;
  859 
  860                 if ((kev->flags & EV_ADD) == EV_ADD && kqueue_expand(kq, fops,
  861                     kev->ident, 0) != 0) {
  862                         /* try again */
  863                         fdrop(fp, td);
  864                         fp = NULL;
  865                         error = kqueue_expand(kq, fops, kev->ident, waitok);
  866                         if (error)
  867                                 goto done;
  868                         goto findkn;
  869                 }
  870 
  871                 if (fp->f_type == DTYPE_KQUEUE) {
  872                         /*
   873                          * If we add some intelligence about what we are doing,
  874                          * we should be able to support events on ourselves.
  875                          * We need to know when we are doing this to prevent
  876                          * getting both the knlist lock and the kq lock since
  877                          * they are the same thing.
  878                          */
  879                         if (fp->f_data == kq) {
  880                                 error = EINVAL;
  881                                 goto done;
  882                         }
  883 
  884                         KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
  885                 }
  886 
  887                 KQ_LOCK(kq);
  888                 if (kev->ident < kq->kq_knlistsize) {
  889                         SLIST_FOREACH(kn, &kq->kq_knlist[kev->ident], kn_link)
  890                                 if (kev->filter == kn->kn_filter)
  891                                         break;
  892                 }
  893         } else {
  894                 if ((kev->flags & EV_ADD) == EV_ADD)
  895                         kqueue_expand(kq, fops, kev->ident, waitok);
  896 
  897                 KQ_LOCK(kq);
  898                 if (kq->kq_knhashmask != 0) {
  899                         struct klist *list;
  900 
  901                         list = &kq->kq_knhash[
  902                             KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
  903                         SLIST_FOREACH(kn, list, kn_link)
  904                                 if (kev->ident == kn->kn_id &&
  905                                     kev->filter == kn->kn_filter)
  906                                         break;
  907                 }
  908         }
  909 
   910         /* knote is in the process of changing, wait for it to stabilize. */
  911         if (kn != NULL && (kn->kn_status & KN_INFLUX) == KN_INFLUX) {
  912                 if (fp != NULL) {
  913                         fdrop(fp, td);
  914                         fp = NULL;
  915                 }
  916                 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
  917                 kq->kq_state |= KQ_FLUXWAIT;
  918                 msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqflxwt", 0);
  919                 goto findkn;
  920         }
  921 
  922         if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
  923                 KQ_UNLOCK(kq);
  924                 error = ENOENT;
  925                 goto done;
  926         }
  927 
  928         /*
  929          * kn now contains the matching knote, or NULL if no match
  930          */
  931         if (kev->flags & EV_ADD) {
  932                 if (kn == NULL) {
  933                         kn = tkn;
  934                         tkn = NULL;
  935                         if (kn == NULL) {
  936                                 KQ_UNLOCK(kq);
  937                                 error = ENOMEM;
  938                                 goto done;
  939                         }
  940                         kn->kn_fp = fp;
  941                         kn->kn_kq = kq;
  942                         kn->kn_fop = fops;
  943                         /*
  944                          * apply reference counts to knote structure, and
  945                          * do not release it at the end of this routine.
  946                          */
  947                         fops = NULL;
  948                         fp = NULL;
  949 
  950                         kn->kn_sfflags = kev->fflags;
  951                         kn->kn_sdata = kev->data;
  952                         kev->fflags = 0;
  953                         kev->data = 0;
  954                         kn->kn_kevent = *kev;
  955                         kn->kn_kevent.flags &= ~(EV_ADD | EV_DELETE |
  956                             EV_ENABLE | EV_DISABLE);
  957                         kn->kn_status = KN_INFLUX|KN_DETACHED;
  958 
  959                         error = knote_attach(kn, kq);
  960                         KQ_UNLOCK(kq);
  961                         if (error != 0) {
  962                                 tkn = kn;
  963                                 goto done;
  964                         }
  965 
  966                         if ((error = kn->kn_fop->f_attach(kn)) != 0) {
  967                                 knote_drop(kn, td);
  968                                 goto done;
  969                         }
  970                         KN_LIST_LOCK(kn);
  971                 } else {
  972                         /*
  973                          * The user may change some filter values after the
  974                          * initial EV_ADD, but doing so will not reset any
  975                          * filter which has already been triggered.
  976                          */
  977                         kn->kn_status |= KN_INFLUX;
  978                         KQ_UNLOCK(kq);
  979                         KN_LIST_LOCK(kn);
  980                         kn->kn_sfflags = kev->fflags;
  981                         kn->kn_sdata = kev->data;
  982                         kn->kn_kevent.udata = kev->udata;
  983                 }
  984 
  985                 /*
  986                  * We can get here with kn->kn_knlist == NULL.
  987                  * This can happen when the initial attach event decides that
   988                  * the event is "completed" already, e.g. filt_procattach
   989                  * called on a zombie process.  It will call filt_proc,
   990                  * which removes the knote from the list and NULLs kn_knlist.
  991                  */
  992                 event = kn->kn_fop->f_event(kn, 0);
  993                 KQ_LOCK(kq);
  994                 if (event)
  995                         KNOTE_ACTIVATE(kn, 1);
  996                 kn->kn_status &= ~KN_INFLUX;
  997                 KN_LIST_UNLOCK(kn);
  998         } else if (kev->flags & EV_DELETE) {
  999                 kn->kn_status |= KN_INFLUX;
 1000                 KQ_UNLOCK(kq);
 1001                 if (!(kn->kn_status & KN_DETACHED))
 1002                         kn->kn_fop->f_detach(kn);
 1003                 knote_drop(kn, td);
 1004                 goto done;
 1005         }
 1006 
 1007         if ((kev->flags & EV_DISABLE) &&
 1008             ((kn->kn_status & KN_DISABLED) == 0)) {
 1009                 kn->kn_status |= KN_DISABLED;
 1010         }
 1011 
 1012         if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
 1013                 kn->kn_status &= ~KN_DISABLED;
 1014                 if ((kn->kn_status & KN_ACTIVE) &&
 1015                     ((kn->kn_status & KN_QUEUED) == 0))
 1016                         knote_enqueue(kn);
 1017         }
 1018         KQ_UNLOCK_FLUX(kq);
 1019 
 1020 done:
 1021         KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
 1022         if (fp != NULL)
 1023                 fdrop(fp, td);
 1024         if (tkn != NULL)
 1025                 knote_free(tkn);
 1026         if (fops != NULL)
 1027                 kqueue_fo_release(filt);
 1028         return (error);
 1029 }
 1030 
 1031 static int
 1032 kqueue_acquire(struct file *fp, struct kqueue **kqp)
 1033 {
 1034         int error;
 1035         struct kqueue *kq;
 1036 
 1037         error = 0;
 1038 
 1039         kq = fp->f_data;
 1040         if (fp->f_type != DTYPE_KQUEUE || kq == NULL)
 1041                 return (EBADF);
 1042         *kqp = kq;
 1043         KQ_LOCK(kq);
 1044         if ((kq->kq_state & KQ_CLOSING) == KQ_CLOSING) {
 1045                 KQ_UNLOCK(kq);
 1046                 return (EBADF);
 1047         }
 1048         kq->kq_refcnt++;
 1049         KQ_UNLOCK(kq);
 1050 
 1051         return error;
 1052 }
 1053 
 1054 static void
 1055 kqueue_release(struct kqueue *kq, int locked)
 1056 {
 1057         if (locked)
 1058                 KQ_OWNED(kq);
 1059         else
 1060                 KQ_LOCK(kq);
 1061         kq->kq_refcnt--;
 1062         if (kq->kq_refcnt == 1)
 1063                 wakeup(&kq->kq_refcnt);
 1064         if (!locked)
 1065                 KQ_UNLOCK(kq);
 1066 }
 1067 
 1068 static void
 1069 kqueue_schedtask(struct kqueue *kq)
 1070 {
 1071 
 1072         KQ_OWNED(kq);
 1073         KASSERT(((kq->kq_state & KQ_TASKDRAIN) != KQ_TASKDRAIN),
 1074             ("scheduling kqueue task while draining"));
 1075 
 1076         if ((kq->kq_state & KQ_TASKSCHED) != KQ_TASKSCHED) {
 1077                 taskqueue_enqueue(taskqueue_kqueue, &kq->kq_task);
 1078                 kq->kq_state |= KQ_TASKSCHED;
 1079         }
 1080 }
 1081 
 1082 /*
 1083  * Expand the kq to make sure we have storage for fops/ident pair.
 1084  *
 1085  * Return 0 on success (or no work necessary), return errno on failure.
 1086  *
  1087  * Not calling hashinit w/ waitok (the proper malloc flag) should be safe.
  1088  * If kqueue_register is called from a non-fd context, no locks are usually
  1089  * held, and none should be.
 1090  */
 1091 static int
 1092 kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
 1093         int waitok)
 1094 {
 1095         struct klist *list, *tmp_knhash;
 1096         u_long tmp_knhashmask;
 1097         int size;
 1098         int fd;
 1099         int mflag = waitok ? M_WAITOK : M_NOWAIT;
 1100 
 1101         KQ_NOTOWNED(kq);
 1102 
 1103         if (fops->f_isfd) {
 1104                 fd = ident;
 1105                 if (kq->kq_knlistsize <= fd) {
 1106                         size = kq->kq_knlistsize;
 1107                         while (size <= fd)
 1108                                 size += KQEXTENT;
 1109                         list = malloc(size * sizeof list, M_KQUEUE, mflag);
 1110                         if (list == NULL)
 1111                                 return ENOMEM;
 1112                         KQ_LOCK(kq);
 1113                         if (kq->kq_knlistsize > fd) {
 1114                                 free(list, M_KQUEUE);
 1115                                 list = NULL;
 1116                         } else {
 1117                                 if (kq->kq_knlist != NULL) {
 1118                                         bcopy(kq->kq_knlist, list,
 1119                                             kq->kq_knlistsize * sizeof list);
 1120                                         free(kq->kq_knlist, M_KQUEUE);
 1121                                         kq->kq_knlist = NULL;
 1122                                 }
 1123                                 bzero((caddr_t)list +
 1124                                     kq->kq_knlistsize * sizeof list,
 1125                                     (size - kq->kq_knlistsize) * sizeof list);
 1126                                 kq->kq_knlistsize = size;
 1127                                 kq->kq_knlist = list;
 1128                         }
 1129                         KQ_UNLOCK(kq);
 1130                 }
 1131         } else {
 1132                 if (kq->kq_knhashmask == 0) {
 1133                         tmp_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
 1134                             &tmp_knhashmask);
 1135                         if (tmp_knhash == NULL)
 1136                                 return ENOMEM;
 1137                         KQ_LOCK(kq);
 1138                         if (kq->kq_knhashmask == 0) {
 1139                                 kq->kq_knhash = tmp_knhash;
 1140                                 kq->kq_knhashmask = tmp_knhashmask;
 1141                         } else {
 1142                                 free(tmp_knhash, M_KQUEUE);
 1143                         }
 1144                         KQ_UNLOCK(kq);
 1145                 }
 1146         }
 1147 
 1148         KQ_NOTOWNED(kq);
 1149         return 0;
 1150 }
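
/*
 * Worked example (editorial, assuming KQEXTENT is 256 as defined in
 * sys/eventvar.h): registering an fd-based knote for fd 300 on a fresh
 * kqueue grows kq_knlistsize from 0 to 512, since the size is bumped in
 * KQEXTENT steps until it exceeds the fd.
 */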
 1151 
 1152 static void
 1153 kqueue_task(void *arg, int pending)
 1154 {
 1155         struct kqueue *kq;
 1156         int haskqglobal;
 1157 
 1158         haskqglobal = 0;
 1159         kq = arg;
 1160 
 1161         KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
 1162         KQ_LOCK(kq);
 1163 
 1164         KNOTE_LOCKED(&kq->kq_sel.si_note, 0);
 1165 
 1166         kq->kq_state &= ~KQ_TASKSCHED;
 1167         if ((kq->kq_state & KQ_TASKDRAIN) == KQ_TASKDRAIN) {
 1168                 wakeup(&kq->kq_state);
 1169         }
 1170         KQ_UNLOCK(kq);
 1171         KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
 1172 }
 1173 
 1174 /*
  1175  * Scan, update kn_data (if not ONESHOT), and copy out triggered events.
 1176  * We treat KN_MARKER knotes as if they are INFLUX.
 1177  */
 1178 static int
 1179 kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops,
 1180     const struct timespec *tsp, struct kevent *keva, struct thread *td)
 1181 {
 1182         struct kevent *kevp;
 1183         struct timeval atv, rtv, ttv;
 1184         struct knote *kn, *marker;
 1185         int count, timeout, nkev, error, influx;
 1186         int haskqglobal;
 1187 
 1188         count = maxevents;
 1189         nkev = 0;
 1190         error = 0;
 1191         haskqglobal = 0;
 1192 
 1193         if (maxevents == 0)
 1194                 goto done_nl;
 1195 
 1196         if (tsp != NULL) {
 1197                 TIMESPEC_TO_TIMEVAL(&atv, tsp);
 1198                 if (itimerfix(&atv)) {
 1199                         error = EINVAL;
 1200                         goto done_nl;
 1201                 }
 1202                 if (tsp->tv_sec == 0 && tsp->tv_nsec == 0)
 1203                         timeout = -1;
 1204                 else
 1205                         timeout = atv.tv_sec > 24 * 60 * 60 ?
 1206                             24 * 60 * 60 * hz : tvtohz(&atv);
 1207                 getmicrouptime(&rtv);
 1208                 timevaladd(&atv, &rtv);
 1209         } else {
 1210                 atv.tv_sec = 0;
 1211                 atv.tv_usec = 0;
 1212                 timeout = 0;
 1213         }
 1214         marker = knote_alloc(1);
 1215         if (marker == NULL) {
 1216                 error = ENOMEM;
 1217                 goto done_nl;
 1218         }
 1219         marker->kn_status = KN_MARKER;
 1220         KQ_LOCK(kq);
 1221         goto start;
 1222 
 1223 retry:
 1224         if (atv.tv_sec || atv.tv_usec) {
 1225                 getmicrouptime(&rtv);
 1226                 if (timevalcmp(&rtv, &atv, >=))
 1227                         goto done;
 1228                 ttv = atv;
 1229                 timevalsub(&ttv, &rtv);
 1230                 timeout = ttv.tv_sec > 24 * 60 * 60 ?
 1231                         24 * 60 * 60 * hz : tvtohz(&ttv);
 1232         }
 1233 
 1234 start:
 1235         kevp = keva;
 1236         if (kq->kq_count == 0) {
 1237                 if (timeout < 0) {
 1238                         error = EWOULDBLOCK;
 1239                 } else {
 1240                         kq->kq_state |= KQ_SLEEP;
 1241                         error = msleep(kq, &kq->kq_lock, PSOCK | PCATCH,
 1242                             "kqread", timeout);
 1243                 }
 1244                 if (error == 0)
 1245                         goto retry;
 1246                 /* don't restart after signals... */
 1247                 if (error == ERESTART)
 1248                         error = EINTR;
 1249                 else if (error == EWOULDBLOCK)
 1250                         error = 0;
 1251                 goto done;
 1252         }
 1253 
 1254         TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe);
 1255         influx = 0;
 1256         while (count) {
 1257                 KQ_OWNED(kq);
 1258                 kn = TAILQ_FIRST(&kq->kq_head);
 1259 
 1260                 if ((kn->kn_status == KN_MARKER && kn != marker) ||
 1261                     (kn->kn_status & KN_INFLUX) == KN_INFLUX) {
 1262                         if (influx) {
 1263                                 influx = 0;
 1264                                 KQ_FLUX_WAKEUP(kq);
 1265                         }
 1266                         kq->kq_state |= KQ_FLUXWAIT;
 1267                         error = msleep(kq, &kq->kq_lock, PSOCK,
 1268                             "kqflxwt", 0);
 1269                         continue;
 1270                 }
 1271 
 1272                 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
 1273                 if ((kn->kn_status & KN_DISABLED) == KN_DISABLED) {
 1274                         kn->kn_status &= ~KN_QUEUED;
 1275                         kq->kq_count--;
 1276                         continue;
 1277                 }
 1278                 if (kn == marker) {
 1279                         KQ_FLUX_WAKEUP(kq);
 1280                         if (count == maxevents)
 1281                                 goto retry;
 1282                         goto done;
 1283                 }
 1284                 KASSERT((kn->kn_status & KN_INFLUX) == 0,
  1285                     ("KN_INFLUX set when not supposed to be"));
 1286 
 1287                 if ((kn->kn_flags & EV_ONESHOT) == EV_ONESHOT) {
 1288                         kn->kn_status &= ~KN_QUEUED;
 1289                         kn->kn_status |= KN_INFLUX;
 1290                         kq->kq_count--;
 1291                         KQ_UNLOCK(kq);
 1292                         /*
 1293                          * We don't need to lock the list since we've marked
 1294                          * it _INFLUX.
 1295                          */
 1296                         *kevp = kn->kn_kevent;
 1297                         if (!(kn->kn_status & KN_DETACHED))
 1298                                 kn->kn_fop->f_detach(kn);
 1299                         knote_drop(kn, td);
 1300                         KQ_LOCK(kq);
 1301                         kn = NULL;
 1302                 } else {
 1303                         kn->kn_status |= KN_INFLUX;
 1304                         KQ_UNLOCK(kq);
 1305                         if ((kn->kn_status & KN_KQUEUE) == KN_KQUEUE)
 1306                                 KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
 1307                         KN_LIST_LOCK(kn);
 1308                         if (kn->kn_fop->f_event(kn, 0) == 0) {
 1309                                 KQ_LOCK(kq);
 1310                                 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
 1311                                 kn->kn_status &=
 1312                                     ~(KN_QUEUED | KN_ACTIVE | KN_INFLUX);
 1313                                 kq->kq_count--;
 1314                                 KN_LIST_UNLOCK(kn);
 1315                                 influx = 1;
 1316                                 continue;
 1317                         }
 1318                         *kevp = kn->kn_kevent;
 1319                         KQ_LOCK(kq);
 1320                         KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
 1321                         if (kn->kn_flags & EV_CLEAR) {
 1322                                 kn->kn_data = 0;
 1323                                 kn->kn_fflags = 0;
 1324                                 kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
 1325                                 kq->kq_count--;
 1326                         } else
 1327                                 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
  1328
 1329                         kn->kn_status &= ~(KN_INFLUX);
 1330                         KN_LIST_UNLOCK(kn);
 1331                         influx = 1;
 1332                 }
 1333 
 1334                 /* we are returning a copy to the user */
 1335                 kevp++;
 1336                 nkev++;
 1337                 count--;
 1338 
 1339                 if (nkev == KQ_NEVENTS) {
 1340                         influx = 0;
 1341                         KQ_UNLOCK_FLUX(kq);
 1342                         error = k_ops->k_copyout(k_ops->arg, keva, nkev);
 1343                         nkev = 0;
 1344                         kevp = keva;
 1345                         KQ_LOCK(kq);
 1346                         if (error)
 1347                                 break;
 1348                 }
 1349         }
 1350         TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe);
 1351 done:
 1352         KQ_OWNED(kq);
 1353         KQ_UNLOCK_FLUX(kq);
 1354         knote_free(marker);
 1355 done_nl:
 1356         KQ_NOTOWNED(kq);
 1357         if (nkev != 0)
 1358                 error = k_ops->k_copyout(k_ops->arg, keva, nkev);
 1359         td->td_retval[0] = maxevents - count;
 1360         return (error);
 1361 }
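
/*
 * Editorial note (assuming KQ_NEVENTS is 8 as defined in sys/eventvar.h):
 * kqueue_scan() batches copyouts through the keva[] staging array, so a
 * request for maxevents = 20 is delivered to k_copyout in chunks of
 * 8, 8 and 4, minimizing copyout calls while the kq lock is dropped.
 */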
 1362 
 1363 /*
 1364  * XXX
 1365  * This could be expanded to call kqueue_scan, if desired.
 1366  */
 1367 /*ARGSUSED*/
 1368 static int
 1369 kqueue_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
 1370         int flags, struct thread *td)
 1371 {
 1372         return (ENXIO);
 1373 }
 1374 
 1375 /*ARGSUSED*/
 1376 static int
 1377 kqueue_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
 1378          int flags, struct thread *td)
 1379 {
 1380         return (ENXIO);
 1381 }
 1382 
 1383 /*ARGSUSED*/
 1384 static int
 1385 kqueue_truncate(struct file *fp, off_t length, struct ucred *active_cred,
 1386         struct thread *td)
 1387 {
 1388 
 1389         return (EINVAL);
 1390 }
 1391 
 1392 /*ARGSUSED*/
 1393 static int
 1394 kqueue_ioctl(struct file *fp, u_long cmd, void *data,
 1395         struct ucred *active_cred, struct thread *td)
 1396 {
 1397         /*
 1398          * Enabling sigio causes two major problems:
 1399          * 1) infinite recursion:
  1400          * Synopsis: kevent is being used to track signals and has FIOASYNC
 1401          * set.  On receipt of a signal this will cause a kqueue to recurse
 1402          * into itself over and over.  Sending the sigio causes the kqueue
 1403          * to become ready, which in turn posts sigio again, forever.
 1404          * Solution: this can be solved by setting a flag in the kqueue that
 1405          * we have a SIGIO in progress.
 1406          * 2) locking problems:
 1407          * Synopsis: Kqueue is a leaf subsystem, but adding signalling puts
 1408          * us above the proc and pgrp locks.
 1409          * Solution: Post a signal using an async mechanism, being sure to
 1410          * record a generation count in the delivery so that we do not deliver
 1411          * a signal to the wrong process.
 1412          *
 1413          * Note, these two mechanisms are somewhat mutually exclusive!
 1414          */
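              /*
               * Illustrative sketch (hypothetical, not from this file) of
               * the userland sequence that would hit problem 1 if FIOASYNC
               * were honored:
               *
               *      kq = kqueue();
               *      EV_SET(&ev, SIGIO, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
               *      kevent(kq, &ev, 1, NULL, 0, NULL);
               *      on = 1;
               *      ioctl(kq, FIOASYNC, &on);
               *
               * Each delivered SIGIO readies the kqueue, which posts
               * another SIGIO, forever.
               */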
 1415 #if 0
 1416         struct kqueue *kq;
 1417 
 1418         kq = fp->f_data;
 1419         switch (cmd) {
 1420         case FIOASYNC:
 1421                 if (*(int *)data) {
 1422                         kq->kq_state |= KQ_ASYNC;
 1423                 } else {
 1424                         kq->kq_state &= ~KQ_ASYNC;
 1425                 }
 1426                 return (0);
 1427 
 1428         case FIOSETOWN:
 1429                 return (fsetown(*(int *)data, &kq->kq_sigio));
 1430 
 1431         case FIOGETOWN:
 1432                 *(int *)data = fgetown(&kq->kq_sigio);
 1433                 return (0);
 1434         }
 1435 #endif
 1436 
 1437         return (ENOTTY);
 1438 }
 1439 
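      /*
       * poll(2)/select(2) on a kqueue: report it readable when at least
       * one event is pending; otherwise record the thread's selection
       * request and mark the kqueue KQ_SEL.
       */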
 1440 /*ARGSUSED*/
 1441 static int
 1442 kqueue_poll(struct file *fp, int events, struct ucred *active_cred,
 1443         struct thread *td)
 1444 {
 1445         struct kqueue *kq;
 1446         int revents = 0;
 1447         int error;
 1448 
 1449         if ((error = kqueue_acquire(fp, &kq)))
 1450                 return (POLLERR);
 1451 
 1452         KQ_LOCK(kq);
 1453         if (events & (POLLIN | POLLRDNORM)) {
 1454                 if (kq->kq_count) {
 1455                         revents |= events & (POLLIN | POLLRDNORM);
 1456                 } else {
 1457                         selrecord(td, &kq->kq_sel);
 1458                         if (SEL_WAITING(&kq->kq_sel))
 1459                                 kq->kq_state |= KQ_SEL;
 1460                 }
 1461         }
 1462         kqueue_release(kq, 1);
 1463         KQ_UNLOCK(kq);
 1464         return (revents);
 1465 }
 1466 
 1467 /*ARGSUSED*/
 1468 static int
 1469 kqueue_stat(struct file *fp, struct stat *st, struct ucred *active_cred,
 1470         struct thread *td)
 1471 {
 1472 
 1473         bzero((void *)st, sizeof *st);
 1474         /*
 1475          * We no longer return kq_count because the unlocked value is useless.
 1476          * If you spent all this time getting the count, why not spend your
 1477          * syscall better by calling kevent?
 1478          *
 1479          * XXX - This is needed for libc_r.
 1480          */
 1481         st->st_mode = S_IFIFO;
 1482         return (0);
 1483 }
 1484 
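      /*
       * Tear down a kqueue on close: mark it closing, wait for other
       * references to drain, detach and drop every knote on both the
       * fd-indexed lists and the ident hash, drain any scheduled task,
       * wake any remaining select(2) waiters, unhook the kqueue from
       * its file descriptor table, and free all storage.
       */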
 1485 /*ARGSUSED*/
 1486 static int
 1487 kqueue_close(struct file *fp, struct thread *td)
 1488 {
 1489         struct kqueue *kq = fp->f_data;
 1490         struct filedesc *fdp;
 1491         struct knote *kn;
 1492         int i;
 1493         int error;
 1494 
 1495         if ((error = kqueue_acquire(fp, &kq)))
 1496                 return (error);
 1497 
 1498         KQ_LOCK(kq);
 1499 
 1500         KASSERT((kq->kq_state & KQ_CLOSING) != KQ_CLOSING,
 1501             ("kqueue already closing"));
 1502         kq->kq_state |= KQ_CLOSING;
 1503         if (kq->kq_refcnt > 1)
 1504                 msleep(&kq->kq_refcnt, &kq->kq_lock, PSOCK, "kqclose", 0);
 1505 
 1506         KASSERT(kq->kq_refcnt == 1, ("other refs are out there!"));
 1507         fdp = kq->kq_fdp;
 1508 
 1509         KASSERT(knlist_empty(&kq->kq_sel.si_note),
 1510             ("kqueue's knlist not empty"));
 1511 
 1512         for (i = 0; i < kq->kq_knlistsize; i++) {
 1513                 while ((kn = SLIST_FIRST(&kq->kq_knlist[i])) != NULL) {
 1514                         if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) {
 1515                                 kq->kq_state |= KQ_FLUXWAIT;
 1516                                 msleep(kq, &kq->kq_lock, PSOCK, "kqclo1", 0);
 1517                                 continue;
 1518                         }
 1519                         kn->kn_status |= KN_INFLUX;
 1520                         KQ_UNLOCK(kq);
 1521                         if (!(kn->kn_status & KN_DETACHED))
 1522                                 kn->kn_fop->f_detach(kn);
 1523                         knote_drop(kn, td);
 1524                         KQ_LOCK(kq);
 1525                 }
 1526         }
 1527         if (kq->kq_knhashmask != 0) {
 1528                 for (i = 0; i <= kq->kq_knhashmask; i++) {
 1529                         while ((kn = SLIST_FIRST(&kq->kq_knhash[i])) != NULL) {
 1530                                 if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) {
 1531                                         kq->kq_state |= KQ_FLUXWAIT;
 1532                                         msleep(kq, &kq->kq_lock, PSOCK,
 1533                                                "kqclo2", 0);
 1534                                         continue;
 1535                                 }
 1536                                 kn->kn_status |= KN_INFLUX;
 1537                                 KQ_UNLOCK(kq);
 1538                                 if (!(kn->kn_status & KN_DETACHED))
 1539                                         kn->kn_fop->f_detach(kn);
 1540                                 knote_drop(kn, td);
 1541                                 KQ_LOCK(kq);
 1542                         }
 1543                 }
 1544         }
 1545 
 1546         if ((kq->kq_state & KQ_TASKSCHED) == KQ_TASKSCHED) {
 1547                 kq->kq_state |= KQ_TASKDRAIN;
 1548                 msleep(&kq->kq_state, &kq->kq_lock, PSOCK, "kqtqdr", 0);
 1549         }
 1550 
 1551         if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
 1552                 selwakeuppri(&kq->kq_sel, PSOCK);
 1553                 if (!SEL_WAITING(&kq->kq_sel))
 1554                         kq->kq_state &= ~KQ_SEL;
 1555         }
 1556 
 1557         KQ_UNLOCK(kq);
 1558 
 1559         FILEDESC_XLOCK(fdp);
 1560         SLIST_REMOVE(&fdp->fd_kqlist, kq, kqueue, kq_list);
 1561         FILEDESC_XUNLOCK(fdp);
 1562 
 1563         knlist_destroy(&kq->kq_sel.si_note);
 1564         mtx_destroy(&kq->kq_lock);
 1565         kq->kq_fdp = NULL;
 1566 
 1567         if (kq->kq_knhash != NULL)
 1568                 free(kq->kq_knhash, M_KQUEUE);
 1569         if (kq->kq_knlist != NULL)
 1570                 free(kq->kq_knlist, M_KQUEUE);
 1571 
 1572         funsetown(&kq->kq_sigio);
 1573         free(kq, M_KQUEUE);
 1574         fp->f_data = NULL;
 1575 
 1576         return (0);
 1577 }
 1578 
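      /*
       * Notify everyone waiting on this kqueue: threads sleeping in
       * kqueue_scan, select/poll waiters, any other kqueue watching
       * this one (via the task queue), and async SIGIO listeners.
       */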
 1579 static void
 1580 kqueue_wakeup(struct kqueue *kq)
 1581 {
 1582         KQ_OWNED(kq);
 1583 
 1584         if ((kq->kq_state & KQ_SLEEP) == KQ_SLEEP) {
 1585                 kq->kq_state &= ~KQ_SLEEP;
 1586                 wakeup(kq);
 1587         }
 1588         if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
 1589                 selwakeuppri(&kq->kq_sel, PSOCK);
 1590                 if (!SEL_WAITING(&kq->kq_sel))
 1591                         kq->kq_state &= ~KQ_SEL;
 1592         }
 1593         if (!knlist_empty(&kq->kq_sel.si_note))
 1594                 kqueue_schedtask(kq);
 1595         if ((kq->kq_state & KQ_ASYNC) == KQ_ASYNC) {
 1596                 pgsigio(&kq->kq_sigio, SIGIO, 0);
 1597         }
 1598 }
 1599 
 1600 /*
 1601  * Walk down a list of knotes, activating them if their event has triggered.
 1602  *
 1603  * There is a possibility to optimize in the case of one kq watching another.
 1604  * Instead of scheduling a task to wake it up, you could pass enough state
 1605  * down the chain to wake up the parent kqueue directly.  Make this code
 1606  * functional first.
 1607  */
 1608 void
 1609 knote(struct knlist *list, long hint, int lockflags)
 1610 {
 1611         struct kqueue *kq;
 1612         struct knote *kn;
 1613         int error;
 1614 
 1615         if (list == NULL)
 1616                 return;
 1617 
 1618         KNL_ASSERT_LOCK(list, lockflags & KNF_LISTLOCKED);
 1619 
 1620         if ((lockflags & KNF_LISTLOCKED) == 0)
 1621                 list->kl_lock(list->kl_lockarg);
 1622 
 1623          /*
 1624           * If we unlock the list lock (and set KN_INFLUX), we can eliminate
 1625           * the kqueue scheduling, but this will introduce four lock/unlock
 1626           * pairs for each knote to test.  If we do, continue to use
 1627           * SLIST_FOREACH; SLIST_FOREACH_SAFE is not safe in our case, as it
 1628           * is only safe when removing the current item, which we are not
 1629           * doing.
 1630           */
 1631         SLIST_FOREACH(kn, &list->kl_list, kn_selnext) {
 1632                 kq = kn->kn_kq;
 1633                 if ((kn->kn_status & KN_INFLUX) != KN_INFLUX) {
 1634                         KQ_LOCK(kq);
 1635                         if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) {
 1636                                 KQ_UNLOCK(kq);
 1637                         } else if ((lockflags & KNF_NOKQLOCK) != 0) {
 1638                                 kn->kn_status |= KN_INFLUX;
 1639                                 KQ_UNLOCK(kq);
 1640                                 error = kn->kn_fop->f_event(kn, hint);
 1641                                 KQ_LOCK(kq);
 1642                                 kn->kn_status &= ~KN_INFLUX;
 1643                                 if (error)
 1644                                         KNOTE_ACTIVATE(kn, 1);
 1645                                 KQ_UNLOCK_FLUX(kq);
 1646                         } else {
 1647                                 kn->kn_status |= KN_HASKQLOCK;
 1648                                 if (kn->kn_fop->f_event(kn, hint))
 1649                                         KNOTE_ACTIVATE(kn, 1);
 1650                                 kn->kn_status &= ~KN_HASKQLOCK;
 1651                                 KQ_UNLOCK(kq);
 1652                         }
 1653                 }
 1654                 kq = NULL;
 1655         }
 1656         if ((lockflags & KNF_LISTLOCKED) == 0)
 1657                 list->kl_unlock(list->kl_lockarg);
 1658 }
 1659 
 1660 /*
 1661  * add a knote to a knlist
 1662  */
 1663 void
 1664 knlist_add(struct knlist *knl, struct knote *kn, int islocked)
 1665 {
 1666         KNL_ASSERT_LOCK(knl, islocked);
 1667         KQ_NOTOWNED(kn->kn_kq);
 1668         KASSERT((kn->kn_status & (KN_INFLUX|KN_DETACHED)) ==
 1669             (KN_INFLUX|KN_DETACHED), ("knote not KN_INFLUX and KN_DETACHED"));
 1670         if (!islocked)
 1671                 knl->kl_lock(knl->kl_lockarg);
 1672         SLIST_INSERT_HEAD(&knl->kl_list, kn, kn_selnext);
 1673         if (!islocked)
 1674                 knl->kl_unlock(knl->kl_lockarg);
 1675         KQ_LOCK(kn->kn_kq);
 1676         kn->kn_knlist = knl;
 1677         kn->kn_status &= ~KN_DETACHED;
 1678         KQ_UNLOCK(kn->kn_kq);
 1679 }
 1680 
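      /*
       * Internal worker to unlink a knote from a knlist and mark it
       * KN_DETACHED, taking only the locks the caller does not
       * already hold.
       */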
 1681 static void
 1682 knlist_remove_kq(struct knlist *knl, struct knote *kn, int knlislocked, int kqislocked)
 1683 {
 1684         KASSERT(!(!!kqislocked && !knlislocked), ("kq locked w/o knl locked"));
 1685         KNL_ASSERT_LOCK(knl, knlislocked);
 1686         mtx_assert(&kn->kn_kq->kq_lock, kqislocked ? MA_OWNED : MA_NOTOWNED);
 1687         if (!kqislocked)
 1688                 KASSERT((kn->kn_status & (KN_INFLUX|KN_DETACHED)) == KN_INFLUX,
 1689     ("knlist_remove called w/o knote being KN_INFLUX or already removed"));
 1690         if (!knlislocked)
 1691                 knl->kl_lock(knl->kl_lockarg);
 1692         SLIST_REMOVE(&knl->kl_list, kn, knote, kn_selnext);
 1693         kn->kn_knlist = NULL;
 1694         if (!knlislocked)
 1695                 knl->kl_unlock(knl->kl_lockarg);
 1696         if (!kqislocked)
 1697                 KQ_LOCK(kn->kn_kq);
 1698         kn->kn_status |= KN_DETACHED;
 1699         if (!kqislocked)
 1700                 KQ_UNLOCK(kn->kn_kq);
 1701 }
 1702 
 1703 /*
 1704  * remove a single knote from a specified knlist
 1705  */
 1706 void
 1707 knlist_remove(struct knlist *knl, struct knote *kn, int islocked)
 1708 {
 1709 
 1710         knlist_remove_kq(knl, kn, islocked, 0);
 1711 }
 1712 
 1713 /*
 1714  * remove a knote from a specified knlist while in an f_event handler.
 1715  */
 1716 void
 1717 knlist_remove_inevent(struct knlist *knl, struct knote *kn)
 1718 {
 1719 
 1720         knlist_remove_kq(knl, kn, 1,
 1721             (kn->kn_status & KN_HASKQLOCK) == KN_HASKQLOCK);
 1722 }
 1723 
 1724 int
 1725 knlist_empty(struct knlist *knl)
 1726 {
 1727         KNL_ASSERT_LOCKED(knl);
 1728         return (SLIST_EMPTY(&knl->kl_list));
 1729 }
 1730 
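      /*
       * Default locking callbacks for knlists whose owners do not
       * supply their own: a single global mutex shared by all such
       * objects.
       */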
 1731 static struct mtx       knlist_lock;
 1732 MTX_SYSINIT(knlist_lock, &knlist_lock, "knlist lock for lockless objects",
 1733         MTX_DEF);
 1734 static void knlist_mtx_lock(void *arg);
 1735 static void knlist_mtx_unlock(void *arg);
 1736 
 1737 static void
 1738 knlist_mtx_lock(void *arg)
 1739 {
 1740         mtx_lock((struct mtx *)arg);
 1741 }
 1742 
 1743 static void
 1744 knlist_mtx_unlock(void *arg)
 1745 {
 1746         mtx_unlock((struct mtx *)arg);
 1747 }
 1748 
 1749 static void
 1750 knlist_mtx_assert_locked(void *arg)
 1751 {
 1752         mtx_assert((struct mtx *)arg, MA_OWNED);
 1753 }
 1754 
 1755 static void
 1756 knlist_mtx_assert_unlocked(void *arg)
 1757 {
 1758         mtx_assert((struct mtx *)arg, MA_NOTOWNED);
 1759 }
 1760 
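      /*
       * Initialize a knlist.  Any callback left NULL falls back to the
       * mutex-based implementation above; a NULL lock selects the
       * shared global knlist_lock.
       */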
 1761 void
 1762 knlist_init(struct knlist *knl, void *lock, void (*kl_lock)(void *),
 1763     void (*kl_unlock)(void *),
 1764     void (*kl_assert_locked)(void *), void (*kl_assert_unlocked)(void *))
 1765 {
 1766 
 1767         if (lock == NULL)
 1768                 knl->kl_lockarg = &knlist_lock;
 1769         else
 1770                 knl->kl_lockarg = lock;
 1771 
 1772         if (kl_lock == NULL)
 1773                 knl->kl_lock = knlist_mtx_lock;
 1774         else
 1775                 knl->kl_lock = kl_lock;
 1776         if (kl_unlock == NULL)
 1777                 knl->kl_unlock = knlist_mtx_unlock;
 1778         else
 1779                 knl->kl_unlock = kl_unlock;
 1780         if (kl_assert_locked == NULL)
 1781                 knl->kl_assert_locked = knlist_mtx_assert_locked;
 1782         else
 1783                 knl->kl_assert_locked = kl_assert_locked;
 1784         if (kl_assert_unlocked == NULL)
 1785                 knl->kl_assert_unlocked = knlist_mtx_assert_unlocked;
 1786         else
 1787                 knl->kl_assert_unlocked = kl_assert_unlocked;
 1788 
 1789         SLIST_INIT(&knl->kl_list);
 1790 }
 1791 
 1792 void
 1793 knlist_init_mtx(struct knlist *knl, struct mtx *lock)
 1794 {
 1795 
 1796         knlist_init(knl, lock, NULL, NULL, NULL, NULL);
 1797 }
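
      /*
       * Typical driver usage (an illustrative sketch; the softc names
       * below are hypothetical, not from this file):
       *
       *      knlist_init_mtx(&sc->sc_note_list, &sc->sc_mtx);
       *      ...
       *      knote(&sc->sc_note_list, hint, 0);    (on each event)
       *      ...
       *      knlist_clear(&sc->sc_note_list, 0);   (before destroy)
       *      knlist_destroy(&sc->sc_note_list);
       */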
 1798 
 1799 void
 1800 knlist_destroy(struct knlist *knl)
 1801 {
 1802 
 1803 #ifdef INVARIANTS
 1804         /*
 1805          * if we run across this error, we need to find the offending
 1806          * driver and have it call knlist_clear.
 1807          */
 1808         if (!SLIST_EMPTY(&knl->kl_list))
 1809                 printf("WARNING: destroying knlist w/ knotes on it!\n");
 1810 #endif
 1811 
 1812         knl->kl_lockarg = knl->kl_lock = knl->kl_unlock = NULL;
 1813         SLIST_INIT(&knl->kl_list);
 1814 }
 1815 
 1816 /*
 1817  * Even if we are locked, we may need to drop the lock to allow any influx
 1818  * knotes time to "settle".
 1819  */
 1820 void
 1821 knlist_cleardel(struct knlist *knl, struct thread *td, int islocked, int killkn)
 1822 {
 1823         struct knote *kn, *kn2;
 1824         struct kqueue *kq;
 1825 
 1826         if (islocked)
 1827                 KNL_ASSERT_LOCKED(knl);
 1828         else {
 1829                 KNL_ASSERT_UNLOCKED(knl);
 1830 again:          /* need to reacquire lock since we have dropped it */
 1831                 knl->kl_lock(knl->kl_lockarg);
 1832         }
 1833 
 1834         SLIST_FOREACH_SAFE(kn, &knl->kl_list, kn_selnext, kn2) {
 1835                 kq = kn->kn_kq;
 1836                 KQ_LOCK(kq);
 1837                 if ((kn->kn_status & KN_INFLUX)) {
 1838                         KQ_UNLOCK(kq);
 1839                         continue;
 1840                 }
 1841                 knlist_remove_kq(knl, kn, 1, 1);
 1842                 if (killkn) {
 1843                         kn->kn_status |= KN_INFLUX | KN_DETACHED;
 1844                         KQ_UNLOCK(kq);
 1845                         knote_drop(kn, td);
 1846                 } else {
 1847                         /* Make sure cleared knotes disappear soon */
 1848                         kn->kn_flags |= (EV_EOF | EV_ONESHOT);
 1849                         KQ_UNLOCK(kq);
 1850                 }
 1851                 kq = NULL;
 1852         }
 1853 
 1854         if (!SLIST_EMPTY(&knl->kl_list)) {
 1855                 /* there are still KN_INFLUX knotes remaining */
 1856                 kn = SLIST_FIRST(&knl->kl_list);
 1857                 kq = kn->kn_kq;
 1858                 KQ_LOCK(kq);
 1859                 KASSERT(kn->kn_status & KN_INFLUX,
 1860                     ("knote removed w/o list lock"));
 1861                 knl->kl_unlock(knl->kl_lockarg);
 1862                 kq->kq_state |= KQ_FLUXWAIT;
 1863                 msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqkclr", 0);
 1864                 kq = NULL;
 1865                 goto again;
 1866         }
 1867 
 1868         if (islocked)
 1869                 KNL_ASSERT_LOCKED(knl);
 1870         else {
 1871                 knl->kl_unlock(knl->kl_lockarg);
 1872                 KNL_ASSERT_UNLOCKED(knl);
 1873         }
 1874 }
 1875 
 1876 /*
 1877  * Remove all knotes referencing a specified fd; must be called with the
 1878  * FILEDESC lock held.  This prevents a race where a new fd comes along,
 1879  * occupies the entry, and we attach a knote to the wrong fd.
 1880  */
 1881 void
 1882 knote_fdclose(struct thread *td, int fd)
 1883 {
 1884         struct filedesc *fdp = td->td_proc->p_fd;
 1885         struct kqueue *kq;
 1886         struct knote *kn;
 1887         int influx;
 1888 
 1889         FILEDESC_XLOCK_ASSERT(fdp);
 1890 
 1891         /*
 1892          * We shouldn't have to worry about new kevents appearing on fd
 1893          * since filedesc is locked.
 1894          */
 1895         SLIST_FOREACH(kq, &fdp->fd_kqlist, kq_list) {
 1896                 KQ_LOCK(kq);
 1897 
 1898 again:
 1899                 influx = 0;
 1900                 while (kq->kq_knlistsize > fd &&
 1901                     (kn = SLIST_FIRST(&kq->kq_knlist[fd])) != NULL) {
 1902                         if (kn->kn_status & KN_INFLUX) {
 1903                                 /* someone else might be waiting on our knote */
 1904                                 if (influx)
 1905                                         wakeup(kq);
 1906                                 kq->kq_state |= KQ_FLUXWAIT;
 1907                                 msleep(kq, &kq->kq_lock, PSOCK, "kqflxwt", 0);
 1908                                 goto again;
 1909                         }
 1910                         kn->kn_status |= KN_INFLUX;
 1911                         KQ_UNLOCK(kq);
 1912                         if (!(kn->kn_status & KN_DETACHED))
 1913                                 kn->kn_fop->f_detach(kn);
 1914                         knote_drop(kn, td);
 1915                         influx = 1;
 1916                         KQ_LOCK(kq);
 1917                 }
 1918                 KQ_UNLOCK_FLUX(kq);
 1919         }
 1920 }
 1921 
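      /*
       * Hook a knote into the kqueue's lookup structures: fd-based
       * knotes go on the per-fd list, all others on the hash table
       * keyed by kn_id.
       */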
 1922 static int
 1923 knote_attach(struct knote *kn, struct kqueue *kq)
 1924 {
 1925         struct klist *list;
 1926 
 1927         KASSERT(kn->kn_status & KN_INFLUX, ("knote not marked INFLUX"));
 1928         KQ_OWNED(kq);
 1929 
 1930         if (kn->kn_fop->f_isfd) {
 1931                 if (kn->kn_id >= kq->kq_knlistsize)
 1932                         return (ENOMEM);
 1933                 list = &kq->kq_knlist[kn->kn_id];
 1934         } else {
 1935                 if (kq->kq_knhash == NULL)
 1936                         return (ENOMEM);
 1937                 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
 1938         }
 1939 
 1940         SLIST_INSERT_HEAD(list, kn, kn_link);
 1941 
 1942         return (0);
 1943 }
 1944 
 1945 /*
 1946  * The knote must already have been detached using the f_detach method.
 1947  * No lock needs to be held; it is assumed that the KN_INFLUX flag is set
 1948  * to prevent concurrent removal.
 1949  */
 1950 static void
 1951 knote_drop(struct knote *kn, struct thread *td)
 1952 {
 1953         struct kqueue *kq;
 1954         struct klist *list;
 1955 
 1956         kq = kn->kn_kq;
 1957 
 1958         KQ_NOTOWNED(kq);
 1959         KASSERT((kn->kn_status & KN_INFLUX) == KN_INFLUX,
 1960             ("knote_drop called without KN_INFLUX set in kn_status"));
 1961 
 1962         KQ_LOCK(kq);
 1963         if (kn->kn_fop->f_isfd)
 1964                 list = &kq->kq_knlist[kn->kn_id];
 1965         else
 1966                 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
 1967 
 1968         if (!SLIST_EMPTY(list))
 1969                 SLIST_REMOVE(list, kn, knote, kn_link);
 1970         if (kn->kn_status & KN_QUEUED)
 1971                 knote_dequeue(kn);
 1972         KQ_UNLOCK_FLUX(kq);
 1973 
 1974         if (kn->kn_fop->f_isfd) {
 1975                 fdrop(kn->kn_fp, td);
 1976                 kn->kn_fp = NULL;
 1977         }
 1978         kqueue_fo_release(kn->kn_kevent.filter);
 1979         kn->kn_fop = NULL;
 1980         knote_free(kn);
 1981 }
 1982 
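      /*
       * Queue an activated knote at the tail of the kqueue's pending
       * list and wake up any waiters.
       */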
 1983 static void
 1984 knote_enqueue(struct knote *kn)
 1985 {
 1986         struct kqueue *kq = kn->kn_kq;
 1987 
 1988         KQ_OWNED(kn->kn_kq);
 1989         KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));
 1990 
 1991         TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
 1992         kn->kn_status |= KN_QUEUED;
 1993         kq->kq_count++;
 1994         kqueue_wakeup(kq);
 1995 }
 1996 
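      /*
       * Remove a knote from the pending list and drop the event count.
       */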
 1997 static void
 1998 knote_dequeue(struct knote *kn)
 1999 {
 2000         struct kqueue *kq = kn->kn_kq;
 2001 
 2002         KQ_OWNED(kn->kn_kq);
 2003         KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
 2004 
 2005         TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
 2006         kn->kn_status &= ~KN_QUEUED;
 2007         kq->kq_count--;
 2008 }
 2009 
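      /*
       * Set up the UMA zone that backs all knote allocations.
       */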
 2010 static void
 2011 knote_init(void)
 2012 {
 2013 
 2014         knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL,
 2015             NULL, NULL, UMA_ALIGN_PTR, 0);
 2016 }
 2017 SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL);
 2018 
 2019 static struct knote *
 2020 knote_alloc(int waitok)
 2021 {
 2022         return ((struct knote *)uma_zalloc(knote_zone,
 2023             (waitok ? M_WAITOK : M_NOWAIT)|M_ZERO));
 2024 }
 2025 
 2026 static void
 2027 knote_free(struct knote *kn)
 2028 {
 2029         if (kn != NULL)
 2030                 uma_zfree(knote_zone, kn);
 2031 }
 2032 
 2033 /*
 2034  * Register the kev w/ the kq specified by fd.
 2035  */
 2036 int 
 2037 kqfd_register(int fd, struct kevent *kev, struct thread *td, int waitok)
 2038 {
 2039         struct kqueue *kq;
 2040         struct file *fp;
 2041         int error;
 2042 
 2043         if ((error = fget(td, fd, &fp)) != 0)
 2044                 return (error);
 2045         if ((error = kqueue_acquire(fp, &kq)) != 0)
 2046                 goto noacquire;
 2047 
 2048         error = kqueue_register(kq, kev, td, waitok);
 2049 
 2050         kqueue_release(kq, 0);
 2051 
 2052 noacquire:
 2053         fdrop(fp, td);
 2054 
 2055         return (error);
 2056 }
