FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_event.c

    1 /*-
    2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
    3  *
    4  * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
    5  * Copyright 2004 John-Mark Gurney <jmg@FreeBSD.org>
    6  * Copyright (c) 2009 Apple, Inc.
    7  * All rights reserved.
    8  *
    9  * Redistribution and use in source and binary forms, with or without
   10  * modification, are permitted provided that the following conditions
   11  * are met:
   12  * 1. Redistributions of source code must retain the above copyright
   13  *    notice, this list of conditions and the following disclaimer.
   14  * 2. Redistributions in binary form must reproduce the above copyright
   15  *    notice, this list of conditions and the following disclaimer in the
   16  *    documentation and/or other materials provided with the distribution.
   17  *
   18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   28  * SUCH DAMAGE.
   29  */
   30 
   31 #include <sys/cdefs.h>
   32 __FBSDID("$FreeBSD$");
   33 
   34 #include "opt_ktrace.h"
   35 #include "opt_kqueue.h"
   36 
   37 #ifdef COMPAT_FREEBSD11
   38 #define _WANT_FREEBSD11_KEVENT
   39 #endif
   40 
   41 #include <sys/param.h>
   42 #include <sys/systm.h>
   43 #include <sys/capsicum.h>
   44 #include <sys/kernel.h>
   45 #include <sys/limits.h>
   46 #include <sys/lock.h>
   47 #include <sys/mutex.h>
   48 #include <sys/rwlock.h>
   49 #include <sys/proc.h>
   50 #include <sys/malloc.h>
   51 #include <sys/unistd.h>
   52 #include <sys/file.h>
   53 #include <sys/filedesc.h>
   54 #include <sys/filio.h>
   55 #include <sys/fcntl.h>
   56 #include <sys/kthread.h>
   57 #include <sys/selinfo.h>
   58 #include <sys/queue.h>
   59 #include <sys/event.h>
   60 #include <sys/eventvar.h>
   61 #include <sys/poll.h>
   62 #include <sys/protosw.h>
   63 #include <sys/resourcevar.h>
   64 #include <sys/sigio.h>
   65 #include <sys/signalvar.h>
   66 #include <sys/socket.h>
   67 #include <sys/socketvar.h>
   68 #include <sys/stat.h>
   69 #include <sys/sysctl.h>
   70 #include <sys/sysproto.h>
   71 #include <sys/syscallsubr.h>
   72 #include <sys/taskqueue.h>
   73 #include <sys/uio.h>
   74 #include <sys/user.h>
   75 #ifdef KTRACE
   76 #include <sys/ktrace.h>
   77 #endif
   78 #include <machine/atomic.h>
   79 
   80 #include <vm/uma.h>
   81 
   82 static MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");
   83 
   84 /*
   85  * This lock is used if multiple kq locks are required.  This possibly
    86  * should be made into a per-proc lock.
   87  */
   88 static struct mtx       kq_global;
   89 MTX_SYSINIT(kq_global, &kq_global, "kqueue order", MTX_DEF);
   90 #define KQ_GLOBAL_LOCK(lck, haslck)     do {    \
   91         if (!haslck)                            \
   92                 mtx_lock(lck);                  \
   93         haslck = 1;                             \
   94 } while (0)
   95 #define KQ_GLOBAL_UNLOCK(lck, haslck)   do {    \
   96         if (haslck)                             \
   97                 mtx_unlock(lck);                        \
   98         haslck = 0;                             \
   99 } while (0)
  100 
  101 TASKQUEUE_DEFINE_THREAD(kqueue_ctx);
  102 
  103 static int      kevent_copyout(void *arg, struct kevent *kevp, int count);
  104 static int      kevent_copyin(void *arg, struct kevent *kevp, int count);
  105 static int      kqueue_register(struct kqueue *kq, struct kevent *kev,
  106                     struct thread *td, int mflag);
  107 static int      kqueue_acquire(struct file *fp, struct kqueue **kqp);
  108 static void     kqueue_release(struct kqueue *kq, int locked);
  109 static void     kqueue_destroy(struct kqueue *kq);
  110 static void     kqueue_drain(struct kqueue *kq, struct thread *td);
  111 static int      kqueue_expand(struct kqueue *kq, struct filterops *fops,
  112                     uintptr_t ident, int mflag);
  113 static void     kqueue_task(void *arg, int pending);
  114 static int      kqueue_scan(struct kqueue *kq, int maxevents,
  115                     struct kevent_copyops *k_ops,
  116                     const struct timespec *timeout,
  117                     struct kevent *keva, struct thread *td);
  118 static void     kqueue_wakeup(struct kqueue *kq);
  119 static struct filterops *kqueue_fo_find(int filt);
  120 static void     kqueue_fo_release(int filt);
  121 struct g_kevent_args;
  122 static int      kern_kevent_generic(struct thread *td,
  123                     struct g_kevent_args *uap,
  124                     struct kevent_copyops *k_ops, const char *struct_name);
  125 
  126 static fo_ioctl_t       kqueue_ioctl;
  127 static fo_poll_t        kqueue_poll;
  128 static fo_kqfilter_t    kqueue_kqfilter;
  129 static fo_stat_t        kqueue_stat;
  130 static fo_close_t       kqueue_close;
  131 static fo_fill_kinfo_t  kqueue_fill_kinfo;
  132 
  133 static struct fileops kqueueops = {
  134         .fo_read = invfo_rdwr,
  135         .fo_write = invfo_rdwr,
  136         .fo_truncate = invfo_truncate,
  137         .fo_ioctl = kqueue_ioctl,
  138         .fo_poll = kqueue_poll,
  139         .fo_kqfilter = kqueue_kqfilter,
  140         .fo_stat = kqueue_stat,
  141         .fo_close = kqueue_close,
  142         .fo_chmod = invfo_chmod,
  143         .fo_chown = invfo_chown,
  144         .fo_sendfile = invfo_sendfile,
  145         .fo_fill_kinfo = kqueue_fill_kinfo,
  146 };
  147 
  148 static int      knote_attach(struct knote *kn, struct kqueue *kq);
  149 static void     knote_drop(struct knote *kn, struct thread *td);
  150 static void     knote_drop_detached(struct knote *kn, struct thread *td);
  151 static void     knote_enqueue(struct knote *kn);
  152 static void     knote_dequeue(struct knote *kn);
  153 static void     knote_init(void);
  154 static struct   knote *knote_alloc(int mflag);
  155 static void     knote_free(struct knote *kn);
  156 
  157 static void     filt_kqdetach(struct knote *kn);
  158 static int      filt_kqueue(struct knote *kn, long hint);
  159 static int      filt_procattach(struct knote *kn);
  160 static void     filt_procdetach(struct knote *kn);
  161 static int      filt_proc(struct knote *kn, long hint);
  162 static int      filt_fileattach(struct knote *kn);
  163 static void     filt_timerexpire(void *knx);
  164 static void     filt_timerexpire_l(struct knote *kn, bool proc_locked);
  165 static int      filt_timerattach(struct knote *kn);
  166 static void     filt_timerdetach(struct knote *kn);
  167 static void     filt_timerstart(struct knote *kn, sbintime_t to);
  168 static void     filt_timertouch(struct knote *kn, struct kevent *kev,
  169                     u_long type);
  170 static int      filt_timervalidate(struct knote *kn, sbintime_t *to);
  171 static int      filt_timer(struct knote *kn, long hint);
  172 static int      filt_userattach(struct knote *kn);
  173 static void     filt_userdetach(struct knote *kn);
  174 static int      filt_user(struct knote *kn, long hint);
  175 static void     filt_usertouch(struct knote *kn, struct kevent *kev,
  176                     u_long type);
  177 
  178 static struct filterops file_filtops = {
  179         .f_isfd = 1,
  180         .f_attach = filt_fileattach,
  181 };
  182 static struct filterops kqread_filtops = {
  183         .f_isfd = 1,
  184         .f_detach = filt_kqdetach,
  185         .f_event = filt_kqueue,
  186 };
  187 /* XXX - move to kern_proc.c?  */
  188 static struct filterops proc_filtops = {
  189         .f_isfd = 0,
  190         .f_attach = filt_procattach,
  191         .f_detach = filt_procdetach,
  192         .f_event = filt_proc,
  193 };
  194 static struct filterops timer_filtops = {
  195         .f_isfd = 0,
  196         .f_attach = filt_timerattach,
  197         .f_detach = filt_timerdetach,
  198         .f_event = filt_timer,
  199         .f_touch = filt_timertouch,
  200 };
  201 static struct filterops user_filtops = {
  202         .f_attach = filt_userattach,
  203         .f_detach = filt_userdetach,
  204         .f_event = filt_user,
  205         .f_touch = filt_usertouch,
  206 };
  207 
  208 static uma_zone_t       knote_zone;
  209 static unsigned int __exclusive_cache_line      kq_ncallouts;
  210 static unsigned int     kq_calloutmax = 4 * 1024;
  211 SYSCTL_UINT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
  212     &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
  213 
  214 /* XXX - ensure not influx ? */
  215 #define KNOTE_ACTIVATE(kn, islock) do {                                 \
  216         if ((islock))                                                   \
  217                 mtx_assert(&(kn)->kn_kq->kq_lock, MA_OWNED);            \
  218         else                                                            \
  219                 KQ_LOCK((kn)->kn_kq);                                   \
  220         (kn)->kn_status |= KN_ACTIVE;                                   \
  221         if (((kn)->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)         \
  222                 knote_enqueue((kn));                                    \
  223         if (!(islock))                                                  \
  224                 KQ_UNLOCK((kn)->kn_kq);                                 \
  225 } while(0)
  226 #define KQ_LOCK(kq) do {                                                \
  227         mtx_lock(&(kq)->kq_lock);                                       \
  228 } while (0)
  229 #define KQ_FLUX_WAKEUP(kq) do {                                         \
  230         if (((kq)->kq_state & KQ_FLUXWAIT) == KQ_FLUXWAIT) {            \
  231                 (kq)->kq_state &= ~KQ_FLUXWAIT;                         \
  232                 wakeup((kq));                                           \
  233         }                                                               \
  234 } while (0)
  235 #define KQ_UNLOCK_FLUX(kq) do {                                         \
  236         KQ_FLUX_WAKEUP(kq);                                             \
  237         mtx_unlock(&(kq)->kq_lock);                                     \
  238 } while (0)
  239 #define KQ_UNLOCK(kq) do {                                              \
  240         mtx_unlock(&(kq)->kq_lock);                                     \
  241 } while (0)
  242 #define KQ_OWNED(kq) do {                                               \
  243         mtx_assert(&(kq)->kq_lock, MA_OWNED);                           \
  244 } while (0)
  245 #define KQ_NOTOWNED(kq) do {                                            \
  246         mtx_assert(&(kq)->kq_lock, MA_NOTOWNED);                        \
  247 } while (0)
  248 
  249 static struct knlist *
  250 kn_list_lock(struct knote *kn)
  251 {
  252         struct knlist *knl;
  253 
  254         knl = kn->kn_knlist;
  255         if (knl != NULL)
  256                 knl->kl_lock(knl->kl_lockarg);
  257         return (knl);
  258 }
  259 
  260 static void
  261 kn_list_unlock(struct knlist *knl)
  262 {
  263         bool do_free;
  264 
  265         if (knl == NULL)
  266                 return;
  267         do_free = knl->kl_autodestroy && knlist_empty(knl);
  268         knl->kl_unlock(knl->kl_lockarg);
  269         if (do_free) {
  270                 knlist_destroy(knl);
  271                 free(knl, M_KQUEUE);
  272         }
  273 }
  274 
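/*
 * Roughly, a knote whose kn_influx count is non-zero is "in flux": some
 * thread is working on it with the kq lock dropped (attaching, detaching,
 * or scanning it), so other threads must neither modify nor free it.
 * Waiters set KQ_FLUXWAIT and sleep until KQ_FLUX_WAKEUP() is issued by
 * the thread that takes the knote out of flux.
 */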
  275 static bool
  276 kn_in_flux(struct knote *kn)
  277 {
  278 
  279         return (kn->kn_influx > 0);
  280 }
  281 
  282 static void
  283 kn_enter_flux(struct knote *kn)
  284 {
  285 
  286         KQ_OWNED(kn->kn_kq);
  287         MPASS(kn->kn_influx < INT_MAX);
  288         kn->kn_influx++;
  289 }
  290 
  291 static bool
  292 kn_leave_flux(struct knote *kn)
  293 {
  294 
  295         KQ_OWNED(kn->kn_kq);
  296         MPASS(kn->kn_influx > 0);
  297         kn->kn_influx--;
  298         return (kn->kn_influx == 0);
  299 }
  300 
  301 #define KNL_ASSERT_LOCK(knl, islocked) do {                             \
  302         if (islocked)                                                   \
  303                 KNL_ASSERT_LOCKED(knl);                         \
  304         else                                                            \
  305                 KNL_ASSERT_UNLOCKED(knl);                               \
  306 } while (0)
  307 #ifdef INVARIANTS
  308 #define KNL_ASSERT_LOCKED(knl) do {                                     \
  309         knl->kl_assert_lock((knl)->kl_lockarg, LA_LOCKED);              \
  310 } while (0)
  311 #define KNL_ASSERT_UNLOCKED(knl) do {                                   \
  312         knl->kl_assert_lock((knl)->kl_lockarg, LA_UNLOCKED);            \
  313 } while (0)
  314 #else /* !INVARIANTS */
  315 #define KNL_ASSERT_LOCKED(knl) do {} while(0)
  316 #define KNL_ASSERT_UNLOCKED(knl) do {} while (0)
  317 #endif /* INVARIANTS */
  318 
  319 #ifndef KN_HASHSIZE
  320 #define KN_HASHSIZE             64              /* XXX should be tunable */
  321 #endif
  322 
  323 #define KN_HASH(val, mask)      (((val) ^ (val >> 8)) & (mask))
  324 
  325 static int
  326 filt_nullattach(struct knote *kn)
  327 {
  328 
  329         return (ENXIO);
   330 }
  331 
  332 struct filterops null_filtops = {
  333         .f_isfd = 0,
  334         .f_attach = filt_nullattach,
  335 };
  336 
  337 /* XXX - make SYSINIT to add these, and move into respective modules. */
  338 extern struct filterops sig_filtops;
  339 extern struct filterops fs_filtops;
  340 
  341 /*
   342  * Table for all system-defined filters.
  343  */
  344 static struct mtx       filterops_lock;
  345 MTX_SYSINIT(kqueue_filterops, &filterops_lock, "protect sysfilt_ops",
  346         MTX_DEF);
  347 static struct {
  348         struct filterops *for_fop;
  349         int for_nolock;
  350         int for_refcnt;
  351 } sysfilt_ops[EVFILT_SYSCOUNT] = {
  352         { &file_filtops, 1 },                   /* EVFILT_READ */
  353         { &file_filtops, 1 },                   /* EVFILT_WRITE */
  354         { &null_filtops },                      /* EVFILT_AIO */
  355         { &file_filtops, 1 },                   /* EVFILT_VNODE */
  356         { &proc_filtops, 1 },                   /* EVFILT_PROC */
  357         { &sig_filtops, 1 },                    /* EVFILT_SIGNAL */
  358         { &timer_filtops, 1 },                  /* EVFILT_TIMER */
  359         { &file_filtops, 1 },                   /* EVFILT_PROCDESC */
  360         { &fs_filtops, 1 },                     /* EVFILT_FS */
  361         { &null_filtops },                      /* EVFILT_LIO */
  362         { &user_filtops, 1 },                   /* EVFILT_USER */
  363         { &null_filtops },                      /* EVFILT_SENDFILE */
  364         { &file_filtops, 1 },                   /* EVFILT_EMPTY */
  365 };
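
/*
 * Note that the EVFILT_* constants are small negative integers, so the
 * helpers below index this table with ~filt (for example, ~EVFILT_READ
 * is 0); see kqueue_fo_find() and kqueue_fo_release().
 */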
  366 
  367 /*
  368  * Simple redirection for all cdevsw style objects to call their fo_kqfilter
  369  * method.
  370  */
  371 static int
  372 filt_fileattach(struct knote *kn)
  373 {
  374 
  375         return (fo_kqfilter(kn->kn_fp, kn));
  376 }
  377 
  378 /*ARGSUSED*/
  379 static int
  380 kqueue_kqfilter(struct file *fp, struct knote *kn)
  381 {
  382         struct kqueue *kq = kn->kn_fp->f_data;
  383 
  384         if (kn->kn_filter != EVFILT_READ)
  385                 return (EINVAL);
  386 
  387         kn->kn_status |= KN_KQUEUE;
  388         kn->kn_fop = &kqread_filtops;
  389         knlist_add(&kq->kq_sel.si_note, kn, 0);
  390 
  391         return (0);
  392 }
  393 
  394 static void
  395 filt_kqdetach(struct knote *kn)
  396 {
  397         struct kqueue *kq = kn->kn_fp->f_data;
  398 
  399         knlist_remove(&kq->kq_sel.si_note, kn, 0);
  400 }
  401 
  402 /*ARGSUSED*/
  403 static int
  404 filt_kqueue(struct knote *kn, long hint)
  405 {
  406         struct kqueue *kq = kn->kn_fp->f_data;
  407 
  408         kn->kn_data = kq->kq_count;
  409         return (kn->kn_data > 0);
  410 }
  411 
  412 /* XXX - move to kern_proc.c?  */
  413 static int
  414 filt_procattach(struct knote *kn)
  415 {
  416         struct proc *p;
  417         int error;
  418         bool exiting, immediate;
  419 
  420         exiting = immediate = false;
  421         if (kn->kn_sfflags & NOTE_EXIT)
  422                 p = pfind_any(kn->kn_id);
  423         else
  424                 p = pfind(kn->kn_id);
  425         if (p == NULL)
  426                 return (ESRCH);
  427         if (p->p_flag & P_WEXIT)
  428                 exiting = true;
  429 
  430         if ((error = p_cansee(curthread, p))) {
  431                 PROC_UNLOCK(p);
  432                 return (error);
  433         }
  434 
  435         kn->kn_ptr.p_proc = p;
  436         kn->kn_flags |= EV_CLEAR;               /* automatically set */
  437 
  438         /*
  439          * Internal flag indicating registration done by kernel for the
  440          * purposes of getting a NOTE_CHILD notification.
  441          */
  442         if (kn->kn_flags & EV_FLAG2) {
  443                 kn->kn_flags &= ~EV_FLAG2;
  444                 kn->kn_data = kn->kn_sdata;             /* ppid */
  445                 kn->kn_fflags = NOTE_CHILD;
  446                 kn->kn_sfflags &= ~(NOTE_EXIT | NOTE_EXEC | NOTE_FORK);
  447                 immediate = true; /* Force immediate activation of child note. */
  448         }
  449         /*
  450          * Internal flag indicating registration done by kernel (for other than
  451          * NOTE_CHILD).
  452          */
  453         if (kn->kn_flags & EV_FLAG1) {
  454                 kn->kn_flags &= ~EV_FLAG1;
  455         }
  456 
  457         knlist_add(p->p_klist, kn, 1);
  458 
  459         /*
  460          * Immediately activate any child notes or, in the case of a zombie
  461          * target process, exit notes.  The latter is necessary to handle the
  462          * case where the target process, e.g. a child, dies before the kevent
  463          * is registered.
  464          */
  465         if (immediate || (exiting && filt_proc(kn, NOTE_EXIT)))
  466                 KNOTE_ACTIVATE(kn, 0);
  467 
  468         PROC_UNLOCK(p);
  469 
  470         return (0);
  471 }
  472 
  473 /*
  474  * The knote may be attached to a different process, which may exit,
  475  * leaving nothing for the knote to be attached to.  So when the process
  476  * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
  477  * it will be deleted when read out.  However, as part of the knote deletion,
  478  * this routine is called, so a check is needed to avoid actually performing
  479  * a detach, because the original process does not exist any more.
  480  */
  481 /* XXX - move to kern_proc.c?  */
  482 static void
  483 filt_procdetach(struct knote *kn)
  484 {
  485 
  486         knlist_remove(kn->kn_knlist, kn, 0);
  487         kn->kn_ptr.p_proc = NULL;
  488 }
  489 
  490 /* XXX - move to kern_proc.c?  */
  491 static int
  492 filt_proc(struct knote *kn, long hint)
  493 {
  494         struct proc *p;
  495         u_int event;
  496 
  497         p = kn->kn_ptr.p_proc;
  498         if (p == NULL) /* already activated, from attach filter */
  499                 return (0);
  500 
  501         /* Mask off extra data. */
  502         event = (u_int)hint & NOTE_PCTRLMASK;
  503 
  504         /* If the user is interested in this event, record it. */
  505         if (kn->kn_sfflags & event)
  506                 kn->kn_fflags |= event;
  507 
  508         /* Process is gone, so flag the event as finished. */
  509         if (event == NOTE_EXIT) {
  510                 kn->kn_flags |= EV_EOF | EV_ONESHOT;
  511                 kn->kn_ptr.p_proc = NULL;
  512                 if (kn->kn_fflags & NOTE_EXIT)
  513                         kn->kn_data = KW_EXITCODE(p->p_xexit, p->p_xsig);
  514                 if (kn->kn_fflags == 0)
  515                         kn->kn_flags |= EV_DROP;
  516                 return (1);
  517         }
  518 
  519         return (kn->kn_fflags != 0);
  520 }
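
/*
 * As a userland sketch of how this filter behaves (pid and kq assumed to
 * be a forked child and a kqueue descriptor), a caller can watch for the
 * child's exit and pick up its exit status from the returned event:
 *
 *	struct kevent ch, ev;
 *
 *	EV_SET(&ch, pid, EVFILT_PROC, EV_ADD | EV_ONESHOT, NOTE_EXIT, 0, NULL);
 *	(void)kevent(kq, &ch, 1, NULL, 0, NULL);
 *	...
 *	if (kevent(kq, NULL, 0, &ev, 1, NULL) > 0 &&
 *	    (ev.fflags & NOTE_EXIT) != 0)
 *		printf("child exited, status %jd\n", (intmax_t)ev.data);
 */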
  521 
  522 /*
   523  * Called when a process forks.  It mostly does the same as knote(),
   524  * activating all knotes registered to be activated when the process
   525  * forks.  Additionally, for each knote attached to the parent, check
   526  * whether the user wants to track the new process.  If so, attach a
   527  * new knote to it and immediately report an event with the child's
   528  * pid.
  529  */
  530 void
  531 knote_fork(struct knlist *list, int pid)
  532 {
  533         struct kqueue *kq;
  534         struct knote *kn;
  535         struct kevent kev;
  536         int error;
  537 
  538         MPASS(list != NULL);
  539         KNL_ASSERT_LOCKED(list);
  540         if (SLIST_EMPTY(&list->kl_list))
  541                 return;
  542 
  543         memset(&kev, 0, sizeof(kev));
  544         SLIST_FOREACH(kn, &list->kl_list, kn_selnext) {
  545                 kq = kn->kn_kq;
  546                 KQ_LOCK(kq);
  547                 if (kn_in_flux(kn) && (kn->kn_status & KN_SCAN) == 0) {
  548                         KQ_UNLOCK(kq);
  549                         continue;
  550                 }
  551 
  552                 /*
  553                  * The same as knote(), activate the event.
  554                  */
  555                 if ((kn->kn_sfflags & NOTE_TRACK) == 0) {
  556                         if (kn->kn_fop->f_event(kn, NOTE_FORK))
  557                                 KNOTE_ACTIVATE(kn, 1);
  558                         KQ_UNLOCK(kq);
  559                         continue;
  560                 }
  561 
  562                 /*
  563                  * The NOTE_TRACK case. In addition to the activation
  564                  * of the event, we need to register new events to
  565                  * track the child. Drop the locks in preparation for
  566                  * the call to kqueue_register().
  567                  */
  568                 kn_enter_flux(kn);
  569                 KQ_UNLOCK(kq);
  570                 list->kl_unlock(list->kl_lockarg);
  571 
  572                 /*
  573                  * Activate existing knote and register tracking knotes with
  574                  * new process.
  575                  *
  576                  * First register a knote to get just the child notice. This
  577                  * must be a separate note from a potential NOTE_EXIT
  578                  * notification since both NOTE_CHILD and NOTE_EXIT are defined
  579                  * to use the data field (in conflicting ways).
  580                  */
  581                 kev.ident = pid;
  582                 kev.filter = kn->kn_filter;
  583                 kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_ONESHOT |
  584                     EV_FLAG2;
  585                 kev.fflags = kn->kn_sfflags;
  586                 kev.data = kn->kn_id;           /* parent */
  587                 kev.udata = kn->kn_kevent.udata;/* preserve udata */
  588                 error = kqueue_register(kq, &kev, NULL, M_NOWAIT);
  589                 if (error)
  590                         kn->kn_fflags |= NOTE_TRACKERR;
  591 
  592                 /*
  593                  * Then register another knote to track other potential events
  594                  * from the new process.
  595                  */
  596                 kev.ident = pid;
  597                 kev.filter = kn->kn_filter;
  598                 kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
  599                 kev.fflags = kn->kn_sfflags;
  600                 kev.data = kn->kn_id;           /* parent */
  601                 kev.udata = kn->kn_kevent.udata;/* preserve udata */
  602                 error = kqueue_register(kq, &kev, NULL, M_NOWAIT);
  603                 if (error)
  604                         kn->kn_fflags |= NOTE_TRACKERR;
  605                 if (kn->kn_fop->f_event(kn, NOTE_FORK))
  606                         KNOTE_ACTIVATE(kn, 0);
  607                 list->kl_lock(list->kl_lockarg);
  608                 KQ_LOCK(kq);
  609                 kn_leave_flux(kn);
  610                 KQ_UNLOCK_FLUX(kq);
  611         }
  612 }
  613 
  614 /*
  615  * XXX: EVFILT_TIMER should perhaps live in kern_time.c beside the
  616  * interval timer support code.
  617  */
  618 
  619 #define NOTE_TIMER_PRECMASK                                             \
  620     (NOTE_SECONDS | NOTE_MSECONDS | NOTE_USECONDS | NOTE_NSECONDS)
  621 
  622 static sbintime_t
  623 timer2sbintime(int64_t data, int flags)
  624 {
  625         int64_t secs;
  626 
  627         /*
  628          * Macros for converting to the fractional second portion of an
  629          * sbintime_t using 64bit multiplication to improve precision.
  630          */
  631 #define NS_TO_SBT(ns) (((ns) * (((uint64_t)1 << 63) / 500000000)) >> 32)
  632 #define US_TO_SBT(us) (((us) * (((uint64_t)1 << 63) / 500000)) >> 32)
  633 #define MS_TO_SBT(ms) (((ms) * (((uint64_t)1 << 63) / 500)) >> 32)
  634         switch (flags & NOTE_TIMER_PRECMASK) {
  635         case NOTE_SECONDS:
  636 #ifdef __LP64__
  637                 if (data > (SBT_MAX / SBT_1S))
  638                         return (SBT_MAX);
  639 #endif
  640                 return ((sbintime_t)data << 32);
  641         case NOTE_MSECONDS: /* FALLTHROUGH */
  642         case 0:
  643                 if (data >= 1000) {
  644                         secs = data / 1000;
  645 #ifdef __LP64__
  646                         if (secs > (SBT_MAX / SBT_1S))
  647                                 return (SBT_MAX);
  648 #endif
  649                         return (secs << 32 | MS_TO_SBT(data % 1000));
  650                 }
  651                 return (MS_TO_SBT(data));
  652         case NOTE_USECONDS:
  653                 if (data >= 1000000) {
  654                         secs = data / 1000000;
  655 #ifdef __LP64__
  656                         if (secs > (SBT_MAX / SBT_1S))
  657                                 return (SBT_MAX);
  658 #endif
  659                         return (secs << 32 | US_TO_SBT(data % 1000000));
  660                 }
  661                 return (US_TO_SBT(data));
  662         case NOTE_NSECONDS:
  663                 if (data >= 1000000000) {
  664                         secs = data / 1000000000;
  665 #ifdef __LP64__
  666                         if (secs > (SBT_MAX / SBT_1S))
  667                                 return (SBT_MAX);
  668 #endif
  669                         return (secs << 32 | NS_TO_SBT(data % 1000000000));
  670                 }
  671                 return (NS_TO_SBT(data));
  672         default:
  673                 break;
  674         }
  675         return (-1);
  676 }
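
/*
 * For instance, a relative 1500 ms timer (NOTE_MSECONDS, data == 1500)
 * takes the data >= 1000 branch: secs == 1 goes into the upper 32 bits
 * and MS_TO_SBT(500) yields roughly half of 2^32 in the fractional
 * lower 32 bits, so the result is an sbintime_t of about 1.5 seconds.
 */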
  677 
  678 struct kq_timer_cb_data {
  679         struct callout c;
  680         struct proc *p;
  681         struct knote *kn;
  682         int cpuid;
  683         int flags;
  684         TAILQ_ENTRY(kq_timer_cb_data) link;
  685         sbintime_t next;        /* next timer event fires at */
  686         sbintime_t to;          /* precalculated timer period, 0 for abs */
  687 };
  688 
  689 #define KQ_TIMER_CB_ENQUEUED    0x01
  690 
  691 static void
  692 kqtimer_sched_callout(struct kq_timer_cb_data *kc)
  693 {
  694         callout_reset_sbt_on(&kc->c, kc->next, 0, filt_timerexpire, kc->kn,
  695             kc->cpuid, C_ABSOLUTE);
  696 }
  697 
  698 void
  699 kqtimer_proc_continue(struct proc *p)
  700 {
  701         struct kq_timer_cb_data *kc, *kc1;
  702         struct bintime bt;
  703         sbintime_t now;
  704 
  705         PROC_LOCK_ASSERT(p, MA_OWNED);
  706 
  707         getboottimebin(&bt);
  708         now = bttosbt(bt);
  709 
  710         TAILQ_FOREACH_SAFE(kc, &p->p_kqtim_stop, link, kc1) {
  711                 TAILQ_REMOVE(&p->p_kqtim_stop, kc, link);
  712                 kc->flags &= ~KQ_TIMER_CB_ENQUEUED;
  713                 if (kc->next <= now)
  714                         filt_timerexpire_l(kc->kn, true);
  715                 else
  716                         kqtimer_sched_callout(kc);
  717         }
  718 }
  719 
  720 static void
  721 filt_timerexpire_l(struct knote *kn, bool proc_locked)
  722 {
  723         struct kq_timer_cb_data *kc;
  724         struct proc *p;
  725         uint64_t delta;
  726         sbintime_t now;
  727 
  728         kc = kn->kn_ptr.p_v;
  729 
  730         if ((kn->kn_flags & EV_ONESHOT) != 0 || kc->to == 0) {
  731                 kn->kn_data++;
  732                 KNOTE_ACTIVATE(kn, 0);
  733                 return;
  734         }
  735 
  736         now = sbinuptime();
  737         if (now >= kc->next) {
  738                 delta = (now - kc->next) / kc->to;
  739                 if (delta == 0)
  740                         delta = 1;
  741                 kn->kn_data += delta;
  742                 kc->next += (delta + 1) * kc->to;
  743                 if (now >= kc->next)    /* overflow */
  744                         kc->next = now + kc->to;
  745                 KNOTE_ACTIVATE(kn, 0);  /* XXX - handle locking */
  746         }
  747 
  748         /*
  749          * Initial check for stopped kc->p is racy.  It is fine to
  750          * miss the set of the stop flags, at worst we would schedule
   751          * one more callout.  On the other hand, it is not fine to skip
   752          * scheduling when we missed the clearing of the flags, so we
   753          * recheck them under the lock and observe consistent state.
  754          */
  755         p = kc->p;
  756         if (P_SHOULDSTOP(p) || P_KILLED(p)) {
  757                 if (!proc_locked)
  758                         PROC_LOCK(p);
  759                 if (P_SHOULDSTOP(p) || P_KILLED(p)) {
  760                         if ((kc->flags & KQ_TIMER_CB_ENQUEUED) == 0) {
  761                                 kc->flags |= KQ_TIMER_CB_ENQUEUED;
  762                                 TAILQ_INSERT_TAIL(&p->p_kqtim_stop, kc, link);
  763                         }
  764                         if (!proc_locked)
  765                                 PROC_UNLOCK(p);
  766                         return;
  767                 }
  768                 if (!proc_locked)
  769                         PROC_UNLOCK(p);
  770         }
  771         kqtimer_sched_callout(kc);
  772 }
  773 
  774 static void
  775 filt_timerexpire(void *knx)
  776 {
  777         filt_timerexpire_l(knx, false);
  778 }
  779 
  780 /*
  781  * data contains amount of time to sleep
  782  */
  783 static int
  784 filt_timervalidate(struct knote *kn, sbintime_t *to)
  785 {
  786         struct bintime bt;
  787         sbintime_t sbt;
  788 
  789         if (kn->kn_sdata < 0)
  790                 return (EINVAL);
  791         if (kn->kn_sdata == 0 && (kn->kn_flags & EV_ONESHOT) == 0)
  792                 kn->kn_sdata = 1;
  793         /*
  794          * The only fflags values supported are the timer unit
  795          * (precision) and the absolute time indicator.
  796          */
  797         if ((kn->kn_sfflags & ~(NOTE_TIMER_PRECMASK | NOTE_ABSTIME)) != 0)
  798                 return (EINVAL);
  799 
  800         *to = timer2sbintime(kn->kn_sdata, kn->kn_sfflags);
  801         if (*to < 0)
  802                 return (EINVAL);
  803         if ((kn->kn_sfflags & NOTE_ABSTIME) != 0) {
  804                 getboottimebin(&bt);
  805                 sbt = bttosbt(bt);
  806                 *to = MAX(0, *to - sbt);
  807         }
  808         return (0);
  809 }
  810 
  811 static int
  812 filt_timerattach(struct knote *kn)
  813 {
  814         struct kq_timer_cb_data *kc;
  815         sbintime_t to;
  816         int error;
  817 
  818         to = -1;
  819         error = filt_timervalidate(kn, &to);
  820         if (error != 0)
  821                 return (error);
  822         KASSERT(to > 0 || (kn->kn_flags & EV_ONESHOT) != 0 ||
  823             (kn->kn_sfflags & NOTE_ABSTIME) != 0,
  824             ("%s: periodic timer has a calculated zero timeout", __func__));
  825         KASSERT(to >= 0,
  826             ("%s: timer has a calculated negative timeout", __func__));
  827 
  828         if (atomic_fetchadd_int(&kq_ncallouts, 1) + 1 > kq_calloutmax) {
  829                 atomic_subtract_int(&kq_ncallouts, 1);
  830                 return (ENOMEM);
  831         }
  832 
  833         if ((kn->kn_sfflags & NOTE_ABSTIME) == 0)
  834                 kn->kn_flags |= EV_CLEAR;       /* automatically set */
  835         kn->kn_status &= ~KN_DETACHED;          /* knlist_add clears it */
  836         kn->kn_ptr.p_v = kc = malloc(sizeof(*kc), M_KQUEUE, M_WAITOK);
  837         kc->kn = kn;
  838         kc->p = curproc;
  839         kc->cpuid = PCPU_GET(cpuid);
  840         kc->flags = 0;
  841         callout_init(&kc->c, 1);
  842         filt_timerstart(kn, to);
  843 
  844         return (0);
  845 }
  846 
  847 static void
  848 filt_timerstart(struct knote *kn, sbintime_t to)
  849 {
  850         struct kq_timer_cb_data *kc;
  851 
  852         kc = kn->kn_ptr.p_v;
  853         if ((kn->kn_sfflags & NOTE_ABSTIME) != 0) {
  854                 kc->next = to;
  855                 kc->to = 0;
  856         } else {
  857                 kc->next = to + sbinuptime();
  858                 kc->to = to;
  859         }
  860         kqtimer_sched_callout(kc);
  861 }
  862 
  863 static void
  864 filt_timerdetach(struct knote *kn)
  865 {
  866         struct kq_timer_cb_data *kc;
  867         unsigned int old __unused;
  868         bool pending;
  869 
  870         kc = kn->kn_ptr.p_v;
  871         do {
  872                 callout_drain(&kc->c);
  873 
  874                 /*
  875                  * kqtimer_proc_continue() might have rescheduled this callout.
  876                  * Double-check, using the process mutex as an interlock.
  877                  */
  878                 PROC_LOCK(kc->p);
  879                 if ((kc->flags & KQ_TIMER_CB_ENQUEUED) != 0) {
  880                         kc->flags &= ~KQ_TIMER_CB_ENQUEUED;
  881                         TAILQ_REMOVE(&kc->p->p_kqtim_stop, kc, link);
  882                 }
  883                 pending = callout_pending(&kc->c);
  884                 PROC_UNLOCK(kc->p);
  885         } while (pending);
  886         free(kc, M_KQUEUE);
  887         old = atomic_fetchadd_int(&kq_ncallouts, -1);
  888         KASSERT(old > 0, ("Number of callouts cannot become negative"));
  889         kn->kn_status |= KN_DETACHED;   /* knlist_remove sets it */
  890 }
  891 
  892 static void
  893 filt_timertouch(struct knote *kn, struct kevent *kev, u_long type)
  894 {
   895         struct kq_timer_cb_data *kc;
  896         struct kqueue *kq;
  897         sbintime_t to;
  898         int error;
  899 
  900         switch (type) {
  901         case EVENT_REGISTER:
  902                 /* Handle re-added timers that update data/fflags */
  903                 if (kev->flags & EV_ADD) {
  904                         kc = kn->kn_ptr.p_v;
  905 
  906                         /* Drain any existing callout. */
  907                         callout_drain(&kc->c);
  908 
  909                         /* Throw away any existing undelivered record
  910                          * of the timer expiration. This is done under
  911                          * the presumption that if a process is
  912                          * re-adding this timer with new parameters,
  913                          * it is no longer interested in what may have
  914                          * happened under the old parameters. If it is
  915                          * interested, it can wait for the expiration,
  916                          * delete the old timer definition, and then
  917                          * add the new one.
  918                          *
  919                          * This has to be done while the kq is locked:
  920                          *   - if enqueued, dequeue
  921                          *   - make it no longer active
  922                          *   - clear the count of expiration events
  923                          */
  924                         kq = kn->kn_kq;
  925                         KQ_LOCK(kq);
  926                         if (kn->kn_status & KN_QUEUED)
  927                                 knote_dequeue(kn);
  928 
  929                         kn->kn_status &= ~KN_ACTIVE;
  930                         kn->kn_data = 0;
  931                         KQ_UNLOCK(kq);
   932
  933                         /* Reschedule timer based on new data/fflags */
  934                         kn->kn_sfflags = kev->fflags;
  935                         kn->kn_sdata = kev->data;
  936                         error = filt_timervalidate(kn, &to);
  937                         if (error != 0) {
  938                                 kn->kn_flags |= EV_ERROR;
  939                                 kn->kn_data = error;
  940                         } else
  941                                 filt_timerstart(kn, to);
  942                 }
  943                 break;
  944 
  945         case EVENT_PROCESS:
  946                 *kev = kn->kn_kevent;
  947                 if (kn->kn_flags & EV_CLEAR) {
  948                         kn->kn_data = 0;
  949                         kn->kn_fflags = 0;
  950                 }
  951                 break;
  952 
  953         default:
  954                 panic("filt_timertouch() - invalid type (%ld)", type);
  955                 break;
  956         }
  957 }
  958 
  959 static int
  960 filt_timer(struct knote *kn, long hint)
  961 {
  962 
  963         return (kn->kn_data != 0);
  964 }
  965 
  966 static int
  967 filt_userattach(struct knote *kn)
  968 {
  969 
  970         /* 
  971          * EVFILT_USER knotes are not attached to anything in the kernel.
  972          */ 
  973         kn->kn_hook = NULL;
  974         if (kn->kn_fflags & NOTE_TRIGGER)
  975                 kn->kn_hookid = 1;
  976         else
  977                 kn->kn_hookid = 0;
  978         return (0);
  979 }
  980 
  981 static void
  982 filt_userdetach(__unused struct knote *kn)
  983 {
  984 
  985         /*
  986          * EVFILT_USER knotes are not attached to anything in the kernel.
  987          */
  988 }
  989 
  990 static int
  991 filt_user(struct knote *kn, __unused long hint)
  992 {
  993 
  994         return (kn->kn_hookid);
  995 }
  996 
  997 static void
  998 filt_usertouch(struct knote *kn, struct kevent *kev, u_long type)
  999 {
 1000         u_int ffctrl;
 1001 
 1002         switch (type) {
 1003         case EVENT_REGISTER:
 1004                 if (kev->fflags & NOTE_TRIGGER)
 1005                         kn->kn_hookid = 1;
 1006 
 1007                 ffctrl = kev->fflags & NOTE_FFCTRLMASK;
 1008                 kev->fflags &= NOTE_FFLAGSMASK;
 1009                 switch (ffctrl) {
 1010                 case NOTE_FFNOP:
 1011                         break;
 1012 
 1013                 case NOTE_FFAND:
 1014                         kn->kn_sfflags &= kev->fflags;
 1015                         break;
 1016 
 1017                 case NOTE_FFOR:
 1018                         kn->kn_sfflags |= kev->fflags;
 1019                         break;
 1020 
 1021                 case NOTE_FFCOPY:
 1022                         kn->kn_sfflags = kev->fflags;
 1023                         break;
 1024 
 1025                 default:
 1026                         /* XXX Return error? */
 1027                         break;
 1028                 }
 1029                 kn->kn_sdata = kev->data;
 1030                 if (kev->flags & EV_CLEAR) {
 1031                         kn->kn_hookid = 0;
 1032                         kn->kn_data = 0;
 1033                         kn->kn_fflags = 0;
 1034                 }
 1035                 break;
 1036 
 1037         case EVENT_PROCESS:
 1038                 *kev = kn->kn_kevent;
 1039                 kev->fflags = kn->kn_sfflags;
 1040                 kev->data = kn->kn_sdata;
 1041                 if (kn->kn_flags & EV_CLEAR) {
 1042                         kn->kn_hookid = 0;
 1043                         kn->kn_data = 0;
 1044                         kn->kn_fflags = 0;
 1045                 }
 1046                 break;
 1047 
 1048         default:
 1049                 panic("filt_usertouch() - invalid type (%ld)", type);
 1050                 break;
 1051         }
 1052 }
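
/*
 * A minimal userland sketch of the EVFILT_USER machinery above: one
 * thread registers a user event and waits on it, and another thread
 * later fires it with NOTE_TRIGGER (the identifier 1 here is arbitrary,
 * and kq is assumed to be a kqueue descriptor):
 *
 *	struct kevent ch;
 *
 *	EV_SET(&ch, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	(void)kevent(kq, &ch, 1, NULL, 0, NULL);
 *	...
 *	EV_SET(&ch, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
 *	(void)kevent(kq, &ch, 1, NULL, 0, NULL);
 */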
 1053 
 1054 int
 1055 sys_kqueue(struct thread *td, struct kqueue_args *uap)
 1056 {
 1057 
 1058         return (kern_kqueue(td, 0, NULL));
 1059 }
 1060 
 1061 static void
 1062 kqueue_init(struct kqueue *kq)
 1063 {
 1064 
 1065         mtx_init(&kq->kq_lock, "kqueue", NULL, MTX_DEF | MTX_DUPOK);
 1066         TAILQ_INIT(&kq->kq_head);
 1067         knlist_init_mtx(&kq->kq_sel.si_note, &kq->kq_lock);
 1068         TASK_INIT(&kq->kq_task, 0, kqueue_task, kq);
 1069 }
 1070 
 1071 int
 1072 kern_kqueue(struct thread *td, int flags, struct filecaps *fcaps)
 1073 {
 1074         struct filedesc *fdp;
 1075         struct kqueue *kq;
 1076         struct file *fp;
 1077         struct ucred *cred;
 1078         int fd, error;
 1079 
 1080         fdp = td->td_proc->p_fd;
 1081         cred = td->td_ucred;
 1082         if (!chgkqcnt(cred->cr_ruidinfo, 1, lim_cur(td, RLIMIT_KQUEUES)))
 1083                 return (ENOMEM);
 1084 
 1085         error = falloc_caps(td, &fp, &fd, flags, fcaps);
 1086         if (error != 0) {
 1087                 chgkqcnt(cred->cr_ruidinfo, -1, 0);
 1088                 return (error);
 1089         }
 1090 
 1091         /* An extra reference on `fp' has been held for us by falloc(). */
 1092         kq = malloc(sizeof *kq, M_KQUEUE, M_WAITOK | M_ZERO);
 1093         kqueue_init(kq);
 1094         kq->kq_fdp = fdp;
 1095         kq->kq_cred = crhold(cred);
 1096 
 1097         FILEDESC_XLOCK(fdp);
 1098         TAILQ_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_list);
 1099         FILEDESC_XUNLOCK(fdp);
 1100 
 1101         finit(fp, FREAD | FWRITE, DTYPE_KQUEUE, kq, &kqueueops);
 1102         fdrop(fp, td);
 1103 
 1104         td->td_retval[0] = fd;
 1105         return (0);
 1106 }
 1107 
 1108 struct g_kevent_args {
 1109         int     fd;
 1110         void    *changelist;
 1111         int     nchanges;
 1112         void    *eventlist;
 1113         int     nevents;
 1114         const struct timespec *timeout;
 1115 };
 1116 
 1117 int
 1118 sys_kevent(struct thread *td, struct kevent_args *uap)
 1119 {
 1120         struct kevent_copyops k_ops = {
 1121                 .arg = uap,
 1122                 .k_copyout = kevent_copyout,
 1123                 .k_copyin = kevent_copyin,
 1124                 .kevent_size = sizeof(struct kevent),
 1125         };
 1126         struct g_kevent_args gk_args = {
 1127                 .fd = uap->fd,
 1128                 .changelist = uap->changelist,
 1129                 .nchanges = uap->nchanges,
 1130                 .eventlist = uap->eventlist,
 1131                 .nevents = uap->nevents,
 1132                 .timeout = uap->timeout,
 1133         };
 1134 
 1135         return (kern_kevent_generic(td, &gk_args, &k_ops, "kevent"));
 1136 }
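
/*
 * From userland, the changelist/eventlist pair maps directly onto the
 * copyin/copyout callbacks used below; a caller might register a read
 * filter on a descriptor and then block for one event with a timeout,
 * roughly as follows (fd and kq assumed to be valid descriptors):
 *
 *	struct kevent ch, ev;
 *	struct timespec ts = { .tv_sec = 1 };
 *	int n;
 *
 *	EV_SET(&ch, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	n = kevent(kq, &ch, 1, &ev, 1, &ts);
 *
 * A positive return delivers one event whose ev.data is the number of
 * bytes ready to read; zero indicates the one-second timeout expired.
 */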
 1137 
 1138 static int
 1139 kern_kevent_generic(struct thread *td, struct g_kevent_args *uap,
 1140     struct kevent_copyops *k_ops, const char *struct_name)
 1141 {
 1142         struct timespec ts, *tsp;
 1143 #ifdef KTRACE
 1144         struct kevent *eventlist = uap->eventlist;
 1145 #endif
 1146         int error;
 1147 
 1148         if (uap->timeout != NULL) {
 1149                 error = copyin(uap->timeout, &ts, sizeof(ts));
 1150                 if (error)
 1151                         return (error);
 1152                 tsp = &ts;
 1153         } else
 1154                 tsp = NULL;
 1155 
 1156 #ifdef KTRACE
 1157         if (KTRPOINT(td, KTR_STRUCT_ARRAY))
 1158                 ktrstructarray(struct_name, UIO_USERSPACE, uap->changelist,
 1159                     uap->nchanges, k_ops->kevent_size);
 1160 #endif
 1161 
 1162         error = kern_kevent(td, uap->fd, uap->nchanges, uap->nevents,
 1163             k_ops, tsp);
 1164 
 1165 #ifdef KTRACE
 1166         if (error == 0 && KTRPOINT(td, KTR_STRUCT_ARRAY))
 1167                 ktrstructarray(struct_name, UIO_USERSPACE, eventlist,
 1168                     td->td_retval[0], k_ops->kevent_size);
 1169 #endif
 1170 
 1171         return (error);
 1172 }
 1173 
 1174 /*
 1175  * Copy 'count' items into the destination list pointed to by uap->eventlist.
 1176  */
 1177 static int
 1178 kevent_copyout(void *arg, struct kevent *kevp, int count)
 1179 {
 1180         struct kevent_args *uap;
 1181         int error;
 1182 
 1183         KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
 1184         uap = (struct kevent_args *)arg;
 1185 
 1186         error = copyout(kevp, uap->eventlist, count * sizeof *kevp);
 1187         if (error == 0)
 1188                 uap->eventlist += count;
 1189         return (error);
 1190 }
 1191 
 1192 /*
 1193  * Copy 'count' items from the list pointed to by uap->changelist.
 1194  */
 1195 static int
 1196 kevent_copyin(void *arg, struct kevent *kevp, int count)
 1197 {
 1198         struct kevent_args *uap;
 1199         int error;
 1200 
 1201         KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
 1202         uap = (struct kevent_args *)arg;
 1203 
 1204         error = copyin(uap->changelist, kevp, count * sizeof *kevp);
 1205         if (error == 0)
 1206                 uap->changelist += count;
 1207         return (error);
 1208 }
 1209 
 1210 #ifdef COMPAT_FREEBSD11
 1211 static int
 1212 kevent11_copyout(void *arg, struct kevent *kevp, int count)
 1213 {
 1214         struct freebsd11_kevent_args *uap;
 1215         struct kevent_freebsd11 kev11;
 1216         int error, i;
 1217 
 1218         KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
 1219         uap = (struct freebsd11_kevent_args *)arg;
 1220 
 1221         for (i = 0; i < count; i++) {
 1222                 kev11.ident = kevp->ident;
 1223                 kev11.filter = kevp->filter;
 1224                 kev11.flags = kevp->flags;
 1225                 kev11.fflags = kevp->fflags;
 1226                 kev11.data = kevp->data;
 1227                 kev11.udata = kevp->udata;
 1228                 error = copyout(&kev11, uap->eventlist, sizeof(kev11));
 1229                 if (error != 0)
 1230                         break;
 1231                 uap->eventlist++;
 1232                 kevp++;
 1233         }
 1234         return (error);
 1235 }
 1236 
 1237 /*
 1238  * Copy 'count' items from the list pointed to by uap->changelist.
 1239  */
 1240 static int
 1241 kevent11_copyin(void *arg, struct kevent *kevp, int count)
 1242 {
 1243         struct freebsd11_kevent_args *uap;
 1244         struct kevent_freebsd11 kev11;
 1245         int error, i;
 1246 
 1247         KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
 1248         uap = (struct freebsd11_kevent_args *)arg;
 1249 
 1250         for (i = 0; i < count; i++) {
 1251                 error = copyin(uap->changelist, &kev11, sizeof(kev11));
 1252                 if (error != 0)
 1253                         break;
 1254                 kevp->ident = kev11.ident;
 1255                 kevp->filter = kev11.filter;
 1256                 kevp->flags = kev11.flags;
 1257                 kevp->fflags = kev11.fflags;
 1258                 kevp->data = (uintptr_t)kev11.data;
 1259                 kevp->udata = kev11.udata;
 1260                 bzero(&kevp->ext, sizeof(kevp->ext));
 1261                 uap->changelist++;
 1262                 kevp++;
 1263         }
 1264         return (error);
 1265 }
 1266 
 1267 int
 1268 freebsd11_kevent(struct thread *td, struct freebsd11_kevent_args *uap)
 1269 {
 1270         struct kevent_copyops k_ops = {
 1271                 .arg = uap,
 1272                 .k_copyout = kevent11_copyout,
 1273                 .k_copyin = kevent11_copyin,
 1274                 .kevent_size = sizeof(struct kevent_freebsd11),
 1275         };
 1276         struct g_kevent_args gk_args = {
 1277                 .fd = uap->fd,
 1278                 .changelist = uap->changelist,
 1279                 .nchanges = uap->nchanges,
 1280                 .eventlist = uap->eventlist,
 1281                 .nevents = uap->nevents,
 1282                 .timeout = uap->timeout,
 1283         };
 1284 
 1285         return (kern_kevent_generic(td, &gk_args, &k_ops, "kevent_freebsd11"));
 1286 }
 1287 #endif
 1288 
 1289 int
 1290 kern_kevent(struct thread *td, int fd, int nchanges, int nevents,
 1291     struct kevent_copyops *k_ops, const struct timespec *timeout)
 1292 {
 1293         cap_rights_t rights;
 1294         struct file *fp;
 1295         int error;
 1296 
 1297         cap_rights_init_zero(&rights);
 1298         if (nchanges > 0)
 1299                 cap_rights_set_one(&rights, CAP_KQUEUE_CHANGE);
 1300         if (nevents > 0)
 1301                 cap_rights_set_one(&rights, CAP_KQUEUE_EVENT);
 1302         error = fget(td, fd, &rights, &fp);
 1303         if (error != 0)
 1304                 return (error);
 1305 
 1306         error = kern_kevent_fp(td, fp, nchanges, nevents, k_ops, timeout);
 1307         fdrop(fp, td);
 1308 
 1309         return (error);
 1310 }
 1311 
 1312 static int
 1313 kqueue_kevent(struct kqueue *kq, struct thread *td, int nchanges, int nevents,
 1314     struct kevent_copyops *k_ops, const struct timespec *timeout)
 1315 {
 1316         struct kevent keva[KQ_NEVENTS];
 1317         struct kevent *kevp, *changes;
 1318         int i, n, nerrors, error;
 1319 
 1320         if (nchanges < 0)
 1321                 return (EINVAL);
 1322 
 1323         nerrors = 0;
 1324         while (nchanges > 0) {
 1325                 n = nchanges > KQ_NEVENTS ? KQ_NEVENTS : nchanges;
 1326                 error = k_ops->k_copyin(k_ops->arg, keva, n);
 1327                 if (error)
 1328                         return (error);
 1329                 changes = keva;
 1330                 for (i = 0; i < n; i++) {
 1331                         kevp = &changes[i];
 1332                         if (!kevp->filter)
 1333                                 continue;
 1334                         kevp->flags &= ~EV_SYSFLAGS;
 1335                         error = kqueue_register(kq, kevp, td, M_WAITOK);
 1336                         if (error || (kevp->flags & EV_RECEIPT)) {
 1337                                 if (nevents == 0)
 1338                                         return (error);
 1339                                 kevp->flags = EV_ERROR;
 1340                                 kevp->data = error;
 1341                                 (void)k_ops->k_copyout(k_ops->arg, kevp, 1);
 1342                                 nevents--;
 1343                                 nerrors++;
 1344                         }
 1345                 }
 1346                 nchanges -= n;
 1347         }
 1348         if (nerrors) {
 1349                 td->td_retval[0] = nerrors;
 1350                 return (0);
 1351         }
 1352 
 1353         return (kqueue_scan(kq, nevents, k_ops, timeout, keva, td));
 1354 }
 1355 
 1356 int
 1357 kern_kevent_fp(struct thread *td, struct file *fp, int nchanges, int nevents,
 1358     struct kevent_copyops *k_ops, const struct timespec *timeout)
 1359 {
 1360         struct kqueue *kq;
 1361         int error;
 1362 
 1363         error = kqueue_acquire(fp, &kq);
 1364         if (error != 0)
 1365                 return (error);
 1366         error = kqueue_kevent(kq, td, nchanges, nevents, k_ops, timeout);
 1367         kqueue_release(kq, 0);
 1368         return (error);
 1369 }
 1370 
 1371 /*
 1372  * Performs a kevent() call on a temporarily created kqueue. This can be
 1373  * used to perform one-shot polling, similar to poll() and select().
 1374  */
 1375 int
 1376 kern_kevent_anonymous(struct thread *td, int nevents,
 1377     struct kevent_copyops *k_ops)
 1378 {
 1379         struct kqueue kq = {};
 1380         int error;
 1381 
 1382         kqueue_init(&kq);
 1383         kq.kq_refcnt = 1;
 1384         error = kqueue_kevent(&kq, td, nevents, nevents, k_ops, NULL);
 1385         kqueue_drain(&kq, td);
 1386         kqueue_destroy(&kq);
 1387         return (error);
 1388 }
 1389 
 1390 int
 1391 kqueue_add_filteropts(int filt, struct filterops *filtops)
 1392 {
 1393         int error;
 1394 
 1395         error = 0;
 1396         if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) {
 1397                 printf(
 1398 "trying to add a filterop that is out of range: %d is beyond %d\n",
 1399                     ~filt, EVFILT_SYSCOUNT);
 1400                 return EINVAL;
 1401         }
 1402         mtx_lock(&filterops_lock);
 1403         if (sysfilt_ops[~filt].for_fop != &null_filtops &&
 1404             sysfilt_ops[~filt].for_fop != NULL)
 1405                 error = EEXIST;
 1406         else {
 1407                 sysfilt_ops[~filt].for_fop = filtops;
 1408                 sysfilt_ops[~filt].for_refcnt = 0;
 1409         }
 1410         mtx_unlock(&filterops_lock);
 1411 
 1412         return (error);
 1413 }
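
/*
 * Subsystems that provide their own filters hook into the table above at
 * load time; for example (a sketch based on how the AIO code registers
 * its filters), a module initialization routine might do:
 *
 *	static struct filterops aio_filtops = {
 *		.f_isfd = 0,
 *		.f_attach = filt_aioattach,
 *		.f_detach = filt_aiodetach,
 *		.f_event = filt_aio,
 *	};
 *	...
 *	kqueue_add_filteropts(EVFILT_AIO, &aio_filtops);
 */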
 1414 
 1415 int
 1416 kqueue_del_filteropts(int filt)
 1417 {
 1418         int error;
 1419 
 1420         error = 0;
 1421         if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
 1422                 return EINVAL;
 1423 
 1424         mtx_lock(&filterops_lock);
 1425         if (sysfilt_ops[~filt].for_fop == &null_filtops ||
 1426             sysfilt_ops[~filt].for_fop == NULL)
 1427                 error = EINVAL;
 1428         else if (sysfilt_ops[~filt].for_refcnt != 0)
 1429                 error = EBUSY;
 1430         else {
 1431                 sysfilt_ops[~filt].for_fop = &null_filtops;
 1432                 sysfilt_ops[~filt].for_refcnt = 0;
 1433         }
 1434         mtx_unlock(&filterops_lock);
 1435 
 1436         return error;
 1437 }
 1438 
 1439 static struct filterops *
 1440 kqueue_fo_find(int filt)
 1441 {
 1442 
 1443         if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
 1444                 return NULL;
 1445 
 1446         if (sysfilt_ops[~filt].for_nolock)
 1447                 return sysfilt_ops[~filt].for_fop;
 1448 
 1449         mtx_lock(&filterops_lock);
 1450         sysfilt_ops[~filt].for_refcnt++;
 1451         if (sysfilt_ops[~filt].for_fop == NULL)
 1452                 sysfilt_ops[~filt].for_fop = &null_filtops;
 1453         mtx_unlock(&filterops_lock);
 1454 
 1455         return sysfilt_ops[~filt].for_fop;
 1456 }
 1457 
 1458 static void
 1459 kqueue_fo_release(int filt)
 1460 {
 1461 
 1462         if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
 1463                 return;
 1464 
 1465         if (sysfilt_ops[~filt].for_nolock)
 1466                 return;
 1467 
 1468         mtx_lock(&filterops_lock);
 1469         KASSERT(sysfilt_ops[~filt].for_refcnt > 0,
 1470             ("filter object refcount not valid on release"));
 1471         sysfilt_ops[~filt].for_refcnt--;
 1472         mtx_unlock(&filterops_lock);
 1473 }
 1474 
 1475 /*
 1476  * A ref to kq (obtained via kqueue_acquire) must be held.
 1477  */
 1478 static int
 1479 kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td,
 1480     int mflag)
 1481 {
 1482         struct filterops *fops;
 1483         struct file *fp;
 1484         struct knote *kn, *tkn;
 1485         struct knlist *knl;
 1486         int error, filt, event;
 1487         int haskqglobal, filedesc_unlock;
 1488 
 1489         if ((kev->flags & (EV_ENABLE | EV_DISABLE)) == (EV_ENABLE | EV_DISABLE))
 1490                 return (EINVAL);
 1491 
 1492         fp = NULL;
 1493         kn = NULL;
 1494         knl = NULL;
 1495         error = 0;
 1496         haskqglobal = 0;
 1497         filedesc_unlock = 0;
 1498 
 1499         filt = kev->filter;
 1500         fops = kqueue_fo_find(filt);
 1501         if (fops == NULL)
 1502                 return EINVAL;
 1503 
 1504         if (kev->flags & EV_ADD) {
 1505                 /*
 1506                  * Prevent waiting with locks.  Non-sleepable
 1507                  * allocation failures are handled in the loop, only
 1508                  * if the spare knote appears to be actually required.
 1509                  */
 1510                 tkn = knote_alloc(mflag);
 1511         } else {
 1512                 tkn = NULL;
 1513         }
 1514 
 1515 findkn:
 1516         if (fops->f_isfd) {
 1517                 KASSERT(td != NULL, ("td is NULL"));
 1518                 if (kev->ident > INT_MAX)
 1519                         error = EBADF;
 1520                 else
 1521                         error = fget(td, kev->ident, &cap_event_rights, &fp);
 1522                 if (error)
 1523                         goto done;
 1524 
 1525                 if ((kev->flags & EV_ADD) == EV_ADD && kqueue_expand(kq, fops,
 1526                     kev->ident, M_NOWAIT) != 0) {
 1527                         /* try again */
 1528                         fdrop(fp, td);
 1529                         fp = NULL;
 1530                         error = kqueue_expand(kq, fops, kev->ident, mflag);
 1531                         if (error)
 1532                                 goto done;
 1533                         goto findkn;
 1534                 }
 1535 
 1536                 if (fp->f_type == DTYPE_KQUEUE) {
 1537                         /*
 1538                          * If we add some intelligence about what we are doing,
 1539                          * we should be able to support events on ourselves.
 1540                          * We need to know when we are doing this to prevent
 1541                          * getting both the knlist lock and the kq lock since
 1542                          * they are the same thing.
 1543                          */
 1544                         if (fp->f_data == kq) {
 1545                                 error = EINVAL;
 1546                                 goto done;
 1547                         }
 1548 
 1549                         /*
 1550                          * Pre-lock the filedesc before the global
 1551                          * lock mutex, see the comment in
 1552                          * kqueue_close().
 1553                          */
 1554                         FILEDESC_XLOCK(td->td_proc->p_fd);
 1555                         filedesc_unlock = 1;
 1556                         KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
 1557                 }
 1558 
 1559                 KQ_LOCK(kq);
 1560                 if (kev->ident < kq->kq_knlistsize) {
 1561                         SLIST_FOREACH(kn, &kq->kq_knlist[kev->ident], kn_link)
 1562                                 if (kev->filter == kn->kn_filter)
 1563                                         break;
 1564                 }
 1565         } else {
 1566                 if ((kev->flags & EV_ADD) == EV_ADD) {
 1567                         error = kqueue_expand(kq, fops, kev->ident, mflag);
 1568                         if (error != 0)
 1569                                 goto done;
 1570                 }
 1571 
 1572                 KQ_LOCK(kq);
 1573 
 1574                 /*
 1575                  * If possible, find an existing knote to use for this kevent.
 1576                  */
 1577                 if (kev->filter == EVFILT_PROC &&
 1578                     (kev->flags & (EV_FLAG1 | EV_FLAG2)) != 0) {
 1579                         /* This is an internal creation of a process tracking
 1580                          * note. Don't attempt to coalesce this with an
 1581                          * existing note.
 1582                          */
 1583                         ;                       
 1584                 } else if (kq->kq_knhashmask != 0) {
 1585                         struct klist *list;
 1586 
 1587                         list = &kq->kq_knhash[
 1588                             KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
 1589                         SLIST_FOREACH(kn, list, kn_link)
 1590                                 if (kev->ident == kn->kn_id &&
 1591                                     kev->filter == kn->kn_filter)
 1592                                         break;
 1593                 }
 1594         }
 1595 
 1596         /* knote is in the process of changing, wait for it to stabilize. */
 1597         if (kn != NULL && kn_in_flux(kn)) {
 1598                 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
 1599                 if (filedesc_unlock) {
 1600                         FILEDESC_XUNLOCK(td->td_proc->p_fd);
 1601                         filedesc_unlock = 0;
 1602                 }
 1603                 kq->kq_state |= KQ_FLUXWAIT;
 1604                 msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqflxwt", 0);
 1605                 if (fp != NULL) {
 1606                         fdrop(fp, td);
 1607                         fp = NULL;
 1608                 }
 1609                 goto findkn;
 1610         }
 1611 
 1612         /*
 1613          * kn now contains the matching knote, or NULL if no match
 1614          */
 1615         if (kn == NULL) {
 1616                 if (kev->flags & EV_ADD) {
 1617                         kn = tkn;
 1618                         tkn = NULL;
 1619                         if (kn == NULL) {
 1620                                 KQ_UNLOCK(kq);
 1621                                 error = ENOMEM;
 1622                                 goto done;
 1623                         }
 1624                         kn->kn_fp = fp;
 1625                         kn->kn_kq = kq;
 1626                         kn->kn_fop = fops;
 1627                         /*
 1628                          * apply reference counts to knote structure, and
 1629                          * do not release it at the end of this routine.
 1630                          */
 1631                         fops = NULL;
 1632                         fp = NULL;
 1633 
 1634                         kn->kn_sfflags = kev->fflags;
 1635                         kn->kn_sdata = kev->data;
 1636                         kev->fflags = 0;
 1637                         kev->data = 0;
 1638                         kn->kn_kevent = *kev;
 1639                         kn->kn_kevent.flags &= ~(EV_ADD | EV_DELETE |
 1640                             EV_ENABLE | EV_DISABLE | EV_FORCEONESHOT);
 1641                         kn->kn_status = KN_DETACHED;
 1642                         if ((kev->flags & EV_DISABLE) != 0)
 1643                                 kn->kn_status |= KN_DISABLED;
 1644                         kn_enter_flux(kn);
 1645 
 1646                         error = knote_attach(kn, kq);
 1647                         KQ_UNLOCK(kq);
 1648                         if (error != 0) {
 1649                                 tkn = kn;
 1650                                 goto done;
 1651                         }
 1652 
 1653                         if ((error = kn->kn_fop->f_attach(kn)) != 0) {
 1654                                 knote_drop_detached(kn, td);
 1655                                 goto done;
 1656                         }
 1657                         knl = kn_list_lock(kn);
 1658                         goto done_ev_add;
 1659                 } else {
 1660                         /* No matching knote and the EV_ADD flag is not set. */
 1661                         KQ_UNLOCK(kq);
 1662                         error = ENOENT;
 1663                         goto done;
 1664                 }
 1665         }
 1666 
 1667         if (kev->flags & EV_DELETE) {
 1668                 kn_enter_flux(kn);
 1669                 KQ_UNLOCK(kq);
 1670                 knote_drop(kn, td);
 1671                 goto done;
 1672         }
 1673 
 1674         if (kev->flags & EV_FORCEONESHOT) {
 1675                 kn->kn_flags |= EV_ONESHOT;
 1676                 KNOTE_ACTIVATE(kn, 1);
 1677         }
 1678 
 1679         if ((kev->flags & EV_ENABLE) != 0)
 1680                 kn->kn_status &= ~KN_DISABLED;
 1681         else if ((kev->flags & EV_DISABLE) != 0)
 1682                 kn->kn_status |= KN_DISABLED;
 1683 
 1684         /*
 1685          * The user may change some filter values after the initial EV_ADD,
 1686          * but doing so will not reset any filter which has already been
 1687          * triggered.
 1688          */
 1689         kn->kn_status |= KN_SCAN;
 1690         kn_enter_flux(kn);
 1691         KQ_UNLOCK(kq);
 1692         knl = kn_list_lock(kn);
 1693         kn->kn_kevent.udata = kev->udata;
 1694         if (!fops->f_isfd && fops->f_touch != NULL) {
 1695                 fops->f_touch(kn, kev, EVENT_REGISTER);
 1696         } else {
 1697                 kn->kn_sfflags = kev->fflags;
 1698                 kn->kn_sdata = kev->data;
 1699         }
 1700 
 1701 done_ev_add:
 1702         /*
 1703          * We can get here with kn->kn_knlist == NULL.  This can happen when
 1704          * the initial attach event decides that the event is "completed" 
 1705          * already, e.g., filt_procattach() is called on a zombie process.  It
 1706          * will call filt_proc() which will remove it from the list, and NULL
 1707          * kn_knlist.
 1708          *
 1709          * KN_DISABLED will be stable while the knote is in flux, so the
 1710          * unlocked read will not race with an update.
 1711          */
 1712         if ((kn->kn_status & KN_DISABLED) == 0)
 1713                 event = kn->kn_fop->f_event(kn, 0);
 1714         else
 1715                 event = 0;
 1716 
 1717         KQ_LOCK(kq);
 1718         if (event)
 1719                 kn->kn_status |= KN_ACTIVE;
 1720         if ((kn->kn_status & (KN_ACTIVE | KN_DISABLED | KN_QUEUED)) ==
 1721             KN_ACTIVE)
 1722                 knote_enqueue(kn);
 1723         kn->kn_status &= ~KN_SCAN;
 1724         kn_leave_flux(kn);
 1725         kn_list_unlock(knl);
 1726         KQ_UNLOCK_FLUX(kq);
 1727 
 1728 done:
 1729         KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
 1730         if (filedesc_unlock)
 1731                 FILEDESC_XUNLOCK(td->td_proc->p_fd);
 1732         if (fp != NULL)
 1733                 fdrop(fp, td);
 1734         knote_free(tkn);
 1735         if (fops != NULL)
 1736                 kqueue_fo_release(filt);
 1737         return (error);
 1738 }
 1739 
 1740 static int
 1741 kqueue_acquire(struct file *fp, struct kqueue **kqp)
 1742 {
 1743         int error;
 1744         struct kqueue *kq;
 1745 
 1746         error = 0;
 1747 
 1748         kq = fp->f_data;
 1749         if (fp->f_type != DTYPE_KQUEUE || kq == NULL)
 1750                 return (EBADF);
 1751         *kqp = kq;
 1752         KQ_LOCK(kq);
 1753         if ((kq->kq_state & KQ_CLOSING) == KQ_CLOSING) {
 1754                 KQ_UNLOCK(kq);
 1755                 return (EBADF);
 1756         }
 1757         kq->kq_refcnt++;
 1758         KQ_UNLOCK(kq);
 1759 
 1760         return error;
 1761 }
 1762 
 1763 static void
 1764 kqueue_release(struct kqueue *kq, int locked)
 1765 {
 1766         if (locked)
 1767                 KQ_OWNED(kq);
 1768         else
 1769                 KQ_LOCK(kq);
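               /* Reaching a single remaining reference unblocks kqueue_drain(). */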
 1770         kq->kq_refcnt--;
 1771         if (kq->kq_refcnt == 1)
 1772                 wakeup(&kq->kq_refcnt);
 1773         if (!locked)
 1774                 KQ_UNLOCK(kq);
 1775 }
 1776 
 1777 void
 1778 kqueue_drain_schedtask(void)
 1779 {
 1780         taskqueue_quiesce(taskqueue_kqueue_ctx);
 1781 }
 1782 
 1783 static void
 1784 kqueue_schedtask(struct kqueue *kq)
 1785 {
 1786         struct thread *td;
 1787 
 1788         KQ_OWNED(kq);
 1789         KASSERT(((kq->kq_state & KQ_TASKDRAIN) != KQ_TASKDRAIN),
 1790             ("scheduling kqueue task while draining"));
 1791 
 1792         if ((kq->kq_state & KQ_TASKSCHED) != KQ_TASKSCHED) {
 1793                 taskqueue_enqueue(taskqueue_kqueue_ctx, &kq->kq_task);
 1794                 kq->kq_state |= KQ_TASKSCHED;
 1795                 td = curthread;
 1796                 thread_lock(td);
 1797                 td->td_flags |= TDF_ASTPENDING | TDF_KQTICKLED;
 1798                 thread_unlock(td);
 1799         }
 1800 }
 1801 
 1802 /*
 1803  * Expand the kq to make sure we have storage for fops/ident pair.
 1804  *
 1805  * Return 0 on success (or no work necessary), return errno on failure.
 1806  */
 1807 static int
 1808 kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
 1809     int mflag)
 1810 {
 1811         struct klist *list, *tmp_knhash, *to_free;
 1812         u_long tmp_knhashmask;
 1813         int error, fd, size;
 1814 
 1815         KQ_NOTOWNED(kq);
 1816 
 1817         error = 0;
 1818         to_free = NULL;
 1819         if (fops->f_isfd) {
 1820                 fd = ident;
 1821                 if (kq->kq_knlistsize <= fd) {
 1822                         size = kq->kq_knlistsize;
 1823                         while (size <= fd)
 1824                                 size += KQEXTENT;
 1825                         list = malloc(size * sizeof(*list), M_KQUEUE, mflag);
 1826                         if (list == NULL)
 1827                                 return ENOMEM;
 1828                         KQ_LOCK(kq);
 1829                         if ((kq->kq_state & KQ_CLOSING) != 0) {
 1830                                 to_free = list;
 1831                                 error = EBADF;
 1832                         } else if (kq->kq_knlistsize > fd) {
 1833                                 to_free = list;
 1834                         } else {
 1835                                 if (kq->kq_knlist != NULL) {
 1836                                         bcopy(kq->kq_knlist, list,
 1837                                             kq->kq_knlistsize * sizeof(*list));
 1838                                         to_free = kq->kq_knlist;
 1839                                         kq->kq_knlist = NULL;
 1840                                 }
 1841                                 bzero((caddr_t)list +
 1842                                     kq->kq_knlistsize * sizeof(*list),
 1843                                     (size - kq->kq_knlistsize) * sizeof(*list));
 1844                                 kq->kq_knlistsize = size;
 1845                                 kq->kq_knlist = list;
 1846                         }
 1847                         KQ_UNLOCK(kq);
 1848                 }
 1849         } else {
 1850                 if (kq->kq_knhashmask == 0) {
 1851                         tmp_knhash = hashinit_flags(KN_HASHSIZE, M_KQUEUE,
 1852                             &tmp_knhashmask, (mflag & M_WAITOK) != 0 ?
 1853                             HASH_WAITOK : HASH_NOWAIT);
 1854                         if (tmp_knhash == NULL)
 1855                                 return (ENOMEM);
 1856                         KQ_LOCK(kq);
 1857                         if ((kq->kq_state & KQ_CLOSING) != 0) {
 1858                                 to_free = tmp_knhash;
 1859                                 error = EBADF;
 1860                         } else if (kq->kq_knhashmask == 0) {
 1861                                 kq->kq_knhash = tmp_knhash;
 1862                                 kq->kq_knhashmask = tmp_knhashmask;
 1863                         } else {
 1864                                 to_free = tmp_knhash;
 1865                         }
 1866                         KQ_UNLOCK(kq);
 1867                 }
 1868         }
 1869         free(to_free, M_KQUEUE);
 1870 
 1871         KQ_NOTOWNED(kq);
 1872         return (error);
 1873 }
 1874 
 1875 static void
 1876 kqueue_task(void *arg, int pending)
 1877 {
 1878         struct kqueue *kq;
 1879         int haskqglobal;
 1880 
 1881         haskqglobal = 0;
 1882         kq = arg;
 1883 
 1884         KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
 1885         KQ_LOCK(kq);
 1886 
 1887         KNOTE_LOCKED(&kq->kq_sel.si_note, 0);
 1888 
 1889         kq->kq_state &= ~KQ_TASKSCHED;
 1890         if ((kq->kq_state & KQ_TASKDRAIN) == KQ_TASKDRAIN) {
 1891                 wakeup(&kq->kq_state);
 1892         }
 1893         KQ_UNLOCK(kq);
 1894         KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
 1895 }
 1896 
 1897 /*
 1898  * Scan, update kn_data (if not ONESHOT), and copyout triggered events.
 1899  * We treat KN_MARKER knotes as if they are in flux.
 1900  */
 1901 static int
 1902 kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops,
 1903     const struct timespec *tsp, struct kevent *keva, struct thread *td)
 1904 {
 1905         struct kevent *kevp;
 1906         struct knote *kn, *marker;
 1907         struct knlist *knl;
 1908         sbintime_t asbt, rsbt;
 1909         int count, error, haskqglobal, influx, nkev, touch;
 1910 
 1911         count = maxevents;
 1912         nkev = 0;
 1913         error = 0;
 1914         haskqglobal = 0;
 1915 
 1916         if (maxevents == 0)
 1917                 goto done_nl;
 1918         if (maxevents < 0) {
 1919                 error = EINVAL;
 1920                 goto done_nl;
 1921         }
 1922 
 1923         rsbt = 0;
 1924         if (tsp != NULL) {
 1925                 if (tsp->tv_sec < 0 || tsp->tv_nsec < 0 ||
 1926                     tsp->tv_nsec >= 1000000000) {
 1927                         error = EINVAL;
 1928                         goto done_nl;
 1929                 }
 1930                 if (timespecisset(tsp)) {
 1931                         if (tsp->tv_sec <= INT32_MAX) {
 1932                                 rsbt = tstosbt(*tsp);
 1933                                 if (TIMESEL(&asbt, rsbt))
 1934                                         asbt += tc_tick_sbt;
 1935                                 if (asbt <= SBT_MAX - rsbt)
 1936                                         asbt += rsbt;
 1937                                 else
 1938                                         asbt = 0;
 1939                                 rsbt >>= tc_precexp;
 1940                         } else
 1941                                 asbt = 0;
 1942                 } else
 1943                         asbt = -1;
 1944         } else
 1945                 asbt = 0;
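               /*
                * At this point asbt is 0 to sleep until an event arrives (no
                * timeout supplied, or one too large to represent), -1 to poll
                * without sleeping (a zero timeout), or an absolute sbintime
                * deadline otherwise.
                */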
 1946         marker = knote_alloc(M_WAITOK);
 1947         marker->kn_status = KN_MARKER;
 1948         KQ_LOCK(kq);
 1949 
 1950 retry:
 1951         kevp = keva;
 1952         if (kq->kq_count == 0) {
 1953                 if (asbt == -1) {
 1954                         error = EWOULDBLOCK;
 1955                 } else {
 1956                         kq->kq_state |= KQ_SLEEP;
 1957                         error = msleep_sbt(kq, &kq->kq_lock, PSOCK | PCATCH,
 1958                             "kqread", asbt, rsbt, C_ABSOLUTE);
 1959                 }
 1960                 if (error == 0)
 1961                         goto retry;
 1962                 /* don't restart after signals... */
 1963                 if (error == ERESTART)
 1964                         error = EINTR;
 1965                 else if (error == EWOULDBLOCK)
 1966                         error = 0;
 1967                 goto done;
 1968         }
 1969 
 1970         TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe);
 1971         influx = 0;
 1972         while (count) {
 1973                 KQ_OWNED(kq);
 1974                 kn = TAILQ_FIRST(&kq->kq_head);
 1975 
 1976                 if ((kn->kn_status == KN_MARKER && kn != marker) ||
 1977                     kn_in_flux(kn)) {
 1978                         if (influx) {
 1979                                 influx = 0;
 1980                                 KQ_FLUX_WAKEUP(kq);
 1981                         }
 1982                         kq->kq_state |= KQ_FLUXWAIT;
 1983                         error = msleep(kq, &kq->kq_lock, PSOCK,
 1984                             "kqflxwt", 0);
 1985                         continue;
 1986                 }
 1987 
 1988                 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
 1989                 if ((kn->kn_status & KN_DISABLED) == KN_DISABLED) {
 1990                         kn->kn_status &= ~KN_QUEUED;
 1991                         kq->kq_count--;
 1992                         continue;
 1993                 }
 1994                 if (kn == marker) {
 1995                         KQ_FLUX_WAKEUP(kq);
 1996                         if (count == maxevents)
 1997                                 goto retry;
 1998                         goto done;
 1999                 }
 2000                 KASSERT(!kn_in_flux(kn),
 2001                     ("knote %p is unexpectedly in flux", kn));
 2002 
 2003                 if ((kn->kn_flags & EV_DROP) == EV_DROP) {
 2004                         kn->kn_status &= ~KN_QUEUED;
 2005                         kn_enter_flux(kn);
 2006                         kq->kq_count--;
 2007                         KQ_UNLOCK(kq);
 2008                         /*
 2009                          * We don't need to lock the list since we've
 2010                          * marked it as in flux.
 2011                          */
 2012                         knote_drop(kn, td);
 2013                         KQ_LOCK(kq);
 2014                         continue;
 2015                 } else if ((kn->kn_flags & EV_ONESHOT) == EV_ONESHOT) {
 2016                         kn->kn_status &= ~KN_QUEUED;
 2017                         kn_enter_flux(kn);
 2018                         kq->kq_count--;
 2019                         KQ_UNLOCK(kq);
 2020                         /*
 2021                          * We don't need to lock the list since we've
 2022                          * marked the knote as being in flux.
 2023                          */
 2024                         *kevp = kn->kn_kevent;
 2025                         knote_drop(kn, td);
 2026                         KQ_LOCK(kq);
 2027                         kn = NULL;
 2028                 } else {
 2029                         kn->kn_status |= KN_SCAN;
 2030                         kn_enter_flux(kn);
 2031                         KQ_UNLOCK(kq);
 2032                         if ((kn->kn_status & KN_KQUEUE) == KN_KQUEUE)
 2033                                 KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
 2034                         knl = kn_list_lock(kn);
 2035                         if (kn->kn_fop->f_event(kn, 0) == 0) {
 2036                                 KQ_LOCK(kq);
 2037                                 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
 2038                                 kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE |
 2039                                     KN_SCAN);
 2040                                 kn_leave_flux(kn);
 2041                                 kq->kq_count--;
 2042                                 kn_list_unlock(knl);
 2043                                 influx = 1;
 2044                                 continue;
 2045                         }
 2046                         touch = (!kn->kn_fop->f_isfd &&
 2047                             kn->kn_fop->f_touch != NULL);
 2048                         if (touch)
 2049                                 kn->kn_fop->f_touch(kn, kevp, EVENT_PROCESS);
 2050                         else
 2051                                 *kevp = kn->kn_kevent;
 2052                         KQ_LOCK(kq);
 2053                         KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
 2054                         if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) {
  2055                                 /*
  2056                                  * Manually clear knotes that weren't
  2057                                  * touched.
  2058                                  */
 2059                                 if (touch == 0 && kn->kn_flags & EV_CLEAR) {
 2060                                         kn->kn_data = 0;
 2061                                         kn->kn_fflags = 0;
 2062                                 }
 2063                                 if (kn->kn_flags & EV_DISPATCH)
 2064                                         kn->kn_status |= KN_DISABLED;
 2065                                 kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
 2066                                 kq->kq_count--;
 2067                         } else
 2068                                 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
 2069                         
 2070                         kn->kn_status &= ~KN_SCAN;
 2071                         kn_leave_flux(kn);
 2072                         kn_list_unlock(knl);
 2073                         influx = 1;
 2074                 }
 2075 
 2076                 /* we are returning a copy to the user */
 2077                 kevp++;
 2078                 nkev++;
 2079                 count--;
 2080 
 2081                 if (nkev == KQ_NEVENTS) {
 2082                         influx = 0;
 2083                         KQ_UNLOCK_FLUX(kq);
 2084                         error = k_ops->k_copyout(k_ops->arg, keva, nkev);
 2085                         nkev = 0;
 2086                         kevp = keva;
 2087                         KQ_LOCK(kq);
 2088                         if (error)
 2089                                 break;
 2090                 }
 2091         }
 2092         TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe);
 2093 done:
 2094         KQ_OWNED(kq);
 2095         KQ_UNLOCK_FLUX(kq);
 2096         knote_free(marker);
 2097 done_nl:
 2098         KQ_NOTOWNED(kq);
 2099         if (nkev != 0)
 2100                 error = k_ops->k_copyout(k_ops->arg, keva, nkev);
 2101         td->td_retval[0] = maxevents - count;
 2102         return (error);
 2103 }
 2104 
 2105 /*ARGSUSED*/
 2106 static int
 2107 kqueue_ioctl(struct file *fp, u_long cmd, void *data,
 2108         struct ucred *active_cred, struct thread *td)
 2109 {
 2110         /*
 2111          * Enabling sigio causes two major problems:
 2112          * 1) infinite recursion:
  2113          * Synopsis: kevent is being used to track signals and has FIOASYNC
 2114          * set.  On receipt of a signal this will cause a kqueue to recurse
 2115          * into itself over and over.  Sending the sigio causes the kqueue
 2116          * to become ready, which in turn posts sigio again, forever.
 2117          * Solution: this can be solved by setting a flag in the kqueue that
 2118          * we have a SIGIO in progress.
 2119          * 2) locking problems:
  2120          * Synopsis: Kqueue is a leaf subsystem, but adding signalling puts
 2121          * us above the proc and pgrp locks.
 2122          * Solution: Post a signal using an async mechanism, being sure to
 2123          * record a generation count in the delivery so that we do not deliver
 2124          * a signal to the wrong process.
 2125          *
 2126          * Note, these two mechanisms are somewhat mutually exclusive!
 2127          */
 2128 #if 0
 2129         struct kqueue *kq;
 2130 
 2131         kq = fp->f_data;
 2132         switch (cmd) {
 2133         case FIOASYNC:
 2134                 if (*(int *)data) {
 2135                         kq->kq_state |= KQ_ASYNC;
 2136                 } else {
 2137                         kq->kq_state &= ~KQ_ASYNC;
 2138                 }
 2139                 return (0);
 2140 
 2141         case FIOSETOWN:
 2142                 return (fsetown(*(int *)data, &kq->kq_sigio));
 2143 
 2144         case FIOGETOWN:
 2145                 *(int *)data = fgetown(&kq->kq_sigio);
 2146                 return (0);
 2147         }
 2148 #endif
 2149 
 2150         return (ENOTTY);
 2151 }
 2152 
 2153 /*ARGSUSED*/
 2154 static int
 2155 kqueue_poll(struct file *fp, int events, struct ucred *active_cred,
 2156         struct thread *td)
 2157 {
 2158         struct kqueue *kq;
 2159         int revents = 0;
 2160         int error;
 2161 
 2162         if ((error = kqueue_acquire(fp, &kq)))
 2163                 return POLLERR;
 2164 
 2165         KQ_LOCK(kq);
 2166         if (events & (POLLIN | POLLRDNORM)) {
 2167                 if (kq->kq_count) {
 2168                         revents |= events & (POLLIN | POLLRDNORM);
 2169                 } else {
 2170                         selrecord(td, &kq->kq_sel);
 2171                         if (SEL_WAITING(&kq->kq_sel))
 2172                                 kq->kq_state |= KQ_SEL;
 2173                 }
 2174         }
 2175         kqueue_release(kq, 1);
 2176         KQ_UNLOCK(kq);
 2177         return (revents);
 2178 }
 2179 
 2180 /*ARGSUSED*/
 2181 static int
 2182 kqueue_stat(struct file *fp, struct stat *st, struct ucred *active_cred,
 2183         struct thread *td)
 2184 {
 2185 
 2186         bzero((void *)st, sizeof *st);
 2187         /*
 2188          * We no longer return kq_count because the unlocked value is useless.
 2189          * If you spent all this time getting the count, why not spend your
 2190          * syscall better by calling kevent?
 2191          *
 2192          * XXX - This is needed for libc_r.
 2193          */
 2194         st->st_mode = S_IFIFO;
 2195         return (0);
 2196 }
 2197 
 2198 static void
 2199 kqueue_drain(struct kqueue *kq, struct thread *td)
 2200 {
 2201         struct knote *kn;
 2202         int i;
 2203 
 2204         KQ_LOCK(kq);
 2205 
 2206         KASSERT((kq->kq_state & KQ_CLOSING) != KQ_CLOSING,
 2207             ("kqueue already closing"));
 2208         kq->kq_state |= KQ_CLOSING;
 2209         if (kq->kq_refcnt > 1)
 2210                 msleep(&kq->kq_refcnt, &kq->kq_lock, PSOCK, "kqclose", 0);
 2211 
 2212         KASSERT(kq->kq_refcnt == 1, ("other refs are out there!"));
 2213 
 2214         KASSERT(knlist_empty(&kq->kq_sel.si_note),
 2215             ("kqueue's knlist not empty"));
 2216 
 2217         for (i = 0; i < kq->kq_knlistsize; i++) {
 2218                 while ((kn = SLIST_FIRST(&kq->kq_knlist[i])) != NULL) {
 2219                         if (kn_in_flux(kn)) {
 2220                                 kq->kq_state |= KQ_FLUXWAIT;
 2221                                 msleep(kq, &kq->kq_lock, PSOCK, "kqclo1", 0);
 2222                                 continue;
 2223                         }
 2224                         kn_enter_flux(kn);
 2225                         KQ_UNLOCK(kq);
 2226                         knote_drop(kn, td);
 2227                         KQ_LOCK(kq);
 2228                 }
 2229         }
 2230         if (kq->kq_knhashmask != 0) {
 2231                 for (i = 0; i <= kq->kq_knhashmask; i++) {
 2232                         while ((kn = SLIST_FIRST(&kq->kq_knhash[i])) != NULL) {
 2233                                 if (kn_in_flux(kn)) {
 2234                                         kq->kq_state |= KQ_FLUXWAIT;
 2235                                         msleep(kq, &kq->kq_lock, PSOCK,
 2236                                                "kqclo2", 0);
 2237                                         continue;
 2238                                 }
 2239                                 kn_enter_flux(kn);
 2240                                 KQ_UNLOCK(kq);
 2241                                 knote_drop(kn, td);
 2242                                 KQ_LOCK(kq);
 2243                         }
 2244                 }
 2245         }
 2246 
 2247         if ((kq->kq_state & KQ_TASKSCHED) == KQ_TASKSCHED) {
 2248                 kq->kq_state |= KQ_TASKDRAIN;
 2249                 msleep(&kq->kq_state, &kq->kq_lock, PSOCK, "kqtqdr", 0);
 2250         }
 2251 
 2252         if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
 2253                 selwakeuppri(&kq->kq_sel, PSOCK);
 2254                 if (!SEL_WAITING(&kq->kq_sel))
 2255                         kq->kq_state &= ~KQ_SEL;
 2256         }
 2257 
 2258         KQ_UNLOCK(kq);
 2259 }
 2260 
 2261 static void
 2262 kqueue_destroy(struct kqueue *kq)
 2263 {
 2264 
 2265         KASSERT(kq->kq_fdp == NULL,
 2266             ("kqueue still attached to a file descriptor"));
 2267         seldrain(&kq->kq_sel);
 2268         knlist_destroy(&kq->kq_sel.si_note);
 2269         mtx_destroy(&kq->kq_lock);
 2270 
 2271         if (kq->kq_knhash != NULL)
 2272                 free(kq->kq_knhash, M_KQUEUE);
 2273         if (kq->kq_knlist != NULL)
 2274                 free(kq->kq_knlist, M_KQUEUE);
 2275 
 2276         funsetown(&kq->kq_sigio);
 2277 }
 2278 
 2279 /*ARGSUSED*/
 2280 static int
 2281 kqueue_close(struct file *fp, struct thread *td)
 2282 {
 2283         struct kqueue *kq = fp->f_data;
 2284         struct filedesc *fdp;
 2285         int error;
 2286         int filedesc_unlock;
 2287 
 2288         if ((error = kqueue_acquire(fp, &kq)))
 2289                 return error;
 2290         kqueue_drain(kq, td);
 2291 
 2292         /*
 2293          * We could be called due to the knote_drop() doing fdrop(),
 2294          * called from kqueue_register().  In this case the global
  2295          * lock is owned, and the filedesc sx is locked beforehand, so that
  2296          * we do not take the sleepable lock after a non-sleepable one.
 2297          */
 2298         fdp = kq->kq_fdp;
 2299         kq->kq_fdp = NULL;
 2300         if (!sx_xlocked(FILEDESC_LOCK(fdp))) {
 2301                 FILEDESC_XLOCK(fdp);
 2302                 filedesc_unlock = 1;
 2303         } else
 2304                 filedesc_unlock = 0;
 2305         TAILQ_REMOVE(&fdp->fd_kqlist, kq, kq_list);
 2306         if (filedesc_unlock)
 2307                 FILEDESC_XUNLOCK(fdp);
 2308 
 2309         kqueue_destroy(kq);
 2310         chgkqcnt(kq->kq_cred->cr_ruidinfo, -1, 0);
 2311         crfree(kq->kq_cred);
 2312         free(kq, M_KQUEUE);
 2313         fp->f_data = NULL;
 2314 
 2315         return (0);
 2316 }
 2317 
 2318 static int
 2319 kqueue_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
 2320 {
 2321 
 2322         kif->kf_type = KF_TYPE_KQUEUE;
 2323         return (0);
 2324 }
 2325 
 2326 static void
 2327 kqueue_wakeup(struct kqueue *kq)
 2328 {
 2329         KQ_OWNED(kq);
 2330 
 2331         if ((kq->kq_state & KQ_SLEEP) == KQ_SLEEP) {
 2332                 kq->kq_state &= ~KQ_SLEEP;
 2333                 wakeup(kq);
 2334         }
 2335         if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
 2336                 selwakeuppri(&kq->kq_sel, PSOCK);
 2337                 if (!SEL_WAITING(&kq->kq_sel))
 2338                         kq->kq_state &= ~KQ_SEL;
 2339         }
 2340         if (!knlist_empty(&kq->kq_sel.si_note))
 2341                 kqueue_schedtask(kq);
 2342         if ((kq->kq_state & KQ_ASYNC) == KQ_ASYNC) {
 2343                 pgsigio(&kq->kq_sigio, SIGIO, 0);
 2344         }
 2345 }
 2346 
 2347 /*
 2348  * Walk down a list of knotes, activating them if their event has triggered.
 2349  *
 2350  * There is a possibility to optimize in the case of one kq watching another.
 2351  * Instead of scheduling a task to wake it up, you could pass enough state
  2352  * down the chain to wake up the parent kqueue.  Make this code functional
 2353  * first.
 2354  */
 2355 void
 2356 knote(struct knlist *list, long hint, int lockflags)
 2357 {
 2358         struct kqueue *kq;
 2359         struct knote *kn, *tkn;
 2360         int error;
 2361 
 2362         if (list == NULL)
 2363                 return;
 2364 
 2365         KNL_ASSERT_LOCK(list, lockflags & KNF_LISTLOCKED);
 2366 
 2367         if ((lockflags & KNF_LISTLOCKED) == 0)
 2368                 list->kl_lock(list->kl_lockarg); 
 2369 
 2370         /*
 2371          * If we unlock the list lock (and enter influx), we can
 2372          * eliminate the kqueue scheduling, but this will introduce
 2373          * four lock/unlock's for each knote to test.  Also, marker
 2374          * would be needed to keep iteration position, since filters
 2375          * or other threads could remove events.
 2376          */
 2377         SLIST_FOREACH_SAFE(kn, &list->kl_list, kn_selnext, tkn) {
 2378                 kq = kn->kn_kq;
 2379                 KQ_LOCK(kq);
 2380                 if (kn_in_flux(kn) && (kn->kn_status & KN_SCAN) == 0) {
 2381                         /*
 2382                          * Do not process the influx notes, except for
 2383                          * the influx coming from the kq unlock in the
  2384                          * kqueue_scan().  In the latter case, we do
 2385                          * not interfere with the scan, since the code
 2386                          * fragment in kqueue_scan() locks the knlist,
 2387                          * and cannot proceed until we finished.
 2388                          */
 2389                         KQ_UNLOCK(kq);
 2390                 } else if ((lockflags & KNF_NOKQLOCK) != 0) {
 2391                         kn_enter_flux(kn);
 2392                         KQ_UNLOCK(kq);
 2393                         error = kn->kn_fop->f_event(kn, hint);
 2394                         KQ_LOCK(kq);
 2395                         kn_leave_flux(kn);
 2396                         if (error)
 2397                                 KNOTE_ACTIVATE(kn, 1);
 2398                         KQ_UNLOCK_FLUX(kq);
 2399                 } else {
 2400                         if (kn->kn_fop->f_event(kn, hint))
 2401                                 KNOTE_ACTIVATE(kn, 1);
 2402                         KQ_UNLOCK(kq);
 2403                 }
 2404         }
 2405         if ((lockflags & KNF_LISTLOCKED) == 0)
 2406                 list->kl_unlock(list->kl_lockarg); 
 2407 }
 2408 
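The following is a minimal, hypothetical producer-side sketch (the softc, its
fields, and the function names are invented for illustration) of how a
subsystem that owns a knlist typically drives this path: the list is
initialized against the subsystem's own mutex, and KNOTE_LOCKED() (which
expands to knote() with KNF_LISTLOCKED) is called while that mutex is held
whenever the event source fires.

    #include <sys/param.h>
    #include <sys/lock.h>
    #include <sys/mutex.h>
    #include <sys/selinfo.h>
    #include <sys/event.h>

    struct example_softc {
            struct mtx      sc_mtx;         /* protects softc state and the knlist */
            struct selinfo  sc_rsel;        /* sc_rsel.si_note holds attached knotes */
            int             sc_ready;
    };

    /* Attach-time setup: the knlist shares the softc mutex. */
    static void
    example_attach(struct example_softc *sc)
    {
            mtx_init(&sc->sc_mtx, "example", NULL, MTX_DEF);
            knlist_init_mtx(&sc->sc_rsel.si_note, &sc->sc_mtx);
    }

    /* Producer path: record the event and run the attached filters. */
    static void
    example_data_ready(struct example_softc *sc)
    {
            mtx_lock(&sc->sc_mtx);
            sc->sc_ready = 1;
            /* The list lock is already held, so knote() must not retake it. */
            KNOTE_LOCKED(&sc->sc_rsel.si_note, 0);
            mtx_unlock(&sc->sc_mtx);
    }
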
 2409 /*
 2410  * add a knote to a knlist
 2411  */
 2412 void
 2413 knlist_add(struct knlist *knl, struct knote *kn, int islocked)
 2414 {
 2415 
 2416         KNL_ASSERT_LOCK(knl, islocked);
 2417         KQ_NOTOWNED(kn->kn_kq);
 2418         KASSERT(kn_in_flux(kn), ("knote %p not in flux", kn));
 2419         KASSERT((kn->kn_status & KN_DETACHED) != 0,
 2420             ("knote %p was not detached", kn));
 2421         if (!islocked)
 2422                 knl->kl_lock(knl->kl_lockarg);
 2423         SLIST_INSERT_HEAD(&knl->kl_list, kn, kn_selnext);
 2424         if (!islocked)
 2425                 knl->kl_unlock(knl->kl_lockarg);
 2426         KQ_LOCK(kn->kn_kq);
 2427         kn->kn_knlist = knl;
 2428         kn->kn_status &= ~KN_DETACHED;
 2429         KQ_UNLOCK(kn->kn_kq);
 2430 }
 2431 
 2432 static void
 2433 knlist_remove_kq(struct knlist *knl, struct knote *kn, int knlislocked,
 2434     int kqislocked)
 2435 {
 2436 
 2437         KASSERT(!kqislocked || knlislocked, ("kq locked w/o knl locked"));
 2438         KNL_ASSERT_LOCK(knl, knlislocked);
 2439         mtx_assert(&kn->kn_kq->kq_lock, kqislocked ? MA_OWNED : MA_NOTOWNED);
 2440         KASSERT(kqislocked || kn_in_flux(kn), ("knote %p not in flux", kn));
 2441         KASSERT((kn->kn_status & KN_DETACHED) == 0,
 2442             ("knote %p was already detached", kn));
 2443         if (!knlislocked)
 2444                 knl->kl_lock(knl->kl_lockarg);
 2445         SLIST_REMOVE(&knl->kl_list, kn, knote, kn_selnext);
 2446         kn->kn_knlist = NULL;
 2447         if (!knlislocked)
 2448                 kn_list_unlock(knl);
 2449         if (!kqislocked)
 2450                 KQ_LOCK(kn->kn_kq);
 2451         kn->kn_status |= KN_DETACHED;
 2452         if (!kqislocked)
 2453                 KQ_UNLOCK(kn->kn_kq);
 2454 }
 2455 
 2456 /*
 2457  * remove knote from the specified knlist
 2458  */
 2459 void
 2460 knlist_remove(struct knlist *knl, struct knote *kn, int islocked)
 2461 {
 2462 
 2463         knlist_remove_kq(knl, kn, islocked, 0);
 2464 }
 2465 
 2466 int
 2467 knlist_empty(struct knlist *knl)
 2468 {
 2469 
 2470         KNL_ASSERT_LOCKED(knl);
 2471         return (SLIST_EMPTY(&knl->kl_list));
 2472 }
 2473 
 2474 static struct mtx knlist_lock;
 2475 MTX_SYSINIT(knlist_lock, &knlist_lock, "knlist lock for lockless objects",
 2476     MTX_DEF);
 2477 static void knlist_mtx_lock(void *arg);
 2478 static void knlist_mtx_unlock(void *arg);
 2479 
 2480 static void
 2481 knlist_mtx_lock(void *arg)
 2482 {
 2483 
 2484         mtx_lock((struct mtx *)arg);
 2485 }
 2486 
 2487 static void
 2488 knlist_mtx_unlock(void *arg)
 2489 {
 2490 
 2491         mtx_unlock((struct mtx *)arg);
 2492 }
 2493 
 2494 static void
 2495 knlist_mtx_assert_lock(void *arg, int what)
 2496 {
 2497 
 2498         if (what == LA_LOCKED)
 2499                 mtx_assert((struct mtx *)arg, MA_OWNED);
 2500         else
 2501                 mtx_assert((struct mtx *)arg, MA_NOTOWNED);
 2502 }
 2503 
 2504 static void
 2505 knlist_rw_rlock(void *arg)
 2506 {
 2507 
 2508         rw_rlock((struct rwlock *)arg);
 2509 }
 2510 
 2511 static void
 2512 knlist_rw_runlock(void *arg)
 2513 {
 2514 
 2515         rw_runlock((struct rwlock *)arg);
 2516 }
 2517 
 2518 static void
 2519 knlist_rw_assert_lock(void *arg, int what)
 2520 {
 2521 
 2522         if (what == LA_LOCKED)
 2523                 rw_assert((struct rwlock *)arg, RA_LOCKED);
 2524         else
 2525                 rw_assert((struct rwlock *)arg, RA_UNLOCKED);
 2526 }
 2527 
 2528 void
 2529 knlist_init(struct knlist *knl, void *lock, void (*kl_lock)(void *),
 2530     void (*kl_unlock)(void *),
 2531     void (*kl_assert_lock)(void *, int))
 2532 {
 2533 
 2534         if (lock == NULL)
 2535                 knl->kl_lockarg = &knlist_lock;
 2536         else
 2537                 knl->kl_lockarg = lock;
 2538 
 2539         if (kl_lock == NULL)
 2540                 knl->kl_lock = knlist_mtx_lock;
 2541         else
 2542                 knl->kl_lock = kl_lock;
 2543         if (kl_unlock == NULL)
 2544                 knl->kl_unlock = knlist_mtx_unlock;
 2545         else
 2546                 knl->kl_unlock = kl_unlock;
 2547         if (kl_assert_lock == NULL)
 2548                 knl->kl_assert_lock = knlist_mtx_assert_lock;
 2549         else
 2550                 knl->kl_assert_lock = kl_assert_lock;
 2551 
 2552         knl->kl_autodestroy = 0;
 2553         SLIST_INIT(&knl->kl_list);
 2554 }
 2555 
 2556 void
 2557 knlist_init_mtx(struct knlist *knl, struct mtx *lock)
 2558 {
 2559 
 2560         knlist_init(knl, lock, NULL, NULL, NULL);
 2561 }
 2562 
 2563 struct knlist *
 2564 knlist_alloc(struct mtx *lock)
 2565 {
 2566         struct knlist *knl;
 2567 
 2568         knl = malloc(sizeof(struct knlist), M_KQUEUE, M_WAITOK);
 2569         knlist_init_mtx(knl, lock);
 2570         return (knl);
 2571 }
 2572 
 2573 void
 2574 knlist_init_rw_reader(struct knlist *knl, struct rwlock *lock)
 2575 {
 2576 
 2577         knlist_init(knl, lock, knlist_rw_rlock, knlist_rw_runlock,
 2578             knlist_rw_assert_lock);
 2579 }
 2580 
 2581 void
 2582 knlist_destroy(struct knlist *knl)
 2583 {
 2584 
 2585         KASSERT(KNLIST_EMPTY(knl),
 2586             ("destroying knlist %p with knotes on it", knl));
 2587 }
 2588 
 2589 void
 2590 knlist_detach(struct knlist *knl)
 2591 {
 2592 
 2593         KNL_ASSERT_LOCKED(knl);
 2594         knl->kl_autodestroy = 1;
 2595         if (knlist_empty(knl)) {
 2596                 knlist_destroy(knl);
 2597                 free(knl, M_KQUEUE);
 2598         }
 2599 }
 2600 
 2601 /*
 2602  * Even if we are locked, we may need to drop the lock to allow any influx
 2603  * knotes time to "settle".
 2604  */
 2605 void
 2606 knlist_cleardel(struct knlist *knl, struct thread *td, int islocked, int killkn)
 2607 {
 2608         struct knote *kn, *kn2;
 2609         struct kqueue *kq;
 2610 
 2611         KASSERT(!knl->kl_autodestroy, ("cleardel for autodestroy %p", knl));
 2612         if (islocked)
 2613                 KNL_ASSERT_LOCKED(knl);
 2614         else {
 2615                 KNL_ASSERT_UNLOCKED(knl);
 2616 again:          /* need to reacquire lock since we have dropped it */
 2617                 knl->kl_lock(knl->kl_lockarg);
 2618         }
 2619 
 2620         SLIST_FOREACH_SAFE(kn, &knl->kl_list, kn_selnext, kn2) {
 2621                 kq = kn->kn_kq;
 2622                 KQ_LOCK(kq);
 2623                 if (kn_in_flux(kn)) {
 2624                         KQ_UNLOCK(kq);
 2625                         continue;
 2626                 }
 2627                 knlist_remove_kq(knl, kn, 1, 1);
 2628                 if (killkn) {
 2629                         kn_enter_flux(kn);
 2630                         KQ_UNLOCK(kq);
 2631                         knote_drop_detached(kn, td);
 2632                 } else {
 2633                         /* Make sure cleared knotes disappear soon */
 2634                         kn->kn_flags |= EV_EOF | EV_ONESHOT;
 2635                         KQ_UNLOCK(kq);
 2636                 }
 2637                 kq = NULL;
 2638         }
 2639 
 2640         if (!SLIST_EMPTY(&knl->kl_list)) {
 2641                 /* there are still in flux knotes remaining */
 2642                 kn = SLIST_FIRST(&knl->kl_list);
 2643                 kq = kn->kn_kq;
 2644                 KQ_LOCK(kq);
 2645                 KASSERT(kn_in_flux(kn), ("knote removed w/o list lock"));
 2646                 knl->kl_unlock(knl->kl_lockarg);
 2647                 kq->kq_state |= KQ_FLUXWAIT;
 2648                 msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqkclr", 0);
 2649                 kq = NULL;
 2650                 goto again;
 2651         }
 2652 
 2653         if (islocked)
 2654                 KNL_ASSERT_LOCKED(knl);
 2655         else {
 2656                 knl->kl_unlock(knl->kl_lockarg);
 2657                 KNL_ASSERT_UNLOCKED(knl);
 2658         }
 2659 }
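
Continuing the hypothetical softc sketch shown after knote() above, detach-time
teardown typically clears the list before destroying it, since knlist_destroy()
asserts that the list is empty:

    /* Detach-time teardown for the invented example_softc. */
    static void
    example_detach(struct example_softc *sc)
    {
            /* Disconnect remaining knotes; cleared knotes get EV_EOF | EV_ONESHOT. */
            knlist_clear(&sc->sc_rsel.si_note, 0);
            seldrain(&sc->sc_rsel);
            knlist_destroy(&sc->sc_rsel.si_note);
            mtx_destroy(&sc->sc_mtx);
    }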
 2660 
 2661 /*
  2662  * Remove all knotes referencing a specified fd; must be called with the
  2663  * FILEDESC lock held.  This prevents a race where a new fd comes along
  2664  * and occupies the entry and we attach a knote to the fd.
 2665  */
 2666 void
 2667 knote_fdclose(struct thread *td, int fd)
 2668 {
 2669         struct filedesc *fdp = td->td_proc->p_fd;
 2670         struct kqueue *kq;
 2671         struct knote *kn;
 2672         int influx;
 2673 
 2674         FILEDESC_XLOCK_ASSERT(fdp);
 2675 
 2676         /*
 2677          * We shouldn't have to worry about new kevents appearing on fd
 2678          * since filedesc is locked.
 2679          */
 2680         TAILQ_FOREACH(kq, &fdp->fd_kqlist, kq_list) {
 2681                 KQ_LOCK(kq);
 2682 
 2683 again:
 2684                 influx = 0;
 2685                 while (kq->kq_knlistsize > fd &&
 2686                     (kn = SLIST_FIRST(&kq->kq_knlist[fd])) != NULL) {
 2687                         if (kn_in_flux(kn)) {
 2688                                 /* someone else might be waiting on our knote */
 2689                                 if (influx)
 2690                                         wakeup(kq);
 2691                                 kq->kq_state |= KQ_FLUXWAIT;
 2692                                 msleep(kq, &kq->kq_lock, PSOCK, "kqflxwt", 0);
 2693                                 goto again;
 2694                         }
 2695                         kn_enter_flux(kn);
 2696                         KQ_UNLOCK(kq);
 2697                         influx = 1;
 2698                         knote_drop(kn, td);
 2699                         KQ_LOCK(kq);
 2700                 }
 2701                 KQ_UNLOCK_FLUX(kq);
 2702         }
 2703 }
 2704 
 2705 static int
 2706 knote_attach(struct knote *kn, struct kqueue *kq)
 2707 {
 2708         struct klist *list;
 2709 
 2710         KASSERT(kn_in_flux(kn), ("knote %p not marked influx", kn));
 2711         KQ_OWNED(kq);
 2712 
 2713         if ((kq->kq_state & KQ_CLOSING) != 0)
 2714                 return (EBADF);
 2715         if (kn->kn_fop->f_isfd) {
 2716                 if (kn->kn_id >= kq->kq_knlistsize)
 2717                         return (ENOMEM);
 2718                 list = &kq->kq_knlist[kn->kn_id];
 2719         } else {
 2720                 if (kq->kq_knhash == NULL)
 2721                         return (ENOMEM);
 2722                 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
 2723         }
 2724         SLIST_INSERT_HEAD(list, kn, kn_link);
 2725         return (0);
 2726 }
 2727 
 2728 static void
 2729 knote_drop(struct knote *kn, struct thread *td)
 2730 {
 2731 
 2732         if ((kn->kn_status & KN_DETACHED) == 0)
 2733                 kn->kn_fop->f_detach(kn);
 2734         knote_drop_detached(kn, td);
 2735 }
 2736 
 2737 static void
 2738 knote_drop_detached(struct knote *kn, struct thread *td)
 2739 {
 2740         struct kqueue *kq;
 2741         struct klist *list;
 2742 
 2743         kq = kn->kn_kq;
 2744 
 2745         KASSERT((kn->kn_status & KN_DETACHED) != 0,
 2746             ("knote %p still attached", kn));
 2747         KQ_NOTOWNED(kq);
 2748 
 2749         KQ_LOCK(kq);
 2750         KASSERT(kn->kn_influx == 1,
 2751             ("knote_drop called on %p with influx %d", kn, kn->kn_influx));
 2752 
 2753         if (kn->kn_fop->f_isfd)
 2754                 list = &kq->kq_knlist[kn->kn_id];
 2755         else
 2756                 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
 2757 
 2758         if (!SLIST_EMPTY(list))
 2759                 SLIST_REMOVE(list, kn, knote, kn_link);
 2760         if (kn->kn_status & KN_QUEUED)
 2761                 knote_dequeue(kn);
 2762         KQ_UNLOCK_FLUX(kq);
 2763 
 2764         if (kn->kn_fop->f_isfd) {
 2765                 fdrop(kn->kn_fp, td);
 2766                 kn->kn_fp = NULL;
 2767         }
 2768         kqueue_fo_release(kn->kn_kevent.filter);
 2769         kn->kn_fop = NULL;
 2770         knote_free(kn);
 2771 }
 2772 
 2773 static void
 2774 knote_enqueue(struct knote *kn)
 2775 {
 2776         struct kqueue *kq = kn->kn_kq;
 2777 
 2778         KQ_OWNED(kn->kn_kq);
 2779         KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));
 2780 
 2781         TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
 2782         kn->kn_status |= KN_QUEUED;
 2783         kq->kq_count++;
 2784         kqueue_wakeup(kq);
 2785 }
 2786 
 2787 static void
 2788 knote_dequeue(struct knote *kn)
 2789 {
 2790         struct kqueue *kq = kn->kn_kq;
 2791 
 2792         KQ_OWNED(kn->kn_kq);
 2793         KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
 2794 
 2795         TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
 2796         kn->kn_status &= ~KN_QUEUED;
 2797         kq->kq_count--;
 2798 }
 2799 
 2800 static void
 2801 knote_init(void)
 2802 {
 2803 
 2804         knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL,
 2805             NULL, NULL, UMA_ALIGN_PTR, 0);
 2806 }
 2807 SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL);
 2808 
 2809 static struct knote *
 2810 knote_alloc(int mflag)
 2811 {
 2812 
 2813         return (uma_zalloc(knote_zone, mflag | M_ZERO));
 2814 }
 2815 
 2816 static void
 2817 knote_free(struct knote *kn)
 2818 {
 2819 
 2820         uma_zfree(knote_zone, kn);
 2821 }
 2822 
 2823 /*
 2824  * Register the kev w/ the kq specified by fd.
 2825  */
 2826 int 
 2827 kqfd_register(int fd, struct kevent *kev, struct thread *td, int mflag)
 2828 {
 2829         struct kqueue *kq;
 2830         struct file *fp;
 2831         cap_rights_t rights;
 2832         int error;
 2833 
 2834         error = fget(td, fd, cap_rights_init_one(&rights, CAP_KQUEUE_CHANGE),
 2835             &fp);
 2836         if (error != 0)
 2837                 return (error);
 2838         if ((error = kqueue_acquire(fp, &kq)) != 0)
 2839                 goto noacquire;
 2840 
 2841         error = kqueue_register(kq, kev, td, mflag);
 2842         kqueue_release(kq, 0);
 2843 
 2844 noacquire:
 2845         fdrop(fp, td);
 2846         return (error);
 2847 }
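
A hedged sketch of how an in-kernel consumer might use this helper (the
function, the job cookie, and the udata value are invented; the AIO code uses
kqfd_register() in a broadly similar way to post EVFILT_AIO completions to a
kqueue descriptor supplied by userland):

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/event.h>
    #include <sys/malloc.h>
    #include <sys/proc.h>

    /* Post a one-shot completion event to a user-supplied kqueue descriptor. */
    static int
    example_post_completion(struct thread *td, int user_kqfd, void *job,
        void *udata)
    {
            struct kevent kev;

            EV_SET(&kev, (uintptr_t)job, EVFILT_AIO, EV_ADD | EV_ONESHOT,
                0, 0, udata);
            return (kqfd_register(user_kqfd, &kev, td, M_WAITOK));
    }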
