FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_event.c

    1 /*-
    2  * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
    3  * Copyright 2004 John-Mark Gurney <jmg@FreeBSD.org>
    4  * Copyright (c) 2009 Apple, Inc.
    5  * All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice, this list of conditions and the following disclaimer.
   12  * 2. Redistributions in binary form must reproduce the above copyright
   13  *    notice, this list of conditions and the following disclaimer in the
   14  *    documentation and/or other materials provided with the distribution.
   15  *
   16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   26  * SUCH DAMAGE.
   27  */
   28 
   29 #include <sys/cdefs.h>
   30 __FBSDID("$FreeBSD: releng/9.2/sys/kern/kern_event.c 239915 2012-08-30 18:29:49Z jhb $");
   31 
   32 #include "opt_ktrace.h"
   33 
   34 #include <sys/param.h>
   35 #include <sys/systm.h>
   36 #include <sys/capability.h>
   37 #include <sys/kernel.h>
   38 #include <sys/lock.h>
   39 #include <sys/mutex.h>
   40 #include <sys/proc.h>
   41 #include <sys/malloc.h>
   42 #include <sys/unistd.h>
   43 #include <sys/file.h>
   44 #include <sys/filedesc.h>
   45 #include <sys/filio.h>
   46 #include <sys/fcntl.h>
   47 #include <sys/kthread.h>
   48 #include <sys/selinfo.h>
   49 #include <sys/queue.h>
   50 #include <sys/event.h>
   51 #include <sys/eventvar.h>
   52 #include <sys/poll.h>
   53 #include <sys/protosw.h>
   54 #include <sys/sigio.h>
   55 #include <sys/signalvar.h>
   56 #include <sys/socket.h>
   57 #include <sys/socketvar.h>
   58 #include <sys/stat.h>
   59 #include <sys/sysctl.h>
   60 #include <sys/sysproto.h>
   61 #include <sys/syscallsubr.h>
   62 #include <sys/taskqueue.h>
   63 #include <sys/uio.h>
   64 #ifdef KTRACE
   65 #include <sys/ktrace.h>
   66 #endif
   67 
   68 #include <vm/uma.h>
   69 
   70 static MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");
   71 
   72 /*
   73  * This lock is used if multiple kq locks are required.  This possibly
    74  * should be made into a per-process lock.
   75  */
   76 static struct mtx       kq_global;
   77 MTX_SYSINIT(kq_global, &kq_global, "kqueue order", MTX_DEF);
   78 #define KQ_GLOBAL_LOCK(lck, haslck)     do {    \
   79         if (!haslck)                            \
   80                 mtx_lock(lck);                  \
   81         haslck = 1;                             \
   82 } while (0)
   83 #define KQ_GLOBAL_UNLOCK(lck, haslck)   do {    \
   84         if (haslck)                             \
   85                 mtx_unlock(lck);                        \
   86         haslck = 0;                             \
   87 } while (0)
   88 
   89 TASKQUEUE_DEFINE_THREAD(kqueue);
   90 
   91 static int      kevent_copyout(void *arg, struct kevent *kevp, int count);
   92 static int      kevent_copyin(void *arg, struct kevent *kevp, int count);
   93 static int      kqueue_register(struct kqueue *kq, struct kevent *kev,
   94                     struct thread *td, int waitok);
   95 static int      kqueue_acquire(struct file *fp, struct kqueue **kqp);
   96 static void     kqueue_release(struct kqueue *kq, int locked);
   97 static int      kqueue_expand(struct kqueue *kq, struct filterops *fops,
   98                     uintptr_t ident, int waitok);
   99 static void     kqueue_task(void *arg, int pending);
  100 static int      kqueue_scan(struct kqueue *kq, int maxevents,
  101                     struct kevent_copyops *k_ops,
  102                     const struct timespec *timeout,
  103                     struct kevent *keva, struct thread *td);
  104 static void     kqueue_wakeup(struct kqueue *kq);
  105 static struct filterops *kqueue_fo_find(int filt);
  106 static void     kqueue_fo_release(int filt);
  107 
  108 static fo_rdwr_t        kqueue_read;
  109 static fo_rdwr_t        kqueue_write;
  110 static fo_truncate_t    kqueue_truncate;
  111 static fo_ioctl_t       kqueue_ioctl;
  112 static fo_poll_t        kqueue_poll;
  113 static fo_kqfilter_t    kqueue_kqfilter;
  114 static fo_stat_t        kqueue_stat;
  115 static fo_close_t       kqueue_close;
  116 
  117 static struct fileops kqueueops = {
  118         .fo_read = kqueue_read,
  119         .fo_write = kqueue_write,
  120         .fo_truncate = kqueue_truncate,
  121         .fo_ioctl = kqueue_ioctl,
  122         .fo_poll = kqueue_poll,
  123         .fo_kqfilter = kqueue_kqfilter,
  124         .fo_stat = kqueue_stat,
  125         .fo_close = kqueue_close,
  126         .fo_chmod = invfo_chmod,
  127         .fo_chown = invfo_chown,
  128 };
  129 
  130 static int      knote_attach(struct knote *kn, struct kqueue *kq);
  131 static void     knote_drop(struct knote *kn, struct thread *td);
  132 static void     knote_enqueue(struct knote *kn);
  133 static void     knote_dequeue(struct knote *kn);
  134 static void     knote_init(void);
  135 static struct   knote *knote_alloc(int waitok);
  136 static void     knote_free(struct knote *kn);
  137 
  138 static void     filt_kqdetach(struct knote *kn);
  139 static int      filt_kqueue(struct knote *kn, long hint);
  140 static int      filt_procattach(struct knote *kn);
  141 static void     filt_procdetach(struct knote *kn);
  142 static int      filt_proc(struct knote *kn, long hint);
  143 static int      filt_fileattach(struct knote *kn);
  144 static void     filt_timerexpire(void *knx);
  145 static int      filt_timerattach(struct knote *kn);
  146 static void     filt_timerdetach(struct knote *kn);
  147 static int      filt_timer(struct knote *kn, long hint);
  148 static int      filt_userattach(struct knote *kn);
  149 static void     filt_userdetach(struct knote *kn);
  150 static int      filt_user(struct knote *kn, long hint);
  151 static void     filt_usertouch(struct knote *kn, struct kevent *kev,
  152                     u_long type);
  153 
  154 static struct filterops file_filtops = {
  155         .f_isfd = 1,
  156         .f_attach = filt_fileattach,
  157 };
  158 static struct filterops kqread_filtops = {
  159         .f_isfd = 1,
  160         .f_detach = filt_kqdetach,
  161         .f_event = filt_kqueue,
  162 };
  163 /* XXX - move to kern_proc.c?  */
  164 static struct filterops proc_filtops = {
  165         .f_isfd = 0,
  166         .f_attach = filt_procattach,
  167         .f_detach = filt_procdetach,
  168         .f_event = filt_proc,
  169 };
  170 static struct filterops timer_filtops = {
  171         .f_isfd = 0,
  172         .f_attach = filt_timerattach,
  173         .f_detach = filt_timerdetach,
  174         .f_event = filt_timer,
  175 };
  176 static struct filterops user_filtops = {
  177         .f_attach = filt_userattach,
  178         .f_detach = filt_userdetach,
  179         .f_event = filt_user,
  180         .f_touch = filt_usertouch,
  181 };
  182 
  183 static uma_zone_t       knote_zone;
  184 static int              kq_ncallouts = 0;
  185 static int              kq_calloutmax = (4 * 1024);
  186 SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
  187     &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
  188 
  189 /* XXX - ensure not KN_INFLUX?? */
  190 #define KNOTE_ACTIVATE(kn, islock) do {                                 \
  191         if ((islock))                                                   \
  192                 mtx_assert(&(kn)->kn_kq->kq_lock, MA_OWNED);            \
  193         else                                                            \
  194                 KQ_LOCK((kn)->kn_kq);                                   \
  195         (kn)->kn_status |= KN_ACTIVE;                                   \
  196         if (((kn)->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)         \
  197                 knote_enqueue((kn));                                    \
  198         if (!(islock))                                                  \
  199                 KQ_UNLOCK((kn)->kn_kq);                                 \
  200 } while(0)
  201 #define KQ_LOCK(kq) do {                                                \
  202         mtx_lock(&(kq)->kq_lock);                                       \
  203 } while (0)
  204 #define KQ_FLUX_WAKEUP(kq) do {                                         \
  205         if (((kq)->kq_state & KQ_FLUXWAIT) == KQ_FLUXWAIT) {            \
  206                 (kq)->kq_state &= ~KQ_FLUXWAIT;                         \
  207                 wakeup((kq));                                           \
  208         }                                                               \
  209 } while (0)
  210 #define KQ_UNLOCK_FLUX(kq) do {                                         \
  211         KQ_FLUX_WAKEUP(kq);                                             \
  212         mtx_unlock(&(kq)->kq_lock);                                     \
  213 } while (0)
  214 #define KQ_UNLOCK(kq) do {                                              \
  215         mtx_unlock(&(kq)->kq_lock);                                     \
  216 } while (0)
  217 #define KQ_OWNED(kq) do {                                               \
  218         mtx_assert(&(kq)->kq_lock, MA_OWNED);                           \
  219 } while (0)
  220 #define KQ_NOTOWNED(kq) do {                                            \
  221         mtx_assert(&(kq)->kq_lock, MA_NOTOWNED);                        \
  222 } while (0)
  223 #define KN_LIST_LOCK(kn) do {                                           \
  224         if (kn->kn_knlist != NULL)                                      \
  225                 kn->kn_knlist->kl_lock(kn->kn_knlist->kl_lockarg);      \
  226 } while (0)
  227 #define KN_LIST_UNLOCK(kn) do {                                         \
  228         if (kn->kn_knlist != NULL)                                      \
  229                 kn->kn_knlist->kl_unlock(kn->kn_knlist->kl_lockarg);    \
  230 } while (0)
  231 #define KNL_ASSERT_LOCK(knl, islocked) do {                             \
  232         if (islocked)                                                   \
  233                 KNL_ASSERT_LOCKED(knl);                         \
  234         else                                                            \
  235                 KNL_ASSERT_UNLOCKED(knl);                               \
  236 } while (0)
  237 #ifdef INVARIANTS
  238 #define KNL_ASSERT_LOCKED(knl) do {                                     \
  239         knl->kl_assert_locked((knl)->kl_lockarg);                       \
  240 } while (0)
  241 #define KNL_ASSERT_UNLOCKED(knl) do {                                   \
  242         knl->kl_assert_unlocked((knl)->kl_lockarg);                     \
  243 } while (0)
  244 #else /* !INVARIANTS */
  245 #define KNL_ASSERT_LOCKED(knl) do {} while(0)
  246 #define KNL_ASSERT_UNLOCKED(knl) do {} while (0)
  247 #endif /* INVARIANTS */
  248 
  249 #define KN_HASHSIZE             64              /* XXX should be tunable */
  250 #define KN_HASH(val, mask)      (((val) ^ (val >> 8)) & (mask))
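
/*
 * Worked example (editorial sketch, not in the original source): assuming
 * a hypothetical ident of 0x1234 and a mask of 63 (KN_HASHSIZE - 1),
 * KN_HASH computes (0x1234 ^ 0x12) & 0x3f = 0x1226 & 0x3f = 0x26, so the
 * knote lands in bucket 38.  Folding the high byte into the low bits
 * spreads idents that differ only above the mask.
 */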
  251 
  252 static int
  253 filt_nullattach(struct knote *kn)
  254 {
  255 
  256         return (ENXIO);
   257 }
  258 
  259 struct filterops null_filtops = {
  260         .f_isfd = 0,
  261         .f_attach = filt_nullattach,
  262 };
  263 
  264 /* XXX - make SYSINIT to add these, and move into respective modules. */
  265 extern struct filterops sig_filtops;
  266 extern struct filterops fs_filtops;
  267 
  268 /*
   269  * Table for all system-defined filters.
  270  */
  271 static struct mtx       filterops_lock;
  272 MTX_SYSINIT(kqueue_filterops, &filterops_lock, "protect sysfilt_ops",
  273         MTX_DEF);
  274 static struct {
  275         struct filterops *for_fop;
  276         int for_refcnt;
  277 } sysfilt_ops[EVFILT_SYSCOUNT] = {
  278         { &file_filtops },                      /* EVFILT_READ */
  279         { &file_filtops },                      /* EVFILT_WRITE */
  280         { &null_filtops },                      /* EVFILT_AIO */
  281         { &file_filtops },                      /* EVFILT_VNODE */
  282         { &proc_filtops },                      /* EVFILT_PROC */
  283         { &sig_filtops },                       /* EVFILT_SIGNAL */
  284         { &timer_filtops },                     /* EVFILT_TIMER */
  285         { &null_filtops },                      /* former EVFILT_NETDEV */
  286         { &fs_filtops },                        /* EVFILT_FS */
  287         { &null_filtops },                      /* EVFILT_LIO */
  288         { &user_filtops },                      /* EVFILT_USER */
  289 };
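
/*
 * A note on indexing (illustrative, assuming the standard EVFILT_*
 * definitions from <sys/event.h>): filter numbers are small negative
 * integers, e.g. EVFILT_READ is -1, so the table above is indexed by
 * one's complement:
 *
 *	sysfilt_ops[~EVFILT_READ] == sysfilt_ops[0]  -> file_filtops
 *	sysfilt_ops[~EVFILT_USER] == sysfilt_ops[10] -> user_filtops
 *
 * This is why kqueue_fo_find() and friends reject filt > 0 and index
 * with sysfilt_ops[~filt].
 */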
  290 
  291 /*
  292  * Simple redirection for all cdevsw style objects to call their fo_kqfilter
  293  * method.
  294  */
  295 static int
  296 filt_fileattach(struct knote *kn)
  297 {
  298 
  299         return (fo_kqfilter(kn->kn_fp, kn));
  300 }
  301 
  302 /*ARGSUSED*/
  303 static int
  304 kqueue_kqfilter(struct file *fp, struct knote *kn)
  305 {
  306         struct kqueue *kq = kn->kn_fp->f_data;
  307 
  308         if (kn->kn_filter != EVFILT_READ)
  309                 return (EINVAL);
  310 
  311         kn->kn_status |= KN_KQUEUE;
  312         kn->kn_fop = &kqread_filtops;
  313         knlist_add(&kq->kq_sel.si_note, kn, 0);
  314 
  315         return (0);
  316 }
  317 
  318 static void
  319 filt_kqdetach(struct knote *kn)
  320 {
  321         struct kqueue *kq = kn->kn_fp->f_data;
  322 
  323         knlist_remove(&kq->kq_sel.si_note, kn, 0);
  324 }
  325 
  326 /*ARGSUSED*/
  327 static int
  328 filt_kqueue(struct knote *kn, long hint)
  329 {
  330         struct kqueue *kq = kn->kn_fp->f_data;
  331 
  332         kn->kn_data = kq->kq_count;
  333         return (kn->kn_data > 0);
  334 }
  335 
  336 /* XXX - move to kern_proc.c?  */
  337 static int
  338 filt_procattach(struct knote *kn)
  339 {
  340         struct proc *p;
  341         int immediate;
  342         int error;
  343 
  344         immediate = 0;
  345         p = pfind(kn->kn_id);
  346         if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
  347                 p = zpfind(kn->kn_id);
  348                 immediate = 1;
  349         } else if (p != NULL && (p->p_flag & P_WEXIT)) {
  350                 immediate = 1;
  351         }
  352 
  353         if (p == NULL)
  354                 return (ESRCH);
  355         if ((error = p_cansee(curthread, p))) {
  356                 PROC_UNLOCK(p);
  357                 return (error);
  358         }
  359 
  360         kn->kn_ptr.p_proc = p;
  361         kn->kn_flags |= EV_CLEAR;               /* automatically set */
  362 
  363         /*
  364          * internal flag indicating registration done by kernel
  365          */
  366         if (kn->kn_flags & EV_FLAG1) {
  367                 kn->kn_data = kn->kn_sdata;             /* ppid */
  368                 kn->kn_fflags = NOTE_CHILD;
  369                 kn->kn_flags &= ~EV_FLAG1;
  370         }
  371 
  372         if (immediate == 0)
  373                 knlist_add(&p->p_klist, kn, 1);
  374 
  375         /*
  376          * Immediately activate any exit notes if the target process is a
  377          * zombie.  This is necessary to handle the case where the target
  378          * process, e.g. a child, dies before the kevent is registered.
  379          */
  380         if (immediate && filt_proc(kn, NOTE_EXIT))
  381                 KNOTE_ACTIVATE(kn, 0);
  382 
  383         PROC_UNLOCK(p);
  384 
  385         return (0);
  386 }
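
/*
 * Illustrative userland sketch (not part of this file): waiting for a
 * child to exit with EVFILT_PROC.  The zombie handling above is what
 * keeps this race-free even when the child exits before registration.
 * child_pid is assumed to hold a valid pid.
 *
 *	struct kevent ev;
 *	int kq = kqueue();
 *
 *	EV_SET(&ev, child_pid, EVFILT_PROC, EV_ADD, NOTE_EXIT, 0, NULL);
 *	if (kevent(kq, &ev, 1, NULL, 0, NULL) == -1)
 *		err(1, "kevent register");
 *	if (kevent(kq, NULL, 0, &ev, 1, NULL) == 1)
 *		printf("pid %d exited, status %d\n",
 *		    (int)ev.ident, (int)ev.data);
 */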
  387 
  388 /*
  389  * The knote may be attached to a different process, which may exit,
  390  * leaving nothing for the knote to be attached to.  So when the process
  391  * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
  392  * it will be deleted when read out.  However, as part of the knote deletion,
  393  * this routine is called, so a check is needed to avoid actually performing
  394  * a detach, because the original process does not exist any more.
  395  */
  396 /* XXX - move to kern_proc.c?  */
  397 static void
  398 filt_procdetach(struct knote *kn)
  399 {
  400         struct proc *p;
  401 
  402         p = kn->kn_ptr.p_proc;
  403         knlist_remove(&p->p_klist, kn, 0);
  404         kn->kn_ptr.p_proc = NULL;
  405 }
  406 
  407 /* XXX - move to kern_proc.c?  */
  408 static int
  409 filt_proc(struct knote *kn, long hint)
  410 {
  411         struct proc *p = kn->kn_ptr.p_proc;
  412         u_int event;
  413 
  414         /*
  415          * mask off extra data
  416          */
  417         event = (u_int)hint & NOTE_PCTRLMASK;
  418 
  419         /*
  420          * if the user is interested in this event, record it.
  421          */
  422         if (kn->kn_sfflags & event)
  423                 kn->kn_fflags |= event;
  424 
  425         /*
  426          * process is gone, so flag the event as finished.
  427          */
  428         if (event == NOTE_EXIT) {
  429                 if (!(kn->kn_status & KN_DETACHED))
  430                         knlist_remove_inevent(&p->p_klist, kn);
  431                 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
  432                 kn->kn_data = p->p_xstat;
  433                 kn->kn_ptr.p_proc = NULL;
  434                 return (1);
  435         }
  436 
  437         return (kn->kn_fflags != 0);
  438 }
  439 
  440 /*
   441  * Called when a process forks.  It mostly does the same as knote(),
   442  * activating all knotes registered to fire when the process forks.
   443  * Additionally, for each knote attached to the parent, check whether
   444  * the user wants to track the new process.  If so, attach a new
   445  * knote to the child and immediately report an event with the
   446  * child's pid.
  447  */
  448 void
  449 knote_fork(struct knlist *list, int pid)
  450 {
  451         struct kqueue *kq;
  452         struct knote *kn;
  453         struct kevent kev;
  454         int error;
  455 
  456         if (list == NULL)
  457                 return;
  458         list->kl_lock(list->kl_lockarg);
  459 
  460         SLIST_FOREACH(kn, &list->kl_list, kn_selnext) {
  461                 if ((kn->kn_status & KN_INFLUX) == KN_INFLUX)
  462                         continue;
  463                 kq = kn->kn_kq;
  464                 KQ_LOCK(kq);
  465                 if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) {
  466                         KQ_UNLOCK(kq);
  467                         continue;
  468                 }
  469 
  470                 /*
  471                  * The same as knote(), activate the event.
  472                  */
  473                 if ((kn->kn_sfflags & NOTE_TRACK) == 0) {
  474                         kn->kn_status |= KN_HASKQLOCK;
  475                         if (kn->kn_fop->f_event(kn, NOTE_FORK | pid))
  476                                 KNOTE_ACTIVATE(kn, 1);
  477                         kn->kn_status &= ~KN_HASKQLOCK;
  478                         KQ_UNLOCK(kq);
  479                         continue;
  480                 }
  481 
  482                 /*
  483                  * The NOTE_TRACK case. In addition to the activation
  484                  * of the event, we need to register new event to
  485                  * track the child. Drop the locks in preparation for
  486                  * the call to kqueue_register().
  487                  */
  488                 kn->kn_status |= KN_INFLUX;
  489                 KQ_UNLOCK(kq);
  490                 list->kl_unlock(list->kl_lockarg);
  491 
  492                 /*
  493                  * Activate existing knote and register a knote with
  494                  * new process.
  495                  */
  496                 kev.ident = pid;
  497                 kev.filter = kn->kn_filter;
  498                 kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
  499                 kev.fflags = kn->kn_sfflags;
  500                 kev.data = kn->kn_id;           /* parent */
  501                 kev.udata = kn->kn_kevent.udata;/* preserve udata */
  502                 error = kqueue_register(kq, &kev, NULL, 0);
  503                 if (kn->kn_fop->f_event(kn, NOTE_FORK | pid))
  504                         KNOTE_ACTIVATE(kn, 0);
  505                 if (error)
  506                         kn->kn_fflags |= NOTE_TRACKERR;
  507                 KQ_LOCK(kq);
  508                 kn->kn_status &= ~KN_INFLUX;
  509                 KQ_UNLOCK_FLUX(kq);
  510                 list->kl_lock(list->kl_lockarg);
  511         }
  512         list->kl_unlock(list->kl_lockarg);
  513 }
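
/*
 * Illustrative userland sketch (not part of this file): with NOTE_TRACK,
 * the fork path above auto-registers a knote on each new child, which
 * then reports NOTE_CHILD with the parent's pid in data:
 *
 *	EV_SET(&ev, pid, EVFILT_PROC, EV_ADD,
 *	    NOTE_TRACK | NOTE_FORK | NOTE_EXIT, 0, NULL);
 *
 * If registering on the child fails, the parent's knote reports
 * NOTE_TRACKERR instead.
 */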
  514 
  515 /*
  516  * XXX: EVFILT_TIMER should perhaps live in kern_time.c beside the
  517  * interval timer support code.
  518  */
  519 static int
  520 timertoticks(intptr_t data)
  521 {
  522         struct timeval tv;
  523         int tticks;
  524 
  525         tv.tv_sec = data / 1000;
  526         tv.tv_usec = (data % 1000) * 1000;
  527         tticks = tvtohz(&tv);
  528 
  529         return tticks;
  530 }
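
/*
 * Worked example (editorial sketch): kn_sdata is in milliseconds, so
 * data = 1500 gives tv_sec = 1, tv_usec = 500000; with hz = 1000,
 * tvtohz() returns about 1501 ticks, since it adds one tick to cover the
 * partial tick until the next clock interrupt.  filt_timerexpire() below
 * subtracts that extra tick when it re-arms the callout.
 */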
  531 
  532 static void
  533 filt_timerexpire(void *knx)
  534 {
  535         struct knote *kn = knx;
  536         struct callout *calloutp;
  537 
  538         kn->kn_data++;
  539         KNOTE_ACTIVATE(kn, 0);  /* XXX - handle locking */
  540 
  541         /*
  542          * timertoticks() uses tvtohz() which always adds 1 to allow
  543          * for the time until the next clock interrupt being strictly
  544          * less than 1 clock tick.  We don't want that here since we
  545          * want to appear to be in sync with the clock interrupt even
  546          * when we're delayed.
  547          */
  548         if ((kn->kn_flags & EV_ONESHOT) != EV_ONESHOT) {
  549                 calloutp = (struct callout *)kn->kn_hook;
  550                 callout_reset_curcpu(calloutp, timertoticks(kn->kn_sdata) - 1,
  551                     filt_timerexpire, kn);
  552         }
  553 }
  554 
  555 /*
  556  * data contains amount of time to sleep, in milliseconds
  557  */
  558 static int
  559 filt_timerattach(struct knote *kn)
  560 {
  561         struct callout *calloutp;
  562 
  563         atomic_add_int(&kq_ncallouts, 1);
  564 
  565         if (kq_ncallouts >= kq_calloutmax) {
  566                 atomic_add_int(&kq_ncallouts, -1);
  567                 return (ENOMEM);
  568         }
  569 
  570         kn->kn_flags |= EV_CLEAR;               /* automatically set */
  571         kn->kn_status &= ~KN_DETACHED;          /* knlist_add usually sets it */
  572         calloutp = malloc(sizeof(*calloutp), M_KQUEUE, M_WAITOK);
  573         callout_init(calloutp, CALLOUT_MPSAFE);
  574         kn->kn_hook = calloutp;
  575         callout_reset_curcpu(calloutp, timertoticks(kn->kn_sdata),
  576             filt_timerexpire, kn);
  577 
  578         return (0);
  579 }
  580 
  581 static void
  582 filt_timerdetach(struct knote *kn)
  583 {
  584         struct callout *calloutp;
  585 
  586         calloutp = (struct callout *)kn->kn_hook;
  587         callout_drain(calloutp);
  588         free(calloutp, M_KQUEUE);
  589         atomic_add_int(&kq_ncallouts, -1);
  590         kn->kn_status |= KN_DETACHED;   /* knlist_remove usually clears it */
  591 }
  592 
  593 static int
  594 filt_timer(struct knote *kn, long hint)
  595 {
  596 
  597         return (kn->kn_data != 0);
  598 }
  599 
  600 static int
  601 filt_userattach(struct knote *kn)
  602 {
  603 
  604         /* 
  605          * EVFILT_USER knotes are not attached to anything in the kernel.
  606          */ 
  607         kn->kn_hook = NULL;
  608         if (kn->kn_fflags & NOTE_TRIGGER)
  609                 kn->kn_hookid = 1;
  610         else
  611                 kn->kn_hookid = 0;
  612         return (0);
  613 }
  614 
  615 static void
  616 filt_userdetach(__unused struct knote *kn)
  617 {
  618 
  619         /*
  620          * EVFILT_USER knotes are not attached to anything in the kernel.
  621          */
  622 }
  623 
  624 static int
  625 filt_user(struct knote *kn, __unused long hint)
  626 {
  627 
  628         return (kn->kn_hookid);
  629 }
  630 
  631 static void
  632 filt_usertouch(struct knote *kn, struct kevent *kev, u_long type)
  633 {
  634         u_int ffctrl;
  635 
  636         switch (type) {
  637         case EVENT_REGISTER:
  638                 if (kev->fflags & NOTE_TRIGGER)
  639                         kn->kn_hookid = 1;
  640 
  641                 ffctrl = kev->fflags & NOTE_FFCTRLMASK;
  642                 kev->fflags &= NOTE_FFLAGSMASK;
  643                 switch (ffctrl) {
  644                 case NOTE_FFNOP:
  645                         break;
  646 
  647                 case NOTE_FFAND:
  648                         kn->kn_sfflags &= kev->fflags;
  649                         break;
  650 
  651                 case NOTE_FFOR:
  652                         kn->kn_sfflags |= kev->fflags;
  653                         break;
  654 
  655                 case NOTE_FFCOPY:
  656                         kn->kn_sfflags = kev->fflags;
  657                         break;
  658 
  659                 default:
  660                         /* XXX Return error? */
  661                         break;
  662                 }
  663                 kn->kn_sdata = kev->data;
  664                 if (kev->flags & EV_CLEAR) {
  665                         kn->kn_hookid = 0;
  666                         kn->kn_data = 0;
  667                         kn->kn_fflags = 0;
  668                 }
  669                 break;
  670 
  671         case EVENT_PROCESS:
  672                 *kev = kn->kn_kevent;
  673                 kev->fflags = kn->kn_sfflags;
  674                 kev->data = kn->kn_sdata;
  675                 if (kn->kn_flags & EV_CLEAR) {
  676                         kn->kn_hookid = 0;
  677                         kn->kn_data = 0;
  678                         kn->kn_fflags = 0;
  679                 }
  680                 break;
  681 
  682         default:
  683                 panic("filt_usertouch() - invalid type (%ld)", type);
  684                 break;
  685         }
  686 }
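
/*
 * Illustrative userland sketch (not part of this file): EVFILT_USER as a
 * lightweight wakeup primitive.  One thread registers the event, another
 * triggers it; EV_CLEAR rearms it after each delivery.
 *
 *	struct kevent ev;
 *
 *	EV_SET(&ev, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);
 *	...
 *	EV_SET(&ev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);	(wakes the waiter)
 */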
  687 
  688 int
  689 sys_kqueue(struct thread *td, struct kqueue_args *uap)
  690 {
  691         struct filedesc *fdp;
  692         struct kqueue *kq;
  693         struct file *fp;
  694         int fd, error;
  695 
  696         fdp = td->td_proc->p_fd;
  697         error = falloc(td, &fp, &fd, 0);
  698         if (error)
  699                 goto done2;
  700 
   701         /* An extra reference on `fp' has been held for us by falloc(). */
  702         kq = malloc(sizeof *kq, M_KQUEUE, M_WAITOK | M_ZERO);
  703         mtx_init(&kq->kq_lock, "kqueue", NULL, MTX_DEF|MTX_DUPOK);
  704         TAILQ_INIT(&kq->kq_head);
  705         kq->kq_fdp = fdp;
  706         knlist_init_mtx(&kq->kq_sel.si_note, &kq->kq_lock);
  707         TASK_INIT(&kq->kq_task, 0, kqueue_task, kq);
  708 
  709         FILEDESC_XLOCK(fdp);
  710         SLIST_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_list);
  711         FILEDESC_XUNLOCK(fdp);
  712 
  713         finit(fp, FREAD | FWRITE, DTYPE_KQUEUE, kq, &kqueueops);
  714         fdrop(fp, td);
  715 
  716         td->td_retval[0] = fd;
  717 done2:
  718         return (error);
  719 }
  720 
  721 #ifndef _SYS_SYSPROTO_H_
  722 struct kevent_args {
  723         int     fd;
  724         const struct kevent *changelist;
  725         int     nchanges;
  726         struct  kevent *eventlist;
  727         int     nevents;
  728         const struct timespec *timeout;
  729 };
  730 #endif
  731 int
  732 sys_kevent(struct thread *td, struct kevent_args *uap)
  733 {
  734         struct timespec ts, *tsp;
  735         struct kevent_copyops k_ops = { uap,
  736                                         kevent_copyout,
  737                                         kevent_copyin};
  738         int error;
  739 #ifdef KTRACE
  740         struct uio ktruio;
  741         struct iovec ktriov;
  742         struct uio *ktruioin = NULL;
  743         struct uio *ktruioout = NULL;
  744 #endif
  745 
  746         if (uap->timeout != NULL) {
  747                 error = copyin(uap->timeout, &ts, sizeof(ts));
  748                 if (error)
  749                         return (error);
  750                 tsp = &ts;
  751         } else
  752                 tsp = NULL;
  753 
  754 #ifdef KTRACE
  755         if (KTRPOINT(td, KTR_GENIO)) {
  756                 ktriov.iov_base = uap->changelist;
  757                 ktriov.iov_len = uap->nchanges * sizeof(struct kevent);
  758                 ktruio = (struct uio){ .uio_iov = &ktriov, .uio_iovcnt = 1,
  759                     .uio_segflg = UIO_USERSPACE, .uio_rw = UIO_READ,
  760                     .uio_td = td };
  761                 ktruioin = cloneuio(&ktruio);
  762                 ktriov.iov_base = uap->eventlist;
  763                 ktriov.iov_len = uap->nevents * sizeof(struct kevent);
  764                 ktruioout = cloneuio(&ktruio);
  765         }
  766 #endif
  767 
  768         error = kern_kevent(td, uap->fd, uap->nchanges, uap->nevents,
  769             &k_ops, tsp);
  770 
  771 #ifdef KTRACE
  772         if (ktruioin != NULL) {
  773                 ktruioin->uio_resid = uap->nchanges * sizeof(struct kevent);
  774                 ktrgenio(uap->fd, UIO_WRITE, ktruioin, 0);
  775                 ktruioout->uio_resid = td->td_retval[0] * sizeof(struct kevent);
  776                 ktrgenio(uap->fd, UIO_READ, ktruioout, error);
  777         }
  778 #endif
  779 
  780         return (error);
  781 }
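
/*
 * Illustrative userland sketch (not part of this file): the timeout
 * copied in above bounds a single wait, here 2.5 seconds for readability
 * on stdin:
 *
 *	struct kevent ev;
 *	struct timespec ts = { .tv_sec = 2, .tv_nsec = 500000000 };
 *
 *	EV_SET(&ev, STDIN_FILENO, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	int n = kevent(kq, &ev, 1, &ev, 1, &ts);
 *
 * n == 0 means the timeout expired; n == 1 means stdin is readable, with
 * the number of pending bytes in ev.data.
 */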
  782 
  783 /*
  784  * Copy 'count' items into the destination list pointed to by uap->eventlist.
  785  */
  786 static int
  787 kevent_copyout(void *arg, struct kevent *kevp, int count)
  788 {
  789         struct kevent_args *uap;
  790         int error;
  791 
  792         KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
  793         uap = (struct kevent_args *)arg;
  794 
  795         error = copyout(kevp, uap->eventlist, count * sizeof *kevp);
  796         if (error == 0)
  797                 uap->eventlist += count;
  798         return (error);
  799 }
  800 
  801 /*
  802  * Copy 'count' items from the list pointed to by uap->changelist.
  803  */
  804 static int
  805 kevent_copyin(void *arg, struct kevent *kevp, int count)
  806 {
  807         struct kevent_args *uap;
  808         int error;
  809 
  810         KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
  811         uap = (struct kevent_args *)arg;
  812 
  813         error = copyin(uap->changelist, kevp, count * sizeof *kevp);
  814         if (error == 0)
  815                 uap->changelist += count;
  816         return (error);
  817 }
  818 
  819 int
  820 kern_kevent(struct thread *td, int fd, int nchanges, int nevents,
  821     struct kevent_copyops *k_ops, const struct timespec *timeout)
  822 {
  823         struct kevent keva[KQ_NEVENTS];
  824         struct kevent *kevp, *changes;
  825         struct kqueue *kq;
  826         struct file *fp;
  827         int i, n, nerrors, error;
  828 
  829         if ((error = fget(td, fd, CAP_POST_EVENT, &fp)) != 0)
  830                 return (error);
  831         if ((error = kqueue_acquire(fp, &kq)) != 0)
  832                 goto done_norel;
  833 
  834         nerrors = 0;
  835 
  836         while (nchanges > 0) {
  837                 n = nchanges > KQ_NEVENTS ? KQ_NEVENTS : nchanges;
  838                 error = k_ops->k_copyin(k_ops->arg, keva, n);
  839                 if (error)
  840                         goto done;
  841                 changes = keva;
  842                 for (i = 0; i < n; i++) {
  843                         kevp = &changes[i];
  844                         if (!kevp->filter)
  845                                 continue;
  846                         kevp->flags &= ~EV_SYSFLAGS;
  847                         error = kqueue_register(kq, kevp, td, 1);
  848                         if (error || (kevp->flags & EV_RECEIPT)) {
  849                                 if (nevents != 0) {
  850                                         kevp->flags = EV_ERROR;
  851                                         kevp->data = error;
  852                                         (void) k_ops->k_copyout(k_ops->arg,
  853                                             kevp, 1);
  854                                         nevents--;
  855                                         nerrors++;
  856                                 } else {
  857                                         goto done;
  858                                 }
  859                         }
  860                 }
  861                 nchanges -= n;
  862         }
  863         if (nerrors) {
  864                 td->td_retval[0] = nerrors;
  865                 error = 0;
  866                 goto done;
  867         }
  868 
  869         error = kqueue_scan(kq, nevents, k_ops, timeout, keva, td);
  870 done:
  871         kqueue_release(kq, 0);
  872 done_norel:
  873         fdrop(fp, td);
  874         return (error);
  875 }
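
/*
 * A note on the error path above (illustrative userland sketch): with
 * EV_RECEIPT each change consumes one eventlist slot and comes back
 * flagged EV_ERROR, with data holding 0 or an errno:
 *
 *	EV_SET(&ev, fd, EVFILT_READ, EV_ADD | EV_RECEIPT, 0, 0, NULL);
 *	n = kevent(kq, &ev, 1, &ev, 1, NULL);
 *	if (n == 1 && (ev.flags & EV_ERROR) && ev.data != 0)
 *		errno = ev.data;	(registration failed)
 */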
  876 
  877 int
  878 kqueue_add_filteropts(int filt, struct filterops *filtops)
  879 {
  880         int error;
  881 
  882         error = 0;
  883         if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) {
  884                 printf(
  885 "trying to add a filterop that is out of range: %d is beyond %d\n",
  886                     ~filt, EVFILT_SYSCOUNT);
  887                 return EINVAL;
  888         }
  889         mtx_lock(&filterops_lock);
  890         if (sysfilt_ops[~filt].for_fop != &null_filtops &&
  891             sysfilt_ops[~filt].for_fop != NULL)
  892                 error = EEXIST;
  893         else {
  894                 sysfilt_ops[~filt].for_fop = filtops;
  895                 sysfilt_ops[~filt].for_refcnt = 0;
  896         }
  897         mtx_unlock(&filterops_lock);
  898 
  899         return (error);
  900 }
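
/*
 * Illustrative sketch (hypothetical example_* names, not from this
 * file): a kernel module can claim one of the null_filtops slots at load
 * time, the way the aio code registers EVFILT_AIO:
 *
 *	static struct filterops example_filtops = {
 *		.f_isfd = 0,
 *		.f_attach = example_attach,
 *		.f_detach = example_detach,
 *		.f_event = example_event,
 *	};
 *
 *	error = kqueue_add_filteropts(EVFILT_AIO, &example_filtops);
 *
 * It must be paired with kqueue_del_filteropts() at unload, which fails
 * with EBUSY while any knote still references the filter.
 */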
  901 
  902 int
  903 kqueue_del_filteropts(int filt)
  904 {
  905         int error;
  906 
  907         error = 0;
  908         if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
  909                 return EINVAL;
  910 
  911         mtx_lock(&filterops_lock);
  912         if (sysfilt_ops[~filt].for_fop == &null_filtops ||
  913             sysfilt_ops[~filt].for_fop == NULL)
  914                 error = EINVAL;
  915         else if (sysfilt_ops[~filt].for_refcnt != 0)
  916                 error = EBUSY;
  917         else {
  918                 sysfilt_ops[~filt].for_fop = &null_filtops;
  919                 sysfilt_ops[~filt].for_refcnt = 0;
  920         }
  921         mtx_unlock(&filterops_lock);
  922 
  923         return error;
  924 }
  925 
  926 static struct filterops *
  927 kqueue_fo_find(int filt)
  928 {
  929 
  930         if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
  931                 return NULL;
  932 
  933         mtx_lock(&filterops_lock);
  934         sysfilt_ops[~filt].for_refcnt++;
  935         if (sysfilt_ops[~filt].for_fop == NULL)
  936                 sysfilt_ops[~filt].for_fop = &null_filtops;
  937         mtx_unlock(&filterops_lock);
  938 
  939         return sysfilt_ops[~filt].for_fop;
  940 }
  941 
  942 static void
  943 kqueue_fo_release(int filt)
  944 {
  945 
  946         if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
  947                 return;
  948 
  949         mtx_lock(&filterops_lock);
  950         KASSERT(sysfilt_ops[~filt].for_refcnt > 0,
  951             ("filter object refcount not valid on release"));
  952         sysfilt_ops[~filt].for_refcnt--;
  953         mtx_unlock(&filterops_lock);
  954 }
  955 
  956 /*
   957  * A ref to kq (obtained via kqueue_acquire) must be held.  waitok
   958  * controls whether memory allocation may sleep; make sure it is 0 if
   959  * you hold any mutexes.
  960  */
  961 static int
  962 kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td, int waitok)
  963 {
  964         struct filterops *fops;
  965         struct file *fp;
  966         struct knote *kn, *tkn;
  967         int error, filt, event;
  968         int haskqglobal;
  969 
  970         fp = NULL;
  971         kn = NULL;
  972         error = 0;
  973         haskqglobal = 0;
  974 
  975         filt = kev->filter;
  976         fops = kqueue_fo_find(filt);
  977         if (fops == NULL)
  978                 return EINVAL;
  979 
  980         tkn = knote_alloc(waitok);              /* prevent waiting with locks */
  981 
  982 findkn:
  983         if (fops->f_isfd) {
  984                 KASSERT(td != NULL, ("td is NULL"));
  985                 error = fget(td, kev->ident, CAP_POLL_EVENT, &fp);
  986                 if (error)
  987                         goto done;
  988 
  989                 if ((kev->flags & EV_ADD) == EV_ADD && kqueue_expand(kq, fops,
  990                     kev->ident, 0) != 0) {
  991                         /* try again */
  992                         fdrop(fp, td);
  993                         fp = NULL;
  994                         error = kqueue_expand(kq, fops, kev->ident, waitok);
  995                         if (error)
  996                                 goto done;
  997                         goto findkn;
  998                 }
  999 
 1000                 if (fp->f_type == DTYPE_KQUEUE) {
 1001                         /*
  1002                          * if we add some intelligence about what we are doing,
 1003                          * we should be able to support events on ourselves.
 1004                          * We need to know when we are doing this to prevent
 1005                          * getting both the knlist lock and the kq lock since
 1006                          * they are the same thing.
 1007                          */
 1008                         if (fp->f_data == kq) {
 1009                                 error = EINVAL;
 1010                                 goto done;
 1011                         }
 1012 
 1013                         KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
 1014                 }
 1015 
 1016                 KQ_LOCK(kq);
 1017                 if (kev->ident < kq->kq_knlistsize) {
 1018                         SLIST_FOREACH(kn, &kq->kq_knlist[kev->ident], kn_link)
 1019                                 if (kev->filter == kn->kn_filter)
 1020                                         break;
 1021                 }
 1022         } else {
 1023                 if ((kev->flags & EV_ADD) == EV_ADD)
 1024                         kqueue_expand(kq, fops, kev->ident, waitok);
 1025 
 1026                 KQ_LOCK(kq);
 1027                 if (kq->kq_knhashmask != 0) {
 1028                         struct klist *list;
 1029 
 1030                         list = &kq->kq_knhash[
 1031                             KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
 1032                         SLIST_FOREACH(kn, list, kn_link)
 1033                                 if (kev->ident == kn->kn_id &&
 1034                                     kev->filter == kn->kn_filter)
 1035                                         break;
 1036                 }
 1037         }
 1038 
  1039         /* knote is in the process of changing, wait for it to stabilize. */
 1040         if (kn != NULL && (kn->kn_status & KN_INFLUX) == KN_INFLUX) {
 1041                 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
 1042                 kq->kq_state |= KQ_FLUXWAIT;
 1043                 msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqflxwt", 0);
 1044                 if (fp != NULL) {
 1045                         fdrop(fp, td);
 1046                         fp = NULL;
 1047                 }
 1048                 goto findkn;
 1049         }
 1050 
 1051         /*
 1052          * kn now contains the matching knote, or NULL if no match
 1053          */
 1054         if (kn == NULL) {
 1055                 if (kev->flags & EV_ADD) {
 1056                         kn = tkn;
 1057                         tkn = NULL;
 1058                         if (kn == NULL) {
 1059                                 KQ_UNLOCK(kq);
 1060                                 error = ENOMEM;
 1061                                 goto done;
 1062                         }
 1063                         kn->kn_fp = fp;
 1064                         kn->kn_kq = kq;
 1065                         kn->kn_fop = fops;
 1066                         /*
 1067                          * apply reference counts to knote structure, and
 1068                          * do not release it at the end of this routine.
 1069                          */
 1070                         fops = NULL;
 1071                         fp = NULL;
 1072 
 1073                         kn->kn_sfflags = kev->fflags;
 1074                         kn->kn_sdata = kev->data;
 1075                         kev->fflags = 0;
 1076                         kev->data = 0;
 1077                         kn->kn_kevent = *kev;
 1078                         kn->kn_kevent.flags &= ~(EV_ADD | EV_DELETE |
 1079                             EV_ENABLE | EV_DISABLE);
 1080                         kn->kn_status = KN_INFLUX|KN_DETACHED;
 1081 
 1082                         error = knote_attach(kn, kq);
 1083                         KQ_UNLOCK(kq);
 1084                         if (error != 0) {
 1085                                 tkn = kn;
 1086                                 goto done;
 1087                         }
 1088 
 1089                         if ((error = kn->kn_fop->f_attach(kn)) != 0) {
 1090                                 knote_drop(kn, td);
 1091                                 goto done;
 1092                         }
 1093                         KN_LIST_LOCK(kn);
 1094                         goto done_ev_add;
 1095                 } else {
 1096                         /* No matching knote and the EV_ADD flag is not set. */
 1097                         KQ_UNLOCK(kq);
 1098                         error = ENOENT;
 1099                         goto done;
 1100                 }
 1101         }
 1102         
 1103         if (kev->flags & EV_DELETE) {
 1104                 kn->kn_status |= KN_INFLUX;
 1105                 KQ_UNLOCK(kq);
 1106                 if (!(kn->kn_status & KN_DETACHED))
 1107                         kn->kn_fop->f_detach(kn);
 1108                 knote_drop(kn, td);
 1109                 goto done;
 1110         }
 1111 
 1112         /*
 1113          * The user may change some filter values after the initial EV_ADD,
 1114          * but doing so will not reset any filter which has already been
 1115          * triggered.
 1116          */
 1117         kn->kn_status |= KN_INFLUX;
 1118         KQ_UNLOCK(kq);
 1119         KN_LIST_LOCK(kn);
 1120         kn->kn_kevent.udata = kev->udata;
 1121         if (!fops->f_isfd && fops->f_touch != NULL) {
 1122                 fops->f_touch(kn, kev, EVENT_REGISTER);
 1123         } else {
 1124                 kn->kn_sfflags = kev->fflags;
 1125                 kn->kn_sdata = kev->data;
 1126         }
 1127 
 1128         /*
 1129          * We can get here with kn->kn_knlist == NULL.  This can happen when
  1130          * the initial attach event decides that the event is "completed"
  1131          * already, e.g. when filt_procattach is called on a zombie process.
  1132          * It will call filt_proc, which will remove the knote from the list
  1133          * and NULL out kn_knlist.
 1134          */
 1135 done_ev_add:
 1136         event = kn->kn_fop->f_event(kn, 0);
 1137         KQ_LOCK(kq);
 1138         if (event)
 1139                 KNOTE_ACTIVATE(kn, 1);
 1140         kn->kn_status &= ~KN_INFLUX;
 1141         KN_LIST_UNLOCK(kn);
 1142 
 1143         if ((kev->flags & EV_DISABLE) &&
 1144             ((kn->kn_status & KN_DISABLED) == 0)) {
 1145                 kn->kn_status |= KN_DISABLED;
 1146         }
 1147 
 1148         if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
 1149                 kn->kn_status &= ~KN_DISABLED;
 1150                 if ((kn->kn_status & KN_ACTIVE) &&
 1151                     ((kn->kn_status & KN_QUEUED) == 0))
 1152                         knote_enqueue(kn);
 1153         }
 1154         KQ_UNLOCK_FLUX(kq);
 1155 
 1156 done:
 1157         KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
 1158         if (fp != NULL)
 1159                 fdrop(fp, td);
 1160         if (tkn != NULL)
 1161                 knote_free(tkn);
 1162         if (fops != NULL)
 1163                 kqueue_fo_release(filt);
 1164         return (error);
 1165 }
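
/*
 * Illustrative userland sketch (not part of this file): the EV_DISABLE /
 * EV_ENABLE handling at the end of this routine is what makes
 * EV_DISPATCH usable: a dispatched knote disables itself on delivery
 * (see kqueue_scan()), and the consumer re-arms it with another
 * registration pass:
 *
 *	EV_SET(&ev, fd, EVFILT_READ, EV_ENABLE, 0, 0, NULL);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);
 */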
 1166 
 1167 static int
 1168 kqueue_acquire(struct file *fp, struct kqueue **kqp)
 1169 {
 1170         int error;
 1171         struct kqueue *kq;
 1172 
 1173         error = 0;
 1174 
 1175         kq = fp->f_data;
 1176         if (fp->f_type != DTYPE_KQUEUE || kq == NULL)
 1177                 return (EBADF);
 1178         *kqp = kq;
 1179         KQ_LOCK(kq);
 1180         if ((kq->kq_state & KQ_CLOSING) == KQ_CLOSING) {
 1181                 KQ_UNLOCK(kq);
 1182                 return (EBADF);
 1183         }
 1184         kq->kq_refcnt++;
 1185         KQ_UNLOCK(kq);
 1186 
 1187         return error;
 1188 }
 1189 
 1190 static void
 1191 kqueue_release(struct kqueue *kq, int locked)
 1192 {
 1193         if (locked)
 1194                 KQ_OWNED(kq);
 1195         else
 1196                 KQ_LOCK(kq);
 1197         kq->kq_refcnt--;
 1198         if (kq->kq_refcnt == 1)
 1199                 wakeup(&kq->kq_refcnt);
 1200         if (!locked)
 1201                 KQ_UNLOCK(kq);
 1202 }
 1203 
 1204 static void
 1205 kqueue_schedtask(struct kqueue *kq)
 1206 {
 1207 
 1208         KQ_OWNED(kq);
 1209         KASSERT(((kq->kq_state & KQ_TASKDRAIN) != KQ_TASKDRAIN),
 1210             ("scheduling kqueue task while draining"));
 1211 
 1212         if ((kq->kq_state & KQ_TASKSCHED) != KQ_TASKSCHED) {
 1213                 taskqueue_enqueue(taskqueue_kqueue, &kq->kq_task);
 1214                 kq->kq_state |= KQ_TASKSCHED;
 1215         }
 1216 }
 1217 
 1218 /*
 1219  * Expand the kq to make sure we have storage for fops/ident pair.
 1220  *
 1221  * Return 0 on success (or no work necessary), return errno on failure.
 1222  *
  1223  * Not calling hashinit with the waitok-derived malloc flag should be
  1224  * safe: when kqueue_register is called from a non-fd context, there
  1225  * usually are (and should be) no locks held.
 1226  */
 1227 static int
 1228 kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
 1229         int waitok)
 1230 {
 1231         struct klist *list, *tmp_knhash, *to_free;
 1232         u_long tmp_knhashmask;
 1233         int size;
 1234         int fd;
 1235         int mflag = waitok ? M_WAITOK : M_NOWAIT;
 1236 
 1237         KQ_NOTOWNED(kq);
 1238 
 1239         to_free = NULL;
 1240         if (fops->f_isfd) {
 1241                 fd = ident;
 1242                 if (kq->kq_knlistsize <= fd) {
 1243                         size = kq->kq_knlistsize;
 1244                         while (size <= fd)
 1245                                 size += KQEXTENT;
 1246                         list = malloc(size * sizeof(*list), M_KQUEUE, mflag);
 1247                         if (list == NULL)
 1248                                 return ENOMEM;
 1249                         KQ_LOCK(kq);
 1250                         if (kq->kq_knlistsize > fd) {
 1251                                 to_free = list;
 1252                                 list = NULL;
 1253                         } else {
 1254                                 if (kq->kq_knlist != NULL) {
 1255                                         bcopy(kq->kq_knlist, list,
 1256                                             kq->kq_knlistsize * sizeof(*list));
 1257                                         to_free = kq->kq_knlist;
 1258                                         kq->kq_knlist = NULL;
 1259                                 }
 1260                                 bzero((caddr_t)list +
 1261                                     kq->kq_knlistsize * sizeof(*list),
 1262                                     (size - kq->kq_knlistsize) * sizeof(*list));
 1263                                 kq->kq_knlistsize = size;
 1264                                 kq->kq_knlist = list;
 1265                         }
 1266                         KQ_UNLOCK(kq);
 1267                 }
 1268         } else {
 1269                 if (kq->kq_knhashmask == 0) {
 1270                         tmp_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
 1271                             &tmp_knhashmask);
 1272                         if (tmp_knhash == NULL)
 1273                                 return ENOMEM;
 1274                         KQ_LOCK(kq);
 1275                         if (kq->kq_knhashmask == 0) {
 1276                                 kq->kq_knhash = tmp_knhash;
 1277                                 kq->kq_knhashmask = tmp_knhashmask;
 1278                         } else {
 1279                                 to_free = tmp_knhash;
 1280                         }
 1281                         KQ_UNLOCK(kq);
 1282                 }
 1283         }
 1284         free(to_free, M_KQUEUE);
 1285 
 1286         KQ_NOTOWNED(kq);
 1287         return 0;
 1288 }
 1289 
 1290 static void
 1291 kqueue_task(void *arg, int pending)
 1292 {
 1293         struct kqueue *kq;
 1294         int haskqglobal;
 1295 
 1296         haskqglobal = 0;
 1297         kq = arg;
 1298 
 1299         KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
 1300         KQ_LOCK(kq);
 1301 
 1302         KNOTE_LOCKED(&kq->kq_sel.si_note, 0);
 1303 
 1304         kq->kq_state &= ~KQ_TASKSCHED;
 1305         if ((kq->kq_state & KQ_TASKDRAIN) == KQ_TASKDRAIN) {
 1306                 wakeup(&kq->kq_state);
 1307         }
 1308         KQ_UNLOCK(kq);
 1309         KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
 1310 }
 1311 
 1312 /*
 1313  * Scan, update kn_data (if not ONESHOT), and copyout triggered events.
 1314  * We treat KN_MARKER knotes as if they are INFLUX.
 1315  */
 1316 static int
 1317 kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops,
 1318     const struct timespec *tsp, struct kevent *keva, struct thread *td)
 1319 {
 1320         struct kevent *kevp;
 1321         struct timeval atv, rtv, ttv;
 1322         struct knote *kn, *marker;
 1323         int count, timeout, nkev, error, influx;
 1324         int haskqglobal, touch;
 1325 
 1326         count = maxevents;
 1327         nkev = 0;
 1328         error = 0;
 1329         haskqglobal = 0;
 1330 
 1331         if (maxevents == 0)
 1332                 goto done_nl;
 1333 
 1334         if (tsp != NULL) {
 1335                 TIMESPEC_TO_TIMEVAL(&atv, tsp);
 1336                 if (itimerfix(&atv)) {
 1337                         error = EINVAL;
 1338                         goto done_nl;
 1339                 }
 1340                 if (tsp->tv_sec == 0 && tsp->tv_nsec == 0)
 1341                         timeout = -1;
 1342                 else
 1343                         timeout = atv.tv_sec > 24 * 60 * 60 ?
 1344                             24 * 60 * 60 * hz : tvtohz(&atv);
 1345                 getmicrouptime(&rtv);
 1346                 timevaladd(&atv, &rtv);
 1347         } else {
 1348                 atv.tv_sec = 0;
 1349                 atv.tv_usec = 0;
 1350                 timeout = 0;
 1351         }
 1352         marker = knote_alloc(1);
 1353         if (marker == NULL) {
 1354                 error = ENOMEM;
 1355                 goto done_nl;
 1356         }
 1357         marker->kn_status = KN_MARKER;
 1358         KQ_LOCK(kq);
 1359         goto start;
 1360 
 1361 retry:
 1362         if (atv.tv_sec || atv.tv_usec) {
 1363                 getmicrouptime(&rtv);
 1364                 if (timevalcmp(&rtv, &atv, >=))
 1365                         goto done;
 1366                 ttv = atv;
 1367                 timevalsub(&ttv, &rtv);
 1368                 timeout = ttv.tv_sec > 24 * 60 * 60 ?
 1369                         24 * 60 * 60 * hz : tvtohz(&ttv);
 1370         }
 1371 
 1372 start:
 1373         kevp = keva;
 1374         if (kq->kq_count == 0) {
 1375                 if (timeout < 0) {
 1376                         error = EWOULDBLOCK;
 1377                 } else {
 1378                         kq->kq_state |= KQ_SLEEP;
 1379                         error = msleep(kq, &kq->kq_lock, PSOCK | PCATCH,
 1380                             "kqread", timeout);
 1381                 }
 1382                 if (error == 0)
 1383                         goto retry;
 1384                 /* don't restart after signals... */
 1385                 if (error == ERESTART)
 1386                         error = EINTR;
 1387                 else if (error == EWOULDBLOCK)
 1388                         error = 0;
 1389                 goto done;
 1390         }
 1391 
 1392         TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe);
 1393         influx = 0;
 1394         while (count) {
 1395                 KQ_OWNED(kq);
 1396                 kn = TAILQ_FIRST(&kq->kq_head);
 1397 
 1398                 if ((kn->kn_status == KN_MARKER && kn != marker) ||
 1399                     (kn->kn_status & KN_INFLUX) == KN_INFLUX) {
 1400                         if (influx) {
 1401                                 influx = 0;
 1402                                 KQ_FLUX_WAKEUP(kq);
 1403                         }
 1404                         kq->kq_state |= KQ_FLUXWAIT;
 1405                         error = msleep(kq, &kq->kq_lock, PSOCK,
 1406                             "kqflxwt", 0);
 1407                         continue;
 1408                 }
 1409 
 1410                 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
 1411                 if ((kn->kn_status & KN_DISABLED) == KN_DISABLED) {
 1412                         kn->kn_status &= ~KN_QUEUED;
 1413                         kq->kq_count--;
 1414                         continue;
 1415                 }
 1416                 if (kn == marker) {
 1417                         KQ_FLUX_WAKEUP(kq);
 1418                         if (count == maxevents)
 1419                                 goto retry;
 1420                         goto done;
 1421                 }
 1422                 KASSERT((kn->kn_status & KN_INFLUX) == 0,
 1423                     ("KN_INFLUX set when not suppose to be"));
 1424 
 1425                 if ((kn->kn_flags & EV_ONESHOT) == EV_ONESHOT) {
 1426                         kn->kn_status &= ~KN_QUEUED;
 1427                         kn->kn_status |= KN_INFLUX;
 1428                         kq->kq_count--;
 1429                         KQ_UNLOCK(kq);
 1430                         /*
 1431                          * We don't need to lock the list since we've marked
 1432                          * it _INFLUX.
 1433                          */
 1434                         *kevp = kn->kn_kevent;
 1435                         if (!(kn->kn_status & KN_DETACHED))
 1436                                 kn->kn_fop->f_detach(kn);
 1437                         knote_drop(kn, td);
 1438                         KQ_LOCK(kq);
 1439                         kn = NULL;
 1440                 } else {
 1441                         kn->kn_status |= KN_INFLUX;
 1442                         KQ_UNLOCK(kq);
 1443                         if ((kn->kn_status & KN_KQUEUE) == KN_KQUEUE)
 1444                                 KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
 1445                         KN_LIST_LOCK(kn);
 1446                         if (kn->kn_fop->f_event(kn, 0) == 0) {
 1447                                 KQ_LOCK(kq);
 1448                                 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
 1449                                 kn->kn_status &=
 1450                                     ~(KN_QUEUED | KN_ACTIVE | KN_INFLUX);
 1451                                 kq->kq_count--;
 1452                                 KN_LIST_UNLOCK(kn);
 1453                                 influx = 1;
 1454                                 continue;
 1455                         }
 1456                         touch = (!kn->kn_fop->f_isfd &&
 1457                             kn->kn_fop->f_touch != NULL);
 1458                         if (touch)
 1459                                 kn->kn_fop->f_touch(kn, kevp, EVENT_PROCESS);
 1460                         else
 1461                                 *kevp = kn->kn_kevent;
 1462                         KQ_LOCK(kq);
 1463                         KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
 1464                         if (kn->kn_flags & (EV_CLEAR |  EV_DISPATCH)) {
 1465                                 /* 
 1466                          * Manually clear knotes that weren't
 1467                          * 'touch'ed.
 1468                                  */
 1469                                 if (touch == 0 && kn->kn_flags & EV_CLEAR) {
 1470                                         kn->kn_data = 0;
 1471                                         kn->kn_fflags = 0;
 1472                                 }
 1473                                 if (kn->kn_flags & EV_DISPATCH)
 1474                                         kn->kn_status |= KN_DISABLED;
 1475                                 kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
 1476                                 kq->kq_count--;
 1477                         } else
 1478                                 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
 1479                         
 1480                         kn->kn_status &= ~(KN_INFLUX);
 1481                         KN_LIST_UNLOCK(kn);
 1482                         influx = 1;
 1483                 }
 1484 
 1485                 /* we are returning a copy to the user */
 1486                 kevp++;
 1487                 nkev++;
 1488                 count--;
 1489 
 1490                 if (nkev == KQ_NEVENTS) {
 1491                         influx = 0;
 1492                         KQ_UNLOCK_FLUX(kq);
 1493                         error = k_ops->k_copyout(k_ops->arg, keva, nkev);
 1494                         nkev = 0;
 1495                         kevp = keva;
 1496                         KQ_LOCK(kq);
 1497                         if (error)
 1498                                 break;
 1499                 }
 1500         }
 1501         TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe);
 1502 done:
 1503         KQ_OWNED(kq);
 1504         KQ_UNLOCK_FLUX(kq);
 1505         knote_free(marker);
 1506 done_nl:
 1507         KQ_NOTOWNED(kq);
 1508         if (nkev != 0)
 1509                 error = k_ops->k_copyout(k_ops->arg, keva, nkev);
 1510         td->td_retval[0] = maxevents - count;
 1511         return (error);
 1512 }
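
The loop above is what a userland kevent(2) call with a timeout ultimately runs. A minimal sketch, assuming a pipe as the event source (the descriptors and one-second timeout are illustrative); a NULL timespec would block indefinitely, a zeroed one polls:

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>

#include <err.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
        struct kevent ev, out;
        struct timespec ts = { 1, 0 };          /* wait at most one second */
        int fds[2], kq, n;

        if (pipe(fds) == -1 || (kq = kqueue()) == -1)
                err(1, "setup");
        /* Register interest in readability of the pipe's read end. */
        EV_SET(&ev, fds[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
        if (kevent(kq, &ev, 1, NULL, 0, NULL) == -1)
                err(1, "kevent register");
        (void)write(fds[1], "x", 1);
        /* Sleeps in kqueue_scan() ("kqread") until an event or timeout. */
        if ((n = kevent(kq, NULL, 0, &out, 1, &ts)) == -1)
                err(1, "kevent wait");
        printf("%d event(s), %jd byte(s) readable\n", n, (intmax_t)out.data);
        return (0);
}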
 1513 
 1514 /*
 1515  * XXX
 1516  * This could be expanded to call kqueue_scan, if desired.
 1517  */
 1518 /*ARGSUSED*/
 1519 static int
 1520 kqueue_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
 1521         int flags, struct thread *td)
 1522 {
 1523         return (ENXIO);
 1524 }
 1525 
 1526 /*ARGSUSED*/
 1527 static int
 1528 kqueue_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
 1529          int flags, struct thread *td)
 1530 {
 1531         return (ENXIO);
 1532 }
 1533 
 1534 /*ARGSUSED*/
 1535 static int
 1536 kqueue_truncate(struct file *fp, off_t length, struct ucred *active_cred,
 1537         struct thread *td)
 1538 {
 1539 
 1540         return (EINVAL);
 1541 }
 1542 
 1543 /*ARGSUSED*/
 1544 static int
 1545 kqueue_ioctl(struct file *fp, u_long cmd, void *data,
 1546         struct ucred *active_cred, struct thread *td)
 1547 {
 1548         /*
 1549          * Enabling sigio causes two major problems:
 1550          * 1) infinite recursion:
 1551          * Synopsis: kevent is being used to track signals and has FIOASYNC
 1552          * set.  On receipt of a signal this will cause a kqueue to recurse
 1553          * into itself over and over.  Sending the sigio causes the kqueue
 1554          * to become ready, which in turn posts sigio again, forever.
 1555          * Solution: this can be solved by setting a flag in the kqueue that
 1556          * we have a SIGIO in progress.
 1557          * 2) locking problems:
 1558          * Synopsis: Kqueue is a leaf subsystem, but adding signalling puts
 1559          * us above the proc and pgrp locks.
 1560          * Solution: Post a signal using an async mechanism, being sure to
 1561          * record a generation count in the delivery so that we do not deliver
 1562          * a signal to the wrong process.
 1563          *
 1564          * Note, these two mechanisms are somewhat mutually exclusive!
 1565          */
 1566 #if 0
 1567         struct kqueue *kq;
 1568 
 1569         kq = fp->f_data;
 1570         switch (cmd) {
 1571         case FIOASYNC:
 1572                 if (*(int *)data) {
 1573                         kq->kq_state |= KQ_ASYNC;
 1574                 } else {
 1575                         kq->kq_state &= ~KQ_ASYNC;
 1576                 }
 1577                 return (0);
 1578 
 1579         case FIOSETOWN:
 1580                 return (fsetown(*(int *)data, &kq->kq_sigio));
 1581 
 1582         case FIOGETOWN:
 1583                 *(int *)data = fgetown(&kq->kq_sigio);
 1584                 return (0);
 1585         }
 1586 #endif
 1587 
 1588         return (ENOTTY);
 1589 }
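
With the block above compiled out, every ioctl on a kqueue descriptor falls through to ENOTTY. A minimal userland sketch demonstrating that (the result holds for FIOASYNC as for any other cmd):

#include <sys/types.h>
#include <sys/event.h>
#include <sys/filio.h>
#include <sys/ioctl.h>

#include <errno.h>
#include <stdio.h>

int
main(void)
{
        int kq, on = 1;

        if ((kq = kqueue()) == -1)
                return (1);
        /* The #if 0 above means this always fails with ENOTTY. */
        if (ioctl(kq, FIOASYNC, &on) == -1 && errno == ENOTTY)
                printf("ioctl on a kqueue is not supported\n");
        return (0);
}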
 1590 
 1591 /*ARGSUSED*/
 1592 static int
 1593 kqueue_poll(struct file *fp, int events, struct ucred *active_cred,
 1594         struct thread *td)
 1595 {
 1596         struct kqueue *kq;
 1597         int revents = 0;
 1598         int error;
 1599 
 1600         if ((error = kqueue_acquire(fp, &kq)))
 1601                 return POLLERR;
 1602 
 1603         KQ_LOCK(kq);
 1604         if (events & (POLLIN | POLLRDNORM)) {
 1605                 if (kq->kq_count) {
 1606                         revents |= events & (POLLIN | POLLRDNORM);
 1607                 } else {
 1608                         selrecord(td, &kq->kq_sel);
 1609                         if (SEL_WAITING(&kq->kq_sel))
 1610                                 kq->kq_state |= KQ_SEL;
 1611                 }
 1612         }
 1613         kqueue_release(kq, 1);
 1614         KQ_UNLOCK(kq);
 1615         return (revents);
 1616 }
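
kqueue_poll() makes a kqueue descriptor itself pollable: POLLIN/POLLRDNORM are reported as soon as kq_count is nonzero, otherwise the caller is parked on kq_sel. A minimal sketch, assuming a pipe as the underlying event source:

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>

#include <err.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
        struct kevent ev;
        struct pollfd pfd;
        int fds[2], kq;

        if (pipe(fds) == -1 || (kq = kqueue()) == -1)
                err(1, "setup");
        EV_SET(&ev, fds[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
        if (kevent(kq, &ev, 1, NULL, 0, NULL) == -1)
                err(1, "kevent");
        (void)write(fds[1], "x", 1);            /* queue a knote */

        pfd.fd = kq;
        pfd.events = POLLIN;
        /* kqueue_poll() sees kq_count > 0 and reports readability. */
        if (poll(&pfd, 1, 1000) > 0 && (pfd.revents & POLLIN) != 0)
                printf("kqueue descriptor is readable\n");
        return (0);
}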
 1617 
 1618 /*ARGSUSED*/
 1619 static int
 1620 kqueue_stat(struct file *fp, struct stat *st, struct ucred *active_cred,
 1621         struct thread *td)
 1622 {
 1623 
 1624         bzero((void *)st, sizeof *st);
 1625         /*
 1626          * We no longer return kq_count because the unlocked value is useless.
 1627          * If you spent all this time getting the count, why not spend your
 1628          * syscall better by calling kevent?
 1629          *
 1630          * XXX - This is needed for libc_r.
 1631          */
 1632         st->st_mode = S_IFIFO;
 1633         return (0);
 1634 }
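
The fake FIFO mode is observable from userland; everything else in the stat buffer stays zero. A minimal sketch:

#include <sys/types.h>
#include <sys/event.h>
#include <sys/stat.h>

#include <err.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        struct stat st;
        int kq;

        if ((kq = kqueue()) == -1 || fstat(kq, &st) == -1)
                err(1, "kqueue/fstat");
        /* st_mode is S_IFIFO; kq_count is deliberately not exposed. */
        printf("fifo: %d, size: %jd\n", S_ISFIFO(st.st_mode) != 0,
            (intmax_t)st.st_size);
        return (0);
}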
 1635 
 1636 /*ARGSUSED*/
 1637 static int
 1638 kqueue_close(struct file *fp, struct thread *td)
 1639 {
 1640         struct kqueue *kq = fp->f_data;
 1641         struct filedesc *fdp;
 1642         struct knote *kn;
 1643         int i;
 1644         int error;
 1645 
 1646         if ((error = kqueue_acquire(fp, &kq)))
 1647                 return error;
 1648 
 1649         KQ_LOCK(kq);
 1650 
 1651         KASSERT((kq->kq_state & KQ_CLOSING) != KQ_CLOSING,
 1652             ("kqueue already closing"));
 1653         kq->kq_state |= KQ_CLOSING;
 1654         if (kq->kq_refcnt > 1)
 1655                 msleep(&kq->kq_refcnt, &kq->kq_lock, PSOCK, "kqclose", 0);
 1656 
 1657         KASSERT(kq->kq_refcnt == 1, ("other refs are out there!"));
 1658         fdp = kq->kq_fdp;
 1659 
 1660         KASSERT(knlist_empty(&kq->kq_sel.si_note),
 1661             ("kqueue's knlist not empty"));
 1662 
 1663         for (i = 0; i < kq->kq_knlistsize; i++) {
 1664                 while ((kn = SLIST_FIRST(&kq->kq_knlist[i])) != NULL) {
 1665                         if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) {
 1666                                 kq->kq_state |= KQ_FLUXWAIT;
 1667                                 msleep(kq, &kq->kq_lock, PSOCK, "kqclo1", 0);
 1668                                 continue;
 1669                         }
 1670                         kn->kn_status |= KN_INFLUX;
 1671                         KQ_UNLOCK(kq);
 1672                         if (!(kn->kn_status & KN_DETACHED))
 1673                                 kn->kn_fop->f_detach(kn);
 1674                         knote_drop(kn, td);
 1675                         KQ_LOCK(kq);
 1676                 }
 1677         }
 1678         if (kq->kq_knhashmask != 0) {
 1679                 for (i = 0; i <= kq->kq_knhashmask; i++) {
 1680                         while ((kn = SLIST_FIRST(&kq->kq_knhash[i])) != NULL) {
 1681                                 if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) {
 1682                                         kq->kq_state |= KQ_FLUXWAIT;
 1683                                         msleep(kq, &kq->kq_lock, PSOCK,
 1684                                                "kqclo2", 0);
 1685                                         continue;
 1686                                 }
 1687                                 kn->kn_status |= KN_INFLUX;
 1688                                 KQ_UNLOCK(kq);
 1689                                 if (!(kn->kn_status & KN_DETACHED))
 1690                                         kn->kn_fop->f_detach(kn);
 1691                                 knote_drop(kn, td);
 1692                                 KQ_LOCK(kq);
 1693                         }
 1694                 }
 1695         }
 1696 
 1697         if ((kq->kq_state & KQ_TASKSCHED) == KQ_TASKSCHED) {
 1698                 kq->kq_state |= KQ_TASKDRAIN;
 1699                 msleep(&kq->kq_state, &kq->kq_lock, PSOCK, "kqtqdr", 0);
 1700         }
 1701 
 1702         if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
 1703                 selwakeuppri(&kq->kq_sel, PSOCK);
 1704                 if (!SEL_WAITING(&kq->kq_sel))
 1705                         kq->kq_state &= ~KQ_SEL;
 1706         }
 1707 
 1708         KQ_UNLOCK(kq);
 1709 
 1710         FILEDESC_XLOCK(fdp);
 1711         SLIST_REMOVE(&fdp->fd_kqlist, kq, kqueue, kq_list);
 1712         FILEDESC_XUNLOCK(fdp);
 1713 
 1714         seldrain(&kq->kq_sel);
 1715         knlist_destroy(&kq->kq_sel.si_note);
 1716         mtx_destroy(&kq->kq_lock);
 1717         kq->kq_fdp = NULL;
 1718 
 1719         if (kq->kq_knhash != NULL)
 1720                 free(kq->kq_knhash, M_KQUEUE);
 1721         if (kq->kq_knlist != NULL)
 1722                 free(kq->kq_knlist, M_KQUEUE);
 1723 
 1724         funsetown(&kq->kq_sigio);
 1725         free(kq, M_KQUEUE);
 1726         fp->f_data = NULL;
 1727 
 1728         return (0);
 1729 }
 1730 
 1731 static void
 1732 kqueue_wakeup(struct kqueue *kq)
 1733 {
 1734         KQ_OWNED(kq);
 1735 
 1736         if ((kq->kq_state & KQ_SLEEP) == KQ_SLEEP) {
 1737                 kq->kq_state &= ~KQ_SLEEP;
 1738                 wakeup(kq);
 1739         }
 1740         if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
 1741                 selwakeuppri(&kq->kq_sel, PSOCK);
 1742                 if (!SEL_WAITING(&kq->kq_sel))
 1743                         kq->kq_state &= ~KQ_SEL;
 1744         }
 1745         if (!knlist_empty(&kq->kq_sel.si_note))
 1746                 kqueue_schedtask(kq);
 1747         if ((kq->kq_state & KQ_ASYNC) == KQ_ASYNC) {
 1748                 pgsigio(&kq->kq_sigio, SIGIO, 0);
 1749         }
 1750 }
 1751 
 1752 /*
 1753  * Walk down a list of knotes, activating them if their event has triggered.
 1754  *
 1755  * There is a possibility to optimize in the case of one kq watching another.
 1756  * Instead of scheduling a task to wake it up, you could pass enough state
 1757  * down the chain to wake up the parent kqueue.  Make this code functional
 1758  * first.
 1759  */
 1760 void
 1761 knote(struct knlist *list, long hint, int lockflags)
 1762 {
 1763         struct kqueue *kq;
 1764         struct knote *kn;
 1765         int error;
 1766 
 1767         if (list == NULL)
 1768                 return;
 1769 
 1770         KNL_ASSERT_LOCK(list, lockflags & KNF_LISTLOCKED);
 1771 
 1772         if ((lockflags & KNF_LISTLOCKED) == 0)
 1773                 list->kl_lock(list->kl_lockarg); 
 1774 
 1775         /*
 1776          * If we unlock the list lock (and set KN_INFLUX), we can eliminate
 1777          * the kqueue scheduling, but this will introduce four
 1778          * lock/unlock pairs for each knote to test.  If we do, continue to
 1779          * use SLIST_FOREACH; SLIST_FOREACH_SAFE is not safe here, since it
 1780          * is only safe when removing the current item, which we are not
 1781          * doing.
 1782          */
 1783         SLIST_FOREACH(kn, &list->kl_list, kn_selnext) {
 1784                 kq = kn->kn_kq;
 1785                 if ((kn->kn_status & KN_INFLUX) != KN_INFLUX) {
 1786                         KQ_LOCK(kq);
 1787                         if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) {
 1788                                 KQ_UNLOCK(kq);
 1789                         } else if ((lockflags & KNF_NOKQLOCK) != 0) {
 1790                                 kn->kn_status |= KN_INFLUX;
 1791                                 KQ_UNLOCK(kq);
 1792                                 error = kn->kn_fop->f_event(kn, hint);
 1793                                 KQ_LOCK(kq);
 1794                                 kn->kn_status &= ~KN_INFLUX;
 1795                                 if (error)
 1796                                         KNOTE_ACTIVATE(kn, 1);
 1797                                 KQ_UNLOCK_FLUX(kq);
 1798                         } else {
 1799                                 kn->kn_status |= KN_HASKQLOCK;
 1800                                 if (kn->kn_fop->f_event(kn, hint))
 1801                                         KNOTE_ACTIVATE(kn, 1);
 1802                                 kn->kn_status &= ~KN_HASKQLOCK;
 1803                                 KQ_UNLOCK(kq);
 1804                         }
 1805                 }
 1806                 kq = NULL;
 1807         }
 1808         if ((lockflags & KNF_LISTLOCKED) == 0)
 1809                 list->kl_unlock(list->kl_lockarg); 
 1810 }
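
A hedged kernel-side sketch of the usual caller: a driver that already owns the knlist lock activates its knotes with KNOTE_LOCKED(), which expands to knote(..., KNF_LISTLOCKED). The softc layout and mutex names are hypothetical, not from this file:

#include <sys/param.h>
#include <sys/event.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/selinfo.h>

struct foo_softc {                      /* hypothetical driver state */
        struct mtx      sc_mtx;         /* doubles as the knlist lock */
        struct selinfo  sc_rsel;        /* si_note holds our knlist */
};

static void
foo_data_ready(struct foo_softc *sc)
{

        mtx_lock(&sc->sc_mtx);
        /* List lock held, so knote() must not take it again. */
        KNOTE_LOCKED(&sc->sc_rsel.si_note, 0);
        mtx_unlock(&sc->sc_mtx);
}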
 1811 
 1812 /*
 1813  * add a knote to a knlist
 1814  */
 1815 void
 1816 knlist_add(struct knlist *knl, struct knote *kn, int islocked)
 1817 {
 1818         KNL_ASSERT_LOCK(knl, islocked);
 1819         KQ_NOTOWNED(kn->kn_kq);
 1820         KASSERT((kn->kn_status & (KN_INFLUX|KN_DETACHED)) ==
 1821             (KN_INFLUX|KN_DETACHED), ("knote not KN_INFLUX and KN_DETACHED"));
 1822         if (!islocked)
 1823                 knl->kl_lock(knl->kl_lockarg);
 1824         SLIST_INSERT_HEAD(&knl->kl_list, kn, kn_selnext);
 1825         if (!islocked)
 1826                 knl->kl_unlock(knl->kl_lockarg);
 1827         KQ_LOCK(kn->kn_kq);
 1828         kn->kn_knlist = knl;
 1829         kn->kn_status &= ~KN_DETACHED;
 1830         KQ_UNLOCK(kn->kn_kq);
 1831 }
 1832 
 1833 static void
 1834 knlist_remove_kq(struct knlist *knl, struct knote *kn, int knlislocked, int kqislocked)
 1835 {
 1836         KASSERT(!(!!kqislocked && !knlislocked), ("kq locked w/o knl locked"));
 1837         KNL_ASSERT_LOCK(knl, knlislocked);
 1838         mtx_assert(&kn->kn_kq->kq_lock, kqislocked ? MA_OWNED : MA_NOTOWNED);
 1839         if (!kqislocked)
 1840                 KASSERT((kn->kn_status & (KN_INFLUX|KN_DETACHED)) == KN_INFLUX,
 1841     ("knlist_remove called w/o knote being KN_INFLUX or already removed"));
 1842         if (!knlislocked)
 1843                 knl->kl_lock(knl->kl_lockarg);
 1844         SLIST_REMOVE(&knl->kl_list, kn, knote, kn_selnext);
 1845         kn->kn_knlist = NULL;
 1846         if (!knlislocked)
 1847                 knl->kl_unlock(knl->kl_lockarg);
 1848         if (!kqislocked)
 1849                 KQ_LOCK(kn->kn_kq);
 1850         kn->kn_status |= KN_DETACHED;
 1851         if (!kqislocked)
 1852                 KQ_UNLOCK(kn->kn_kq);
 1853 }
 1854 
 1855 /*
 1856  * remove a knote from the specified knlist
 1857  */
 1858 void
 1859 knlist_remove(struct knlist *knl, struct knote *kn, int islocked)
 1860 {
 1861 
 1862         knlist_remove_kq(knl, kn, islocked, 0);
 1863 }
 1864 
 1865 /*
 1866  * remove a knote from the specified knlist while in an f_event handler.
 1867  */
 1868 void
 1869 knlist_remove_inevent(struct knlist *knl, struct knote *kn)
 1870 {
 1871 
 1872         knlist_remove_kq(knl, kn, 1,
 1873             (kn->kn_status & KN_HASKQLOCK) == KN_HASKQLOCK);
 1874 }
 1875 
 1876 int
 1877 knlist_empty(struct knlist *knl)
 1878 {
 1879         KNL_ASSERT_LOCKED(knl);
 1880         return SLIST_EMPTY(&knl->kl_list);
 1881 }
 1882 
 1883 static struct mtx       knlist_lock;
 1884 MTX_SYSINIT(knlist_lock, &knlist_lock, "knlist lock for lockless objects",
 1885         MTX_DEF);
 1886 static void knlist_mtx_lock(void *arg);
 1887 static void knlist_mtx_unlock(void *arg);
 1888 
 1889 static void
 1890 knlist_mtx_lock(void *arg)
 1891 {
 1892         mtx_lock((struct mtx *)arg);
 1893 }
 1894 
 1895 static void
 1896 knlist_mtx_unlock(void *arg)
 1897 {
 1898         mtx_unlock((struct mtx *)arg);
 1899 }
 1900 
 1901 static void
 1902 knlist_mtx_assert_locked(void *arg)
 1903 {
 1904         mtx_assert((struct mtx *)arg, MA_OWNED);
 1905 }
 1906 
 1907 static void
 1908 knlist_mtx_assert_unlocked(void *arg)
 1909 {
 1910         mtx_assert((struct mtx *)arg, MA_NOTOWNED);
 1911 }
 1912 
 1913 void
 1914 knlist_init(struct knlist *knl, void *lock, void (*kl_lock)(void *),
 1915     void (*kl_unlock)(void *),
 1916     void (*kl_assert_locked)(void *), void (*kl_assert_unlocked)(void *))
 1917 {
 1918 
 1919         if (lock == NULL)
 1920                 knl->kl_lockarg = &knlist_lock;
 1921         else
 1922                 knl->kl_lockarg = lock;
 1923 
 1924         if (kl_lock == NULL)
 1925                 knl->kl_lock = knlist_mtx_lock;
 1926         else
 1927                 knl->kl_lock = kl_lock;
 1928         if (kl_unlock == NULL)
 1929                 knl->kl_unlock = knlist_mtx_unlock;
 1930         else
 1931                 knl->kl_unlock = kl_unlock;
 1932         if (kl_assert_locked == NULL)
 1933                 knl->kl_assert_locked = knlist_mtx_assert_locked;
 1934         else
 1935                 knl->kl_assert_locked = kl_assert_locked;
 1936         if (kl_assert_unlocked == NULL)
 1937                 knl->kl_assert_unlocked = knlist_mtx_assert_unlocked;
 1938         else
 1939                 knl->kl_assert_unlocked = kl_assert_unlocked;
 1940 
 1941         SLIST_INIT(&knl->kl_list);
 1942 }
 1943 
 1944 void
 1945 knlist_init_mtx(struct knlist *knl, struct mtx *lock)
 1946 {
 1947 
 1948         knlist_init(knl, lock, NULL, NULL, NULL, NULL);
 1949 }
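
A hedged sketch of the matching attach-time setup, continuing the hypothetical foo_softc from the knote() example above: handing the driver's own mutex to knlist_init_mtx() selects the default mutex helpers defined earlier for all five lock operations:

static void
foo_attach(struct foo_softc *sc)
{

        mtx_init(&sc->sc_mtx, "foo", NULL, MTX_DEF);
        /* kl_lock/kl_unlock/asserts default to the mutex helpers above. */
        knlist_init_mtx(&sc->sc_rsel.si_note, &sc->sc_mtx);
}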
 1950 
 1951 void
 1952 knlist_destroy(struct knlist *knl)
 1953 {
 1954 
 1955 #ifdef INVARIANTS
 1956         /*
 1957          * if we run across this error, we need to find the offending
 1958          * driver and have it call knlist_clear.
 1959          */
 1960         if (!SLIST_EMPTY(&knl->kl_list))
 1961                 printf("WARNING: destroying knlist w/ knotes on it!\n");
 1962 #endif
 1963 
 1964         knl->kl_lockarg = knl->kl_lock = knl->kl_unlock = NULL;
 1965         SLIST_INIT(&knl->kl_list);
 1966 }
 1967 
 1968 /*
 1969  * Even if we are locked, we may need to drop the lock to allow any influx
 1970  * knotes time to "settle".
 1971  */
 1972 void
 1973 knlist_cleardel(struct knlist *knl, struct thread *td, int islocked, int killkn)
 1974 {
 1975         struct knote *kn, *kn2;
 1976         struct kqueue *kq;
 1977 
 1978         if (islocked)
 1979                 KNL_ASSERT_LOCKED(knl);
 1980         else {
 1981                 KNL_ASSERT_UNLOCKED(knl);
 1982 again:          /* need to reacquire lock since we have dropped it */
 1983                 knl->kl_lock(knl->kl_lockarg);
 1984         }
 1985 
 1986         SLIST_FOREACH_SAFE(kn, &knl->kl_list, kn_selnext, kn2) {
 1987                 kq = kn->kn_kq;
 1988                 KQ_LOCK(kq);
 1989                 if ((kn->kn_status & KN_INFLUX)) {
 1990                         KQ_UNLOCK(kq);
 1991                         continue;
 1992                 }
 1993                 knlist_remove_kq(knl, kn, 1, 1);
 1994                 if (killkn) {
 1995                         kn->kn_status |= KN_INFLUX | KN_DETACHED;
 1996                         KQ_UNLOCK(kq);
 1997                         knote_drop(kn, td);
 1998                 } else {
 1999                         /* Make sure cleared knotes disappear soon */
 2000                         kn->kn_flags |= (EV_EOF | EV_ONESHOT);
 2001                         KQ_UNLOCK(kq);
 2002                 }
 2003                 kq = NULL;
 2004         }
 2005 
 2006         if (!SLIST_EMPTY(&knl->kl_list)) {
 2007                 /* there are still KN_INFLUX remaining */
 2008                 kn = SLIST_FIRST(&knl->kl_list);
 2009                 kq = kn->kn_kq;
 2010                 KQ_LOCK(kq);
 2011                 KASSERT(kn->kn_status & KN_INFLUX,
 2012                     ("knote removed w/o list lock"));
 2013                 knl->kl_unlock(knl->kl_lockarg);
 2014                 kq->kq_state |= KQ_FLUXWAIT;
 2015                 msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqkclr", 0);
 2016                 kq = NULL;
 2017                 goto again;
 2018         }
 2019 
 2020         if (islocked)
 2021                 KNL_ASSERT_LOCKED(knl);
 2022         else {
 2023                 knl->kl_unlock(knl->kl_lockarg);
 2024                 KNL_ASSERT_UNLOCKED(knl);
 2025         }
 2026 }
 2027 
 2028 /*
 2029  * Remove all knotes referencing a specified fd; must be called with the
 2030  * FILEDESC lock held.  This prevents a race where a new fd comes along,
 2031  * occupies the entry, and we attach a knote to the wrong fd.
 2032  */
 2033 void
 2034 knote_fdclose(struct thread *td, int fd)
 2035 {
 2036         struct filedesc *fdp = td->td_proc->p_fd;
 2037         struct kqueue *kq;
 2038         struct knote *kn;
 2039         int influx;
 2040 
 2041         FILEDESC_XLOCK_ASSERT(fdp);
 2042 
 2043         /*
 2044          * We shouldn't have to worry about new kevents appearing on fd
 2045          * since filedesc is locked.
 2046          */
 2047         SLIST_FOREACH(kq, &fdp->fd_kqlist, kq_list) {
 2048                 KQ_LOCK(kq);
 2049 
 2050 again:
 2051                 influx = 0;
 2052                 while (kq->kq_knlistsize > fd &&
 2053                     (kn = SLIST_FIRST(&kq->kq_knlist[fd])) != NULL) {
 2054                         if (kn->kn_status & KN_INFLUX) {
 2055                                 /* someone else might be waiting on our knote */
 2056                                 if (influx)
 2057                                         wakeup(kq);
 2058                                 kq->kq_state |= KQ_FLUXWAIT;
 2059                                 msleep(kq, &kq->kq_lock, PSOCK, "kqflxwt", 0);
 2060                                 goto again;
 2061                         }
 2062                         kn->kn_status |= KN_INFLUX;
 2063                         KQ_UNLOCK(kq);
 2064                         if (!(kn->kn_status & KN_DETACHED))
 2065                                 kn->kn_fop->f_detach(kn);
 2066                         knote_drop(kn, td);
 2067                         influx = 1;
 2068                         KQ_LOCK(kq);
 2069                 }
 2070                 KQ_UNLOCK_FLUX(kq);
 2071         }
 2072 }
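
The effect is visible from userland: closing a descriptor silently drops every knote attached to it, including ones already queued, so no event is reported afterwards. A minimal sketch, again using a pipe:

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>

#include <err.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
        struct kevent ev;
        struct timespec zero = { 0, 0 };
        int fds[2], kq, n;

        if (pipe(fds) == -1 || (kq = kqueue()) == -1)
                err(1, "setup");
        EV_SET(&ev, fds[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
        if (kevent(kq, &ev, 1, NULL, 0, NULL) == -1)
                err(1, "kevent");
        (void)write(fds[1], "x", 1);    /* knote is now queued */
        close(fds[0]);                  /* knote_fdclose() drops it */
        n = kevent(kq, NULL, 0, &ev, 1, &zero);
        printf("%d event(s) after close\n", n);         /* expect 0 */
        return (0);
}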
 2073 
 2074 static int
 2075 knote_attach(struct knote *kn, struct kqueue *kq)
 2076 {
 2077         struct klist *list;
 2078 
 2079         KASSERT(kn->kn_status & KN_INFLUX, ("knote not marked INFLUX"));
 2080         KQ_OWNED(kq);
 2081 
 2082         if (kn->kn_fop->f_isfd) {
 2083                 if (kn->kn_id >= kq->kq_knlistsize)
 2084                         return ENOMEM;
 2085                 list = &kq->kq_knlist[kn->kn_id];
 2086         } else {
 2087                 if (kq->kq_knhash == NULL)
 2088                         return ENOMEM;
 2089                 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
 2090         }
 2091 
 2092         SLIST_INSERT_HEAD(list, kn, kn_link);
 2093 
 2094         return 0;
 2095 }
 2096 
 2097 /*
 2098  * The knote must already have been detached using the f_detach method.
 2099  * No lock needs to be held; it is assumed that the KN_INFLUX flag is set
 2100  * to prevent other removal.
 2101  */
 2102 static void
 2103 knote_drop(struct knote *kn, struct thread *td)
 2104 {
 2105         struct kqueue *kq;
 2106         struct klist *list;
 2107 
 2108         kq = kn->kn_kq;
 2109 
 2110         KQ_NOTOWNED(kq);
 2111         KASSERT((kn->kn_status & KN_INFLUX) == KN_INFLUX,
 2112             ("knote_drop called without KN_INFLUX set in kn_status"));
 2113 
 2114         KQ_LOCK(kq);
 2115         if (kn->kn_fop->f_isfd)
 2116                 list = &kq->kq_knlist[kn->kn_id];
 2117         else
 2118                 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
 2119 
 2120         if (!SLIST_EMPTY(list))
 2121                 SLIST_REMOVE(list, kn, knote, kn_link);
 2122         if (kn->kn_status & KN_QUEUED)
 2123                 knote_dequeue(kn);
 2124         KQ_UNLOCK_FLUX(kq);
 2125 
 2126         if (kn->kn_fop->f_isfd) {
 2127                 fdrop(kn->kn_fp, td);
 2128                 kn->kn_fp = NULL;
 2129         }
 2130         kqueue_fo_release(kn->kn_kevent.filter);
 2131         kn->kn_fop = NULL;
 2132         knote_free(kn);
 2133 }
 2134 
 2135 static void
 2136 knote_enqueue(struct knote *kn)
 2137 {
 2138         struct kqueue *kq = kn->kn_kq;
 2139 
 2140         KQ_OWNED(kn->kn_kq);
 2141         KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));
 2142 
 2143         TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
 2144         kn->kn_status |= KN_QUEUED;
 2145         kq->kq_count++;
 2146         kqueue_wakeup(kq);
 2147 }
 2148 
 2149 static void
 2150 knote_dequeue(struct knote *kn)
 2151 {
 2152         struct kqueue *kq = kn->kn_kq;
 2153 
 2154         KQ_OWNED(kn->kn_kq);
 2155         KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
 2156 
 2157         TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
 2158         kn->kn_status &= ~KN_QUEUED;
 2159         kq->kq_count--;
 2160 }
 2161 
 2162 static void
 2163 knote_init(void)
 2164 {
 2165 
 2166         knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL,
 2167             NULL, NULL, UMA_ALIGN_PTR, 0);
 2168 }
 2169 SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL);
 2170 
 2171 static struct knote *
 2172 knote_alloc(int waitok)
 2173 {
 2174         return ((struct knote *)uma_zalloc(knote_zone,
 2175             (waitok ? M_WAITOK : M_NOWAIT)|M_ZERO));
 2176 }
 2177 
 2178 static void
 2179 knote_free(struct knote *kn)
 2180 {
 2181         if (kn != NULL)
 2182                 uma_zfree(knote_zone, kn);
 2183 }
 2184 
 2185 /*
 2186  * Register the kev w/ the kq specified by fd.
 2187  */
 2188 int 
 2189 kqfd_register(int fd, struct kevent *kev, struct thread *td, int waitok)
 2190 {
 2191         struct kqueue *kq;
 2192         struct file *fp;
 2193         int error;
 2194 
 2195         if ((error = fget(td, fd, CAP_POST_EVENT, &fp)) != 0)
 2196                 return (error);
 2197         if ((error = kqueue_acquire(fp, &kq)) != 0)
 2198                 goto noacquire;
 2199 
 2200         error = kqueue_register(kq, kev, td, waitok);
 2201 
 2202         kqueue_release(kq, 0);
 2203 
 2204 noacquire:
 2205         fdrop(fp, td);
 2206 
 2207         return error;
 2208 }
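
kqfd_register() exists for in-kernel consumers that are handed a kqueue only as a file descriptor; the aio code is the notable caller. A hedged userland sketch of that path, assuming the aio facility is available (the file name and buffer are illustrative):

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>

#include <aio.h>
#include <err.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
        static char buf[] = "hello";
        struct aiocb cb;
        struct kevent ev;
        int fd, kq;

        if ((kq = kqueue()) == -1 ||
            (fd = open("/tmp/aio.out", O_CREAT | O_WRONLY, 0600)) == -1)
                err(1, "setup");
        memset(&cb, 0, sizeof(cb));
        cb.aio_fildes = fd;
        cb.aio_buf = buf;
        cb.aio_nbytes = sizeof(buf) - 1;
        /* Completion is posted to kq via kqfd_register() in the kernel. */
        cb.aio_sigevent.sigev_notify = SIGEV_KEVENT;
        cb.aio_sigevent.sigev_notify_kqueue = kq;
        if (aio_write(&cb) == -1)
                err(1, "aio_write");
        if (kevent(kq, NULL, 0, &ev, 1, NULL) == 1)
                printf("EVFILT_AIO: %zd bytes written\n",
                    aio_return((struct aiocb *)ev.ident));
        return (0);
}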
