FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_event.c

    1 /*-
    2  * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
    3  * Copyright 2004 John-Mark Gurney <jmg@FreeBSD.org>
    4  * All rights reserved.
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions
    8  * are met:
    9  * 1. Redistributions of source code must retain the above copyright
   10  *    notice, this list of conditions and the following disclaimer.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  *
   15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   25  * SUCH DAMAGE.
   26  */
   27 
   28 #include <sys/cdefs.h>
   29 __FBSDID("$FreeBSD: releng/5.3/sys/kern/kern_event.c 136588 2004-10-16 08:43:07Z cvs2svn $");
   30 
   31 #include <sys/param.h>
   32 #include <sys/systm.h>
   33 #include <sys/kernel.h>
   34 #include <sys/lock.h>
   35 #include <sys/mutex.h>
   36 #include <sys/proc.h>
   37 #include <sys/malloc.h>
   38 #include <sys/unistd.h>
   39 #include <sys/file.h>
   40 #include <sys/filedesc.h>
   41 #include <sys/filio.h>
   42 #include <sys/fcntl.h>
   43 #include <sys/kthread.h>
   44 #include <sys/selinfo.h>
   45 #include <sys/queue.h>
   46 #include <sys/event.h>
   47 #include <sys/eventvar.h>
   48 #include <sys/poll.h>
   49 #include <sys/protosw.h>
   50 #include <sys/sigio.h>
   51 #include <sys/signalvar.h>
   52 #include <sys/socket.h>
   53 #include <sys/socketvar.h>
   54 #include <sys/stat.h>
   55 #include <sys/sysctl.h>
   56 #include <sys/sysproto.h>
   57 #include <sys/taskqueue.h>
   58 #include <sys/uio.h>
   59 
   60 #include <vm/uma.h>
   61 
   62 MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");
   63 /*
   64  * This lock is used if multiple kq locks are required.  This possibly
   65  * should be made into a per proc lock.
   66  */
   67 static struct mtx       kq_global;
   68 MTX_SYSINIT(kq_global, &kq_global, "kqueue order", MTX_DEF);
   69 #define KQ_GLOBAL_LOCK(lck, haslck)     do {    \
   70         if (!haslck)                            \
   71                 mtx_lock(lck);                  \
   72         haslck = 1;                             \
   73 } while (0)
   74 #define KQ_GLOBAL_UNLOCK(lck, haslck)   do {    \
   75         if (haslck)                             \
   76                 mtx_unlock(lck);                        \
   77         haslck = 0;                             \
   78 } while (0)
   79 
   80 TASKQUEUE_DEFINE_THREAD(kqueue);
   81 
    82 static int      kqueue_acquire(struct file *fp, struct kqueue **kqp);
   83 static void     kqueue_release(struct kqueue *kq, int locked);
   84 static int      kqueue_expand(struct kqueue *kq, struct filterops *fops,
   85                     uintptr_t ident, int waitok);
   86 static void     kqueue_task(void *arg, int pending);
   87 static int      kqueue_scan(struct kqueue *kq, int maxevents,
   88                     struct kevent *ulistp, const struct timespec *timeout,
   89                     struct kevent *keva, struct thread *td);
   90 static void     kqueue_wakeup(struct kqueue *kq);
   91 static struct filterops *kqueue_fo_find(int filt);
   92 static void     kqueue_fo_release(int filt);
   93 
   94 static fo_rdwr_t        kqueue_read;
   95 static fo_rdwr_t        kqueue_write;
   96 static fo_ioctl_t       kqueue_ioctl;
   97 static fo_poll_t        kqueue_poll;
   98 static fo_kqfilter_t    kqueue_kqfilter;
   99 static fo_stat_t        kqueue_stat;
  100 static fo_close_t       kqueue_close;
  101 
  102 static struct fileops kqueueops = {
  103         .fo_read = kqueue_read,
  104         .fo_write = kqueue_write,
  105         .fo_ioctl = kqueue_ioctl,
  106         .fo_poll = kqueue_poll,
  107         .fo_kqfilter = kqueue_kqfilter,
  108         .fo_stat = kqueue_stat,
  109         .fo_close = kqueue_close,
  110 };
  111 
  112 static int      knote_attach(struct knote *kn, struct kqueue *kq);
  113 static void     knote_drop(struct knote *kn, struct thread *td);
  114 static void     knote_enqueue(struct knote *kn);
  115 static void     knote_dequeue(struct knote *kn);
  116 static void     knote_init(void);
  117 static struct   knote *knote_alloc(int waitok);
  118 static void     knote_free(struct knote *kn);
  119 
  120 static void     filt_kqdetach(struct knote *kn);
  121 static int      filt_kqueue(struct knote *kn, long hint);
  122 static int      filt_procattach(struct knote *kn);
  123 static void     filt_procdetach(struct knote *kn);
  124 static int      filt_proc(struct knote *kn, long hint);
  125 static int      filt_fileattach(struct knote *kn);
  126 static void     filt_timerexpire(void *knx);
  127 static int      filt_timerattach(struct knote *kn);
  128 static void     filt_timerdetach(struct knote *kn);
  129 static int      filt_timer(struct knote *kn, long hint);
  130 
  131 static struct filterops file_filtops =
  132         { 1, filt_fileattach, NULL, NULL };
  133 static struct filterops kqread_filtops =
  134         { 1, NULL, filt_kqdetach, filt_kqueue };
  135 /* XXX - move to kern_proc.c?  */
  136 static struct filterops proc_filtops =
  137         { 0, filt_procattach, filt_procdetach, filt_proc };
  138 static struct filterops timer_filtops =
  139         { 0, filt_timerattach, filt_timerdetach, filt_timer };
  140 
  141 static uma_zone_t       knote_zone;
  142 static int              kq_ncallouts = 0;
  143 static int              kq_calloutmax = (4 * 1024);
  144 SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
  145     &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
  146 
  147 /* XXX - ensure not KN_INFLUX?? */
  148 #define KNOTE_ACTIVATE(kn, islock) do {                                 \
  149         if ((islock))                                                   \
  150                 mtx_assert(&(kn)->kn_kq->kq_lock, MA_OWNED);            \
  151         else                                                            \
  152                 KQ_LOCK((kn)->kn_kq);                                   \
  153         (kn)->kn_status |= KN_ACTIVE;                                   \
  154         if (((kn)->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)         \
  155                 knote_enqueue((kn));                                    \
  156         if (!(islock))                                                  \
  157                 KQ_UNLOCK((kn)->kn_kq);                                 \
  158 } while(0)
  159 #define KQ_LOCK(kq) do {                                                \
  160         mtx_lock(&(kq)->kq_lock);                                       \
  161 } while (0)
  162 #define KQ_FLUX_WAKEUP(kq) do {                                         \
  163         if (((kq)->kq_state & KQ_FLUXWAIT) == KQ_FLUXWAIT) {            \
  164                 (kq)->kq_state &= ~KQ_FLUXWAIT;                         \
  165                 wakeup((kq));                                           \
  166         }                                                               \
  167 } while (0)
  168 #define KQ_UNLOCK_FLUX(kq) do {                                         \
  169         KQ_FLUX_WAKEUP(kq);                                             \
  170         mtx_unlock(&(kq)->kq_lock);                                     \
  171 } while (0)
  172 #define KQ_UNLOCK(kq) do {                                              \
  173         mtx_unlock(&(kq)->kq_lock);                                     \
  174 } while (0)
  175 #define KQ_OWNED(kq) do {                                               \
  176         mtx_assert(&(kq)->kq_lock, MA_OWNED);                           \
  177 } while (0)
  178 #define KQ_NOTOWNED(kq) do {                                            \
  179         mtx_assert(&(kq)->kq_lock, MA_NOTOWNED);                        \
  180 } while (0)
  181 #define KN_LIST_LOCK(kn) do {                                           \
  182         if (kn->kn_knlist != NULL)                                      \
  183                 mtx_lock(kn->kn_knlist->kl_lock);                       \
  184 } while (0)
  185 #define KN_LIST_UNLOCK(kn) do {                                         \
  186         if (kn->kn_knlist != NULL)                                      \
  187                 mtx_unlock(kn->kn_knlist->kl_lock);                     \
  188 } while (0)
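/*
 * A sketch of the locking protocol implemented by the macros above: the
 * kq mutex protects the queue and knote state bits, while KN_LIST_LOCK
 * protects the event source's knlist.  A knote whose state is changing
 * is marked KN_INFLUX before the kq lock is dropped:
 *
 *	kn->kn_status |= KN_INFLUX;
 *	KQ_UNLOCK(kq);
 *	... sleepable work: f_attach, f_detach, copyout ...
 *	KQ_LOCK(kq);
 *	kn->kn_status &= ~KN_INFLUX;
 *	KQ_UNLOCK_FLUX(kq);
 *
 * Threads that find an INFLUX knote set KQ_FLUXWAIT and msleep() until
 * KQ_FLUX_WAKEUP runs.
 */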
  189 
  190 #define KN_HASHSIZE             64              /* XXX should be tunable */
  191 #define KN_HASH(val, mask)      (((val) ^ (val >> 8)) & (mask))
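/*
 * Illustrative example: with the default mask KN_HASHSIZE - 1 = 63,
 * KN_HASH(0x1234, 63) = (0x1234 ^ 0x12) & 0x3f = 0x1226 & 0x3f = 0x26,
 * folding bits 8..15 into the low bits to spread clustered idents.
 */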
  192 
  193 static int
  194 filt_nullattach(struct knote *kn)
  195 {
  196 
  197         return (ENXIO);
   198 }
  199 
  200 struct filterops null_filtops =
  201         { 0, filt_nullattach, NULL, NULL };
  202 
  203 /* XXX - make SYSINIT to add these, and move into respective modules. */
  204 extern struct filterops sig_filtops;
  205 extern struct filterops fs_filtops;
  206 
  207 /*
   208  * Table for all system-defined filters.
  209  */
  210 static struct mtx       filterops_lock;
  211 MTX_SYSINIT(kqueue_filterops, &filterops_lock, "protect sysfilt_ops",
  212         MTX_DEF);
  213 static struct {
  214         struct filterops *for_fop;
  215         int for_refcnt;
  216 } sysfilt_ops[EVFILT_SYSCOUNT] = {
  217         { &file_filtops },                      /* EVFILT_READ */
  218         { &file_filtops },                      /* EVFILT_WRITE */
  219         { &null_filtops },                      /* EVFILT_AIO */
  220         { &file_filtops },                      /* EVFILT_VNODE */
  221         { &proc_filtops },                      /* EVFILT_PROC */
  222         { &sig_filtops },                       /* EVFILT_SIGNAL */
  223         { &timer_filtops },                     /* EVFILT_TIMER */
  224         { &file_filtops },                      /* EVFILT_NETDEV */
  225         { &fs_filtops },                        /* EVFILT_FS */
  226 };
  227 
  228 /*
  229  * Simple redirection for all cdevsw style objects to call their fo_kqfilter
  230  * method.
  231  */
  232 static int
  233 filt_fileattach(struct knote *kn)
  234 {
  235 
  236         return (fo_kqfilter(kn->kn_fp, kn));
  237 }
  238 
  239 /*ARGSUSED*/
  240 static int
  241 kqueue_kqfilter(struct file *fp, struct knote *kn)
  242 {
  243         struct kqueue *kq = kn->kn_fp->f_data;
  244 
  245         if (kn->kn_filter != EVFILT_READ)
  246                 return (EINVAL);
  247 
  248         kn->kn_status |= KN_KQUEUE;
  249         kn->kn_fop = &kqread_filtops;
  250         knlist_add(&kq->kq_sel.si_note, kn, 0);
  251 
  252         return (0);
  253 }
  254 
  255 static void
  256 filt_kqdetach(struct knote *kn)
  257 {
  258         struct kqueue *kq = kn->kn_fp->f_data;
  259 
  260         knlist_remove(&kq->kq_sel.si_note, kn, 0);
  261 }
  262 
  263 /*ARGSUSED*/
  264 static int
  265 filt_kqueue(struct knote *kn, long hint)
  266 {
  267         struct kqueue *kq = kn->kn_fp->f_data;
  268 
  269         kn->kn_data = kq->kq_count;
  270         return (kn->kn_data > 0);
  271 }
  272 
  273 /* XXX - move to kern_proc.c?  */
  274 static int
  275 filt_procattach(struct knote *kn)
  276 {
  277         struct proc *p;
  278         int immediate;
  279         int error;
  280 
  281         immediate = 0;
  282         p = pfind(kn->kn_id);
  283         if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
  284                 p = zpfind(kn->kn_id);
  285                 immediate = 1;
  286         } else if (p != NULL && (p->p_flag & P_WEXIT)) {
  287                 immediate = 1;
  288         }
  289 
  290         if (p == NULL)
  291                 return (ESRCH);
  292         if ((error = p_cansee(curthread, p)))
  293                 return (error);
  294 
  295         kn->kn_ptr.p_proc = p;
  296         kn->kn_flags |= EV_CLEAR;               /* automatically set */
  297 
  298         /*
  299          * internal flag indicating registration done by kernel
  300          */
  301         if (kn->kn_flags & EV_FLAG1) {
  302                 kn->kn_data = kn->kn_sdata;             /* ppid */
  303                 kn->kn_fflags = NOTE_CHILD;
  304                 kn->kn_flags &= ~EV_FLAG1;
  305         }
  306 
  307         if (immediate == 0)
  308                 knlist_add(&p->p_klist, kn, 1);
  309 
  310         /*
  311          * Immediately activate any exit notes if the target process is a
  312          * zombie.  This is necessary to handle the case where the target
  313          * process, e.g. a child, dies before the kevent is registered.
  314          */
  315         if (immediate && filt_proc(kn, NOTE_EXIT))
  316                 KNOTE_ACTIVATE(kn, 0);
  317 
  318         PROC_UNLOCK(p);
  319 
  320         return (0);
  321 }
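/*
 * An illustrative userland sketch (assuming an existing kqueue fd `kq'
 * and a child pid `pid'): watch a process for exit with EVFILT_PROC.
 *
 *	struct kevent ev;
 *
 *	EV_SET(&ev, pid, EVFILT_PROC, EV_ADD, NOTE_EXIT, 0, NULL);
 *	if (kevent(kq, &ev, 1, NULL, 0, NULL) == -1)
 *		err(1, "kevent");
 *
 * The zombie handling above keeps this free of races with a child that
 * exits before the registration completes.
 */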
  322 
  323 /*
  324  * The knote may be attached to a different process, which may exit,
  325  * leaving nothing for the knote to be attached to.  So when the process
  326  * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
  327  * it will be deleted when read out.  However, as part of the knote deletion,
  328  * this routine is called, so a check is needed to avoid actually performing
  329  * a detach, because the original process does not exist any more.
  330  */
  331 /* XXX - move to kern_proc.c?  */
  332 static void
  333 filt_procdetach(struct knote *kn)
  334 {
  335         struct proc *p;
  336 
  337         p = kn->kn_ptr.p_proc;
  338         knlist_remove(&p->p_klist, kn, 0);
  339         kn->kn_ptr.p_proc = NULL;
  340 }
  341 
  342 /* XXX - move to kern_proc.c?  */
  343 static int
  344 filt_proc(struct knote *kn, long hint)
  345 {
  346         struct proc *p = kn->kn_ptr.p_proc;
  347         u_int event;
  348 
  349         /*
  350          * mask off extra data
  351          */
  352         event = (u_int)hint & NOTE_PCTRLMASK;
  353 
  354         /*
  355          * if the user is interested in this event, record it.
  356          */
  357         if (kn->kn_sfflags & event)
  358                 kn->kn_fflags |= event;
  359 
  360         /*
  361          * process is gone, so flag the event as finished.
  362          */
  363         if (event == NOTE_EXIT) {
  364                 if (!(kn->kn_status & KN_DETACHED))
  365                         knlist_remove_inevent(&p->p_klist, kn);
  366                 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
  367                 kn->kn_ptr.p_proc = NULL;
  368                 return (1);
  369         }
  370 
  371         /*
  372          * process forked, and user wants to track the new process,
  373          * so attach a new knote to it, and immediately report an
  374          * event with the parent's pid.
  375          */
  376         if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
  377                 struct kevent kev;
  378                 int error;
  379 
  380                 /*
  381                  * register knote with new process.
  382                  */
  383                 kev.ident = hint & NOTE_PDATAMASK;      /* pid */
  384                 kev.filter = kn->kn_filter;
  385                 kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
  386                 kev.fflags = kn->kn_sfflags;
  387                 kev.data = kn->kn_id;                   /* parent */
  388                 kev.udata = kn->kn_kevent.udata;        /* preserve udata */
  389                 error = kqueue_register(kn->kn_kq, &kev, NULL, 0);
  390                 if (error)
  391                         kn->kn_fflags |= NOTE_TRACKERR;
  392         }
  393 
  394         return (kn->kn_fflags != 0);
  395 }
  396 
  397 static int
  398 timertoticks(intptr_t data)
  399 {
  400         struct timeval tv;
  401         int tticks;
  402 
  403         tv.tv_sec = data / 1000;
  404         tv.tv_usec = (data % 1000) * 1000;
  405         tticks = tvtohz(&tv);
  406 
  407         return tticks;
  408 }
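/*
 * Illustrative arithmetic: data = 1500 (ms) gives tv = { 1, 500000 };
 * with hz = 100 tvtohz() returns roughly 151 ticks, since it rounds up
 * and adds a tick so that a timeout never fires early.
 */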
  409 
  410 /* XXX - move to kern_timeout.c? */
  411 static void
  412 filt_timerexpire(void *knx)
  413 {
  414         struct knote *kn = knx;
  415         struct callout *calloutp;
  416 
  417         kn->kn_data++;
  418         KNOTE_ACTIVATE(kn, 0);  /* XXX - handle locking */
  419 
  420         if ((kn->kn_flags & EV_ONESHOT) != EV_ONESHOT) {
  421                 calloutp = (struct callout *)kn->kn_hook;
  422                 callout_reset(calloutp, timertoticks(kn->kn_sdata),
  423                     filt_timerexpire, kn);
  424         }
  425 }
  426 
  427 /*
  428  * data contains amount of time to sleep, in milliseconds
  429  */
  430 /* XXX - move to kern_timeout.c? */
  431 static int
  432 filt_timerattach(struct knote *kn)
  433 {
  434         struct callout *calloutp;
  435 
  436         atomic_add_int(&kq_ncallouts, 1);
  437 
  438         if (kq_ncallouts >= kq_calloutmax) {
  439                 atomic_add_int(&kq_ncallouts, -1);
  440                 return (ENOMEM);
  441         }
  442 
  443         kn->kn_flags |= EV_CLEAR;               /* automatically set */
   444         kn->kn_status &= ~KN_DETACHED;          /* knlist_add usually clears it */
  445         MALLOC(calloutp, struct callout *, sizeof(*calloutp),
  446             M_KQUEUE, M_WAITOK);
  447         callout_init(calloutp, 1);
  448         kn->kn_hook = calloutp;
  449         callout_reset(calloutp, timertoticks(kn->kn_sdata), filt_timerexpire,
  450             kn);
  451 
  452         return (0);
  453 }
  454 
  455 /* XXX - move to kern_timeout.c? */
  456 static void
  457 filt_timerdetach(struct knote *kn)
  458 {
  459         struct callout *calloutp;
  460 
  461         calloutp = (struct callout *)kn->kn_hook;
  462         callout_drain(calloutp);
  463         FREE(calloutp, M_KQUEUE);
  464         atomic_add_int(&kq_ncallouts, -1);
   465         kn->kn_status |= KN_DETACHED;   /* knlist_remove usually sets it */
  466 }
  467 
  468 /* XXX - move to kern_timeout.c? */
  469 static int
  470 filt_timer(struct knote *kn, long hint)
  471 {
  472 
  473         return (kn->kn_data != 0);
  474 }
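/*
 * An illustrative userland sketch (assuming an existing kqueue fd `kq'):
 * a 500 ms periodic timer.  Because EV_CLEAR is forced on in
 * filt_timerattach(), ev.data reports the number of expirations since
 * the last retrieval.
 *
 *	struct kevent ev;
 *
 *	EV_SET(&ev, 1, EVFILT_TIMER, EV_ADD, 0, 500, NULL);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);
 */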
  475 
  476 /*
  477  * MPSAFE
  478  */
  479 int
  480 kqueue(struct thread *td, struct kqueue_args *uap)
  481 {
  482         struct filedesc *fdp;
  483         struct kqueue *kq;
  484         struct file *fp;
  485         int fd, error;
  486 
  487         fdp = td->td_proc->p_fd;
  488         error = falloc(td, &fp, &fd);
  489         if (error)
  490                 goto done2;
  491 
   492         /* An extra reference on `fp' has been held for us by falloc(). */
  493         kq = malloc(sizeof *kq, M_KQUEUE, M_WAITOK | M_ZERO);
  494         mtx_init(&kq->kq_lock, "kqueue", NULL, MTX_DEF|MTX_DUPOK);
  495         TAILQ_INIT(&kq->kq_head);
  496         kq->kq_fdp = fdp;
  497         knlist_init(&kq->kq_sel.si_note, &kq->kq_lock);
  498         TASK_INIT(&kq->kq_task, 0, kqueue_task, kq);
  499 
  500         FILEDESC_LOCK(fdp);
  501         SLIST_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_list);
  502         FILEDESC_UNLOCK(fdp);
  503 
  504         FILE_LOCK(fp);
  505         fp->f_flag = FREAD | FWRITE;
  506         fp->f_type = DTYPE_KQUEUE;
  507         fp->f_ops = &kqueueops;
  508         fp->f_data = kq;
  509         FILE_UNLOCK(fp);
  510         fdrop(fp, td);
  511 
  512         td->td_retval[0] = fd;
  513 done2:
  514         return (error);
  515 }
  516 
  517 #ifndef _SYS_SYSPROTO_H_
  518 struct kevent_args {
  519         int     fd;
  520         const struct kevent *changelist;
  521         int     nchanges;
  522         struct  kevent *eventlist;
  523         int     nevents;
  524         const struct timespec *timeout;
  525 };
  526 #endif
  527 /*
  528  * MPSAFE
  529  */
  530 int
  531 kevent(struct thread *td, struct kevent_args *uap)
  532 {
  533         struct kevent keva[KQ_NEVENTS];
  534         struct kevent *kevp;
  535         struct kqueue *kq;
  536         struct file *fp;
  537         struct timespec ts;
  538         int i, n, nerrors, error;
  539 
  540         if ((error = fget(td, uap->fd, &fp)) != 0)
  541                 return (error);
   542         if ((error = kqueue_acquire(fp, &kq)) != 0)
  543                 goto done_norel;
  544 
  545         if (uap->timeout != NULL) {
  546                 error = copyin(uap->timeout, &ts, sizeof(ts));
  547                 if (error)
  548                         goto done;
  549                 uap->timeout = &ts;
  550         }
  551 
  552         nerrors = 0;
  553 
  554         while (uap->nchanges > 0) {
  555                 n = uap->nchanges > KQ_NEVENTS ? KQ_NEVENTS : uap->nchanges;
  556                 error = copyin(uap->changelist, keva,
  557                     n * sizeof *keva);
  558                 if (error)
  559                         goto done;
  560                 for (i = 0; i < n; i++) {
  561                         kevp = &keva[i];
  562                         kevp->flags &= ~EV_SYSFLAGS;
  563                         error = kqueue_register(kq, kevp, td, 1);
  564                         if (error) {
  565                                 if (uap->nevents != 0) {
  566                                         kevp->flags = EV_ERROR;
  567                                         kevp->data = error;
  568                                         (void) copyout(kevp,
  569                                             uap->eventlist,
  570                                             sizeof(*kevp));
  571                                         uap->eventlist++;
  572                                         uap->nevents--;
  573                                         nerrors++;
  574                                 } else {
  575                                         goto done;
  576                                 }
  577                         }
  578                 }
  579                 uap->nchanges -= n;
  580                 uap->changelist += n;
  581         }
  582         if (nerrors) {
  583                 td->td_retval[0] = nerrors;
  584                 error = 0;
  585                 goto done;
  586         }
  587 
  588         error = kqueue_scan(kq, uap->nevents, uap->eventlist, uap->timeout,
  589             keva, td);
  590 done:
  591         kqueue_release(kq, 0);
  592 done_norel:
  593         if (fp != NULL)
  594                 fdrop(fp, td);
  595         return (error);
  596 }
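/*
 * An illustrative userland sketch of the two syscalls above (assuming a
 * descriptor `fd' to watch): register interest, then block for events.
 *
 *	struct kevent change, ev;
 *	int kq, n;
 *
 *	if ((kq = kqueue()) == -1)
 *		err(1, "kqueue");
 *	EV_SET(&change, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	n = kevent(kq, &change, 1, &ev, 1, NULL);
 *	if (n > 0)
 *		printf("%ld bytes readable\n", (long)ev.data);
 *
 * Per-change errors come back as EV_ERROR events with the errno in
 * ev.data, matching the copyout in the registration loop above.
 */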
  597 
  598 int
  599 kqueue_add_filteropts(int filt, struct filterops *filtops)
  600 {
   601         int error = 0;
  602 
  603         if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) {
  604                 printf(
  605 "trying to add a filterop that is out of range: %d is beyond %d\n",
  606                     ~filt, EVFILT_SYSCOUNT);
  607                 return EINVAL;
  608         }
  609         mtx_lock(&filterops_lock);
  610         if (sysfilt_ops[~filt].for_fop != &null_filtops &&
  611             sysfilt_ops[~filt].for_fop != NULL)
  612                 error = EEXIST;
  613         else {
  614                 sysfilt_ops[~filt].for_fop = filtops;
  615                 sysfilt_ops[~filt].for_refcnt = 0;
  616         }
  617         mtx_unlock(&filterops_lock);
  618 
   619         return (error);
  620 }
  621 
  622 int
  623 kqueue_del_filteropts(int filt)
  624 {
  625         int error;
  626 
  627         error = 0;
  628         if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
  629                 return EINVAL;
  630 
  631         mtx_lock(&filterops_lock);
  632         if (sysfilt_ops[~filt].for_fop == &null_filtops ||
  633             sysfilt_ops[~filt].for_fop == NULL)
  634                 error = EINVAL;
  635         else if (sysfilt_ops[~filt].for_refcnt != 0)
  636                 error = EBUSY;
  637         else {
  638                 sysfilt_ops[~filt].for_fop = &null_filtops;
  639                 sysfilt_ops[~filt].for_refcnt = 0;
  640         }
  641         mtx_unlock(&filterops_lock);
  642 
  643         return error;
  644 }
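/*
 * An illustrative module sketch (the filt_foo* names are hypothetical):
 * a subsystem implementing one of the EVFILT_* slots registers its
 * filterops at load time and removes them at unload.
 *
 *	static struct filterops foo_filtops =
 *		{ 0, filt_fooattach, filt_foodetach, filt_foo };
 *
 *	error = kqueue_add_filteropts(EVFILT_AIO, &foo_filtops);
 *	...
 *	error = kqueue_del_filteropts(EVFILT_AIO);
 *
 * The for_refcnt kept in sysfilt_ops prevents removal while the filter
 * is in use (see kqueue_fo_find/kqueue_fo_release below).
 */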
  645 
  646 static struct filterops *
  647 kqueue_fo_find(int filt)
  648 {
  649 
  650         if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
  651                 return NULL;
  652 
  653         mtx_lock(&filterops_lock);
  654         sysfilt_ops[~filt].for_refcnt++;
  655         if (sysfilt_ops[~filt].for_fop == NULL)
  656                 sysfilt_ops[~filt].for_fop = &null_filtops;
  657         mtx_unlock(&filterops_lock);
  658 
  659         return sysfilt_ops[~filt].for_fop;
  660 }
  661 
  662 static void
  663 kqueue_fo_release(int filt)
  664 {
  665 
  666         if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
  667                 return;
  668 
  669         mtx_lock(&filterops_lock);
  670         KASSERT(sysfilt_ops[~filt].for_refcnt > 0,
  671             ("filter object refcount not valid on release"));
  672         sysfilt_ops[~filt].for_refcnt--;
  673         mtx_unlock(&filterops_lock);
  674 }
  675 
  676 /*
   677  * A reference to the kqueue (obtained via kqueue_acquire) must be held.
   678  * waitok controls whether memory allocations may sleep; make sure it is
   679  * 0 if you hold any mutexes.
  680  */
  681 int
  682 kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td, int waitok)
  683 {
  684         struct filedesc *fdp;
  685         struct filterops *fops;
  686         struct file *fp;
  687         struct knote *kn, *tkn;
  688         int error, filt, event;
  689         int haskqglobal;
  690         int fd;
  691 
  692         fdp = NULL;
  693         fp = NULL;
  694         kn = NULL;
  695         error = 0;
  696         haskqglobal = 0;
  697 
  698         filt = kev->filter;
  699         fops = kqueue_fo_find(filt);
  700         if (fops == NULL)
  701                 return EINVAL;
  702 
  703         tkn = knote_alloc(waitok);              /* prevent waiting with locks */
  704 
  705 findkn:
  706         if (fops->f_isfd) {
  707                 KASSERT(td != NULL, ("td is NULL"));
  708                 fdp = td->td_proc->p_fd;
  709                 FILEDESC_LOCK(fdp);
  710                 /* validate descriptor */
  711                 fd = kev->ident;
  712                 if (fd < 0 || fd >= fdp->fd_nfiles ||
  713                     (fp = fdp->fd_ofiles[fd]) == NULL) {
  714                         FILEDESC_UNLOCK(fdp);
  715                         error = EBADF;
  716                         goto done;
  717                 }
  718                 fhold(fp);
  719 
  720                 if ((kev->flags & EV_ADD) == EV_ADD && kqueue_expand(kq, fops,
  721                     kev->ident, 0) != 0) {
  722                         /* unlock and try again */
  723                         FILEDESC_UNLOCK(fdp);
  724                         fdrop(fp, td);
  725                         fp = NULL;
  726                         error = kqueue_expand(kq, fops, kev->ident, waitok);
  727                         if (error)
  728                                 goto done;
  729                         goto findkn;
  730                 }
  731 
  732                 if (fp->f_type == DTYPE_KQUEUE) {
  733                         /*
   734                          * if we add some intelligence about what we are doing,
  735                          * we should be able to support events on ourselves.
  736                          * We need to know when we are doing this to prevent
  737                          * getting both the knlist lock and the kq lock since
  738                          * they are the same thing.
  739                          */
  740                         if (fp->f_data == kq) {
  741                                 FILEDESC_UNLOCK(fdp);
  742                                 error = EINVAL;
  743                                 goto done_noglobal;
  744                         }
  745 
  746                         KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
  747                 }
  748 
  749                 KQ_LOCK(kq);
  750                 if (kev->ident < kq->kq_knlistsize) {
  751                         SLIST_FOREACH(kn, &kq->kq_knlist[kev->ident], kn_link)
  752                                 if (kev->filter == kn->kn_filter)
  753                                         break;
  754                 }
  755                 FILEDESC_UNLOCK(fdp);
  756         } else {
  757                 if ((kev->flags & EV_ADD) == EV_ADD)
  758                         kqueue_expand(kq, fops, kev->ident, waitok);
  759 
  760                 KQ_LOCK(kq);
  761                 if (kq->kq_knhashmask != 0) {
  762                         struct klist *list;
  763 
  764                         list = &kq->kq_knhash[
  765                             KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
  766                         SLIST_FOREACH(kn, list, kn_link)
  767                                 if (kev->ident == kn->kn_id &&
  768                                     kev->filter == kn->kn_filter)
  769                                         break;
  770                 }
  771         }
  772 
   773         /* knote is in the process of changing, wait for it to stabilize. */
  774         if (kn != NULL && (kn->kn_status & KN_INFLUX) == KN_INFLUX) {
  775                 if (fp != NULL) {
  776                         fdrop(fp, td);
  777                         fp = NULL;
  778                 }
  779                 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
  780                 kq->kq_state |= KQ_FLUXWAIT;
  781                 msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqflxwt", 0);
  782                 goto findkn;
  783         }
  784 
  785         if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
  786                 KQ_UNLOCK(kq);
  787                 error = ENOENT;
  788                 goto done;
  789         }
  790 
  791         /*
  792          * kn now contains the matching knote, or NULL if no match
  793          */
  794         if (kev->flags & EV_ADD) {
  795                 if (kn == NULL) {
  796                         kn = tkn;
  797                         tkn = NULL;
  798                         if (kn == NULL) {
  799                                 error = ENOMEM;
  800                                 goto done;
  801                         }
  802                         kn->kn_fp = fp;
  803                         kn->kn_kq = kq;
  804                         kn->kn_fop = fops;
  805                         /*
  806                          * apply reference counts to knote structure, and
  807                          * do not release it at the end of this routine.
  808                          */
  809                         fops = NULL;
  810                         fp = NULL;
  811 
  812                         kn->kn_sfflags = kev->fflags;
  813                         kn->kn_sdata = kev->data;
  814                         kev->fflags = 0;
  815                         kev->data = 0;
  816                         kn->kn_kevent = *kev;
  817                         kn->kn_status = KN_INFLUX|KN_DETACHED;
  818 
  819                         error = knote_attach(kn, kq);
  820                         KQ_UNLOCK(kq);
  821                         if (error != 0) {
  822                                 tkn = kn;
  823                                 goto done;
  824                         }
  825 
  826                         if ((error = kn->kn_fop->f_attach(kn)) != 0) {
  827                                 knote_drop(kn, td);
  828                                 goto done;
  829                         }
  830                         KN_LIST_LOCK(kn);
  831                 } else {
  832                         /*
  833                          * The user may change some filter values after the
  834                          * initial EV_ADD, but doing so will not reset any
  835                          * filter which has already been triggered.
  836                          */
  837                         kn->kn_status |= KN_INFLUX;
  838                         KQ_UNLOCK(kq);
  839                         KN_LIST_LOCK(kn);
  840                         kn->kn_sfflags = kev->fflags;
  841                         kn->kn_sdata = kev->data;
  842                         kn->kn_kevent.udata = kev->udata;
  843                 }
  844 
  845                 /*
  846                  * We can get here with kn->kn_knlist == NULL.
  847                  * This can happen when the initial attach event decides that
  848                  * the event is "completed" already.  i.e. filt_procattach
  849                  * is called on a zombie process.  It will call filt_proc
  850                  * which will remove it from the list, and NULL kn_knlist.
  851                  */
  852                 event = kn->kn_fop->f_event(kn, 0);
  853                 KN_LIST_UNLOCK(kn);
  854                 KQ_LOCK(kq);
  855                 if (event)
  856                         KNOTE_ACTIVATE(kn, 1);
  857                 kn->kn_status &= ~KN_INFLUX;
  858         } else if (kev->flags & EV_DELETE) {
  859                 kn->kn_status |= KN_INFLUX;
  860                 KQ_UNLOCK(kq);
  861                 if (!(kn->kn_status & KN_DETACHED))
  862                         kn->kn_fop->f_detach(kn);
  863                 knote_drop(kn, td);
  864                 goto done;
  865         }
  866 
  867         if ((kev->flags & EV_DISABLE) &&
  868             ((kn->kn_status & KN_DISABLED) == 0)) {
  869                 kn->kn_status |= KN_DISABLED;
  870         }
  871 
  872         if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
  873                 kn->kn_status &= ~KN_DISABLED;
  874                 if ((kn->kn_status & KN_ACTIVE) &&
  875                     ((kn->kn_status & KN_QUEUED) == 0))
  876                         knote_enqueue(kn);
  877         }
  878         KQ_UNLOCK_FLUX(kq);
  879 
  880 done:
  881         KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
  882 done_noglobal:
  883         if (fp != NULL)
  884                 fdrop(fp, td);
  885         if (tkn != NULL)
  886                 knote_free(tkn);
  887         if (fops != NULL)
  888                 kqueue_fo_release(filt);
  889         return (error);
  890 }
  891 
  892 static int
   893 kqueue_acquire(struct file *fp, struct kqueue **kqp)
  894 {
  895         int error;
  896         struct kqueue *kq;
  897 
  898         error = 0;
  899 
  900         FILE_LOCK(fp);
  901         do {
  902                 kq = fp->f_data;
  903                 if (fp->f_type != DTYPE_KQUEUE || kq == NULL) {
  904                         error = EBADF;
  905                         break;
  906                 }
  907                 *kqp = kq;
  908                 KQ_LOCK(kq);
  909                 if ((kq->kq_state & KQ_CLOSING) == KQ_CLOSING) {
  910                         KQ_UNLOCK(kq);
  911                         error = EBADF;
  912                         break;
  913                 }
  914                 kq->kq_refcnt++;
  915                 KQ_UNLOCK(kq);
  916         } while (0);
  917         FILE_UNLOCK(fp);
  918 
  919         return error;
  920 }
  921 
  922 static void
  923 kqueue_release(struct kqueue *kq, int locked)
  924 {
  925         if (locked)
  926                 KQ_OWNED(kq);
  927         else
  928                 KQ_LOCK(kq);
  929         kq->kq_refcnt--;
  930         if (kq->kq_refcnt == 1)
  931                 wakeup(&kq->kq_refcnt);
  932         if (!locked)
  933                 KQ_UNLOCK(kq);
  934 }
  935 
  936 static void
  937 kqueue_schedtask(struct kqueue *kq)
  938 {
  939 
  940         KQ_OWNED(kq);
  941         KASSERT(((kq->kq_state & KQ_TASKDRAIN) != KQ_TASKDRAIN),
  942             ("scheduling kqueue task while draining"));
  943 
  944         if ((kq->kq_state & KQ_TASKSCHED) != KQ_TASKSCHED) {
  945                 taskqueue_enqueue(taskqueue_kqueue, &kq->kq_task);
  946                 kq->kq_state |= KQ_TASKSCHED;
  947         }
  948 }
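/*
 * A descriptive note: when one kqueue is watched by another,
 * kqueue_wakeup() runs with this kq's lock held, and notifying the
 * watcher would require taking a second kq lock.  Deferring the KNOTE
 * to the kqueue taskqueue thread lets it take kq_global first, which
 * establishes a safe ordering for the two locks (see kqueue_task()
 * below).
 */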
  949 
  950 /*
  951  * Expand the kq to make sure we have storage for fops/ident pair.
  952  *
  953  * Return 0 on success (or no work necessary), return errno on failure.
  954  *
   955  * Not calling hashinit with the proper malloc flag (waitok) should be
   956  * safe: when kqueue_register is called from a non-fd context, there
   957  * usually are, and should be, no locks held.
  958  */
  959 static int
  960 kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
  961         int waitok)
  962 {
  963         struct klist *list, *tmp_knhash;
  964         u_long tmp_knhashmask;
  965         int size;
  966         int fd;
  967         int mflag = waitok ? M_WAITOK : M_NOWAIT;
  968 
  969         KQ_NOTOWNED(kq);
  970 
  971         if (fops->f_isfd) {
  972                 fd = ident;
  973                 if (kq->kq_knlistsize <= fd) {
  974                         size = kq->kq_knlistsize;
  975                         while (size <= fd)
  976                                 size += KQEXTENT;
  977                         MALLOC(list, struct klist *,
   978                             size * sizeof(*list), M_KQUEUE, mflag);
  979                         if (list == NULL)
  980                                 return ENOMEM;
  981                         KQ_LOCK(kq);
  982                         if (kq->kq_knlistsize > fd) {
  983                                 FREE(list, M_KQUEUE);
  984                                 list = NULL;
  985                         } else {
  986                                 if (kq->kq_knlist != NULL) {
  987                                         bcopy(kq->kq_knlist, list,
   988                                             kq->kq_knlistsize * sizeof(*list));
  989                                         FREE(kq->kq_knlist, M_KQUEUE);
  990                                         kq->kq_knlist = NULL;
  991                                 }
  992                                 bzero((caddr_t)list +
   993                                     kq->kq_knlistsize * sizeof(*list),
   994                                     (size - kq->kq_knlistsize) * sizeof(*list));
  995                                 kq->kq_knlistsize = size;
  996                                 kq->kq_knlist = list;
  997                         }
  998                         KQ_UNLOCK(kq);
  999                 }
 1000         } else {
 1001                 if (kq->kq_knhashmask == 0) {
 1002                         tmp_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
 1003                             &tmp_knhashmask);
 1004                         if (tmp_knhash == NULL)
 1005                                 return ENOMEM;
 1006                         KQ_LOCK(kq);
 1007                         if (kq->kq_knhashmask == 0) {
 1008                                 kq->kq_knhash = tmp_knhash;
 1009                                 kq->kq_knhashmask = tmp_knhashmask;
 1010                         } else {
 1011                                 free(tmp_knhash, M_KQUEUE);
 1012                         }
 1013                         KQ_UNLOCK(kq);
 1014                 }
 1015         }
 1016 
 1017         KQ_NOTOWNED(kq);
 1018         return 0;
 1019 }
 1020 
 1021 static void
 1022 kqueue_task(void *arg, int pending)
 1023 {
 1024         struct kqueue *kq;
 1025         int haskqglobal;
 1026 
 1027         haskqglobal = 0;
 1028         kq = arg;
 1029 
 1030         KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
 1031         KQ_LOCK(kq);
 1032 
 1033         KNOTE_LOCKED(&kq->kq_sel.si_note, 0);
 1034 
 1035         kq->kq_state &= ~KQ_TASKSCHED;
 1036         if ((kq->kq_state & KQ_TASKDRAIN) == KQ_TASKDRAIN) {
 1037                 wakeup(&kq->kq_state);
 1038         }
 1039         KQ_UNLOCK(kq);
 1040         KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
 1041 }
 1042 
 1043 /*
  1044  * Scan, update kn_data (if not ONESHOT), and copyout triggered events;
  1045  * KN_MARKER knotes are treated as INFLUX, and a marker bounds each pass.
 1046  */
 1047 static int
 1048 kqueue_scan(struct kqueue *kq, int maxevents, struct kevent *ulistp,
 1049         const struct timespec *tsp, struct kevent *keva, struct thread *td)
 1050 {
 1051         struct kevent *kevp;
 1052         struct timeval atv, rtv, ttv;
 1053         struct knote *kn, *marker;
 1054         int count, timeout, nkev, error;
 1055         int haskqglobal;
 1056 
 1057         count = maxevents;
 1058         nkev = 0;
 1059         error = 0;
 1060         haskqglobal = 0;
 1061 
 1062         if (maxevents == 0)
 1063                 goto done_nl;
 1064 
 1065         if (tsp != NULL) {
 1066                 TIMESPEC_TO_TIMEVAL(&atv, tsp);
 1067                 if (itimerfix(&atv)) {
 1068                         error = EINVAL;
 1069                         goto done_nl;
 1070                 }
 1071                 if (tsp->tv_sec == 0 && tsp->tv_nsec == 0)
 1072                         timeout = -1;
 1073                 else
 1074                         timeout = atv.tv_sec > 24 * 60 * 60 ?
 1075                             24 * 60 * 60 * hz : tvtohz(&atv);
 1076                 getmicrouptime(&rtv);
 1077                 timevaladd(&atv, &rtv);
 1078         } else {
 1079                 atv.tv_sec = 0;
 1080                 atv.tv_usec = 0;
 1081                 timeout = 0;
 1082         }
 1083         marker = knote_alloc(1);
 1084         if (marker == NULL) {
 1085                 error = ENOMEM;
 1086                 goto done_nl;
 1087         }
 1088         marker->kn_status = KN_MARKER;
 1089         KQ_LOCK(kq);
 1090         goto start;
 1091 
 1092 retry:
 1093         if (atv.tv_sec || atv.tv_usec) {
 1094                 getmicrouptime(&rtv);
 1095                 if (timevalcmp(&rtv, &atv, >=))
 1096                         goto done;
 1097                 ttv = atv;
 1098                 timevalsub(&ttv, &rtv);
 1099                 timeout = ttv.tv_sec > 24 * 60 * 60 ?
 1100                         24 * 60 * 60 * hz : tvtohz(&ttv);
 1101         }
 1102 
 1103 start:
 1104         kevp = keva;
 1105         if (kq->kq_count == 0) {
 1106                 if (timeout < 0) {
 1107                         error = EWOULDBLOCK;
 1108                 } else {
 1109                         KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
 1110                         kq->kq_state |= KQ_SLEEP;
 1111                         error = msleep(kq, &kq->kq_lock, PSOCK | PCATCH,
 1112                             "kqread", timeout);
 1113                 }
 1114                 if (error == 0)
 1115                         goto retry;
 1116                 /* don't restart after signals... */
 1117                 if (error == ERESTART)
 1118                         error = EINTR;
 1119                 else if (error == EWOULDBLOCK)
 1120                         error = 0;
 1121                 goto done;
 1122         }
 1123 
 1124         TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe);
 1125         while (count) {
 1126                 KQ_OWNED(kq);
 1127                 kn = TAILQ_FIRST(&kq->kq_head);
 1128 
 1129                 if ((kn->kn_status == KN_MARKER && kn != marker) ||
 1130                     (kn->kn_status & KN_INFLUX) == KN_INFLUX) {
 1131                         KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
 1132                         kq->kq_state |= KQ_FLUXWAIT;
 1133                         error = msleep(kq, &kq->kq_lock, PSOCK,
 1134                             "kqflxwt", 0);
 1135                         continue;
 1136                 }
 1137 
 1138                 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
 1139                 if ((kn->kn_status & KN_DISABLED) == KN_DISABLED) {
 1140                         kn->kn_status &= ~KN_QUEUED;
 1141                         kq->kq_count--;
 1142                         continue;
 1143                 }
 1144                 if (kn == marker) {
 1145                         KQ_FLUX_WAKEUP(kq);
 1146                         if (count == maxevents)
 1147                                 goto retry;
 1148                         goto done;
 1149                 }
 1150                 KASSERT((kn->kn_status & KN_INFLUX) == 0,
  1151                     ("KN_INFLUX set when not supposed to be"));
 1152 
 1153                 if ((kn->kn_flags & EV_ONESHOT) == EV_ONESHOT) {
 1154                         kn->kn_status &= ~KN_QUEUED;
 1155                         kn->kn_status |= KN_INFLUX;
 1156                         kq->kq_count--;
 1157                         KQ_UNLOCK(kq);
 1158                         /*
 1159                          * We don't need to lock the list since we've marked
 1160                          * it _INFLUX.
 1161                          */
 1162                         *kevp = kn->kn_kevent;
 1163                         if (!(kn->kn_status & KN_DETACHED))
 1164                                 kn->kn_fop->f_detach(kn);
 1165                         knote_drop(kn, td);
 1166                         KQ_LOCK(kq);
 1167                         kn = NULL;
 1168                 } else {
 1169                         kn->kn_status |= KN_INFLUX;
 1170                         KQ_UNLOCK(kq);
 1171                         if ((kn->kn_status & KN_KQUEUE) == KN_KQUEUE)
 1172                                 KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
 1173                         KN_LIST_LOCK(kn);
 1174                         if (kn->kn_fop->f_event(kn, 0) == 0) {
 1175                                 KN_LIST_UNLOCK(kn);
 1176                                 KQ_LOCK(kq);
 1177                                 kn->kn_status &=
 1178                                     ~(KN_QUEUED | KN_ACTIVE | KN_INFLUX);
 1179                                 kq->kq_count--;
 1180                                 continue;
 1181                         }
 1182                         *kevp = kn->kn_kevent;
 1183                         KQ_LOCK(kq);
 1184                         if (kn->kn_flags & EV_CLEAR) {
 1185                                 kn->kn_data = 0;
 1186                                 kn->kn_fflags = 0;
 1187                                 kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
 1188                                 kq->kq_count--;
 1189                         } else
 1190                                 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
 1191                         KN_LIST_UNLOCK(kn);
 1192                         kn->kn_status &= ~(KN_INFLUX);
 1193                 }
 1194 
 1195                 /* we are returning a copy to the user */
 1196                 kevp++;
 1197                 nkev++;
 1198                 count--;
 1199 
 1200                 if (nkev == KQ_NEVENTS) {
 1201                         KQ_UNLOCK_FLUX(kq);
 1202                         error = copyout(keva, ulistp, sizeof *keva * nkev);
 1203                         ulistp += nkev;
 1204                         nkev = 0;
 1205                         kevp = keva;
 1206                         KQ_LOCK(kq);
 1207                         if (error)
 1208                                 break;
 1209                 }
 1210         }
 1211         TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe);
 1212 done:
 1213         KQ_OWNED(kq);
 1214         KQ_UNLOCK_FLUX(kq);
 1215         KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
 1216         knote_free(marker);
 1217 done_nl:
 1218         KQ_NOTOWNED(kq);
 1219         if (nkev != 0)
 1220                 error = copyout(keva, ulistp, sizeof *keva * nkev);
 1221         td->td_retval[0] = maxevents - count;
 1222         return (error);
 1223 }
 1224 
 1225 /*
 1226  * XXX
 1227  * This could be expanded to call kqueue_scan, if desired.
 1228  */
 1229 /*ARGSUSED*/
 1230 static int
 1231 kqueue_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
 1232         int flags, struct thread *td)
 1233 {
 1234         return (ENXIO);
 1235 }
 1236 
 1237 /*ARGSUSED*/
 1238 static int
 1239 kqueue_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
 1240          int flags, struct thread *td)
 1241 {
 1242         return (ENXIO);
 1243 }
 1244 
 1245 /*ARGSUSED*/
 1246 static int
 1247 kqueue_ioctl(struct file *fp, u_long cmd, void *data,
 1248         struct ucred *active_cred, struct thread *td)
 1249 {
 1250         /*
 1251          * Enabling sigio causes two major problems:
 1252          * 1) infinite recursion:
  1253          * Synopsis: kevent is being used to track signals and has FIOASYNC
 1254          * set.  On receipt of a signal this will cause a kqueue to recurse
 1255          * into itself over and over.  Sending the sigio causes the kqueue
 1256          * to become ready, which in turn posts sigio again, forever.
 1257          * Solution: this can be solved by setting a flag in the kqueue that
 1258          * we have a SIGIO in progress.
 1259          * 2) locking problems:
  1260          * Synopsis: Kqueue is a leaf subsystem, but adding signalling puts
 1261          * us above the proc and pgrp locks.
 1262          * Solution: Post a signal using an async mechanism, being sure to
 1263          * record a generation count in the delivery so that we do not deliver
 1264          * a signal to the wrong process.
 1265          *
 1266          * Note, these two mechanisms are somewhat mutually exclusive!
 1267          */
 1268 #if 0
 1269         struct kqueue *kq;
 1270 
 1271         kq = fp->f_data;
 1272         switch (cmd) {
 1273         case FIOASYNC:
 1274                 if (*(int *)data) {
 1275                         kq->kq_state |= KQ_ASYNC;
 1276                 } else {
 1277                         kq->kq_state &= ~KQ_ASYNC;
 1278                 }
 1279                 return (0);
 1280 
 1281         case FIOSETOWN:
 1282                 return (fsetown(*(int *)data, &kq->kq_sigio));
 1283 
 1284         case FIOGETOWN:
 1285                 *(int *)data = fgetown(&kq->kq_sigio);
 1286                 return (0);
 1287         }
 1288 #endif
 1289 
 1290         return (ENOTTY);
 1291 }
 1292 
 1293 /*ARGSUSED*/
 1294 static int
 1295 kqueue_poll(struct file *fp, int events, struct ucred *active_cred,
 1296         struct thread *td)
 1297 {
 1298         struct kqueue *kq;
 1299         int revents = 0;
 1300         int error;
 1301 
  1302         if ((error = kqueue_acquire(fp, &kq)))
 1303                 return POLLERR;
 1304 
 1305         KQ_LOCK(kq);
 1306         if (events & (POLLIN | POLLRDNORM)) {
 1307                 if (kq->kq_count) {
 1308                         revents |= events & (POLLIN | POLLRDNORM);
 1309                 } else {
 1310                         selrecord(td, &kq->kq_sel);
 1311                         kq->kq_state |= KQ_SEL;
 1312                 }
 1313         }
 1314         kqueue_release(kq, 1);
 1315         KQ_UNLOCK(kq);
 1316         return (revents);
 1317 }
 1318 
 1319 /*ARGSUSED*/
 1320 static int
 1321 kqueue_stat(struct file *fp, struct stat *st, struct ucred *active_cred,
 1322         struct thread *td)
 1323 {
 1324 
 1325         return (ENXIO);
 1326 }
 1327 
 1328 /*ARGSUSED*/
 1329 static int
 1330 kqueue_close(struct file *fp, struct thread *td)
 1331 {
 1332         struct kqueue *kq = fp->f_data;
 1333         struct filedesc *fdp;
 1334         struct knote *kn;
 1335         int i;
 1336         int error;
 1337 
  1338         if ((error = kqueue_acquire(fp, &kq)))
 1339                 return error;
 1340 
 1341         KQ_LOCK(kq);
 1342 
 1343         KASSERT((kq->kq_state & KQ_CLOSING) != KQ_CLOSING,
 1344             ("kqueue already closing"));
 1345         kq->kq_state |= KQ_CLOSING;
 1346         if (kq->kq_refcnt > 1)
 1347                 msleep(&kq->kq_refcnt, &kq->kq_lock, PSOCK, "kqclose", 0);
 1348 
 1349         KASSERT(kq->kq_refcnt == 1, ("other refs are out there!"));
 1350         fdp = kq->kq_fdp;
 1351 
 1352         KASSERT(knlist_empty(&kq->kq_sel.si_note),
 1353             ("kqueue's knlist not empty"));
 1354 
 1355         for (i = 0; i < kq->kq_knlistsize; i++) {
 1356                 while ((kn = SLIST_FIRST(&kq->kq_knlist[i])) != NULL) {
 1357                         KASSERT((kn->kn_status & KN_INFLUX) == 0,
  1358                             ("KN_INFLUX set when not supposed to be"));
 1359                         kn->kn_status |= KN_INFLUX;
 1360                         KQ_UNLOCK(kq);
 1361                         if (!(kn->kn_status & KN_DETACHED))
 1362                                 kn->kn_fop->f_detach(kn);
 1363                         knote_drop(kn, td);
 1364                         KQ_LOCK(kq);
 1365                 }
 1366         }
 1367         if (kq->kq_knhashmask != 0) {
 1368                 for (i = 0; i <= kq->kq_knhashmask; i++) {
 1369                         while ((kn = SLIST_FIRST(&kq->kq_knhash[i])) != NULL) {
 1370                                 KASSERT((kn->kn_status & KN_INFLUX) == 0,
  1371                                     ("KN_INFLUX set when not supposed to be"));
 1372                                 kn->kn_status |= KN_INFLUX;
 1373                                 KQ_UNLOCK(kq);
 1374                                 if (!(kn->kn_status & KN_DETACHED))
 1375                                         kn->kn_fop->f_detach(kn);
 1376                                 knote_drop(kn, td);
 1377                                 KQ_LOCK(kq);
 1378                         }
 1379                 }
 1380         }
 1381 
 1382         if ((kq->kq_state & KQ_TASKSCHED) == KQ_TASKSCHED) {
 1383                 kq->kq_state |= KQ_TASKDRAIN;
 1384                 msleep(&kq->kq_state, &kq->kq_lock, PSOCK, "kqtqdr", 0);
 1385         }
 1386 
 1387         if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
 1388                 kq->kq_state &= ~KQ_SEL;
 1389                 selwakeuppri(&kq->kq_sel, PSOCK);
 1390         }
 1391 
 1392         KQ_UNLOCK(kq);
 1393 
 1394         FILEDESC_LOCK(fdp);
 1395         SLIST_REMOVE(&fdp->fd_kqlist, kq, kqueue, kq_list);
 1396         FILEDESC_UNLOCK(fdp);
 1397 
 1398         knlist_destroy(&kq->kq_sel.si_note);
 1399         mtx_destroy(&kq->kq_lock);
 1400         kq->kq_fdp = NULL;
 1401 
 1402         if (kq->kq_knhash != NULL)
 1403                 free(kq->kq_knhash, M_KQUEUE);
 1404         if (kq->kq_knlist != NULL)
 1405                 free(kq->kq_knlist, M_KQUEUE);
 1406 
 1407         funsetown(&kq->kq_sigio);
 1408         free(kq, M_KQUEUE);
 1409         fp->f_data = NULL;
 1410 
 1411         return (0);
 1412 }
 1413 
 1414 static void
 1415 kqueue_wakeup(struct kqueue *kq)
 1416 {
 1417         KQ_OWNED(kq);
 1418 
 1419         if ((kq->kq_state & KQ_SLEEP) == KQ_SLEEP) {
 1420                 kq->kq_state &= ~KQ_SLEEP;
 1421                 wakeup(kq);
 1422         }
 1423         if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
 1424                 kq->kq_state &= ~KQ_SEL;
 1425                 selwakeuppri(&kq->kq_sel, PSOCK);
 1426         }
 1427         if (!knlist_empty(&kq->kq_sel.si_note))
 1428                 kqueue_schedtask(kq);
 1429         if ((kq->kq_state & KQ_ASYNC) == KQ_ASYNC) {
 1430                 pgsigio(&kq->kq_sigio, SIGIO, 0);
 1431         }
 1432 }
 1433 
 1434 /*
 1435  * Walk down a list of knotes, activating them if their event has triggered.
 1436  *
 1437  * There is an opportunity to optimize the case of one kq watching another:
 1438  * instead of scheduling a task to wake it up, enough state could be passed
 1439  * down the chain to wake the parent kqueue directly.  Make this code
 1440  * functional first.
 1441  */
 1442 void
 1443 knote(struct knlist *list, long hint, int islocked)
 1444 {
 1445         struct kqueue *kq;
 1446         struct knote *kn;
 1447 
 1448         if (list == NULL)
 1449                 return;
 1450 
 1451         mtx_assert(list->kl_lock, islocked ? MA_OWNED : MA_NOTOWNED);
 1452         if (!islocked)
 1453                 mtx_lock(list->kl_lock);
 1454         /*
 1455          * If we unlocked the list lock (and set KN_INFLUX), we could
 1456          * eliminate the kqueue scheduling, but that would introduce four
 1457          * lock/unlock pairs for each knote tested.  If we ever do so,
 1458          * keep using SLIST_FOREACH: SLIST_FOREACH_SAFE is not needed
 1459          * here, since it is only required when removing the current
 1460          * item, which we are not doing.
 1461          */
 1462         SLIST_FOREACH(kn, &list->kl_list, kn_selnext) {
 1463                 kq = kn->kn_kq;
 1464                 if ((kn->kn_status & KN_INFLUX) != KN_INFLUX) {
 1465                         KQ_LOCK(kq);
 1466                         if ((kn->kn_status & KN_INFLUX) != KN_INFLUX) {
 1467                                 kn->kn_status |= KN_HASKQLOCK;
 1468                                 if (kn->kn_fop->f_event(kn, hint))
 1469                                         KNOTE_ACTIVATE(kn, 1);
 1470                                 kn->kn_status &= ~KN_HASKQLOCK;
 1471                         }
 1472                         KQ_UNLOCK(kq);
 1473                 }
 1474                 kq = NULL;
 1475         }
 1476         if (!islocked)
 1477                 mtx_unlock(list->kl_lock);
 1478 }
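
      /*
       * Illustrative sketch (hypothetical driver code, not part of this
       * file): an event source that owns a knlist posts events by calling
       * knote() once its state changes.  Assuming a softc "sc" whose knlist
       * was initialized against the driver's own mutex:
       *
       *	mtx_lock(&sc->sc_mtx);
       *	sc->sc_ready = 1;
       *	knote(&sc->sc_rsel.si_note, 0, 1);	(list lock already held)
       *	mtx_unlock(&sc->sc_mtx);
       *
       * Passing islocked == 1 tells knote() that the caller already holds
       * kl_lock; passing 0 makes knote() take and drop it itself.
       */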
 1479 
 1480 /*
 1481  * Add a knote to a knlist.
 1482  */
 1483 void
 1484 knlist_add(struct knlist *knl, struct knote *kn, int islocked)
 1485 {
 1486         mtx_assert(knl->kl_lock, islocked ? MA_OWNED : MA_NOTOWNED);
 1487         KQ_NOTOWNED(kn->kn_kq);
 1488         KASSERT((kn->kn_status & (KN_INFLUX|KN_DETACHED)) ==
 1489             (KN_INFLUX|KN_DETACHED), ("knote not KN_INFLUX and KN_DETACHED"));
 1490         if (!islocked)
 1491                 mtx_lock(knl->kl_lock);
 1492         SLIST_INSERT_HEAD(&knl->kl_list, kn, kn_selnext);
 1493         if (!islocked)
 1494                 mtx_unlock(knl->kl_lock);
 1495         KQ_LOCK(kn->kn_kq);
 1496         kn->kn_knlist = knl;
 1497         kn->kn_status &= ~KN_DETACHED;
 1498         KQ_UNLOCK(kn->kn_kq);
 1499 }
 1500 
 1501 static void
 1502 knlist_remove_kq(struct knlist *knl, struct knote *kn, int knlislocked, int kqislocked)
 1503 {
 1504         KASSERT(!kqislocked || knlislocked, ("kq locked w/o knl locked"));
 1505         mtx_assert(knl->kl_lock, knlislocked ? MA_OWNED : MA_NOTOWNED);
 1506         mtx_assert(&kn->kn_kq->kq_lock, kqislocked ? MA_OWNED : MA_NOTOWNED);
 1507         if (!kqislocked)
 1508                 KASSERT((kn->kn_status & (KN_INFLUX|KN_DETACHED)) == KN_INFLUX,
 1509     ("knlist_remove called w/o knote being KN_INFLUX or already removed"));
 1510         if (!knlislocked)
 1511                 mtx_lock(knl->kl_lock);
 1512         SLIST_REMOVE(&knl->kl_list, kn, knote, kn_selnext);
 1513         kn->kn_knlist = NULL;
 1514         if (!knlislocked)
 1515                 mtx_unlock(knl->kl_lock);
 1516         if (!kqislocked)
 1517                 KQ_LOCK(kn->kn_kq);
 1518         kn->kn_status |= KN_DETACHED;
 1519         if (!kqislocked)
 1520                 KQ_UNLOCK(kn->kn_kq);
 1521 }
 1522 
 1523 /*
 1524  * Remove a knote from a knlist.
 1525  */
 1526 void
 1527 knlist_remove(struct knlist *knl, struct knote *kn, int islocked)
 1528 {
 1529 
 1530         knlist_remove_kq(knl, kn, islocked, 0);
 1531 }
 1532 
 1533 /*
 1534  * Remove a knote from a knlist while in the f_event handler.
 1535  */
 1536 void
 1537 knlist_remove_inevent(struct knlist *knl, struct knote *kn)
 1538 {
 1539 
 1540         knlist_remove_kq(knl, kn, 1,
 1541             (kn->kn_status & KN_HASKQLOCK) == KN_HASKQLOCK);
 1542 }
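
      /*
       * Illustrative sketch (hypothetical filter code, not part of this
       * file; filt_foo, foo_softc, and the NOTE_REVOKE usage are only an
       * example): an f_event method may detach its knote when the backing
       * object goes away, e.g.:
       *
       *	static int
       *	filt_foo(struct knote *kn, long hint)
       *	{
       *		struct foo_softc *sc = kn->kn_hook;
       *
       *		if (hint == NOTE_REVOKE) {
       *			kn->kn_flags |= (EV_EOF | EV_ONESHOT);
       *			knlist_remove_inevent(&sc->sc_rsel.si_note, kn);
       *		}
       *		return (kn->kn_data > 0);
       *	}
       *
       * Checking KN_HASKQLOCK lets this work whether or not knote() is
       * currently holding the kqueue lock for this knote.
       */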
 1543 
 1544 int
 1545 knlist_empty(struct knlist *knl)
 1546 {
 1547 
 1548         mtx_assert(knl->kl_lock, MA_OWNED);
 1549         return SLIST_EMPTY(&knl->kl_list);
 1550 }
 1551 
 1552 static struct mtx       knlist_lock;
 1553 MTX_SYSINIT(knlist_lock, &knlist_lock, "knlist lock for lockless objects",
 1554         MTX_DEF);
 1555 
 1556 void
 1557 knlist_init(struct knlist *knl, struct mtx *mtx)
 1558 {
 1559 
 1560         if (mtx == NULL)
 1561                 knl->kl_lock = &knlist_lock;
 1562         else
 1563                 knl->kl_lock = mtx;
 1564 
 1565         SLIST_INIT(&knl->kl_list);
 1566 }
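
      /*
       * Illustrative sketch (hypothetical callers, not part of this file):
       * a caller that protects its event state with its own mutex passes
       * that mutex so knote() can assert and acquire it; a caller with no
       * suitable lock passes NULL and shares the global knlist_lock above:
       *
       *	knlist_init(&sc->sc_rsel.si_note, &sc->sc_mtx);	(driver mutex)
       *	knlist_init(&knl, NULL);			(shared global lock)
       */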
 1567 
 1568 void
 1569 knlist_destroy(struct knlist *knl)
 1570 {
 1571 
 1572 #ifdef INVARIANTS
 1573         /*
 1574          * If we run across this error, we need to find the offending
 1575          * driver and have it call knlist_clear() first.
 1576          */
 1577         if (!SLIST_EMPTY(&knl->kl_list))
 1578                 printf("WARNING: destroying knlist w/ knotes on it!\n");
 1579 #endif
 1580 
 1581         knl->kl_lock = NULL;
 1582         SLIST_INIT(&knl->kl_list);
 1583 }
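
      /*
       * Illustrative teardown order (hypothetical caller, not part of this
       * file):
       *
       *	knlist_clear(&sc->sc_rsel.si_note, 0);	(flush remaining knotes)
       *	knlist_destroy(&sc->sc_rsel.si_note);	(now guaranteed empty)
       */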
 1584 
 1585 /*
 1586  * Even if we are locked, we may need to drop the lock to allow any influx
 1587  * knotes time to "settle".
 1588  */
 1589 void
 1590 knlist_clear(struct knlist *knl, int islocked)
 1591 {
 1592         struct knote *kn;
 1593         struct kqueue *kq;
 1594 
 1595         if (islocked)
 1596                 mtx_assert(knl->kl_lock, MA_OWNED);
 1597         else {
 1598                 mtx_assert(knl->kl_lock, MA_NOTOWNED);
 1599 again:          /* need to reacquire the lock since we dropped it */
 1600                 mtx_lock(knl->kl_lock);
 1601         }
 1602 
 1603         SLIST_FOREACH(kn, &knl->kl_list, kn_selnext) {
 1604                 kq = kn->kn_kq;
 1605                 KQ_LOCK(kq);
 1606                 if ((kn->kn_status & KN_INFLUX) &&
 1607                     (kn->kn_status & KN_DETACHED) != KN_DETACHED) {
 1608                         KQ_UNLOCK(kq);
 1609                         continue;
 1610                 }
 1611                 /* Make sure cleared knotes disappear soon */
 1612                 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
 1613                 knlist_remove_kq(knl, kn, 1, 1);
 1614                 KQ_UNLOCK(kq);
 1615                 kq = NULL;
 1616         }
 1617 
 1618         if (!SLIST_EMPTY(&knl->kl_list)) {
 1619                 /* there are still KN_INFLUX knotes remaining */
 1620                 kn = SLIST_FIRST(&knl->kl_list);
 1621                 kq = kn->kn_kq;
 1622                 KQ_LOCK(kq);
 1623                 KASSERT(kn->kn_status & KN_INFLUX,
 1624                     ("knote removed w/o list lock"));
 1625                 mtx_unlock(knl->kl_lock);
 1626                 kq->kq_state |= KQ_FLUXWAIT;
 1627                 msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqkclr", 0);
 1628                 kq = NULL;
 1629                 goto again;
 1630         }
 1631 
 1632         SLIST_INIT(&knl->kl_list);
 1633 
 1634         if (islocked)
 1635                 mtx_assert(knl->kl_lock, MA_OWNED);
 1636         else {
 1637                 mtx_unlock(knl->kl_lock);
 1638                 mtx_assert(knl->kl_lock, MA_NOTOWNED);
 1639         }
 1640 }
 1641 
 1642 /*
 1643  * Remove all knotes referencing a specified fd.  This must be called with
 1644  * the FILEDESC lock held; that prevents a race where a new fd comes along,
 1645  * occupies the entry, and we attach a knote to the wrong fd.
 1646  */
 1647 void
 1648 knote_fdclose(struct thread *td, int fd)
 1649 {
 1650         struct filedesc *fdp = td->td_proc->p_fd;
 1651         struct kqueue *kq;
 1652         struct knote *kn;
 1653         int influx;
 1654 
 1655         FILEDESC_LOCK_ASSERT(fdp, MA_OWNED);
 1656 
 1657         /*
 1658          * We shouldn't have to worry about new kevents appearing on the
 1659          * fd since the filedesc is locked.
 1660          */
 1661         SLIST_FOREACH(kq, &fdp->fd_kqlist, kq_list) {
 1662                 KQ_LOCK(kq);
 1663 
 1664 again:
 1665                 influx = 0;
 1666                 while (kq->kq_knlistsize > fd &&
 1667                     (kn = SLIST_FIRST(&kq->kq_knlist[fd])) != NULL) {
 1668                         if (kn->kn_status & KN_INFLUX) {
 1669                                 /* someone else might be waiting on our knote */
 1670                                 if (influx)
 1671                                         wakeup(kq);
 1672                                 kq->kq_state |= KQ_FLUXWAIT;
 1673                                 msleep(kq, &kq->kq_lock, PSOCK, "kqflxwt", 0);
 1674                                 goto again;
 1675                         }
 1676                         kn->kn_status |= KN_INFLUX;
 1677                         KQ_UNLOCK(kq);
 1678                         if (!(kn->kn_status & KN_DETACHED))
 1679                                 kn->kn_fop->f_detach(kn);
 1680                         knote_drop(kn, td);
 1681                         influx = 1;
 1682                         KQ_LOCK(kq);
 1683                 }
 1684                 KQ_UNLOCK_FLUX(kq);
 1685         }
 1686 }
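
      /*
       * Illustrative call site (sketch of the close() path, not part of
       * this file): the file-descriptor code is expected to invoke this
       * with the table locked, roughly:
       *
       *	FILEDESC_LOCK(fdp);
       *	knote_fdclose(td, fd);
       *	... free the descriptor slot ...
       *	FILEDESC_UNLOCK(fdp);
       */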
 1687 
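      /*
       * Link a knote onto the kqueue's per-fd list or, for non-fd filters,
       * onto its hash chain.  Returns ENOMEM if the backing array or hash
       * has not yet been grown to cover this identifier.
       */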
 1688 static int
 1689 knote_attach(struct knote *kn, struct kqueue *kq)
 1690 {
 1691         struct klist *list;
 1692 
 1693         KASSERT(kn->kn_status & KN_INFLUX, ("knote not marked INFLUX"));
 1694         KQ_OWNED(kq);
 1695 
 1696         if (kn->kn_fop->f_isfd) {
 1697                 if (kn->kn_id >= kq->kq_knlistsize)
 1698                         return ENOMEM;
 1699                 list = &kq->kq_knlist[kn->kn_id];
 1700         } else {
 1701                 if (kq->kq_knhash == NULL)
 1702                         return ENOMEM;
 1703                 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
 1704         }
 1705 
 1706         SLIST_INSERT_HEAD(list, kn, kn_link);
 1707 
 1708         return 0;
 1709 }
 1710 
 1711 /*
 1712  * The knote must already have been detached using the f_detach method.
 1713  * No lock needs to be held; it is assumed that the KN_INFLUX flag is set
 1714  * to prevent other threads from removing it.
 1715  */
 1716 static void
 1717 knote_drop(struct knote *kn, struct thread *td)
 1718 {
 1719         struct kqueue *kq;
 1720         struct klist *list;
 1721 
 1722         kq = kn->kn_kq;
 1723 
 1724         KQ_NOTOWNED(kq);
 1725         KASSERT((kn->kn_status & KN_INFLUX) == KN_INFLUX,
 1726             ("knote_drop called without KN_INFLUX set in kn_status"));
 1727 
 1728         KQ_LOCK(kq);
 1729         if (kn->kn_fop->f_isfd)
 1730                 list = &kq->kq_knlist[kn->kn_id];
 1731         else
 1732                 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
 1733 
 1734         SLIST_REMOVE(list, kn, knote, kn_link);
 1735         if (kn->kn_status & KN_QUEUED)
 1736                 knote_dequeue(kn);
 1737         KQ_UNLOCK_FLUX(kq);
 1738 
 1739         if (kn->kn_fop->f_isfd) {
 1740                 fdrop(kn->kn_fp, td);
 1741                 kn->kn_fp = NULL;
 1742         }
 1743         kqueue_fo_release(kn->kn_kevent.filter);
 1744         kn->kn_fop = NULL;
 1745         knote_free(kn);
 1746 }
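
      /*
       * For reference, the canonical removal sequence used by the callers
       * above (kqueue_close() and knote_fdclose()) is:
       *
       *	KQ_LOCK(kq);
       *	kn->kn_status |= KN_INFLUX;
       *	KQ_UNLOCK(kq);
       *	if (!(kn->kn_status & KN_DETACHED))
       *		kn->kn_fop->f_detach(kn);
       *	knote_drop(kn, td);
       */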
 1747 
 1748 static void
 1749 knote_enqueue(struct knote *kn)
 1750 {
 1751         struct kqueue *kq = kn->kn_kq;
 1752 
 1753         KQ_OWNED(kn->kn_kq);
 1754         KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));
 1755 
 1756         TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
 1757         kn->kn_status |= KN_QUEUED;
 1758         kq->kq_count++;
 1759         kqueue_wakeup(kq);
 1760 }
 1761 
 1762 static void
 1763 knote_dequeue(struct knote *kn)
 1764 {
 1765         struct kqueue *kq = kn->kn_kq;
 1766 
 1767         KQ_OWNED(kn->kn_kq);
 1768         KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
 1769 
 1770         TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
 1771         kn->kn_status &= ~KN_QUEUED;
 1772         kq->kq_count--;
 1773 }
 1774 
 1775 static void
 1776 knote_init(void)
 1777 {
 1778 
 1779         knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL,
 1780             NULL, NULL, UMA_ALIGN_PTR, 0);
 1781 }
 1782 SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL)
 1783 
 1784 static struct knote *
 1785 knote_alloc(int waitok)
 1786 {
 1787         return ((struct knote *)uma_zalloc(knote_zone,
 1788             (waitok ? M_WAITOK : M_NOWAIT)|M_ZERO));
 1789 }
 1790 
 1791 static void
 1792 knote_free(struct knote *kn)
 1793 {
 1794         if (kn != NULL)
 1795                 uma_zfree(knote_zone, kn);
 1796 }
