FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_event.c


    1 /*-
    2  * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  *
   14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   24  * SUCH DAMAGE.
   25  *
   26  * $FreeBSD: src/sys/kern/kern_event.c,v 1.2.2.10 2004/04/04 07:03:14 cperciva Exp $
   27  */
   28 
   29 #include <sys/param.h>
   30 #include <sys/systm.h>
   31 #include <sys/kernel.h>
   32 #include <sys/proc.h>
   33 #include <sys/malloc.h> 
   34 #include <sys/unistd.h>
   35 #include <sys/file.h>
   36 #include <sys/lock.h>
   37 #include <sys/fcntl.h>
   38 #include <sys/queue.h>
   39 #include <sys/event.h>
   40 #include <sys/eventvar.h>
   41 #include <sys/protosw.h>
   42 #include <sys/socket.h>
   43 #include <sys/socketvar.h>
   44 #include <sys/stat.h>
   45 #include <sys/sysctl.h>
   46 #include <sys/sysproto.h>
   47 #include <sys/thread.h>
   48 #include <sys/uio.h>
   49 #include <sys/signalvar.h>
   50 #include <sys/filio.h>
   51 #include <sys/ktr.h>
   52 
   53 #include <sys/thread2.h>
   54 #include <sys/file2.h>
   55 #include <sys/mplock2.h>
   56 
   57 /*
   58  * Global token for kqueue subsystem
   59  */
   60 #if 0
   61 struct lwkt_token kq_token = LWKT_TOKEN_INITIALIZER(kq_token);
   62 SYSCTL_LONG(_lwkt, OID_AUTO, kq_collisions,
   63     CTLFLAG_RW, &kq_token.t_collisions, 0,
   64     "Collision counter of kq_token");
   65 #endif
   66 
   67 MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");
   68 
   69 struct kevent_copyin_args {
   70         struct kevent_args      *ka;
   71         int                     pchanges;
   72 };
   73 
   74 static int      kqueue_sleep(struct kqueue *kq, struct timespec *tsp);
   75 static int      kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
   76                     struct knote *marker);
   77 static int      kqueue_read(struct file *fp, struct uio *uio,
   78                     struct ucred *cred, int flags);
   79 static int      kqueue_write(struct file *fp, struct uio *uio,
   80                     struct ucred *cred, int flags);
   81 static int      kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
   82                     struct ucred *cred, struct sysmsg *msg);
   83 static int      kqueue_kqfilter(struct file *fp, struct knote *kn);
   84 static int      kqueue_stat(struct file *fp, struct stat *st,
   85                     struct ucred *cred);
   86 static int      kqueue_close(struct file *fp);
   87 static void     kqueue_wakeup(struct kqueue *kq);
   88 static int      filter_attach(struct knote *kn);
   89 static int      filter_event(struct knote *kn, long hint);
   90 
   91 /*
   92  * MPSAFE
   93  */
   94 static struct fileops kqueueops = {
   95         .fo_read = kqueue_read,
   96         .fo_write = kqueue_write,
   97         .fo_ioctl = kqueue_ioctl,
   98         .fo_kqfilter = kqueue_kqfilter,
   99         .fo_stat = kqueue_stat,
  100         .fo_close = kqueue_close,
  101         .fo_shutdown = nofo_shutdown
  102 };
  103 
  104 static void     knote_attach(struct knote *kn);
  105 static void     knote_drop(struct knote *kn);
  106 static void     knote_detach_and_drop(struct knote *kn);
  107 static void     knote_enqueue(struct knote *kn);
  108 static void     knote_dequeue(struct knote *kn);
  109 static struct   knote *knote_alloc(void);
  110 static void     knote_free(struct knote *kn);
  111 
  112 static void     filt_kqdetach(struct knote *kn);
  113 static int      filt_kqueue(struct knote *kn, long hint);
  114 static int      filt_procattach(struct knote *kn);
  115 static void     filt_procdetach(struct knote *kn);
  116 static int      filt_proc(struct knote *kn, long hint);
  117 static int      filt_fileattach(struct knote *kn);
  118 static void     filt_timerexpire(void *knx);
  119 static int      filt_timerattach(struct knote *kn);
  120 static void     filt_timerdetach(struct knote *kn);
  121 static int      filt_timer(struct knote *kn, long hint);
  122 
  123 static struct filterops file_filtops =
  124         { FILTEROP_ISFD, filt_fileattach, NULL, NULL };
  125 static struct filterops kqread_filtops =
  126         { FILTEROP_ISFD, NULL, filt_kqdetach, filt_kqueue };
  127 static struct filterops proc_filtops =
  128         { 0, filt_procattach, filt_procdetach, filt_proc };
  129 static struct filterops timer_filtops =
  130         { 0, filt_timerattach, filt_timerdetach, filt_timer };
  131 
  132 static int              kq_ncallouts = 0;
  133 static int              kq_calloutmax = (4 * 1024);
  134 SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
  135     &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
  136 static int              kq_checkloop = 1000000;
  137 SYSCTL_INT(_kern, OID_AUTO, kq_checkloop, CTLFLAG_RW,
  138     &kq_checkloop, 0, "Maximum number of loops for kqueue scan");
  139 
  140 #define KNOTE_ACTIVATE(kn) do {                                         \
  141         kn->kn_status |= KN_ACTIVE;                                     \
  142         if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)           \
  143                 knote_enqueue(kn);                                      \
  144 } while(0)
  145 
  146 #define KN_HASHSIZE             64              /* XXX should be tunable */
  147 #define KN_HASH(val, mask)      (((val) ^ (val >> 8)) & (mask))
  148 
  149 extern struct filterops aio_filtops;
  150 extern struct filterops sig_filtops;
  151 
  152 /*
  153  * Table for all system-defined filters.
  154  */
  155 static struct filterops *sysfilt_ops[] = {
  156         &file_filtops,                  /* EVFILT_READ */
  157         &file_filtops,                  /* EVFILT_WRITE */
  158         &aio_filtops,                   /* EVFILT_AIO */
  159         &file_filtops,                  /* EVFILT_VNODE */
  160         &proc_filtops,                  /* EVFILT_PROC */
  161         &sig_filtops,                   /* EVFILT_SIGNAL */
  162         &timer_filtops,                 /* EVFILT_TIMER */
  163         &file_filtops,                  /* EVFILT_EXCEPT */
  164 };
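
      /*
       * Editor's note: the user-visible filter numbers are negative
       * (EVFILT_READ is -1, EVFILT_WRITE is -2, and so on), so
       * kqueue_register() below indexes this table with ~kev->filter,
       * e.g. ~(-1) == 0 selects file_filtops for EVFILT_READ.
       */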
  165 
  166 static int
  167 filt_fileattach(struct knote *kn)
  168 {
  169         return (fo_kqfilter(kn->kn_fp, kn));
  170 }
  171 
  172 /*
  173  * MPSAFE
  174  */
  175 static int
  176 kqueue_kqfilter(struct file *fp, struct knote *kn)
  177 {
  178         struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
  179 
  180         if (kn->kn_filter != EVFILT_READ)
  181                 return (EOPNOTSUPP);
  182 
  183         kn->kn_fop = &kqread_filtops;
  184         knote_insert(&kq->kq_kqinfo.ki_note, kn);
  185         return (0);
  186 }
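
      /*
       * Example (editor's sketch): this filter is what makes a kqueue
       * descriptor itself pollable, allowing kqueues to be nested from
       * userland.  kq_inner and kq_outer are assumed descriptors:
       *
       *      struct kevent kev;
       *      EV_SET(&kev, kq_inner, EVFILT_READ, EV_ADD, 0, 0, NULL);
       *      kevent(kq_outer, &kev, 1, NULL, 0, NULL);
       *      // kq_outer becomes readable when kq_inner has pending
       *      // events; filt_kqueue() reports kq_count in kev.data.
       */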
  187 
  188 static void
  189 filt_kqdetach(struct knote *kn)
  190 {
  191         struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
  192 
  193         knote_remove(&kq->kq_kqinfo.ki_note, kn);
  194 }
  195 
  196 /*ARGSUSED*/
  197 static int
  198 filt_kqueue(struct knote *kn, long hint)
  199 {
  200         struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
  201 
  202         kn->kn_data = kq->kq_count;
  203         return (kn->kn_data > 0);
  204 }
  205 
  206 static int
  207 filt_procattach(struct knote *kn)
  208 {
  209         struct proc *p;
  210         int immediate;
  211 
  212         immediate = 0;
  213         p = pfind(kn->kn_id);
  214         if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) {
  215                 p = zpfind(kn->kn_id);
  216                 immediate = 1;
  217         }
  218         if (p == NULL) {
  219                 return (ESRCH);
  220         }
  221         if (!PRISON_CHECK(curthread->td_ucred, p->p_ucred)) {
  222                 if (p)
  223                         PRELE(p);
  224                 return (EACCES);
  225         }
  226 
  227         lwkt_gettoken(&p->p_token);
  228         kn->kn_ptr.p_proc = p;
  229         kn->kn_flags |= EV_CLEAR;               /* automatically set */
  230 
  231         /*
  232          * internal flag indicating registration done by kernel
  233          */
  234         if (kn->kn_flags & EV_FLAG1) {
  235                 kn->kn_data = kn->kn_sdata;             /* ppid */
  236                 kn->kn_fflags = NOTE_CHILD;
  237                 kn->kn_flags &= ~EV_FLAG1;
  238         }
  239 
  240         knote_insert(&p->p_klist, kn);
  241 
  242         /*
  243          * Immediately activate any exit notes if the target process is a
  244          * zombie.  This is necessary to handle the case where the target
  245          * process, e.g. a child, dies before the kevent is registered.
  246          */
  247         if (immediate && filt_proc(kn, NOTE_EXIT))
  248                 KNOTE_ACTIVATE(kn);
  249         lwkt_reltoken(&p->p_token);
  250         PRELE(p);
  251 
  252         return (0);
  253 }
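
      /*
       * Example (editor's sketch, not part of the original source): a
       * typical userland registration against this filter, where pid is
       * an assumed child process id:
       *
       *      struct kevent kev;
       *      int kq = kqueue();
       *      EV_SET(&kev, pid, EVFILT_PROC, EV_ADD | EV_ENABLE,
       *             NOTE_EXIT, 0, NULL);
       *      kevent(kq, &kev, 1, NULL, 0, NULL);  // register only
       *      // A later kevent() scan returns the note once the child
       *      // exits; the zombie (zpfind) case above makes this work
       *      // even if the child died before the registration.
       */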
  254 
  255 /*
  256  * The knote may be attached to a different process, which may exit,
  257  * leaving nothing for the knote to be attached to.  So when the process
  258  * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
  259  * it will be deleted when read out.  However, as part of the knote deletion,
  260  * this routine is called, so a check is needed to avoid actually performing
  261  * a detach, because the original process does not exist any more.
  262  */
  263 static void
  264 filt_procdetach(struct knote *kn)
  265 {
  266         struct proc *p;
  267 
  268         if (kn->kn_status & KN_DETACHED)
  269                 return;
  270         p = kn->kn_ptr.p_proc;
  271         knote_remove(&p->p_klist, kn);
  272 }
  273 
  274 static int
  275 filt_proc(struct knote *kn, long hint)
  276 {
  277         u_int event;
  278 
  279         /*
  280          * mask off extra data
  281          */
  282         event = (u_int)hint & NOTE_PCTRLMASK;
  283 
  284         /*
  285          * if the user is interested in this event, record it.
  286          */
  287         if (kn->kn_sfflags & event)
  288                 kn->kn_fflags |= event;
  289 
  290         /*
  291          * Process is gone, so flag the event as finished.  Detach the
  292          * knote from the process now because the process itself will
  293          * soon be gone.
  294          */
  295         if (event == NOTE_EXIT) {
  296                 struct proc *p = kn->kn_ptr.p_proc;
  297                 if ((kn->kn_status & KN_DETACHED) == 0) {
  298                         PHOLD(p);
  299                         knote_remove(&p->p_klist, kn);
  300                         kn->kn_status |= KN_DETACHED;
  301                         kn->kn_data = p->p_xstat;
  302                         kn->kn_ptr.p_proc = NULL;
  303                         PRELE(p);
  304                 }
  305                 kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT); 
  306                 return (1);
  307         }
  308 
  309         /*
  310          * process forked, and user wants to track the new process,
  311          * so attach a new knote to it, and immediately report an
  312          * event with the parent's pid.
  313          */
  314         if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
  315                 struct kevent kev;
  316                 int error;
  317 
  318                 /*
  319                  * register knote with new process.
  320                  */
  321                 kev.ident = hint & NOTE_PDATAMASK;      /* pid */
  322                 kev.filter = kn->kn_filter;
  323                 kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
  324                 kev.fflags = kn->kn_sfflags;
  325                 kev.data = kn->kn_id;                   /* parent */
  326                 kev.udata = kn->kn_kevent.udata;        /* preserve udata */
  327                 error = kqueue_register(kn->kn_kq, &kev);
  328                 if (error)
  329                         kn->kn_fflags |= NOTE_TRACKERR;
  330         }
  331 
  332         return (kn->kn_fflags != 0);
  333 }
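
      /*
       * Editor's note: the NOTE_TRACK path above is how fork()s are
       * followed.  The kernel-side registration (EV_FLAG1) arranges,
       * via filt_procattach(), for the child's knote to report
       * NOTE_CHILD with the parent's pid in kev.data; a failure to
       * attach surfaces as NOTE_TRACKERR on the parent's note.
       */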
  334 
  335 /*
  336  * The callout interlocks with callout_terminate() but can still
  337  * race a deletion so if KN_DELETING is set we just don't touch
  338  * the knote.
  339  */
  340 static void
  341 filt_timerexpire(void *knx)
  342 {
  343         struct lwkt_token *tok;
  344         struct knote *kn = knx;
  345         struct callout *calloutp;
  346         struct timeval tv;
  347         int tticks;
  348 
  349         tok = lwkt_token_pool_lookup(kn->kn_kq);
  350         lwkt_gettoken(tok);
  351         if ((kn->kn_status & KN_DELETING) == 0) {
  352                 kn->kn_data++;
  353                 KNOTE_ACTIVATE(kn);
  354 
  355                 if ((kn->kn_flags & EV_ONESHOT) == 0) {
  356                         tv.tv_sec = kn->kn_sdata / 1000;
  357                         tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
  358                         tticks = tvtohz_high(&tv);
  359                         calloutp = (struct callout *)kn->kn_hook;
  360                         callout_reset(calloutp, tticks, filt_timerexpire, kn);
  361                 }
  362         }
  363         lwkt_reltoken(tok);
  364 }
  365 
  366 /*
  367  * data contains amount of time to sleep, in milliseconds
  368  */ 
  369 static int
  370 filt_timerattach(struct knote *kn)
  371 {
  372         struct callout *calloutp;
  373         struct timeval tv;
  374         int tticks;
  375 
  376         if (kq_ncallouts >= kq_calloutmax) {
  377                 kn->kn_hook = NULL;
  378                 return (ENOMEM);
  379         }
  380         kq_ncallouts++;
  381 
  382         tv.tv_sec = kn->kn_sdata / 1000;
  383         tv.tv_usec = (kn->kn_sdata % 1000) * 1000;
  384         tticks = tvtohz_high(&tv);
  385 
  386         kn->kn_flags |= EV_CLEAR;               /* automatically set */
  387         calloutp = kmalloc(sizeof(*calloutp), M_KQUEUE, M_WAITOK);
  388         callout_init(calloutp);
  389         kn->kn_hook = (caddr_t)calloutp;
  390         callout_reset(calloutp, tticks, filt_timerexpire, kn);
  391 
  392         return (0);
  393 }
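
      /*
       * Example (editor's sketch): kn_sdata is interpreted above as
       * milliseconds, so a 500ms periodic timer would be armed from
       * userland roughly as:
       *
       *      struct kevent kev;
       *      EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD, 0, 500, NULL);
       *      kevent(kq, &kev, 1, NULL, 0, NULL);
       *      // EV_CLEAR is forced at attach time, so each scan returns
       *      // the expiration count (kn_data) accumulated since the
       *      // previous scan and the timer keeps running.
       */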
  394 
  395 /*
  396  * This function is called with the knote flag-locked (KN_PROCESSING
  397  * set), but it is still possible to race a callout event due to the
  398  * callback blocking.  We must call callout_terminate() instead of
  399  * callout_stop() to deal with the race.
  400  */
  401 static void
  402 filt_timerdetach(struct knote *kn)
  403 {
  404         struct callout *calloutp;
  405 
  406         calloutp = (struct callout *)kn->kn_hook;
  407         callout_terminate(calloutp);
  408         kfree(calloutp, M_KQUEUE);
  409         kq_ncallouts--;
  410 }
  411 
  412 static int
  413 filt_timer(struct knote *kn, long hint)
  414 {
  415 
  416         return (kn->kn_data != 0);
  417 }
  418 
  419 /*
  420  * Acquire a knote, return non-zero on success, 0 on failure.
  421  *
  422  * If we cannot acquire the knote we sleep and return 0.  The knote
  423  * may be stale on return in this case and the caller must restart
  424  * whatever loop they are in.
  425  *
  426  * Related kq token must be held.
  427  */
  428 static __inline
  429 int
  430 knote_acquire(struct knote *kn)
  431 {
  432         if (kn->kn_status & KN_PROCESSING) {
  433                 kn->kn_status |= KN_WAITING | KN_REPROCESS;
  434                 tsleep(kn, 0, "kqepts", hz);
  435                 /* knote may be stale now */
  436                 return(0);
  437         }
  438         kn->kn_status |= KN_PROCESSING;
  439         return(1);
  440 }
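
      /*
       * Editor's note: a failed acquire means the list being scanned may
       * have changed while we slept, so callers restart their scan, as
       * kqueue_register() does below:
       *
       *      again:
       *              SLIST_FOREACH(kn, list, kn_link) {
       *                      if (...) {
       *                              if (knote_acquire(kn) == 0)
       *                                      goto again;
       *                              break;
       *                      }
       *              }
       */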
  441 
  442 /*
  443  * Release an acquired knote, clearing KN_PROCESSING and handling any
  444  * KN_REPROCESS events.
  445  *
  446  * Caller must be holding the related kq token
  447  *
  448  * Non-zero is returned if the knote is destroyed or detached.
  449  */
  450 static __inline
  451 int
  452 knote_release(struct knote *kn)
  453 {
  454         while (kn->kn_status & KN_REPROCESS) {
  455                 kn->kn_status &= ~KN_REPROCESS;
  456                 if (kn->kn_status & KN_WAITING) {
  457                         kn->kn_status &= ~KN_WAITING;
  458                         wakeup(kn);
  459                 }
  460                 if (kn->kn_status & KN_DELETING) {
  461                         knote_detach_and_drop(kn);
  462                         return(1);
  463                         /* NOT REACHED */
  464                 }
  465                 if (filter_event(kn, 0))
  466                         KNOTE_ACTIVATE(kn);
  467         }
  468         if (kn->kn_status & KN_DETACHED) {
  469                 kn->kn_status &= ~KN_PROCESSING;
  470                 return(1);
  471         } else {
  472                 kn->kn_status &= ~KN_PROCESSING;
  473                 return(0);
  474         }
  475 }
  476 
  477 /*
  478  * Initialize a kqueue.
  479  *
  480  * NOTE: The lwp/proc code initializes a kqueue for select/poll ops.
  481  *
  482  * MPSAFE
  483  */
  484 void
  485 kqueue_init(struct kqueue *kq, struct filedesc *fdp)
  486 {
  487         TAILQ_INIT(&kq->kq_knpend);
  488         TAILQ_INIT(&kq->kq_knlist);
  489         kq->kq_count = 0;
  490         kq->kq_fdp = fdp;
  491         SLIST_INIT(&kq->kq_kqinfo.ki_note);
  492 }
  493 
  494 /*
  495  * Terminate a kqueue.  Freeing the actual kq itself is left up to the
  496  * caller (it might be embedded in a lwp so we don't do it here).
  497  *
  498  * The kq's knlist must be completely eradicated so block on any
  499  * processing races.
  500  */
  501 void
  502 kqueue_terminate(struct kqueue *kq)
  503 {
  504         struct lwkt_token *tok;
  505         struct knote *kn;
  506 
  507         tok = lwkt_token_pool_lookup(kq);
  508         lwkt_gettoken(tok);
  509         while ((kn = TAILQ_FIRST(&kq->kq_knlist)) != NULL) {
  510                 if (knote_acquire(kn))
  511                         knote_detach_and_drop(kn);
  512         }
  513         if (kq->kq_knhash) {
  514                 hashdestroy(kq->kq_knhash, M_KQUEUE, kq->kq_knhashmask);
  515                 kq->kq_knhash = NULL;
  516                 kq->kq_knhashmask = 0;
  517         }
  518         lwkt_reltoken(tok);
  519 }
  520 
  521 /*
  522  * MPSAFE
  523  */
  524 int
  525 sys_kqueue(struct kqueue_args *uap)
  526 {
  527         struct thread *td = curthread;
  528         struct kqueue *kq;
  529         struct file *fp;
  530         int fd, error;
  531 
  532         error = falloc(td->td_lwp, &fp, &fd);
  533         if (error)
  534                 return (error);
  535         fp->f_flag = FREAD | FWRITE;
  536         fp->f_type = DTYPE_KQUEUE;
  537         fp->f_ops = &kqueueops;
  538 
  539         kq = kmalloc(sizeof(struct kqueue), M_KQUEUE, M_WAITOK | M_ZERO);
  540         kqueue_init(kq, td->td_proc->p_fd);
  541         fp->f_data = kq;
  542 
  543         fsetfd(kq->kq_fdp, fp, fd);
  544         uap->sysmsg_result = fd;
  545         fdrop(fp);
  546         return (error);
  547 }
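
      /*
       * Example (editor's sketch): a minimal userland round trip through
       * the two syscalls implemented in this file; fd is an assumed
       * readable descriptor such as a socket:
       *
       *      #include <sys/types.h>
       *      #include <sys/event.h>
       *      #include <sys/time.h>
       *
       *      struct kevent change, event;
       *      int kq = kqueue();                      // sys_kqueue()
       *      EV_SET(&change, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
       *      int n = kevent(kq, &change, 1, &event, 1, NULL);
       *      // n == 1 once fd is readable; event.data holds the number
       *      // of bytes available to read.
       */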
  548 
  549 /*
  550  * Copy 'count' items into the destination list pointed to by uap->eventlist.
  551  */
  552 static int
  553 kevent_copyout(void *arg, struct kevent *kevp, int count, int *res)
  554 {
  555         struct kevent_copyin_args *kap;
  556         int error;
  557 
  558         kap = (struct kevent_copyin_args *)arg;
  559 
  560         error = copyout(kevp, kap->ka->eventlist, count * sizeof(*kevp));
  561         if (error == 0) {
  562                 kap->ka->eventlist += count;
  563                 *res += count;
  564         } else {
  565                 *res = -1;
  566         }
  567 
  568         return (error);
  569 }
  570 
  571 /*
  572  * Copy at most 'max' items from the list pointed to by kap->changelist,
  573  * return number of items in 'events'.
  574  */
  575 static int
  576 kevent_copyin(void *arg, struct kevent *kevp, int max, int *events)
  577 {
  578         struct kevent_copyin_args *kap;
  579         int error, count;
  580 
  581         kap = (struct kevent_copyin_args *)arg;
  582 
  583         count = min(kap->ka->nchanges - kap->pchanges, max);
  584         error = copyin(kap->ka->changelist, kevp, count * sizeof *kevp);
  585         if (error == 0) {
  586                 kap->ka->changelist += count;
  587                 kap->pchanges += count;
  588                 *events = count;
  589         }
  590 
  591         return (error);
  592 }
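
      /*
       * Editor's note: kern_kevent() receives these two routines as the
       * k_copyin_fn/k_copyout_fn function pointers rather than calling
       * them directly, which lets the select/poll emulation (see the
       * sys_poll() and doselect() references below) reuse the kqueue
       * core with its own copy routines.
       */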
  593 
  594 /*
  595  * MPSAFE
  596  */
  597 int
  598 kern_kevent(struct kqueue *kq, int nevents, int *res, void *uap,
  599             k_copyin_fn kevent_copyinfn, k_copyout_fn kevent_copyoutfn,
  600             struct timespec *tsp_in)
  601 {
  602         struct kevent *kevp;
  603         struct timespec *tsp;
  604         int i, n, total, error, nerrors = 0;
  605         int lres;
  606         int limit = kq_checkloop;
  607         struct kevent kev[KQ_NEVENTS];
  608         struct knote marker;
  609         struct lwkt_token *tok;
  610 
  611         if (tsp_in == NULL || tsp_in->tv_sec || tsp_in->tv_nsec)
  612                 atomic_set_int(&curthread->td_mpflags, TDF_MP_BATCH_DEMARC);
  613 
  614 
  615         tsp = tsp_in;
  616         *res = 0;
  617 
  618         tok = lwkt_token_pool_lookup(kq);
  619         lwkt_gettoken(tok);
  620         for ( ;; ) {
  621                 n = 0;
  622                 error = kevent_copyinfn(uap, kev, KQ_NEVENTS, &n);
  623                 if (error)
  624                         goto done;
  625                 if (n == 0)
  626                         break;
  627                 for (i = 0; i < n; i++) {
  628                         kevp = &kev[i];
  629                         kevp->flags &= ~EV_SYSFLAGS;
  630                         error = kqueue_register(kq, kevp);
  631 
  632                         /*
  633                          * If a registration returns an error we
  634                          * immediately post the error.  The kevent()
  635                          * call itself will fail with the error if
  636                          * no space is available for posting.
  637                          *
  638                          * Such errors normally bypass the timeout/blocking
  639                          * code.  However, if the copyoutfn function refuses
  640                          * to post the error (see sys_poll()), then we
  641                          * ignore it too.
  642                          */
  643                         if (error) {
  644                                 kevp->flags = EV_ERROR;
  645                                 kevp->data = error;
  646                                 lres = *res;
  647                                 kevent_copyoutfn(uap, kevp, 1, res);
  648                                 if (*res < 0) {
  649                                         goto done;
  650                                 } else if (lres != *res) {
  651                                         nevents--;
  652                                         nerrors++;
  653                                 }
  654                         }
  655                 }
  656         }
  657         if (nerrors) {
  658                 error = 0;
  659                 goto done;
  660         }
  661 
  662         /*
  663          * Acquire/wait for events - setup timeout
  664          */
  665         if (tsp != NULL) {
  666                 struct timespec ats;
  667 
  668                 if (tsp->tv_sec || tsp->tv_nsec) {
  669                         getnanouptime(&ats);
  670                         timespecadd(tsp, &ats);         /* tsp = target time */
  671                 }
  672         }
  673 
  674         /*
  675          * Loop as required.
  676          *
  677          * Collect as many events as we can. Sleeping on successive
  678          * loops is disabled if copyoutfn has incremented (*res).
  679          *
  680          * The loop stops if an error occurs, all events have been
  681          * scanned (the marker has been reached), or fewer than the
  682          * maximum number of events is found.
  683          *
  684          * The copyoutfn function does not have to increment (*res) in
  685          * order for the loop to continue.
  686          *
  687          * NOTE: doselect() usually passes 0x7FFFFFFF for nevents.
  688          */
  689         total = 0;
  690         error = 0;
  691         marker.kn_filter = EVFILT_MARKER;
  692         marker.kn_status = KN_PROCESSING;
  693         TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
  694         while ((n = nevents - total) > 0) {
  695                 if (n > KQ_NEVENTS)
  696                         n = KQ_NEVENTS;
  697 
  698                 /*
  699                  * If no events are pending sleep until timeout (if any)
  700                  * or an event occurs.
  701                  *
  702                  * After the sleep completes the marker is moved to the
  703                  * end of the list, making any received events available
  704                  * to our scan.
  705                  */
  706                 if (kq->kq_count == 0 && *res == 0) {
  707                         error = kqueue_sleep(kq, tsp);
  708                         if (error)
  709                                 break;
  710 
  711                         TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
  712                         TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
  713                 }
  714 
  715                 /*
  716                  * Process all received events
  717                  * Account for all non-spurious events in our total
  718                  */
  719                 i = kqueue_scan(kq, kev, n, &marker);
  720                 if (i) {
  721                         lres = *res;
  722                         error = kevent_copyoutfn(uap, kev, i, res);
  723                         total += *res - lres;
  724                         if (error)
  725                                 break;
  726                 }
  727                 if (limit && --limit == 0)
  728                         panic("kqueue: checkloop failed i=%d", i);
  729 
  730                 /*
  731                  * Normally when fewer events are returned than requested
  732                  * we can stop.  However, if only spurious events were
  733                  * collected the copyout will not bump (*res) and we have
  734                  * to continue.
  735                  */
  736                 if (i < n && *res)
  737                         break;
  738 
  739                 /*
  740                  * Deal with an edge case where spurious events can cause
  741                  * a loop to occur without moving the marker.  This can
  742                  * prevent kqueue_scan() from picking up new events which
  743                  * race us.  We must be sure to move the marker for this
  744                  * case.
  745                  *
  746                  * NOTE: We do not want to move the marker if events
  747                  *       were scanned because normal kqueue operations
  748                  *       may reactivate events.  Moving the marker in
  749                  *       that case could result in duplicates for the
  750                  *       same event.
  751                  */
  752                 if (i == 0) {
  753                         TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
  754                         TAILQ_INSERT_TAIL(&kq->kq_knpend, &marker, kn_tqe);
  755                 }
  756         }
  757         TAILQ_REMOVE(&kq->kq_knpend, &marker, kn_tqe);
  758 
  759         /* Timeouts do not return EWOULDBLOCK. */
  760         if (error == EWOULDBLOCK)
  761                 error = 0;
  762 
  763 done:
  764         lwkt_reltoken(tok);
  765         return (error);
  766 }
  767 
  768 /*
  769  * MPALMOSTSAFE
  770  */
  771 int
  772 sys_kevent(struct kevent_args *uap)
  773 {
  774         struct thread *td = curthread;
  775         struct proc *p = td->td_proc;
  776         struct timespec ts, *tsp;
  777         struct kqueue *kq;
  778         struct file *fp = NULL;
  779         struct kevent_copyin_args *kap, ka;
  780         int error;
  781 
  782         if (uap->timeout) {
  783                 error = copyin(uap->timeout, &ts, sizeof(ts));
  784                 if (error)
  785                         return (error);
  786                 tsp = &ts;
  787         } else {
  788                 tsp = NULL;
  789         }
  790         fp = holdfp(p->p_fd, uap->fd, -1);
  791         if (fp == NULL)
  792                 return (EBADF);
  793         if (fp->f_type != DTYPE_KQUEUE) {
  794                 fdrop(fp);
  795                 return (EBADF);
  796         }
  797 
  798         kq = (struct kqueue *)fp->f_data;
  799 
  800         kap = &ka;
  801         kap->ka = uap;
  802         kap->pchanges = 0;
  803 
  804         error = kern_kevent(kq, uap->nevents, &uap->sysmsg_result, kap,
  805                             kevent_copyin, kevent_copyout, tsp);
  806 
  807         fdrop(fp);
  808 
  809         return (error);
  810 }
  811 
  812 /*
  813  * Caller must be holding the kq token
  814  */
  815 int
  816 kqueue_register(struct kqueue *kq, struct kevent *kev)
  817 {
  818         struct lwkt_token *tok;
  819         struct filedesc *fdp = kq->kq_fdp;
  820         struct filterops *fops;
  821         struct file *fp = NULL;
  822         struct knote *kn = NULL;
  823         int error = 0;
  824 
  825         if (kev->filter < 0) {
  826                 if (kev->filter + EVFILT_SYSCOUNT < 0)
  827                         return (EINVAL);
  828                 fops = sysfilt_ops[~kev->filter];       /* to 0-base index */
  829         } else {
  830                 /*
  831                  * XXX
  832                  * filter attach routine is responsible for ensuring that
  833                  * the identifier can be attached to it.
  834                  */
  835                 kprintf("unknown filter: %d\n", kev->filter);
  836                 return (EINVAL);
  837         }
  838 
  839         tok = lwkt_token_pool_lookup(kq);
  840         lwkt_gettoken(tok);
  841         if (fops->f_flags & FILTEROP_ISFD) {
  842                 /* validate descriptor */
  843                 fp = holdfp(fdp, kev->ident, -1);
  844                 if (fp == NULL) {
  845                         lwkt_reltoken(tok);
  846                         return (EBADF);
  847                 }
  848                 lwkt_getpooltoken(&fp->f_klist);
  849 again1:
  850                 SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
  851                         if (kn->kn_kq == kq &&
  852                             kn->kn_filter == kev->filter &&
  853                             kn->kn_id == kev->ident) {
  854                                 if (knote_acquire(kn) == 0)
  855                                         goto again1;
  856                                 break;
  857                         }
  858                 }
  859                 lwkt_relpooltoken(&fp->f_klist);
  860         } else {
  861                 if (kq->kq_knhashmask) {
  862                         struct klist *list;
  863                         
  864                         list = &kq->kq_knhash[
  865                             KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
  866                         lwkt_getpooltoken(list);
  867 again2:
  868                         SLIST_FOREACH(kn, list, kn_link) {
  869                                 if (kn->kn_id == kev->ident &&
  870                                     kn->kn_filter == kev->filter) {
  871                                         if (knote_acquire(kn) == 0)
  872                                                 goto again2;
  873                                         break;
  874                                 }
  875                         }
  876                         lwkt_relpooltoken(list);
  877                 }
  878         }
  879 
  880         /*
  881          * NOTE: At this point if kn is non-NULL we will have acquired
  882          *       it and set KN_PROCESSING.
  883          */
  884         if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
  885                 error = ENOENT;
  886                 goto done;
  887         }
  888 
  889         /*
  890          * kn now contains the matching knote, or NULL if no match
  891          */
  892         if (kev->flags & EV_ADD) {
  893                 if (kn == NULL) {
  894                         kn = knote_alloc();
  895                         if (kn == NULL) {
  896                                 error = ENOMEM;
  897                                 goto done;
  898                         }
  899                         kn->kn_fp = fp;
  900                         kn->kn_kq = kq;
  901                         kn->kn_fop = fops;
  902 
  903                         /*
  904                          * apply reference count to knote structure, and
  905                          * do not release it at the end of this routine.
  906                          */
  907                         fp = NULL;
  908 
  909                         kn->kn_sfflags = kev->fflags;
  910                         kn->kn_sdata = kev->data;
  911                         kev->fflags = 0;
  912                         kev->data = 0;
  913                         kn->kn_kevent = *kev;
  914 
  915                         /*
  916                          * KN_PROCESSING prevents the knote from getting
  917                          * ripped out from under us while we are trying
  918                          * to attach it, in case the attach blocks.
  919                          */
  920                         kn->kn_status = KN_PROCESSING;
  921                         knote_attach(kn);
  922                         if ((error = filter_attach(kn)) != 0) {
  923                                 kn->kn_status |= KN_DELETING | KN_REPROCESS;
  924                                 knote_drop(kn);
  925                                 goto done;
  926                         }
  927 
  928                         /*
  929                          * Interlock against close races which either tried
  930                          * to remove our knote while we were blocked or missed
  931                          * it entirely prior to our attachment.  We do not
  932                          * want to end up with a knote on a closed descriptor.
  933                          */
  934                         if ((fops->f_flags & FILTEROP_ISFD) &&
  935                             checkfdclosed(fdp, kev->ident, kn->kn_fp)) {
  936                                 kn->kn_status |= KN_DELETING | KN_REPROCESS;
  937                         }
  938                 } else {
  939                         /*
  940                          * The user may change some filter values after the
  941                          * initial EV_ADD, but doing so will not reset any 
  942                          * filter which have already been triggered.
  943                          */
  944                         KKASSERT(kn->kn_status & KN_PROCESSING);
  945                         kn->kn_sfflags = kev->fflags;
  946                         kn->kn_sdata = kev->data;
  947                         kn->kn_kevent.udata = kev->udata;
  948                 }
  949 
  950                 /*
  951                  * Execute the filter event to immediately activate the
  952                  * knote if necessary.  If reprocessing events are pending
  953                  * due to blocking above we do not run the filter here
  954                  * but instead let knote_release() do it.  Otherwise we
  955                  * might run the filter on a deleted event.
  956                  */
  957                 if ((kn->kn_status & KN_REPROCESS) == 0) {
  958                         if (filter_event(kn, 0))
  959                                 KNOTE_ACTIVATE(kn);
  960                 }
  961         } else if (kev->flags & EV_DELETE) {
  962                 /*
  963                  * Delete the existing knote
  964                  */
  965                 knote_detach_and_drop(kn);
  966                 goto done;
  967         }
  968 
  969         /*
  970          * Disablement does not deactivate a knote here.
  971          */
  972         if ((kev->flags & EV_DISABLE) &&
  973             ((kn->kn_status & KN_DISABLED) == 0)) {
  974                 kn->kn_status |= KN_DISABLED;
  975         }
  976 
  977         /*
  978          * Re-enablement may have to immediately enqueue an active knote.
  979          */
  980         if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
  981                 kn->kn_status &= ~KN_DISABLED;
  982                 if ((kn->kn_status & KN_ACTIVE) &&
  983                     ((kn->kn_status & KN_QUEUED) == 0)) {
  984                         knote_enqueue(kn);
  985                 }
  986         }
  987 
  988         /*
  989          * Handle any required reprocessing
  990          */
  991         knote_release(kn);
  992         /* kn may be invalid now */
  993 
  994 done:
  995         lwkt_reltoken(tok);
  996         if (fp != NULL)
  997                 fdrop(fp);
  998         return (error);
  999 }
 1000 
 1001 /*
 1002  * Block as necessary until the target time is reached.
 1003  * If tsp is NULL we block indefinitely.  If tsp->tv_sec and tv_nsec are
 1004  * both 0 we do not block at all.
 1005  *
 1006  * Caller must be holding the kq token.
 1007  */
 1008 static int
 1009 kqueue_sleep(struct kqueue *kq, struct timespec *tsp)
 1010 {
 1011         int error = 0;
 1012 
 1013         if (tsp == NULL) {
 1014                 kq->kq_state |= KQ_SLEEP;
 1015                 error = tsleep(kq, PCATCH, "kqread", 0);
 1016         } else if (tsp->tv_sec == 0 && tsp->tv_nsec == 0) {
 1017                 error = EWOULDBLOCK;
 1018         } else {
 1019                 struct timespec ats;
 1020                 struct timespec atx = *tsp;
 1021                 int timeout;
 1022 
 1023                 getnanouptime(&ats);
 1024                 timespecsub(&atx, &ats);
 1025                 if (atx.tv_sec < 0) {  /* target time already passed */
 1026                         error = EWOULDBLOCK;
 1027                 } else {
 1028                         timeout = atx.tv_sec > 24 * 60 * 60 ?
 1029                                 24 * 60 * 60 * hz : tstohz_high(&atx);
 1030                         kq->kq_state |= KQ_SLEEP;
 1031                         error = tsleep(kq, PCATCH, "kqread", timeout);
 1032                 }
 1033         }
 1034 
 1035         /* don't restart after signals... */
 1036         if (error == ERESTART)
 1037                 return (EINTR);
 1038 
 1039         return (error);
 1040 }
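
      /*
       * Example (editor's sketch): the three cases above are what give
       * kevent(2) its usual timeout semantics from userland:
       *
       *      struct timespec zero = { 0, 0 };
       *      kevent(kq, NULL, 0, evs, nevs, NULL);   // block until event
       *      kevent(kq, NULL, 0, evs, nevs, &zero);  // poll, never block
       */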
 1041 
 1042 /*
 1043  * Scan the kqueue, return the number of active events placed in kevp up
 1044  * to count.
 1045  *
 1046  * Continuous mode events may get recycled, do not continue scanning past
 1047  * marker unless no events have been collected.
 1048  *
 1049  * Caller must be holding the kq token
 1050  */
 1051 static int
 1052 kqueue_scan(struct kqueue *kq, struct kevent *kevp, int count,
 1053             struct knote *marker)
 1054 {
 1055         struct knote *kn, local_marker;
 1056         int total;
 1057 
 1058         total = 0;
 1059         local_marker.kn_filter = EVFILT_MARKER;
 1060         local_marker.kn_status = KN_PROCESSING;
 1061 
 1062         /*
 1063          * Collect events.
 1064          */
 1065         TAILQ_INSERT_HEAD(&kq->kq_knpend, &local_marker, kn_tqe);
 1066         while (count) {
 1067                 kn = TAILQ_NEXT(&local_marker, kn_tqe);
 1068                 if (kn->kn_filter == EVFILT_MARKER) {
 1069                         /* Marker reached, we are done */
 1070                         if (kn == marker)
 1071                                 break;
 1072 
 1073                         /* Move local marker past another thread's marker */
 1074                         kn = TAILQ_NEXT(kn, kn_tqe);
 1075                         TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);
 1076                         TAILQ_INSERT_BEFORE(kn, &local_marker, kn_tqe);
 1077                         continue;
 1078                 }
 1079 
 1080                 /*
 1081                  * We can't skip a knote undergoing processing, otherwise
 1082                  * we risk not returning it when the user process expects
 1083                  * it should be returned.  Sleep and retry.
 1084                  */
 1085                 if (knote_acquire(kn) == 0)
 1086                         continue;
 1087 
 1088                 /*
 1089                  * Remove the event for processing.
 1090                  *
 1091                  * WARNING!  We must leave KN_QUEUED set to prevent the
 1092                  *           event from being KNOTE_ACTIVATE()d while
 1093                  *           the queue state is in limbo, in case we
 1094                  *           block.
 1095                  *
 1096                  * WARNING!  We must set KN_PROCESSING to avoid races
 1097                  *           against deletion or another thread's
 1098                  *           processing.
 1099                  */
 1100                 TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
 1101                 kq->kq_count--;
 1102 
 1103                 /*
 1104                  * We have to deal with an extremely important race against
 1105                  * file descriptor close()s here.  The file descriptor can
 1106                  * be closed out from under us (the close path runs MPSAFE),
 1107                  * leaving a small window between that and knote_fdclose().
 1108                  *
 1109                  * If we hit that window here while doselect or dopoll is
 1110                  * trying to delete a spurious event they will not be able
 1111                  * to match up the event against a knote and will go haywire.
 1112                  */
 1113                 if ((kn->kn_fop->f_flags & FILTEROP_ISFD) &&
 1114                     checkfdclosed(kq->kq_fdp, kn->kn_kevent.ident, kn->kn_fp)) {
 1115                         kn->kn_status |= KN_DELETING | KN_REPROCESS;
 1116                 }
 1117 
 1118                 if (kn->kn_status & KN_DISABLED) {
 1119                         /*
 1120                          * If disabled we ensure the event is not queued
 1121                          * but leave its active bit set.  On re-enablement
 1122                          * the event may be immediately triggered.
 1123                          */
 1124                         kn->kn_status &= ~KN_QUEUED;
 1125                 } else if ((kn->kn_flags & EV_ONESHOT) == 0 &&
 1126                            (kn->kn_status & KN_DELETING) == 0 &&
 1127                            filter_event(kn, 0) == 0) {
 1128                         /*
 1129                          * If not running in one-shot mode and the event
 1130                          * is no longer present we ensure it is removed
 1131                          * from the queue and ignore it.
 1132                          */
 1133                         kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
 1134                 } else {
 1135                         /*
 1136                          * Post the event
 1137                          */
 1138                         *kevp++ = kn->kn_kevent;
 1139                         ++total;
 1140                         --count;
 1141 
 1142                         if (kn->kn_flags & EV_ONESHOT) {
 1143                                 kn->kn_status &= ~KN_QUEUED;
 1144                                 kn->kn_status |= KN_DELETING | KN_REPROCESS;
 1145                         } else if (kn->kn_flags & EV_CLEAR) {
 1146                                 kn->kn_data = 0;
 1147                                 kn->kn_fflags = 0;
 1148                                 kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
 1149                         } else {
 1150                                 TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
 1151                                 kq->kq_count++;
 1152                         }
 1153                 }
 1154 
 1155                 /*
 1156                  * Handle any post-processing states
 1157                  */
 1158                 knote_release(kn);
 1159         }
 1160         TAILQ_REMOVE(&kq->kq_knpend, &local_marker, kn_tqe);
 1161 
 1162         return (total);
 1163 }
 1164 
 1165 /*
 1166  * XXX
 1167  * This could be expanded to call kqueue_scan, if desired.
 1168  *
 1169  * MPSAFE
 1170  */
 1171 static int
 1172 kqueue_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
 1173 {
 1174         return (ENXIO);
 1175 }
 1176 
 1177 /*
 1178  * MPSAFE
 1179  */
 1180 static int
 1181 kqueue_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
 1182 {
 1183         return (ENXIO);
 1184 }
 1185 
 1186 /*
 1187  * MPALMOSTSAFE
 1188  */
 1189 static int
 1190 kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
 1191              struct ucred *cred, struct sysmsg *msg)
 1192 {
 1193         struct lwkt_token *tok;
 1194         struct kqueue *kq;
 1195         int error;
 1196 
 1197         kq = (struct kqueue *)fp->f_data;
 1198         tok = lwkt_token_pool_lookup(kq);
 1199         lwkt_gettoken(tok);
 1200 
 1201         switch(com) {
 1202         case FIOASYNC:
 1203                 if (*(int *)data)
 1204                         kq->kq_state |= KQ_ASYNC;
 1205                 else
 1206                         kq->kq_state &= ~KQ_ASYNC;
 1207                 error = 0;
 1208                 break;
 1209         case FIOSETOWN:
 1210                 error = fsetown(*(int *)data, &kq->kq_sigio);
 1211                 break;
 1212         default:
 1213                 error = ENOTTY;
 1214                 break;
 1215         }
 1216         lwkt_reltoken(tok);
 1217         return (error);
 1218 }
 1219 
 1220 /*
 1221  * MPSAFE
 1222  */
 1223 static int
 1224 kqueue_stat(struct file *fp, struct stat *st, struct ucred *cred)
 1225 {
 1226         struct kqueue *kq = (struct kqueue *)fp->f_data;
 1227 
 1228         bzero((void *)st, sizeof(*st));
 1229         st->st_size = kq->kq_count;
 1230         st->st_blksize = sizeof(struct kevent);
 1231         st->st_mode = S_IFIFO;
 1232         return (0);
 1233 }
 1234 
 1235 /*
 1236  * MPSAFE
 1237  */
 1238 static int
 1239 kqueue_close(struct file *fp)
 1240 {
 1241         struct kqueue *kq = (struct kqueue *)fp->f_data;
 1242 
 1243         kqueue_terminate(kq);
 1244 
 1245         fp->f_data = NULL;
 1246         funsetown(&kq->kq_sigio);
 1247 
 1248         kfree(kq, M_KQUEUE);
 1249         return (0);
 1250 }
 1251 
 1252 static void
 1253 kqueue_wakeup(struct kqueue *kq)
 1254 {
 1255         if (kq->kq_state & KQ_SLEEP) {
 1256                 kq->kq_state &= ~KQ_SLEEP;
 1257                 wakeup(kq);
 1258         }
 1259         KNOTE(&kq->kq_kqinfo.ki_note, 0);
 1260 }
 1261 
 1262 /*
 1263  * Calls filterops f_attach function, acquiring mplock if filter is not
 1264  * marked as FILTEROP_MPSAFE.
 1265  *
 1266  * Caller must be holding the related kq token
 1267  */
 1268 static int
 1269 filter_attach(struct knote *kn)
 1270 {
 1271         int ret;
 1272 
 1273         if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
 1274                 ret = kn->kn_fop->f_attach(kn);
 1275         } else {
 1276                 get_mplock();
 1277                 ret = kn->kn_fop->f_attach(kn);
 1278                 rel_mplock();
 1279         }
 1280         return (ret);
 1281 }
 1282 
 1283 /*
 1284  * Detach the knote and drop it, destroying the knote.
 1285  *
 1286  * Calls filterops f_detach function, acquiring mplock if filter is not
 1287  * marked as FILTEROP_MPSAFE.
 1288  *
 1289  * Caller must be holding the related kq token
 1290  */
 1291 static void
 1292 knote_detach_and_drop(struct knote *kn)
 1293 {
 1294         kn->kn_status |= KN_DELETING | KN_REPROCESS;
 1295         if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
 1296                 kn->kn_fop->f_detach(kn);
 1297         } else {
 1298                 get_mplock();
 1299                 kn->kn_fop->f_detach(kn);
 1300                 rel_mplock();
 1301         }
 1302         knote_drop(kn);
 1303 }
 1304 
 1305 /*
 1306  * Calls filterops f_event function, acquiring mplock if filter is not
 1307  * marked as FILTEROP_MPSAFE.
 1308  *
 1309  * If the knote is in the middle of being created or deleted we cannot
 1310  * safely call the filter op.
 1311  *
 1312  * Caller must be holding the related kq token
 1313  */
 1314 static int
 1315 filter_event(struct knote *kn, long hint)
 1316 {
 1317         int ret;
 1318 
 1319         if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
 1320                 ret = kn->kn_fop->f_event(kn, hint);
 1321         } else {
 1322                 get_mplock();
 1323                 ret = kn->kn_fop->f_event(kn, hint);
 1324                 rel_mplock();
 1325         }
 1326         return (ret);
 1327 }
 1328 
 1329 /*
 1330  * Walk down a list of knotes, activating them if their event has triggered.
 1331  *
 1332  * If we encounter any knotes which are undergoing processing we just mark
 1333  * them for reprocessing and do not try to [re]activate the knote.  However,
 1334  * if a hint is being passed we have to wait and that makes things a bit
 1335  * sticky.
 1336  */
 1337 void
 1338 knote(struct klist *list, long hint)
 1339 {
 1340         struct kqueue *kq;
 1341         struct knote *kn;
 1342         struct knote *kntmp;
 1343 
 1344         lwkt_getpooltoken(list);
 1345 restart:
 1346         SLIST_FOREACH(kn, list, kn_next) {
 1347                 kq = kn->kn_kq;
 1348                 lwkt_getpooltoken(kq);
 1349 
 1350                 /* temporary verification hack */
 1351                 SLIST_FOREACH(kntmp, list, kn_next) {
 1352                         if (kn == kntmp)
 1353                                 break;
 1354                 }
 1355                 if (kn != kntmp || kn->kn_kq != kq) {
 1356                         lwkt_relpooltoken(kq);
 1357                         goto restart;
 1358                 }
 1359 
 1360                 if (kn->kn_status & KN_PROCESSING) {
 1361                         /*
 1362                          * Someone else is processing the knote, ask the
 1363                          * other thread to reprocess it and don't mess
 1364                          * with it otherwise.
 1365                          */
 1366                         if (hint == 0) {
 1367                                 kn->kn_status |= KN_REPROCESS;
 1368                                 lwkt_relpooltoken(kq);
 1369                                 continue;
 1370                         }
 1371 
 1372                         /*
 1373                          * If the hint is non-zero we have to wait or risk
 1374                          * losing the state the caller is trying to update.
 1375                          *
 1376                          * XXX This is a real problem, certain process
 1377                          *     and signal filters will bump kn_data for
 1378                          *     already-processed notes more than once if
 1379                          *     we restart the list scan.  FIXME.
 1380                          */
 1381                         kn->kn_status |= KN_WAITING | KN_REPROCESS;
 1382                         tsleep(kn, 0, "knotec", hz);
 1383                         lwkt_relpooltoken(kq);
 1384                         goto restart;
 1385                 }
 1386 
 1387                 /*
 1388                  * Become the reprocessing master ourselves.
 1389                  *
 1390                  * If hint is non-zero, running the event is mandatory
 1391                  * when not deleting, so do it whether reprocessing is
 1392                  * set or not.
 1393                  */
 1394                 kn->kn_status |= KN_PROCESSING;
 1395                 if ((kn->kn_status & KN_DELETING) == 0) {
 1396                         if (filter_event(kn, hint))
 1397                                 KNOTE_ACTIVATE(kn);
 1398                 }
 1399                 if (knote_release(kn)) {
 1400                         lwkt_relpooltoken(kq);
 1401                         goto restart;
 1402                 }
 1403                 lwkt_relpooltoken(kq);
 1404         }
 1405         lwkt_relpooltoken(list);
 1406 }
 1407 
 1408 /*
 1409  * Insert knote at head of klist.
 1410  *
 1411  * This function may only be called via a filter function and thus
 1412  * kq_token should already be held and marked for processing.
 1413  */
 1414 void
 1415 knote_insert(struct klist *klist, struct knote *kn)
 1416 {
 1417         lwkt_getpooltoken(klist);
 1418         KKASSERT(kn->kn_status & KN_PROCESSING);
 1419         SLIST_INSERT_HEAD(klist, kn, kn_next);
 1420         lwkt_relpooltoken(klist);
 1421 }
 1422 
 1423 /*
 1424  * Remove knote from a klist
 1425  *
 1426  * This function may only be called via a filter function and thus
 1427  * kq_token should already be held and marked for processing.
 1428  */
 1429 void
 1430 knote_remove(struct klist *klist, struct knote *kn)
 1431 {
 1432         lwkt_getpooltoken(klist);
 1433         KKASSERT(kn->kn_status & KN_PROCESSING);
 1434         SLIST_REMOVE(klist, kn, knote, kn_next);
 1435         lwkt_relpooltoken(klist);
 1436 }
 1437 
 1438 #if 0
 1439 /*
 1440  * Remove all knotes from a specified klist
 1441  *
 1442  * Only called from aio.
 1443  */
 1444 void
 1445 knote_empty(struct klist *list)
 1446 {
 1447         struct knote *kn;
 1448 
 1449         lwkt_gettoken(&kq_token);
 1450         while ((kn = SLIST_FIRST(list)) != NULL) {
 1451                 if (knote_acquire(kn))
 1452                         knote_detach_and_drop(kn);
 1453         }
 1454         lwkt_reltoken(&kq_token);
 1455 }
 1456 #endif
 1457 
 1458 void
 1459 knote_assume_knotes(struct kqinfo *src, struct kqinfo *dst,
 1460                     struct filterops *ops, void *hook)
 1461 {
 1462         struct kqueue *kq;
 1463         struct knote *kn;
 1464 
 1465         lwkt_getpooltoken(&src->ki_note);
 1466         lwkt_getpooltoken(&dst->ki_note);
 1467         while ((kn = SLIST_FIRST(&src->ki_note)) != NULL) {
 1468                 kq = kn->kn_kq;
 1469                 lwkt_getpooltoken(kq);
 1470                 if (SLIST_FIRST(&src->ki_note) != kn || kn->kn_kq != kq) {
 1471                         lwkt_relpooltoken(kq);
 1472                         continue;
 1473                 }
 1474                 if (knote_acquire(kn)) {
 1475                         knote_remove(&src->ki_note, kn);
 1476                         kn->kn_fop = ops;
 1477                         kn->kn_hook = hook;
 1478                         knote_insert(&dst->ki_note, kn);
 1479                         knote_release(kn);
 1480                         /* kn may be invalid now */
 1481                 }
 1482                 lwkt_relpooltoken(kq);
 1483         }
 1484         lwkt_relpooltoken(&dst->ki_note);
 1485         lwkt_relpooltoken(&src->ki_note);
 1486 }
 1487 
 1488 /*
 1489  * Remove all knotes referencing a specified fd
 1490  */
 1491 void
 1492 knote_fdclose(struct file *fp, struct filedesc *fdp, int fd)
 1493 {
 1494         struct kqueue *kq;
 1495         struct knote *kn;
 1496         struct knote *kntmp;
 1497 
 1498         lwkt_getpooltoken(&fp->f_klist);
 1499 restart:
 1500         SLIST_FOREACH(kn, &fp->f_klist, kn_link) {
 1501                 if (kn->kn_kq->kq_fdp == fdp && kn->kn_id == fd) {
 1502                         kq = kn->kn_kq;
 1503                         lwkt_getpooltoken(kq);
 1504 
 1505                         /* temporary verification hack */
 1506                         SLIST_FOREACH(kntmp, &fp->f_klist, kn_link) {
 1507                                 if (kn == kntmp)
 1508                                         break;
 1509                         }
 1510                         if (kn != kntmp || kn->kn_kq->kq_fdp != fdp ||
 1511                             kn->kn_id != fd || kn->kn_kq != kq) {
 1512                                 lwkt_relpooltoken(kq);
 1513                                 goto restart;
 1514                         }
 1515                         if (knote_acquire(kn))
 1516                                 knote_detach_and_drop(kn);
 1517                         lwkt_relpooltoken(kq);
 1518                         goto restart;
 1519                 }
 1520         }
 1521         lwkt_relpooltoken(&fp->f_klist);
 1522 }
 1523 
 1524 /*
 1525  * Low level attach function.
 1526  *
 1527  * The knote should already be marked for processing.
 1528  * Caller must hold the related kq token.
 1529  */
 1530 static void
 1531 knote_attach(struct knote *kn)
 1532 {
 1533         struct klist *list;
 1534         struct kqueue *kq = kn->kn_kq;
 1535 
 1536         if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
 1537                 KKASSERT(kn->kn_fp);
 1538                 list = &kn->kn_fp->f_klist;
 1539         } else {
 1540                 if (kq->kq_knhashmask == 0)
 1541                         kq->kq_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
 1542                                                  &kq->kq_knhashmask);
 1543                 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
 1544         }
 1545         lwkt_getpooltoken(list);
 1546         SLIST_INSERT_HEAD(list, kn, kn_link);
 1547         TAILQ_INSERT_HEAD(&kq->kq_knlist, kn, kn_kqlink);
 1548         lwkt_relpooltoken(list);
 1549 }
 1550 
 1551 /*
 1552  * Low level drop function.
 1553  *
 1554  * The knote should already be marked for processing.
 1555  * Caller must hold the related kq token.
 1556  */
 1557 static void
 1558 knote_drop(struct knote *kn)
 1559 {
 1560         struct kqueue *kq;
 1561         struct klist *list;
 1562 
 1563         kq = kn->kn_kq;
 1564 
 1565         if (kn->kn_fop->f_flags & FILTEROP_ISFD)
 1566                 list = &kn->kn_fp->f_klist;
 1567         else
 1568                 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
 1569 
 1570         lwkt_getpooltoken(list);
 1571         SLIST_REMOVE(list, kn, knote, kn_link);
 1572         TAILQ_REMOVE(&kq->kq_knlist, kn, kn_kqlink);
 1573         if (kn->kn_status & KN_QUEUED)
 1574                 knote_dequeue(kn);
 1575         if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
 1576                 fdrop(kn->kn_fp);
 1577                 kn->kn_fp = NULL;
 1578         }
 1579         knote_free(kn);
 1580         lwkt_relpooltoken(list);
 1581 }
 1582 
 1583 /*
 1584  * Low level enqueue function.
 1585  *
 1586  * The knote should already be marked for processing.
 1587  * Caller must be holding the kq token
 1588  */
 1589 static void
 1590 knote_enqueue(struct knote *kn)
 1591 {
 1592         struct kqueue *kq = kn->kn_kq;
 1593 
 1594         KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));
 1595         TAILQ_INSERT_TAIL(&kq->kq_knpend, kn, kn_tqe);
 1596         kn->kn_status |= KN_QUEUED;
 1597         ++kq->kq_count;
 1598 
 1599         /*
 1600          * Send SIGIO on request (typically set up as a mailbox signal)
 1601          */
 1602         if (kq->kq_sigio && (kq->kq_state & KQ_ASYNC) && kq->kq_count == 1)
 1603                 pgsigio(kq->kq_sigio, SIGIO, 0);
 1604 
 1605         kqueue_wakeup(kq);
 1606 }
 1607 
 1608 /*
 1609  * Low level dequeue function.
 1610  *
 1611  * The knote should already be marked for processing.
 1612  * Caller must be holding the kq token
 1613  */
 1614 static void
 1615 knote_dequeue(struct knote *kn)
 1616 {
 1617         struct kqueue *kq = kn->kn_kq;
 1618 
 1619         KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
 1620         TAILQ_REMOVE(&kq->kq_knpend, kn, kn_tqe);
 1621         kn->kn_status &= ~KN_QUEUED;
 1622         kq->kq_count--;
 1623 }
 1624 
 1625 static struct knote *
 1626 knote_alloc(void)
 1627 {
 1628         return kmalloc(sizeof(struct knote), M_KQUEUE, M_WAITOK);
 1629 }
 1630 
 1631 static void
 1632 knote_free(struct knote *kn)
 1633 {
 1634         kfree(kn, M_KQUEUE);
 1635 }
