The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/fs/puffs/puffs_msgif.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*      $NetBSD: puffs_msgif.c,v 1.8 2006/11/21 01:53:33 pooka Exp $    */
    2 
    3 /*
    4  * Copyright (c) 2005, 2006  Antti Kantee.  All Rights Reserved.
    5  *
    6  * Development of this software was supported by the
    7  * Google Summer of Code program and the Ulla Tuominen Foundation.
    8  * The Google SoC project was mentored by Bill Studenmund.
    9  *
   10  * Redistribution and use in source and binary forms, with or without
   11  * modification, are permitted provided that the following conditions
   12  * are met:
   13  * 1. Redistributions of source code must retain the above copyright
   14  *    notice, this list of conditions and the following disclaimer.
   15  * 2. Redistributions in binary form must reproduce the above copyright
   16  *    notice, this list of conditions and the following disclaimer in the
   17  *    documentation and/or other materials provided with the distribution.
   18  * 3. The name of the company nor the name of the author may be used to
   19  *    endorse or promote products derived from this software without specific
   20  *    prior written permission.
   21  *
   22  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
   23  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
   24  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
   25  * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
   28  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   32  * SUCH DAMAGE.
   33  */
   34 
   35 #include <sys/cdefs.h>
   36 __KERNEL_RCSID(0, "$NetBSD: puffs_msgif.c,v 1.8 2006/11/21 01:53:33 pooka Exp $");
   37 
   38 #include <sys/param.h>
   39 #include <sys/conf.h>
   40 #include <sys/malloc.h>
   41 #include <sys/mount.h>
   42 #include <sys/socketvar.h>
   43 #include <sys/vnode.h>
   44 #include <sys/file.h>
   45 #include <sys/filedesc.h>
   46 #include <sys/lock.h>
   47 #include <sys/poll.h>
   48 
   49 #include <fs/puffs/puffs_msgif.h>
   50 #include <fs/puffs/puffs_sys.h>
   51 
   52 #include <miscfs/syncfs/syncfs.h> /* XXX: for syncer_lock reference */
   53 
   54 
   55 /*
   56  * kernel-user-kernel waitqueues
   57  */
   58 
   59 static int touser(struct puffs_mount *, struct puffs_park *, uint64_t,
   60                   struct vnode *, struct vnode *);
   61 
   62 uint64_t
   63 puffs_getreqid(struct puffs_mount *pmp)
   64 {
   65         unsigned int rv;
   66 
   67         simple_lock(&pmp->pmp_lock);
   68         rv = pmp->pmp_nextreq++;
   69         simple_unlock(&pmp->pmp_lock);
   70 
   71         return rv;
   72 }
   73 
/*
 * vfs request: ship a VFS-level operation to the userspace server
 * and wait for its reply.
 *
 * kbuf/buflen describe the request payload; the reply is copied back
 * into the same buffer by the reply path (see puffsputop()).  Blocks
 * in touser() until the server answers.  Returns 0 or an errno
 * (e.g. ENXIO if the file system is not up).
 */
int
puffs_vfstouser(struct puffs_mount *pmp, int optype, void *kbuf, size_t buflen)
{
        struct puffs_park park;

        /* zero only the wire-visible request part of the park */
        memset(&park.park_preq, 0, sizeof(struct puffs_req));

        park.park_opclass = PUFFSOP_VFS; 
        park.park_optype = optype;

        park.park_kernbuf = kbuf;
        park.park_buflen = buflen;
        park.park_copylen = buflen;
        park.park_flags = 0;

        /* fresh request id; no vnodes involved at the VFS level */
        return touser(pmp, &park, puffs_getreqid(pmp), NULL, NULL);
}
   92 
/*
 * vnode level request: ship an operation on a single node to the
 * userspace server and wait for the reply.
 *
 * "cookie" identifies the target node to the server.  vp1/vp2 are
 * handed to touser() for its (currently disabled) unlock/relock
 * dance; see touser() for the locking discussion.
 */
int
puffs_vntouser(struct puffs_mount *pmp, int optype,
        void *kbuf, size_t buflen, void *cookie,
        struct vnode *vp1, struct vnode *vp2)
{
        struct puffs_park park;

        /* zero only the wire-visible request part of the park */
        memset(&park.park_preq, 0, sizeof(struct puffs_req));

        park.park_opclass = PUFFSOP_VN; 
        park.park_optype = optype;
        park.park_cookie = cookie;

        park.park_kernbuf = kbuf;
        park.park_buflen = buflen;
        park.park_copylen = buflen;
        park.park_flags = 0;

        return touser(pmp, &park, puffs_getreqid(pmp), vp1, vp2);
}
  116 
/*
 * vnode level request, caller-controlled req id.
 *
 * Same as puffs_vntouser(), except the caller supplies the request
 * id instead of having one allocated here (used when the id has to
 * be known in advance, e.g. for a matching size-op).
 */
int
puffs_vntouser_req(struct puffs_mount *pmp, int optype,
        void *kbuf, size_t buflen, void *cookie, uint64_t reqid,
        struct vnode *vp1, struct vnode *vp2)
{
        struct puffs_park park;

        /* zero only the wire-visible request part of the park */
        memset(&park.park_preq, 0, sizeof(struct puffs_req));

        park.park_opclass = PUFFSOP_VN; 
        park.park_optype = optype;
        park.park_cookie = cookie;

        park.park_kernbuf = kbuf;
        park.park_buflen = buflen;
        park.park_copylen = buflen;
        park.park_flags = 0;

        return touser(pmp, &park, reqid, vp1, vp2);
}
  140 
/*
 * vnode level request, copy routines can adjust "kernbuf".
 *
 * Like puffs_vntouser(), but only "copylen" bytes are shipped to the
 * server initially, and PUFFS_REQFLAG_ADJBUF allows the reply path
 * (puffsputop()) to replace park_kernbuf with a larger allocation if
 * the reply does not fit.  The possibly-new buffer and its size are
 * passed back through *kbuf / *buflen; the caller owns the result.
 */
int
puffs_vntouser_adjbuf(struct puffs_mount *pmp, int optype,
        void **kbuf, size_t *buflen, size_t copylen, void *cookie,
        struct vnode *vp1, struct vnode *vp2)
{
        struct puffs_park park;
        int error;

        /* zero only the wire-visible request part of the park */
        memset(&park.park_preq, 0, sizeof(struct puffs_req));

        park.park_opclass = PUFFSOP_VN; 
        park.park_optype = optype;
        park.park_cookie = cookie;

        park.park_kernbuf = *kbuf;
        park.park_buflen = *buflen;
        park.park_copylen = copylen;
        park.park_flags = PUFFS_REQFLAG_ADJBUF;

        error = touser(pmp, &park, puffs_getreqid(pmp), vp1, vp2);
        /* reply path may have reallocated the buffer; report back */
        *kbuf = park.park_kernbuf;
        *buflen = park.park_buflen;

        return error;
}
  169 
/*
 * Notice: kbuf will be free'd later.  It must be allocated from the
 * kernel heap and its ownership is shifted to this function from
 * now on, i.e. the caller is not allowed to use it anymore!
 *
 * Fire-and-forget vnode request: queue the operation for the
 * userspace server without waiting for a reply.  The park must live
 * on the heap since it outlives this stack frame; the server-side
 * dequeue (puffsgetop()) frees both the park and kbuf for FAF ops.
 */
void
puffs_vntouser_faf(struct puffs_mount *pmp, int optype,
        void *kbuf, size_t buflen, void *cookie)
{
        struct puffs_park *ppark;

        /* XXX: is it allowable to sleep here? */
        ppark = malloc(sizeof(struct puffs_park), M_PUFFS, M_NOWAIT | M_ZERO);
        if (ppark == NULL)
                return; /* 2bad */ /* NOTE(review): op dropped and kbuf leaks */

        ppark->park_opclass = PUFFSOP_VN | PUFFSOPFLAG_FAF;
        ppark->park_optype = optype;
        ppark->park_cookie = cookie;

        ppark->park_kernbuf = kbuf;
        ppark->park_buflen = buflen;
        ppark->park_copylen = buflen;

        /*
         * NOTE(review): if touser() bails out early (fs not running)
         * the park was never queued, so both ppark and kbuf are
         * leaked -- the error return is ignored here.  Confirm and
         * fix together with touser()'s FAF return semantics.
         */
        (void)touser(pmp, ppark, 0, NULL, NULL);
}
  196 
  197 /*
  198  * Wait for the userspace ping-pong game in calling process context.
  199  *
  200  * This unlocks vnodes if they are supplied.  vp1 is the vnode
  201  * before in the locking order, i.e. the one which must be locked
  202  * before accessing vp2.  This is done here so that operations are
  203  * already ordered in the queue when vnodes are unlocked (I'm not
  204  * sure if that's really necessary, but it can't hurt).  Okok, maybe
  205  * there's a slight ugly-factor also, but let's not worry about that.
  206  */
  207 static int
  208 touser(struct puffs_mount *pmp, struct puffs_park *ppark, uint64_t reqid,
  209         struct vnode *vp1, struct vnode *vp2)
  210 {
  211 
  212         simple_lock(&pmp->pmp_lock);
  213         if (pmp->pmp_status != PUFFSTAT_RUNNING
  214             && pmp->pmp_status != PUFFSTAT_MOUNTING) {
  215                 simple_unlock(&pmp->pmp_lock);
  216                 return ENXIO;
  217         }
  218 
  219         ppark->park_id = reqid;
  220 
  221         TAILQ_INSERT_TAIL(&pmp->pmp_req_touser, ppark, park_entries);
  222         pmp->pmp_req_touser_waiters++;
  223 
  224         /*
  225          * Don't do unlock-relock dance yet.  There are a couple of
  226          * unsolved issues with it.  If we don't unlock, we can have
  227          * processes wanting vn_lock in case userspace hangs.  But
  228          * that can be "solved" by killing the userspace process.  It
  229          * would of course be nicer to have antilocking in the userspace
  230          * interface protocol itself.. your patience will be rewarded.
  231          */
  232 #if 0
  233         /* unlock */
  234         if (vp2)
  235                 VOP_UNLOCK(vp2, 0);
  236         if (vp1)
  237                 VOP_UNLOCK(vp1, 0);
  238 #endif
  239 
  240         /*
  241          * XXX: does releasing the lock here cause trouble?  Can't hold
  242          * it, because otherwise the below would cause locking against
  243          * oneself-problems in the kqueue stuff.  yes, it is a
  244          * theoretical race, so it must be solved
  245          */
  246         simple_unlock(&pmp->pmp_lock);
  247 
  248         wakeup(&pmp->pmp_req_touser);
  249         selnotify(pmp->pmp_sel, 0);
  250 
  251         if (PUFFSOP_WANTREPLY(ppark->park_opclass))
  252                 ltsleep(ppark, PUSER, "puffs1", 0, NULL);
  253 
  254 #if 0
  255         /* relock */
  256         if (vp1)
  257                 KASSERT(vn_lock(vp1, LK_EXCLUSIVE | LK_RETRY) == 0);
  258         if (vp2)
  259                 KASSERT(vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY) == 0);
  260 #endif
  261 
  262         return ppark->park_rv;
  263 }
  264 
  265 /*
  266  * We're dead, kaput, RIP, slightly more than merely pining for the
  267  * fjords, belly-up, fallen, lifeless, finished, expired, gone to meet
  268  * our maker, ceased to be, etcetc.  YASD.  It's a dead FS!
  269  */
  270 void
  271 puffs_userdead(struct puffs_mount *pmp)
  272 {
  273         struct puffs_park *park;
  274 
  275         simple_lock(&pmp->pmp_lock);
  276 
  277         /*
  278          * Mark filesystem status as dying so that operations don't
  279          * attempt to march to userspace any longer.
  280          */
  281         pmp->pmp_status = PUFFSTAT_DYING;
  282 
  283         /* and wakeup processes waiting for a reply from userspace */
  284         TAILQ_FOREACH(park, &pmp->pmp_req_replywait, park_entries) {
  285                 park->park_rv = ENXIO;
  286                 TAILQ_REMOVE(&pmp->pmp_req_replywait, park, park_entries);
  287                 wakeup(park);
  288         }
  289 
  290         /* wakeup waiters for completion of vfs/vnode requests */
  291         TAILQ_FOREACH(park, &pmp->pmp_req_touser, park_entries) {
  292                 park->park_rv = ENXIO;
  293                 TAILQ_REMOVE(&pmp->pmp_req_touser, park, park_entries);
  294                 wakeup(park);
  295         }
  296 
  297         simple_unlock(&pmp->pmp_lock);
  298 }
  299 
  300 
/*
 * Device routines
 */

/* devsw entry-point declarations (see the macros in sys/conf.h) */
dev_type_open(puffscdopen);
dev_type_close(puffscdclose);
dev_type_ioctl(puffscdioctl);

/* dev */
/*
 * Character device switch.  Only open (and nominally close) go
 * through here; all real traffic happens on the cloned descriptor
 * via puffs_fileops, hence the no-op entries everywhere else.
 */
const struct cdevsw puffs_cdevsw = {
        puffscdopen,    puffscdclose,   noread,         nowrite,
        noioctl,        nostop,         notty,          nopoll,
        nommap,         nokqfilter,     D_OTHER
};
  315 
/* forward declarations for the cloned-descriptor file operations */
static int puffs_fop_read(struct file *, off_t *, struct uio *,
                          kauth_cred_t, int);
static int puffs_fop_write(struct file *, off_t *, struct uio *,
                           kauth_cred_t, int);
static int puffs_fop_ioctl(struct file*, u_long, void *, struct lwp *);
static int puffs_fop_poll(struct file *, int, struct lwp *);
static int puffs_fop_close(struct file *, struct lwp *);
static int puffs_fop_kqfilter(struct file *, struct knote *);


/* fd routines, for cloner */
/*
 * File operations attached to the per-open cloned descriptor by
 * fdclone() in puffscdopen().  The kernel<->server protocol runs
 * entirely over ioctl/poll/kqueue on this descriptor.
 */
static const struct fileops puffs_fileops = {
        puffs_fop_read,
        puffs_fop_write,
        puffs_fop_ioctl,
        fnullop_fcntl,
        puffs_fop_poll,
        fbadop_stat,
        puffs_fop_close,
        puffs_fop_kqfilter
};
  337 
/*
 * puffs instance structures.  these are always allocated and freed
 * from the context of the device node / fileop code.
 */
struct puffs_instance {
        pid_t pi_pid;                   /* opener's process id */
        int pi_idx;                     /* minor-number slot (see get_pi_idx) */
        int pi_fd;                      /* server's fd, set in puffs_setpmp */
        struct puffs_mount *pi_pmp;     /* mount, or PMP_EMBRYO / PMP_DEAD */
        struct selinfo pi_sel;          /* poll/kqueue bookkeeping */

        TAILQ_ENTRY(puffs_instance) pi_entries;
};
#define PMP_EMBRYO ((struct puffs_mount *)-1)   /* before mount */
#define PMP_DEAD ((struct puffs_mount *)-2)     /* goner        */

/* all live instances, kept sorted by pi_idx (see get_pi_idx) */
static TAILQ_HEAD(, puffs_instance) puffs_ilist
    = TAILQ_HEAD_INITIALIZER(puffs_ilist);

/* protects both the list and the contents of the list elements */
static struct simplelock pi_lock = SIMPLELOCK_INITIALIZER;

static int get_pi_idx(struct puffs_instance *);
  361 
  362 /* search sorted list of instances for free minor, sorted insert arg */
  363 static int
  364 get_pi_idx(struct puffs_instance *pi_i)
  365 {
  366         struct puffs_instance *pi;
  367         int i;
  368 
  369         i = 0;
  370         TAILQ_FOREACH(pi, &puffs_ilist, pi_entries) {
  371                 if (i == PUFFS_CLONER)
  372                         return PUFFS_CLONER;
  373                 if (i != pi->pi_idx)
  374                         break;
  375                 i++;
  376         }
  377 
  378         pi_i->pi_pmp = PMP_EMBRYO;
  379 
  380         if (pi == NULL)
  381                 TAILQ_INSERT_TAIL(&puffs_ilist, pi_i, pi_entries);
  382         else
  383                 TAILQ_INSERT_BEFORE(pi, pi_i, pi_entries);
  384 
  385         return i;
  386 }
  387 
/*
 * Cloner open: allocate a fresh puffs_instance, reserve it a minor
 * slot, and hand the caller a cloned descriptor wired to
 * puffs_fileops with the instance as f_data.
 */
int
puffscdopen(dev_t dev, int flags, int fmt, struct lwp *l)
{
        struct puffs_instance *pi;
        struct file *fp;
        int error, fd, idx;

        /*
         * XXX: decide on some security model and check permissions
         */

        /* only the cloner minor may be opened directly */
        if (minor(dev) != PUFFS_CLONER)
                return ENXIO;

        if ((error = falloc(l, &fp, &fd)) != 0)
                return error;

        MALLOC(pi, struct puffs_instance *, sizeof(struct puffs_instance),
            M_PUFFS, M_WAITOK | M_ZERO);

        simple_lock(&pi_lock);
        idx = get_pi_idx(pi);
        if (idx == PUFFS_CLONER) {
                /* all slots taken: roll back instance and half-built file */
                simple_unlock(&pi_lock);
                FREE(pi, M_PUFFS);
                FILE_UNUSE(fp, l);
                ffree(fp);
                return EBUSY;
        }

        pi->pi_pid = l->l_proc->p_pid;
        pi->pi_idx = idx;
        simple_unlock(&pi_lock);

        DPRINTF(("puffscdopen: registered embryonic pmp for pid: %d\n",
            pi->pi_pid));

        return fdclone(l, fp, fd, FREAD|FWRITE, &puffs_fileops, pi);
}
  427 
/*
 * Device close entry point.  All opens are immediately converted to
 * cloned descriptors in puffscdopen(), so closes arrive through
 * puffs_fop_close() instead -- reaching this function indicates a
 * kernel bug, hence the panic.
 */
int
puffscdclose(dev_t dev, int flags, int fmt, struct lwp *l)
{

        panic("puffscdclose\n");

        return 0;
}
  436 
  437 /*
  438  * Set puffs_mount -pointer.  Called from puffs_mount(), which is the
  439  * earliest place that knows about this.
  440  *
  441  * We only want to make sure that the caller had the right to open the
  442  * device, we don't so much care about which context it gets in case
  443  * the same process opened multiple (since they are equal at this point).
  444  */
  445 int
  446 puffs_setpmp(pid_t pid, int fd, struct puffs_mount *pmp)
  447 {
  448         struct puffs_instance *pi;
  449         int rv = 1;
  450 
  451         simple_lock(&pi_lock);
  452         TAILQ_FOREACH(pi, &puffs_ilist, pi_entries) {
  453                 if (pi->pi_pid == pid && pi->pi_pmp == PMP_EMBRYO) {
  454                         pi->pi_pmp = pmp;
  455                         pi->pi_fd = fd;
  456                         pmp->pmp_sel = &pi->pi_sel;
  457                         rv = 0;
  458                         break;
  459                     }
  460         }
  461         simple_unlock(&pi_lock);
  462 
  463         return rv;
  464 }
  465 
/*
 * Remove mount point from list of instances.  Called from unmount.
 *
 * The instance itself is not freed here; it is marked PMP_DEAD and
 * later released by puffs_fop_close() when the descriptor goes away.
 */
void
puffs_nukebypmp(struct puffs_mount *pmp)
{
        struct puffs_instance *pi;

        simple_lock(&pi_lock);
        TAILQ_FOREACH(pi, &puffs_ilist, pi_entries) {
                if (pi->pi_pmp == pmp) {
                        TAILQ_REMOVE(&puffs_ilist, pi, pi_entries);
                        break;
                }
        }
        /* note: the #ifdef below splices an else onto this if */
        if (pi)
                pi->pi_pmp = PMP_DEAD;

#ifdef DIAGNOSTIC
        else
                panic("puffs_nukebypmp: invalid puffs_mount\n");
#endif /* DIAGNOSTIC */

        simple_unlock(&pi_lock);

        DPRINTF(("puffs_nukebypmp: nuked %p\n", pi));
}
  493 
  494 
/*
 * read(2) on the puffs descriptor is unsupported: the transport is
 * ioctl-based (PUFFSGETOP/PUFFSPUTOP), see puffs_fop_ioctl().
 */
static int
puffs_fop_read(struct file *fp, off_t *off, struct uio *uio,
        kauth_cred_t cred, int flags)
{

        printf("READ\n");
        return ENODEV;
}
  503 
/*
 * write(2) on the puffs descriptor is unsupported: the transport is
 * ioctl-based (PUFFSGETOP/PUFFSPUTOP), see puffs_fop_ioctl().
 */
static int
puffs_fop_write(struct file *fp, off_t *off, struct uio *uio,
        kauth_cred_t cred, int flags)
{

        printf("WRITE\n");
        return ENODEV;
}
  512 
  513 /*
  514  * Poll query interface.  The question is only if an event
  515  * can be read from us (and by read I mean ioctl... ugh).
  516  */
  517 #define PUFFPOLL_EVSET (POLLIN | POLLRDNORM | POLLRDBAND | POLLPRI)
  518 static int
  519 puffs_fop_poll(struct file *fp, int events, struct lwp *l)
  520 {
  521         struct puffs_mount *pmp = FPTOPMP(fp);
  522         int revents;
  523 
  524         if (pmp == PMP_EMBRYO || pmp == PMP_DEAD) {
  525                 printf("puffs_fop_ioctl: puffs %p, not mounted\n", pmp);
  526                 return ENOENT;
  527         }
  528 
  529         revents = events & (POLLOUT | POLLWRNORM | POLLWRBAND);
  530         if ((events & PUFFPOLL_EVSET) == 0)
  531                 return revents;
  532 
  533         /* check queue */
  534         simple_lock(&pmp->pmp_lock);
  535         if (!TAILQ_EMPTY(&pmp->pmp_req_touser))
  536                 revents |= PUFFPOLL_EVSET;
  537         else
  538                 selrecord(l, pmp->pmp_sel);
  539         simple_unlock(&pmp->pmp_lock);
  540 
  541         return revents;
  542 }
  543 
/*
 * device close = forced unmount.
 *
 * unmounting is a frightfully complex operation to avoid races
 *
 * XXX: if userspace is terminated by a signal, this will be
 * called only after the signal is delivered (i.e. after someone tries
 * to access the file system).  Also, the first one for a delivery
 * will get a free bounce-bounce ride before it can be notified
 * that the fs is dead.  I'm not terribly concerned about optimizing
 * this for speed ...
 */
static int
puffs_fop_close(struct file *fp, struct lwp *l)
{
        struct puffs_instance *pi;
        struct puffs_mount *pmp;
        struct mount *mp;
        int gone;

        DPRINTF(("puffs_fop_close: device closed, force filesystem unmount\n"));

        simple_lock(&pi_lock);
        pmp = FPTOPMP(fp);
        /*
         * First check if the fs was never mounted.  In that case
         * remove the instance from the list.  If mount is attempted later,
         * it will simply fail.
         */
        if (pmp == PMP_EMBRYO) {
                pi = FPTOPI(fp);
                TAILQ_REMOVE(&puffs_ilist, pi, pi_entries);
                simple_unlock(&pi_lock);
                FREE(pi, M_PUFFS);
                return 0;
        }

        /*
         * Next, check whether unmount was already called and the
         * instance is dead.  In this case we can just free the
         * structure and go home, it was removed from the list by
         * puffs_nukebypmp().
         */
        if (pmp == PMP_DEAD) {
                /* would be nice, but don't have a reference to it ... */
                /* KASSERT(pmp_status == PUFFSTAT_DYING); */
                simple_unlock(&pi_lock);
                pi = FPTOPI(fp);
                FREE(pi, M_PUFFS);
                return 0;
        }

        /*
         * So we have a reference.  Proceed to unwrap the file system.
         */
        mp = PMPTOMP(pmp);
        simple_unlock(&pi_lock);

        /*
         * Free the waiting callers before proceeding any further.
         * The syncer might be jogging around in this file system
         * currently.  If we allow it to go to the userspace of no
         * return while trying to get the syncer lock, well ...
         * synclk: I feel happy, I feel fine.
         * lockmgr: You're not fooling anyone, you know.
         */
        puffs_userdead(pmp);

        /*
         * Detach from VFS.  First do necessary XXX-dance (from
         * sys_unmount() & other callers of dounmount()
         *
         * XXX Freeze syncer.  Must do this before locking the
         * mount point.  See dounmount() for details.
         *
         * XXX2: take a reference to the mountpoint before starting to
         * wait for syncer_lock.  Otherwise the mountpoint can be
         * wiped out while we wait.
         */
        simple_lock(&mp->mnt_slock);
        mp->mnt_wcnt++;
        simple_unlock(&mp->mnt_slock);

        lockmgr(&syncer_lock, LK_EXCLUSIVE, NULL);

        /* drop the reference; bail out if the mount vanished meanwhile */
        simple_lock(&mp->mnt_slock);
        mp->mnt_wcnt--;
        if (mp->mnt_wcnt == 0)
                wakeup(&mp->mnt_wcnt);
        gone = mp->mnt_iflag & IMNT_GONE;
        simple_unlock(&mp->mnt_slock);
        if (gone) {
                lockmgr(&syncer_lock, LK_RELEASE, NULL);
                return 0;
        }

        /*
         * microscopic race condition here (although not with the current
         * kernel), but can't really fix it without starting a crusade
         * against vfs_busy(), so let it be, let it be, let it be
         */

        /*
         * The only way vfs_busy() will fail for us is if the filesystem
         * is already a goner.
         * XXX: skating on the thin ice of modern calling conventions ...
         */
        if (vfs_busy(mp, 0, 0)) {
                lockmgr(&syncer_lock, LK_RELEASE, NULL);
                return 0;
        }

        /* Once we have the mount point, unmount() can't interfere */
        dounmount(mp, MNT_FORCE, l);

        return 0;
}
  660 
  661 static int puffsgetop(struct puffs_mount *, struct puffs_req *, int);
  662 static int puffsputop(struct puffs_mount *, struct puffs_req *);
  663 static int puffssizeop(struct puffs_mount *, struct puffs_sizeop *);
  664 
/*
 * ioctl dispatch for the puffs descriptor: this is the actual
 * kernel<->server transport.  GETOP fetches the next queued request,
 * PUTOP delivers a reply, SIZEOP handles size negotiations and
 * STARTOP completes the mount handshake.
 */
static int
puffs_fop_ioctl(struct file *fp, u_long cmd, void *data, struct lwp *l)
{
        struct puffs_mount *pmp = FPTOPMP(fp);
        int rv;

        /* not mounted yet, or already unmounted */
        if (pmp == PMP_EMBRYO || pmp == PMP_DEAD) {
                printf("puffs_fop_ioctl: puffs %p, not mounted\n", pmp);
                return ENOENT;
        }

        switch (cmd) {
        case PUFFSGETOP:
                rv = puffsgetop(pmp, data, fp->f_flag & FNONBLOCK);
                break;

        case PUFFSPUTOP:
                rv =  puffsputop(pmp, data);
                break;

        case PUFFSSIZEOP:
                rv = puffssizeop(pmp, data);
                break;

        case PUFFSSTARTOP:
                rv = puffs_start2(pmp, data);
                break;

        /* already done in sys_ioctl() */
        case FIONBIO:
                rv = 0;
                break;

        default:
                rv = EINVAL;
                break;
        }

        return rv;
}
  705 
/*
 * kqueue detach: unhook the knote from the instance's klist.
 * pi_lock also guards the klist contents.
 */
static void
filt_puffsdetach(struct knote *kn)
{
        struct puffs_instance *pi = kn->kn_hook;

        simple_lock(&pi_lock);
        SLIST_REMOVE(&pi->pi_sel.sel_klist, kn, knote, kn_selnext);
        simple_unlock(&pi_lock);
}
  715 
  716 static int
  717 filt_puffsioctl(struct knote *kn, long hint)
  718 {
  719         struct puffs_instance *pi = kn->kn_hook;
  720         struct puffs_mount *pmp;
  721         int error;
  722 
  723         error = 0;
  724         simple_lock(&pi_lock);
  725         pmp = pi->pi_pmp;
  726         if (pmp == PMP_EMBRYO || pmp == PMP_DEAD)
  727                 error = 1;
  728         simple_unlock(&pi_lock);
  729         if (error)
  730                 return 0;
  731 
  732         simple_lock(&pmp->pmp_lock);
  733         kn->kn_data = pmp->pmp_req_touser_waiters;
  734         simple_unlock(&pmp->pmp_lock);
  735 
  736         return kn->kn_data != 0;
  737 }
  738 
/* EVFILT_READ backend; first field presumably f_isfd -- verify against
 * this kernel's struct filterops layout */
static const struct filterops puffsioctl_filtops =
        { 1, NULL, filt_puffsdetach, filt_puffsioctl };
  741 
  742 static int
  743 puffs_fop_kqfilter(struct file *fp, struct knote *kn)
  744 {
  745         struct puffs_instance *pi = fp->f_data;
  746         struct klist *klist;
  747 
  748         if (kn->kn_filter != EVFILT_READ)
  749                 return 1;
  750 
  751         klist = &pi->pi_sel.sel_klist;
  752         kn->kn_fop = &puffsioctl_filtops;
  753         kn->kn_hook = pi;
  754 
  755         simple_lock(&pi_lock);
  756         SLIST_INSERT_HEAD(klist, kn, kn_selnext);
  757         simple_unlock(&pi_lock);
  758 
  759         return 0;
  760 }
  761 
  762 /*
  763  * ioctl handlers
  764  */
  765 
/*
 * PUFFSGETOP: hand the next queued request to the userspace server.
 *
 * Blocks (unless nonblock) until a request is available, copies its
 * header fields and payload out to the server's buffer, and either
 * parks the request on the replywait queue (for ops expecting a
 * reply) or frees it outright (fire-and-forget).
 */
static int
puffsgetop(struct puffs_mount *pmp, struct puffs_req *preq, int nonblock)
{
        struct puffs_park *park;
        int error;

        simple_lock(&pmp->pmp_lock);
 again:
        if (pmp->pmp_status != PUFFSTAT_RUNNING) {
                simple_unlock(&pmp->pmp_lock);
                return ENXIO;
        }
        if (TAILQ_EMPTY(&pmp->pmp_req_touser)) {
                if (nonblock) {
                        simple_unlock(&pmp->pmp_lock);
                        return EWOULDBLOCK;
                }
                /* sleep with pmp_lock as interlock; recheck on wakeup */
                ltsleep(&pmp->pmp_req_touser, PUSER, "puffs2", 0,
                    &pmp->pmp_lock);
                goto again;
        }

        park = TAILQ_FIRST(&pmp->pmp_req_touser);
        /* server's aux buffer must fit the payload we want to ship */
        if (preq->preq_auxlen < park->park_copylen) {
                simple_unlock(&pmp->pmp_lock);
                return E2BIG;
        }
        TAILQ_REMOVE(&pmp->pmp_req_touser, park, park_entries);
        pmp->pmp_req_touser_waiters--;
        simple_unlock(&pmp->pmp_lock);

        /* fill in the wire header for the server */
        preq->preq_id = park->park_id;
        preq->preq_opclass = park->park_opclass;
        preq->preq_optype = park->park_optype;
        preq->preq_cookie = park->park_cookie;
        preq->preq_auxlen = park->park_copylen;

        if ((error = copyout(park->park_kernbuf, preq->preq_aux,
            park->park_copylen)) != 0) {
                /*
                 * ok, user server is probably trying to cheat.
                 * stuff op back & return error to user
                 */
                 simple_lock(&pmp->pmp_lock);
                 TAILQ_INSERT_HEAD(&pmp->pmp_req_touser, park, park_entries);
                 simple_unlock(&pmp->pmp_lock);
                 return error;
        }

        if (PUFFSOP_WANTREPLY(park->park_opclass)) {
                /* park until the server replies via PUFFSPUTOP */
                simple_lock(&pmp->pmp_lock);
                TAILQ_INSERT_TAIL(&pmp->pmp_req_replywait, park, park_entries);
                simple_unlock(&pmp->pmp_lock);
        } else {
                /* fire-and-forget: we own the park and its buffer now */
                free(park->park_kernbuf, M_PUFFS);
                free(park, M_PUFFS);
        }

        return 0;
}
  826 
  827 static int
  828 puffsputop(struct puffs_mount *pmp, struct puffs_req *preq)
  829 {
  830         struct puffs_park *park;
  831         size_t copylen;
  832         int error;
  833 
  834         simple_lock(&pmp->pmp_lock);
  835         TAILQ_FOREACH(park, &pmp->pmp_req_replywait, park_entries) {
  836                 if (park->park_id == preq->preq_id) {
  837                         TAILQ_REMOVE(&pmp->pmp_req_replywait, park,
  838                             park_entries);
  839                         break;
  840                 }
  841         }
  842         simple_unlock(&pmp->pmp_lock);
  843 
  844         if (park == NULL)
  845                 return EINVAL;
  846 
  847         /*
  848          * check size of incoming transmission.  allow to allocate a
  849          * larger kernel buffer only if it was specified by the caller
  850          * by setting preq->preq_auxadj.  Else, just copy whatever the
  851          * kernel buffer size is unless.
  852          *
  853          * However, don't allow ludicrously large buffers
  854          */
  855         copylen = preq->preq_auxlen;
  856         if (copylen > pmp->pmp_req_maxsize) {
  857 #ifdef DIAGNOSTIC
  858                 printf("puffsputop: outrageous user buf size: %zu\n", copylen);
  859 #endif
  860                 error = EFAULT;
  861                 goto out;
  862         }
  863 
  864         if (park->park_buflen < copylen &&
  865             park->park_flags & PUFFS_REQFLAG_ADJBUF) {
  866                 free(park->park_kernbuf, M_PUFFS);
  867                 park->park_kernbuf = malloc(copylen, M_PUFFS, M_WAITOK);
  868                 park->park_buflen = copylen;
  869         }
  870 
  871         error = copyin(preq->preq_aux, park->park_kernbuf, copylen);
  872 
  873         /*
  874          * if copyin botched, inform both userspace and the vnodeop
  875          * desperately waiting for information
  876          */
  877  out:
  878         if (error)
  879                 park->park_rv = error;
  880         else
  881                 park->park_rv = preq->preq_rv;
  882         wakeup(park);
  883 
  884         return error;
  885 }
  886 
  887 /* this is probably going to die away at some point? */
  888 static int
  889 puffssizeop(struct puffs_mount *pmp, struct puffs_sizeop *psop_user)
  890 {
  891         struct puffs_sizepark *pspark;
  892         void *kernbuf;
  893         size_t copylen;
  894         int error;
  895 
  896         /* locate correct op */
  897         simple_lock(&pmp->pmp_lock);
  898         TAILQ_FOREACH(pspark, &pmp->pmp_req_sizepark, pkso_entries) {
  899                 if (pspark->pkso_reqid == psop_user->pso_reqid) {
  900                         TAILQ_REMOVE(&pmp->pmp_req_sizepark, pspark,
  901                             pkso_entries);
  902                         break;
  903                 }
  904         }
  905         simple_unlock(&pmp->pmp_lock);
  906 
  907         if (pspark == NULL)
  908                 return EINVAL;
  909 
  910         error = 0;
  911         copylen = MIN(pspark->pkso_bufsize, psop_user->pso_bufsize);
  912 
  913         /*
  914          * XXX: uvm stuff to avoid bouncy-bouncy copying?
  915          */
  916         if (PUFFS_SIZEOP_UIO(pspark->pkso_reqtype)) {
  917                 kernbuf = malloc(copylen, M_PUFFS, M_WAITOK | M_ZERO);
  918                 if (pspark->pkso_reqtype == PUFFS_SIZEOPREQ_UIO_IN) {
  919                         error = copyin(psop_user->pso_userbuf,
  920                             kernbuf, copylen);
  921                         if (error) {
  922                                 printf("psop ERROR1 %d\n", error);
  923                                 goto escape;
  924                         }
  925                 }
  926                 error = uiomove(kernbuf, copylen, pspark->pkso_uio);
  927                 if (error) {
  928                         printf("uiomove from kernel %p, len %d failed: %d\n",
  929                             kernbuf, (int)copylen, error);
  930                         goto escape;
  931                 }
  932                         
  933                 if (pspark->pkso_reqtype == PUFFS_SIZEOPREQ_UIO_OUT) {
  934                         error = copyout(kernbuf,
  935                             psop_user->pso_userbuf, copylen);
  936                         if (error) {
  937                                 printf("psop ERROR2 %d\n", error);
  938                                 goto escape;
  939                         }
  940                 }
  941  escape:
  942                 free(kernbuf, M_PUFFS);
  943         } else if (PUFFS_SIZEOP_BUF(pspark->pkso_reqtype)) {
  944                 copylen = MAX(pspark->pkso_bufsize, psop_user->pso_bufsize);
  945                 if (pspark->pkso_reqtype == PUFFS_SIZEOPREQ_BUF_IN) {
  946                         error = copyin(psop_user->pso_userbuf,
  947                         pspark->pkso_copybuf, copylen);
  948                 } else {
  949                         error = copyout(pspark->pkso_copybuf,
  950                             psop_user->pso_userbuf, copylen);
  951                 }
  952         }
  953 #ifdef DIAGNOSTIC
  954         else
  955                 panic("puffssizeop: invalid reqtype %d\n",
  956                     pspark->pkso_reqtype);
  957 #endif /* DIAGNOSTIC */
  958 
  959         return error;
  960 }

Cache object: 052be4d62f43d663943f781f662ceea5


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.