FreeBSD/Linux Kernel Cross Reference
sys/fs/devfs/devfs_vnops.c

    1 /*-
    2  * Copyright (c) 2000-2004
    3  *      Poul-Henning Kamp.  All rights reserved.
    4  * Copyright (c) 1989, 1992-1993, 1995
    5  *      The Regents of the University of California.  All rights reserved.
    6  *
    7  * This code is derived from software donated to Berkeley by
    8  * Jan-Simon Pendry.
    9  *
   10  * Redistribution and use in source and binary forms, with or without
   11  * modification, are permitted provided that the following conditions
   12  * are met:
   13  * 1. Redistributions of source code must retain the above copyright
   14  *    notice, this list of conditions and the following disclaimer.
   15  * 2. Neither the name of the University nor the names of its contributors
   16  *    may be used to endorse or promote products derived from this software
   17  *    without specific prior written permission.
   18  *
   19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   29  * SUCH DAMAGE.
   30  *
   31  *      @(#)kernfs_vnops.c      8.15 (Berkeley) 5/21/95
   32  * From: FreeBSD: src/sys/miscfs/kernfs/kernfs_vnops.c 1.43
   33  *
   34  * $FreeBSD: releng/11.0/sys/fs/devfs/devfs_vnops.c 301928 2016-06-15 15:55:14Z kib $
   35  */
   36 
   37 /*
   38  * TODO:
    39  *      mkdir: want it?
   40  */
   41 
   42 #include <sys/param.h>
   43 #include <sys/systm.h>
   44 #include <sys/conf.h>
   45 #include <sys/dirent.h>
   46 #include <sys/fcntl.h>
   47 #include <sys/file.h>
   48 #include <sys/filedesc.h>
   49 #include <sys/filio.h>
   50 #include <sys/jail.h>
   51 #include <sys/kernel.h>
   52 #include <sys/lock.h>
   53 #include <sys/malloc.h>
   54 #include <sys/mman.h>
   55 #include <sys/mount.h>
   56 #include <sys/namei.h>
   57 #include <sys/priv.h>
   58 #include <sys/proc.h>
   59 #include <sys/stat.h>
   60 #include <sys/sx.h>
   61 #include <sys/sysctl.h>
   62 #include <sys/time.h>
   63 #include <sys/ttycom.h>
   64 #include <sys/unistd.h>
   65 #include <sys/vnode.h>
   66 
   67 static struct vop_vector devfs_vnodeops;
   68 static struct vop_vector devfs_specops;
   69 static struct fileops devfs_ops_f;
   70 
   71 #include <fs/devfs/devfs.h>
   72 #include <fs/devfs/devfs_int.h>
   73 
   74 #include <security/mac/mac_framework.h>
   75 
   76 #include <vm/vm.h>
   77 #include <vm/vm_extern.h>
   78 #include <vm/vm_object.h>
   79 
   80 static MALLOC_DEFINE(M_CDEVPDATA, "DEVFSP", "Metainfo for cdev-fp data");
   81 
   82 struct mtx      devfs_de_interlock;
   83 MTX_SYSINIT(devfs_de_interlock, &devfs_de_interlock, "devfs interlock", MTX_DEF);
   84 struct sx       clone_drain_lock;
   85 SX_SYSINIT(clone_drain_lock, &clone_drain_lock, "clone events drain lock");
   86 struct mtx      cdevpriv_mtx;
   87 MTX_SYSINIT(cdevpriv_mtx, &cdevpriv_mtx, "cdevpriv lock", MTX_DEF);
   88 
   89 SYSCTL_DECL(_vfs_devfs);
   90 
   91 static int devfs_dotimes;
   92 SYSCTL_INT(_vfs_devfs, OID_AUTO, dotimes, CTLFLAG_RW,
   93     &devfs_dotimes, 0, "Update timestamps on DEVFS with default precision");
   94 
   95 /*
   96  * Update devfs node timestamp.  Note that updates are unlocked and
   97  * stat(2) could see partially updated times.
   98  */
   99 static void
  100 devfs_timestamp(struct timespec *tsp)
  101 {
  102         time_t ts;
  103 
  104         if (devfs_dotimes) {
  105                 vfs_timestamp(tsp);
  106         } else {
  107                 ts = time_second;
  108                 if (tsp->tv_sec != ts) {
  109                         tsp->tv_sec = ts;
  110                         tsp->tv_nsec = 0;
  111                 }
  112         }
  113 }
  114 
  115 static int
  116 devfs_fp_check(struct file *fp, struct cdev **devp, struct cdevsw **dswp,
  117     int *ref)
  118 {
  119 
  120         *dswp = devvn_refthread(fp->f_vnode, devp, ref);
  121         if (*devp != fp->f_data) {
  122                 if (*dswp != NULL)
  123                         dev_relthread(*devp, *ref);
  124                 return (ENXIO);
  125         }
  126         KASSERT((*devp)->si_refcount > 0,
  127             ("devfs: un-referenced struct cdev *(%s)", devtoname(*devp)));
  128         if (*dswp == NULL)
  129                 return (ENXIO);
  130         curthread->td_fpop = fp;
  131         return (0);
  132 }
  133 
  134 int
  135 devfs_get_cdevpriv(void **datap)
  136 {
  137         struct file *fp;
  138         struct cdev_privdata *p;
  139         int error;
  140 
  141         fp = curthread->td_fpop;
  142         if (fp == NULL)
  143                 return (EBADF);
  144         p = fp->f_cdevpriv;
  145         if (p != NULL) {
  146                 error = 0;
  147                 *datap = p->cdpd_data;
  148         } else
  149                 error = ENOENT;
  150         return (error);
  151 }
  152 
  153 int
  154 devfs_set_cdevpriv(void *priv, d_priv_dtor_t *priv_dtr)
  155 {
  156         struct file *fp;
  157         struct cdev_priv *cdp;
  158         struct cdev_privdata *p;
  159         int error;
  160 
  161         fp = curthread->td_fpop;
  162         if (fp == NULL)
  163                 return (ENOENT);
  164         cdp = cdev2priv((struct cdev *)fp->f_data);
  165         p = malloc(sizeof(struct cdev_privdata), M_CDEVPDATA, M_WAITOK);
  166         p->cdpd_data = priv;
  167         p->cdpd_dtr = priv_dtr;
  168         p->cdpd_fp = fp;
  169         mtx_lock(&cdevpriv_mtx);
  170         if (fp->f_cdevpriv == NULL) {
  171                 LIST_INSERT_HEAD(&cdp->cdp_fdpriv, p, cdpd_list);
  172                 fp->f_cdevpriv = p;
  173                 mtx_unlock(&cdevpriv_mtx);
  174                 error = 0;
  175         } else {
  176                 mtx_unlock(&cdevpriv_mtx);
  177                 free(p, M_CDEVPDATA);
  178                 error = EBUSY;
  179         }
  180         return (error);
  181 }
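
/*
 * Illustrative sketch (not part of this file): how a driver typically
 * consumes the cdevpriv(9) interface above.  A d_open method attaches
 * per-descriptor state with devfs_set_cdevpriv(), later cdevsw methods
 * fetch it with devfs_get_cdevpriv(), and the destructor runs when the
 * file is finally closed.  The names "mydev_softc", "mydev_open" and
 * "mydev_read" are hypothetical.
 */
#if 0
struct mydev_softc {
        int     unit;
};

static void
mydev_priv_dtor(void *data)
{

        free(data, M_TEMP);
}

static int
mydev_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
        struct mydev_softc *sc;
        int error;

        sc = malloc(sizeof(*sc), M_TEMP, M_WAITOK | M_ZERO);
        error = devfs_set_cdevpriv(sc, mydev_priv_dtor);
        if (error != 0)
                free(sc, M_TEMP);
        return (error);
}

static int
mydev_read(struct cdev *dev, struct uio *uio, int ioflag)
{
        struct mydev_softc *sc;
        int error;

        error = devfs_get_cdevpriv((void **)&sc);
        if (error != 0)
                return (error);
        /* ... use the per-open state in sc ... */
        return (0);
}
#endif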
  182 
  183 void
  184 devfs_destroy_cdevpriv(struct cdev_privdata *p)
  185 {
  186 
  187         mtx_assert(&cdevpriv_mtx, MA_OWNED);
  188         KASSERT(p->cdpd_fp->f_cdevpriv == p,
   189             ("devfs_destroy_cdevpriv %p != %p", p->cdpd_fp->f_cdevpriv, p));
  190         p->cdpd_fp->f_cdevpriv = NULL;
  191         LIST_REMOVE(p, cdpd_list);
  192         mtx_unlock(&cdevpriv_mtx);
  193         (p->cdpd_dtr)(p->cdpd_data);
  194         free(p, M_CDEVPDATA);
  195 }
  196 
  197 static void
  198 devfs_fpdrop(struct file *fp)
  199 {
  200         struct cdev_privdata *p;
  201 
  202         mtx_lock(&cdevpriv_mtx);
  203         if ((p = fp->f_cdevpriv) == NULL) {
  204                 mtx_unlock(&cdevpriv_mtx);
  205                 return;
  206         }
  207         devfs_destroy_cdevpriv(p);
  208 }
  209 
  210 void
  211 devfs_clear_cdevpriv(void)
  212 {
  213         struct file *fp;
  214 
  215         fp = curthread->td_fpop;
  216         if (fp == NULL)
  217                 return;
  218         devfs_fpdrop(fp);
  219 }
  220 
  221 /*
  222  * On success devfs_populate_vp() returns with dmp->dm_lock held.
  223  */
  224 static int
  225 devfs_populate_vp(struct vnode *vp)
  226 {
  227         struct devfs_dirent *de;
  228         struct devfs_mount *dmp;
  229         int locked;
  230 
  231         ASSERT_VOP_LOCKED(vp, "devfs_populate_vp");
  232 
  233         dmp = VFSTODEVFS(vp->v_mount);
  234         locked = VOP_ISLOCKED(vp);
  235 
  236         sx_xlock(&dmp->dm_lock);
  237         DEVFS_DMP_HOLD(dmp);
  238 
  239         /* Can't call devfs_populate() with the vnode lock held. */
  240         VOP_UNLOCK(vp, 0);
  241         devfs_populate(dmp);
  242 
  243         sx_xunlock(&dmp->dm_lock);
  244         vn_lock(vp, locked | LK_RETRY);
  245         sx_xlock(&dmp->dm_lock);
  246         if (DEVFS_DMP_DROP(dmp)) {
  247                 sx_xunlock(&dmp->dm_lock);
  248                 devfs_unmount_final(dmp);
  249                 return (ERESTART);
  250         }
  251         if ((vp->v_iflag & VI_DOOMED) != 0) {
  252                 sx_xunlock(&dmp->dm_lock);
  253                 return (ERESTART);
  254         }
  255         de = vp->v_data;
  256         KASSERT(de != NULL,
  257             ("devfs_populate_vp: vp->v_data == NULL but vnode not doomed"));
  258         if ((de->de_flags & DE_DOOMED) != 0) {
  259                 sx_xunlock(&dmp->dm_lock);
  260                 return (ERESTART);
  261         }
  262 
  263         return (0);
  264 }
  265 
  266 static int
  267 devfs_vptocnp(struct vop_vptocnp_args *ap)
  268 {
  269         struct vnode *vp = ap->a_vp;
  270         struct vnode **dvp = ap->a_vpp;
  271         struct devfs_mount *dmp;
  272         char *buf = ap->a_buf;
  273         int *buflen = ap->a_buflen;
  274         struct devfs_dirent *dd, *de;
  275         int i, error;
  276 
  277         dmp = VFSTODEVFS(vp->v_mount);
  278 
  279         error = devfs_populate_vp(vp);
  280         if (error != 0)
  281                 return (error);
  282 
  283         i = *buflen;
  284         dd = vp->v_data;
  285 
  286         if (vp->v_type == VCHR) {
  287                 i -= strlen(dd->de_cdp->cdp_c.si_name);
  288                 if (i < 0) {
  289                         error = ENOMEM;
  290                         goto finished;
  291                 }
  292                 bcopy(dd->de_cdp->cdp_c.si_name, buf + i,
  293                     strlen(dd->de_cdp->cdp_c.si_name));
  294                 de = dd->de_dir;
  295         } else if (vp->v_type == VDIR) {
  296                 if (dd == dmp->dm_rootdir) {
  297                         *dvp = vp;
  298                         vref(*dvp);
  299                         goto finished;
  300                 }
  301                 i -= dd->de_dirent->d_namlen;
  302                 if (i < 0) {
  303                         error = ENOMEM;
  304                         goto finished;
  305                 }
  306                 bcopy(dd->de_dirent->d_name, buf + i,
  307                     dd->de_dirent->d_namlen);
  308                 de = dd;
  309         } else {
  310                 error = ENOENT;
  311                 goto finished;
  312         }
  313         *buflen = i;
  314         de = devfs_parent_dirent(de);
  315         if (de == NULL) {
  316                 error = ENOENT;
  317                 goto finished;
  318         }
  319         mtx_lock(&devfs_de_interlock);
  320         *dvp = de->de_vnode;
  321         if (*dvp != NULL) {
  322                 VI_LOCK(*dvp);
  323                 mtx_unlock(&devfs_de_interlock);
  324                 vholdl(*dvp);
  325                 VI_UNLOCK(*dvp);
  326                 vref(*dvp);
  327                 vdrop(*dvp);
  328         } else {
  329                 mtx_unlock(&devfs_de_interlock);
  330                 error = ENOENT;
  331         }
  332 finished:
  333         sx_xunlock(&dmp->dm_lock);
  334         return (error);
  335 }
  336 
  337 /*
  338  * Construct the fully qualified path name relative to the mountpoint.
  339  * If a NULL cnp is provided, no '/' is appended to the resulting path.
  340  */
  341 char *
  342 devfs_fqpn(char *buf, struct devfs_mount *dmp, struct devfs_dirent *dd,
  343     struct componentname *cnp)
  344 {
  345         int i;
  346         struct devfs_dirent *de;
  347 
  348         sx_assert(&dmp->dm_lock, SA_LOCKED);
  349 
  350         i = SPECNAMELEN;
  351         buf[i] = '\0';
  352         if (cnp != NULL)
  353                 i -= cnp->cn_namelen;
  354         if (i < 0)
  355                  return (NULL);
  356         if (cnp != NULL)
  357                 bcopy(cnp->cn_nameptr, buf + i, cnp->cn_namelen);
  358         de = dd;
  359         while (de != dmp->dm_rootdir) {
  360                 if (cnp != NULL || i < SPECNAMELEN) {
  361                         i--;
  362                         if (i < 0)
  363                                  return (NULL);
  364                         buf[i] = '/';
  365                 }
  366                 i -= de->de_dirent->d_namlen;
  367                 if (i < 0)
  368                          return (NULL);
  369                 bcopy(de->de_dirent->d_name, buf + i,
  370                     de->de_dirent->d_namlen);
  371                 de = devfs_parent_dirent(de);
  372                 if (de == NULL)
  373                         return (NULL);
  374         }
  375         return (buf + i);
  376 }
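
/*
 * Illustrative sketch (not part of this file): callers supply a buffer
 * of SPECNAMELEN + 1 bytes and use the returned pointer, which points
 * into that buffer, as devfs_lookupx() below does:
 *
 *      char specname[SPECNAMELEN + 1], *pname;
 *
 *      pname = devfs_fqpn(specname, dmp, dd, cnp);
 *      if (pname != NULL)
 *              ... pname is the path relative to the devfs mountpoint ...
 */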
  377 
  378 static int
  379 devfs_allocv_drop_refs(int drop_dm_lock, struct devfs_mount *dmp,
  380         struct devfs_dirent *de)
  381 {
  382         int not_found;
  383 
  384         not_found = 0;
  385         if (de->de_flags & DE_DOOMED)
  386                 not_found = 1;
  387         if (DEVFS_DE_DROP(de)) {
  388                 KASSERT(not_found == 1, ("DEVFS de dropped but not doomed"));
  389                 devfs_dirent_free(de);
  390         }
  391         if (DEVFS_DMP_DROP(dmp)) {
  392                 KASSERT(not_found == 1,
  393                         ("DEVFS mount struct freed before dirent"));
  394                 not_found = 2;
  395                 sx_xunlock(&dmp->dm_lock);
  396                 devfs_unmount_final(dmp);
  397         }
  398         if (not_found == 1 || (drop_dm_lock && not_found != 2))
  399                 sx_unlock(&dmp->dm_lock);
  400         return (not_found);
  401 }
  402 
  403 static void
  404 devfs_insmntque_dtr(struct vnode *vp, void *arg)
  405 {
  406         struct devfs_dirent *de;
  407 
  408         de = (struct devfs_dirent *)arg;
  409         mtx_lock(&devfs_de_interlock);
  410         vp->v_data = NULL;
  411         de->de_vnode = NULL;
  412         mtx_unlock(&devfs_de_interlock);
  413         vgone(vp);
  414         vput(vp);
  415 }
  416 
  417 /*
  418  * devfs_allocv shall be entered with dmp->dm_lock held, and it drops
  419  * it on return.
  420  */
  421 int
  422 devfs_allocv(struct devfs_dirent *de, struct mount *mp, int lockmode,
  423     struct vnode **vpp)
  424 {
  425         int error;
  426         struct vnode *vp;
  427         struct cdev *dev;
  428         struct devfs_mount *dmp;
  429         struct cdevsw *dsw;
  430 
  431         dmp = VFSTODEVFS(mp);
  432         if (de->de_flags & DE_DOOMED) {
  433                 sx_xunlock(&dmp->dm_lock);
  434                 return (ENOENT);
  435         }
  436 loop:
  437         DEVFS_DE_HOLD(de);
  438         DEVFS_DMP_HOLD(dmp);
  439         mtx_lock(&devfs_de_interlock);
  440         vp = de->de_vnode;
  441         if (vp != NULL) {
  442                 VI_LOCK(vp);
  443                 mtx_unlock(&devfs_de_interlock);
  444                 sx_xunlock(&dmp->dm_lock);
  445                 vget(vp, lockmode | LK_INTERLOCK | LK_RETRY, curthread);
  446                 sx_xlock(&dmp->dm_lock);
  447                 if (devfs_allocv_drop_refs(0, dmp, de)) {
  448                         vput(vp);
  449                         return (ENOENT);
  450                 }
  451                 else if ((vp->v_iflag & VI_DOOMED) != 0) {
  452                         mtx_lock(&devfs_de_interlock);
  453                         if (de->de_vnode == vp) {
  454                                 de->de_vnode = NULL;
  455                                 vp->v_data = NULL;
  456                         }
  457                         mtx_unlock(&devfs_de_interlock);
  458                         vput(vp);
  459                         goto loop;
  460                 }
  461                 sx_xunlock(&dmp->dm_lock);
  462                 *vpp = vp;
  463                 return (0);
  464         }
  465         mtx_unlock(&devfs_de_interlock);
  466         if (de->de_dirent->d_type == DT_CHR) {
  467                 if (!(de->de_cdp->cdp_flags & CDP_ACTIVE)) {
  468                         devfs_allocv_drop_refs(1, dmp, de);
  469                         return (ENOENT);
  470                 }
  471                 dev = &de->de_cdp->cdp_c;
  472         } else {
  473                 dev = NULL;
  474         }
  475         error = getnewvnode("devfs", mp, &devfs_vnodeops, &vp);
  476         if (error != 0) {
  477                 devfs_allocv_drop_refs(1, dmp, de);
  478                 printf("devfs_allocv: failed to allocate new vnode\n");
  479                 return (error);
  480         }
  481 
  482         if (de->de_dirent->d_type == DT_CHR) {
  483                 vp->v_type = VCHR;
  484                 VI_LOCK(vp);
  485                 dev_lock();
  486                 dev_refl(dev);
   487                 /* XXX: v_rdev should be protected by the vnode lock */
  488                 vp->v_rdev = dev;
  489                 KASSERT(vp->v_usecount == 1,
  490                     ("%s %d (%d)\n", __func__, __LINE__, vp->v_usecount));
  491                 dev->si_usecount += vp->v_usecount;
  492                 /* Special casing of ttys for deadfs.  Probably redundant. */
  493                 dsw = dev->si_devsw;
  494                 if (dsw != NULL && (dsw->d_flags & D_TTY) != 0)
  495                         vp->v_vflag |= VV_ISTTY;
  496                 dev_unlock();
  497                 VI_UNLOCK(vp);
  498                 if ((dev->si_flags & SI_ETERNAL) != 0)
  499                         vp->v_vflag |= VV_ETERNALDEV;
  500                 vp->v_op = &devfs_specops;
  501         } else if (de->de_dirent->d_type == DT_DIR) {
  502                 vp->v_type = VDIR;
  503         } else if (de->de_dirent->d_type == DT_LNK) {
  504                 vp->v_type = VLNK;
  505         } else {
  506                 vp->v_type = VBAD;
  507         }
  508         vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWITNESS);
  509         VN_LOCK_ASHARE(vp);
  510         mtx_lock(&devfs_de_interlock);
  511         vp->v_data = de;
  512         de->de_vnode = vp;
  513         mtx_unlock(&devfs_de_interlock);
  514         error = insmntque1(vp, mp, devfs_insmntque_dtr, de);
  515         if (error != 0) {
  516                 (void) devfs_allocv_drop_refs(1, dmp, de);
  517                 return (error);
  518         }
  519         if (devfs_allocv_drop_refs(0, dmp, de)) {
  520                 vput(vp);
  521                 return (ENOENT);
  522         }
  523 #ifdef MAC
  524         mac_devfs_vnode_associate(mp, de, vp);
  525 #endif
  526         sx_xunlock(&dmp->dm_lock);
  527         *vpp = vp;
  528         return (0);
  529 }
  530 
  531 static int
  532 devfs_access(struct vop_access_args *ap)
  533 {
  534         struct vnode *vp = ap->a_vp;
  535         struct devfs_dirent *de;
  536         struct proc *p;
  537         int error;
  538 
  539         de = vp->v_data;
  540         if (vp->v_type == VDIR)
  541                 de = de->de_dir;
  542 
  543         error = vaccess(vp->v_type, de->de_mode, de->de_uid, de->de_gid,
  544             ap->a_accmode, ap->a_cred, NULL);
  545         if (error == 0)
  546                 return (0);
  547         if (error != EACCES)
  548                 return (error);
  549         p = ap->a_td->td_proc;
  550         /* We do, however, allow access to the controlling terminal */
  551         PROC_LOCK(p);
  552         if (!(p->p_flag & P_CONTROLT)) {
  553                 PROC_UNLOCK(p);
  554                 return (error);
  555         }
  556         if (p->p_session->s_ttydp == de->de_cdp)
  557                 error = 0;
  558         PROC_UNLOCK(p);
  559         return (error);
  560 }
  561 
  562 _Static_assert(((FMASK | FCNTLFLAGS) & (FLASTCLOSE | FREVOKE)) == 0,
  563     "devfs-only flag reuse failed");
  564 
  565 static int
  566 devfs_close(struct vop_close_args *ap)
  567 {
  568         struct vnode *vp = ap->a_vp, *oldvp;
  569         struct thread *td = ap->a_td;
  570         struct proc *p;
  571         struct cdev *dev = vp->v_rdev;
  572         struct cdevsw *dsw;
  573         int dflags, error, ref, vp_locked;
  574 
  575         /*
  576          * XXX: Don't call d_close() if we were called because of
  577          * XXX: insmntque1() failure.
  578          */
  579         if (vp->v_data == NULL)
  580                 return (0);
  581 
  582         /*
  583          * Hack: a tty device that is a controlling terminal
  584          * has a reference from the session structure.
  585          * We cannot easily tell that a character device is
  586          * a controlling terminal, unless it is the closing
  587          * process' controlling terminal.  In that case,
  588          * if the reference count is 2 (this last descriptor
  589          * plus the session), release the reference from the session.
  590          */
  591         if (td != NULL) {
  592                 p = td->td_proc;
  593                 PROC_LOCK(p);
  594                 if (vp == p->p_session->s_ttyvp) {
  595                         PROC_UNLOCK(p);
  596                         oldvp = NULL;
  597                         sx_xlock(&proctree_lock);
  598                         if (vp == p->p_session->s_ttyvp) {
  599                                 SESS_LOCK(p->p_session);
  600                                 VI_LOCK(vp);
  601                                 if (count_dev(dev) == 2 &&
  602                                     (vp->v_iflag & VI_DOOMED) == 0) {
  603                                         p->p_session->s_ttyvp = NULL;
  604                                         p->p_session->s_ttydp = NULL;
  605                                         oldvp = vp;
  606                                 }
  607                                 VI_UNLOCK(vp);
  608                                 SESS_UNLOCK(p->p_session);
  609                         }
  610                         sx_xunlock(&proctree_lock);
  611                         if (oldvp != NULL)
  612                                 vrele(oldvp);
  613                 } else
  614                         PROC_UNLOCK(p);
  615         }
  616         /*
  617          * We do not want to really close the device if it
  618          * is still in use unless we are trying to close it
  619          * forcibly. Since every use (buffer, vnode, swap, cmap)
  620          * holds a reference to the vnode, and because we mark
  621          * any other vnodes that alias this device, when the
  622          * sum of the reference counts on all the aliased
  623          * vnodes descends to one, we are on last close.
  624          */
  625         dsw = dev_refthread(dev, &ref);
  626         if (dsw == NULL)
  627                 return (ENXIO);
  628         dflags = 0;
  629         VI_LOCK(vp);
  630         if (vp->v_iflag & VI_DOOMED) {
  631                 /* Forced close. */
  632                 dflags |= FREVOKE | FNONBLOCK;
  633         } else if (dsw->d_flags & D_TRACKCLOSE) {
  634                 /* Keep device updated on status. */
  635         } else if (count_dev(dev) > 1) {
  636                 VI_UNLOCK(vp);
  637                 dev_relthread(dev, ref);
  638                 return (0);
  639         }
  640         if (count_dev(dev) == 1)
  641                 dflags |= FLASTCLOSE;
  642         vholdl(vp);
  643         VI_UNLOCK(vp);
  644         vp_locked = VOP_ISLOCKED(vp);
  645         VOP_UNLOCK(vp, 0);
  646         KASSERT(dev->si_refcount > 0,
  647             ("devfs_close() on un-referenced struct cdev *(%s)", devtoname(dev)));
  648         error = dsw->d_close(dev, ap->a_fflag | dflags, S_IFCHR, td);
  649         dev_relthread(dev, ref);
  650         vn_lock(vp, vp_locked | LK_RETRY);
  651         vdrop(vp);
  652         return (error);
  653 }
  654 
  655 static int
  656 devfs_close_f(struct file *fp, struct thread *td)
  657 {
  658         int error;
  659         struct file *fpop;
  660 
  661         /*
  662          * NB: td may be NULL if this descriptor is closed due to
  663          * garbage collection from a closed UNIX domain socket.
  664          */
  665         fpop = curthread->td_fpop;
  666         curthread->td_fpop = fp;
  667         error = vnops.fo_close(fp, td);
  668         curthread->td_fpop = fpop;
  669 
  670         /*
  671          * The f_cdevpriv cannot be assigned non-NULL value while we
  672          * are destroying the file.
  673          */
  674         if (fp->f_cdevpriv != NULL)
  675                 devfs_fpdrop(fp);
  676         return (error);
  677 }
  678 
  679 static int
  680 devfs_fsync(struct vop_fsync_args *ap)
  681 {
  682         int error;
  683         struct bufobj *bo;
  684         struct devfs_dirent *de;
  685 
  686         if (!vn_isdisk(ap->a_vp, &error)) {
  687                 bo = &ap->a_vp->v_bufobj;
  688                 de = ap->a_vp->v_data;
  689                 if (error == ENXIO && bo->bo_dirty.bv_cnt > 0) {
  690                         printf("Device %s went missing before all of the data "
  691                             "could be written to it; expect data loss.\n",
  692                             de->de_dirent->d_name);
  693 
  694                         error = vop_stdfsync(ap);
  695                         if (bo->bo_dirty.bv_cnt != 0 || error != 0)
  696                                 panic("devfs_fsync: vop_stdfsync failed.");
  697                 }
  698 
  699                 return (0);
  700         }
  701 
  702         return (vop_stdfsync(ap));
  703 }
  704 
  705 static int
  706 devfs_getattr(struct vop_getattr_args *ap)
  707 {
  708         struct vnode *vp = ap->a_vp;
  709         struct vattr *vap = ap->a_vap;
  710         int error;
  711         struct devfs_dirent *de;
  712         struct devfs_mount *dmp;
  713         struct cdev *dev;
  714 
  715         error = devfs_populate_vp(vp);
  716         if (error != 0)
  717                 return (error);
  718 
  719         dmp = VFSTODEVFS(vp->v_mount);
  720         sx_xunlock(&dmp->dm_lock);
  721 
  722         de = vp->v_data;
  723         KASSERT(de != NULL, ("Null dirent in devfs_getattr vp=%p", vp));
  724         if (vp->v_type == VDIR) {
  725                 de = de->de_dir;
  726                 KASSERT(de != NULL,
  727                     ("Null dir dirent in devfs_getattr vp=%p", vp));
  728         }
  729         vap->va_uid = de->de_uid;
  730         vap->va_gid = de->de_gid;
  731         vap->va_mode = de->de_mode;
  732         if (vp->v_type == VLNK)
  733                 vap->va_size = strlen(de->de_symlink);
  734         else if (vp->v_type == VDIR)
  735                 vap->va_size = vap->va_bytes = DEV_BSIZE;
  736         else
  737                 vap->va_size = 0;
  738         if (vp->v_type != VDIR)
  739                 vap->va_bytes = 0;
  740         vap->va_blocksize = DEV_BSIZE;
  741         vap->va_type = vp->v_type;
  742 
  743 #define fix(aa)                                                 \
  744         do {                                                    \
  745                 if ((aa).tv_sec <= 3600) {                      \
  746                         (aa).tv_sec = boottime.tv_sec;          \
  747                         (aa).tv_nsec = boottime.tv_usec * 1000; \
  748                 }                                               \
  749         } while (0)
  750 
  751         if (vp->v_type != VCHR)  {
  752                 fix(de->de_atime);
  753                 vap->va_atime = de->de_atime;
  754                 fix(de->de_mtime);
  755                 vap->va_mtime = de->de_mtime;
  756                 fix(de->de_ctime);
  757                 vap->va_ctime = de->de_ctime;
  758         } else {
  759                 dev = vp->v_rdev;
  760                 fix(dev->si_atime);
  761                 vap->va_atime = dev->si_atime;
  762                 fix(dev->si_mtime);
  763                 vap->va_mtime = dev->si_mtime;
  764                 fix(dev->si_ctime);
  765                 vap->va_ctime = dev->si_ctime;
  766 
  767                 vap->va_rdev = cdev2priv(dev)->cdp_inode;
  768         }
  769         vap->va_gen = 0;
  770         vap->va_flags = 0;
  771         vap->va_filerev = 0;
  772         vap->va_nlink = de->de_links;
  773         vap->va_fileid = de->de_inode;
  774 
  775         return (error);
  776 }
  777 
  778 /* ARGSUSED */
  779 static int
  780 devfs_ioctl_f(struct file *fp, u_long com, void *data, struct ucred *cred, struct thread *td)
  781 {
  782         struct cdev *dev;
  783         struct cdevsw *dsw;
  784         struct vnode *vp;
  785         struct vnode *vpold;
  786         int error, i, ref;
  787         const char *p;
  788         struct fiodgname_arg *fgn;
  789         struct file *fpop;
  790 
  791         fpop = td->td_fpop;
  792         error = devfs_fp_check(fp, &dev, &dsw, &ref);
  793         if (error != 0) {
  794                 error = vnops.fo_ioctl(fp, com, data, cred, td);
  795                 return (error);
  796         }
  797 
  798         if (com == FIODTYPE) {
  799                 *(int *)data = dsw->d_flags & D_TYPEMASK;
  800                 td->td_fpop = fpop;
  801                 dev_relthread(dev, ref);
  802                 return (0);
  803         } else if (com == FIODGNAME) {
  804                 fgn = data;
  805                 p = devtoname(dev);
  806                 i = strlen(p) + 1;
  807                 if (i > fgn->len)
  808                         error = EINVAL;
  809                 else
  810                         error = copyout(p, fgn->buf, i);
  811                 td->td_fpop = fpop;
  812                 dev_relthread(dev, ref);
  813                 return (error);
  814         }
  815         error = dsw->d_ioctl(dev, com, data, fp->f_flag, td);
  816         td->td_fpop = NULL;
  817         dev_relthread(dev, ref);
  818         if (error == ENOIOCTL)
  819                 error = ENOTTY;
  820         if (error == 0 && com == TIOCSCTTY) {
  821                 vp = fp->f_vnode;
  822 
  823                 /* Do nothing if reassigning same control tty */
  824                 sx_slock(&proctree_lock);
  825                 if (td->td_proc->p_session->s_ttyvp == vp) {
  826                         sx_sunlock(&proctree_lock);
  827                         return (0);
  828                 }
  829 
  830                 vpold = td->td_proc->p_session->s_ttyvp;
  831                 VREF(vp);
  832                 SESS_LOCK(td->td_proc->p_session);
  833                 td->td_proc->p_session->s_ttyvp = vp;
  834                 td->td_proc->p_session->s_ttydp = cdev2priv(dev);
  835                 SESS_UNLOCK(td->td_proc->p_session);
  836 
  837                 sx_sunlock(&proctree_lock);
  838 
  839                 /* Get rid of reference to old control tty */
  840                 if (vpold)
  841                         vrele(vpold);
  842         }
  843         return (error);
  844 }
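
/*
 * Illustrative sketch (not part of this file): the FIODGNAME case above
 * is how userland recovers the device name behind a descriptor (libc's
 * fdevname(3) is built on it).  The helper name "get_devname" is
 * hypothetical.
 */
#if 0
#include <sys/filio.h>
#include <sys/ioctl.h>

static int
get_devname(int fd, char *buf, int len)
{
        struct fiodgname_arg fgn;

        fgn.len = len;
        fgn.buf = buf;
        return (ioctl(fd, FIODGNAME, &fgn));
}
#endif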
  845 
  846 /* ARGSUSED */
  847 static int
  848 devfs_kqfilter_f(struct file *fp, struct knote *kn)
  849 {
  850         struct cdev *dev;
  851         struct cdevsw *dsw;
  852         int error, ref;
  853         struct file *fpop;
  854         struct thread *td;
  855 
  856         td = curthread;
  857         fpop = td->td_fpop;
  858         error = devfs_fp_check(fp, &dev, &dsw, &ref);
  859         if (error)
  860                 return (error);
  861         error = dsw->d_kqfilter(dev, kn);
  862         td->td_fpop = fpop;
  863         dev_relthread(dev, ref);
  864         return (error);
  865 }
  866 
  867 static inline int
  868 devfs_prison_check(struct devfs_dirent *de, struct thread *td)
  869 {
  870         struct cdev_priv *cdp;
  871         struct ucred *dcr;
  872         struct proc *p;
  873         int error;
  874 
  875         cdp = de->de_cdp;
  876         if (cdp == NULL)
  877                 return (0);
  878         dcr = cdp->cdp_c.si_cred;
  879         if (dcr == NULL)
  880                 return (0);
  881 
  882         error = prison_check(td->td_ucred, dcr);
  883         if (error == 0)
  884                 return (0);
  885         /* We do, however, allow access to the controlling terminal */
  886         p = td->td_proc;
  887         PROC_LOCK(p);
  888         if (!(p->p_flag & P_CONTROLT)) {
  889                 PROC_UNLOCK(p);
  890                 return (error);
  891         }
  892         if (p->p_session->s_ttydp == cdp)
  893                 error = 0;
  894         PROC_UNLOCK(p);
  895         return (error);
  896 }
  897 
  898 static int
  899 devfs_lookupx(struct vop_lookup_args *ap, int *dm_unlock)
  900 {
  901         struct componentname *cnp;
  902         struct vnode *dvp, **vpp;
  903         struct thread *td;
  904         struct devfs_dirent *de, *dd;
  905         struct devfs_dirent **dde;
  906         struct devfs_mount *dmp;
  907         struct cdev *cdev;
  908         int error, flags, nameiop, dvplocked;
  909         char specname[SPECNAMELEN + 1], *pname;
  910 
  911         cnp = ap->a_cnp;
  912         vpp = ap->a_vpp;
  913         dvp = ap->a_dvp;
  914         pname = cnp->cn_nameptr;
  915         td = cnp->cn_thread;
  916         flags = cnp->cn_flags;
  917         nameiop = cnp->cn_nameiop;
  918         dmp = VFSTODEVFS(dvp->v_mount);
  919         dd = dvp->v_data;
  920         *vpp = NULLVP;
  921 
  922         if ((flags & ISLASTCN) && nameiop == RENAME)
  923                 return (EOPNOTSUPP);
  924 
  925         if (dvp->v_type != VDIR)
  926                 return (ENOTDIR);
  927 
  928         if ((flags & ISDOTDOT) && (dvp->v_vflag & VV_ROOT))
  929                 return (EIO);
  930 
  931         error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, td);
  932         if (error)
  933                 return (error);
  934 
  935         if (cnp->cn_namelen == 1 && *pname == '.') {
  936                 if ((flags & ISLASTCN) && nameiop != LOOKUP)
  937                         return (EINVAL);
  938                 *vpp = dvp;
  939                 VREF(dvp);
  940                 return (0);
  941         }
  942 
  943         if (flags & ISDOTDOT) {
  944                 if ((flags & ISLASTCN) && nameiop != LOOKUP)
  945                         return (EINVAL);
  946                 de = devfs_parent_dirent(dd);
  947                 if (de == NULL)
  948                         return (ENOENT);
  949                 dvplocked = VOP_ISLOCKED(dvp);
  950                 VOP_UNLOCK(dvp, 0);
  951                 error = devfs_allocv(de, dvp->v_mount,
  952                     cnp->cn_lkflags & LK_TYPE_MASK, vpp);
  953                 *dm_unlock = 0;
  954                 vn_lock(dvp, dvplocked | LK_RETRY);
  955                 return (error);
  956         }
  957 
  958         dd = dvp->v_data;
  959         de = devfs_find(dd, cnp->cn_nameptr, cnp->cn_namelen, 0);
  960         while (de == NULL) {    /* While(...) so we can use break */
  961 
  962                 if (nameiop == DELETE)
  963                         return (ENOENT);
  964 
  965                 /*
  966                  * OK, we didn't have an entry for the name we were asked for
  967                  * so we try to see if anybody can create it on demand.
  968                  */
  969                 pname = devfs_fqpn(specname, dmp, dd, cnp);
  970                 if (pname == NULL)
  971                         break;
  972 
  973                 cdev = NULL;
  974                 DEVFS_DMP_HOLD(dmp);
  975                 sx_xunlock(&dmp->dm_lock);
  976                 sx_slock(&clone_drain_lock);
  977                 EVENTHANDLER_INVOKE(dev_clone,
  978                     td->td_ucred, pname, strlen(pname), &cdev);
  979                 sx_sunlock(&clone_drain_lock);
  980 
  981                 if (cdev == NULL)
  982                         sx_xlock(&dmp->dm_lock);
  983                 else if (devfs_populate_vp(dvp) != 0) {
  984                         *dm_unlock = 0;
  985                         sx_xlock(&dmp->dm_lock);
  986                         if (DEVFS_DMP_DROP(dmp)) {
  987                                 sx_xunlock(&dmp->dm_lock);
  988                                 devfs_unmount_final(dmp);
  989                         } else
  990                                 sx_xunlock(&dmp->dm_lock);
  991                         dev_rel(cdev);
  992                         return (ENOENT);
  993                 }
  994                 if (DEVFS_DMP_DROP(dmp)) {
  995                         *dm_unlock = 0;
  996                         sx_xunlock(&dmp->dm_lock);
  997                         devfs_unmount_final(dmp);
  998                         if (cdev != NULL)
  999                                 dev_rel(cdev);
 1000                         return (ENOENT);
 1001                 }
 1002 
 1003                 if (cdev == NULL)
 1004                         break;
 1005 
 1006                 dev_lock();
 1007                 dde = &cdev2priv(cdev)->cdp_dirents[dmp->dm_idx];
 1008                 if (dde != NULL && *dde != NULL)
 1009                         de = *dde;
 1010                 dev_unlock();
 1011                 dev_rel(cdev);
 1012                 break;
 1013         }
 1014 
 1015         if (de == NULL || de->de_flags & DE_WHITEOUT) {
 1016                 if ((nameiop == CREATE || nameiop == RENAME) &&
 1017                     (flags & (LOCKPARENT | WANTPARENT)) && (flags & ISLASTCN)) {
 1018                         cnp->cn_flags |= SAVENAME;
 1019                         return (EJUSTRETURN);
 1020                 }
 1021                 return (ENOENT);
 1022         }
 1023 
 1024         if (devfs_prison_check(de, td))
 1025                 return (ENOENT);
 1026 
 1027         if ((cnp->cn_nameiop == DELETE) && (flags & ISLASTCN)) {
 1028                 error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred, td);
 1029                 if (error)
 1030                         return (error);
 1031                 if (*vpp == dvp) {
 1032                         VREF(dvp);
 1033                         *vpp = dvp;
 1034                         return (0);
 1035                 }
 1036         }
 1037         error = devfs_allocv(de, dvp->v_mount, cnp->cn_lkflags & LK_TYPE_MASK,
 1038             vpp);
 1039         *dm_unlock = 0;
 1040         return (error);
 1041 }
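
/*
 * Illustrative sketch (not part of this file): a driver that wants its
 * device nodes created on demand registers a dev_clone handler, which
 * the lookup path above invokes through EVENTHANDLER_INVOKE(dev_clone,
 * ...) when no matching entry exists.  The names "myclone" and
 * "mydev_cdevsw" are hypothetical.
 */
#if 0
static void
myclone(void *arg, struct ucred *cred, char *name, int namelen,
    struct cdev **dev)
{
        int unit;

        if (*dev != NULL)
                return;
        if (dev_stdclone(name, NULL, "myclone", &unit) != 1)
                return;
        /* MAKEDEV_REF: the lookup code drops its reference with dev_rel(). */
        *dev = make_dev_credf(MAKEDEV_REF, &mydev_cdevsw, unit, cred,
            UID_ROOT, GID_WHEEL, 0600, "myclone%d", unit);
}

static eventhandler_tag myclone_tag;

static void
myclone_init(void *arg)
{

        myclone_tag = EVENTHANDLER_REGISTER(dev_clone, myclone, 0, 1000);
}
#endif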
 1042 
 1043 static int
 1044 devfs_lookup(struct vop_lookup_args *ap)
 1045 {
 1046         int j;
 1047         struct devfs_mount *dmp;
 1048         int dm_unlock;
 1049 
 1050         if (devfs_populate_vp(ap->a_dvp) != 0)
 1051                 return (ENOTDIR);
 1052 
 1053         dmp = VFSTODEVFS(ap->a_dvp->v_mount);
 1054         dm_unlock = 1;
 1055         j = devfs_lookupx(ap, &dm_unlock);
 1056         if (dm_unlock == 1)
 1057                 sx_xunlock(&dmp->dm_lock);
 1058         return (j);
 1059 }
 1060 
 1061 static int
 1062 devfs_mknod(struct vop_mknod_args *ap)
 1063 {
 1064         struct componentname *cnp;
 1065         struct vnode *dvp, **vpp;
 1066         struct devfs_dirent *dd, *de;
 1067         struct devfs_mount *dmp;
 1068         int error;
 1069 
 1070         /*
 1071          * The only type of node we should be creating here is a
 1072          * character device, for anything else return EOPNOTSUPP.
 1073          */
 1074         if (ap->a_vap->va_type != VCHR)
 1075                 return (EOPNOTSUPP);
 1076         dvp = ap->a_dvp;
 1077         dmp = VFSTODEVFS(dvp->v_mount);
 1078 
 1079         cnp = ap->a_cnp;
 1080         vpp = ap->a_vpp;
 1081         dd = dvp->v_data;
 1082 
 1083         error = ENOENT;
 1084         sx_xlock(&dmp->dm_lock);
 1085         TAILQ_FOREACH(de, &dd->de_dlist, de_list) {
 1086                 if (cnp->cn_namelen != de->de_dirent->d_namlen)
 1087                         continue;
 1088                 if (de->de_dirent->d_type == DT_CHR &&
 1089                     (de->de_cdp->cdp_flags & CDP_ACTIVE) == 0)
 1090                         continue;
 1091                 if (bcmp(cnp->cn_nameptr, de->de_dirent->d_name,
 1092                     de->de_dirent->d_namlen) != 0)
 1093                         continue;
 1094                 if (de->de_flags & DE_WHITEOUT)
 1095                         break;
 1096                 goto notfound;
 1097         }
 1098         if (de == NULL)
 1099                 goto notfound;
 1100         de->de_flags &= ~DE_WHITEOUT;
 1101         error = devfs_allocv(de, dvp->v_mount, LK_EXCLUSIVE, vpp);
 1102         return (error);
 1103 notfound:
 1104         sx_xunlock(&dmp->dm_lock);
 1105         return (error);
 1106 }
 1107 
 1108 /* ARGSUSED */
 1109 static int
 1110 devfs_open(struct vop_open_args *ap)
 1111 {
 1112         struct thread *td = ap->a_td;
 1113         struct vnode *vp = ap->a_vp;
 1114         struct cdev *dev = vp->v_rdev;
 1115         struct file *fp = ap->a_fp;
 1116         int error, ref, vlocked;
 1117         struct cdevsw *dsw;
 1118         struct file *fpop;
 1119         struct mtx *mtxp;
 1120 
 1121         if (vp->v_type == VBLK)
 1122                 return (ENXIO);
 1123 
 1124         if (dev == NULL)
 1125                 return (ENXIO);
 1126 
 1127         /* Make this field valid before any I/O in d_open. */
 1128         if (dev->si_iosize_max == 0)
 1129                 dev->si_iosize_max = DFLTPHYS;
 1130 
 1131         dsw = dev_refthread(dev, &ref);
 1132         if (dsw == NULL)
 1133                 return (ENXIO);
 1134         if (fp == NULL && dsw->d_fdopen != NULL) {
 1135                 dev_relthread(dev, ref);
 1136                 return (ENXIO);
 1137         }
 1138 
 1139         vlocked = VOP_ISLOCKED(vp);
 1140         VOP_UNLOCK(vp, 0);
 1141 
 1142         fpop = td->td_fpop;
 1143         td->td_fpop = fp;
 1144         if (fp != NULL) {
 1145                 fp->f_data = dev;
 1146                 fp->f_vnode = vp;
 1147         }
 1148         if (dsw->d_fdopen != NULL)
 1149                 error = dsw->d_fdopen(dev, ap->a_mode, td, fp);
 1150         else
 1151                 error = dsw->d_open(dev, ap->a_mode, S_IFCHR, td);
 1152         /* Clean up any cdevpriv upon error. */
 1153         if (error != 0)
 1154                 devfs_clear_cdevpriv();
 1155         td->td_fpop = fpop;
 1156 
 1157         vn_lock(vp, vlocked | LK_RETRY);
 1158         dev_relthread(dev, ref);
 1159         if (error != 0) {
 1160                 if (error == ERESTART)
 1161                         error = EINTR;
 1162                 return (error);
 1163         }
 1164 
 1165 #if 0   /* /dev/console */
 1166         KASSERT(fp != NULL, ("Could not vnode bypass device on NULL fp"));
 1167 #else
 1168         if (fp == NULL)
 1169                 return (error);
 1170 #endif
 1171         if (fp->f_ops == &badfileops)
 1172                 finit(fp, fp->f_flag, DTYPE_VNODE, dev, &devfs_ops_f);
 1173         mtxp = mtx_pool_find(mtxpool_sleep, fp);
 1174 
 1175         /*
 1176          * Hint to the dofilewrite() to not force the buffer draining
 1177          * on the writer to the file.  Most likely, the write would
 1178          * not need normal buffers.
 1179          */
 1180         mtx_lock(mtxp);
 1181         fp->f_vnread_flags |= FDEVFS_VNODE;
 1182         mtx_unlock(mtxp);
 1183         return (error);
 1184 }
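
/*
 * Illustrative sketch (not part of this file): devfs_open() above calls
 * d_fdopen in preference to d_open when a driver provides one, which
 * hands the driver the struct file for this particular open.  The name
 * "mydev_fdopen" is hypothetical.
 */
#if 0
static int
mydev_fdopen(struct cdev *dev, int oflags, struct thread *td,
    struct file *fp)
{

        /*
         * fp identifies this specific open file; devfs_open() has
         * already set td->td_fpop = fp, so devfs_set_cdevpriv() may be
         * used here as well.
         */
        return (0);
}
#endif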
 1185 
 1186 static int
 1187 devfs_pathconf(struct vop_pathconf_args *ap)
 1188 {
 1189 
 1190         switch (ap->a_name) {
 1191         case _PC_MAC_PRESENT:
 1192 #ifdef MAC
 1193                 /*
 1194                  * If MAC is enabled, devfs automatically supports
  1195                  * trivial non-persistent label storage.
 1196                  */
 1197                 *ap->a_retval = 1;
 1198 #else
 1199                 *ap->a_retval = 0;
 1200 #endif
 1201                 return (0);
 1202         default:
 1203                 return (vop_stdpathconf(ap));
 1204         }
 1205         /* NOTREACHED */
 1206 }
 1207 
 1208 /* ARGSUSED */
 1209 static int
 1210 devfs_poll_f(struct file *fp, int events, struct ucred *cred, struct thread *td)
 1211 {
 1212         struct cdev *dev;
 1213         struct cdevsw *dsw;
 1214         int error, ref;
 1215         struct file *fpop;
 1216 
 1217         fpop = td->td_fpop;
 1218         error = devfs_fp_check(fp, &dev, &dsw, &ref);
 1219         if (error != 0) {
 1220                 error = vnops.fo_poll(fp, events, cred, td);
 1221                 return (error);
 1222         }
 1223         error = dsw->d_poll(dev, events, td);
 1224         td->td_fpop = fpop;
 1225         dev_relthread(dev, ref);
 1226         return(error);
 1227 }
 1228 
 1229 /*
 1230  * Print out the contents of a special device vnode.
 1231  */
 1232 static int
 1233 devfs_print(struct vop_print_args *ap)
 1234 {
 1235 
 1236         printf("\tdev %s\n", devtoname(ap->a_vp->v_rdev));
 1237         return (0);
 1238 }
 1239 
 1240 static int
 1241 devfs_read_f(struct file *fp, struct uio *uio, struct ucred *cred,
 1242     int flags, struct thread *td)
 1243 {
 1244         struct cdev *dev;
 1245         int ioflag, error, ref;
 1246         ssize_t resid;
 1247         struct cdevsw *dsw;
 1248         struct file *fpop;
 1249 
 1250         if (uio->uio_resid > DEVFS_IOSIZE_MAX)
 1251                 return (EINVAL);
 1252         fpop = td->td_fpop;
 1253         error = devfs_fp_check(fp, &dev, &dsw, &ref);
 1254         if (error != 0) {
 1255                 error = vnops.fo_read(fp, uio, cred, flags, td);
 1256                 return (error);
 1257         }
 1258         resid = uio->uio_resid;
 1259         ioflag = fp->f_flag & (O_NONBLOCK | O_DIRECT);
 1260         if (ioflag & O_DIRECT)
 1261                 ioflag |= IO_DIRECT;
 1262 
 1263         foffset_lock_uio(fp, uio, flags | FOF_NOLOCK);
 1264         error = dsw->d_read(dev, uio, ioflag);
 1265         if (uio->uio_resid != resid || (error == 0 && resid != 0))
 1266                 devfs_timestamp(&dev->si_atime);
 1267         td->td_fpop = fpop;
 1268         dev_relthread(dev, ref);
 1269 
 1270         foffset_unlock_uio(fp, uio, flags | FOF_NOLOCK | FOF_NEXTOFF);
 1271         return (error);
 1272 }
 1273 
 1274 static int
 1275 devfs_readdir(struct vop_readdir_args *ap)
 1276 {
 1277         int error;
 1278         struct uio *uio;
 1279         struct dirent *dp;
 1280         struct devfs_dirent *dd;
 1281         struct devfs_dirent *de;
 1282         struct devfs_mount *dmp;
 1283         off_t off;
 1284         int *tmp_ncookies = NULL;
 1285 
 1286         if (ap->a_vp->v_type != VDIR)
 1287                 return (ENOTDIR);
 1288 
 1289         uio = ap->a_uio;
 1290         if (uio->uio_offset < 0)
 1291                 return (EINVAL);
 1292 
 1293         /*
 1294          * XXX: This is a temporary hack to get around this filesystem not
 1295          * supporting cookies. We store the location of the ncookies pointer
 1296          * in a temporary variable before calling vfs_subr.c:vfs_read_dirent()
 1297          * and set the number of cookies to 0. We then set the pointer to
 1298          * NULL so that vfs_read_dirent doesn't try to call realloc() on 
 1299          * ap->a_cookies. Later in this function, we restore the ap->a_ncookies
 1300          * pointer to its original location before returning to the caller.
 1301          */
 1302         if (ap->a_ncookies != NULL) {
 1303                 tmp_ncookies = ap->a_ncookies;
 1304                 *ap->a_ncookies = 0;
 1305                 ap->a_ncookies = NULL;
 1306         }
 1307 
 1308         dmp = VFSTODEVFS(ap->a_vp->v_mount);
 1309         if (devfs_populate_vp(ap->a_vp) != 0) {
 1310                 if (tmp_ncookies != NULL)
 1311                         ap->a_ncookies = tmp_ncookies;
 1312                 return (EIO);
 1313         }
 1314         error = 0;
 1315         de = ap->a_vp->v_data;
 1316         off = 0;
 1317         TAILQ_FOREACH(dd, &de->de_dlist, de_list) {
 1318                 KASSERT(dd->de_cdp != (void *)0xdeadc0de, ("%s %d\n", __func__, __LINE__));
 1319                 if (dd->de_flags & (DE_COVERED | DE_WHITEOUT))
 1320                         continue;
 1321                 if (devfs_prison_check(dd, uio->uio_td))
 1322                         continue;
 1323                 if (dd->de_dirent->d_type == DT_DIR)
 1324                         de = dd->de_dir;
 1325                 else
 1326                         de = dd;
 1327                 dp = dd->de_dirent;
 1328                 if (dp->d_reclen > uio->uio_resid)
 1329                         break;
 1330                 dp->d_fileno = de->de_inode;
 1331                 if (off >= uio->uio_offset) {
 1332                         error = vfs_read_dirent(ap, dp, off);
 1333                         if (error)
 1334                                 break;
 1335                 }
 1336                 off += dp->d_reclen;
 1337         }
 1338         sx_xunlock(&dmp->dm_lock);
 1339         uio->uio_offset = off;
 1340 
 1341         /*
 1342          * Restore ap->a_ncookies if it wasn't originally NULL in the first
 1343          * place.
 1344          */
 1345         if (tmp_ncookies != NULL)
 1346                 ap->a_ncookies = tmp_ncookies;
 1347 
 1348         return (error);
 1349 }
 1350 
 1351 static int
 1352 devfs_readlink(struct vop_readlink_args *ap)
 1353 {
 1354         struct devfs_dirent *de;
 1355 
 1356         de = ap->a_vp->v_data;
 1357         return (uiomove(de->de_symlink, strlen(de->de_symlink), ap->a_uio));
 1358 }
 1359 
 1360 static int
 1361 devfs_reclaim(struct vop_reclaim_args *ap)
 1362 {
 1363         struct vnode *vp;
 1364         struct devfs_dirent *de;
 1365 
 1366         vp = ap->a_vp;
 1367         mtx_lock(&devfs_de_interlock);
 1368         de = vp->v_data;
 1369         if (de != NULL) {
 1370                 de->de_vnode = NULL;
 1371                 vp->v_data = NULL;
 1372         }
 1373         mtx_unlock(&devfs_de_interlock);
 1374         vnode_destroy_vobject(vp);
 1375         return (0);
 1376 }
 1377 
 1378 static int
 1379 devfs_reclaim_vchr(struct vop_reclaim_args *ap)
 1380 {
 1381         struct vnode *vp;
 1382         struct cdev *dev;
 1383 
 1384         vp = ap->a_vp;
 1385         MPASS(vp->v_type == VCHR);
 1386 
 1387         devfs_reclaim(ap);
 1388 
 1389         VI_LOCK(vp);
 1390         dev_lock();
 1391         dev = vp->v_rdev;
 1392         vp->v_rdev = NULL;
 1393         if (dev != NULL)
 1394                 dev->si_usecount -= vp->v_usecount;
 1395         dev_unlock();
 1396         VI_UNLOCK(vp);
 1397         if (dev != NULL)
 1398                 dev_rel(dev);
 1399         return (0);
 1400 }
 1401 
 1402 static int
 1403 devfs_remove(struct vop_remove_args *ap)
 1404 {
 1405         struct vnode *dvp = ap->a_dvp;
 1406         struct vnode *vp = ap->a_vp;
 1407         struct devfs_dirent *dd;
 1408         struct devfs_dirent *de, *de_covered;
 1409         struct devfs_mount *dmp = VFSTODEVFS(vp->v_mount);
 1410 
 1411         ASSERT_VOP_ELOCKED(dvp, "devfs_remove");
 1412         ASSERT_VOP_ELOCKED(vp, "devfs_remove");
 1413 
 1414         sx_xlock(&dmp->dm_lock);
 1415         dd = ap->a_dvp->v_data;
 1416         de = vp->v_data;
 1417         if (de->de_cdp == NULL) {
 1418                 TAILQ_REMOVE(&dd->de_dlist, de, de_list);
 1419                 if (de->de_dirent->d_type == DT_LNK) {
 1420                         de_covered = devfs_find(dd, de->de_dirent->d_name,
 1421                             de->de_dirent->d_namlen, 0);
 1422                         if (de_covered != NULL)
 1423                                 de_covered->de_flags &= ~DE_COVERED;
 1424                 }
 1425                 /* We need to unlock dvp because devfs_delete() may lock it. */
 1426                 VOP_UNLOCK(vp, 0);
 1427                 if (dvp != vp)
 1428                         VOP_UNLOCK(dvp, 0);
 1429                 devfs_delete(dmp, de, 0);
 1430                 sx_xunlock(&dmp->dm_lock);
 1431                 if (dvp != vp)
 1432                         vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
 1433                 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 1434         } else {
 1435                 de->de_flags |= DE_WHITEOUT;
 1436                 sx_xunlock(&dmp->dm_lock);
 1437         }
 1438         return (0);
 1439 }
 1440 
 1441 /*
 1442  * Revoke is called on a tty when a terminal session ends.  The vnode
  1443  * is orphaned by setting v_op to deadfs, so we need to let go of it
  1444  * as well so that a new one is created next time around.
 1445  *
 1446  */
 1447 static int
 1448 devfs_revoke(struct vop_revoke_args *ap)
 1449 {
 1450         struct vnode *vp = ap->a_vp, *vp2;
 1451         struct cdev *dev;
 1452         struct cdev_priv *cdp;
 1453         struct devfs_dirent *de;
 1454         u_int i;
 1455 
 1456         KASSERT((ap->a_flags & REVOKEALL) != 0, ("devfs_revoke !REVOKEALL"));
 1457 
 1458         dev = vp->v_rdev;
 1459         cdp = cdev2priv(dev);
 1460  
 1461         dev_lock();
 1462         cdp->cdp_inuse++;
 1463         dev_unlock();
 1464 
 1465         vhold(vp);
 1466         vgone(vp);
 1467         vdrop(vp);
 1468 
 1469         VOP_UNLOCK(vp,0);
 1470  loop:
 1471         for (;;) {
 1472                 mtx_lock(&devfs_de_interlock);
 1473                 dev_lock();
 1474                 vp2 = NULL;
 1475                 for (i = 0; i <= cdp->cdp_maxdirent; i++) {
 1476                         de = cdp->cdp_dirents[i];
 1477                         if (de == NULL)
 1478                                 continue;
 1479 
 1480                         vp2 = de->de_vnode;
 1481                         if (vp2 != NULL) {
 1482                                 dev_unlock();
 1483                                 VI_LOCK(vp2);
 1484                                 mtx_unlock(&devfs_de_interlock);
 1485                                 if (vget(vp2, LK_EXCLUSIVE | LK_INTERLOCK,
 1486                                     curthread))
 1487                                         goto loop;
 1488                                 vhold(vp2);
 1489                                 vgone(vp2);
 1490                                 vdrop(vp2);
 1491                                 vput(vp2);
 1492                                 break;
 1493                         } 
 1494                 }
 1495                 if (vp2 != NULL) {
 1496                         continue;
 1497                 }
 1498                 dev_unlock();
 1499                 mtx_unlock(&devfs_de_interlock);
 1500                 break;
 1501         }
 1502         dev_lock();
 1503         cdp->cdp_inuse--;
 1504         if (!(cdp->cdp_flags & CDP_ACTIVE) && cdp->cdp_inuse == 0) {
 1505                 TAILQ_REMOVE(&cdevp_list, cdp, cdp_list);
 1506                 dev_unlock();
 1507                 dev_rel(&cdp->cdp_c);
 1508         } else
 1509                 dev_unlock();
 1510 
 1511         vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 1512         return (0);
 1513 }
 1514 
 1515 static int
 1516 devfs_rioctl(struct vop_ioctl_args *ap)
 1517 {
 1518         struct vnode *vp;
 1519         struct devfs_mount *dmp;
 1520         int error;
 1521 
 1522         vp = ap->a_vp;
 1523         vn_lock(vp, LK_SHARED | LK_RETRY);
 1524         if (vp->v_iflag & VI_DOOMED) {
 1525                 VOP_UNLOCK(vp, 0);
 1526                 return (EBADF);
 1527         }
 1528         dmp = VFSTODEVFS(vp->v_mount);
 1529         sx_xlock(&dmp->dm_lock);
 1530         VOP_UNLOCK(vp, 0);
 1531         DEVFS_DMP_HOLD(dmp);
 1532         devfs_populate(dmp);
 1533         if (DEVFS_DMP_DROP(dmp)) {
 1534                 sx_xunlock(&dmp->dm_lock);
 1535                 devfs_unmount_final(dmp);
 1536                 return (ENOENT);
 1537         }
 1538         error = devfs_rules_ioctl(dmp, ap->a_command, ap->a_data, ap->a_td);
 1539         sx_xunlock(&dmp->dm_lock);
 1540         return (error);
 1541 }
 1542 
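/*
 * VOP_READ for directory vnodes: a plain read(2) on a devfs directory is
 * translated into the equivalent VOP_READDIR call; any other vnode type
 * is rejected with EINVAL.
 */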
 1543 static int
 1544 devfs_rread(struct vop_read_args *ap)
 1545 {
 1546 
 1547         if (ap->a_vp->v_type != VDIR)
 1548                 return (EINVAL);
 1549         return (VOP_READDIR(ap->a_vp, ap->a_uio, ap->a_cred, NULL, NULL, NULL));
 1550 }
 1551 
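/*
 * VOP_SETATTR: immutable attributes are rejected up front; ownership,
 * mode and timestamp updates are applied to the devfs_dirent (or, for
 * character devices, to the timestamps of the cdev itself) after the
 * usual chown/chmod permission checks.
 */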
 1552 static int
 1553 devfs_setattr(struct vop_setattr_args *ap)
 1554 {
 1555         struct devfs_dirent *de;
 1556         struct vattr *vap;
 1557         struct vnode *vp;
 1558         struct thread *td;
 1559         int c, error;
 1560         uid_t uid;
 1561         gid_t gid;
 1562 
 1563         vap = ap->a_vap;
 1564         vp = ap->a_vp;
 1565         td = curthread;
 1566         if ((vap->va_type != VNON) ||
 1567             (vap->va_nlink != VNOVAL) ||
 1568             (vap->va_fsid != VNOVAL) ||
 1569             (vap->va_fileid != VNOVAL) ||
 1570             (vap->va_blocksize != VNOVAL) ||
 1571             (vap->va_flags != VNOVAL && vap->va_flags != 0) ||
 1572             (vap->va_rdev != VNOVAL) ||
 1573             ((int)vap->va_bytes != VNOVAL) ||
 1574             (vap->va_gen != VNOVAL)) {
 1575                 return (EINVAL);
 1576         }
 1577 
 1578         error = devfs_populate_vp(vp);
 1579         if (error != 0)
 1580                 return (error);
 1581 
 1582         de = vp->v_data;
 1583         if (vp->v_type == VDIR)
 1584                 de = de->de_dir;
 1585 
 1586         c = 0;
 1587         if (vap->va_uid == (uid_t)VNOVAL)
 1588                 uid = de->de_uid;
 1589         else
 1590                 uid = vap->va_uid;
 1591         if (vap->va_gid == (gid_t)VNOVAL)
 1592                 gid = de->de_gid;
 1593         else
 1594                 gid = vap->va_gid;
 1595         if (uid != de->de_uid || gid != de->de_gid) {
 1596                 if ((ap->a_cred->cr_uid != de->de_uid) || uid != de->de_uid ||
 1597                     (gid != de->de_gid && !groupmember(gid, ap->a_cred))) {
 1598                         error = priv_check(td, PRIV_VFS_CHOWN);
 1599                         if (error != 0)
 1600                                 goto ret;
 1601                 }
 1602                 de->de_uid = uid;
 1603                 de->de_gid = gid;
 1604                 c = 1;
 1605         }
 1606 
 1607         if (vap->va_mode != (mode_t)VNOVAL) {
 1608                 if (ap->a_cred->cr_uid != de->de_uid) {
 1609                         error = priv_check(td, PRIV_VFS_ADMIN);
 1610                         if (error != 0)
 1611                                 goto ret;
 1612                 }
 1613                 de->de_mode = vap->va_mode;
 1614                 c = 1;
 1615         }
 1616 
 1617         if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) {
 1618                 error = vn_utimes_perm(vp, vap, ap->a_cred, td);
 1619                 if (error != 0)
 1620                         goto ret;
 1621                 if (vap->va_atime.tv_sec != VNOVAL) {
 1622                         if (vp->v_type == VCHR)
 1623                                 vp->v_rdev->si_atime = vap->va_atime;
 1624                         else
 1625                                 de->de_atime = vap->va_atime;
 1626                 }
 1627                 if (vap->va_mtime.tv_sec != VNOVAL) {
 1628                         if (vp->v_type == VCHR)
 1629                                 vp->v_rdev->si_mtime = vap->va_mtime;
 1630                         else
 1631                                 de->de_mtime = vap->va_mtime;
 1632                 }
 1633                 c = 1;
 1634         }
 1635 
 1636         if (c) {
 1637                 if (vp->v_type == VCHR)
 1638                         vfs_timestamp(&vp->v_rdev->si_ctime);
 1639                 else
 1640                         vfs_timestamp(&de->de_mtime);
 1641         }
 1642 
 1643 ret:
 1644         sx_xunlock(&VFSTODEVFS(vp->v_mount)->dm_lock);
 1645         return (error);
 1646 }
 1647 
 1648 #ifdef MAC
 1649 static int
 1650 devfs_setlabel(struct vop_setlabel_args *ap)
 1651 {
 1652         struct vnode *vp;
 1653         struct devfs_dirent *de;
 1654 
 1655         vp = ap->a_vp;
 1656         de = vp->v_data;
 1657 
 1658         mac_vnode_relabel(ap->a_cred, vp, ap->a_label);
 1659         mac_devfs_update(vp->v_mount, de, vp);
 1660 
 1661         return (0);
 1662 }
 1663 #endif
 1664 
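/*
 * fo_stat method: simply forwarded to the generic vnode fileops.
 */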
 1665 static int
 1666 devfs_stat_f(struct file *fp, struct stat *sb, struct ucred *cred, struct thread *td)
 1667 {
 1668 
 1669         return (vnops.fo_stat(fp, sb, cred, td));
 1670 }
 1671 
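/*
 * VOP_SYMLINK: create a user symlink in devfs.  This requires the
 * PRIV_DEVFS_SYMLINK privilege; the new entry is marked DE_USER and may
 * shadow (cover) an existing kernel-provided entry of the same name.
 */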
 1672 static int
 1673 devfs_symlink(struct vop_symlink_args *ap)
 1674 {
 1675         int i, error;
 1676         struct devfs_dirent *dd;
 1677         struct devfs_dirent *de, *de_covered, *de_dotdot;
 1678         struct devfs_mount *dmp;
 1679 
 1680         error = priv_check(curthread, PRIV_DEVFS_SYMLINK);
 1681         if (error)
 1682                 return (error);
 1683         dmp = VFSTODEVFS(ap->a_dvp->v_mount);
 1684         if (devfs_populate_vp(ap->a_dvp) != 0)
 1685                 return (ENOENT);
 1686 
 1687         dd = ap->a_dvp->v_data;
 1688         de = devfs_newdirent(ap->a_cnp->cn_nameptr, ap->a_cnp->cn_namelen);
 1689         de->de_flags = DE_USER;
 1690         de->de_uid = 0;
 1691         de->de_gid = 0;
 1692         de->de_mode = 0755;
 1693         de->de_inode = alloc_unr(devfs_inos);
 1694         de->de_dir = dd;
 1695         de->de_dirent->d_type = DT_LNK;
 1696         i = strlen(ap->a_target) + 1;
 1697         de->de_symlink = malloc(i, M_DEVFS, M_WAITOK);
 1698         bcopy(ap->a_target, de->de_symlink, i);
 1699 #ifdef MAC
 1700         mac_devfs_create_symlink(ap->a_cnp->cn_cred, dmp->dm_mount, dd, de);
 1701 #endif
 1702         de_covered = devfs_find(dd, de->de_dirent->d_name,
 1703             de->de_dirent->d_namlen, 0);
 1704         if (de_covered != NULL) {
 1705                 if ((de_covered->de_flags & DE_USER) != 0) {
 1706                         devfs_delete(dmp, de, DEVFS_DEL_NORECURSE);
 1707                         sx_xunlock(&dmp->dm_lock);
 1708                         return (EEXIST);
 1709                 }
 1710                 KASSERT((de_covered->de_flags & DE_COVERED) == 0,
 1711                     ("devfs_symlink: entry %p already covered", de_covered));
 1712                 de_covered->de_flags |= DE_COVERED;
 1713         }
 1714 
 1715         de_dotdot = TAILQ_FIRST(&dd->de_dlist);         /* "." */
 1716         de_dotdot = TAILQ_NEXT(de_dotdot, de_list);     /* ".." */
 1717         TAILQ_INSERT_AFTER(&dd->de_dlist, de_dotdot, de, de_list);
 1718         devfs_dir_ref_de(dmp, dd);
 1719         devfs_rules_apply(dmp, de);
 1720 
 1721         return (devfs_allocv(de, ap->a_dvp->v_mount, LK_EXCLUSIVE, ap->a_vpp));
 1722 }
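
/*
 * Illustrative, hypothetical example (not part of the original file):
 * with sufficient privilege such a link can be created directly from
 * userland, e.g. "ln -s ada0s1a /dev/rootdisk", or set up persistently
 * through the link actions of devfs.conf(5).
 */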
 1723 
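/*
 * fo_truncate method: simply forwarded to the generic vnode fileops.
 */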
 1724 static int
 1725 devfs_truncate_f(struct file *fp, off_t length, struct ucred *cred, struct thread *td)
 1726 {
 1727 
 1728         return (vnops.fo_truncate(fp, length, cred, td));
 1729 }
 1730 
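/*
 * fo_write method for descriptors open on character devices: the request
 * is passed to the driver's d_write entry point (falling back to the
 * generic vnode fileops if the device is gone), and the device timestamps
 * are refreshed when the write makes progress.
 */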
 1731 static int
 1732 devfs_write_f(struct file *fp, struct uio *uio, struct ucred *cred,
 1733     int flags, struct thread *td)
 1734 {
 1735         struct cdev *dev;
 1736         int error, ioflag, ref;
 1737         ssize_t resid;
 1738         struct cdevsw *dsw;
 1739         struct file *fpop;
 1740 
 1741         if (uio->uio_resid > DEVFS_IOSIZE_MAX)
 1742                 return (EINVAL);
 1743         fpop = td->td_fpop;
 1744         error = devfs_fp_check(fp, &dev, &dsw, &ref);
 1745         if (error != 0) {
 1746                 error = vnops.fo_write(fp, uio, cred, flags, td);
 1747                 return (error);
 1748         }
 1749         KASSERT(uio->uio_td == td, ("uio_td %p is not td %p", uio->uio_td, td));
 1750         ioflag = fp->f_flag & (O_NONBLOCK | O_DIRECT | O_FSYNC);
 1751         if (ioflag & O_DIRECT)
 1752                 ioflag |= IO_DIRECT;
 1753         foffset_lock_uio(fp, uio, flags | FOF_NOLOCK);
 1754 
 1755         resid = uio->uio_resid;
 1756 
 1757         error = dsw->d_write(dev, uio, ioflag);
 1758         if (uio->uio_resid != resid || (error == 0 && resid != 0)) {
 1759                 devfs_timestamp(&dev->si_ctime);
 1760                 dev->si_mtime = dev->si_ctime;
 1761         }
 1762         td->td_fpop = fpop;
 1763         dev_relthread(dev, ref);
 1764 
 1765         foffset_unlock_uio(fp, uio, flags | FOF_NOLOCK | FOF_NEXTOFF);
 1766         return (error);
 1767 }
 1768 
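/*
 * fo_mmap method for character devices: check that the requested
 * protections are compatible with how the descriptor was opened, obtain
 * the backing VM object from vm_mmap_cdev() and map it into the target
 * address space with vm_mmap_object().
 */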
 1769 static int
 1770 devfs_mmap_f(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size,
 1771     vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff,
 1772     struct thread *td)
 1773 {
 1774         struct cdev *dev;
 1775         struct cdevsw *dsw;
 1776         struct mount *mp;
 1777         struct vnode *vp;
 1778         struct file *fpop;
 1779         vm_object_t object;
 1780         vm_prot_t maxprot;
 1781         int error, ref;
 1782 
 1783         vp = fp->f_vnode;
 1784 
 1785         /*
 1786          * Ensure that file and memory protections are
 1787          * compatible.
 1788          */
 1789         mp = vp->v_mount;
 1790         if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0)
 1791                 maxprot = VM_PROT_NONE;
 1792         else
 1793                 maxprot = VM_PROT_EXECUTE;
 1794         if ((fp->f_flag & FREAD) != 0)
 1795                 maxprot |= VM_PROT_READ;
 1796         else if ((prot & VM_PROT_READ) != 0)
 1797                 return (EACCES);
 1798 
 1799         /*
 1800          * If we are sharing potential changes via MAP_SHARED and we
 1801          * are trying to get write permission although we opened it
 1802          * without asking for it, bail out.
 1803          *
 1804          * Note that most character devices always share mappings.
 1805          * The one exception is that D_MMAP_ANON devices
 1806          * (i.e. /dev/zero) permit private writable mappings.
 1807          *
 1808          * Rely on vm_mmap_cdev() to fail invalid MAP_PRIVATE requests
 1809          * as well as updating maxprot to permit writing for
 1810          * D_MMAP_ANON devices rather than doing that here.
 1811          */
 1812         if ((flags & MAP_SHARED) != 0) {
 1813                 if ((fp->f_flag & FWRITE) != 0)
 1814                         maxprot |= VM_PROT_WRITE;
 1815                 else if ((prot & VM_PROT_WRITE) != 0)
 1816                         return (EACCES);
 1817         }
 1818         maxprot &= cap_maxprot;
 1819 
 1820         fpop = td->td_fpop;
 1821         error = devfs_fp_check(fp, &dev, &dsw, &ref);
 1822         if (error != 0)
 1823                 return (error);
 1824 
 1825         error = vm_mmap_cdev(td, size, prot, &maxprot, &flags, dev, dsw, &foff,
 1826             &object);
 1827         td->td_fpop = fpop;
 1828         dev_relthread(dev, ref);
 1829         if (error != 0)
 1830                 return (error);
 1831 
 1832         error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object,
 1833             foff, FALSE, td);
 1834         if (error != 0)
 1835                 vm_object_deallocate(object);
 1836         return (error);
 1837 }
 1838 
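/*
 * Translate a struct cdev pointer into the user-visible dev_t (the devfs
 * inode number); NODEV is returned for a NULL pointer.
 */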
 1839 dev_t
 1840 dev2udev(struct cdev *x)
 1841 {
 1842         if (x == NULL)
 1843                 return (NODEV);
 1844         return (cdev2priv(x)->cdp_inode);
 1845 }
 1846 
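/*
 * File operations used for descriptors that are open on devfs character
 * devices; device-specific methods are implemented above, the remainder
 * fall back to the generic vnode fileops.
 */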
 1847 static struct fileops devfs_ops_f = {
 1848         .fo_read =      devfs_read_f,
 1849         .fo_write =     devfs_write_f,
 1850         .fo_truncate =  devfs_truncate_f,
 1851         .fo_ioctl =     devfs_ioctl_f,
 1852         .fo_poll =      devfs_poll_f,
 1853         .fo_kqfilter =  devfs_kqfilter_f,
 1854         .fo_stat =      devfs_stat_f,
 1855         .fo_close =     devfs_close_f,
 1856         .fo_chmod =     vn_chmod,
 1857         .fo_chown =     vn_chown,
 1858         .fo_sendfile =  vn_sendfile,
 1859         .fo_seek =      vn_seek,
 1860         .fo_fill_kinfo = vn_fill_kinfo,
 1861         .fo_mmap =      devfs_mmap_f,
 1862         .fo_flags =     DFLAG_PASSABLE | DFLAG_SEEKABLE
 1863 };
 1864 
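/*
 * Vnode operations for devfs directories and symlinks.
 */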
 1865 static struct vop_vector devfs_vnodeops = {
 1866         .vop_default =          &default_vnodeops,
 1867 
 1868         .vop_access =           devfs_access,
 1869         .vop_getattr =          devfs_getattr,
 1870         .vop_ioctl =            devfs_rioctl,
 1871         .vop_lookup =           devfs_lookup,
 1872         .vop_mknod =            devfs_mknod,
 1873         .vop_pathconf =         devfs_pathconf,
 1874         .vop_read =             devfs_rread,
 1875         .vop_readdir =          devfs_readdir,
 1876         .vop_readlink =         devfs_readlink,
 1877         .vop_reclaim =          devfs_reclaim,
 1878         .vop_remove =           devfs_remove,
 1879         .vop_revoke =           devfs_revoke,
 1880         .vop_setattr =          devfs_setattr,
 1881 #ifdef MAC
 1882         .vop_setlabel =         devfs_setlabel,
 1883 #endif
 1884         .vop_symlink =          devfs_symlink,
 1885         .vop_vptocnp =          devfs_vptocnp,
 1886 };
 1887 
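/*
 * Vnode operations for devfs character-device (special) vnodes.  Namespace
 * operations that make no sense on a device node are wired to VOP_PANIC.
 */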
 1888 static struct vop_vector devfs_specops = {
 1889         .vop_default =          &default_vnodeops,
 1890 
 1891         .vop_access =           devfs_access,
 1892         .vop_bmap =             VOP_PANIC,
 1893         .vop_close =            devfs_close,
 1894         .vop_create =           VOP_PANIC,
 1895         .vop_fsync =            devfs_fsync,
 1896         .vop_getattr =          devfs_getattr,
 1897         .vop_link =             VOP_PANIC,
 1898         .vop_mkdir =            VOP_PANIC,
 1899         .vop_mknod =            VOP_PANIC,
 1900         .vop_open =             devfs_open,
 1901         .vop_pathconf =         devfs_pathconf,
 1902         .vop_poll =             dead_poll,
 1903         .vop_print =            devfs_print,
 1904         .vop_read =             dead_read,
 1905         .vop_readdir =          VOP_PANIC,
 1906         .vop_readlink =         VOP_PANIC,
 1907         .vop_reallocblks =      VOP_PANIC,
 1908         .vop_reclaim =          devfs_reclaim_vchr,
 1909         .vop_remove =           devfs_remove,
 1910         .vop_rename =           VOP_PANIC,
 1911         .vop_revoke =           devfs_revoke,
 1912         .vop_rmdir =            VOP_PANIC,
 1913         .vop_setattr =          devfs_setattr,
 1914 #ifdef MAC
 1915         .vop_setlabel =         devfs_setlabel,
 1916 #endif
 1917         .vop_strategy =         VOP_PANIC,
 1918         .vop_symlink =          VOP_PANIC,
 1919         .vop_vptocnp =          devfs_vptocnp,
 1920         .vop_write =            dead_write,
 1921 };
 1922 
 1923 /*
 1924  * Our calling convention to the device drivers used to be that we passed
 1925  * vnode.h IO_* flags to read()/write(), but we're moving to fcntl.h O_*
 1926  * flags instead, since those are what open(), close() and ioctl() take
 1927  * and we don't really want vnode.h in device drivers.
 1928  * We preserved source compatibility by redefining some vnode flags to
 1929  * have the same values as the corresponding fcntl flags and by sending
 1930  * down the bitwise OR of the respective fcntl/vnode flags.  These
 1931  * CTASSERTs make sure nobody pulls the rug out from under this.
 1932  */
 1933 CTASSERT(O_NONBLOCK == IO_NDELAY);
 1934 CTASSERT(O_FSYNC == IO_SYNC);
