FreeBSD/Linux Kernel Cross Reference
sys/ufs/ffs/ffs_vnops.c


    1 /*-
    2  * Copyright (c) 2002, 2003 Networks Associates Technology, Inc.
    3  * All rights reserved.
    4  *
    5  * This software was developed for the FreeBSD Project by Marshall
    6  * Kirk McKusick and Network Associates Laboratories, the Security
    7  * Research Division of Network Associates, Inc. under DARPA/SPAWAR
    8  * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
    9  * research program
   10  *
   11  * Redistribution and use in source and binary forms, with or without
   12  * modification, are permitted provided that the following conditions
   13  * are met:
   14  * 1. Redistributions of source code must retain the above copyright
   15  *    notice, this list of conditions and the following disclaimer.
   16  * 2. Redistributions in binary form must reproduce the above copyright
   17  *    notice, this list of conditions and the following disclaimer in the
   18  *    documentation and/or other materials provided with the distribution.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   30  * SUCH DAMAGE.
   31  *
   32  * Copyright (c) 1982, 1986, 1989, 1993
   33  *      The Regents of the University of California.  All rights reserved.
   34  *
   35  * Redistribution and use in source and binary forms, with or without
   36  * modification, are permitted provided that the following conditions
   37  * are met:
   38  * 1. Redistributions of source code must retain the above copyright
   39  *    notice, this list of conditions and the following disclaimer.
   40  * 2. Redistributions in binary form must reproduce the above copyright
   41  *    notice, this list of conditions and the following disclaimer in the
   42  *    documentation and/or other materials provided with the distribution.
   43  * 4. Neither the name of the University nor the names of its contributors
   44  *    may be used to endorse or promote products derived from this software
   45  *    without specific prior written permission.
   46  *
   47  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   48  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   49  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   50  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   51  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   52  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   53  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   54  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   55  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   56  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   57  * SUCH DAMAGE.
   58  *
   59  *      from: @(#)ufs_readwrite.c       8.11 (Berkeley) 5/8/95
   60  * from: $FreeBSD: .../ufs/ufs_readwrite.c,v 1.96 2002/08/12 09:22:11 phk ...
   61  *      @(#)ffs_vnops.c 8.15 (Berkeley) 5/14/95
   62  */
   63 
   64 #include <sys/cdefs.h>
   65 __FBSDID("$FreeBSD: releng/10.0/sys/ufs/ffs/ffs_vnops.c 248521 2013-03-19 15:08:15Z kib $");
   66 
   67 #include <sys/param.h>
   68 #include <sys/bio.h>
   69 #include <sys/systm.h>
   70 #include <sys/buf.h>
   71 #include <sys/conf.h>
   72 #include <sys/extattr.h>
   73 #include <sys/kernel.h>
   74 #include <sys/limits.h>
   75 #include <sys/malloc.h>
   76 #include <sys/mount.h>
   77 #include <sys/priv.h>
   78 #include <sys/rwlock.h>
   79 #include <sys/stat.h>
   80 #include <sys/vmmeter.h>
   81 #include <sys/vnode.h>
   82 
   83 #include <vm/vm.h>
   84 #include <vm/vm_param.h>
   85 #include <vm/vm_extern.h>
   86 #include <vm/vm_object.h>
   87 #include <vm/vm_page.h>
   88 #include <vm/vm_pager.h>
   89 #include <vm/vnode_pager.h>
   90 
   91 #include <ufs/ufs/extattr.h>
   92 #include <ufs/ufs/quota.h>
   93 #include <ufs/ufs/inode.h>
   94 #include <ufs/ufs/ufs_extern.h>
   95 #include <ufs/ufs/ufsmount.h>
   96 
   97 #include <ufs/ffs/fs.h>
   98 #include <ufs/ffs/ffs_extern.h>
   99 #include "opt_directio.h"
  100 #include "opt_ffs.h"
  101 
  102 #ifdef DIRECTIO
  103 extern int      ffs_rawread(struct vnode *vp, struct uio *uio, int *workdone);
  104 #endif
  105 static vop_fsync_t      ffs_fsync;
  106 static vop_lock1_t      ffs_lock;
  107 static vop_getpages_t   ffs_getpages;
  108 static vop_read_t       ffs_read;
  109 static vop_write_t      ffs_write;
  110 static int      ffs_extread(struct vnode *vp, struct uio *uio, int ioflag);
  111 static int      ffs_extwrite(struct vnode *vp, struct uio *uio, int ioflag,
  112                     struct ucred *cred);
  113 static vop_strategy_t   ffsext_strategy;
  114 static vop_closeextattr_t       ffs_closeextattr;
  115 static vop_deleteextattr_t      ffs_deleteextattr;
  116 static vop_getextattr_t ffs_getextattr;
  117 static vop_listextattr_t        ffs_listextattr;
  118 static vop_openextattr_t        ffs_openextattr;
  119 static vop_setextattr_t ffs_setextattr;
  120 static vop_vptofh_t     ffs_vptofh;
  121 
  122 
  123 /* Global vfs data structures for ufs. */
  124 struct vop_vector ffs_vnodeops1 = {
  125         .vop_default =          &ufs_vnodeops,
  126         .vop_fsync =            ffs_fsync,
  127         .vop_getpages =         ffs_getpages,
  128         .vop_lock1 =            ffs_lock,
  129         .vop_read =             ffs_read,
  130         .vop_reallocblks =      ffs_reallocblks,
  131         .vop_write =            ffs_write,
  132         .vop_vptofh =           ffs_vptofh,
  133 };
  134 
  135 struct vop_vector ffs_fifoops1 = {
  136         .vop_default =          &ufs_fifoops,
  137         .vop_fsync =            ffs_fsync,
  138         .vop_reallocblks =      ffs_reallocblks, /* XXX: really ??? */
  139         .vop_vptofh =           ffs_vptofh,
  140 };
  141 
  142 /* Global vfs data structures for ufs. */
  143 struct vop_vector ffs_vnodeops2 = {
  144         .vop_default =          &ufs_vnodeops,
  145         .vop_fsync =            ffs_fsync,
  146         .vop_getpages =         ffs_getpages,
  147         .vop_lock1 =            ffs_lock,
  148         .vop_read =             ffs_read,
  149         .vop_reallocblks =      ffs_reallocblks,
  150         .vop_write =            ffs_write,
  151         .vop_closeextattr =     ffs_closeextattr,
  152         .vop_deleteextattr =    ffs_deleteextattr,
  153         .vop_getextattr =       ffs_getextattr,
  154         .vop_listextattr =      ffs_listextattr,
  155         .vop_openextattr =      ffs_openextattr,
  156         .vop_setextattr =       ffs_setextattr,
  157         .vop_vptofh =           ffs_vptofh,
  158 };
  159 
  160 struct vop_vector ffs_fifoops2 = {
  161         .vop_default =          &ufs_fifoops,
  162         .vop_fsync =            ffs_fsync,
  163         .vop_lock1 =            ffs_lock,
  164         .vop_reallocblks =      ffs_reallocblks,
  165         .vop_strategy =         ffsext_strategy,
  166         .vop_closeextattr =     ffs_closeextattr,
  167         .vop_deleteextattr =    ffs_deleteextattr,
  168         .vop_getextattr =       ffs_getextattr,
  169         .vop_listextattr =      ffs_listextattr,
  170         .vop_openextattr =      ffs_openextattr,
  171         .vop_setextattr =       ffs_setextattr,
  172         .vop_vptofh =           ffs_vptofh,
  173 };
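
       /*
        * Summary (derived from FFS usage elsewhere, not stated in this
        * file): the "1" op vectors above are installed for UFS1 mounts
        * and the "2" vectors for UFS2.  Only the UFS2 vectors wire up
        * the extended attribute operations, since only the UFS2 dinode
        * carries an external attribute area; the choice between them is
        * made when the vnode is allocated in the FFS vfsops code.
        */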
  174 
  175 /*
  176  * Synch an open file.
  177  */
  178 /* ARGSUSED */
  179 static int
  180 ffs_fsync(struct vop_fsync_args *ap)
  181 {
  182         struct vnode *vp;
  183         struct bufobj *bo;
  184         int error;
  185 
  186         vp = ap->a_vp;
  187         bo = &vp->v_bufobj;
  188 retry:
  189         error = ffs_syncvnode(vp, ap->a_waitfor, 0);
  190         if (error)
  191                 return (error);
  192         if (ap->a_waitfor == MNT_WAIT && DOINGSOFTDEP(vp)) {
  193                 error = softdep_fsync(vp);
  194                 if (error)
  195                         return (error);
  196 
  197                 /*
  198                  * The softdep_fsync() function may drop vp lock,
  199                  * allowing for dirty buffers to reappear on the
  200                  * bo_dirty list. Recheck and resync as needed.
  201                  */
  202                 BO_LOCK(bo);
  203                 if (vp->v_type == VREG && (bo->bo_numoutput > 0 ||
  204                     bo->bo_dirty.bv_cnt > 0)) {
  205                         BO_UNLOCK(bo);
  206                         goto retry;
  207                 }
  208                 BO_UNLOCK(bo);
  209         }
  210         return (0);
  211 }
  212 
  213 int
  214 ffs_syncvnode(struct vnode *vp, int waitfor, int flags)
  215 {
  216         struct inode *ip;
  217         struct bufobj *bo;
  218         struct buf *bp;
  219         struct buf *nbp;
  220         ufs_lbn_t lbn;
  221         int error, wait, passes;
  222 
  223         ip = VTOI(vp);
  224         ip->i_flag &= ~IN_NEEDSYNC;
  225         bo = &vp->v_bufobj;
  226 
  227         /*
  228          * When doing MNT_WAIT we must first flush all dependencies
  229          * on the inode.
  230          */
  231         if (DOINGSOFTDEP(vp) && waitfor == MNT_WAIT &&
  232             (error = softdep_sync_metadata(vp)) != 0)
  233                 return (error);
  234 
  235         /*
  236          * Flush all dirty buffers associated with a vnode.
  237          */
  238         error = 0;
  239         passes = 0;
  240         wait = 0;       /* Always do an async pass first. */
  241         lbn = lblkno(ip->i_fs, (ip->i_size + ip->i_fs->fs_bsize - 1));
  242         BO_LOCK(bo);
  243 loop:
  244         TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
  245                 bp->b_vflags &= ~BV_SCANNED;
  246         TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
  247                 /*
  248                  * Reasons to skip this buffer: it has already been considered
  249                  * on this pass, the buffer has dependencies that will cause
  250                  * it to be redirtied and it has not already been deferred,
  251                  * or it is already being written.
  252                  */
  253                 if ((bp->b_vflags & BV_SCANNED) != 0)
  254                         continue;
  255                 bp->b_vflags |= BV_SCANNED;
  256                 /* Flush indirects in order. */
  257                 if (waitfor == MNT_WAIT && bp->b_lblkno <= -NDADDR &&
  258                     lbn_level(bp->b_lblkno) >= passes)
  259                         continue;
  260                 if (bp->b_lblkno > lbn)
  261                         panic("ffs_syncvnode: syncing truncated data.");
  262                 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
  263                         continue;
  264                 BO_UNLOCK(bo);
  265                 if ((bp->b_flags & B_DELWRI) == 0)
  266                         panic("ffs_fsync: not dirty");
  267                 /*
  268                  * Check for dependencies and potentially complete them.
  269                  */
  270                 if (!LIST_EMPTY(&bp->b_dep) &&
  271                     (error = softdep_sync_buf(vp, bp,
  272                     wait ? MNT_WAIT : MNT_NOWAIT)) != 0) {
  273                         /* I/O error. */
  274                         if (error != EBUSY) {
  275                                 BUF_UNLOCK(bp);
  276                                 return (error);
  277                         }
  278                         /* If we deferred once, don't defer again. */
  279                         if ((bp->b_flags & B_DEFERRED) == 0) {
  280                                 bp->b_flags |= B_DEFERRED;
  281                                 BUF_UNLOCK(bp);
  282                                 goto next;
  283                         }
  284                 }
  285                 if (wait) {
  286                         bremfree(bp);
  287                         if ((error = bwrite(bp)) != 0)
  288                                 return (error);
  289                 } else if ((bp->b_flags & B_CLUSTEROK)) {
  290                         (void) vfs_bio_awrite(bp);
  291                 } else {
  292                         bremfree(bp);
  293                         (void) bawrite(bp);
  294                 }
  295 next:
  296                 /*
  297                  * Since we may have slept during the I/O, we need
  298                  * to start from a known point.
  299                  */
  300                 BO_LOCK(bo);
  301                 nbp = TAILQ_FIRST(&bo->bo_dirty.bv_hd);
  302         }
  303         if (waitfor != MNT_WAIT) {
  304                 BO_UNLOCK(bo);
  305                 if ((flags & NO_INO_UPDT) != 0)
  306                         return (0);
  307                 else
  308                         return (ffs_update(vp, 0));
  309         }
  310         /* Drain IO to see if we're done. */
  311         bufobj_wwait(bo, 0, 0);
  312         /*
  313          * Block devices associated with filesystems may have new I/O
  314          * requests posted for them even if the vnode is locked, so no
  315          * amount of trying will get them clean.  We make several passes
  316          * as a best effort.
  317          *
  318          * Regular files may need multiple passes to flush all dependency
  319          * work as it is possible that we must write once per indirect
  320          * level, once for the leaf, and once for the inode and each of
  321          * these will be done with one sync and one async pass.
  322          */
  323         if (bo->bo_dirty.bv_cnt > 0) {
  324                 /* Write the inode after sync passes to flush deps. */
  325                 if (wait && DOINGSOFTDEP(vp) && (flags & NO_INO_UPDT) == 0) {
  326                         BO_UNLOCK(bo);
  327                         ffs_update(vp, 1);
  328                         BO_LOCK(bo);
  329                 }
  330                 /* switch between sync/async. */
  331                 wait = !wait;
  332                 if (wait == 1 || ++passes < NIADDR + 2)
  333                         goto loop;
  334 #ifdef INVARIANTS
  335                 if (!vn_isdisk(vp, NULL))
  336                         vprint("ffs_fsync: dirty", vp);
  337 #endif
  338         }
  339         BO_UNLOCK(bo);
  340         error = 0;
  341         if ((flags & NO_INO_UPDT) == 0)
  342                 error = ffs_update(vp, 1);
  343         if (DOINGSUJ(vp))
  344                 softdep_journal_fsync(VTOI(vp));
  345         return (error);
  346 }
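
       /*
        * Summary of the pass structure above (derived from the code):
        * the first scan of the dirty buffer list is always asynchronous
        * (wait == 0).  For MNT_WAIT syncs the loop then alternates sync
        * and async rounds, allowing up to NIADDR + 2 rounds in total so
        * that each level of indirect block, the leaf data, and the
        * inode itself can be flushed in dependency order.
        */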
  347 
  348 static int
   349 ffs_lock(
   350         struct vop_lock1_args /* {
   351                 struct vnode *a_vp;
   352                 int a_flags;
   353                 struct thread *a_td;
   354                 char *file;
   355                 int line;
   356         } */ *ap)
  357 {
  358 #ifndef NO_FFS_SNAPSHOT
  359         struct vnode *vp;
  360         int flags;
  361         struct lock *lkp;
  362         int result;
  363 
  364         switch (ap->a_flags & LK_TYPE_MASK) {
  365         case LK_SHARED:
  366         case LK_UPGRADE:
  367         case LK_EXCLUSIVE:
  368                 vp = ap->a_vp;
  369                 flags = ap->a_flags;
  370                 for (;;) {
  371 #ifdef DEBUG_VFS_LOCKS
  372                         KASSERT(vp->v_holdcnt != 0,
  373                             ("ffs_lock %p: zero hold count", vp));
  374 #endif
  375                         lkp = vp->v_vnlock;
  376                         result = _lockmgr_args(lkp, flags, VI_MTX(vp),
  377                             LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT,
  378                             ap->a_file, ap->a_line);
  379                         if (lkp == vp->v_vnlock || result != 0)
  380                                 break;
  381                         /*
  382                          * Apparent success, except that the vnode
  383                          * mutated between snapshot file vnode and
  384                          * regular file vnode while this process
  385                          * slept.  The lock currently held is not the
  386                          * right lock.  Release it, and try to get the
  387                          * new lock.
  388                          */
  389                         (void) _lockmgr_args(lkp, LK_RELEASE, NULL,
  390                             LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT,
  391                             ap->a_file, ap->a_line);
  392                         if ((flags & (LK_INTERLOCK | LK_NOWAIT)) ==
  393                             (LK_INTERLOCK | LK_NOWAIT))
  394                                 return (EBUSY);
  395                         if ((flags & LK_TYPE_MASK) == LK_UPGRADE)
  396                                 flags = (flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE;
  397                         flags &= ~LK_INTERLOCK;
  398                 }
  399                 break;
  400         default:
  401                 result = VOP_LOCK1_APV(&ufs_vnodeops, ap);
  402         }
  403         return (result);
  404 #else
  405         return (VOP_LOCK1_APV(&ufs_vnodeops, ap));
  406 #endif
  407 }
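
       /*
        * The retry loop in ffs_lock() is an instance of a general idiom:
        * locking through a pointer that may change while we sleep.  A
        * minimal sketch of the idiom (illustrative, not kernel API-exact):
        *
        *        for (;;) {
        *                lkp = vp->v_vnlock;             /* sample pointer */
        *                lockmgr(lkp, flags, ...);       /* may sleep */
        *                if (lkp == vp->v_vnlock)        /* still right lock? */
        *                        break;
        *                lockmgr(lkp, LK_RELEASE, ...);  /* raced; retry */
        *        }
        */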
  408 
  409 /*
  410  * Vnode op for reading.
  411  */
  412 static int
   413 ffs_read(
   414         struct vop_read_args /* {
   415                 struct vnode *a_vp;
   416                 struct uio *a_uio;
   417                 int a_ioflag;
   418                 struct ucred *a_cred;
   419         } */ *ap)
  420 {
  421         struct vnode *vp;
  422         struct inode *ip;
  423         struct uio *uio;
  424         struct fs *fs;
  425         struct buf *bp;
  426         ufs_lbn_t lbn, nextlbn;
  427         off_t bytesinfile;
  428         long size, xfersize, blkoffset;
  429         ssize_t orig_resid;
  430         int error;
  431         int seqcount;
  432         int ioflag;
  433 
  434         vp = ap->a_vp;
  435         uio = ap->a_uio;
  436         ioflag = ap->a_ioflag;
  437         if (ap->a_ioflag & IO_EXT)
  438 #ifdef notyet
  439                 return (ffs_extread(vp, uio, ioflag));
  440 #else
  441                 panic("ffs_read+IO_EXT");
  442 #endif
  443 #ifdef DIRECTIO
  444         if ((ioflag & IO_DIRECT) != 0) {
  445                 int workdone;
  446 
  447                 error = ffs_rawread(vp, uio, &workdone);
  448                 if (error != 0 || workdone != 0)
  449                         return error;
  450         }
  451 #endif
  452 
  453         seqcount = ap->a_ioflag >> IO_SEQSHIFT;
  454         ip = VTOI(vp);
  455 
  456 #ifdef INVARIANTS
  457         if (uio->uio_rw != UIO_READ)
  458                 panic("ffs_read: mode");
  459 
  460         if (vp->v_type == VLNK) {
  461                 if ((int)ip->i_size < vp->v_mount->mnt_maxsymlinklen)
  462                         panic("ffs_read: short symlink");
  463         } else if (vp->v_type != VREG && vp->v_type != VDIR)
  464                 panic("ffs_read: type %d",  vp->v_type);
  465 #endif
  466         orig_resid = uio->uio_resid;
  467         KASSERT(orig_resid >= 0, ("ffs_read: uio->uio_resid < 0"));
  468         if (orig_resid == 0)
  469                 return (0);
  470         KASSERT(uio->uio_offset >= 0, ("ffs_read: uio->uio_offset < 0"));
  471         fs = ip->i_fs;
  472         if (uio->uio_offset < ip->i_size &&
  473             uio->uio_offset >= fs->fs_maxfilesize)
  474                 return (EOVERFLOW);
  475 
  476         for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
  477                 if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0)
  478                         break;
  479                 lbn = lblkno(fs, uio->uio_offset);
  480                 nextlbn = lbn + 1;
  481 
  482                 /*
   483                  * Size of the buffer.  The buffer representing
   484                  * the end of the file is rounded up to the size
   485                  * of the block type (fragment or full block, as
   486                  * appropriate).
  487                  */
  488                 size = blksize(fs, ip, lbn);
  489                 blkoffset = blkoff(fs, uio->uio_offset);
  490 
  491                 /*
  492                  * The amount we want to transfer in this iteration is
   493                  * one FS block minus the amount of data that precedes
   494                  * our start point within the block.
  495                  */
  496                 xfersize = fs->fs_bsize - blkoffset;
  497 
  498                 /*
  499                  * But if we actually want less than the block,
  500                  * or the file doesn't have a whole block more of data,
  501                  * then use the lesser number.
  502                  */
  503                 if (uio->uio_resid < xfersize)
  504                         xfersize = uio->uio_resid;
  505                 if (bytesinfile < xfersize)
  506                         xfersize = bytesinfile;
  507 
  508                 if (lblktosize(fs, nextlbn) >= ip->i_size) {
  509                         /*
  510                          * Don't do readahead if this is the end of the file.
  511                          */
  512                         error = bread_gb(vp, lbn, size, NOCRED,
  513                             GB_UNMAPPED, &bp);
  514                 } else if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) {
  515                         /*
  516                          * Otherwise if we are allowed to cluster,
  517                          * grab as much as we can.
  518                          *
  519                          * XXX  This may not be a win if we are not
  520                          * doing sequential access.
  521                          */
  522                         error = cluster_read(vp, ip->i_size, lbn,
  523                             size, NOCRED, blkoffset + uio->uio_resid,
  524                             seqcount, GB_UNMAPPED, &bp);
  525                 } else if (seqcount > 1) {
  526                         /*
  527                          * If we are NOT allowed to cluster, then
  528                          * if we appear to be acting sequentially,
  529                          * fire off a request for a readahead
  530                          * as well as a read. Note that the 4th and 5th
  531                          * arguments point to arrays of the size specified in
  532                          * the 6th argument.
  533                          */
  534                         int nextsize = blksize(fs, ip, nextlbn);
  535                         error = breadn_flags(vp, lbn, size, &nextlbn,
  536                             &nextsize, 1, NOCRED, GB_UNMAPPED, &bp);
  537                 } else {
  538                         /*
  539                          * Failing all of the above, just read what the
  540                          * user asked for. Interestingly, the same as
  541                          * the first option above.
  542                          */
  543                         error = bread_gb(vp, lbn, size, NOCRED,
  544                             GB_UNMAPPED, &bp);
  545                 }
  546                 if (error) {
  547                         brelse(bp);
  548                         bp = NULL;
  549                         break;
  550                 }
  551 
  552                 /*
  553                  * If IO_DIRECT then set B_DIRECT for the buffer.  This
  554                  * will cause us to attempt to release the buffer later on
  555                  * and will cause the buffer cache to attempt to free the
  556                  * underlying pages.
  557                  */
  558                 if (ioflag & IO_DIRECT)
  559                         bp->b_flags |= B_DIRECT;
  560 
  561                 /*
  562                  * We should only get non-zero b_resid when an I/O error
  563                  * has occurred, which should cause us to break above.
  564                  * However, if the short read did not cause an error,
  565                  * then we want to ensure that we do not uiomove bad
  566                  * or uninitialized data.
  567                  */
  568                 size -= bp->b_resid;
  569                 if (size < xfersize) {
  570                         if (size == 0)
  571                                 break;
  572                         xfersize = size;
  573                 }
  574 
  575                 if ((bp->b_flags & B_UNMAPPED) == 0) {
  576                         error = vn_io_fault_uiomove((char *)bp->b_data +
  577                             blkoffset, (int)xfersize, uio);
  578                 } else {
  579                         error = vn_io_fault_pgmove(bp->b_pages, blkoffset,
  580                             (int)xfersize, uio);
  581                 }
  582                 if (error)
  583                         break;
  584 
  585                 if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
  586                    (LIST_EMPTY(&bp->b_dep))) {
  587                         /*
  588                          * If there are no dependencies, and it's VMIO,
  589                          * then we don't need the buf, mark it available
  590                          * for freeing.  For non-direct VMIO reads, the VM
  591                          * has the data.
  592                          */
  593                         bp->b_flags |= B_RELBUF;
  594                         brelse(bp);
  595                 } else {
  596                         /*
  597                          * Otherwise let whoever
  598                          * made the request take care of
  599                          * freeing it. We just queue
  600                          * it onto another list.
  601                          */
  602                         bqrelse(bp);
  603                 }
  604         }
  605 
  606         /*
   607          * A non-NULL bp here can only happen in the case of an error,
   608          * because the loop above resets bp to NULL on each iteration
   609          * and on normal completion has not set a new value into it,
   610          * so it must have come from a 'break' statement.
  611          */
  612         if (bp != NULL) {
  613                 if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
  614                    (LIST_EMPTY(&bp->b_dep))) {
  615                         bp->b_flags |= B_RELBUF;
  616                         brelse(bp);
  617                 } else {
  618                         bqrelse(bp);
  619                 }
  620         }
  621 
  622         if ((error == 0 || uio->uio_resid != orig_resid) &&
  623             (vp->v_mount->mnt_flag & MNT_NOATIME) == 0 &&
  624             (ip->i_flag & IN_ACCESS) == 0) {
  625                 VI_LOCK(vp);
  626                 ip->i_flag |= IN_ACCESS;
  627                 VI_UNLOCK(vp);
  628         }
  629         return (error);
  630 }
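
       /*
        * Note on the IN_ACCESS handling above (summary): rather than
        * writing the inode on every read, the read path only sets the
        * IN_ACCESS flag under the vnode interlock; the access time is
        * folded into the on-disk inode later, when the inode is next
        * synced or its timestamps are refreshed.
        */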
  631 
  632 /*
  633  * Vnode op for writing.
  634  */
  635 static int
   636 ffs_write(
   637         struct vop_write_args /* {
   638                 struct vnode *a_vp;
   639                 struct uio *a_uio;
   640                 int a_ioflag;
   641                 struct ucred *a_cred;
   642         } */ *ap)
  643 {
  644         struct vnode *vp;
  645         struct uio *uio;
  646         struct inode *ip;
  647         struct fs *fs;
  648         struct buf *bp;
  649         ufs_lbn_t lbn;
  650         off_t osize;
  651         ssize_t resid;
  652         int seqcount;
  653         int blkoffset, error, flags, ioflag, size, xfersize;
  654 
  655         vp = ap->a_vp;
  656         uio = ap->a_uio;
  657         ioflag = ap->a_ioflag;
  658         if (ap->a_ioflag & IO_EXT)
  659 #ifdef notyet
  660                 return (ffs_extwrite(vp, uio, ioflag, ap->a_cred));
  661 #else
  662                 panic("ffs_write+IO_EXT");
  663 #endif
  664 
  665         seqcount = ap->a_ioflag >> IO_SEQSHIFT;
  666         ip = VTOI(vp);
  667 
  668 #ifdef INVARIANTS
  669         if (uio->uio_rw != UIO_WRITE)
  670                 panic("ffs_write: mode");
  671 #endif
  672 
  673         switch (vp->v_type) {
  674         case VREG:
  675                 if (ioflag & IO_APPEND)
  676                         uio->uio_offset = ip->i_size;
  677                 if ((ip->i_flags & APPEND) && uio->uio_offset != ip->i_size)
  678                         return (EPERM);
  679                 /* FALLTHROUGH */
  680         case VLNK:
  681                 break;
  682         case VDIR:
  683                 panic("ffs_write: dir write");
  684                 break;
  685         default:
  686                 panic("ffs_write: type %p %d (%d,%d)", vp, (int)vp->v_type,
  687                         (int)uio->uio_offset,
  688                         (int)uio->uio_resid
  689                 );
  690         }
  691 
  692         KASSERT(uio->uio_resid >= 0, ("ffs_write: uio->uio_resid < 0"));
  693         KASSERT(uio->uio_offset >= 0, ("ffs_write: uio->uio_offset < 0"));
  694         fs = ip->i_fs;
  695         if ((uoff_t)uio->uio_offset + uio->uio_resid > fs->fs_maxfilesize)
  696                 return (EFBIG);
  697         /*
  698          * Maybe this should be above the vnode op call, but so long as
  699          * file servers have no limits, I don't think it matters.
  700          */
  701         if (vn_rlimit_fsize(vp, uio, uio->uio_td))
  702                 return (EFBIG);
  703 
  704         resid = uio->uio_resid;
  705         osize = ip->i_size;
  706         if (seqcount > BA_SEQMAX)
  707                 flags = BA_SEQMAX << BA_SEQSHIFT;
  708         else
  709                 flags = seqcount << BA_SEQSHIFT;
  710         if ((ioflag & IO_SYNC) && !DOINGASYNC(vp))
  711                 flags |= IO_SYNC;
  712         flags |= BA_UNMAPPED;
  713 
  714         for (error = 0; uio->uio_resid > 0;) {
  715                 lbn = lblkno(fs, uio->uio_offset);
  716                 blkoffset = blkoff(fs, uio->uio_offset);
  717                 xfersize = fs->fs_bsize - blkoffset;
  718                 if (uio->uio_resid < xfersize)
  719                         xfersize = uio->uio_resid;
  720                 if (uio->uio_offset + xfersize > ip->i_size)
  721                         vnode_pager_setsize(vp, uio->uio_offset + xfersize);
  722 
  723                 /*
  724                  * We must perform a read-before-write if the transfer size
  725                  * does not cover the entire buffer.
  726                  */
  727                 if (fs->fs_bsize > xfersize)
  728                         flags |= BA_CLRBUF;
  729                 else
  730                         flags &= ~BA_CLRBUF;
  731 /* XXX is uio->uio_offset the right thing here? */
  732                 error = UFS_BALLOC(vp, uio->uio_offset, xfersize,
  733                     ap->a_cred, flags, &bp);
  734                 if (error != 0) {
  735                         vnode_pager_setsize(vp, ip->i_size);
  736                         break;
  737                 }
  738                 if (ioflag & IO_DIRECT)
  739                         bp->b_flags |= B_DIRECT;
  740                 if ((ioflag & (IO_SYNC|IO_INVAL)) == (IO_SYNC|IO_INVAL))
  741                         bp->b_flags |= B_NOCACHE;
  742 
  743                 if (uio->uio_offset + xfersize > ip->i_size) {
  744                         ip->i_size = uio->uio_offset + xfersize;
  745                         DIP_SET(ip, i_size, ip->i_size);
  746                 }
  747 
  748                 size = blksize(fs, ip, lbn) - bp->b_resid;
  749                 if (size < xfersize)
  750                         xfersize = size;
  751 
  752                 if ((bp->b_flags & B_UNMAPPED) == 0) {
  753                         error = vn_io_fault_uiomove((char *)bp->b_data +
  754                             blkoffset, (int)xfersize, uio);
  755                 } else {
  756                         error = vn_io_fault_pgmove(bp->b_pages, blkoffset,
  757                             (int)xfersize, uio);
  758                 }
  759                 /*
  760                  * If the buffer is not already filled and we encounter an
  761                  * error while trying to fill it, we have to clear out any
  762                  * garbage data from the pages instantiated for the buffer.
  763                  * If we do not, a failed uiomove() during a write can leave
  764                  * the prior contents of the pages exposed to a userland mmap.
  765                  *
  766                  * Note that we need only clear buffers with a transfer size
  767                  * equal to the block size because buffers with a shorter
  768                  * transfer size were cleared above by the call to UFS_BALLOC()
  769                  * with the BA_CLRBUF flag set.
  770                  *
  771                  * If the source region for uiomove identically mmaps the
  772                  * buffer, uiomove() performed the NOP copy, and the buffer
  773                  * content remains valid because the page fault handler
  774                  * validated the pages.
  775                  */
  776                 if (error != 0 && (bp->b_flags & B_CACHE) == 0 &&
  777                     fs->fs_bsize == xfersize)
  778                         vfs_bio_clrbuf(bp);
  779                 if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
  780                    (LIST_EMPTY(&bp->b_dep))) {
  781                         bp->b_flags |= B_RELBUF;
  782                 }
  783 
  784                 /*
  785                  * If IO_SYNC each buffer is written synchronously.  Otherwise
  786                  * if we have a severe page deficiency write the buffer
  787                  * asynchronously.  Otherwise try to cluster, and if that
  788                  * doesn't do it then either do an async write (if O_DIRECT),
  789                  * or a delayed write (if not).
  790                  */
  791                 if (ioflag & IO_SYNC) {
  792                         (void)bwrite(bp);
  793                 } else if (vm_page_count_severe() ||
  794                             buf_dirty_count_severe() ||
  795                             (ioflag & IO_ASYNC)) {
  796                         bp->b_flags |= B_CLUSTEROK;
  797                         bawrite(bp);
  798                 } else if (xfersize + blkoffset == fs->fs_bsize) {
  799                         if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0) {
  800                                 bp->b_flags |= B_CLUSTEROK;
  801                                 cluster_write(vp, bp, ip->i_size, seqcount,
  802                                     GB_UNMAPPED);
  803                         } else {
  804                                 bawrite(bp);
  805                         }
  806                 } else if (ioflag & IO_DIRECT) {
  807                         bp->b_flags |= B_CLUSTEROK;
  808                         bawrite(bp);
  809                 } else {
  810                         bp->b_flags |= B_CLUSTEROK;
  811                         bdwrite(bp);
  812                 }
  813                 if (error || xfersize == 0)
  814                         break;
  815                 ip->i_flag |= IN_CHANGE | IN_UPDATE;
  816         }
  817         /*
  818          * If we successfully wrote any data, and we are not the superuser
  819          * we clear the setuid and setgid bits as a precaution against
  820          * tampering.
  821          */
  822         if ((ip->i_mode & (ISUID | ISGID)) && resid > uio->uio_resid &&
  823             ap->a_cred) {
  824                 if (priv_check_cred(ap->a_cred, PRIV_VFS_RETAINSUGID, 0)) {
  825                         ip->i_mode &= ~(ISUID | ISGID);
  826                         DIP_SET(ip, i_mode, ip->i_mode);
  827                 }
  828         }
  829         if (error) {
  830                 if (ioflag & IO_UNIT) {
  831                         (void)ffs_truncate(vp, osize,
  832                             IO_NORMAL | (ioflag & IO_SYNC), ap->a_cred);
  833                         uio->uio_offset -= resid - uio->uio_resid;
  834                         uio->uio_resid = resid;
  835                 }
  836         } else if (resid > uio->uio_resid && (ioflag & IO_SYNC))
  837                 error = ffs_update(vp, 1);
  838         return (error);
  839 }
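
       /*
        * Buffer disposition in the write loop above, summarized from the
        * code for reference (tested in this order):
        *
        *        IO_SYNC                          -> bwrite()  (synchronous)
        *        page/buffer shortage or IO_ASYNC -> bawrite() (async)
        *        full block, clustering allowed   -> cluster_write()
        *        full block, MNT_NOCLUSTERW set   -> bawrite()
        *        IO_DIRECT                        -> bawrite()
        *        otherwise                        -> bdwrite() (delayed)
        */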
  840 
  841 /*
  842  * get page routine
  843  */
  844 static int
   845 ffs_getpages(
   846         struct vop_getpages_args *ap)
  847 {
  848         int i;
  849         vm_page_t mreq;
  850         int pcount;
  851 
  852         pcount = round_page(ap->a_count) / PAGE_SIZE;
  853         mreq = ap->a_m[ap->a_reqpage];
  854 
  855         /*
   856          * If ANY DEV_BSIZE blocks are valid on a large filesystem block,
   857          * then the entire page is valid.  Since the page may be mapped,
   858          * user programs might reference data beyond the actual end of file
   859          * occurring within the page.  We have to zero that data.
  860          */
  861         VM_OBJECT_WLOCK(mreq->object);
  862         if (mreq->valid) {
  863                 if (mreq->valid != VM_PAGE_BITS_ALL)
  864                         vm_page_zero_invalid(mreq, TRUE);
  865                 for (i = 0; i < pcount; i++) {
  866                         if (i != ap->a_reqpage) {
  867                                 vm_page_lock(ap->a_m[i]);
  868                                 vm_page_free(ap->a_m[i]);
  869                                 vm_page_unlock(ap->a_m[i]);
  870                         }
  871                 }
  872                 VM_OBJECT_WUNLOCK(mreq->object);
   873                 return (VM_PAGER_OK);
  874         }
  875         VM_OBJECT_WUNLOCK(mreq->object);
  876 
   877         return (vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
   878             ap->a_count,
   879             ap->a_reqpage));
  880 }
  881 
  882 
  883 /*
  884  * Extended attribute area reading.
  885  */
  886 static int
  887 ffs_extread(struct vnode *vp, struct uio *uio, int ioflag)
  888 {
  889         struct inode *ip;
  890         struct ufs2_dinode *dp;
  891         struct fs *fs;
  892         struct buf *bp;
  893         ufs_lbn_t lbn, nextlbn;
  894         off_t bytesinfile;
  895         long size, xfersize, blkoffset;
  896         ssize_t orig_resid;
  897         int error;
  898 
  899         ip = VTOI(vp);
  900         fs = ip->i_fs;
  901         dp = ip->i_din2;
  902 
  903 #ifdef INVARIANTS
  904         if (uio->uio_rw != UIO_READ || fs->fs_magic != FS_UFS2_MAGIC)
  905                 panic("ffs_extread: mode");
  906 
  907 #endif
  908         orig_resid = uio->uio_resid;
  909         KASSERT(orig_resid >= 0, ("ffs_extread: uio->uio_resid < 0"));
  910         if (orig_resid == 0)
  911                 return (0);
  912         KASSERT(uio->uio_offset >= 0, ("ffs_extread: uio->uio_offset < 0"));
  913 
  914         for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
  915                 if ((bytesinfile = dp->di_extsize - uio->uio_offset) <= 0)
  916                         break;
  917                 lbn = lblkno(fs, uio->uio_offset);
  918                 nextlbn = lbn + 1;
  919 
  920                 /*
   921                  * Size of the buffer.  The buffer representing
   922                  * the end of the file is rounded up to the size
   923                  * of the block type (fragment or full block, as
   924                  * appropriate).
  925                  */
  926                 size = sblksize(fs, dp->di_extsize, lbn);
  927                 blkoffset = blkoff(fs, uio->uio_offset);
  928 
  929                 /*
  930                  * The amount we want to transfer in this iteration is
   931                  * one FS block minus the amount of data that precedes
   932                  * our start point within the block.
  933                  */
  934                 xfersize = fs->fs_bsize - blkoffset;
  935 
  936                 /*
  937                  * But if we actually want less than the block,
  938                  * or the file doesn't have a whole block more of data,
  939                  * then use the lesser number.
  940                  */
  941                 if (uio->uio_resid < xfersize)
  942                         xfersize = uio->uio_resid;
  943                 if (bytesinfile < xfersize)
  944                         xfersize = bytesinfile;
  945 
  946                 if (lblktosize(fs, nextlbn) >= dp->di_extsize) {
  947                         /*
  948                          * Don't do readahead if this is the end of the info.
  949                          */
  950                         error = bread(vp, -1 - lbn, size, NOCRED, &bp);
  951                 } else {
  952                         /*
  953                          * If we have a second block, then
  954                          * fire off a request for a readahead
  955                          * as well as a read. Note that the 4th and 5th
  956                          * arguments point to arrays of the size specified in
  957                          * the 6th argument.
  958                          */
  959                         int nextsize = sblksize(fs, dp->di_extsize, nextlbn);
  960 
  961                         nextlbn = -1 - nextlbn;
  962                         error = breadn(vp, -1 - lbn,
  963                             size, &nextlbn, &nextsize, 1, NOCRED, &bp);
  964                 }
  965                 if (error) {
  966                         brelse(bp);
  967                         bp = NULL;
  968                         break;
  969                 }
  970 
  971                 /*
  972                  * If IO_DIRECT then set B_DIRECT for the buffer.  This
  973                  * will cause us to attempt to release the buffer later on
  974                  * and will cause the buffer cache to attempt to free the
  975                  * underlying pages.
  976                  */
  977                 if (ioflag & IO_DIRECT)
  978                         bp->b_flags |= B_DIRECT;
  979 
  980                 /*
  981                  * We should only get non-zero b_resid when an I/O error
  982                  * has occurred, which should cause us to break above.
  983                  * However, if the short read did not cause an error,
  984                  * then we want to ensure that we do not uiomove bad
  985                  * or uninitialized data.
  986                  */
  987                 size -= bp->b_resid;
  988                 if (size < xfersize) {
  989                         if (size == 0)
  990                                 break;
  991                         xfersize = size;
  992                 }
  993 
  994                 error = uiomove((char *)bp->b_data + blkoffset,
  995                                         (int)xfersize, uio);
  996                 if (error)
  997                         break;
  998 
  999                 if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
 1000                    (LIST_EMPTY(&bp->b_dep))) {
 1001                         /*
 1002                          * If there are no dependencies, and it's VMIO,
 1003                          * then we don't need the buf, mark it available
 1004                          * for freeing.  For non-direct VMIO reads, the VM
 1005                          * has the data.
 1006                          */
 1007                         bp->b_flags |= B_RELBUF;
 1008                         brelse(bp);
 1009                 } else {
 1010                         /*
 1011                          * Otherwise let whoever
 1012                          * made the request take care of
 1013                          * freeing it. We just queue
 1014                          * it onto another list.
 1015                          */
 1016                         bqrelse(bp);
 1017                 }
 1018         }
 1019 
 1020         /*
  1021          * A non-NULL bp here can only happen in the case of an error,
  1022          * because the loop above resets bp to NULL on each iteration
  1023          * and on normal completion has not set a new value into it,
  1024          * so it must have come from a 'break' statement.
 1025          */
 1026         if (bp != NULL) {
 1027                 if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
 1028                    (LIST_EMPTY(&bp->b_dep))) {
 1029                         bp->b_flags |= B_RELBUF;
 1030                         brelse(bp);
 1031                 } else {
 1032                         bqrelse(bp);
 1033                 }
 1034         }
 1035         return (error);
 1036 }
 1037 
 1038 /*
 1039  * Extended attribute area writing.
 1040  */
 1041 static int
 1042 ffs_extwrite(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *ucred)
 1043 {
 1044         struct inode *ip;
 1045         struct ufs2_dinode *dp;
 1046         struct fs *fs;
 1047         struct buf *bp;
 1048         ufs_lbn_t lbn;
 1049         off_t osize;
 1050         ssize_t resid;
 1051         int blkoffset, error, flags, size, xfersize;
 1052 
 1053         ip = VTOI(vp);
 1054         fs = ip->i_fs;
 1055         dp = ip->i_din2;
 1056 
 1057 #ifdef INVARIANTS
 1058         if (uio->uio_rw != UIO_WRITE || fs->fs_magic != FS_UFS2_MAGIC)
 1059                 panic("ffs_extwrite: mode");
 1060 #endif
 1061 
 1062         if (ioflag & IO_APPEND)
 1063                 uio->uio_offset = dp->di_extsize;
 1064         KASSERT(uio->uio_offset >= 0, ("ffs_extwrite: uio->uio_offset < 0"));
 1065         KASSERT(uio->uio_resid >= 0, ("ffs_extwrite: uio->uio_resid < 0"));
 1066         if ((uoff_t)uio->uio_offset + uio->uio_resid > NXADDR * fs->fs_bsize)
 1067                 return (EFBIG);
 1068 
 1069         resid = uio->uio_resid;
 1070         osize = dp->di_extsize;
 1071         flags = IO_EXT;
 1072         if ((ioflag & IO_SYNC) && !DOINGASYNC(vp))
 1073                 flags |= IO_SYNC;
 1074 
 1075         for (error = 0; uio->uio_resid > 0;) {
 1076                 lbn = lblkno(fs, uio->uio_offset);
 1077                 blkoffset = blkoff(fs, uio->uio_offset);
 1078                 xfersize = fs->fs_bsize - blkoffset;
 1079                 if (uio->uio_resid < xfersize)
 1080                         xfersize = uio->uio_resid;
 1081 
 1082                 /*
 1083                  * We must perform a read-before-write if the transfer size
 1084                  * does not cover the entire buffer.
 1085                  */
 1086                 if (fs->fs_bsize > xfersize)
 1087                         flags |= BA_CLRBUF;
 1088                 else
 1089                         flags &= ~BA_CLRBUF;
 1090                 error = UFS_BALLOC(vp, uio->uio_offset, xfersize,
 1091                     ucred, flags, &bp);
 1092                 if (error != 0)
 1093                         break;
 1094                 /*
 1095                  * If the buffer is not valid we have to clear out any
 1096                  * garbage data from the pages instantiated for the buffer.
 1097                  * If we do not, a failed uiomove() during a write can leave
 1098                  * the prior contents of the pages exposed to a userland
 1099                  * mmap().  XXX deal with uiomove() errors a better way.
 1100                  */
 1101                 if ((bp->b_flags & B_CACHE) == 0 && fs->fs_bsize <= xfersize)
 1102                         vfs_bio_clrbuf(bp);
 1103                 if (ioflag & IO_DIRECT)
 1104                         bp->b_flags |= B_DIRECT;
 1105 
 1106                 if (uio->uio_offset + xfersize > dp->di_extsize)
 1107                         dp->di_extsize = uio->uio_offset + xfersize;
 1108 
 1109                 size = sblksize(fs, dp->di_extsize, lbn) - bp->b_resid;
 1110                 if (size < xfersize)
 1111                         xfersize = size;
 1112 
 1113                 error =
 1114                     uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio);
 1115                 if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
 1116                    (LIST_EMPTY(&bp->b_dep))) {
 1117                         bp->b_flags |= B_RELBUF;
 1118                 }
 1119 
 1120                 /*
 1121                  * If IO_SYNC each buffer is written synchronously.  Otherwise
 1122                  * if we have a severe page deficiency write the buffer
 1123                  * asynchronously.  Otherwise try to cluster, and if that
 1124                  * doesn't do it then either do an async write (if O_DIRECT),
 1125                  * or a delayed write (if not).
 1126                  */
 1127                 if (ioflag & IO_SYNC) {
 1128                         (void)bwrite(bp);
 1129                 } else if (vm_page_count_severe() ||
 1130                             buf_dirty_count_severe() ||
 1131                             xfersize + blkoffset == fs->fs_bsize ||
 1132                             (ioflag & (IO_ASYNC | IO_DIRECT)))
 1133                         bawrite(bp);
 1134                 else
 1135                         bdwrite(bp);
 1136                 if (error || xfersize == 0)
 1137                         break;
 1138                 ip->i_flag |= IN_CHANGE;
 1139         }
 1140         /*
 1141          * If we successfully wrote any data, and we are not the superuser
 1142          * we clear the setuid and setgid bits as a precaution against
 1143          * tampering.
 1144          */
 1145         if ((ip->i_mode & (ISUID | ISGID)) && resid > uio->uio_resid && ucred) {
 1146                 if (priv_check_cred(ucred, PRIV_VFS_RETAINSUGID, 0)) {
 1147                         ip->i_mode &= ~(ISUID | ISGID);
 1148                         dp->di_mode = ip->i_mode;
 1149                 }
 1150         }
 1151         if (error) {
 1152                 if (ioflag & IO_UNIT) {
 1153                         (void)ffs_truncate(vp, osize,
 1154                             IO_EXT | (ioflag&IO_SYNC), ucred);
 1155                         uio->uio_offset -= resid - uio->uio_resid;
 1156                         uio->uio_resid = resid;
 1157                 }
 1158         } else if (resid > uio->uio_resid && (ioflag & IO_SYNC))
 1159                 error = ffs_update(vp, 1);
 1160         return (error);
 1161 }
 1162 
 1163 
 1164 /*
  1165  * Vnode operation to retrieve a named extended attribute.
 1166  *
 1167  * Locate a particular EA (nspace:name) in the area (ptr:length), and return
 1168  * the length of the EA, and possibly the pointer to the entry and to the data.
 1169  */
 1170 static int
 1171 ffs_findextattr(u_char *ptr, u_int length, int nspace, const char *name, u_char **eap, u_char **eac)
 1172 {
 1173         u_char *p, *pe, *pn, *p0;
 1174         int eapad1, eapad2, ealength, ealen, nlen;
 1175         uint32_t ul;
 1176 
 1177         pe = ptr + length;
 1178         nlen = strlen(name);
 1179 
 1180         for (p = ptr; p < pe; p = pn) {
 1181                 p0 = p;
 1182                 bcopy(p, &ul, sizeof(ul));
 1183                 pn = p + ul;
 1184                 /* make sure this entry is complete */
 1185                 if (pn > pe)
 1186                         break;
 1187                 p += sizeof(uint32_t);
 1188                 if (*p != nspace)
 1189                         continue;
 1190                 p++;
 1191                 eapad2 = *p++;
 1192                 if (*p != nlen)
 1193                         continue;
 1194                 p++;
 1195                 if (bcmp(p, name, nlen))
 1196                         continue;
 1197                 ealength = sizeof(uint32_t) + 3 + nlen;
 1198                 eapad1 = 8 - (ealength % 8);
 1199                 if (eapad1 == 8)
 1200                         eapad1 = 0;
 1201                 ealength += eapad1;
 1202                 ealen = ul - ealength - eapad2;
 1203                 p += nlen + eapad1;
 1204                 if (eap != NULL)
 1205                         *eap = p0;
 1206                 if (eac != NULL)
 1207                         *eac = p;
 1208                 return (ealen);
 1209         }
  1210         return (-1);
 1211 }
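
       /*
        * The attribute area parsed above is a packed sequence of records.
        * A sketch of the layout implied by the pointer arithmetic (field
        * names are illustrative; newer FreeBSD releases formalize this as
        * struct extattr in <ufs/ufs/extattr.h>):
        *
        *        uint32_t length;        total record length, multiple of 8
        *        uint8_t  namespace;     attribute namespace
        *        uint8_t  contentpadlen; padding after the content (eapad2)
        *        uint8_t  namelength;    length of the name, no NUL
        *        char     name[];        padded to an 8-byte boundary (eapad1)
        *        content follows, then contentpadlen bytes of padding
        */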
 1212 
 1213 static int
 1214 ffs_rdextattr(u_char **p, struct vnode *vp, struct thread *td, int extra)
 1215 {
 1216         struct inode *ip;
 1217         struct ufs2_dinode *dp;
 1218         struct fs *fs;
 1219         struct uio luio;
 1220         struct iovec liovec;
 1221         int easize, error;
 1222         u_char *eae;
 1223 
 1224         ip = VTOI(vp);
 1225         fs = ip->i_fs;
 1226         dp = ip->i_din2;
 1227         easize = dp->di_extsize;
 1228         if ((uoff_t)easize + extra > NXADDR * fs->fs_bsize)
 1229                 return (EFBIG);
 1230 
 1231         eae = malloc(easize + extra, M_TEMP, M_WAITOK);
 1232 
 1233         liovec.iov_base = eae;
 1234         liovec.iov_len = easize;
 1235         luio.uio_iov = &liovec;
 1236         luio.uio_iovcnt = 1;
 1237         luio.uio_offset = 0;
 1238         luio.uio_resid = easize;
 1239         luio.uio_segflg = UIO_SYSSPACE;
 1240         luio.uio_rw = UIO_READ;
 1241         luio.uio_td = td;
 1242 
 1243         error = ffs_extread(vp, &luio, IO_EXT | IO_SYNC);
 1244         if (error) {
 1245                 free(eae, M_TEMP);
  1246                 return (error);
 1247         }
 1248         *p = eae;
 1249         return (0);
 1250 }
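
       /*
        * Note (summary): ffs_rdextattr() snapshots the whole external
        * attribute area into a malloc(9) buffer, over-allocating by
        * 'extra' bytes so that a caller which intends to append a new
        * attribute record can do so in place.
        */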
 1251 
 1252 static void
 1253 ffs_lock_ea(struct vnode *vp)
 1254 {
 1255         struct inode *ip;
 1256 
 1257         ip = VTOI(vp);
 1258         VI_LOCK(vp);
 1259         while (ip->i_flag & IN_EA_LOCKED) {
 1260                 ip->i_flag |= IN_EA_LOCKWAIT;
 1261                 msleep(&ip->i_ea_refs, &vp->v_interlock, PINOD + 2, "ufs_ea",
 1262                     0);
 1263         }
 1264         ip->i_flag |= IN_EA_LOCKED;
 1265         VI_UNLOCK(vp);
 1266 }
 1267 
 1268 static void
 1269 ffs_unlock_ea(struct vnode *vp)
 1270 {
 1271         struct inode *ip;
 1272 
 1273         ip = VTOI(vp);
 1274         VI_LOCK(vp);
 1275         if (ip->i_flag & IN_EA_LOCKWAIT)
 1276                 wakeup(&ip->i_ea_refs);
 1277         ip->i_flag &= ~(IN_EA_LOCKED | IN_EA_LOCKWAIT);
 1278         VI_UNLOCK(vp);
 1279 }
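
       /*
        * ffs_lock_ea()/ffs_unlock_ea() implement a small sleep lock over
        * the in-core EA area, built from the vnode interlock and
        * msleep().  Typical usage (sketch):
        *
        *        ffs_lock_ea(vp);
        *        ... examine or replace ip->i_ea_area / ip->i_ea_len ...
        *        ffs_unlock_ea(vp);
        */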
 1280 
 1281 static int
 1282 ffs_open_ea(struct vnode *vp, struct ucred *cred, struct thread *td)
 1283 {
 1284         struct inode *ip;
 1285         struct ufs2_dinode *dp;
 1286         int error;
 1287 
 1288         ip = VTOI(vp);
 1289 
 1290         ffs_lock_ea(vp);
 1291         if (ip->i_ea_area != NULL) {
 1292                 ip->i_ea_refs++;
 1293                 ffs_unlock_ea(vp);
 1294                 return (0);
 1295         }
 1296         dp = ip->i_din2;
 1297         error = ffs_rdextattr(&ip->i_ea_area, vp, td, 0);
 1298         if (error) {
 1299                 ffs_unlock_ea(vp);
 1300                 return (error);
 1301         }
 1302         ip->i_ea_len = dp->di_extsize;
 1303         ip->i_ea_error = 0;
 1304         ip->i_ea_refs++;
 1305         ffs_unlock_ea(vp);
 1306         return (0);
 1307 }
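
/*
 * Editor's note: ffs_open_ea() is reference counted, so each
 * successful call must be balanced by one ffs_close_ea().  Only the
 * first open reads the EA area from disk; a commit close writes the
 * area back, and the buffer itself is freed only when the last
 * reference is dropped.  Intended pairing (sketch):
 *
 *	error = ffs_open_ea(vp, cred, td);
 *	if (error == 0) {
 *		... read or rewrite ip->i_ea_area ...
 *		error = ffs_close_ea(vp, modified ? 1 : 0, cred, td);
 *	}
 */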
 1308 
 1309 /*
 1310  * Vnode extattr transaction commit/abort
 1311  */
 1312 static int
 1313 ffs_close_ea(struct vnode *vp, int commit, struct ucred *cred, struct thread *td)
 1314 {
 1315         struct inode *ip;
 1316         struct uio luio;
 1317         struct iovec liovec;
 1318         int error;
 1319         struct ufs2_dinode *dp;
 1320 
 1321         ip = VTOI(vp);
 1322 
 1323         ffs_lock_ea(vp);
 1324         if (ip->i_ea_area == NULL) {
 1325                 ffs_unlock_ea(vp);
 1326                 return (EINVAL);
 1327         }
 1328         dp = ip->i_din2;
 1329         error = ip->i_ea_error;
 1330         if (commit && error == 0) {
 1331                 ASSERT_VOP_ELOCKED(vp, "ffs_close_ea commit");
 1332                 if (cred == NOCRED)
 1333                         cred = vp->v_mount->mnt_cred;
 1334                 liovec.iov_base = ip->i_ea_area;
 1335                 liovec.iov_len = ip->i_ea_len;
 1336                 luio.uio_iov = &liovec;
 1337                 luio.uio_iovcnt = 1;
 1338                 luio.uio_offset = 0;
 1339                 luio.uio_resid = ip->i_ea_len;
 1340                 luio.uio_segflg = UIO_SYSSPACE;
 1341                 luio.uio_rw = UIO_WRITE;
 1342                 luio.uio_td = td;
 1343                 /* XXX: I'm not happy about truncating to zero size */
 1344                 if (ip->i_ea_len < dp->di_extsize)
 1345                         error = ffs_truncate(vp, 0, IO_EXT, cred);
 1346                 if (error == 0)
                              error = ffs_extwrite(vp, &luio, IO_EXT | IO_SYNC, cred);
 1347         }
 1348         if (--ip->i_ea_refs == 0) {
 1349                 free(ip->i_ea_area, M_TEMP);
 1350                 ip->i_ea_area = NULL;
 1351                 ip->i_ea_len = 0;
 1352                 ip->i_ea_error = 0;
 1353         }
 1354         ffs_unlock_ea(vp);
 1355         return (error);
 1356 }
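
/*
 * Editor's note on the commit path above: when the in-memory EA area
 * shrank below di_extsize, the external blocks are truncated to zero
 * first so that the following ffs_extwrite() recreates exactly
 * ip->i_ea_len bytes; without the truncate, stale bytes of the larger
 * old area would survive past the new end.
 */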
 1357 
 1358 /*
 1359  * Vnode extattr strategy routine for fifos.
 1360  *
 1361  * We need to check for a read or write of the extended attributes.
 1362  * Otherwise we just fall through and do the usual thing.
 1363  */
 1364 static int
 1365 ffsext_strategy(struct vop_strategy_args *ap)
 1366 /*
 1367 struct vop_strategy_args {
 1368         struct vnodeop_desc *a_desc;
 1369         struct vnode *a_vp;
 1370         struct buf *a_bp;
 1371 };
 1372 */
 1373 {
 1374         struct vnode *vp;
 1375         daddr_t lbn;
 1376 
 1377         vp = ap->a_vp;
 1378         lbn = ap->a_bp->b_lblkno;
 1379         if (VTOI(vp)->i_fs->fs_magic == FS_UFS2_MAGIC &&
 1380             lbn < 0 && lbn >= -NXADDR)
 1381                 return (VOP_STRATEGY_APV(&ufs_vnodeops, ap));
 1382         if (vp->v_type == VFIFO)
 1383                 return (VOP_STRATEGY_APV(&ufs_fifoops, ap));
 1384         panic("spec nodes went here");
 1385 }
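
/*
 * Editor's note: in UFS2 the extended attribute area has its own short
 * run of block pointers (di_extb[]), addressed by negative logical
 * block numbers -1 .. -NXADDR; the range test above routes such
 * buffers through the regular UFS strategy even on a fifo vnode.
 */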
 1386 
 1387 /*
 1388  * Vnode extattr transaction start (open).
 1389  */
 1390 static int
 1391 ffs_openextattr(struct vop_openextattr_args *ap)
 1392 /*
 1393 struct vop_openextattr_args {
 1394         struct vnodeop_desc *a_desc;
 1395         struct vnode *a_vp;
 1396         IN struct ucred *a_cred;
 1397         IN struct thread *a_td;
 1398 };
 1399 */
 1400 {
 1401         struct inode *ip;
 1402         struct fs *fs;
 1403 
 1404         ip = VTOI(ap->a_vp);
 1405         fs = ip->i_fs;
 1406 
 1407         if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
 1408                 return (EOPNOTSUPP);
 1409 
 1410         return (ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td));
 1411 }
 1412 
 1414 /*
 1415  * Vnode extattr transaction commit/abort
 1416  */
 1417 static int
 1418 ffs_closeextattr(struct vop_closeextattr_args *ap)
 1419 /*
 1420 struct vop_closeextattr_args {
 1421         struct vnodeop_desc *a_desc;
 1422         struct vnode *a_vp;
 1423         int a_commit;
 1424         IN struct ucred *a_cred;
 1425         IN struct thread *a_td;
 1426 };
 1427 */
 1428 {
 1429         struct inode *ip;
 1430         struct fs *fs;
 1431 
 1432         ip = VTOI(ap->a_vp);
 1433         fs = ip->i_fs;
 1434 
 1435         if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
 1436                 return (EOPNOTSUPP);
 1437 
 1438         if (ap->a_commit && (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY))
 1439                 return (EROFS);
 1440 
 1441         return (ffs_close_ea(ap->a_vp, ap->a_commit, ap->a_cred, ap->a_td));
 1442 }
 1443 
 1444 /*
 1445  * Vnode operation to remove a named attribute.
 1446  */
 1447 static int
 1448 ffs_deleteextattr(struct vop_deleteextattr_args *ap)
 1449 /*
 1450 vop_deleteextattr {
 1451         IN struct vnode *a_vp;
 1452         IN int a_attrnamespace;
 1453         IN const char *a_name;
 1454         IN struct ucred *a_cred;
 1455         IN struct thread *a_td;
 1456 };
 1457 */
 1458 {
 1459         struct inode *ip;
 1460         struct fs *fs;
 1461         uint32_t ealength, ul;
 1462         int ealen, olen, eapad1, eapad2, error, i, easize;
 1463         u_char *eae, *p;
 1464 
 1465         ip = VTOI(ap->a_vp);
 1466         fs = ip->i_fs;
 1467 
 1468         if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
 1469                 return (EOPNOTSUPP);
 1470 
 1471         if (strlen(ap->a_name) == 0)
 1472                 return (EINVAL);
 1473 
 1474         if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
 1475                 return (EROFS);
 1476 
 1477         error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
 1478             ap->a_cred, ap->a_td, VWRITE);
 1479         if (error) {
 1480                 /*
 1481                  * ffs_lock_ea is not needed here, because the vnode
 1482                  * must be exclusively locked, so no other thread can
 1483                  * race this update of i_ea_error.
 1484                  */
 1485                 if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
 1486                         ip->i_ea_error = error;
 1487                 return (error);
 1488         }
 1489 
 1490         error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
 1491         if (error)
 1492                 return (error);
 1493 
 1494         ealength = eapad1 = ealen = eapad2 = 0;
 1495 
 1496         eae = malloc(ip->i_ea_len, M_TEMP, M_WAITOK);
 1497         bcopy(ip->i_ea_area, eae, ip->i_ea_len);
 1498         easize = ip->i_ea_len;
 1499 
 1500         olen = ffs_findextattr(eae, easize, ap->a_attrnamespace, ap->a_name,
 1501             &p, NULL);
 1502         if (olen == -1) {
 1503                 /* delete but nonexistent */
 1504                 free(eae, M_TEMP);
 1505                 ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
 1506                 return (ENOATTR);
 1507         }
 1508         bcopy(p, &ul, sizeof ul);
 1509         i = p - eae + ul;
 1510         if (ul != ealength) {
 1511                 bcopy(p + ul, p + ealength, easize - i);
 1512                 easize += (ealength - ul);
 1513         }
 1514         if (easize > NXADDR * fs->fs_bsize) {
 1515                 free(eae, M_TEMP);
 1516                 ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
 1517                 if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
 1518                         ip->i_ea_error = ENOSPC;
 1519                 return (ENOSPC);
 1520         }
 1521         p = ip->i_ea_area;
 1522         ip->i_ea_area = eae;
 1523         ip->i_ea_len = easize;
 1524         free(p, M_TEMP);
 1525         error = ffs_close_ea(ap->a_vp, 1, ap->a_cred, ap->a_td);
 1526         return (error);
 1527 }
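
/*
 * Editor's worked example for the compaction above (values assumed):
 * with easize = 96 and the victim record at offset 32 with ul = 24 and
 * ealength = 0 (pure delete), i = 32 + 24 = 56, so the bcopy() slides
 * the trailing 96 - 56 = 40 bytes down to offset 32 and the area
 * shrinks to easize + (0 - 24) = 72 bytes.
 */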
 1528 
 1529 /*
 1530  * Vnode operation to retrieve a named extended attribute.
 1531  */
 1532 static int
 1533 ffs_getextattr(struct vop_getextattr_args *ap)
 1534 /*
 1535 vop_getextattr {
 1536         IN struct vnode *a_vp;
 1537         IN int a_attrnamespace;
 1538         IN const char *a_name;
 1539         INOUT struct uio *a_uio;
 1540         OUT size_t *a_size;
 1541         IN struct ucred *a_cred;
 1542         IN struct thread *a_td;
 1543 };
 1544 */
 1545 {
 1546         struct inode *ip;
 1547         struct fs *fs;
 1548         u_char *eae, *p;
 1549         unsigned easize;
 1550         int error, ealen;
 1551 
 1552         ip = VTOI(ap->a_vp);
 1553         fs = ip->i_fs;
 1554 
 1555         if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
 1556                 return (EOPNOTSUPP);
 1557 
 1558         error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
 1559             ap->a_cred, ap->a_td, VREAD);
 1560         if (error)
 1561                 return (error);
 1562 
 1563         error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
 1564         if (error)
 1565                 return (error);
 1566 
 1567         eae = ip->i_ea_area;
 1568         easize = ip->i_ea_len;
 1569 
 1570         ealen = ffs_findextattr(eae, easize, ap->a_attrnamespace, ap->a_name,
 1571             NULL, &p);
 1572         if (ealen >= 0) {
 1573                 error = 0;
 1574                 if (ap->a_size != NULL)
 1575                         *ap->a_size = ealen;
 1576                 else if (ap->a_uio != NULL)
 1577                         error = uiomove(p, ealen, ap->a_uio);
 1578         } else
 1579                 error = ENOATTR;
 1580 
 1581         ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
 1582         return (error);
 1583 }
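
/*
 * Editor's sketch of the matching user-space call (minimal example,
 * not from this file; see extattr_get_file(2)):
 *
 *	#include <sys/types.h>
 *	#include <sys/extattr.h>
 *	#include <err.h>
 *
 *	char buf[256];
 *	ssize_t n;
 *
 *	n = extattr_get_file("/tank/file", EXTATTR_NAMESPACE_USER,
 *	    "comment", buf, sizeof(buf));
 *	if (n == -1)
 *		err(1, "extattr_get_file");
 *
 * Passing a NULL data pointer asks for the attribute's size only,
 * which is the a_size branch serviced above.
 */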
 1584 
 1585 /*
 1586  * Vnode operation to retrieve extended attributes on a vnode.
 1587  */
 1588 static int
 1589 ffs_listextattr(struct vop_listextattr_args *ap)
 1590 /*
 1591 vop_listextattr {
 1592         IN struct vnode *a_vp;
 1593         IN int a_attrnamespace;
 1594         INOUT struct uio *a_uio;
 1595         OUT size_t *a_size;
 1596         IN struct ucred *a_cred;
 1597         IN struct thread *a_td;
 1598 };
 1599 */
 1600 {
 1601         struct inode *ip;
 1602         struct fs *fs;
 1603         u_char *eae, *p, *pe, *pn;
 1604         unsigned easize;
 1605         uint32_t ul;
 1606         int error, ealen;
 1607 
 1608         ip = VTOI(ap->a_vp);
 1609         fs = ip->i_fs;
 1610 
 1611         if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
 1612                 return (EOPNOTSUPP);
 1613 
 1614         error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
 1615             ap->a_cred, ap->a_td, VREAD);
 1616         if (error)
 1617                 return (error);
 1618 
 1619         error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
 1620         if (error)
 1621                 return (error);
 1622         eae = ip->i_ea_area;
 1623         easize = ip->i_ea_len;
 1624 
 1625         error = 0;
 1626         if (ap->a_size != NULL)
 1627                 *ap->a_size = 0;
 1628         pe = eae + easize;
 1629         for (p = eae; error == 0 && p < pe; p = pn) {
 1630                 bcopy(p, &ul, sizeof(ul));
 1631                 pn = p + ul;
 1632                 if (pn > pe)
 1633                         break;
 1634                 p += sizeof(ul);
 1635                 if (*p++ != ap->a_attrnamespace)
 1636                         continue;
 1637                 p++;    /* pad2 */
 1638                 ealen = *p;
 1639                 if (ap->a_size != NULL) {
 1640                         *ap->a_size += ealen + 1;
 1641                 } else if (ap->a_uio != NULL) {
 1642                         error = uiomove(p, ealen + 1, ap->a_uio);
 1643                 }
 1644         }
 1645         ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
 1646         return (error);
 1647 }
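
/*
 * Editor's note: the listing produced above is a sequence of one-byte
 * length prefixes, each followed by that many name bytes and no NUL
 * terminators; extattr_list_file(2) hands this same format to user
 * space.  Walking it looks like (sketch, buf is unsigned char):
 *
 *	for (off = 0; off < n; off += 1 + buf[off])
 *		printf("%.*s\n", (int)buf[off], buf + off + 1);
 */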
 1648 
 1649 /*
 1650  * Vnode operation to set a named attribute.
 1651  */
 1652 static int
 1653 ffs_setextattr(struct vop_setextattr_args *ap)
 1654 /*
 1655 vop_setextattr {
 1656         IN struct vnode *a_vp;
 1657         IN int a_attrnamespace;
 1658         IN const char *a_name;
 1659         INOUT struct uio *a_uio;
 1660         IN struct ucred *a_cred;
 1661         IN struct thread *a_td;
 1662 };
 1663 */
 1664 {
 1665         struct inode *ip;
 1666         struct fs *fs;
 1667         uint32_t ealength, ul;
 1668         ssize_t ealen;
 1669         int olen, eapad1, eapad2, error, i, easize;
 1670         u_char *eae, *p;
 1671 
 1672         ip = VTOI(ap->a_vp);
 1673         fs = ip->i_fs;
 1674 
 1675         if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
 1676                 return (EOPNOTSUPP);
 1677 
 1678         if (strlen(ap->a_name) == 0)
 1679                 return (EINVAL);
 1680 
 1681         /* XXX A NULL uio formerly meant "delete this EA"; no longer supported. */
 1682         if (ap->a_uio == NULL)
 1683                 return (EOPNOTSUPP);
 1684 
 1685         if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
 1686                 return (EROFS);
 1687 
 1688         ealen = ap->a_uio->uio_resid;
 1689         if (ealen < 0 || ealen > lblktosize(fs, NXADDR))
 1690                 return (EINVAL);
 1691 
 1692         error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
 1693             ap->a_cred, ap->a_td, VWRITE);
 1694         if (error) {
 1695                 /*
 1696                  * ffs_lock_ea is not needed here, because the vnode
 1697                  * must be exclusively locked, so no other thread can
 1698                  * race this update of i_ea_error.
 1699                  */
 1700                 if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
 1701                         ip->i_ea_error = error;
 1702                 return (error);
 1703         }
 1704 
 1705         error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
 1706         if (error)
 1707                 return (error);
 1708 
 1709         ealength = sizeof(uint32_t) + 3 + strlen(ap->a_name);
 1710         eapad1 = 8 - (ealength % 8);
 1711         if (eapad1 == 8)
 1712                 eapad1 = 0;
 1713         eapad2 = 8 - (ealen % 8);
 1714         if (eapad2 == 8)
 1715                 eapad2 = 0;
 1716         ealength += eapad1 + ealen + eapad2;
 1717 
 1718         eae = malloc(ip->i_ea_len + ealength, M_TEMP, M_WAITOK);
 1719         bcopy(ip->i_ea_area, eae, ip->i_ea_len);
 1720         easize = ip->i_ea_len;
 1721 
 1722         olen = ffs_findextattr(eae, easize,
 1723             ap->a_attrnamespace, ap->a_name, &p, NULL);
 1724         if (olen == -1) {
 1725                 /* new, append at end */
 1726                 p = eae + easize;
 1727                 easize += ealength;
 1728         } else {
 1729                 bcopy(p, &ul, sizeof ul);
 1730                 i = p - eae + ul;
 1731                 if (ul != ealength) {
 1732                         bcopy(p + ul, p + ealength, easize - i);
 1733                         easize += (ealength - ul);
 1734                 }
 1735         }
 1736         if (easize > lblktosize(fs, NXADDR)) {
 1737                 free(eae, M_TEMP);
 1738                 ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
 1739                 if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
 1740                         ip->i_ea_error = ENOSPC;
 1741                 return (ENOSPC);
 1742         }
 1743         bcopy(&ealength, p, sizeof(ealength));
 1744         p += sizeof(ealength);
 1745         *p++ = ap->a_attrnamespace;
 1746         *p++ = eapad2;
 1747         *p++ = strlen(ap->a_name);
 1748         strcpy(p, ap->a_name);
 1749         p += strlen(ap->a_name);
 1750         bzero(p, eapad1);
 1751         p += eapad1;
 1752         error = uiomove(p, ealen, ap->a_uio);
 1753         if (error) {
 1754                 free(eae, M_TEMP);
 1755                 ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
 1756                 if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
 1757                         ip->i_ea_error = error;
 1758                 return (error);
 1759         }
 1760         p += ealen;
 1761         bzero(p, eapad2);
 1762 
 1763         p = ip->i_ea_area;
 1764         ip->i_ea_area = eae;
 1765         ip->i_ea_len = easize;
 1766         free(p, M_TEMP);
 1767         error = ffs_close_ea(ap->a_vp, 1, ap->a_cred, ap->a_td);
 1768         return (error);
 1769 }
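
/*
 * Editor's worked example for the record assembled above (assumed
 * values): name "comment" (length 7) gives a 4 + 3 + 7 = 14 byte
 * header, so eapad1 = 8 - (14 % 8) = 2 rounds it to 16; a 13-byte
 * value gets eapad2 = 8 - (13 % 8) = 3, for a total record length of
 * 16 + 13 + 3 = 32 bytes, a multiple of 8 as the format requires.
 */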
 1770 
 1771 /*
 1772  * Vnode pointer to File handle
 1773  */
 1774 static int
 1775 ffs_vptofh(struct vop_vptofh_args *ap)
 1776 /*
 1777 vop_vptofh {
 1778         IN struct vnode *a_vp;
 1779         IN struct fid *a_fhp;
 1780 };
 1781 */
 1782 {
 1783         struct inode *ip;
 1784         struct ufid *ufhp;
 1785 
 1786         ip = VTOI(ap->a_vp);
 1787         ufhp = (struct ufid *)ap->a_fhp;
 1788         ufhp->ufid_len = sizeof(struct ufid);
 1789         ufhp->ufid_ino = ip->i_number;
 1790         ufhp->ufid_gen = ip->i_gen;
 1791         return (0);
 1792 }
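
/*
 * Editor's note: the inode number/generation pair stored above is the
 * entire NFS-style file handle; the reverse mapping is the file
 * system's fhtovp routine, which rejects a handle whose generation no
 * longer matches the on-disk inode (i.e. the inode was recycled).
 */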
