FreeBSD/Linux Kernel Cross Reference
sys/ufs/ffs/ffs_vnops.c


/*-
 * SPDX-License-Identifier: (BSD-2-Clause-FreeBSD AND BSD-3-Clause)
 *
 * Copyright (c) 2002, 2003 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Marshall
 * Kirk McKusick and Network Associates Laboratories, the Security
 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
 * research program
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)ufs_readwrite.c       8.11 (Berkeley) 5/8/95
 * from: $FreeBSD: .../ufs/ufs_readwrite.c,v 1.96 2002/08/12 09:22:11 phk ...
 *      @(#)ffs_vnops.c 8.15 (Berkeley) 5/14/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_directio.h"
#include "opt_ffs.h"
#include "opt_ufs.h"

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/extattr.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/priv.h>
#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/dir.h>
#ifdef UFS_DIRHASH
#include <ufs/ufs/dirhash.h>
#endif

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

#define ALIGNED_TO(ptr, s)      \
        (((uintptr_t)(ptr) & (_Alignof(s) - 1)) == 0)

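/*
 * For example, ALIGNED_TO(ptr, struct extattr) is true when ptr meets
 * the alignment that _Alignof(struct extattr) requires; it backs the
 * KASSERT in ffs_findextattr() below, which walks the extended
 * attribute area as a sequence of struct extattr records.
 */
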
#ifdef DIRECTIO
extern int      ffs_rawread(struct vnode *vp, struct uio *uio, int *workdone);
#endif
static vop_fdatasync_t  ffs_fdatasync;
static vop_fsync_t      ffs_fsync;
static vop_getpages_t   ffs_getpages;
static vop_getpages_async_t     ffs_getpages_async;
static vop_lock1_t      ffs_lock;
#ifdef INVARIANTS
static vop_unlock_t     ffs_unlock_debug;
#endif
static vop_read_t       ffs_read;
static vop_write_t      ffs_write;
static int      ffs_extread(struct vnode *vp, struct uio *uio, int ioflag);
static int      ffs_extwrite(struct vnode *vp, struct uio *uio, int ioflag,
                    struct ucred *cred);
static vop_strategy_t   ffsext_strategy;
static vop_closeextattr_t       ffs_closeextattr;
static vop_deleteextattr_t      ffs_deleteextattr;
static vop_getextattr_t ffs_getextattr;
static vop_listextattr_t        ffs_listextattr;
static vop_openextattr_t        ffs_openextattr;
static vop_setextattr_t ffs_setextattr;
static vop_vptofh_t     ffs_vptofh;
static vop_vput_pair_t  ffs_vput_pair;

/* Global vfs data structures for ufs. */
struct vop_vector ffs_vnodeops1 = {
        .vop_default =          &ufs_vnodeops,
        .vop_fsync =            ffs_fsync,
        .vop_fdatasync =        ffs_fdatasync,
        .vop_getpages =         ffs_getpages,
        .vop_getpages_async =   ffs_getpages_async,
        .vop_lock1 =            ffs_lock,
#ifdef INVARIANTS
        .vop_unlock =           ffs_unlock_debug,
#endif
        .vop_read =             ffs_read,
        .vop_reallocblks =      ffs_reallocblks,
        .vop_write =            ffs_write,
        .vop_vptofh =           ffs_vptofh,
        .vop_vput_pair =        ffs_vput_pair,
};
VFS_VOP_VECTOR_REGISTER(ffs_vnodeops1);

struct vop_vector ffs_fifoops1 = {
        .vop_default =          &ufs_fifoops,
        .vop_fsync =            ffs_fsync,
        .vop_fdatasync =        ffs_fdatasync,
        .vop_lock1 =            ffs_lock,
#ifdef INVARIANTS
        .vop_unlock =           ffs_unlock_debug,
#endif
        .vop_vptofh =           ffs_vptofh,
};
VFS_VOP_VECTOR_REGISTER(ffs_fifoops1);

/* Global vfs data structures for ufs. */
struct vop_vector ffs_vnodeops2 = {
        .vop_default =          &ufs_vnodeops,
        .vop_fsync =            ffs_fsync,
        .vop_fdatasync =        ffs_fdatasync,
        .vop_getpages =         ffs_getpages,
        .vop_getpages_async =   ffs_getpages_async,
        .vop_lock1 =            ffs_lock,
#ifdef INVARIANTS
        .vop_unlock =           ffs_unlock_debug,
#endif
        .vop_read =             ffs_read,
        .vop_reallocblks =      ffs_reallocblks,
        .vop_write =            ffs_write,
        .vop_closeextattr =     ffs_closeextattr,
        .vop_deleteextattr =    ffs_deleteextattr,
        .vop_getextattr =       ffs_getextattr,
        .vop_listextattr =      ffs_listextattr,
        .vop_openextattr =      ffs_openextattr,
        .vop_setextattr =       ffs_setextattr,
        .vop_vptofh =           ffs_vptofh,
        .vop_vput_pair =        ffs_vput_pair,
};
VFS_VOP_VECTOR_REGISTER(ffs_vnodeops2);

struct vop_vector ffs_fifoops2 = {
        .vop_default =          &ufs_fifoops,
        .vop_fsync =            ffs_fsync,
        .vop_fdatasync =        ffs_fdatasync,
        .vop_lock1 =            ffs_lock,
#ifdef INVARIANTS
        .vop_unlock =           ffs_unlock_debug,
#endif
        .vop_reallocblks =      ffs_reallocblks,
        .vop_strategy =         ffsext_strategy,
        .vop_closeextattr =     ffs_closeextattr,
        .vop_deleteextattr =    ffs_deleteextattr,
        .vop_getextattr =       ffs_getextattr,
        .vop_listextattr =      ffs_listextattr,
        .vop_openextattr =      ffs_openextattr,
        .vop_setextattr =       ffs_setextattr,
        .vop_vptofh =           ffs_vptofh,
};
VFS_VOP_VECTOR_REGISTER(ffs_fifoops2);

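/*
 * The "1" vectors above serve UFS1 file systems and the "2" vectors
 * serve UFS2; the latter add the extended attribute operations (and,
 * for fifos, a strategy routine that carries extended attribute I/O).
 * The appropriate vector is installed in each vnode when it is
 * created, according to the on-disk format of the file system.
 */
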
/*
 * Synch an open file.
 */
/* ARGSUSED */
static int
ffs_fsync(struct vop_fsync_args *ap)
{
        struct vnode *vp;
        struct bufobj *bo;
        int error;

        vp = ap->a_vp;
        bo = &vp->v_bufobj;
retry:
        error = ffs_syncvnode(vp, ap->a_waitfor, 0);
        if (error)
                return (error);
        if (ap->a_waitfor == MNT_WAIT && DOINGSOFTDEP(vp)) {
                error = softdep_fsync(vp);
                if (error)
                        return (error);

                /*
                 * The softdep_fsync() function may drop vp lock,
                 * allowing for dirty buffers to reappear on the
                 * bo_dirty list. Recheck and resync as needed.
                 */
                BO_LOCK(bo);
                if ((vp->v_type == VREG || vp->v_type == VDIR) &&
                    (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0)) {
                        BO_UNLOCK(bo);
                        goto retry;
                }
                BO_UNLOCK(bo);
        }
        if (ffs_fsfail_cleanup(VFSTOUFS(vp->v_mount), 0))
                return (ENXIO);
        return (0);
}

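/*
 * The final ffs_fsfail_cleanup() check above makes an fsync() that
 * raced with a file system failure report ENXIO instead of success;
 * earlier sync passes may have had their errors absorbed by the
 * failure-cleanup machinery.
 */
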
int
ffs_syncvnode(struct vnode *vp, int waitfor, int flags)
{
        struct inode *ip;
        struct bufobj *bo;
        struct ufsmount *ump;
        struct buf *bp, *nbp;
        ufs_lbn_t lbn;
        int error, passes, wflag;
        bool still_dirty, unlocked, wait;

        ip = VTOI(vp);
        bo = &vp->v_bufobj;
        ump = VFSTOUFS(vp->v_mount);
#ifdef WITNESS
        wflag = IS_SNAPSHOT(ip) ? LK_NOWITNESS : 0;
#else
        wflag = 0;
#endif

        /*
         * When doing MNT_WAIT we must first flush all dependencies
         * on the inode.
         */
        if (DOINGSOFTDEP(vp) && waitfor == MNT_WAIT &&
            (error = softdep_sync_metadata(vp)) != 0) {
                if (ffs_fsfail_cleanup(ump, error))
                        error = 0;
                return (error);
        }

        /*
         * Flush all dirty buffers associated with a vnode.
         */
        error = 0;
        passes = 0;
        wait = false;   /* Always do an async pass first. */
        unlocked = false;
        lbn = lblkno(ITOFS(ip), (ip->i_size + ITOFS(ip)->fs_bsize - 1));
        BO_LOCK(bo);
loop:
        TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
                bp->b_vflags &= ~BV_SCANNED;
        TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
                /*
                 * Reasons to skip this buffer: it has already been considered
                 * on this pass, the buffer has dependencies that will cause
                 * it to be redirtied and it has not already been deferred,
                 * or it is already being written.
                 */
                if ((bp->b_vflags & BV_SCANNED) != 0)
                        continue;
                bp->b_vflags |= BV_SCANNED;
                /*
                 * Flush indirects in order, if requested.
                 *
                 * Note that if only datasync is requested, we can
                 * skip indirect blocks when softupdates are not
                 * active.  Otherwise we must flush them with data,
                 * since dependencies prevent data block writes.
                 */
                if (waitfor == MNT_WAIT && bp->b_lblkno <= -UFS_NDADDR &&
                    (lbn_level(bp->b_lblkno) >= passes ||
                    ((flags & DATA_ONLY) != 0 && !DOINGSOFTDEP(vp))))
                        continue;
                if (bp->b_lblkno > lbn)
                        panic("ffs_syncvnode: syncing truncated data.");
                if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) == 0) {
                        BO_UNLOCK(bo);
                } else if (wait) {
                        if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL |
                            LK_INTERLOCK | wflag, BO_LOCKPTR(bo)) != 0) {
                                BO_LOCK(bo);
                                bp->b_vflags &= ~BV_SCANNED;
                                goto next_locked;
                        }
                } else
                        continue;
                if ((bp->b_flags & B_DELWRI) == 0)
                        panic("ffs_fsync: not dirty");
                /*
                 * Check for dependencies and potentially complete them.
                 */
                if (!LIST_EMPTY(&bp->b_dep) &&
                    (error = softdep_sync_buf(vp, bp,
                    wait ? MNT_WAIT : MNT_NOWAIT)) != 0) {
                        /*
                         * Lock order conflict, buffer was already unlocked,
                         * and vnode possibly unlocked.
                         */
                        if (error == ERELOOKUP) {
                                if (vp->v_data == NULL)
                                        return (EBADF);
                                unlocked = true;
                                if (DOINGSOFTDEP(vp) && waitfor == MNT_WAIT &&
                                    (error = softdep_sync_metadata(vp)) != 0) {
                                        if (ffs_fsfail_cleanup(ump, error))
                                                error = 0;
                                        return (unlocked && error == 0 ?
                                            ERELOOKUP : error);
                                }
                                /* Re-evaluate inode size */
                                lbn = lblkno(ITOFS(ip), (ip->i_size +
                                    ITOFS(ip)->fs_bsize - 1));
                                goto next;
                        }
                        /* I/O error. */
                        if (error != EBUSY) {
                                BUF_UNLOCK(bp);
                                return (error);
                        }
                        /* If we deferred once, don't defer again. */
                        if ((bp->b_flags & B_DEFERRED) == 0) {
                                bp->b_flags |= B_DEFERRED;
                                BUF_UNLOCK(bp);
                                goto next;
                        }
                }
                if (wait) {
                        bremfree(bp);
                        error = bwrite(bp);
                        if (ffs_fsfail_cleanup(ump, error))
                                error = 0;
                        if (error != 0)
                                return (error);
                } else if ((bp->b_flags & B_CLUSTEROK)) {
                        (void) vfs_bio_awrite(bp);
                } else {
                        bremfree(bp);
                        (void) bawrite(bp);
                }
next:
                /*
                 * Since we may have slept during the I/O, we need
                 * to start from a known point.
                 */
                BO_LOCK(bo);
next_locked:
                nbp = TAILQ_FIRST(&bo->bo_dirty.bv_hd);
        }
        if (waitfor != MNT_WAIT) {
                BO_UNLOCK(bo);
                if ((flags & NO_INO_UPDT) != 0)
                        return (unlocked ? ERELOOKUP : 0);
                error = ffs_update(vp, 0);
                if (error == 0 && unlocked)
                        error = ERELOOKUP;
                return (error);
        }
        /* Drain IO to see if we're done. */
        bufobj_wwait(bo, 0, 0);
        /*
         * Block devices associated with filesystems may have new I/O
         * requests posted for them even if the vnode is locked, so no
         * amount of trying will get them clean.  We make several passes
         * as a best effort.
         *
         * Regular files may need multiple passes to flush all dependency
         * work as it is possible that we must write once per indirect
         * level, once for the leaf, and once for the inode and each of
         * these will be done with one sync and one async pass.
         */
        if (bo->bo_dirty.bv_cnt > 0) {
                if ((flags & DATA_ONLY) == 0) {
                        still_dirty = true;
                } else {
                        /*
                         * For data-only sync, dirty indirect buffers
                         * are ignored.
                         */
                        still_dirty = false;
                        TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) {
                                if (bp->b_lblkno > -UFS_NDADDR) {
                                        still_dirty = true;
                                        break;
                                }
                        }
                }

                if (still_dirty) {
                        /* Write the inode after sync passes to flush deps. */
                        if (wait && DOINGSOFTDEP(vp) &&
                            (flags & NO_INO_UPDT) == 0) {
                                BO_UNLOCK(bo);
                                ffs_update(vp, 1);
                                BO_LOCK(bo);
                        }
                        /* switch between sync/async. */
                        wait = !wait;
                        if (wait || ++passes < UFS_NIADDR + 2)
                                goto loop;
                }
        }
        BO_UNLOCK(bo);
        error = 0;
        if ((flags & DATA_ONLY) == 0) {
                if ((flags & NO_INO_UPDT) == 0)
                        error = ffs_update(vp, 1);
                if (DOINGSUJ(vp))
                        softdep_journal_fsync(VTOI(vp));
        } else if ((ip->i_flag & (IN_SIZEMOD | IN_IBLKDATA)) != 0) {
                error = ffs_update(vp, 1);
        }
        if (error == 0 && unlocked)
                error = ERELOOKUP;
        if (error == 0)
                ip->i_flag &= ~IN_NEEDSYNC;
        return (error);
}

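/*
 * A worked example of the pass accounting above: the first pass is
 * always asynchronous (wait == false); while buffers remain dirty the
 * code then alternates synchronous and asynchronous passes, giving up
 * after the pass count reaches UFS_NIADDR + 2 so that a block device
 * with continuously arriving I/O cannot wedge the caller.
 */
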
static int
ffs_fdatasync(struct vop_fdatasync_args *ap)
{

        return (ffs_syncvnode(ap->a_vp, MNT_WAIT, DATA_ONLY));
}

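/*
 * ffs_fdatasync() implements VOP_FDATASYNC(), which backs the
 * fdatasync(2) system call: DATA_ONLY lets ffs_syncvnode() skip dirty
 * indirect blocks (when soft updates are not active) and skip the
 * final inode update unless the size or block layout changed, i.e.
 * unless IN_SIZEMOD or IN_IBLKDATA is set.
 */
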
static int
ffs_lock(
        struct vop_lock1_args /* {
                struct vnode *a_vp;
                int a_flags;
                char *file;
                int line;
        } */ *ap)
{
#if !defined(NO_FFS_SNAPSHOT) || defined(DIAGNOSTIC)
        struct vnode *vp = ap->a_vp;
#endif  /* !NO_FFS_SNAPSHOT || DIAGNOSTIC */
#ifdef DIAGNOSTIC
        struct inode *ip;
#endif  /* DIAGNOSTIC */
        int result;
#ifndef NO_FFS_SNAPSHOT
        int flags;
        struct lock *lkp;

        /*
         * Adaptive spinning mixed with SU leads to trouble.  Use a giant
         * hammer instead, and only use it when LK_NODDLKTREAT is set.
         * Currently this means it is only used during path lookup.
         */
        if ((ap->a_flags & LK_NODDLKTREAT) != 0)
                ap->a_flags |= LK_ADAPTIVE;
        switch (ap->a_flags & LK_TYPE_MASK) {
        case LK_SHARED:
        case LK_UPGRADE:
        case LK_EXCLUSIVE:
                flags = ap->a_flags;
                for (;;) {
#ifdef DEBUG_VFS_LOCKS
                        VNPASS(vp->v_holdcnt != 0, vp);
#endif  /* DEBUG_VFS_LOCKS */
                        lkp = vp->v_vnlock;
                        result = lockmgr_lock_flags(lkp, flags,
                            &VI_MTX(vp)->lock_object, ap->a_file, ap->a_line);
                        if (lkp == vp->v_vnlock || result != 0)
                                break;
                        /*
                         * Apparent success, except that the vnode
                         * mutated between snapshot file vnode and
                         * regular file vnode while this process
                         * slept.  The lock currently held is not the
                         * right lock.  Release it, and try to get the
                         * new lock.
                         */
                        lockmgr_unlock(lkp);
                        if ((flags & (LK_INTERLOCK | LK_NOWAIT)) ==
                            (LK_INTERLOCK | LK_NOWAIT))
                                return (EBUSY);
                        if ((flags & LK_TYPE_MASK) == LK_UPGRADE)
                                flags = (flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE;
                        flags &= ~LK_INTERLOCK;
                }
#ifdef DIAGNOSTIC
                switch (ap->a_flags & LK_TYPE_MASK) {
                case LK_UPGRADE:
                case LK_EXCLUSIVE:
                        if (result == 0 && vp->v_vnlock->lk_recurse == 0) {
                                ip = VTOI(vp);
                                if (ip != NULL)
                                        ip->i_lock_gen++;
                        }
                }
#endif  /* DIAGNOSTIC */
                break;
        default:
#ifdef DIAGNOSTIC
                if ((ap->a_flags & LK_TYPE_MASK) == LK_DOWNGRADE) {
                        ip = VTOI(vp);
                        if (ip != NULL)
                                ufs_unlock_tracker(ip);
                }
#endif  /* DIAGNOSTIC */
                result = VOP_LOCK1_APV(&ufs_vnodeops, ap);
                break;
        }
#else   /* NO_FFS_SNAPSHOT */
        /*
         * See above for an explanation.
         */
        if ((ap->a_flags & LK_NODDLKTREAT) != 0)
                ap->a_flags |= LK_ADAPTIVE;
#ifdef DIAGNOSTIC
        if ((ap->a_flags & LK_TYPE_MASK) == LK_DOWNGRADE) {
                ip = VTOI(vp);
                if (ip != NULL)
                        ufs_unlock_tracker(ip);
        }
#endif  /* DIAGNOSTIC */
        result = VOP_LOCK1_APV(&ufs_vnodeops, ap);
#endif  /* NO_FFS_SNAPSHOT */
#ifdef DIAGNOSTIC
        switch (ap->a_flags & LK_TYPE_MASK) {
        case LK_UPGRADE:
        case LK_EXCLUSIVE:
                if (result == 0 && vp->v_vnlock->lk_recurse == 0) {
                        ip = VTOI(vp);
                        if (ip != NULL)
                                ip->i_lock_gen++;
                }
        }
#endif  /* DIAGNOSTIC */
        return (result);
}

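/*
 * The retry loop in ffs_lock() exists because a vnode can be converted
 * between a snapshot vnode and a regular vnode while a thread sleeps
 * waiting for its lock.  The conversion replaces vp->v_vnlock, so a
 * lock acquired through a stale pointer protects nothing and must be
 * released and reacquired under the new pointer.
 */
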
#ifdef INVARIANTS
static int
ffs_unlock_debug(struct vop_unlock_args *ap)
{
        struct vnode *vp;
        struct inode *ip;

        vp = ap->a_vp;
        ip = VTOI(vp);
        if (ip->i_flag & UFS_INODE_FLAG_LAZY_MASK_ASSERTABLE) {
                if ((vp->v_mflag & VMP_LAZYLIST) == 0) {
                        VI_LOCK(vp);
                        VNASSERT((vp->v_mflag & VMP_LAZYLIST), vp,
                            ("%s: modified vnode (%x) not on lazy list",
                            __func__, ip->i_flag));
                        VI_UNLOCK(vp);
                }
        }
        KASSERT(vp->v_type != VDIR || vp->v_vnlock->lk_recurse != 0 ||
            (ip->i_flag & IN_ENDOFF) == 0,
            ("ufs dir vp %p ip %p flags %#x", vp, ip, ip->i_flag));
#ifdef DIAGNOSTIC
        if (VOP_ISLOCKED(vp) == LK_EXCLUSIVE && ip != NULL &&
            vp->v_vnlock->lk_recurse == 0)
                ufs_unlock_tracker(ip);
#endif
        return (VOP_UNLOCK_APV(&ufs_vnodeops, ap));
}
#endif

static int
ffs_read_hole(struct uio *uio, long xfersize, long *size)
{
        ssize_t saved_resid, tlen;
        int error;

        while (xfersize > 0) {
                tlen = min(xfersize, ZERO_REGION_SIZE);
                saved_resid = uio->uio_resid;
                error = vn_io_fault_uiomove(__DECONST(void *, zero_region),
                    tlen, uio);
                if (error != 0)
                        return (error);
                tlen = saved_resid - uio->uio_resid;
                xfersize -= tlen;
                *size -= tlen;
        }
        return (0);
}

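/*
 * ffs_read_hole() satisfies reads of unallocated blocks by copying out
 * of the preexisting zero_region instead of allocating and zeroing a
 * buffer; a hole larger than ZERO_REGION_SIZE is simply handed to
 * vn_io_fault_uiomove() in ZERO_REGION_SIZE chunks.
 */
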
/*
 * Vnode op for reading.
 */
static int
ffs_read(
        struct vop_read_args /* {
                struct vnode *a_vp;
                struct uio *a_uio;
                int a_ioflag;
                struct ucred *a_cred;
        } */ *ap)
{
        struct vnode *vp;
        struct inode *ip;
        struct uio *uio;
        struct fs *fs;
        struct buf *bp;
        ufs_lbn_t lbn, nextlbn;
        off_t bytesinfile;
        long size, xfersize, blkoffset;
        ssize_t orig_resid;
        int bflag, error, ioflag, seqcount;

        vp = ap->a_vp;
        uio = ap->a_uio;
        ioflag = ap->a_ioflag;
        if (ap->a_ioflag & IO_EXT)
#ifdef notyet
                return (ffs_extread(vp, uio, ioflag));
#else
                panic("ffs_read+IO_EXT");
#endif
#ifdef DIRECTIO
        if ((ioflag & IO_DIRECT) != 0) {
                int workdone;

                error = ffs_rawread(vp, uio, &workdone);
                if (error != 0 || workdone != 0)
                        return (error);
        }
#endif

        seqcount = ap->a_ioflag >> IO_SEQSHIFT;
        ip = VTOI(vp);

#ifdef INVARIANTS
        if (uio->uio_rw != UIO_READ)
                panic("ffs_read: mode");

        if (vp->v_type == VLNK) {
                if ((int)ip->i_size < VFSTOUFS(vp->v_mount)->um_maxsymlinklen)
                        panic("ffs_read: short symlink");
        } else if (vp->v_type != VREG && vp->v_type != VDIR)
                panic("ffs_read: type %d", vp->v_type);
#endif
        orig_resid = uio->uio_resid;
        KASSERT(orig_resid >= 0, ("ffs_read: uio->uio_resid < 0"));
        if (orig_resid == 0)
                return (0);
        KASSERT(uio->uio_offset >= 0, ("ffs_read: uio->uio_offset < 0"));
        fs = ITOFS(ip);
        if (uio->uio_offset < ip->i_size &&
            uio->uio_offset >= fs->fs_maxfilesize)
                return (EOVERFLOW);

        bflag = GB_UNMAPPED | (uio->uio_segflg == UIO_NOCOPY ? 0 : GB_NOSPARSE);
#ifdef WITNESS
        bflag |= IS_SNAPSHOT(ip) ? GB_NOWITNESS : 0;
#endif
        for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
                if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0)
                        break;
                lbn = lblkno(fs, uio->uio_offset);
                nextlbn = lbn + 1;

                /*
                 * Size of the buffer.  The buffer representing the
                 * end of the file is rounded up to the size of
                 * the block type (fragment or full block, as
                 * appropriate).
                 */
                size = blksize(fs, ip, lbn);
                blkoffset = blkoff(fs, uio->uio_offset);

                /*
                 * The amount we want to transfer in this iteration is
                 * one FS block less the amount of the data before
                 * our startpoint (duh!)
                 */
                xfersize = fs->fs_bsize - blkoffset;

                /*
                 * But if we actually want less than the block,
                 * or the file doesn't have a whole block more of data,
                 * then use the lesser number.
                 */
                if (uio->uio_resid < xfersize)
                        xfersize = uio->uio_resid;
                if (bytesinfile < xfersize)
                        xfersize = bytesinfile;

                if (lblktosize(fs, nextlbn) >= ip->i_size) {
                        /*
                         * Don't do readahead if this is the end of the file.
                         */
                        error = bread_gb(vp, lbn, size, NOCRED, bflag, &bp);
                } else if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) {
                        /*
                         * Otherwise if we are allowed to cluster,
                         * grab as much as we can.
                         *
                         * XXX  This may not be a win if we are not
                         * doing sequential access.
                         */
                        error = cluster_read(vp, ip->i_size, lbn,
                            size, NOCRED, blkoffset + uio->uio_resid,
                            seqcount, bflag, &bp);
                } else if (seqcount > 1) {
                        /*
                         * If we are NOT allowed to cluster, then
                         * if we appear to be acting sequentially,
                         * fire off a request for a readahead
                         * as well as a read. Note that the 4th and 5th
                         * arguments point to arrays of the size specified in
                         * the 6th argument.
                         */
                        u_int nextsize = blksize(fs, ip, nextlbn);
                        error = breadn_flags(vp, lbn, lbn, size, &nextlbn,
                            &nextsize, 1, NOCRED, bflag, NULL, &bp);
                } else {
                        /*
                         * Failing all of the above, just read what the
                         * user asked for. Interestingly, the same as
                         * the first option above.
                         */
                        error = bread_gb(vp, lbn, size, NOCRED, bflag, &bp);
                }
                if (error == EJUSTRETURN) {
                        error = ffs_read_hole(uio, xfersize, &size);
                        if (error == 0)
                                continue;
                }
                if (error != 0) {
                        brelse(bp);
                        bp = NULL;
                        break;
                }

                /*
                 * We should only get non-zero b_resid when an I/O error
                 * has occurred, which should cause us to break above.
                 * However, if the short read did not cause an error,
                 * then we want to ensure that we do not uiomove bad
                 * or uninitialized data.
                 */
                size -= bp->b_resid;
                if (size < xfersize) {
                        if (size == 0)
                                break;
                        xfersize = size;
                }

                if (buf_mapped(bp)) {
                        error = vn_io_fault_uiomove((char *)bp->b_data +
                            blkoffset, (int)xfersize, uio);
                } else {
                        error = vn_io_fault_pgmove(bp->b_pages,
                            blkoffset + (bp->b_offset & PAGE_MASK),
                            (int)xfersize, uio);
                }
                if (error)
                        break;

                vfs_bio_brelse(bp, ioflag);
        }

        /*
         * This can only happen in the case of an error, because the
         * loop above resets bp to NULL on each iteration and on normal
         * completion has not set a new value into it, so it must have
         * come from a 'break' statement.
         */
        if (bp != NULL)
                vfs_bio_brelse(bp, ioflag);

        if ((error == 0 || uio->uio_resid != orig_resid) &&
            (vp->v_mount->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0)
                UFS_INODE_SET_FLAG_SHARED(ip, IN_ACCESS);
        return (error);
}

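/*
 * A worked example of the transfer-size arithmetic in ffs_read(),
 * assuming fs_bsize is 32768: a read at uio_offset 40000 yields
 * lbn = 1, blkoffset = 7232, and xfersize = 32768 - 7232 = 25536,
 * further clipped by uio_resid and by the bytes remaining in the file.
 */
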
/*
 * Vnode op for writing.
 */
static int
ffs_write(
        struct vop_write_args /* {
                struct vnode *a_vp;
                struct uio *a_uio;
                int a_ioflag;
                struct ucred *a_cred;
        } */ *ap)
{
        struct vnode *vp;
        struct uio *uio;
        struct inode *ip;
        struct fs *fs;
        struct buf *bp;
        ufs_lbn_t lbn;
        off_t osize;
        ssize_t resid, r;
        int seqcount;
        int blkoffset, error, flags, ioflag, size, xfersize;

        vp = ap->a_vp;
        if (DOINGSUJ(vp))
                softdep_prealloc(vp, MNT_WAIT);
        if (vp->v_data == NULL)
                return (EBADF);

        uio = ap->a_uio;
        ioflag = ap->a_ioflag;
        if (ap->a_ioflag & IO_EXT)
#ifdef notyet
                return (ffs_extwrite(vp, uio, ioflag, ap->a_cred));
#else
                panic("ffs_write+IO_EXT");
#endif

        seqcount = ap->a_ioflag >> IO_SEQSHIFT;
        ip = VTOI(vp);

#ifdef INVARIANTS
        if (uio->uio_rw != UIO_WRITE)
                panic("ffs_write: mode");
#endif

        switch (vp->v_type) {
        case VREG:
                if (ioflag & IO_APPEND)
                        uio->uio_offset = ip->i_size;
                if ((ip->i_flags & APPEND) && uio->uio_offset != ip->i_size)
                        return (EPERM);
                /* FALLTHROUGH */
        case VLNK:
                break;
        case VDIR:
                panic("ffs_write: dir write");
                break;
        default:
                panic("ffs_write: type %p %d (%d,%d)", vp, (int)vp->v_type,
                        (int)uio->uio_offset,
                        (int)uio->uio_resid
                );
        }

        KASSERT(uio->uio_resid >= 0, ("ffs_write: uio->uio_resid < 0"));
        KASSERT(uio->uio_offset >= 0, ("ffs_write: uio->uio_offset < 0"));
        fs = ITOFS(ip);

        /*
         * Maybe this should be above the vnode op call, but so long as
         * file servers have no limits, I don't think it matters.
         */
        error = vn_rlimit_fsizex(vp, uio, fs->fs_maxfilesize, &r,
            uio->uio_td);
        if (error != 0) {
                vn_rlimit_fsizex_res(uio, r);
                return (error);
        }

        resid = uio->uio_resid;
        osize = ip->i_size;
        if (seqcount > BA_SEQMAX)
                flags = BA_SEQMAX << BA_SEQSHIFT;
        else
                flags = seqcount << BA_SEQSHIFT;
        if (ioflag & IO_SYNC)
                flags |= IO_SYNC;
        flags |= BA_UNMAPPED;

        for (error = 0; uio->uio_resid > 0;) {
                lbn = lblkno(fs, uio->uio_offset);
                blkoffset = blkoff(fs, uio->uio_offset);
                xfersize = fs->fs_bsize - blkoffset;
                if (uio->uio_resid < xfersize)
                        xfersize = uio->uio_resid;
                if (uio->uio_offset + xfersize > ip->i_size)
                        vnode_pager_setsize(vp, uio->uio_offset + xfersize);

                /*
                 * We must perform a read-before-write if the transfer size
                 * does not cover the entire buffer.
                 */
                if (fs->fs_bsize > xfersize)
                        flags |= BA_CLRBUF;
                else
                        flags &= ~BA_CLRBUF;
/* XXX is uio->uio_offset the right thing here? */
                error = UFS_BALLOC(vp, uio->uio_offset, xfersize,
                    ap->a_cred, flags, &bp);
                if (error != 0) {
                        vnode_pager_setsize(vp, ip->i_size);
                        break;
                }
                if ((ioflag & (IO_SYNC|IO_INVAL)) == (IO_SYNC|IO_INVAL))
                        bp->b_flags |= B_NOCACHE;

                if (uio->uio_offset + xfersize > ip->i_size) {
                        ip->i_size = uio->uio_offset + xfersize;
                        DIP_SET(ip, i_size, ip->i_size);
                        UFS_INODE_SET_FLAG(ip, IN_SIZEMOD | IN_CHANGE);
                }

                size = blksize(fs, ip, lbn) - bp->b_resid;
                if (size < xfersize)
                        xfersize = size;

                if (buf_mapped(bp)) {
                        error = vn_io_fault_uiomove((char *)bp->b_data +
                            blkoffset, (int)xfersize, uio);
                } else {
                        error = vn_io_fault_pgmove(bp->b_pages,
                            blkoffset + (bp->b_offset & PAGE_MASK),
                            (int)xfersize, uio);
                }
                /*
                 * If the buffer is not already filled and we encounter an
                 * error while trying to fill it, we have to clear out any
                 * garbage data from the pages instantiated for the buffer.
                 * If we do not, a failed uiomove() during a write can leave
                 * the prior contents of the pages exposed to a userland mmap.
                 *
                 * Note that we need only clear buffers with a transfer size
                 * equal to the block size because buffers with a shorter
                 * transfer size were cleared above by the call to UFS_BALLOC()
                 * with the BA_CLRBUF flag set.
                 *
                 * If the source region for uiomove identically mmaps the
                 * buffer, uiomove() performed the NOP copy, and the buffer
                 * content remains valid because the page fault handler
                 * validated the pages.
                 */
                if (error != 0 && (bp->b_flags & B_CACHE) == 0 &&
                    fs->fs_bsize == xfersize)
                        vfs_bio_clrbuf(bp);

                vfs_bio_set_flags(bp, ioflag);

                /*
                 * If IO_SYNC each buffer is written synchronously.  Otherwise
                 * if we have a severe page deficiency write the buffer
                 * asynchronously.  Otherwise try to cluster, and if that
                 * doesn't do it then either do an async write (if O_DIRECT),
                 * or a delayed write (if not).
                 */
                if (ioflag & IO_SYNC) {
                        (void)bwrite(bp);
                } else if (vm_page_count_severe() ||
                            buf_dirty_count_severe() ||
                            (ioflag & IO_ASYNC)) {
                        bp->b_flags |= B_CLUSTEROK;
                        bawrite(bp);
                } else if (xfersize + blkoffset == fs->fs_bsize) {
                        if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0) {
                                bp->b_flags |= B_CLUSTEROK;
                                cluster_write(vp, &ip->i_clusterw, bp,
                                    ip->i_size, seqcount, GB_UNMAPPED);
                        } else {
                                bawrite(bp);
                        }
                } else if (ioflag & IO_DIRECT) {
                        bp->b_flags |= B_CLUSTEROK;
                        bawrite(bp);
                } else {
                        bp->b_flags |= B_CLUSTEROK;
                        bdwrite(bp);
                }
                if (error || xfersize == 0)
                        break;
                UFS_INODE_SET_FLAG(ip, IN_CHANGE | IN_UPDATE);
        }
        /*
         * If we successfully wrote any data, and we are not the superuser
         * we clear the setuid and setgid bits as a precaution against
         * tampering.
         */
        if ((ip->i_mode & (ISUID | ISGID)) && resid > uio->uio_resid &&
            ap->a_cred) {
                if (priv_check_cred(ap->a_cred, PRIV_VFS_RETAINSUGID)) {
                        vn_seqc_write_begin(vp);
                        UFS_INODE_SET_MODE(ip, ip->i_mode & ~(ISUID | ISGID));
                        DIP_SET(ip, i_mode, ip->i_mode);
                        vn_seqc_write_end(vp);
                }
        }
        if (error) {
                if (ioflag & IO_UNIT) {
                        (void)ffs_truncate(vp, osize,
                            IO_NORMAL | (ioflag & IO_SYNC), ap->a_cred);
                        uio->uio_offset -= resid - uio->uio_resid;
                        uio->uio_resid = resid;
                }
        } else if (resid > uio->uio_resid && (ioflag & IO_SYNC)) {
                if (!(ioflag & IO_DATASYNC) ||
                    (ip->i_flag & (IN_SIZEMOD | IN_IBLKDATA)))
                        error = ffs_update(vp, 1);
                if (ffs_fsfail_cleanup(VFSTOUFS(vp->v_mount), error))
                        error = ENXIO;
        }
        vn_rlimit_fsizex_res(uio, r);
        return (error);
}

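/*
 * An example of the BA_CLRBUF rule in ffs_write(), again assuming
 * fs_bsize is 32768: a 100-byte write covers only part of its buffer,
 * so BA_CLRBUF makes UFS_BALLOC() read or zero the remainder before
 * the uiomove(); a full, block-aligned 32768-byte write covers the
 * whole buffer and skips the read-before-write entirely.
 */
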
/*
 * Extended attribute area reading.
 */
static int
ffs_extread(struct vnode *vp, struct uio *uio, int ioflag)
{
        struct inode *ip;
        struct ufs2_dinode *dp;
        struct fs *fs;
        struct buf *bp;
        ufs_lbn_t lbn, nextlbn;
        off_t bytesinfile;
        long size, xfersize, blkoffset;
        ssize_t orig_resid;
        int error;

        ip = VTOI(vp);
        fs = ITOFS(ip);
        dp = ip->i_din2;

#ifdef INVARIANTS
        if (uio->uio_rw != UIO_READ || fs->fs_magic != FS_UFS2_MAGIC)
                panic("ffs_extread: mode");
#endif
        orig_resid = uio->uio_resid;
        KASSERT(orig_resid >= 0, ("ffs_extread: uio->uio_resid < 0"));
        if (orig_resid == 0)
                return (0);
        KASSERT(uio->uio_offset >= 0, ("ffs_extread: uio->uio_offset < 0"));

        for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
                if ((bytesinfile = dp->di_extsize - uio->uio_offset) <= 0)
                        break;
                lbn = lblkno(fs, uio->uio_offset);
                nextlbn = lbn + 1;

                /*
                 * Size of the buffer.  The buffer representing the
                 * end of the file is rounded up to the size of
                 * the block type (fragment or full block, as
                 * appropriate).
                 */
                size = sblksize(fs, dp->di_extsize, lbn);
                blkoffset = blkoff(fs, uio->uio_offset);

                /*
                 * The amount we want to transfer in this iteration is
                 * one FS block less the amount of the data before
                 * our startpoint (duh!)
                 */
                xfersize = fs->fs_bsize - blkoffset;

                /*
                 * But if we actually want less than the block,
                 * or the file doesn't have a whole block more of data,
                 * then use the lesser number.
                 */
                if (uio->uio_resid < xfersize)
                        xfersize = uio->uio_resid;
                if (bytesinfile < xfersize)
                        xfersize = bytesinfile;

                if (lblktosize(fs, nextlbn) >= dp->di_extsize) {
                        /*
                         * Don't do readahead if this is the end of the
                         * extended attribute area.
                         */
                        error = bread(vp, -1 - lbn, size, NOCRED, &bp);
                } else {
                        /*
                         * If we have a second block, then
                         * fire off a request for a readahead
                         * as well as a read. Note that the 4th and 5th
                         * arguments point to arrays of the size specified in
                         * the 6th argument.
                         */
                        u_int nextsize = sblksize(fs, dp->di_extsize, nextlbn);

                        nextlbn = -1 - nextlbn;
                        error = breadn(vp, -1 - lbn,
                            size, &nextlbn, &nextsize, 1, NOCRED, &bp);
                }
                if (error) {
                        brelse(bp);
                        bp = NULL;
                        break;
                }

                /*
                 * We should only get non-zero b_resid when an I/O error
                 * has occurred, which should cause us to break above.
                 * However, if the short read did not cause an error,
                 * then we want to ensure that we do not uiomove bad
                 * or uninitialized data.
                 */
                size -= bp->b_resid;
                if (size < xfersize) {
                        if (size == 0)
                                break;
                        xfersize = size;
                }

                error = uiomove((char *)bp->b_data + blkoffset,
                                        (int)xfersize, uio);
                if (error)
                        break;
                vfs_bio_brelse(bp, ioflag);
        }

        /*
         * This can only happen in the case of an error, because the
         * loop above resets bp to NULL on each iteration and on normal
         * completion has not set a new value into it, so it must have
         * come from a 'break' statement.
         */
        if (bp != NULL)
                vfs_bio_brelse(bp, ioflag);
        return (error);
}

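/*
 * Note that ffs_extread() addresses the extended attribute area with
 * negative logical block numbers: block lbn of the area is read as
 * -1 - lbn, keeping extended attribute blocks distinct from the data
 * blocks of the file within the same vnode's buffer list.
 */
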
/*
 * Extended attribute area writing.
 */
static int
ffs_extwrite(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *ucred)
{
        struct inode *ip;
        struct ufs2_dinode *dp;
        struct fs *fs;
        struct buf *bp;
        ufs_lbn_t lbn;
        off_t osize;
        ssize_t resid;
        int blkoffset, error, flags, size, xfersize;

        ip = VTOI(vp);
        fs = ITOFS(ip);
        dp = ip->i_din2;

#ifdef INVARIANTS
        if (uio->uio_rw != UIO_WRITE || fs->fs_magic != FS_UFS2_MAGIC)
                panic("ffs_extwrite: mode");
#endif

        if (ioflag & IO_APPEND)
                uio->uio_offset = dp->di_extsize;
        KASSERT(uio->uio_offset >= 0, ("ffs_extwrite: uio->uio_offset < 0"));
        KASSERT(uio->uio_resid >= 0, ("ffs_extwrite: uio->uio_resid < 0"));
        if ((uoff_t)uio->uio_offset + uio->uio_resid >
            UFS_NXADDR * fs->fs_bsize)
                return (EFBIG);

        resid = uio->uio_resid;
        osize = dp->di_extsize;
        flags = IO_EXT;
        if (ioflag & IO_SYNC)
                flags |= IO_SYNC;

        for (error = 0; uio->uio_resid > 0;) {
                lbn = lblkno(fs, uio->uio_offset);
                blkoffset = blkoff(fs, uio->uio_offset);
                xfersize = fs->fs_bsize - blkoffset;
                if (uio->uio_resid < xfersize)
                        xfersize = uio->uio_resid;

                /*
                 * We must perform a read-before-write if the transfer size
                 * does not cover the entire buffer.
                 */
                if (fs->fs_bsize > xfersize)
                        flags |= BA_CLRBUF;
                else
                        flags &= ~BA_CLRBUF;
                error = UFS_BALLOC(vp, uio->uio_offset, xfersize,
                    ucred, flags, &bp);
                if (error != 0)
                        break;
                /*
                 * If the buffer is not valid we have to clear out any
                 * garbage data from the pages instantiated for the buffer.
                 * If we do not, a failed uiomove() during a write can leave
                 * the prior contents of the pages exposed to a userland
                 * mmap().  XXX deal with uiomove() errors a better way.
                 */
                if ((bp->b_flags & B_CACHE) == 0 && fs->fs_bsize <= xfersize)
                        vfs_bio_clrbuf(bp);

                if (uio->uio_offset + xfersize > dp->di_extsize) {
                        dp->di_extsize = uio->uio_offset + xfersize;
                        UFS_INODE_SET_FLAG(ip, IN_SIZEMOD | IN_CHANGE);
                }

                size = sblksize(fs, dp->di_extsize, lbn) - bp->b_resid;
                if (size < xfersize)
                        xfersize = size;

                error =
                    uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio);

                vfs_bio_set_flags(bp, ioflag);

                /*
                 * If IO_SYNC each buffer is written synchronously.  Otherwise
                 * if we have a severe page deficiency write the buffer
                 * asynchronously.  Otherwise try to cluster, and if that
                 * doesn't do it then either do an async write (if O_DIRECT),
                 * or a delayed write (if not).
                 */
                if (ioflag & IO_SYNC) {
                        (void)bwrite(bp);
                } else if (vm_page_count_severe() ||
                            buf_dirty_count_severe() ||
                            xfersize + blkoffset == fs->fs_bsize ||
                            (ioflag & (IO_ASYNC | IO_DIRECT)))
                        bawrite(bp);
                else
                        bdwrite(bp);
                if (error || xfersize == 0)
                        break;
                UFS_INODE_SET_FLAG(ip, IN_CHANGE);
        }
        /*
         * If we successfully wrote any data, and we are not the superuser
         * we clear the setuid and setgid bits as a precaution against
         * tampering.
         */
        if ((ip->i_mode & (ISUID | ISGID)) && resid > uio->uio_resid && ucred) {
                if (priv_check_cred(ucred, PRIV_VFS_RETAINSUGID)) {
                        vn_seqc_write_begin(vp);
                        UFS_INODE_SET_MODE(ip, ip->i_mode & ~(ISUID | ISGID));
                        dp->di_mode = ip->i_mode;
                        vn_seqc_write_end(vp);
                }
        }
        if (error) {
                if (ioflag & IO_UNIT) {
                        (void)ffs_truncate(vp, osize,
                            IO_EXT | (ioflag & IO_SYNC), ucred);
                        uio->uio_offset -= resid - uio->uio_resid;
                        uio->uio_resid = resid;
                }
        } else if (resid > uio->uio_resid && (ioflag & IO_SYNC))
                error = ffs_update(vp, 1);
        return (error);
}

 1292 /*
 1293  * Helper to locate a particular EA (nspace:name) in the area (ptr:length).
 1294  *
 1295  * Returns the length of the EA content, or -1 if it is not found; optionally
 1296  * returns the pointer to the entry (eapp) and to the data (eac).
 1297  */
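      /*
       * Each record in the EA area is a variable-length, 8-byte-aligned
       * struct extattr: a 32-bit total record length (ea_length), a
       * namespace byte, a content-pad byte, a name-length byte, the name
       * itself (not nul-terminated), then the padded content.
       * EXTATTR_NEXT() advances by ea_length, while EXTATTR_CONTENT() and
       * EXTATTR_CONTENT_SIZE() locate the value and its unpadded size.
       */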
 1298 static int
 1299 ffs_findextattr(u_char *ptr, u_int length, int nspace, const char *name,
 1300     struct extattr **eapp, u_char **eac)
 1301 {
 1302         struct extattr *eap, *eaend;
 1303         size_t nlen;
 1304 
 1305         nlen = strlen(name);
 1306         KASSERT(ALIGNED_TO(ptr, struct extattr), ("unaligned"));
 1307         eap = (struct extattr *)ptr;
 1308         eaend = (struct extattr *)(ptr + length);
 1309         for (; eap < eaend; eap = EXTATTR_NEXT(eap)) {
 1310                 KASSERT(EXTATTR_NEXT(eap) <= eaend,
 1311                     ("extattr next %p beyond %p", EXTATTR_NEXT(eap), eaend));
 1312                 if (eap->ea_namespace != nspace || eap->ea_namelength != nlen
 1313                     || memcmp(eap->ea_name, name, nlen) != 0)
 1314                         continue;
 1315                 if (eapp != NULL)
 1316                         *eapp = eap;
 1317                 if (eac != NULL)
 1318                         *eac = EXTATTR_CONTENT(eap);
 1319                 return (EXTATTR_CONTENT_SIZE(eap));
 1320         }
 1321         return (-1);
 1322 }
 1323 
 1324 static int
 1325 ffs_rdextattr(u_char **p, struct vnode *vp, struct thread *td)
 1326 {
 1327         const struct extattr *eap, *eaend, *eapnext;
 1328         struct inode *ip;
 1329         struct ufs2_dinode *dp;
 1330         struct fs *fs;
 1331         struct uio luio;
 1332         struct iovec liovec;
 1333         u_int easize;
 1334         int error;
 1335         u_char *eae;
 1336 
 1337         ip = VTOI(vp);
 1338         fs = ITOFS(ip);
 1339         dp = ip->i_din2;
 1340         easize = dp->di_extsize;
 1341         if ((uoff_t)easize > UFS_NXADDR * fs->fs_bsize)
 1342                 return (EFBIG);
 1343 
 1344         eae = malloc(easize, M_TEMP, M_WAITOK);
 1345 
 1346         liovec.iov_base = eae;
 1347         liovec.iov_len = easize;
 1348         luio.uio_iov = &liovec;
 1349         luio.uio_iovcnt = 1;
 1350         luio.uio_offset = 0;
 1351         luio.uio_resid = easize;
 1352         luio.uio_segflg = UIO_SYSSPACE;
 1353         luio.uio_rw = UIO_READ;
 1354         luio.uio_td = td;
 1355 
 1356         error = ffs_extread(vp, &luio, IO_EXT | IO_SYNC);
 1357         if (error) {
 1358                 free(eae, M_TEMP);
 1359                 return (error);
 1360         }
 1361         /* Validate disk xattrfile contents. */
 1362         for (eap = (void *)eae, eaend = (void *)(eae + easize); eap < eaend;
 1363             eap = eapnext) {
 1364                 /* Detect zeroed out tail */
 1365                 if (eap->ea_length < sizeof(*eap) || eap->ea_length == 0) {
 1366                         easize = (const u_char *)eap - eae;
 1367                         break;
 1368                 }
 1369 
 1370                 eapnext = EXTATTR_NEXT(eap);
 1371                 /* Bogusly long entry. */
 1372                 if (eapnext > eaend) {
 1373                         free(eae, M_TEMP);
 1374                         return (EINTEGRITY);
 1375                 }
 1376         }
 1377         ip->i_ea_len = easize;
 1378         *p = eae;
 1379         return (0);
 1380 }
 1381 
 1382 static void
 1383 ffs_lock_ea(struct vnode *vp)
 1384 {
 1385         struct inode *ip;
 1386 
 1387         ip = VTOI(vp);
 1388         VI_LOCK(vp);
 1389         while (ip->i_flag & IN_EA_LOCKED) {
 1390                 UFS_INODE_SET_FLAG(ip, IN_EA_LOCKWAIT);
 1391                 msleep(&ip->i_ea_refs, &vp->v_interlock, PINOD + 2, "ufs_ea",
 1392                     0);
 1393         }
 1394         UFS_INODE_SET_FLAG(ip, IN_EA_LOCKED);
 1395         VI_UNLOCK(vp);
 1396 }
 1397 
 1398 static void
 1399 ffs_unlock_ea(struct vnode *vp)
 1400 {
 1401         struct inode *ip;
 1402 
 1403         ip = VTOI(vp);
 1404         VI_LOCK(vp);
 1405         if (ip->i_flag & IN_EA_LOCKWAIT)
 1406                 wakeup(&ip->i_ea_refs);
 1407         ip->i_flag &= ~(IN_EA_LOCKED | IN_EA_LOCKWAIT);
 1408         VI_UNLOCK(vp);
 1409 }
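      /*
       * The in-memory EA area is reference counted: ffs_open_ea() reads it
       * from disk on first use and bumps i_ea_refs, and ffs_close_ea()
       * drops the reference, writing the area back when committing and
       * freeing it when the last reference goes away.  The VOPs below all
       * follow roughly the same pattern:
       *
       *         error = ffs_open_ea(vp, cred, td);
       *         if (error == 0) {
       *                 ... inspect or rebuild ip->i_ea_area / i_ea_len ...
       *                 error = ffs_close_ea(vp, commit, cred, td);
       *         }
       */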
 1410 
 1411 static int
 1412 ffs_open_ea(struct vnode *vp, struct ucred *cred, struct thread *td)
 1413 {
 1414         struct inode *ip;
 1415         int error;
 1416 
 1417         ip = VTOI(vp);
 1418 
 1419         ffs_lock_ea(vp);
 1420         if (ip->i_ea_area != NULL) {
 1421                 ip->i_ea_refs++;
 1422                 ffs_unlock_ea(vp);
 1423                 return (0);
 1424         }
 1425         error = ffs_rdextattr(&ip->i_ea_area, vp, td);
 1426         if (error) {
 1427                 ffs_unlock_ea(vp);
 1428                 return (error);
 1429         }
 1430         ip->i_ea_error = 0;
 1431         ip->i_ea_refs++;
 1432         ffs_unlock_ea(vp);
 1433         return (0);
 1434 }
 1435 
 1436 /*
 1437  * Vnode extattr transaction commit/abort
 1438  */
 1439 static int
 1440 ffs_close_ea(struct vnode *vp, int commit, struct ucred *cred, struct thread *td)
 1441 {
 1442         struct inode *ip;
 1443         struct uio luio;
 1444         struct iovec *liovec;
 1445         struct ufs2_dinode *dp;
 1446         size_t ea_len, tlen;
 1447         int error, i, lcnt;
 1448         bool truncate;
 1449 
 1450         ip = VTOI(vp);
 1451 
 1452         ffs_lock_ea(vp);
 1453         if (ip->i_ea_area == NULL) {
 1454                 ffs_unlock_ea(vp);
 1455                 return (EINVAL);
 1456         }
 1457         dp = ip->i_din2;
 1458         error = ip->i_ea_error;
 1459         truncate = false;
 1460         if (commit && error == 0) {
 1461                 ASSERT_VOP_ELOCKED(vp, "ffs_close_ea commit");
 1462                 if (cred == NOCRED)
 1463                         cred = vp->v_mount->mnt_cred;
 1464 
 1465                 ea_len = MAX(ip->i_ea_len, dp->di_extsize);
 1466                 for (lcnt = 1, tlen = ea_len - ip->i_ea_len; tlen > 0;) {
 1467                         tlen -= MIN(ZERO_REGION_SIZE, tlen);
 1468                         lcnt++;
 1469                 }
 1470 
 1471                 liovec = __builtin_alloca(lcnt * sizeof(struct iovec));
 1472                 luio.uio_iovcnt = lcnt;
 1473 
 1474                 liovec[0].iov_base = ip->i_ea_area;
 1475                 liovec[0].iov_len = ip->i_ea_len;
 1476                 for (i = 1, tlen = ea_len - ip->i_ea_len; i < lcnt; i++) {
 1477                         liovec[i].iov_base = __DECONST(void *, zero_region);
 1478                         liovec[i].iov_len = MIN(ZERO_REGION_SIZE, tlen);
 1479                         tlen -= liovec[i].iov_len;
 1480                 }
 1481                 MPASS(tlen == 0);
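                      /*
                       * For example, if the area shrank so that ea_len -
                       * i_ea_len is 100 KB and ZERO_REGION_SIZE is 64 KB (a
                       * common setting), lcnt is 3: one iovec for the EA data,
                       * followed by 64 KB and 36 KB slices of the preallocated
                       * zero_region that blank the stale tail.
                       */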
 1482 
 1483                 luio.uio_iov = liovec;
 1484                 luio.uio_offset = 0;
 1485                 luio.uio_resid = ea_len;
 1486                 luio.uio_segflg = UIO_SYSSPACE;
 1487                 luio.uio_rw = UIO_WRITE;
 1488                 luio.uio_td = td;
 1489                 error = ffs_extwrite(vp, &luio, IO_EXT | IO_SYNC, cred);
 1490                 if (error == 0 && ip->i_ea_len == 0)
 1491                         truncate = true;
 1492         }
 1493         if (--ip->i_ea_refs == 0) {
 1494                 free(ip->i_ea_area, M_TEMP);
 1495                 ip->i_ea_area = NULL;
 1496                 ip->i_ea_len = 0;
 1497                 ip->i_ea_error = 0;
 1498         }
 1499         ffs_unlock_ea(vp);
 1500 
 1501         if (truncate)
 1502                 ffs_truncate(vp, 0, IO_EXT, cred);
 1503         return (error);
 1504 }
 1505 
 1506 /*
 1507  * Vnode extattr strategy routine for fifos.
 1508  *
 1509  * We need to check for a read or write of the extended attributes.
 1510  * Otherwise we just fall through and do the usual fifo thing.
 1511  */
 1512 static int
 1513 ffsext_strategy(
 1514         struct vop_strategy_args /* {
 1515                 struct vnodeop_desc *a_desc;
 1516                 struct vnode *a_vp;
 1517                 struct buf *a_bp;
 1518         } */ *ap)
 1519 {
 1520         struct vnode *vp;
 1521         daddr_t lbn;
 1522 
 1523         vp = ap->a_vp;
 1524         lbn = ap->a_bp->b_lblkno;
 1525         if (I_IS_UFS2(VTOI(vp)) && lbn < 0 && lbn >= -UFS_NXADDR)
 1526                 return (VOP_STRATEGY_APV(&ufs_vnodeops, ap));
 1527         if (vp->v_type == VFIFO)
 1528                 return (VOP_STRATEGY_APV(&ufs_fifoops, ap));
 1529         panic("spec nodes went here");
 1530 }
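      /*
       * UFS2 addresses the external attribute blocks with negative logical
       * block numbers (-1 down to -UFS_NXADDR), which is how the check
       * above tells EA I/O apart from ordinary fifo I/O.
       */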
 1531 
 1532 /*
 1533  * Vnode extattr transaction start (open).
 1534  */
 1535 static int
 1536 ffs_openextattr(
 1537         struct vop_openextattr_args /* {
 1538                 struct vnodeop_desc *a_desc;
 1539                 struct vnode *a_vp;
 1540                 IN struct ucred *a_cred;
 1541                 IN struct thread *a_td;
 1542         } */ *ap)
 1543 {
 1544 
 1545         if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
 1546                 return (EOPNOTSUPP);
 1547 
 1548         return (ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td));
 1549 }
 1550 
 1551 /*
 1552  * Vnode extattr transaction commit/abort
 1553  */
 1554 static int
 1555 ffs_closeextattr(
 1556         struct vop_closeextattr_args /* {
 1557                 struct vnodeop_desc *a_desc;
 1558                 struct vnode *a_vp;
 1559                 int a_commit;
 1560                 IN struct ucred *a_cred;
 1561                 IN struct thread *a_td;
 1562         } */ *ap)
 1563 {
 1564         struct vnode *vp;
 1565 
 1566         vp = ap->a_vp;
 1567         if (vp->v_type == VCHR || vp->v_type == VBLK)
 1568                 return (EOPNOTSUPP);
 1569         if (ap->a_commit && (vp->v_mount->mnt_flag & MNT_RDONLY) != 0)
 1570                 return (EROFS);
 1571 
 1572         if (ap->a_commit && DOINGSUJ(vp)) {
 1573                 ASSERT_VOP_ELOCKED(vp, "ffs_closeextattr commit");
 1574                 softdep_prealloc(vp, MNT_WAIT);
 1575                 if (vp->v_data == NULL)
 1576                         return (EBADF);
 1577         }
 1578         return (ffs_close_ea(vp, ap->a_commit, ap->a_cred, ap->a_td));
 1579 }
 1580 
 1581 /*
 1582  * Vnode operation to remove a named attribute.
 1583  */
 1584 static int
 1585 ffs_deleteextattr(
 1586         struct vop_deleteextattr_args /* {
 1587                 IN struct vnode *a_vp;
 1588                 IN int a_attrnamespace;
 1589                 IN const char *a_name;
 1590                 IN struct ucred *a_cred;
 1591                 IN struct thread *a_td;
 1592         } */ *ap)
 1593 {
 1594         struct vnode *vp;
 1595         struct inode *ip;
 1596         struct extattr *eap;
 1597         uint32_t ul;
 1598         int olen, error, i, easize;
 1599         u_char *eae;
 1600         void *tmp;
 1601 
 1602         vp = ap->a_vp;
 1603         ip = VTOI(vp);
 1604 
 1605         if (vp->v_type == VCHR || vp->v_type == VBLK)
 1606                 return (EOPNOTSUPP);
 1607         if (strlen(ap->a_name) == 0)
 1608                 return (EINVAL);
 1609         if (vp->v_mount->mnt_flag & MNT_RDONLY)
 1610                 return (EROFS);
 1611 
 1612         error = extattr_check_cred(vp, ap->a_attrnamespace,
 1613             ap->a_cred, ap->a_td, VWRITE);
 1614         if (error) {
 1615                 /*
 1616                  * ffs_lock_ea is not needed here, because the vnode
 1617                  * must be exclusively locked.
 1618                  */
 1619                 if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
 1620                         ip->i_ea_error = error;
 1621                 return (error);
 1622         }
 1623 
 1624         if (DOINGSUJ(vp)) {
 1625                 ASSERT_VOP_ELOCKED(vp, "ffs_deleteextattr");
 1626                 softdep_prealloc(vp, MNT_WAIT);
 1627                 if (vp->v_data == NULL)
 1628                         return (EBADF);
 1629         }
 1630 
 1631         error = ffs_open_ea(vp, ap->a_cred, ap->a_td);
 1632         if (error)
 1633                 return (error);
 1634 
 1635         /* CEM: delete could be done in-place instead */
 1636         eae = malloc(ip->i_ea_len, M_TEMP, M_WAITOK);
 1637         bcopy(ip->i_ea_area, eae, ip->i_ea_len);
 1638         easize = ip->i_ea_len;
 1639 
 1640         olen = ffs_findextattr(eae, easize, ap->a_attrnamespace, ap->a_name,
 1641             &eap, NULL);
 1642         if (olen == -1) {
 1643                 /* delete but nonexistent */
 1644                 free(eae, M_TEMP);
 1645                 ffs_close_ea(vp, 0, ap->a_cred, ap->a_td);
 1646                 return (ENOATTR);
 1647         }
 1648         ul = eap->ea_length;
 1649         i = (u_char *)EXTATTR_NEXT(eap) - eae;
 1650         bcopy(EXTATTR_NEXT(eap), eap, easize - i);
 1651         easize -= ul;
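              /*
               * For example, deleting a 32-byte record that is followed by
               * one more 32-byte record in a 96-byte area copies the
               * trailing 32 bytes down over the dead record and shrinks
               * easize to 64.  The regions may overlap; bcopy() handles
               * overlapping copies.
               */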
 1652 
 1653         tmp = ip->i_ea_area;
 1654         ip->i_ea_area = eae;
 1655         ip->i_ea_len = easize;
 1656         free(tmp, M_TEMP);
 1657         error = ffs_close_ea(vp, 1, ap->a_cred, ap->a_td);
 1658         return (error);
 1659 }
 1660 
 1661 /*
 1662  * Vnode operation to retrieve a named extended attribute.
 1663  */
 1664 static int
 1665 ffs_getextattr(
 1666         struct vop_getextattr_args /* {
 1667                 IN struct vnode *a_vp;
 1668                 IN int a_attrnamespace;
 1669                 IN const char *a_name;
 1670                 INOUT struct uio *a_uio;
 1671                 OUT size_t *a_size;
 1672                 IN struct ucred *a_cred;
 1673                 IN struct thread *a_td;
 1674         } */ *ap)
 1675 {
 1676         struct inode *ip;
 1677         u_char *eae, *p;
 1678         unsigned easize;
 1679         int error, ealen;
 1680 
 1681         ip = VTOI(ap->a_vp);
 1682 
 1683         if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
 1684                 return (EOPNOTSUPP);
 1685 
 1686         error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
 1687             ap->a_cred, ap->a_td, VREAD);
 1688         if (error)
 1689                 return (error);
 1690 
 1691         error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
 1692         if (error)
 1693                 return (error);
 1694 
 1695         eae = ip->i_ea_area;
 1696         easize = ip->i_ea_len;
 1697 
 1698         ealen = ffs_findextattr(eae, easize, ap->a_attrnamespace, ap->a_name,
 1699             NULL, &p);
 1700         if (ealen >= 0) {
 1701                 error = 0;
 1702                 if (ap->a_size != NULL)
 1703                         *ap->a_size = ealen;
 1704                 else if (ap->a_uio != NULL)
 1705                         error = uiomove(p, ealen, ap->a_uio);
 1706         } else
 1707                 error = ENOATTR;
 1708 
 1709         ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
 1710         return (error);
 1711 }
 1712 
 1713 /*
 1714  * Vnode operation to list extended attribute names on a vnode.
 1715  */
 1716 static int
 1717 ffs_listextattr(
 1718         struct vop_listextattr_args /* {
 1719                 IN struct vnode *a_vp;
 1720                 IN int a_attrnamespace;
 1721                 INOUT struct uio *a_uio;
 1722                 OUT size_t *a_size;
 1723                 IN struct ucred *a_cred;
 1724                 IN struct thread *a_td;
 1725         } */ *ap)
 1726 {
 1727         struct inode *ip;
 1728         struct extattr *eap, *eaend;
 1729         int error, ealen;
 1730 
 1731         ip = VTOI(ap->a_vp);
 1732 
 1733         if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
 1734                 return (EOPNOTSUPP);
 1735 
 1736         error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
 1737             ap->a_cred, ap->a_td, VREAD);
 1738         if (error)
 1739                 return (error);
 1740 
 1741         error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
 1742         if (error)
 1743                 return (error);
 1744 
 1745         error = 0;
 1746         if (ap->a_size != NULL)
 1747                 *ap->a_size = 0;
 1748 
 1749         KASSERT(ALIGNED_TO(ip->i_ea_area, struct extattr), ("unaligned"));
 1750         eap = (struct extattr *)ip->i_ea_area;
 1751         eaend = (struct extattr *)(ip->i_ea_area + ip->i_ea_len);
 1752         for (; error == 0 && eap < eaend; eap = EXTATTR_NEXT(eap)) {
 1753                 KASSERT(EXTATTR_NEXT(eap) <= eaend,
 1754                     ("extattr next %p beyond %p", EXTATTR_NEXT(eap), eaend));
 1755                 if (eap->ea_namespace != ap->a_attrnamespace)
 1756                         continue;
 1757 
 1758                 ealen = eap->ea_namelength;
 1759                 if (ap->a_size != NULL)
 1760                         *ap->a_size += ealen + 1;
 1761                 else if (ap->a_uio != NULL)
 1762                         error = uiomove(&eap->ea_namelength, ealen + 1,
 1763                             ap->a_uio);
 1764         }
 1765 
 1766         ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
 1767         return (error);
 1768 }
 1769 
 1770 /*
 1771  * Vnode operation to set a named attribute.
 1772  */
 1773 static int
 1774 ffs_setextattr(
 1775         struct vop_setextattr_args /* {
 1776                 IN struct vnode *a_vp;
 1777                 IN int a_attrnamespace;
 1778                 IN const char *a_name;
 1779                 INOUT struct uio *a_uio;
 1780                 IN struct ucred *a_cred;
 1781                 IN struct thread *a_td;
 1782         } */ *ap)
 1783 {
 1784         struct vnode *vp;
 1785         struct inode *ip;
 1786         struct fs *fs;
 1787         struct extattr *eap;
 1788         uint32_t ealength, ul;
 1789         ssize_t ealen;
 1790         int olen, eapad1, eapad2, error, i, easize;
 1791         u_char *eae;
 1792         void *tmp;
 1793 
 1794         vp = ap->a_vp;
 1795         ip = VTOI(vp);
 1796         fs = ITOFS(ip);
 1797 
 1798         if (vp->v_type == VCHR || vp->v_type == VBLK)
 1799                 return (EOPNOTSUPP);
 1800         if (strlen(ap->a_name) == 0)
 1801                 return (EINVAL);
 1802 
 1803         /* XXX The API for deleting EAs via a NULL uio is no longer supported. */
 1804         if (ap->a_uio == NULL)
 1805                 return (EOPNOTSUPP);
 1806 
 1807         if (vp->v_mount->mnt_flag & MNT_RDONLY)
 1808                 return (EROFS);
 1809 
 1810         ealen = ap->a_uio->uio_resid;
 1811         if (ealen < 0 || ealen > lblktosize(fs, UFS_NXADDR))
 1812                 return (EINVAL);
 1813 
 1814         error = extattr_check_cred(vp, ap->a_attrnamespace,
 1815             ap->a_cred, ap->a_td, VWRITE);
 1816         if (error) {
 1817                 /*
 1818                  * ffs_lock_ea is not needed here, because the vnode
 1819                  * must be exclusively locked.
 1820                  */
 1821                 if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
 1822                         ip->i_ea_error = error;
 1823                 return (error);
 1824         }
 1825 
 1826         if (DOINGSUJ(vp)) {
 1827                 ASSERT_VOP_ELOCKED(vp, "ffs_setextattr");
 1828                 softdep_prealloc(vp, MNT_WAIT);
 1829                 if (vp->v_data == NULL)
 1830                         return (EBADF);
 1831         }
 1832 
 1833         error = ffs_open_ea(vp, ap->a_cred, ap->a_td);
 1834         if (error)
 1835                 return (error);
 1836 
 1837         ealength = sizeof(uint32_t) + 3 + strlen(ap->a_name);
 1838         eapad1 = roundup2(ealength, 8) - ealength;
 1839         eapad2 = roundup2(ealen, 8) - ealen;
 1840         ealength += eapad1 + ealen + eapad2;
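              /*
               * Worked example: an 11-byte name with a 5-byte value has a
               * 4 + 3 + 11 = 18 byte header, padded by eapad1 = 6 to the
               * next 8-byte boundary, and the value is padded by eapad2 = 3,
               * giving a total record length of 32 bytes.
               */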
 1841 
 1842         /*
 1843          * CEM: rewrites of the same size or smaller could be done in-place
 1844          * instead.  (We don't acquire any fine-grained locks in here either,
 1845          * so we could also do bigger writes in-place.)
 1846          */
 1847         eae = malloc(ip->i_ea_len + ealength, M_TEMP, M_WAITOK);
 1848         bcopy(ip->i_ea_area, eae, ip->i_ea_len);
 1849         easize = ip->i_ea_len;
 1850 
 1851         olen = ffs_findextattr(eae, easize, ap->a_attrnamespace, ap->a_name,
 1852             &eap, NULL);
 1853         if (olen == -1) {
 1854                 /* new, append at end */
 1855                 KASSERT(ALIGNED_TO(eae + easize, struct extattr),
 1856                     ("unaligned"));
 1857                 eap = (struct extattr *)(eae + easize);
 1858                 easize += ealength;
 1859         } else {
 1860                 ul = eap->ea_length;
 1861                 i = (u_char *)EXTATTR_NEXT(eap) - eae;
 1862                 if (ul != ealength) {
 1863                         bcopy(EXTATTR_NEXT(eap), (u_char *)eap + ealength,
 1864                             easize - i);
 1865                         easize += (ealength - ul);
 1866                 }
 1867         }
 1868         if (easize > lblktosize(fs, UFS_NXADDR)) {
 1869                 free(eae, M_TEMP);
 1870                 ffs_close_ea(vp, 0, ap->a_cred, ap->a_td);
 1871                 if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
 1872                         ip->i_ea_error = ENOSPC;
 1873                 return (ENOSPC);
 1874         }
 1875         eap->ea_length = ealength;
 1876         eap->ea_namespace = ap->a_attrnamespace;
 1877         eap->ea_contentpadlen = eapad2;
 1878         eap->ea_namelength = strlen(ap->a_name);
 1879         memcpy(eap->ea_name, ap->a_name, strlen(ap->a_name));
 1880         bzero(&eap->ea_name[strlen(ap->a_name)], eapad1);
 1881         error = uiomove(EXTATTR_CONTENT(eap), ealen, ap->a_uio);
 1882         if (error) {
 1883                 free(eae, M_TEMP);
 1884                 ffs_close_ea(vp, 0, ap->a_cred, ap->a_td);
 1885                 if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
 1886                         ip->i_ea_error = error;
 1887                 return (error);
 1888         }
 1889         bzero((u_char *)EXTATTR_CONTENT(eap) + ealen, eapad2);
 1890 
 1891         tmp = ip->i_ea_area;
 1892         ip->i_ea_area = eae;
 1893         ip->i_ea_len = easize;
 1894         free(tmp, M_TEMP);
 1895         error = ffs_close_ea(vp, 1, ap->a_cred, ap->a_td);
 1896         return (error);
 1897 }
 1898 
 1899 /*
 1900  * Vnode pointer to File handle
 1901  */
 1902 static int
 1903 ffs_vptofh(
 1904         struct vop_vptofh_args /* {
 1905                 IN struct vnode *a_vp;
 1906                 IN struct fid *a_fhp;
 1907         } */ *ap)
 1908 {
 1909         struct inode *ip;
 1910         struct ufid *ufhp;
 1911 
 1912         ip = VTOI(ap->a_vp);
 1913         ufhp = (struct ufid *)ap->a_fhp;
 1914         ufhp->ufid_len = sizeof(struct ufid);
 1915         ufhp->ufid_ino = ip->i_number;
 1916         ufhp->ufid_gen = ip->i_gen;
 1917         return (0);
 1918 }
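      /*
       * The handle is opaque to consumers such as the NFS server;
       * ffs_fhtovp() in ffs_vfsops.c performs the reverse translation and
       * uses ufid_gen to reject stale handles whose inode number has been
       * recycled.
       */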
 1919 
 1920 SYSCTL_DECL(_vfs_ffs);
 1921 static int use_buf_pager = 1;
 1922 SYSCTL_INT(_vfs_ffs, OID_AUTO, use_buf_pager, CTLFLAG_RWTUN, &use_buf_pager, 0,
 1923     "Always use buffer pager instead of bmap");
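      /*
       * Being CTLFLAG_RWTUN, the pager choice can be changed at runtime or
       * set as a loader tunable, e.g.:
       *
       *         sysctl vfs.ffs.use_buf_pager=0
       *
       * to prefer the generic vnode pager on devices whose block size does
       * not exceed the page size.
       */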
 1924 
 1925 static daddr_t
 1926 ffs_gbp_getblkno(struct vnode *vp, vm_ooffset_t off)
 1927 {
 1928 
 1929         return (lblkno(VFSTOUFS(vp->v_mount)->um_fs, off));
 1930 }
 1931 
 1932 static int
 1933 ffs_gbp_getblksz(struct vnode *vp, daddr_t lbn, long *sz)
 1934 {
 1935 
 1936         *sz = blksize(VFSTOUFS(vp->v_mount)->um_fs, VTOI(vp), lbn);
 1937         return (0);
 1938 }
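      /*
       * These two callbacks are all that vfs_bio_getpages() needs from the
       * filesystem: a mapping from byte offset to logical block number, and
       * the size of a given logical block (the last block of a file may be
       * a fragment smaller than fs_bsize, which blksize() accounts for).
       */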
 1939 
 1940 static int
 1941 ffs_getpages(struct vop_getpages_args *ap)
 1942 {
 1943         struct vnode *vp;
 1944         struct ufsmount *um;
 1945 
 1946         vp = ap->a_vp;
 1947         um = VFSTOUFS(vp->v_mount);
 1948 
 1949         if (!use_buf_pager && um->um_devvp->v_bufobj.bo_bsize <= PAGE_SIZE)
 1950                 return (vnode_pager_generic_getpages(vp, ap->a_m, ap->a_count,
 1951                     ap->a_rbehind, ap->a_rahead, NULL, NULL));
 1952         return (vfs_bio_getpages(vp, ap->a_m, ap->a_count, ap->a_rbehind,
 1953             ap->a_rahead, ffs_gbp_getblkno, ffs_gbp_getblksz));
 1954 }
 1955 
 1956 static int
 1957 ffs_getpages_async(struct vop_getpages_async_args *ap)
 1958 {
 1959         struct vnode *vp;
 1960         struct ufsmount *um;
 1961         bool do_iodone;
 1962         int error;
 1963 
 1964         vp = ap->a_vp;
 1965         um = VFSTOUFS(vp->v_mount);
 1966         do_iodone = true;
 1967 
 1968         if (um->um_devvp->v_bufobj.bo_bsize <= PAGE_SIZE) {
 1969                 error = vnode_pager_generic_getpages(vp, ap->a_m, ap->a_count,
 1970                     ap->a_rbehind, ap->a_rahead, ap->a_iodone, ap->a_arg);
 1971                 if (error == 0)
 1972                         do_iodone = false;
 1973         } else {
 1974                 error = vfs_bio_getpages(vp, ap->a_m, ap->a_count,
 1975                     ap->a_rbehind, ap->a_rahead, ffs_gbp_getblkno,
 1976                     ffs_gbp_getblksz);
 1977         }
 1978         if (do_iodone && ap->a_iodone != NULL)
 1979                 ap->a_iodone(ap->a_arg, ap->a_m, ap->a_count, error);
 1980 
 1981         return (error);
 1982 }
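      /*
       * Note that vnode_pager_generic_getpages() arranges for a_iodone to
       * run itself when it accepts the request, so the callback is invoked
       * here only for the synchronous vfs_bio_getpages() path or when the
       * generic pager returned an error.
       */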
 1983 
 1984 static int
 1985 ffs_vput_pair(struct vop_vput_pair_args *ap)
 1986 {
 1987         struct mount *mp;
 1988         struct vnode *dvp, *vp, *vp1, **vpp;
 1989         struct inode *dp, *ip;
 1990         ino_t ip_ino;
 1991         u_int64_t ip_gen;
 1992         int error, vp_locked;
 1993 
 1994         dvp = ap->a_dvp;
 1995         dp = VTOI(dvp);
 1996         vpp = ap->a_vpp;
 1997         vp = vpp != NULL ? *vpp : NULL;
 1998 
 1999         if ((dp->i_flag & (IN_NEEDSYNC | IN_ENDOFF)) == 0) {
 2000                 vput(dvp);
 2001                 if (vp != NULL && ap->a_unlock_vp)
 2002                         vput(vp);
 2003                 return (0);
 2004         }
 2005 
 2006         mp = dvp->v_mount;
 2007         if (vp != NULL) {
 2008                 if (ap->a_unlock_vp) {
 2009                         vput(vp);
 2010                 } else {
 2011                         MPASS(vp->v_type != VNON);
 2012                         vp_locked = VOP_ISLOCKED(vp);
 2013                         ip = VTOI(vp);
 2014                         ip_ino = ip->i_number;
 2015                         ip_gen = ip->i_gen;
 2016                         VOP_UNLOCK(vp);
 2017                 }
 2018         }
 2019 
 2020         /*
 2021          * If compaction or fsync was requested, do it now that the
 2022          * other locks are no longer held.
 2023          */
 2024         if ((dp->i_flag & IN_ENDOFF) != 0) {
 2025                 VNASSERT(I_ENDOFF(dp) != 0 && I_ENDOFF(dp) < dp->i_size, dvp,
 2026                     ("IN_ENDOFF set but I_ENDOFF() is not"));
 2027                 dp->i_flag &= ~IN_ENDOFF;
 2028                 error = UFS_TRUNCATE(dvp, (off_t)I_ENDOFF(dp), IO_NORMAL |
 2029                     (DOINGASYNC(dvp) ? 0 : IO_SYNC), curthread->td_ucred);
 2030                 if (error != 0 && error != ERELOOKUP) {
 2031                         if (!ffs_fsfail_cleanup(VFSTOUFS(mp), error)) {
 2032                                 vn_printf(dvp,
 2033                                     "IN_ENDOFF: failed to truncate, "
 2034                                     "error %d\n", error);
 2035                         }
 2036 #ifdef UFS_DIRHASH
 2037                         ufsdirhash_free(dp);
 2038 #endif
 2039                 }
 2040                 SET_I_ENDOFF(dp, 0);
 2041         }
 2042         if ((dp->i_flag & IN_NEEDSYNC) != 0) {
 2043                 do {
 2044                         error = ffs_syncvnode(dvp, MNT_WAIT, 0);
 2045                 } while (error == ERELOOKUP);
 2046         }
 2047 
 2048         vput(dvp);
 2049 
 2050         if (vp == NULL || ap->a_unlock_vp)
 2051                 return (0);
 2052         MPASS(mp != NULL);
 2053 
 2054         /*
 2055          * It is possible that vp is reclaimed at this point. Only
 2056          * routines that call us with a_unlock_vp == false can find
 2057          * that their vp has been reclaimed. There are three areas
 2058          * that are affected:
 2059          * 1) vn_open_cred() - later VOPs could fail, but
 2060          *    dead_open() returns 0 to simulate successful open.
 2061          * 2) ffs_snapshot() - creation of snapshot fails with EBADF.
 2062          * 3) NFS server (several places) - code is prepared to detect
 2063          *    and respond to dead vnodes by returning ESTALE.
 2064          */
 2065         VOP_LOCK(vp, vp_locked | LK_RETRY);
 2066         if (IS_UFS(vp))
 2067                 return (0);
 2068 
 2069         /*
 2070          * Try harder to recover from a reclaimed vp if the reclaim was
 2071          * not due to the underlying inode being cleared.  We saved the
 2072          * inode number and generation, so we can try to reinstantiate
 2073          * exactly the same version of the inode.  If this fails, return
 2074          * the original doomed vnode and let the caller handle the
 2075          * consequences.
 2076          *
 2077          * Note that callers must keep a write started around
 2078          * VOP_VPUT_PAIR() calls, so it is safe to use mp without
 2079          * busying it.
 2080          */
 2081         VOP_UNLOCK(vp);
 2082         error = ffs_inotovp(mp, ip_ino, ip_gen, LK_EXCLUSIVE, &vp1,
 2083             FFSV_REPLACE_DOOMED);
 2084         if (error != 0) {
 2085                 VOP_LOCK(vp, vp_locked | LK_RETRY);
 2086         } else {
 2087                 vrele(vp);
 2088                 *vpp = vp1;
 2089         }
 2090         return (error);
 2091 }
