FreeBSD/Linux Kernel Cross Reference
sys/ufs/ffs/ffs_vnops.c

    1 /*-
    2  * SPDX-License-Identifier: (BSD-2-Clause-FreeBSD AND BSD-3-Clause)
    3  *
    4  * Copyright (c) 2002, 2003 Networks Associates Technology, Inc.
    5  * All rights reserved.
    6  *
    7  * This software was developed for the FreeBSD Project by Marshall
    8  * Kirk McKusick and Network Associates Laboratories, the Security
    9  * Research Division of Network Associates, Inc. under DARPA/SPAWAR
   10  * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
   11  * research program.
   12  *
   13  * Redistribution and use in source and binary forms, with or without
   14  * modification, are permitted provided that the following conditions
   15  * are met:
   16  * 1. Redistributions of source code must retain the above copyright
   17  *    notice, this list of conditions and the following disclaimer.
   18  * 2. Redistributions in binary form must reproduce the above copyright
   19  *    notice, this list of conditions and the following disclaimer in the
   20  *    documentation and/or other materials provided with the distribution.
   21  *
   22  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   32  * SUCH DAMAGE.
   33  *
   34  * Copyright (c) 1982, 1986, 1989, 1993
   35  *      The Regents of the University of California.  All rights reserved.
   36  *
   37  * Redistribution and use in source and binary forms, with or without
   38  * modification, are permitted provided that the following conditions
   39  * are met:
   40  * 1. Redistributions of source code must retain the above copyright
   41  *    notice, this list of conditions and the following disclaimer.
   42  * 2. Redistributions in binary form must reproduce the above copyright
   43  *    notice, this list of conditions and the following disclaimer in the
   44  *    documentation and/or other materials provided with the distribution.
   45  * 3. Neither the name of the University nor the names of its contributors
   46  *    may be used to endorse or promote products derived from this software
   47  *    without specific prior written permission.
   48  *
   49  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   50  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   51  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   52  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   53  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   54  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   55  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   56  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   57  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   58  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   59  * SUCH DAMAGE.
   60  *
   61  *      from: @(#)ufs_readwrite.c       8.11 (Berkeley) 5/8/95
   62  * from: $FreeBSD: .../ufs/ufs_readwrite.c,v 1.96 2002/08/12 09:22:11 phk ...
   63  *      @(#)ffs_vnops.c 8.15 (Berkeley) 5/14/95
   64  */
   65 
   66 #include <sys/cdefs.h>
   67 __FBSDID("$FreeBSD: releng/12.0/sys/ufs/ffs/ffs_vnops.c 336280 2018-07-14 15:45:11Z cem $");
   68 
   69 #include <sys/param.h>
   70 #include <sys/bio.h>
   71 #include <sys/systm.h>
   72 #include <sys/buf.h>
   73 #include <sys/conf.h>
   74 #include <sys/extattr.h>
   75 #include <sys/kernel.h>
   76 #include <sys/limits.h>
   77 #include <sys/malloc.h>
   78 #include <sys/mount.h>
   79 #include <sys/priv.h>
   80 #include <sys/rwlock.h>
   81 #include <sys/stat.h>
   82 #include <sys/sysctl.h>
   83 #include <sys/vmmeter.h>
   84 #include <sys/vnode.h>
   85 
   86 #include <vm/vm.h>
   87 #include <vm/vm_param.h>
   88 #include <vm/vm_extern.h>
   89 #include <vm/vm_object.h>
   90 #include <vm/vm_page.h>
   91 #include <vm/vm_pager.h>
   92 #include <vm/vnode_pager.h>
   93 
   94 #include <ufs/ufs/extattr.h>
   95 #include <ufs/ufs/quota.h>
   96 #include <ufs/ufs/inode.h>
   97 #include <ufs/ufs/ufs_extern.h>
   98 #include <ufs/ufs/ufsmount.h>
   99 
  100 #include <ufs/ffs/fs.h>
  101 #include <ufs/ffs/ffs_extern.h>
  102 #include "opt_directio.h"
  103 #include "opt_ffs.h"
  104 
  105 #define ALIGNED_TO(ptr, s)      \
  106         (((uintptr_t)(ptr) & (_Alignof(s) - 1)) == 0)
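
       /*
        * Illustrative use (this is the assertion actually made in
        * ffs_findextattr() below): ALIGNED_TO(p, struct extattr) is true
        * exactly when ((uintptr_t)p % _Alignof(struct extattr)) == 0,
        * e.g.
        *
        *       KASSERT(ALIGNED_TO(ptr, struct extattr), ("unaligned"));
        *
        * The mask trick assumes _Alignof(s) is a power of two, which the
        * C standard guarantees for alignments.
        */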
  107 
  108 #ifdef DIRECTIO
  109 extern int      ffs_rawread(struct vnode *vp, struct uio *uio, int *workdone);
  110 #endif
  111 static vop_fdatasync_t  ffs_fdatasync;
  112 static vop_fsync_t      ffs_fsync;
  113 static vop_getpages_t   ffs_getpages;
  114 static vop_lock1_t      ffs_lock;
  115 static vop_read_t       ffs_read;
  116 static vop_write_t      ffs_write;
  117 static int      ffs_extread(struct vnode *vp, struct uio *uio, int ioflag);
  118 static int      ffs_extwrite(struct vnode *vp, struct uio *uio, int ioflag,
  119                     struct ucred *cred);
  120 static vop_strategy_t   ffsext_strategy;
  121 static vop_closeextattr_t       ffs_closeextattr;
  122 static vop_deleteextattr_t      ffs_deleteextattr;
  123 static vop_getextattr_t ffs_getextattr;
  124 static vop_listextattr_t        ffs_listextattr;
  125 static vop_openextattr_t        ffs_openextattr;
  126 static vop_setextattr_t ffs_setextattr;
  127 static vop_vptofh_t     ffs_vptofh;
  128 
  129 /* Global vfs data structures for ufs. */
  130 struct vop_vector ffs_vnodeops1 = {
  131         .vop_default =          &ufs_vnodeops,
  132         .vop_fsync =            ffs_fsync,
  133         .vop_fdatasync =        ffs_fdatasync,
  134         .vop_getpages =         ffs_getpages,
  135         .vop_getpages_async =   vnode_pager_local_getpages_async,
  136         .vop_lock1 =            ffs_lock,
  137         .vop_read =             ffs_read,
  138         .vop_reallocblks =      ffs_reallocblks,
  139         .vop_write =            ffs_write,
  140         .vop_vptofh =           ffs_vptofh,
  141 };
  142 
  143 struct vop_vector ffs_fifoops1 = {
  144         .vop_default =          &ufs_fifoops,
  145         .vop_fsync =            ffs_fsync,
  146         .vop_fdatasync =        ffs_fdatasync,
  147         .vop_reallocblks =      ffs_reallocblks, /* XXX: really ??? */
  148         .vop_vptofh =           ffs_vptofh,
  149 };
  150 
  151 /* Global vfs data structures for ufs. */
  152 struct vop_vector ffs_vnodeops2 = {
  153         .vop_default =          &ufs_vnodeops,
  154         .vop_fsync =            ffs_fsync,
  155         .vop_fdatasync =        ffs_fdatasync,
  156         .vop_getpages =         ffs_getpages,
  157         .vop_getpages_async =   vnode_pager_local_getpages_async,
  158         .vop_lock1 =            ffs_lock,
  159         .vop_read =             ffs_read,
  160         .vop_reallocblks =      ffs_reallocblks,
  161         .vop_write =            ffs_write,
  162         .vop_closeextattr =     ffs_closeextattr,
  163         .vop_deleteextattr =    ffs_deleteextattr,
  164         .vop_getextattr =       ffs_getextattr,
  165         .vop_listextattr =      ffs_listextattr,
  166         .vop_openextattr =      ffs_openextattr,
  167         .vop_setextattr =       ffs_setextattr,
  168         .vop_vptofh =           ffs_vptofh,
  169 };
  170 
  171 struct vop_vector ffs_fifoops2 = {
  172         .vop_default =          &ufs_fifoops,
  173         .vop_fsync =            ffs_fsync,
  174         .vop_fdatasync =        ffs_fdatasync,
  175         .vop_lock1 =            ffs_lock,
  176         .vop_reallocblks =      ffs_reallocblks,
  177         .vop_strategy =         ffsext_strategy,
  178         .vop_closeextattr =     ffs_closeextattr,
  179         .vop_deleteextattr =    ffs_deleteextattr,
  180         .vop_getextattr =       ffs_getextattr,
  181         .vop_listextattr =      ffs_listextattr,
  182         .vop_openextattr =      ffs_openextattr,
  183         .vop_setextattr =       ffs_setextattr,
  184         .vop_vptofh =           ffs_vptofh,
  185 };
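
       /*
        * The "1"/"2" suffixes track the on-disk format: vnodes backed by
        * a UFS1 filesystem get the *1 vectors and UFS2 vnodes get the *2
        * vectors (the choice is made when the vnode is instantiated; see
        * ffs_vfsops.c).  Only UFS2 inodes carry an extended attribute
        * area, which is why the extattr operations appear only in the
        * *2 vectors.
        */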
  186 
  187 /*
  188  * Synch an open file.
  189  */
  190 /* ARGSUSED */
  191 static int
  192 ffs_fsync(struct vop_fsync_args *ap)
  193 {
  194         struct vnode *vp;
  195         struct bufobj *bo;
  196         int error;
  197 
  198         vp = ap->a_vp;
  199         bo = &vp->v_bufobj;
  200 retry:
  201         error = ffs_syncvnode(vp, ap->a_waitfor, 0);
  202         if (error)
  203                 return (error);
  204         if (ap->a_waitfor == MNT_WAIT && DOINGSOFTDEP(vp)) {
  205                 error = softdep_fsync(vp);
  206                 if (error)
  207                         return (error);
  208 
   209                 /*
   210                  * The softdep_fsync() function may drop the vp
   211                  * lock, allowing dirty buffers to reappear on the
   212                  * bo_dirty list.  Recheck and resync as needed.
   213                  */
  214                 BO_LOCK(bo);
  215                 if ((vp->v_type == VREG || vp->v_type == VDIR) &&
  216                     (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0)) {
  217                         BO_UNLOCK(bo);
  218                         goto retry;
  219                 }
  220                 BO_UNLOCK(bo);
  221         }
  222         return (0);
  223 }
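
       /*
        * Roughly, the path here from userland is fsync(2) ->
        * kern_fsync() -> VOP_FSYNC(vp, MNT_WAIT) -> ffs_fsync().  The
        * retry loop exists because softdep_fsync() can sleep with the
        * vnode unlocked, so another thread may dirty buffers after
        * ffs_syncvnode() has already reported the vnode clean.
        */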
  224 
  225 int
  226 ffs_syncvnode(struct vnode *vp, int waitfor, int flags)
  227 {
  228         struct inode *ip;
  229         struct bufobj *bo;
  230         struct buf *bp, *nbp;
  231         ufs_lbn_t lbn;
  232         int error, passes;
  233         bool still_dirty, wait;
  234 
  235         ip = VTOI(vp);
  236         ip->i_flag &= ~IN_NEEDSYNC;
  237         bo = &vp->v_bufobj;
  238 
  239         /*
  240          * When doing MNT_WAIT we must first flush all dependencies
  241          * on the inode.
  242          */
  243         if (DOINGSOFTDEP(vp) && waitfor == MNT_WAIT &&
  244             (error = softdep_sync_metadata(vp)) != 0)
  245                 return (error);
  246 
  247         /*
  248          * Flush all dirty buffers associated with a vnode.
  249          */
  250         error = 0;
  251         passes = 0;
  252         wait = false;   /* Always do an async pass first. */
  253         lbn = lblkno(ITOFS(ip), (ip->i_size + ITOFS(ip)->fs_bsize - 1));
  254         BO_LOCK(bo);
  255 loop:
  256         TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
  257                 bp->b_vflags &= ~BV_SCANNED;
  258         TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
   259                 /*
   260                  * Reasons to skip this buffer: it has already been
   261                  * considered on this pass; it has dependencies that will
   262                  * cause it to be redirtied and has not yet been deferred;
   263                  * or it is already being written.
   264                  */
  265                 if ((bp->b_vflags & BV_SCANNED) != 0)
  266                         continue;
  267                 bp->b_vflags |= BV_SCANNED;
  268                 /*
  269                  * Flush indirects in order, if requested.
  270                  *
  271                  * Note that if only datasync is requested, we can
  272                  * skip indirect blocks when softupdates are not
  273                  * active.  Otherwise we must flush them with data,
  274                  * since dependencies prevent data block writes.
  275                  */
  276                 if (waitfor == MNT_WAIT && bp->b_lblkno <= -UFS_NDADDR &&
  277                     (lbn_level(bp->b_lblkno) >= passes ||
  278                     ((flags & DATA_ONLY) != 0 && !DOINGSOFTDEP(vp))))
  279                         continue;
  280                 if (bp->b_lblkno > lbn)
  281                         panic("ffs_syncvnode: syncing truncated data.");
  282                 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) == 0) {
  283                         BO_UNLOCK(bo);
  284                 } else if (wait) {
  285                         if (BUF_LOCK(bp,
  286                             LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
  287                             BO_LOCKPTR(bo)) != 0) {
  288                                 bp->b_vflags &= ~BV_SCANNED;
  289                                 goto next;
  290                         }
  291                 } else
  292                         continue;
  293                 if ((bp->b_flags & B_DELWRI) == 0)
  294                         panic("ffs_fsync: not dirty");
  295                 /*
  296                  * Check for dependencies and potentially complete them.
  297                  */
  298                 if (!LIST_EMPTY(&bp->b_dep) &&
  299                     (error = softdep_sync_buf(vp, bp,
  300                     wait ? MNT_WAIT : MNT_NOWAIT)) != 0) {
  301                         /* I/O error. */
  302                         if (error != EBUSY) {
  303                                 BUF_UNLOCK(bp);
  304                                 return (error);
  305                         }
  306                         /* If we deferred once, don't defer again. */
  307                         if ((bp->b_flags & B_DEFERRED) == 0) {
  308                                 bp->b_flags |= B_DEFERRED;
  309                                 BUF_UNLOCK(bp);
  310                                 goto next;
  311                         }
  312                 }
  313                 if (wait) {
  314                         bremfree(bp);
  315                         if ((error = bwrite(bp)) != 0)
  316                                 return (error);
  317                 } else if ((bp->b_flags & B_CLUSTEROK)) {
  318                         (void) vfs_bio_awrite(bp);
  319                 } else {
  320                         bremfree(bp);
  321                         (void) bawrite(bp);
  322                 }
  323 next:
  324                 /*
  325                  * Since we may have slept during the I/O, we need
  326                  * to start from a known point.
  327                  */
  328                 BO_LOCK(bo);
  329                 nbp = TAILQ_FIRST(&bo->bo_dirty.bv_hd);
  330         }
  331         if (waitfor != MNT_WAIT) {
  332                 BO_UNLOCK(bo);
  333                 if ((flags & NO_INO_UPDT) != 0)
  334                         return (0);
  335                 else
  336                         return (ffs_update(vp, 0));
  337         }
  338         /* Drain IO to see if we're done. */
  339         bufobj_wwait(bo, 0, 0);
  340         /*
  341          * Block devices associated with filesystems may have new I/O
  342          * requests posted for them even if the vnode is locked, so no
  343          * amount of trying will get them clean.  We make several passes
  344          * as a best effort.
  345          *
  346          * Regular files may need multiple passes to flush all dependency
  347          * work as it is possible that we must write once per indirect
  348          * level, once for the leaf, and once for the inode and each of
  349          * these will be done with one sync and one async pass.
  350          */
  351         if (bo->bo_dirty.bv_cnt > 0) {
  352                 if ((flags & DATA_ONLY) == 0) {
  353                         still_dirty = true;
  354                 } else {
  355                         /*
  356                          * For data-only sync, dirty indirect buffers
  357                          * are ignored.
  358                          */
  359                         still_dirty = false;
  360                         TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) {
  361                                 if (bp->b_lblkno > -UFS_NDADDR) {
  362                                         still_dirty = true;
  363                                         break;
  364                                 }
  365                         }
  366                 }
  367 
  368                 if (still_dirty) {
  369                         /* Write the inode after sync passes to flush deps. */
  370                         if (wait && DOINGSOFTDEP(vp) &&
  371                             (flags & NO_INO_UPDT) == 0) {
  372                                 BO_UNLOCK(bo);
  373                                 ffs_update(vp, 1);
  374                                 BO_LOCK(bo);
  375                         }
  376                         /* switch between sync/async. */
  377                         wait = !wait;
  378                         if (wait || ++passes < UFS_NIADDR + 2)
  379                                 goto loop;
  380                 }
  381         }
  382         BO_UNLOCK(bo);
  383         error = 0;
  384         if ((flags & DATA_ONLY) == 0) {
  385                 if ((flags & NO_INO_UPDT) == 0)
  386                         error = ffs_update(vp, 1);
  387                 if (DOINGSUJ(vp))
  388                         softdep_journal_fsync(VTOI(vp));
  389         }
  390         return (error);
  391 }
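
       /*
        * A worked illustration of the pass accounting above: with
        * UFS_NIADDR == 3, the loop allows up to UFS_NIADDR + 2 == 5
        * async passes, each paired with a sync pass: enough for the
        * data blocks, three levels of indirect blocks, and the inode
        * itself, each of which may need one async and one sync write
        * before its dependencies are fully flushed.
        */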
  392 
  393 static int
  394 ffs_fdatasync(struct vop_fdatasync_args *ap)
  395 {
  396 
  397         return (ffs_syncvnode(ap->a_vp, MNT_WAIT, DATA_ONLY));
  398 }
  399 
  400 static int
  401 ffs_lock(ap)
  402         struct vop_lock1_args /* {
  403                 struct vnode *a_vp;
  404                 int a_flags;
  405                 struct thread *a_td;
  406                 char *file;
  407                 int line;
  408         } */ *ap;
  409 {
  410 #ifndef NO_FFS_SNAPSHOT
  411         struct vnode *vp;
  412         int flags;
  413         struct lock *lkp;
  414         int result;
  415 
  416         switch (ap->a_flags & LK_TYPE_MASK) {
  417         case LK_SHARED:
  418         case LK_UPGRADE:
  419         case LK_EXCLUSIVE:
  420                 vp = ap->a_vp;
  421                 flags = ap->a_flags;
  422                 for (;;) {
  423 #ifdef DEBUG_VFS_LOCKS
  424                         KASSERT(vp->v_holdcnt != 0,
  425                             ("ffs_lock %p: zero hold count", vp));
  426 #endif
  427                         lkp = vp->v_vnlock;
  428                         result = _lockmgr_args(lkp, flags, VI_MTX(vp),
  429                             LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT,
  430                             ap->a_file, ap->a_line);
  431                         if (lkp == vp->v_vnlock || result != 0)
  432                                 break;
  433                         /*
  434                          * Apparent success, except that the vnode
  435                          * mutated between snapshot file vnode and
  436                          * regular file vnode while this process
  437                          * slept.  The lock currently held is not the
  438                          * right lock.  Release it, and try to get the
  439                          * new lock.
  440                          */
  441                         (void) _lockmgr_args(lkp, LK_RELEASE, NULL,
  442                             LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT,
  443                             ap->a_file, ap->a_line);
  444                         if ((flags & (LK_INTERLOCK | LK_NOWAIT)) ==
  445                             (LK_INTERLOCK | LK_NOWAIT))
  446                                 return (EBUSY);
  447                         if ((flags & LK_TYPE_MASK) == LK_UPGRADE)
  448                                 flags = (flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE;
  449                         flags &= ~LK_INTERLOCK;
  450                 }
  451                 break;
  452         default:
  453                 result = VOP_LOCK1_APV(&ufs_vnodeops, ap);
  454         }
  455         return (result);
  456 #else
  457         return (VOP_LOCK1_APV(&ufs_vnodeops, ap));
  458 #endif
  459 }
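
       /*
        * The loop above defends against v_vnlock changing identity
        * while we slept (snapshot vnodes share the per-filesystem
        * snapshot lock, and a vnode can convert between snapshot and
        * regular form).  The pattern, in sketch form:
        *
        *       lkp = vp->v_vnlock;
        *       lockmgr(lkp, flags, ...);
        *       if (lkp != vp->v_vnlock)
        *               unlock lkp and retry with the new lock
        *
        * i.e. a successfully acquired lock only counts if it is still
        * the lock the vnode points at.
        */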
  460 
  461 static int
  462 ffs_read_hole(struct uio *uio, long xfersize, long *size)
  463 {
  464         ssize_t saved_resid, tlen;
  465         int error;
  466 
  467         while (xfersize > 0) {
  468                 tlen = min(xfersize, ZERO_REGION_SIZE);
  469                 saved_resid = uio->uio_resid;
  470                 error = vn_io_fault_uiomove(__DECONST(void *, zero_region),
  471                     tlen, uio);
  472                 if (error != 0)
  473                         return (error);
  474                 tlen = saved_resid - uio->uio_resid;
  475                 xfersize -= tlen;
  476                 *size -= tlen;
  477         }
  478         return (0);
  479 }
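
       /*
        * ffs_read_hole() is reached from ffs_read() when bread_gb()
        * returns EJUSTRETURN for an unallocated block (GB_NOSPARSE is
        * set in bflag there): rather than instantiating a buffer for
        * the hole, the pre-zeroed kernel zero_region is copied out in
        * chunks of at most ZERO_REGION_SIZE bytes.
        */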
  480 
  481 /*
  482  * Vnode op for reading.
  483  */
  484 static int
  485 ffs_read(ap)
  486         struct vop_read_args /* {
  487                 struct vnode *a_vp;
  488                 struct uio *a_uio;
  489                 int a_ioflag;
  490                 struct ucred *a_cred;
  491         } */ *ap;
  492 {
  493         struct vnode *vp;
  494         struct inode *ip;
  495         struct uio *uio;
  496         struct fs *fs;
  497         struct buf *bp;
  498         ufs_lbn_t lbn, nextlbn;
  499         off_t bytesinfile;
  500         long size, xfersize, blkoffset;
  501         ssize_t orig_resid;
  502         int bflag, error, ioflag, seqcount;
  503 
  504         vp = ap->a_vp;
  505         uio = ap->a_uio;
  506         ioflag = ap->a_ioflag;
  507         if (ap->a_ioflag & IO_EXT)
  508 #ifdef notyet
  509                 return (ffs_extread(vp, uio, ioflag));
  510 #else
  511                 panic("ffs_read+IO_EXT");
  512 #endif
  513 #ifdef DIRECTIO
  514         if ((ioflag & IO_DIRECT) != 0) {
  515                 int workdone;
  516 
  517                 error = ffs_rawread(vp, uio, &workdone);
  518                 if (error != 0 || workdone != 0)
  519                         return error;
  520         }
  521 #endif
  522 
  523         seqcount = ap->a_ioflag >> IO_SEQSHIFT;
  524         ip = VTOI(vp);
  525 
  526 #ifdef INVARIANTS
  527         if (uio->uio_rw != UIO_READ)
  528                 panic("ffs_read: mode");
  529 
  530         if (vp->v_type == VLNK) {
  531                 if ((int)ip->i_size < vp->v_mount->mnt_maxsymlinklen)
  532                         panic("ffs_read: short symlink");
  533         } else if (vp->v_type != VREG && vp->v_type != VDIR)
  534                 panic("ffs_read: type %d",  vp->v_type);
  535 #endif
  536         orig_resid = uio->uio_resid;
  537         KASSERT(orig_resid >= 0, ("ffs_read: uio->uio_resid < 0"));
  538         if (orig_resid == 0)
  539                 return (0);
  540         KASSERT(uio->uio_offset >= 0, ("ffs_read: uio->uio_offset < 0"));
  541         fs = ITOFS(ip);
  542         if (uio->uio_offset < ip->i_size &&
  543             uio->uio_offset >= fs->fs_maxfilesize)
  544                 return (EOVERFLOW);
  545 
  546         bflag = GB_UNMAPPED | (uio->uio_segflg == UIO_NOCOPY ? 0 : GB_NOSPARSE);
  547         for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
  548                 if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0)
  549                         break;
  550                 lbn = lblkno(fs, uio->uio_offset);
  551                 nextlbn = lbn + 1;
  552 
   553                 /*
   554                  * Size of buffer.  The buffer representing the
   555                  * end of the file is rounded up to the size of
   556                  * the block type (fragment or full block,
   557                  * depending).
   558                  */
  559                 size = blksize(fs, ip, lbn);
  560                 blkoffset = blkoff(fs, uio->uio_offset);
  561 
  562                 /*
  563                  * The amount we want to transfer in this iteration is
  564                  * one FS block less the amount of the data before
  565                  * our startpoint (duh!)
  566                  */
  567                 xfersize = fs->fs_bsize - blkoffset;
  568 
  569                 /*
  570                  * But if we actually want less than the block,
  571                  * or the file doesn't have a whole block more of data,
  572                  * then use the lesser number.
  573                  */
  574                 if (uio->uio_resid < xfersize)
  575                         xfersize = uio->uio_resid;
  576                 if (bytesinfile < xfersize)
  577                         xfersize = bytesinfile;
  578 
  579                 if (lblktosize(fs, nextlbn) >= ip->i_size) {
  580                         /*
  581                          * Don't do readahead if this is the end of the file.
  582                          */
  583                         error = bread_gb(vp, lbn, size, NOCRED, bflag, &bp);
  584                 } else if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) {
  585                         /*
  586                          * Otherwise if we are allowed to cluster,
  587                          * grab as much as we can.
  588                          *
  589                          * XXX  This may not be a win if we are not
  590                          * doing sequential access.
  591                          */
  592                         error = cluster_read(vp, ip->i_size, lbn,
  593                             size, NOCRED, blkoffset + uio->uio_resid,
  594                             seqcount, bflag, &bp);
  595                 } else if (seqcount > 1) {
  596                         /*
  597                          * If we are NOT allowed to cluster, then
  598                          * if we appear to be acting sequentially,
  599                          * fire off a request for a readahead
  600                          * as well as a read. Note that the 4th and 5th
  601                          * arguments point to arrays of the size specified in
  602                          * the 6th argument.
  603                          */
  604                         u_int nextsize = blksize(fs, ip, nextlbn);
  605                         error = breadn_flags(vp, lbn, size, &nextlbn,
  606                             &nextsize, 1, NOCRED, bflag, NULL, &bp);
  607                 } else {
  608                         /*
  609                          * Failing all of the above, just read what the
  610                          * user asked for. Interestingly, the same as
  611                          * the first option above.
  612                          */
  613                         error = bread_gb(vp, lbn, size, NOCRED, bflag, &bp);
  614                 }
  615                 if (error == EJUSTRETURN) {
  616                         error = ffs_read_hole(uio, xfersize, &size);
  617                         if (error == 0)
  618                                 continue;
  619                 }
  620                 if (error != 0) {
  621                         brelse(bp);
  622                         bp = NULL;
  623                         break;
  624                 }
  625 
  626                 /*
  627                  * We should only get non-zero b_resid when an I/O error
  628                  * has occurred, which should cause us to break above.
  629                  * However, if the short read did not cause an error,
  630                  * then we want to ensure that we do not uiomove bad
  631                  * or uninitialized data.
  632                  */
  633                 size -= bp->b_resid;
  634                 if (size < xfersize) {
  635                         if (size == 0)
  636                                 break;
  637                         xfersize = size;
  638                 }
  639 
  640                 if (buf_mapped(bp)) {
  641                         error = vn_io_fault_uiomove((char *)bp->b_data +
  642                             blkoffset, (int)xfersize, uio);
  643                 } else {
  644                         error = vn_io_fault_pgmove(bp->b_pages, blkoffset,
  645                             (int)xfersize, uio);
  646                 }
  647                 if (error)
  648                         break;
  649 
  650                 vfs_bio_brelse(bp, ioflag);
  651         }
  652 
   653         /*
   654          * This can only happen in the case of an error,
   655          * because the loop above resets bp to NULL on each iteration
   656          * and on normal completion has not set a new value into it,
   657          * so it must have come from a 'break' statement.
   658          */
  659         if (bp != NULL)
  660                 vfs_bio_brelse(bp, ioflag);
  661 
  662         if ((error == 0 || uio->uio_resid != orig_resid) &&
  663             (vp->v_mount->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0 &&
  664             (ip->i_flag & IN_ACCESS) == 0) {
  665                 VI_LOCK(vp);
  666                 ip->i_flag |= IN_ACCESS;
  667                 VI_UNLOCK(vp);
  668         }
  669         return (error);
  670 }
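
       /*
        * Note that a successful read does not update the on-disk
        * access time here; it merely sets IN_ACCESS, and the timestamp
        * is folded into the inode later (e.g. via ufs_itimes()), which
        * keeps the common read path cheap.
        */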
  671 
  672 /*
  673  * Vnode op for writing.
  674  */
  675 static int
  676 ffs_write(ap)
  677         struct vop_write_args /* {
  678                 struct vnode *a_vp;
  679                 struct uio *a_uio;
  680                 int a_ioflag;
  681                 struct ucred *a_cred;
  682         } */ *ap;
  683 {
  684         struct vnode *vp;
  685         struct uio *uio;
  686         struct inode *ip;
  687         struct fs *fs;
  688         struct buf *bp;
  689         ufs_lbn_t lbn;
  690         off_t osize;
  691         ssize_t resid;
  692         int seqcount;
  693         int blkoffset, error, flags, ioflag, size, xfersize;
  694 
  695         vp = ap->a_vp;
  696         uio = ap->a_uio;
  697         ioflag = ap->a_ioflag;
  698         if (ap->a_ioflag & IO_EXT)
  699 #ifdef notyet
  700                 return (ffs_extwrite(vp, uio, ioflag, ap->a_cred));
  701 #else
  702                 panic("ffs_write+IO_EXT");
  703 #endif
  704 
  705         seqcount = ap->a_ioflag >> IO_SEQSHIFT;
  706         ip = VTOI(vp);
  707 
  708 #ifdef INVARIANTS
  709         if (uio->uio_rw != UIO_WRITE)
  710                 panic("ffs_write: mode");
  711 #endif
  712 
  713         switch (vp->v_type) {
  714         case VREG:
  715                 if (ioflag & IO_APPEND)
  716                         uio->uio_offset = ip->i_size;
  717                 if ((ip->i_flags & APPEND) && uio->uio_offset != ip->i_size)
  718                         return (EPERM);
  719                 /* FALLTHROUGH */
  720         case VLNK:
  721                 break;
  722         case VDIR:
  723                 panic("ffs_write: dir write");
  724                 break;
  725         default:
  726                 panic("ffs_write: type %p %d (%d,%d)", vp, (int)vp->v_type,
  727                         (int)uio->uio_offset,
  728                         (int)uio->uio_resid
  729                 );
  730         }
  731 
  732         KASSERT(uio->uio_resid >= 0, ("ffs_write: uio->uio_resid < 0"));
  733         KASSERT(uio->uio_offset >= 0, ("ffs_write: uio->uio_offset < 0"));
  734         fs = ITOFS(ip);
  735         if ((uoff_t)uio->uio_offset + uio->uio_resid > fs->fs_maxfilesize)
  736                 return (EFBIG);
  737         /*
  738          * Maybe this should be above the vnode op call, but so long as
  739          * file servers have no limits, I don't think it matters.
  740          */
  741         if (vn_rlimit_fsize(vp, uio, uio->uio_td))
  742                 return (EFBIG);
  743 
  744         resid = uio->uio_resid;
  745         osize = ip->i_size;
  746         if (seqcount > BA_SEQMAX)
  747                 flags = BA_SEQMAX << BA_SEQSHIFT;
  748         else
  749                 flags = seqcount << BA_SEQSHIFT;
  750         if (ioflag & IO_SYNC)
  751                 flags |= IO_SYNC;
  752         flags |= BA_UNMAPPED;
  753 
  754         for (error = 0; uio->uio_resid > 0;) {
  755                 lbn = lblkno(fs, uio->uio_offset);
  756                 blkoffset = blkoff(fs, uio->uio_offset);
  757                 xfersize = fs->fs_bsize - blkoffset;
  758                 if (uio->uio_resid < xfersize)
  759                         xfersize = uio->uio_resid;
  760                 if (uio->uio_offset + xfersize > ip->i_size)
  761                         vnode_pager_setsize(vp, uio->uio_offset + xfersize);
  762 
  763                 /*
  764                  * We must perform a read-before-write if the transfer size
  765                  * does not cover the entire buffer.
  766                  */
  767                 if (fs->fs_bsize > xfersize)
  768                         flags |= BA_CLRBUF;
  769                 else
  770                         flags &= ~BA_CLRBUF;
  771 /* XXX is uio->uio_offset the right thing here? */
  772                 error = UFS_BALLOC(vp, uio->uio_offset, xfersize,
  773                     ap->a_cred, flags, &bp);
  774                 if (error != 0) {
  775                         vnode_pager_setsize(vp, ip->i_size);
  776                         break;
  777                 }
  778                 if ((ioflag & (IO_SYNC|IO_INVAL)) == (IO_SYNC|IO_INVAL))
  779                         bp->b_flags |= B_NOCACHE;
  780 
  781                 if (uio->uio_offset + xfersize > ip->i_size) {
  782                         ip->i_size = uio->uio_offset + xfersize;
  783                         DIP_SET(ip, i_size, ip->i_size);
  784                 }
  785 
  786                 size = blksize(fs, ip, lbn) - bp->b_resid;
  787                 if (size < xfersize)
  788                         xfersize = size;
  789 
  790                 if (buf_mapped(bp)) {
  791                         error = vn_io_fault_uiomove((char *)bp->b_data +
  792                             blkoffset, (int)xfersize, uio);
  793                 } else {
  794                         error = vn_io_fault_pgmove(bp->b_pages, blkoffset,
  795                             (int)xfersize, uio);
  796                 }
  797                 /*
  798                  * If the buffer is not already filled and we encounter an
  799                  * error while trying to fill it, we have to clear out any
  800                  * garbage data from the pages instantiated for the buffer.
  801                  * If we do not, a failed uiomove() during a write can leave
  802                  * the prior contents of the pages exposed to a userland mmap.
  803                  *
  804                  * Note that we need only clear buffers with a transfer size
  805                  * equal to the block size because buffers with a shorter
  806                  * transfer size were cleared above by the call to UFS_BALLOC()
  807                  * with the BA_CLRBUF flag set.
  808                  *
  809                  * If the source region for uiomove identically mmaps the
  810                  * buffer, uiomove() performed the NOP copy, and the buffer
  811                  * content remains valid because the page fault handler
  812                  * validated the pages.
  813                  */
  814                 if (error != 0 && (bp->b_flags & B_CACHE) == 0 &&
  815                     fs->fs_bsize == xfersize)
  816                         vfs_bio_clrbuf(bp);
  817 
  818                 vfs_bio_set_flags(bp, ioflag);
  819 
  820                 /*
  821                  * If IO_SYNC each buffer is written synchronously.  Otherwise
  822                  * if we have a severe page deficiency write the buffer
  823                  * asynchronously.  Otherwise try to cluster, and if that
  824                  * doesn't do it then either do an async write (if O_DIRECT),
  825                  * or a delayed write (if not).
  826                  */
  827                 if (ioflag & IO_SYNC) {
  828                         (void)bwrite(bp);
  829                 } else if (vm_page_count_severe() ||
  830                             buf_dirty_count_severe() ||
  831                             (ioflag & IO_ASYNC)) {
  832                         bp->b_flags |= B_CLUSTEROK;
  833                         bawrite(bp);
  834                 } else if (xfersize + blkoffset == fs->fs_bsize) {
  835                         if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0) {
  836                                 bp->b_flags |= B_CLUSTEROK;
  837                                 cluster_write(vp, bp, ip->i_size, seqcount,
  838                                     GB_UNMAPPED);
  839                         } else {
  840                                 bawrite(bp);
  841                         }
  842                 } else if (ioflag & IO_DIRECT) {
  843                         bp->b_flags |= B_CLUSTEROK;
  844                         bawrite(bp);
  845                 } else {
  846                         bp->b_flags |= B_CLUSTEROK;
  847                         bdwrite(bp);
  848                 }
  849                 if (error || xfersize == 0)
  850                         break;
  851                 ip->i_flag |= IN_CHANGE | IN_UPDATE;
  852         }
  853         /*
  854          * If we successfully wrote any data, and we are not the superuser
  855          * we clear the setuid and setgid bits as a precaution against
  856          * tampering.
  857          */
  858         if ((ip->i_mode & (ISUID | ISGID)) && resid > uio->uio_resid &&
  859             ap->a_cred) {
  860                 if (priv_check_cred(ap->a_cred, PRIV_VFS_RETAINSUGID, 0)) {
  861                         ip->i_mode &= ~(ISUID | ISGID);
  862                         DIP_SET(ip, i_mode, ip->i_mode);
  863                 }
  864         }
  865         if (error) {
  866                 if (ioflag & IO_UNIT) {
  867                         (void)ffs_truncate(vp, osize,
  868                             IO_NORMAL | (ioflag & IO_SYNC), ap->a_cred);
  869                         uio->uio_offset -= resid - uio->uio_resid;
  870                         uio->uio_resid = resid;
  871                 }
  872         } else if (resid > uio->uio_resid && (ioflag & IO_SYNC))
  873                 error = ffs_update(vp, 1);
  874         return (error);
  875 }
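
       /*
        * IO_UNIT, by example: if a 64KB write fails after 48KB have
        * been copied in, the error path above truncates the file back
        * to osize and rewinds uio, so the caller observes either the
        * whole write or none of it; without IO_UNIT the partial 48KB
        * would be left in place.
        */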
  876 
  877 /*
  878  * Extended attribute area reading.
  879  */
  880 static int
  881 ffs_extread(struct vnode *vp, struct uio *uio, int ioflag)
  882 {
  883         struct inode *ip;
  884         struct ufs2_dinode *dp;
  885         struct fs *fs;
  886         struct buf *bp;
  887         ufs_lbn_t lbn, nextlbn;
  888         off_t bytesinfile;
  889         long size, xfersize, blkoffset;
  890         ssize_t orig_resid;
  891         int error;
  892 
  893         ip = VTOI(vp);
  894         fs = ITOFS(ip);
  895         dp = ip->i_din2;
  896 
  897 #ifdef INVARIANTS
  898         if (uio->uio_rw != UIO_READ || fs->fs_magic != FS_UFS2_MAGIC)
  899                 panic("ffs_extread: mode");
  900 
  901 #endif
  902         orig_resid = uio->uio_resid;
  903         KASSERT(orig_resid >= 0, ("ffs_extread: uio->uio_resid < 0"));
  904         if (orig_resid == 0)
  905                 return (0);
  906         KASSERT(uio->uio_offset >= 0, ("ffs_extread: uio->uio_offset < 0"));
  907 
  908         for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
  909                 if ((bytesinfile = dp->di_extsize - uio->uio_offset) <= 0)
  910                         break;
  911                 lbn = lblkno(fs, uio->uio_offset);
  912                 nextlbn = lbn + 1;
  913 
   914                 /*
   915                  * Size of buffer.  The buffer representing the
   916                  * end of the file is rounded up to the size of
   917                  * the block type (fragment or full block,
   918                  * depending).
   919                  */
  920                 size = sblksize(fs, dp->di_extsize, lbn);
  921                 blkoffset = blkoff(fs, uio->uio_offset);
  922 
  923                 /*
  924                  * The amount we want to transfer in this iteration is
  925                  * one FS block less the amount of the data before
  926                  * our startpoint (duh!)
   927                  * our startpoint (duh!).
  928                 xfersize = fs->fs_bsize - blkoffset;
  929 
  930                 /*
  931                  * But if we actually want less than the block,
  932                  * or the file doesn't have a whole block more of data,
  933                  * then use the lesser number.
  934                  */
  935                 if (uio->uio_resid < xfersize)
  936                         xfersize = uio->uio_resid;
  937                 if (bytesinfile < xfersize)
  938                         xfersize = bytesinfile;
  939 
  940                 if (lblktosize(fs, nextlbn) >= dp->di_extsize) {
  941                         /*
   942                          * Don't do readahead if this is the end of the EA area.
  943                          */
  944                         error = bread(vp, -1 - lbn, size, NOCRED, &bp);
  945                 } else {
  946                         /*
  947                          * If we have a second block, then
  948                          * fire off a request for a readahead
  949                          * as well as a read. Note that the 4th and 5th
  950                          * arguments point to arrays of the size specified in
  951                          * the 6th argument.
  952                          */
  953                         u_int nextsize = sblksize(fs, dp->di_extsize, nextlbn);
  954 
  955                         nextlbn = -1 - nextlbn;
  956                         error = breadn(vp, -1 - lbn,
  957                             size, &nextlbn, &nextsize, 1, NOCRED, &bp);
  958                 }
  959                 if (error) {
  960                         brelse(bp);
  961                         bp = NULL;
  962                         break;
  963                 }
  964 
  965                 /*
  966                  * We should only get non-zero b_resid when an I/O error
  967                  * has occurred, which should cause us to break above.
  968                  * However, if the short read did not cause an error,
  969                  * then we want to ensure that we do not uiomove bad
  970                  * or uninitialized data.
  971                  */
  972                 size -= bp->b_resid;
  973                 if (size < xfersize) {
  974                         if (size == 0)
  975                                 break;
  976                         xfersize = size;
  977                 }
  978 
  979                 error = uiomove((char *)bp->b_data + blkoffset,
  980                                         (int)xfersize, uio);
  981                 if (error)
  982                         break;
  983                 vfs_bio_brelse(bp, ioflag);
  984         }
  985 
   986         /*
   987          * This can only happen in the case of an error,
   988          * because the loop above resets bp to NULL on each iteration
   989          * and on normal completion has not set a new value into it,
   990          * so it must have come from a 'break' statement.
   991          */
  992         if (bp != NULL)
  993                 vfs_bio_brelse(bp, ioflag);
  994         return (error);
  995 }
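
       /*
        * Extended attribute blocks live at negative logical block
        * numbers: EA block N is addressed as -1 - N (block 0 becomes
        * -1, block 1 becomes -2, ...), which keeps the EA area disjoint
        * from regular file data within the same buffer cache object.
        * ffsext_strategy() below depends on the same encoding.
        */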
  996 
  997 /*
  998  * Extended attribute area writing.
  999  */
 1000 static int
 1001 ffs_extwrite(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *ucred)
 1002 {
 1003         struct inode *ip;
 1004         struct ufs2_dinode *dp;
 1005         struct fs *fs;
 1006         struct buf *bp;
 1007         ufs_lbn_t lbn;
 1008         off_t osize;
 1009         ssize_t resid;
 1010         int blkoffset, error, flags, size, xfersize;
 1011 
 1012         ip = VTOI(vp);
 1013         fs = ITOFS(ip);
 1014         dp = ip->i_din2;
 1015 
 1016 #ifdef INVARIANTS
 1017         if (uio->uio_rw != UIO_WRITE || fs->fs_magic != FS_UFS2_MAGIC)
 1018                 panic("ffs_extwrite: mode");
 1019 #endif
 1020 
 1021         if (ioflag & IO_APPEND)
 1022                 uio->uio_offset = dp->di_extsize;
 1023         KASSERT(uio->uio_offset >= 0, ("ffs_extwrite: uio->uio_offset < 0"));
 1024         KASSERT(uio->uio_resid >= 0, ("ffs_extwrite: uio->uio_resid < 0"));
 1025         if ((uoff_t)uio->uio_offset + uio->uio_resid >
 1026             UFS_NXADDR * fs->fs_bsize)
 1027                 return (EFBIG);
 1028 
 1029         resid = uio->uio_resid;
 1030         osize = dp->di_extsize;
 1031         flags = IO_EXT;
 1032         if (ioflag & IO_SYNC)
 1033                 flags |= IO_SYNC;
 1034 
 1035         for (error = 0; uio->uio_resid > 0;) {
 1036                 lbn = lblkno(fs, uio->uio_offset);
 1037                 blkoffset = blkoff(fs, uio->uio_offset);
 1038                 xfersize = fs->fs_bsize - blkoffset;
 1039                 if (uio->uio_resid < xfersize)
 1040                         xfersize = uio->uio_resid;
 1041 
 1042                 /*
 1043                  * We must perform a read-before-write if the transfer size
 1044                  * does not cover the entire buffer.
 1045                  */
 1046                 if (fs->fs_bsize > xfersize)
 1047                         flags |= BA_CLRBUF;
 1048                 else
 1049                         flags &= ~BA_CLRBUF;
 1050                 error = UFS_BALLOC(vp, uio->uio_offset, xfersize,
 1051                     ucred, flags, &bp);
 1052                 if (error != 0)
 1053                         break;
 1054                 /*
 1055                  * If the buffer is not valid we have to clear out any
 1056                  * garbage data from the pages instantiated for the buffer.
 1057                  * If we do not, a failed uiomove() during a write can leave
 1058                  * the prior contents of the pages exposed to a userland
 1059                  * mmap().  XXX deal with uiomove() errors a better way.
 1060                  */
 1061                 if ((bp->b_flags & B_CACHE) == 0 && fs->fs_bsize <= xfersize)
 1062                         vfs_bio_clrbuf(bp);
 1063 
 1064                 if (uio->uio_offset + xfersize > dp->di_extsize)
 1065                         dp->di_extsize = uio->uio_offset + xfersize;
 1066 
 1067                 size = sblksize(fs, dp->di_extsize, lbn) - bp->b_resid;
 1068                 if (size < xfersize)
 1069                         xfersize = size;
 1070 
 1071                 error =
 1072                     uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio);
 1073 
 1074                 vfs_bio_set_flags(bp, ioflag);
 1075 
 1076                 /*
 1077                  * If IO_SYNC each buffer is written synchronously.  Otherwise
 1078                  * if we have a severe page deficiency write the buffer
 1079                  * asynchronously.  Otherwise try to cluster, and if that
 1080                  * doesn't do it then either do an async write (if O_DIRECT),
 1081                  * or a delayed write (if not).
 1082                  */
 1083                 if (ioflag & IO_SYNC) {
 1084                         (void)bwrite(bp);
 1085                 } else if (vm_page_count_severe() ||
 1086                             buf_dirty_count_severe() ||
 1087                             xfersize + blkoffset == fs->fs_bsize ||
 1088                             (ioflag & (IO_ASYNC | IO_DIRECT)))
 1089                         bawrite(bp);
 1090                 else
 1091                         bdwrite(bp);
 1092                 if (error || xfersize == 0)
 1093                         break;
 1094                 ip->i_flag |= IN_CHANGE;
 1095         }
 1096         /*
 1097          * If we successfully wrote any data, and we are not the superuser
 1098          * we clear the setuid and setgid bits as a precaution against
 1099          * tampering.
 1100          */
 1101         if ((ip->i_mode & (ISUID | ISGID)) && resid > uio->uio_resid && ucred) {
 1102                 if (priv_check_cred(ucred, PRIV_VFS_RETAINSUGID, 0)) {
 1103                         ip->i_mode &= ~(ISUID | ISGID);
 1104                         dp->di_mode = ip->i_mode;
 1105                 }
 1106         }
 1107         if (error) {
 1108                 if (ioflag & IO_UNIT) {
 1109                         (void)ffs_truncate(vp, osize,
 1110                             IO_EXT | (ioflag&IO_SYNC), ucred);
 1111                         uio->uio_offset -= resid - uio->uio_resid;
 1112                         uio->uio_resid = resid;
 1113                 }
 1114         } else if (resid > uio->uio_resid && (ioflag & IO_SYNC))
 1115                 error = ffs_update(vp, 1);
 1116         return (error);
 1117 }
 1118 
 1119 
 1120 /*
  1121  * Vnode operation to retrieve a named extended attribute.
 1122  *
 1123  * Locate a particular EA (nspace:name) in the area (ptr:length), and return
  1124  * the length of the EA and, optionally, pointers to the entry and its data.
 1125  */
 1126 static int
 1127 ffs_findextattr(u_char *ptr, u_int length, int nspace, const char *name,
 1128     struct extattr **eapp, u_char **eac)
 1129 {
 1130         struct extattr *eap, *eaend;
 1131         size_t nlen;
 1132 
 1133         nlen = strlen(name);
 1134         KASSERT(ALIGNED_TO(ptr, struct extattr), ("unaligned"));
 1135         eap = (struct extattr *)ptr;
 1136         eaend = (struct extattr *)(ptr + length);
 1137         for (; eap < eaend; eap = EXTATTR_NEXT(eap)) {
 1138                 /* make sure this entry is complete */
 1139                 if (EXTATTR_NEXT(eap) > eaend)
 1140                         break;
 1141                 if (eap->ea_namespace != nspace || eap->ea_namelength != nlen
 1142                     || memcmp(eap->ea_name, name, nlen) != 0)
 1143                         continue;
 1144                 if (eapp != NULL)
 1145                         *eapp = eap;
 1146                 if (eac != NULL)
 1147                         *eac = EXTATTR_CONTENT(eap);
 1148                 return (EXTATTR_CONTENT_SIZE(eap));
 1149         }
 1150         return (-1);
 1151 }
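
       /*
        * A sketch of the record layout being walked (see
        * <ufs/ufs/extattr.h> for the authoritative definition):
        *
        *       struct extattr {
        *               uint32_t ea_length;         whole record length
        *               uint8_t  ea_namespace;
        *               uint8_t  ea_contentpadlen;
        *               uint8_t  ea_namelength;
        *               char     ea_name[1];        null-terminated name
        *       };
        *
        * EXTATTR_NEXT() advances by ea_length, so a truncated final
        * record is detected when the next record would start past
        * eaend.
        */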
 1152 
 1153 static int
 1154 ffs_rdextattr(u_char **p, struct vnode *vp, struct thread *td, int extra)
 1155 {
 1156         struct inode *ip;
 1157         struct ufs2_dinode *dp;
 1158         struct fs *fs;
 1159         struct uio luio;
 1160         struct iovec liovec;
 1161         u_int easize;
 1162         int error;
 1163         u_char *eae;
 1164 
 1165         ip = VTOI(vp);
 1166         fs = ITOFS(ip);
 1167         dp = ip->i_din2;
 1168         easize = dp->di_extsize;
 1169         if ((uoff_t)easize + extra > UFS_NXADDR * fs->fs_bsize)
 1170                 return (EFBIG);
 1171 
 1172         eae = malloc(easize + extra, M_TEMP, M_WAITOK);
 1173 
 1174         liovec.iov_base = eae;
 1175         liovec.iov_len = easize;
 1176         luio.uio_iov = &liovec;
 1177         luio.uio_iovcnt = 1;
 1178         luio.uio_offset = 0;
 1179         luio.uio_resid = easize;
 1180         luio.uio_segflg = UIO_SYSSPACE;
 1181         luio.uio_rw = UIO_READ;
 1182         luio.uio_td = td;
 1183 
 1184         error = ffs_extread(vp, &luio, IO_EXT | IO_SYNC);
 1185         if (error) {
 1186                 free(eae, M_TEMP);
  1187                 return (error);
 1188         }
 1189         *p = eae;
 1190         return (0);
 1191 }
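
       /*
        * The 'extra' argument simply pads the allocation beyond the
        * current EA area so a caller can grow the area in place
        * without reallocating; the open path below passes 0.
        */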
 1192 
 1193 static void
 1194 ffs_lock_ea(struct vnode *vp)
 1195 {
 1196         struct inode *ip;
 1197 
 1198         ip = VTOI(vp);
 1199         VI_LOCK(vp);
 1200         while (ip->i_flag & IN_EA_LOCKED) {
 1201                 ip->i_flag |= IN_EA_LOCKWAIT;
 1202                 msleep(&ip->i_ea_refs, &vp->v_interlock, PINOD + 2, "ufs_ea",
 1203                     0);
 1204         }
 1205         ip->i_flag |= IN_EA_LOCKED;
 1206         VI_UNLOCK(vp);
 1207 }
 1208 
 1209 static void
 1210 ffs_unlock_ea(struct vnode *vp)
 1211 {
 1212         struct inode *ip;
 1213 
 1214         ip = VTOI(vp);
 1215         VI_LOCK(vp);
 1216         if (ip->i_flag & IN_EA_LOCKWAIT)
 1217                 wakeup(&ip->i_ea_refs);
 1218         ip->i_flag &= ~(IN_EA_LOCKED | IN_EA_LOCKWAIT);
 1219         VI_UNLOCK(vp);
 1220 }
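
       /*
        * ffs_lock_ea()/ffs_unlock_ea() form a small sleep lock over the
        * in-core EA area, built on the vnode interlock: IN_EA_LOCKED is
        * the lock bit, IN_EA_LOCKWAIT marks that someone slept on
        * &ip->i_ea_refs, and wakeup() uses the same address as the
        * channel.  Nested opens are handled by the i_ea_refs count in
        * ffs_open_ea()/ffs_close_ea(), not by lock recursion.
        */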
 1221 
 1222 static int
 1223 ffs_open_ea(struct vnode *vp, struct ucred *cred, struct thread *td)
 1224 {
 1225         struct inode *ip;
 1226         struct ufs2_dinode *dp;
 1227         int error;
 1228 
 1229         ip = VTOI(vp);
 1230 
 1231         ffs_lock_ea(vp);
 1232         if (ip->i_ea_area != NULL) {
 1233                 ip->i_ea_refs++;
 1234                 ffs_unlock_ea(vp);
 1235                 return (0);
 1236         }
 1237         dp = ip->i_din2;
 1238         error = ffs_rdextattr(&ip->i_ea_area, vp, td, 0);
 1239         if (error) {
 1240                 ffs_unlock_ea(vp);
 1241                 return (error);
 1242         }
 1243         ip->i_ea_len = dp->di_extsize;
 1244         ip->i_ea_error = 0;
 1245         ip->i_ea_refs++;
 1246         ffs_unlock_ea(vp);
 1247         return (0);
 1248 }
 1249 
 1250 /*
 1251  * Vnode extattr transaction commit/abort
 1252  */
 1253 static int
 1254 ffs_close_ea(struct vnode *vp, int commit, struct ucred *cred, struct thread *td)
 1255 {
 1256         struct inode *ip;
 1257         struct uio luio;
 1258         struct iovec liovec;
 1259         int error;
 1260         struct ufs2_dinode *dp;
 1261 
 1262         ip = VTOI(vp);
 1263 
 1264         ffs_lock_ea(vp);
 1265         if (ip->i_ea_area == NULL) {
 1266                 ffs_unlock_ea(vp);
 1267                 return (EINVAL);
 1268         }
 1269         dp = ip->i_din2;
 1270         error = ip->i_ea_error;
 1271         if (commit && error == 0) {
 1272                 ASSERT_VOP_ELOCKED(vp, "ffs_close_ea commit");
 1273                 if (cred == NOCRED)
 1274                         cred =  vp->v_mount->mnt_cred;
 1275                 liovec.iov_base = ip->i_ea_area;
 1276                 liovec.iov_len = ip->i_ea_len;
 1277                 luio.uio_iov = &liovec;
 1278                 luio.uio_iovcnt = 1;
 1279                 luio.uio_offset = 0;
 1280                 luio.uio_resid = ip->i_ea_len;
 1281                 luio.uio_segflg = UIO_SYSSPACE;
 1282                 luio.uio_rw = UIO_WRITE;
 1283                 luio.uio_td = td;
 1284                 /* XXX: I'm not happy about truncating to zero size */
 1285                 if (ip->i_ea_len < dp->di_extsize)
 1286                         error = ffs_truncate(vp, 0, IO_EXT, cred);
 1287                 error = ffs_extwrite(vp, &luio, IO_EXT | IO_SYNC, cred);
 1288         }
 1289         if (--ip->i_ea_refs == 0) {
 1290                 free(ip->i_ea_area, M_TEMP);
 1291                 ip->i_ea_area = NULL;
 1292                 ip->i_ea_len = 0;
 1293                 ip->i_ea_error = 0;
 1294         }
 1295         ffs_unlock_ea(vp);
 1296         return (error);
 1297 }
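
       /*
        * On commit the whole in-core copy is written back with
        * ffs_extwrite(IO_EXT | IO_SYNC); when the new area is shorter
        * than the old one it is first truncated to zero so that no
        * stale records survive past the new length (the XXX above
        * flags the cost of that full truncation).
        */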
 1298 
 1299 /*
 1300  * Vnode extattr strategy routine for fifos.
 1301  *
 1302  * We need to check for a read or write of the extended attributes.
 1303  * Otherwise we just fall through and do the usual thing.
 1304  */
 1305 static int
 1306 ffsext_strategy(struct vop_strategy_args *ap)
 1307 /*
 1308 struct vop_strategy_args {
 1309         struct vnodeop_desc *a_desc;
 1310         struct vnode *a_vp;
 1311         struct buf *a_bp;
 1312 };
 1313 */
 1314 {
 1315         struct vnode *vp;
 1316         daddr_t lbn;
 1317 
 1318         vp = ap->a_vp;
 1319         lbn = ap->a_bp->b_lblkno;
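              /*
               * On UFS2, logical block numbers in [-UFS_NXADDR, -1] address
               * the inode's extended attribute blocks; route those through
               * the regular UFS strategy even on a fifo vnode.
               */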
 1320         if (I_IS_UFS2(VTOI(vp)) && lbn < 0 && lbn >= -UFS_NXADDR)
 1321                 return (VOP_STRATEGY_APV(&ufs_vnodeops, ap));
 1322         if (vp->v_type == VFIFO)
 1323                 return (VOP_STRATEGY_APV(&ufs_fifoops, ap));
 1324         panic("spec nodes went here");
 1325 }
 1326 
 1327 /*
 1328  * Vnode extattr transaction start.
 1329  */
 1330 static int
 1331 ffs_openextattr(struct vop_openextattr_args *ap)
 1332 /*
 1333 struct vop_openextattr_args {
 1334         struct vnodeop_desc *a_desc;
 1335         struct vnode *a_vp;
 1336         IN struct ucred *a_cred;
 1337         IN struct thread *a_td;
 1338 };
 1339 */
 1340 {
 1341 
 1342         if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
 1343                 return (EOPNOTSUPP);
 1344 
 1345         return (ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td));
 1346 }
 1347 
 1349 /*
 1350  * Vnode extattr transaction commit/abort
 1351  */
 1352 static int
 1353 ffs_closeextattr(struct vop_closeextattr_args *ap)
 1354 /*
 1355 struct vop_closeextattr_args {
 1356         struct vnodeop_desc *a_desc;
 1357         struct vnode *a_vp;
 1358         int a_commit;
 1359         IN struct ucred *a_cred;
 1360         IN struct thread *a_td;
 1361 };
 1362 */
 1363 {
 1364 
 1365         if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
 1366                 return (EOPNOTSUPP);
 1367 
 1368         if (ap->a_commit && (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY))
 1369                 return (EROFS);
 1370 
 1371         return (ffs_close_ea(ap->a_vp, ap->a_commit, ap->a_cred, ap->a_td));
 1372 }
 1373 
 1374 /*
 1375  * Vnode operation to remove a named attribute.
 1376  */
 1377 static int
 1378 ffs_deleteextattr(struct vop_deleteextattr_args *ap)
 1379 /*
 1380 vop_deleteextattr {
 1381         IN struct vnode *a_vp;
 1382         IN int a_attrnamespace;
 1383         IN const char *a_name;
 1384         IN struct ucred *a_cred;
 1385         IN struct thread *a_td;
 1386 };
 1387 */
 1388 {
 1389         struct inode *ip;
 1390         struct extattr *eap;
 1391         uint32_t ul;
 1392         int olen, error, i, easize;
 1393         u_char *eae;
 1394         void *tmp;
 1395 
 1396         ip = VTOI(ap->a_vp);
 1397 
 1398         if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
 1399                 return (EOPNOTSUPP);
 1400 
 1401         if (strlen(ap->a_name) == 0)
 1402                 return (EINVAL);
 1403 
 1404         if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
 1405                 return (EROFS);
 1406 
 1407         error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
 1408             ap->a_cred, ap->a_td, VWRITE);
 1409         if (error) {
 1410 
 1411                 /*
 1412                  * ffs_lock_ea is not needed here, because the vnode
 1413                  * must be exclusively locked.
 1414                  */
 1415                 if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
 1416                         ip->i_ea_error = error;
 1417                 return (error);
 1418         }
 1419 
 1420         error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
 1421         if (error)
 1422                 return (error);
 1423 
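              /*
               * Delete by working on a copy of the EA area: slide the
               * records that follow the victim down over it, shrink the
               * size, then swap the copy in and commit it.
               */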
 1424         /* CEM: delete could be done in-place instead */
 1425         eae = malloc(ip->i_ea_len, M_TEMP, M_WAITOK);
 1426         bcopy(ip->i_ea_area, eae, ip->i_ea_len);
 1427         easize = ip->i_ea_len;
 1428 
 1429         olen = ffs_findextattr(eae, easize, ap->a_attrnamespace, ap->a_name,
 1430             &eap, NULL);
 1431         if (olen == -1) {
 1432                 /* attribute to be deleted does not exist */
 1433                 free(eae, M_TEMP);
 1434                 ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
 1435                 return (ENOATTR);
 1436         }
 1437         ul = eap->ea_length;
 1438         i = (u_char *)EXTATTR_NEXT(eap) - eae;
 1439         bcopy(EXTATTR_NEXT(eap), eap, easize - i);
 1440         easize -= ul;
 1441 
 1442         tmp = ip->i_ea_area;
 1443         ip->i_ea_area = eae;
 1444         ip->i_ea_len = easize;
 1445         free(tmp, M_TEMP);
 1446         error = ffs_close_ea(ap->a_vp, 1, ap->a_cred, ap->a_td);
 1447         return (error);
 1448 }
 1449 
 1450 /*
 1451  * Vnode operation to retrieve a named extended attribute.
 1452  */
 1453 static int
 1454 ffs_getextattr(struct vop_getextattr_args *ap)
 1455 /*
 1456 vop_getextattr {
 1457         IN struct vnode *a_vp;
 1458         IN int a_attrnamespace;
 1459         IN const char *a_name;
 1460         INOUT struct uio *a_uio;
 1461         OUT size_t *a_size;
 1462         IN struct ucred *a_cred;
 1463         IN struct thread *a_td;
 1464 };
 1465 */
 1466 {
 1467         struct inode *ip;
 1468         u_char *eae, *p;
 1469         unsigned easize;
 1470         int error, ealen;
 1471 
 1472         ip = VTOI(ap->a_vp);
 1473 
 1474         if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
 1475                 return (EOPNOTSUPP);
 1476 
 1477         error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
 1478             ap->a_cred, ap->a_td, VREAD);
 1479         if (error)
 1480                 return (error);
 1481 
 1482         error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
 1483         if (error)
 1484                 return (error);
 1485 
 1486         eae = ip->i_ea_area;
 1487         easize = ip->i_ea_len;
 1488 
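              /*
               * Look the attribute up in the cached EA area; report its
               * length through a_size or copy its contents out via the uio.
               */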
 1489         ealen = ffs_findextattr(eae, easize, ap->a_attrnamespace, ap->a_name,
 1490             NULL, &p);
 1491         if (ealen >= 0) {
 1492                 error = 0;
 1493                 if (ap->a_size != NULL)
 1494                         *ap->a_size = ealen;
 1495                 else if (ap->a_uio != NULL)
 1496                         error = uiomove(p, ealen, ap->a_uio);
 1497         } else
 1498                 error = ENOATTR;
 1499 
 1500         ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
 1501         return (error);
 1502 }
 1503 
 1504 /*
 1505  * Vnode operation to retrieve the list of extended attribute names on a vnode.
 1506  */
 1507 static int
 1508 ffs_listextattr(struct vop_listextattr_args *ap)
 1509 /*
 1510 vop_listextattr {
 1511         IN struct vnode *a_vp;
 1512         IN int a_attrnamespace;
 1513         INOUT struct uio *a_uio;
 1514         OUT size_t *a_size;
 1515         IN struct ucred *a_cred;
 1516         IN struct thread *a_td;
 1517 };
 1518 */
 1519 {
 1520         struct inode *ip;
 1521         struct extattr *eap, *eaend;
 1522         int error, ealen;
 1523 
 1524         ip = VTOI(ap->a_vp);
 1525 
 1526         if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
 1527                 return (EOPNOTSUPP);
 1528 
 1529         error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
 1530             ap->a_cred, ap->a_td, VREAD);
 1531         if (error)
 1532                 return (error);
 1533 
 1534         error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
 1535         if (error)
 1536                 return (error);
 1537 
 1538         error = 0;
 1539         if (ap->a_size != NULL)
 1540                 *ap->a_size = 0;
 1541 
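              /*
               * Walk the EA records in the requested namespace, returning
               * each name as a single length byte followed by the name
               * itself; ea_namelength and ea_name are adjacent in struct
               * extattr, so one uiomove() copies both.
               */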
 1542         KASSERT(ALIGNED_TO(ip->i_ea_area, struct extattr), ("unaligned"));
 1543         eap = (struct extattr *)ip->i_ea_area;
 1544         eaend = (struct extattr *)(ip->i_ea_area + ip->i_ea_len);
 1545         for (; error == 0 && eap < eaend; eap = EXTATTR_NEXT(eap)) {
 1546                 /* make sure this entry is complete */
 1547                 if (EXTATTR_NEXT(eap) > eaend)
 1548                         break;
 1549                 if (eap->ea_namespace != ap->a_attrnamespace)
 1550                         continue;
 1551 
 1552                 ealen = eap->ea_namelength;
 1553                 if (ap->a_size != NULL)
 1554                         *ap->a_size += ealen + 1;
 1555                 else if (ap->a_uio != NULL)
 1556                         error = uiomove(&eap->ea_namelength, ealen + 1,
 1557                             ap->a_uio);
 1558         }
 1559 
 1560         ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
 1561         return (error);
 1562 }
 1563 
 1564 /*
 1565  * Vnode operation to set a named attribute.
 1566  */
 1567 static int
 1568 ffs_setextattr(struct vop_setextattr_args *ap)
 1569 /*
 1570 vop_setextattr {
 1571         IN struct vnode *a_vp;
 1572         IN int a_attrnamespace;
 1573         IN const char *a_name;
 1574         INOUT struct uio *a_uio;
 1575         IN struct ucred *a_cred;
 1576         IN struct thread *a_td;
 1577 };
 1578 */
 1579 {
 1580         struct inode *ip;
 1581         struct fs *fs;
 1582         struct extattr *eap;
 1583         uint32_t ealength, ul;
 1584         ssize_t ealen;
 1585         int olen, eapad1, eapad2, error, i, easize;
 1586         u_char *eae;
 1587         void *tmp;
 1588 
 1589         ip = VTOI(ap->a_vp);
 1590         fs = ITOFS(ip);
 1591 
 1592         if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
 1593                 return (EOPNOTSUPP);
 1594 
 1595         if (strlen(ap->a_name) == 0)
 1596                 return (EINVAL);
 1597 
 1598         /* XXX The old API that deleted EAs by passing a NULL uio is no longer supported. */
 1599         if (ap->a_uio == NULL)
 1600                 return (EOPNOTSUPP);
 1601 
 1602         if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
 1603                 return (EROFS);
 1604 
 1605         ealen = ap->a_uio->uio_resid;
 1606         if (ealen < 0 || ealen > lblktosize(fs, UFS_NXADDR))
 1607                 return (EINVAL);
 1608 
 1609         error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
 1610             ap->a_cred, ap->a_td, VWRITE);
 1611         if (error) {
 1612 
 1613                 /*
 1614                  * ffs_lock_ea is not needed here, because the vnode
 1615                  * must be exclusively locked.
 1616                  */
 1617                 if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
 1618                         ip->i_ea_error = error;
 1619                 return (error);
 1620         }
 1621 
 1622         error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
 1623         if (error)
 1624                 return (error);
 1625 
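              /*
               * Size the new record: a 32-bit record length, one byte each
               * for the namespace, content pad length, and name length,
               * the name padded to an 8-byte boundary, and finally the
               * attribute data padded to an 8-byte boundary.
               */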
 1626         ealength = sizeof(uint32_t) + 3 + strlen(ap->a_name);
 1627         eapad1 = roundup2(ealength, 8) - ealength;
 1628         eapad2 = roundup2(ealen, 8) - ealen;
 1629         ealength += eapad1 + ealen + eapad2;
 1630 
 1631         /*
 1632          * CEM: rewrites of the same size or smaller could be done in-place
 1633          * instead.  (We don't acquire any fine-grained locks in here either,
 1634          * so we could also do bigger writes in-place.)
 1635          */
 1636         eae = malloc(ip->i_ea_len + ealength, M_TEMP, M_WAITOK);
 1637         bcopy(ip->i_ea_area, eae, ip->i_ea_len);
 1638         easize = ip->i_ea_len;
 1639 
 1640         olen = ffs_findextattr(eae, easize, ap->a_attrnamespace, ap->a_name,
 1641             &eap, NULL);
 1642         if (olen == -1) {
 1643                 /* new, append at end */
 1644                 KASSERT(ALIGNED_TO(eae + easize, struct extattr),
 1645                     ("unaligned"));
 1646                 eap = (struct extattr *)(eae + easize);
 1647                 easize += ealength;
 1648         } else {
 1649                 ul = eap->ea_length;
 1650                 i = (u_char *)EXTATTR_NEXT(eap) - eae;
 1651                 if (ul != ealength) {
 1652                         bcopy(EXTATTR_NEXT(eap), (u_char *)eap + ealength,
 1653                             easize - i);
 1654                         easize += (ealength - ul);
 1655                 }
 1656         }
 1657         if (easize > lblktosize(fs, UFS_NXADDR)) {
 1658                 free(eae, M_TEMP);
 1659                 ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
 1660                 if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
 1661                         ip->i_ea_error = ENOSPC;
 1662                 return (ENOSPC);
 1663         }
 1664         eap->ea_length = ealength;
 1665         eap->ea_namespace = ap->a_attrnamespace;
 1666         eap->ea_contentpadlen = eapad2;
 1667         eap->ea_namelength = strlen(ap->a_name);
 1668         memcpy(eap->ea_name, ap->a_name, strlen(ap->a_name));
 1669         bzero(&eap->ea_name[strlen(ap->a_name)], eapad1);
 1670         error = uiomove(EXTATTR_CONTENT(eap), ealen, ap->a_uio);
 1671         if (error) {
 1672                 free(eae, M_TEMP);
 1673                 ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
 1674                 if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
 1675                         ip->i_ea_error = error;
 1676                 return (error);
 1677         }
 1678         bzero((u_char *)EXTATTR_CONTENT(eap) + ealen, eapad2);
 1679 
 1680         tmp = ip->i_ea_area;
 1681         ip->i_ea_area = eae;
 1682         ip->i_ea_len = easize;
 1683         free(tmp, M_TEMP);
 1684         error = ffs_close_ea(ap->a_vp, 1, ap->a_cred, ap->a_td);
 1685         return (error);
 1686 }
 1687 
 1688 /*
 1689  * Vnode pointer to File handle
 1690  */
 1691 static int
 1692 ffs_vptofh(struct vop_vptofh_args *ap)
 1693 /*
 1694 vop_vptofh {
 1695         IN struct vnode *a_vp;
 1696         IN struct fid *a_fhp;
 1697 };
 1698 */
 1699 {
 1700         struct inode *ip;
 1701         struct ufid *ufhp;
 1702 
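              /*
               * An FFS file handle is simply the inode number plus its
               * generation; the generation lets a stale handle be detected
               * once the inode has been reused.
               */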
 1703         ip = VTOI(ap->a_vp);
 1704         ufhp = (struct ufid *)ap->a_fhp;
 1705         ufhp->ufid_len = sizeof(struct ufid);
 1706         ufhp->ufid_ino = ip->i_number;
 1707         ufhp->ufid_gen = ip->i_gen;
 1708         return (0);
 1709 }
 1710 
 1711 SYSCTL_DECL(_vfs_ffs);
 1712 static int use_buf_pager = 1;
 1713 SYSCTL_INT(_vfs_ffs, OID_AUTO, use_buf_pager, CTLFLAG_RWTUN, &use_buf_pager, 0,
 1714     "Always use buffer pager instead of bmap");
 1715 
 1716 static daddr_t
 1717 ffs_gbp_getblkno(struct vnode *vp, vm_ooffset_t off)
 1718 {
 1719 
 1720         return (lblkno(VFSTOUFS(vp->v_mount)->um_fs, off));
 1721 }
 1722 
 1723 static int
 1724 ffs_gbp_getblksz(struct vnode *vp, daddr_t lbn)
 1725 {
 1726 
 1727         return (blksize(VFSTOUFS(vp->v_mount)->um_fs, VTOI(vp), lbn));
 1728 }
 1729 
 1730 static int
 1731 ffs_getpages(struct vop_getpages_args *ap)
 1732 {
 1733         struct vnode *vp;
 1734         struct ufsmount *um;
 1735 
 1736         vp = ap->a_vp;
 1737         um = VFSTOUFS(vp->v_mount);
 1738 
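              /*
               * Use the generic, bmap-based vnode pager only when the
               * vfs.ffs.use_buf_pager knob is off and the device block size
               * does not exceed a page; otherwise read the pages through
               * the buffer cache.
               */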
 1739         if (!use_buf_pager && um->um_devvp->v_bufobj.bo_bsize <= PAGE_SIZE)
 1740                 return (vnode_pager_generic_getpages(vp, ap->a_m, ap->a_count,
 1741                     ap->a_rbehind, ap->a_rahead, NULL, NULL));
 1742         return (vfs_bio_getpages(vp, ap->a_m, ap->a_count, ap->a_rbehind,
 1743             ap->a_rahead, ffs_gbp_getblkno, ffs_gbp_getblksz));
 1744 }
