FreeBSD/Linux Kernel Cross Reference
sys/ufs/ffs/ffs_vnops.c

    1 /*-
    2  * Copyright (c) 2002, 2003 Networks Associates Technology, Inc.
    3  * All rights reserved.
    4  *
    5  * This software was developed for the FreeBSD Project by Marshall
    6  * Kirk McKusick and Network Associates Laboratories, the Security
    7  * Research Division of Network Associates, Inc. under DARPA/SPAWAR
    8  * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
    9  * research program
   10  *
   11  * Redistribution and use in source and binary forms, with or without
   12  * modification, are permitted provided that the following conditions
   13  * are met:
   14  * 1. Redistributions of source code must retain the above copyright
   15  *    notice, this list of conditions and the following disclaimer.
   16  * 2. Redistributions in binary form must reproduce the above copyright
   17  *    notice, this list of conditions and the following disclaimer in the
   18  *    documentation and/or other materials provided with the distribution.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   30  * SUCH DAMAGE.
   31  *
   32  * Copyright (c) 1982, 1986, 1989, 1993
   33  *      The Regents of the University of California.  All rights reserved.
   34  *
   35  * Redistribution and use in source and binary forms, with or without
   36  * modification, are permitted provided that the following conditions
   37  * are met:
   38  * 1. Redistributions of source code must retain the above copyright
   39  *    notice, this list of conditions and the following disclaimer.
   40  * 2. Redistributions in binary form must reproduce the above copyright
   41  *    notice, this list of conditions and the following disclaimer in the
   42  *    documentation and/or other materials provided with the distribution.
   43  * 4. Neither the name of the University nor the names of its contributors
   44  *    may be used to endorse or promote products derived from this software
   45  *    without specific prior written permission.
   46  *
   47  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   48  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   49  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   50  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   51  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   52  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   53  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   54  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   55  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   56  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   57  * SUCH DAMAGE.
   58  *
   59  *      from: @(#)ufs_readwrite.c       8.11 (Berkeley) 5/8/95
   60  * from: $FreeBSD: .../ufs/ufs_readwrite.c,v 1.96 2002/08/12 09:22:11 phk ...
   61  *      @(#)ffs_vnops.c 8.15 (Berkeley) 5/14/95
   62  */
   63 
   64 #include <sys/cdefs.h>
   65 __FBSDID("$FreeBSD: releng/8.4/sys/ufs/ffs/ffs_vnops.c 237717 2012-06-28 16:54:10Z kib $");
   66 
   67 #include <sys/param.h>
   68 #include <sys/bio.h>
   69 #include <sys/systm.h>
   70 #include <sys/buf.h>
   71 #include <sys/conf.h>
   72 #include <sys/extattr.h>
   73 #include <sys/kernel.h>
   74 #include <sys/limits.h>
   75 #include <sys/malloc.h>
   76 #include <sys/mount.h>
   77 #include <sys/priv.h>
   78 #include <sys/stat.h>
   79 #include <sys/vmmeter.h>
   80 #include <sys/vnode.h>
   81 
   82 #include <vm/vm.h>
   83 #include <vm/vm_extern.h>
   84 #include <vm/vm_object.h>
   85 #include <vm/vm_page.h>
   86 #include <vm/vm_pager.h>
   87 #include <vm/vnode_pager.h>
   88 
   89 #include <ufs/ufs/extattr.h>
   90 #include <ufs/ufs/quota.h>
   91 #include <ufs/ufs/inode.h>
   92 #include <ufs/ufs/ufs_extern.h>
   93 #include <ufs/ufs/ufsmount.h>
   94 
   95 #include <ufs/ffs/fs.h>
   96 #include <ufs/ffs/ffs_extern.h>
   97 #include "opt_directio.h"
   98 #include "opt_ffs.h"
   99 
  100 #ifdef DIRECTIO
  101 extern int      ffs_rawread(struct vnode *vp, struct uio *uio, int *workdone);
  102 #endif
  103 static vop_fsync_t      ffs_fsync;
  104 static vop_lock1_t      ffs_lock;
  105 static vop_getpages_t   ffs_getpages;
  106 static vop_read_t       ffs_read;
  107 static vop_write_t      ffs_write;
  108 static int      ffs_extread(struct vnode *vp, struct uio *uio, int ioflag);
  109 static int      ffs_extwrite(struct vnode *vp, struct uio *uio, int ioflag,
  110                     struct ucred *cred);
  111 static vop_strategy_t   ffsext_strategy;
  112 static vop_closeextattr_t       ffs_closeextattr;
  113 static vop_deleteextattr_t      ffs_deleteextattr;
  114 static vop_getextattr_t ffs_getextattr;
  115 static vop_listextattr_t        ffs_listextattr;
  116 static vop_openextattr_t        ffs_openextattr;
  117 static vop_setextattr_t ffs_setextattr;
  118 static vop_vptofh_t     ffs_vptofh;
  119 
  120 
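/*
 * The "1" operation vectors below are used for UFS1 file systems and the
 * "2" vectors for UFS2, which additionally supports the extended
 * attribute operations; the appropriate vector is installed when the
 * vnode is set up in ffs_vfsops.c.
 */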
  121 /* Global vfs data structures for ufs. */
  122 struct vop_vector ffs_vnodeops1 = {
  123         .vop_default =          &ufs_vnodeops,
  124         .vop_fsync =            ffs_fsync,
  125         .vop_getpages =         ffs_getpages,
  126         .vop_lock1 =            ffs_lock,
  127         .vop_read =             ffs_read,
  128         .vop_reallocblks =      ffs_reallocblks,
  129         .vop_write =            ffs_write,
  130         .vop_vptofh =           ffs_vptofh,
  131 };
  132 
  133 struct vop_vector ffs_fifoops1 = {
  134         .vop_default =          &ufs_fifoops,
  135         .vop_fsync =            ffs_fsync,
  136         .vop_reallocblks =      ffs_reallocblks, /* XXX: really ??? */
  137         .vop_vptofh =           ffs_vptofh,
  138 };
  139 
  140 /* Global vfs data structures for ufs. */
  141 struct vop_vector ffs_vnodeops2 = {
  142         .vop_default =          &ufs_vnodeops,
  143         .vop_fsync =            ffs_fsync,
  144         .vop_getpages =         ffs_getpages,
  145         .vop_lock1 =            ffs_lock,
  146         .vop_read =             ffs_read,
  147         .vop_reallocblks =      ffs_reallocblks,
  148         .vop_write =            ffs_write,
  149         .vop_closeextattr =     ffs_closeextattr,
  150         .vop_deleteextattr =    ffs_deleteextattr,
  151         .vop_getextattr =       ffs_getextattr,
  152         .vop_listextattr =      ffs_listextattr,
  153         .vop_openextattr =      ffs_openextattr,
  154         .vop_setextattr =       ffs_setextattr,
  155         .vop_vptofh =           ffs_vptofh,
  156 };
  157 
  158 struct vop_vector ffs_fifoops2 = {
  159         .vop_default =          &ufs_fifoops,
  160         .vop_fsync =            ffs_fsync,
  161         .vop_lock1 =            ffs_lock,
  162         .vop_reallocblks =      ffs_reallocblks,
  163         .vop_strategy =         ffsext_strategy,
  164         .vop_closeextattr =     ffs_closeextattr,
  165         .vop_deleteextattr =    ffs_deleteextattr,
  166         .vop_getextattr =       ffs_getextattr,
  167         .vop_listextattr =      ffs_listextattr,
  168         .vop_openextattr =      ffs_openextattr,
  169         .vop_setextattr =       ffs_setextattr,
  170         .vop_vptofh =           ffs_vptofh,
  171 };
  172 
  173 /*
  174  * Synch an open file.
  175  */
  176 /* ARGSUSED */
  177 static int
  178 ffs_fsync(struct vop_fsync_args *ap)
  179 {
  180         struct vnode *vp;
  181         struct bufobj *bo;
  182         int error;
  183 
  184         vp = ap->a_vp;
  185         bo = &vp->v_bufobj;
  186 retry:
  187         error = ffs_syncvnode(vp, ap->a_waitfor);
  188         if (error)
  189                 return (error);
  190         if (ap->a_waitfor == MNT_WAIT &&
  191             (vp->v_mount->mnt_flag & MNT_SOFTDEP)) {
  192                 error = softdep_fsync(vp);
  193                 if (error)
  194                         return (error);
  195 
  196                 /*
  197                  * The softdep_fsync() function may drop vp lock,
  198                  * allowing for dirty buffers to reappear on the
  199                  * bo_dirty list. Recheck and resync as needed.
  200                  */
  201                 BO_LOCK(bo);
  202                 if (vp->v_type == VREG && (bo->bo_numoutput > 0 ||
  203                     bo->bo_dirty.bv_cnt > 0)) {
  204                         BO_UNLOCK(bo);
  205                         goto retry;
  206                 }
  207                 BO_UNLOCK(bo);
  208         }
  209         return (0);
  210 }
  211 
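/*
 * Flush all dirty buffers of a vnode.  For a synchronous (MNT_WAIT)
 * flush the data blocks are pushed first, and later passes pick up the
 * metadata buffers and soft-dependency work until the vnode is clean;
 * the routine finishes by writing the inode itself with ffs_update().
 */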
  212 int
  213 ffs_syncvnode(struct vnode *vp, int waitfor)
  214 {
  215         struct inode *ip = VTOI(vp);
  216         struct bufobj *bo;
  217         struct buf *bp;
  218         struct buf *nbp;
  219         int s, error, wait, passes, skipmeta;
  220         ufs_lbn_t lbn;
  221 
  222         wait = (waitfor == MNT_WAIT);
  223         lbn = lblkno(ip->i_fs, (ip->i_size + ip->i_fs->fs_bsize - 1));
  224         bo = &vp->v_bufobj;
  225 
  226         /*
  227          * Flush all dirty buffers associated with a vnode.
  228          */
  229         passes = NIADDR + 1;
  230         skipmeta = 0;
  231         if (wait)
  232                 skipmeta = 1;
  233         s = splbio();
  234         BO_LOCK(bo);
  235 loop:
  236         TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
  237                 bp->b_vflags &= ~BV_SCANNED;
  238         TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
  239                 /*
  240                  * Reasons to skip this buffer: it has already been considered
  241                  * on this pass, this pass is the first time through on a
  242                  * synchronous flush request and the buffer being considered
  243                  * is metadata, the buffer has dependencies that will cause
  244                  * it to be redirtied and it has not already been deferred,
  245                  * or it is already being written.
  246                  */
  247                 if ((bp->b_vflags & BV_SCANNED) != 0)
  248                         continue;
  249                 bp->b_vflags |= BV_SCANNED;
  250                 if ((skipmeta == 1 && bp->b_lblkno < 0))
  251                         continue;
  252                 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
  253                         continue;
  254                 BO_UNLOCK(bo);
  255                 if (!wait && !LIST_EMPTY(&bp->b_dep) &&
  256                     (bp->b_flags & B_DEFERRED) == 0 &&
  257                     buf_countdeps(bp, 0)) {
  258                         bp->b_flags |= B_DEFERRED;
  259                         BUF_UNLOCK(bp);
  260                         BO_LOCK(bo);
  261                         continue;
  262                 }
  263                 if ((bp->b_flags & B_DELWRI) == 0)
  264                         panic("ffs_fsync: not dirty");
  265                 /*
  266                  * If this is a synchronous flush request, or it is not a
  267                  * file or device, start the write on this buffer immediately.
  268                  */
  269                 if (wait || (vp->v_type != VREG && vp->v_type != VBLK)) {
  270 
  271                         /*
  272                          * On our final pass through, do all I/O synchronously
  273                          * so that we can find out if our flush is failing
  274                          * because of write errors.
  275                          */
  276                         if (passes > 0 || !wait) {
  277                                 if ((bp->b_flags & B_CLUSTEROK) && !wait) {
  278                                         (void) vfs_bio_awrite(bp);
  279                                 } else {
  280                                         bremfree(bp);
  281                                         splx(s);
  282                                         (void) bawrite(bp);
  283                                         s = splbio();
  284                                 }
  285                         } else {
  286                                 bremfree(bp);
  287                                 splx(s);
  288                                 if ((error = bwrite(bp)) != 0)
  289                                         return (error);
  290                                 s = splbio();
  291                         }
  292                 } else if ((vp->v_type == VREG) && (bp->b_lblkno >= lbn)) {
  293                         /*
  294                          * If the buffer is for data that has been truncated
  295                          * off the file, then throw it away.
  296                          */
  297                         bremfree(bp);
  298                         bp->b_flags |= B_INVAL | B_NOCACHE;
  299                         splx(s);
  300                         brelse(bp);
  301                         s = splbio();
  302                 } else
  303                         vfs_bio_awrite(bp);
  304 
  305                 /*
  306                  * Since we may have slept during the I/O, we need
  307                  * to start from a known point.
  308                  */
  309                 BO_LOCK(bo);
  310                 nbp = TAILQ_FIRST(&bo->bo_dirty.bv_hd);
  311         }
  312         /*
  313          * If we were asked to do this synchronously, then go back for
  314          * another pass, this time doing the metadata.
  315          */
  316         if (skipmeta) {
  317                 skipmeta = 0;
  318                 goto loop;
  319         }
  320 
  321         if (wait) {
  322                 bufobj_wwait(bo, 0, 0);
  323                 BO_UNLOCK(bo);
  324 
  325                 /*
   326                  * Ensure that any filesystem metadata associated
  327                  * with the vnode has been written.
  328                  */
  329                 splx(s);
  330                 if ((error = softdep_sync_metadata(vp)) != 0)
  331                         return (error);
  332                 s = splbio();
  333 
  334                 BO_LOCK(bo);
  335                 if (bo->bo_dirty.bv_cnt > 0) {
  336                         /*
  337                          * Block devices associated with filesystems may
  338                          * have new I/O requests posted for them even if
  339                          * the vnode is locked, so no amount of trying will
  340                          * get them clean. Thus we give block devices a
  341                          * good effort, then just give up. For all other file
  342                          * types, go around and try again until it is clean.
  343                          */
  344                         if (passes > 0) {
  345                                 passes -= 1;
  346                                 goto loop;
  347                         }
  348 #ifdef INVARIANTS
  349                         if (!vn_isdisk(vp, NULL))
  350                                 vprint("ffs_fsync: dirty", vp);
  351 #endif
  352                 }
  353         }
  354         BO_UNLOCK(bo);
  355         splx(s);
  356         return (ffs_update(vp, wait));
  357 }
  358 
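/*
 * Acquire the vnode lock.  A vnode can change its v_vnlock between the
 * private vnode lock and the shared snapshot lock while a locker sleeps
 * (when a file is made into, or ceases to be, a snapshot), so the lock
 * actually obtained is re-checked against the current v_vnlock and
 * re-acquired if the vnode mutated underneath us.
 */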
  359 static int
  360 ffs_lock(ap)
  361         struct vop_lock1_args /* {
  362                 struct vnode *a_vp;
  363                 int a_flags;
  364                 struct thread *a_td;
  365                 char *file;
  366                 int line;
  367         } */ *ap;
  368 {
  369 #ifndef NO_FFS_SNAPSHOT
  370         struct vnode *vp;
  371         int flags;
  372         struct lock *lkp;
  373         int result;
  374 
  375         switch (ap->a_flags & LK_TYPE_MASK) {
  376         case LK_SHARED:
  377         case LK_UPGRADE:
  378         case LK_EXCLUSIVE:
  379                 vp = ap->a_vp;
  380                 flags = ap->a_flags;
  381                 for (;;) {
  382 #ifdef DEBUG_VFS_LOCKS
  383                         KASSERT(vp->v_holdcnt != 0,
  384                             ("ffs_lock %p: zero hold count", vp));
  385 #endif
  386                         lkp = vp->v_vnlock;
  387                         result = _lockmgr_args(lkp, flags, VI_MTX(vp),
  388                             LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT,
  389                             ap->a_file, ap->a_line);
  390                         if (lkp == vp->v_vnlock || result != 0)
  391                                 break;
  392                         /*
  393                          * Apparent success, except that the vnode
  394                          * mutated between snapshot file vnode and
  395                          * regular file vnode while this process
  396                          * slept.  The lock currently held is not the
  397                          * right lock.  Release it, and try to get the
  398                          * new lock.
  399                          */
  400                         (void) _lockmgr_args(lkp, LK_RELEASE, NULL,
  401                             LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT,
  402                             ap->a_file, ap->a_line);
  403                         if ((flags & (LK_INTERLOCK | LK_NOWAIT)) ==
  404                             (LK_INTERLOCK | LK_NOWAIT))
  405                                 return (EBUSY);
  406                         if ((flags & LK_TYPE_MASK) == LK_UPGRADE)
  407                                 flags = (flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE;
  408                         flags &= ~LK_INTERLOCK;
  409                 }
  410                 break;
  411         default:
  412                 result = VOP_LOCK1_APV(&ufs_vnodeops, ap);
  413         }
  414         return (result);
  415 #else
  416         return (VOP_LOCK1_APV(&ufs_vnodeops, ap));
  417 #endif
  418 }
  419 
  420 /*
  421  * Vnode op for reading.
  422  */
  423 static int
  424 ffs_read(ap)
  425         struct vop_read_args /* {
  426                 struct vnode *a_vp;
  427                 struct uio *a_uio;
  428                 int a_ioflag;
  429                 struct ucred *a_cred;
  430         } */ *ap;
  431 {
  432         struct vnode *vp;
  433         struct inode *ip;
  434         struct uio *uio;
  435         struct fs *fs;
  436         struct buf *bp;
  437         ufs_lbn_t lbn, nextlbn;
  438         off_t bytesinfile;
  439         long size, xfersize, blkoffset;
  440         int error, orig_resid;
  441         int seqcount;
  442         int ioflag;
  443 
  444         vp = ap->a_vp;
  445         uio = ap->a_uio;
  446         ioflag = ap->a_ioflag;
  447         if (ap->a_ioflag & IO_EXT)
  448 #ifdef notyet
  449                 return (ffs_extread(vp, uio, ioflag));
  450 #else
  451                 panic("ffs_read+IO_EXT");
  452 #endif
  453 #ifdef DIRECTIO
  454         if ((ioflag & IO_DIRECT) != 0) {
  455                 int workdone;
  456 
  457                 error = ffs_rawread(vp, uio, &workdone);
  458                 if (error != 0 || workdone != 0)
  459                         return error;
  460         }
  461 #endif
  462 
  463         seqcount = ap->a_ioflag >> IO_SEQSHIFT;
  464         ip = VTOI(vp);
  465 
  466 #ifdef INVARIANTS
  467         if (uio->uio_rw != UIO_READ)
  468                 panic("ffs_read: mode");
  469 
  470         if (vp->v_type == VLNK) {
  471                 if ((int)ip->i_size < vp->v_mount->mnt_maxsymlinklen)
  472                         panic("ffs_read: short symlink");
  473         } else if (vp->v_type != VREG && vp->v_type != VDIR)
  474                 panic("ffs_read: type %d",  vp->v_type);
  475 #endif
  476         orig_resid = uio->uio_resid;
  477         KASSERT(orig_resid >= 0, ("ffs_read: uio->uio_resid < 0"));
  478         if (orig_resid == 0)
  479                 return (0);
  480         KASSERT(uio->uio_offset >= 0, ("ffs_read: uio->uio_offset < 0"));
  481         fs = ip->i_fs;
  482         if (uio->uio_offset < ip->i_size &&
  483             uio->uio_offset >= fs->fs_maxfilesize)
  484                 return (EOVERFLOW);
  485 
  486         for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
  487                 if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0)
  488                         break;
  489                 lbn = lblkno(fs, uio->uio_offset);
  490                 nextlbn = lbn + 1;
  491 
  492                 /*
  493                  * size of buffer.  The buffer representing the
  494                  * end of the file is rounded up to the size of
  495                  * the block type ( fragment or full block,
  496                  * depending ).
  497                  */
  498                 size = blksize(fs, ip, lbn);
  499                 blkoffset = blkoff(fs, uio->uio_offset);
  500 
  501                 /*
  502                  * The amount we want to transfer in this iteration is
  503                  * one FS block less the amount of the data before
  504                  * our startpoint (duh!)
  505                  */
  506                 xfersize = fs->fs_bsize - blkoffset;
  507 
  508                 /*
  509                  * But if we actually want less than the block,
  510                  * or the file doesn't have a whole block more of data,
  511                  * then use the lesser number.
  512                  */
  513                 if (uio->uio_resid < xfersize)
  514                         xfersize = uio->uio_resid;
  515                 if (bytesinfile < xfersize)
  516                         xfersize = bytesinfile;
  517 
  518                 if (lblktosize(fs, nextlbn) >= ip->i_size) {
  519                         /*
  520                          * Don't do readahead if this is the end of the file.
  521                          */
  522                         error = bread(vp, lbn, size, NOCRED, &bp);
  523                 } else if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) {
  524                         /*
  525                          * Otherwise if we are allowed to cluster,
  526                          * grab as much as we can.
  527                          *
  528                          * XXX  This may not be a win if we are not
  529                          * doing sequential access.
  530                          */
  531                         error = cluster_read(vp, ip->i_size, lbn,
  532                                 size, NOCRED, blkoffset + uio->uio_resid, seqcount, &bp);
  533                 } else if (seqcount > 1) {
  534                         /*
  535                          * If we are NOT allowed to cluster, then
  536                          * if we appear to be acting sequentially,
  537                          * fire off a request for a readahead
  538                          * as well as a read. Note that the 4th and 5th
  539                          * arguments point to arrays of the size specified in
  540                          * the 6th argument.
  541                          */
  542                         int nextsize = blksize(fs, ip, nextlbn);
  543                         error = breadn(vp, lbn,
  544                             size, &nextlbn, &nextsize, 1, NOCRED, &bp);
  545                 } else {
  546                         /*
  547                          * Failing all of the above, just read what the
  548                          * user asked for. Interestingly, the same as
  549                          * the first option above.
  550                          */
  551                         error = bread(vp, lbn, size, NOCRED, &bp);
  552                 }
  553                 if (error) {
  554                         brelse(bp);
  555                         bp = NULL;
  556                         break;
  557                 }
  558 
  559                 /*
  560                  * If IO_DIRECT then set B_DIRECT for the buffer.  This
  561                  * will cause us to attempt to release the buffer later on
  562                  * and will cause the buffer cache to attempt to free the
  563                  * underlying pages.
  564                  */
  565                 if (ioflag & IO_DIRECT)
  566                         bp->b_flags |= B_DIRECT;
  567 
  568                 /*
  569                  * We should only get non-zero b_resid when an I/O error
  570                  * has occurred, which should cause us to break above.
  571                  * However, if the short read did not cause an error,
  572                  * then we want to ensure that we do not uiomove bad
  573                  * or uninitialized data.
  574                  */
  575                 size -= bp->b_resid;
  576                 if (size < xfersize) {
  577                         if (size == 0)
  578                                 break;
  579                         xfersize = size;
  580                 }
  581 
  582                 error = uiomove((char *)bp->b_data + blkoffset,
  583                     (int)xfersize, uio);
  584                 if (error)
  585                         break;
  586 
  587                 if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
  588                    (LIST_EMPTY(&bp->b_dep))) {
  589                         /*
  590                          * If there are no dependencies, and it's VMIO,
  591                          * then we don't need the buf, mark it available
  592                          * for freeing. The VM has the data.
  593                          */
  594                         bp->b_flags |= B_RELBUF;
  595                         brelse(bp);
  596                 } else {
  597                         /*
  598                          * Otherwise let whoever
  599                          * made the request take care of
  600                          * freeing it. We just queue
  601                          * it onto another list.
  602                          */
  603                         bqrelse(bp);
  604                 }
  605         }
  606 
  607         /*
  608          * This can only happen in the case of an error
  609          * because the loop above resets bp to NULL on each iteration
   610          * and on normal completion has not set a new value into it,
   611          * so it must have come from a 'break' statement.
  612          */
  613         if (bp != NULL) {
  614                 if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
  615                    (LIST_EMPTY(&bp->b_dep))) {
  616                         bp->b_flags |= B_RELBUF;
  617                         brelse(bp);
  618                 } else {
  619                         bqrelse(bp);
  620                 }
  621         }
  622 
  623         if ((error == 0 || uio->uio_resid != orig_resid) &&
  624             (vp->v_mount->mnt_flag & MNT_NOATIME) == 0 &&
  625             (ip->i_flag & IN_ACCESS) == 0) {
  626                 VI_LOCK(vp);
  627                 ip->i_flag |= IN_ACCESS;
  628                 VI_UNLOCK(vp);
  629         }
  630         return (error);
  631 }
  632 
  633 /*
  634  * Vnode op for writing.
  635  */
  636 static int
  637 ffs_write(ap)
  638         struct vop_write_args /* {
  639                 struct vnode *a_vp;
  640                 struct uio *a_uio;
  641                 int a_ioflag;
  642                 struct ucred *a_cred;
  643         } */ *ap;
  644 {
  645         struct vnode *vp;
  646         struct uio *uio;
  647         struct inode *ip;
  648         struct fs *fs;
  649         struct buf *bp;
  650         ufs_lbn_t lbn;
  651         off_t osize;
  652         int seqcount;
  653         int blkoffset, error, flags, ioflag, resid, size, xfersize;
  654 
  655         vp = ap->a_vp;
  656         uio = ap->a_uio;
  657         ioflag = ap->a_ioflag;
  658         if (ap->a_ioflag & IO_EXT)
  659 #ifdef notyet
  660                 return (ffs_extwrite(vp, uio, ioflag, ap->a_cred));
  661 #else
  662                 panic("ffs_write+IO_EXT");
  663 #endif
  664 
  665         seqcount = ap->a_ioflag >> IO_SEQSHIFT;
  666         ip = VTOI(vp);
  667 
  668 #ifdef INVARIANTS
  669         if (uio->uio_rw != UIO_WRITE)
  670                 panic("ffs_write: mode");
  671 #endif
  672 
  673         switch (vp->v_type) {
  674         case VREG:
  675                 if (ioflag & IO_APPEND)
  676                         uio->uio_offset = ip->i_size;
  677                 if ((ip->i_flags & APPEND) && uio->uio_offset != ip->i_size)
  678                         return (EPERM);
  679                 /* FALLTHROUGH */
  680         case VLNK:
  681                 break;
  682         case VDIR:
  683                 panic("ffs_write: dir write");
  684                 break;
  685         default:
  686                 panic("ffs_write: type %p %d (%d,%d)", vp, (int)vp->v_type,
  687                         (int)uio->uio_offset,
  688                         (int)uio->uio_resid
  689                 );
  690         }
  691 
  692         KASSERT(uio->uio_resid >= 0, ("ffs_write: uio->uio_resid < 0"));
  693         KASSERT(uio->uio_offset >= 0, ("ffs_write: uio->uio_offset < 0"));
  694         fs = ip->i_fs;
  695         if ((uoff_t)uio->uio_offset + uio->uio_resid > fs->fs_maxfilesize)
  696                 return (EFBIG);
  697         /*
  698          * Maybe this should be above the vnode op call, but so long as
  699          * file servers have no limits, I don't think it matters.
  700          */
  701         if (vn_rlimit_fsize(vp, uio, uio->uio_td))
  702                 return (EFBIG);
  703 
  704         resid = uio->uio_resid;
  705         osize = ip->i_size;
  706         if (seqcount > BA_SEQMAX)
  707                 flags = BA_SEQMAX << BA_SEQSHIFT;
  708         else
  709                 flags = seqcount << BA_SEQSHIFT;
  710         if ((ioflag & IO_SYNC) && !DOINGASYNC(vp))
  711                 flags |= IO_SYNC;
  712 
  713         for (error = 0; uio->uio_resid > 0;) {
  714                 lbn = lblkno(fs, uio->uio_offset);
  715                 blkoffset = blkoff(fs, uio->uio_offset);
  716                 xfersize = fs->fs_bsize - blkoffset;
  717                 if (uio->uio_resid < xfersize)
  718                         xfersize = uio->uio_resid;
  719                 if (uio->uio_offset + xfersize > ip->i_size)
  720                         vnode_pager_setsize(vp, uio->uio_offset + xfersize);
  721 
  722                 /*
  723                  * We must perform a read-before-write if the transfer size
  724                  * does not cover the entire buffer.
  725                  */
  726                 if (fs->fs_bsize > xfersize)
  727                         flags |= BA_CLRBUF;
  728                 else
  729                         flags &= ~BA_CLRBUF;
  730 /* XXX is uio->uio_offset the right thing here? */
  731                 error = UFS_BALLOC(vp, uio->uio_offset, xfersize,
  732                     ap->a_cred, flags, &bp);
  733                 if (error != 0) {
  734                         vnode_pager_setsize(vp, ip->i_size);
  735                         break;
  736                 }
  737                 /*
  738                  * If the buffer is not valid we have to clear out any
  739                  * garbage data from the pages instantiated for the buffer.
  740                  * If we do not, a failed uiomove() during a write can leave
  741                  * the prior contents of the pages exposed to a userland
  742                  * mmap().  XXX deal with uiomove() errors a better way.
  743                  */
  744                 if ((bp->b_flags & B_CACHE) == 0 && fs->fs_bsize <= xfersize)
  745                         vfs_bio_clrbuf(bp);
  746                 if (ioflag & IO_DIRECT)
  747                         bp->b_flags |= B_DIRECT;
  748                 if ((ioflag & (IO_SYNC|IO_INVAL)) == (IO_SYNC|IO_INVAL))
  749                         bp->b_flags |= B_NOCACHE;
  750 
  751                 if (uio->uio_offset + xfersize > ip->i_size) {
  752                         ip->i_size = uio->uio_offset + xfersize;
  753                         DIP_SET(ip, i_size, ip->i_size);
  754                 }
  755 
  756                 size = blksize(fs, ip, lbn) - bp->b_resid;
  757                 if (size < xfersize)
  758                         xfersize = size;
  759 
  760                 error =
  761                     uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio);
  762                 if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
  763                    (LIST_EMPTY(&bp->b_dep))) {
  764                         bp->b_flags |= B_RELBUF;
  765                 }
  766 
  767                 /*
  768                  * If IO_SYNC each buffer is written synchronously.  Otherwise
  769                  * if we have a severe page deficiency write the buffer
  770                  * asynchronously.  Otherwise try to cluster, and if that
  771                  * doesn't do it then either do an async write (if O_DIRECT),
  772                  * or a delayed write (if not).
  773                  */
  774                 if (ioflag & IO_SYNC) {
  775                         (void)bwrite(bp);
  776                 } else if (vm_page_count_severe() ||
  777                             buf_dirty_count_severe() ||
  778                             (ioflag & IO_ASYNC)) {
  779                         bp->b_flags |= B_CLUSTEROK;
  780                         bawrite(bp);
  781                 } else if (xfersize + blkoffset == fs->fs_bsize) {
  782                         if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0) {
  783                                 bp->b_flags |= B_CLUSTEROK;
  784                                 cluster_write(vp, bp, ip->i_size, seqcount);
  785                         } else {
  786                                 bawrite(bp);
  787                         }
  788                 } else if (ioflag & IO_DIRECT) {
  789                         bp->b_flags |= B_CLUSTEROK;
  790                         bawrite(bp);
  791                 } else {
  792                         bp->b_flags |= B_CLUSTEROK;
  793                         bdwrite(bp);
  794                 }
  795                 if (error || xfersize == 0)
  796                         break;
  797                 ip->i_flag |= IN_CHANGE | IN_UPDATE;
  798         }
  799         /*
  800          * If we successfully wrote any data, and we are not the superuser
  801          * we clear the setuid and setgid bits as a precaution against
  802          * tampering.
  803          */
  804         if ((ip->i_mode & (ISUID | ISGID)) && resid > uio->uio_resid &&
  805             ap->a_cred) {
  806                 if (priv_check_cred(ap->a_cred, PRIV_VFS_RETAINSUGID, 0)) {
  807                         ip->i_mode &= ~(ISUID | ISGID);
  808                         DIP_SET(ip, i_mode, ip->i_mode);
  809                 }
  810         }
  811         if (error) {
  812                 if (ioflag & IO_UNIT) {
  813                         (void)ffs_truncate(vp, osize,
  814                             IO_NORMAL | (ioflag & IO_SYNC),
  815                             ap->a_cred, uio->uio_td);
  816                         uio->uio_offset -= resid - uio->uio_resid;
  817                         uio->uio_resid = resid;
  818                 }
  819         } else if (resid > uio->uio_resid && (ioflag & IO_SYNC))
  820                 error = ffs_update(vp, 1);
  821         return (error);
  822 }
  823 
  824 /*
  825  * get page routine
  826  */
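/*
 * If any portion of the requested page is already valid, zero its
 * invalid parts, free the other pages of the request, and return it
 * directly; otherwise fall back on the generic vnode pager to read the
 * pages in.
 */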
  827 static int
  828 ffs_getpages(ap)
  829         struct vop_getpages_args *ap;
  830 {
  831         int i;
  832         vm_page_t mreq;
  833         int pcount;
  834 
  835         pcount = round_page(ap->a_count) / PAGE_SIZE;
  836         mreq = ap->a_m[ap->a_reqpage];
  837 
  838         /*
  839          * if ANY DEV_BSIZE blocks are valid on a large filesystem block,
  840          * then the entire page is valid.  Since the page may be mapped,
  841          * user programs might reference data beyond the actual end of file
   842          * occurring within the page.  We have to zero that data.
  843          */
  844         VM_OBJECT_LOCK(mreq->object);
  845         if (mreq->valid) {
  846                 if (mreq->valid != VM_PAGE_BITS_ALL)
  847                         vm_page_zero_invalid(mreq, TRUE);
  848                 vm_page_lock_queues();
  849                 for (i = 0; i < pcount; i++) {
  850                         if (i != ap->a_reqpage) {
  851                                 vm_page_free(ap->a_m[i]);
  852                         }
  853                 }
  854                 vm_page_unlock_queues();
  855                 VM_OBJECT_UNLOCK(mreq->object);
  856                 return VM_PAGER_OK;
  857         }
  858         VM_OBJECT_UNLOCK(mreq->object);
  859 
  860         return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
  861                                             ap->a_count,
  862                                             ap->a_reqpage);
  863 }
  864 
  865 
  866 /*
  867  * Extended attribute area reading.
  868  */
  869 static int
  870 ffs_extread(struct vnode *vp, struct uio *uio, int ioflag)
  871 {
  872         struct inode *ip;
  873         struct ufs2_dinode *dp;
  874         struct fs *fs;
  875         struct buf *bp;
  876         ufs_lbn_t lbn, nextlbn;
  877         off_t bytesinfile;
  878         long size, xfersize, blkoffset;
  879         int error, orig_resid;
  880 
  881         ip = VTOI(vp);
  882         fs = ip->i_fs;
  883         dp = ip->i_din2;
  884 
  885 #ifdef INVARIANTS
  886         if (uio->uio_rw != UIO_READ || fs->fs_magic != FS_UFS2_MAGIC)
  887                 panic("ffs_extread: mode");
  888 
  889 #endif
  890         orig_resid = uio->uio_resid;
  891         KASSERT(orig_resid >= 0, ("ffs_extread: uio->uio_resid < 0"));
  892         if (orig_resid == 0)
  893                 return (0);
  894         KASSERT(uio->uio_offset >= 0, ("ffs_extread: uio->uio_offset < 0"));
  895 
  896         for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
  897                 if ((bytesinfile = dp->di_extsize - uio->uio_offset) <= 0)
  898                         break;
  899                 lbn = lblkno(fs, uio->uio_offset);
  900                 nextlbn = lbn + 1;
  901 
  902                 /*
  903                  * size of buffer.  The buffer representing the
  904                  * end of the file is rounded up to the size of
  905                  * the block type ( fragment or full block,
  906                  * depending ).
  907                  */
  908                 size = sblksize(fs, dp->di_extsize, lbn);
  909                 blkoffset = blkoff(fs, uio->uio_offset);
  910 
  911                 /*
  912                  * The amount we want to transfer in this iteration is
  913                  * one FS block less the amount of the data before
  914                  * our startpoint (duh!)
  915                  */
  916                 xfersize = fs->fs_bsize - blkoffset;
  917 
  918                 /*
  919                  * But if we actually want less than the block,
  920                  * or the file doesn't have a whole block more of data,
  921                  * then use the lesser number.
  922                  */
  923                 if (uio->uio_resid < xfersize)
  924                         xfersize = uio->uio_resid;
  925                 if (bytesinfile < xfersize)
  926                         xfersize = bytesinfile;
  927 
  928                 if (lblktosize(fs, nextlbn) >= dp->di_extsize) {
  929                         /*
  930                          * Don't do readahead if this is the end of the info.
  931                          */
  932                         error = bread(vp, -1 - lbn, size, NOCRED, &bp);
  933                 } else {
  934                         /*
  935                          * If we have a second block, then
  936                          * fire off a request for a readahead
  937                          * as well as a read. Note that the 4th and 5th
  938                          * arguments point to arrays of the size specified in
  939                          * the 6th argument.
  940                          */
  941                         int nextsize = sblksize(fs, dp->di_extsize, nextlbn);
  942 
  943                         nextlbn = -1 - nextlbn;
  944                         error = breadn(vp, -1 - lbn,
  945                             size, &nextlbn, &nextsize, 1, NOCRED, &bp);
  946                 }
  947                 if (error) {
  948                         brelse(bp);
  949                         bp = NULL;
  950                         break;
  951                 }
  952 
  953                 /*
  954                  * If IO_DIRECT then set B_DIRECT for the buffer.  This
  955                  * will cause us to attempt to release the buffer later on
  956                  * and will cause the buffer cache to attempt to free the
  957                  * underlying pages.
  958                  */
  959                 if (ioflag & IO_DIRECT)
  960                         bp->b_flags |= B_DIRECT;
  961 
  962                 /*
  963                  * We should only get non-zero b_resid when an I/O error
  964                  * has occurred, which should cause us to break above.
  965                  * However, if the short read did not cause an error,
  966                  * then we want to ensure that we do not uiomove bad
  967                  * or uninitialized data.
  968                  */
  969                 size -= bp->b_resid;
  970                 if (size < xfersize) {
  971                         if (size == 0)
  972                                 break;
  973                         xfersize = size;
  974                 }
  975 
  976                 error = uiomove((char *)bp->b_data + blkoffset,
  977                                         (int)xfersize, uio);
  978                 if (error)
  979                         break;
  980 
  981                 if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
  982                    (LIST_EMPTY(&bp->b_dep))) {
  983                         /*
  984                          * If there are no dependencies, and it's VMIO,
  985                          * then we don't need the buf, mark it available
  986                          * for freeing. The VM has the data.
  987                          */
  988                         bp->b_flags |= B_RELBUF;
  989                         brelse(bp);
  990                 } else {
  991                         /*
  992                          * Otherwise let whoever
  993                          * made the request take care of
  994                          * freeing it. We just queue
  995                          * it onto another list.
  996                          */
  997                         bqrelse(bp);
  998                 }
  999         }
 1000 
 1001         /*
 1002          * This can only happen in the case of an error
 1003          * because the loop above resets bp to NULL on each iteration
  1004          * and on normal completion has not set a new value into it,
  1005          * so it must have come from a 'break' statement.
 1006          */
 1007         if (bp != NULL) {
 1008                 if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
 1009                    (LIST_EMPTY(&bp->b_dep))) {
 1010                         bp->b_flags |= B_RELBUF;
 1011                         brelse(bp);
 1012                 } else {
 1013                         bqrelse(bp);
 1014                 }
 1015         }
 1016         return (error);
 1017 }
 1018 
 1019 /*
 1020  * Extended attribute area writing.
 1021  */
 1022 static int
 1023 ffs_extwrite(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *ucred)
 1024 {
 1025         struct inode *ip;
 1026         struct ufs2_dinode *dp;
 1027         struct fs *fs;
 1028         struct buf *bp;
 1029         ufs_lbn_t lbn;
 1030         off_t osize;
 1031         int blkoffset, error, flags, resid, size, xfersize;
 1032 
 1033         ip = VTOI(vp);
 1034         fs = ip->i_fs;
 1035         dp = ip->i_din2;
 1036 
 1037         KASSERT(!(ip->i_flag & IN_SPACECOUNTED), ("inode %u: inode is dead",
 1038             ip->i_number));
 1039 
 1040 #ifdef INVARIANTS
 1041         if (uio->uio_rw != UIO_WRITE || fs->fs_magic != FS_UFS2_MAGIC)
 1042                 panic("ffs_extwrite: mode");
 1043 #endif
 1044 
 1045         if (ioflag & IO_APPEND)
 1046                 uio->uio_offset = dp->di_extsize;
 1047         KASSERT(uio->uio_offset >= 0, ("ffs_extwrite: uio->uio_offset < 0"));
 1048         KASSERT(uio->uio_resid >= 0, ("ffs_extwrite: uio->uio_resid < 0"));
 1049         if ((uoff_t)uio->uio_offset + uio->uio_resid > NXADDR * fs->fs_bsize)
 1050                 return (EFBIG);
 1051 
 1052         resid = uio->uio_resid;
 1053         osize = dp->di_extsize;
 1054         flags = IO_EXT;
 1055         if ((ioflag & IO_SYNC) && !DOINGASYNC(vp))
 1056                 flags |= IO_SYNC;
 1057 
 1058         for (error = 0; uio->uio_resid > 0;) {
 1059                 lbn = lblkno(fs, uio->uio_offset);
 1060                 blkoffset = blkoff(fs, uio->uio_offset);
 1061                 xfersize = fs->fs_bsize - blkoffset;
 1062                 if (uio->uio_resid < xfersize)
 1063                         xfersize = uio->uio_resid;
 1064 
 1065                 /*
 1066                  * We must perform a read-before-write if the transfer size
 1067                  * does not cover the entire buffer.
 1068                  */
 1069                 if (fs->fs_bsize > xfersize)
 1070                         flags |= BA_CLRBUF;
 1071                 else
 1072                         flags &= ~BA_CLRBUF;
 1073                 error = UFS_BALLOC(vp, uio->uio_offset, xfersize,
 1074                     ucred, flags, &bp);
 1075                 if (error != 0)
 1076                         break;
 1077                 /*
 1078                  * If the buffer is not valid we have to clear out any
 1079                  * garbage data from the pages instantiated for the buffer.
 1080                  * If we do not, a failed uiomove() during a write can leave
 1081                  * the prior contents of the pages exposed to a userland
 1082                  * mmap().  XXX deal with uiomove() errors a better way.
 1083                  */
 1084                 if ((bp->b_flags & B_CACHE) == 0 && fs->fs_bsize <= xfersize)
 1085                         vfs_bio_clrbuf(bp);
 1086                 if (ioflag & IO_DIRECT)
 1087                         bp->b_flags |= B_DIRECT;
 1088 
 1089                 if (uio->uio_offset + xfersize > dp->di_extsize)
 1090                         dp->di_extsize = uio->uio_offset + xfersize;
 1091 
 1092                 size = sblksize(fs, dp->di_extsize, lbn) - bp->b_resid;
 1093                 if (size < xfersize)
 1094                         xfersize = size;
 1095 
 1096                 error =
 1097                     uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio);
 1098                 if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
 1099                    (LIST_EMPTY(&bp->b_dep))) {
 1100                         bp->b_flags |= B_RELBUF;
 1101                 }
 1102 
 1103                 /*
 1104                  * If IO_SYNC each buffer is written synchronously.  Otherwise
 1105                  * if we have a severe page deficiency write the buffer
 1106                  * asynchronously.  Otherwise try to cluster, and if that
 1107                  * doesn't do it then either do an async write (if O_DIRECT),
 1108                  * or a delayed write (if not).
 1109                  */
 1110                 if (ioflag & IO_SYNC) {
 1111                         (void)bwrite(bp);
 1112                 } else if (vm_page_count_severe() ||
 1113                             buf_dirty_count_severe() ||
 1114                             xfersize + blkoffset == fs->fs_bsize ||
 1115                             (ioflag & (IO_ASYNC | IO_DIRECT)))
 1116                         bawrite(bp);
 1117                 else
 1118                         bdwrite(bp);
 1119                 if (error || xfersize == 0)
 1120                         break;
 1121                 ip->i_flag |= IN_CHANGE;
 1122         }
 1123         /*
 1124          * If we successfully wrote any data, and we are not the superuser
 1125          * we clear the setuid and setgid bits as a precaution against
 1126          * tampering.
 1127          */
 1128         if ((ip->i_mode & (ISUID | ISGID)) && resid > uio->uio_resid && ucred) {
 1129                 if (priv_check_cred(ucred, PRIV_VFS_RETAINSUGID, 0)) {
 1130                         ip->i_mode &= ~(ISUID | ISGID);
 1131                         dp->di_mode = ip->i_mode;
 1132                 }
 1133         }
 1134         if (error) {
 1135                 if (ioflag & IO_UNIT) {
 1136                         (void)ffs_truncate(vp, osize,
 1137                             IO_EXT | (ioflag&IO_SYNC), ucred, uio->uio_td);
 1138                         uio->uio_offset -= resid - uio->uio_resid;
 1139                         uio->uio_resid = resid;
 1140                 }
 1141         } else if (resid > uio->uio_resid && (ioflag & IO_SYNC))
 1142                 error = ffs_update(vp, 1);
 1143         return (error);
 1144 }
 1145 
 1146 
 1147 /*
  1148  * Vnode operation to retrieve a named extended attribute.
 1149  *
 1150  * Locate a particular EA (nspace:name) in the area (ptr:length), and return
 1151  * the length of the EA, and possibly the pointer to the entry and to the data.
 1152  */
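/*
 * Each record in the extended attribute area, as parsed below, consists
 * of a 32-bit total record length, an 8-bit namespace, an 8-bit content
 * pad length, an 8-bit name length, the name itself padded out to an
 * 8-byte header boundary, and then the content followed by its pad bytes.
 */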
 1153 static int
 1154 ffs_findextattr(u_char *ptr, u_int length, int nspace, const char *name, u_char **eap, u_char **eac)
 1155 {
 1156         u_char *p, *pe, *pn, *p0;
 1157         int eapad1, eapad2, ealength, ealen, nlen;
 1158         uint32_t ul;
 1159 
 1160         pe = ptr + length;
 1161         nlen = strlen(name);
 1162 
 1163         for (p = ptr; p < pe; p = pn) {
 1164                 p0 = p;
 1165                 bcopy(p, &ul, sizeof(ul));
 1166                 pn = p + ul;
 1167                 /* make sure this entry is complete */
 1168                 if (pn > pe)
 1169                         break;
 1170                 p += sizeof(uint32_t);
 1171                 if (*p != nspace)
 1172                         continue;
 1173                 p++;
 1174                 eapad2 = *p++;
 1175                 if (*p != nlen)
 1176                         continue;
 1177                 p++;
 1178                 if (bcmp(p, name, nlen))
 1179                         continue;
 1180                 ealength = sizeof(uint32_t) + 3 + nlen;
 1181                 eapad1 = 8 - (ealength % 8);
 1182                 if (eapad1 == 8)
 1183                         eapad1 = 0;
 1184                 ealength += eapad1;
 1185                 ealen = ul - ealength - eapad2;
 1186                 p += nlen + eapad1;
 1187                 if (eap != NULL)
 1188                         *eap = p0;
 1189                 if (eac != NULL)
 1190                         *eac = p;
 1191                 return (ealen);
 1192         }
 1193         return(-1);
 1194 }
 1195 
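/*
 * Read the entire extended attribute area of a UFS2 inode into a
 * malloc(9)'ed buffer, allocating "extra" bytes of room beyond the
 * current size for the caller's use.
 */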
 1196 static int
 1197 ffs_rdextattr(u_char **p, struct vnode *vp, struct thread *td, int extra)
 1198 {
 1199         struct inode *ip;
 1200         struct ufs2_dinode *dp;
 1201         struct fs *fs;
 1202         struct uio luio;
 1203         struct iovec liovec;
 1204         int easize, error;
 1205         u_char *eae;
 1206 
 1207         ip = VTOI(vp);
 1208         fs = ip->i_fs;
 1209         dp = ip->i_din2;
 1210         easize = dp->di_extsize;
 1211         if ((uoff_t)easize + extra > NXADDR * fs->fs_bsize)
 1212                 return (EFBIG);
 1213 
 1214         eae = malloc(easize + extra, M_TEMP, M_WAITOK);
 1215 
 1216         liovec.iov_base = eae;
 1217         liovec.iov_len = easize;
 1218         luio.uio_iov = &liovec;
 1219         luio.uio_iovcnt = 1;
 1220         luio.uio_offset = 0;
 1221         luio.uio_resid = easize;
 1222         luio.uio_segflg = UIO_SYSSPACE;
 1223         luio.uio_rw = UIO_READ;
 1224         luio.uio_td = td;
 1225 
 1226         error = ffs_extread(vp, &luio, IO_EXT | IO_SYNC);
 1227         if (error) {
 1228                 free(eae, M_TEMP);
 1229                 return(error);
 1230         }
 1231         *p = eae;
 1232         return (0);
 1233 }
 1234 
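/*
 * The in-memory extended attribute area (i_ea_area) is serialized with
 * the IN_EA_LOCKED flag: waiters sleep on i_ea_refs under the vnode
 * interlock until the current holder drops the lock in ffs_unlock_ea().
 */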
 1235 static void
 1236 ffs_lock_ea(struct vnode *vp)
 1237 {
 1238         struct inode *ip;
 1239 
 1240         ip = VTOI(vp);
 1241         VI_LOCK(vp);
 1242         while (ip->i_flag & IN_EA_LOCKED) {
 1243                 ip->i_flag |= IN_EA_LOCKWAIT;
 1244                 msleep(&ip->i_ea_refs, &vp->v_interlock, PINOD + 2, "ufs_ea",
 1245                     0);
 1246         }
 1247         ip->i_flag |= IN_EA_LOCKED;
 1248         VI_UNLOCK(vp);
 1249 }
 1250 
 1251 static void
 1252 ffs_unlock_ea(struct vnode *vp)
 1253 {
 1254         struct inode *ip;
 1255 
 1256         ip = VTOI(vp);
 1257         VI_LOCK(vp);
 1258         if (ip->i_flag & IN_EA_LOCKWAIT)
 1259                 wakeup(&ip->i_ea_refs);
 1260         ip->i_flag &= ~(IN_EA_LOCKED | IN_EA_LOCKWAIT);
 1261         VI_UNLOCK(vp);
 1262 }
 1263 
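/*
 * Bring the extended attribute area into memory, or take an additional
 * reference (i_ea_refs) if it is already resident, so that several
 * extattr operations can share one copy until the last ffs_close_ea().
 */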
 1264 static int
 1265 ffs_open_ea(struct vnode *vp, struct ucred *cred, struct thread *td)
 1266 {
 1267         struct inode *ip;
 1268         struct ufs2_dinode *dp;
 1269         int error;
 1270 
 1271         ip = VTOI(vp);
 1272 
 1273         ffs_lock_ea(vp);
 1274         if (ip->i_ea_area != NULL) {
 1275                 ip->i_ea_refs++;
 1276                 ffs_unlock_ea(vp);
 1277                 return (0);
 1278         }
 1279         dp = ip->i_din2;
 1280         error = ffs_rdextattr(&ip->i_ea_area, vp, td, 0);
 1281         if (error) {
 1282                 ffs_unlock_ea(vp);
 1283                 return (error);
 1284         }
 1285         ip->i_ea_len = dp->di_extsize;
 1286         ip->i_ea_error = 0;
 1287         ip->i_ea_refs++;
 1288         ffs_unlock_ea(vp);
 1289         return (0);
 1290 }
 1291 
 1292 /*
 1293  * Vnode extattr transaction commit/abort
 1294  */
 1295 static int
 1296 ffs_close_ea(struct vnode *vp, int commit, struct ucred *cred, struct thread *td)
 1297 {
 1298         struct inode *ip;
 1299         struct uio luio;
 1300         struct iovec liovec;
 1301         int error;
 1302         struct ufs2_dinode *dp;
 1303 
 1304         ip = VTOI(vp);
 1305 
 1306         ffs_lock_ea(vp);
 1307         if (ip->i_ea_area == NULL) {
 1308                 ffs_unlock_ea(vp);
 1309                 return (EINVAL);
 1310         }
 1311         dp = ip->i_din2;
 1312         error = ip->i_ea_error;
 1313         if (commit && error == 0) {
 1314                 ASSERT_VOP_ELOCKED(vp, "ffs_close_ea commit");
 1315                 if (cred == NOCRED)
 1316                         cred =  vp->v_mount->mnt_cred;
 1317                 liovec.iov_base = ip->i_ea_area;
 1318                 liovec.iov_len = ip->i_ea_len;
 1319                 luio.uio_iov = &liovec;
 1320                 luio.uio_iovcnt = 1;
 1321                 luio.uio_offset = 0;
 1322                 luio.uio_resid = ip->i_ea_len;
 1323                 luio.uio_segflg = UIO_SYSSPACE;
 1324                 luio.uio_rw = UIO_WRITE;
 1325                 luio.uio_td = td;
 1326                 /* XXX: I'm not happy about truncating to zero size */
 1327                 if (ip->i_ea_len < dp->di_extsize)
 1328                         error = ffs_truncate(vp, 0, IO_EXT, cred, td);
 1329                 error = ffs_extwrite(vp, &luio, IO_EXT | IO_SYNC, cred);
 1330         }
 1331         if (--ip->i_ea_refs == 0) {
 1332                 free(ip->i_ea_area, M_TEMP);
 1333                 ip->i_ea_area = NULL;
 1334                 ip->i_ea_len = 0;
 1335                 ip->i_ea_error = 0;
 1336         }
 1337         ffs_unlock_ea(vp);
 1338         return (error);
 1339 }
 1340 
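       /*
        * On UFS2 the blocks backing the external attribute area are
        * addressed with negative logical block numbers (-1 down to
        * -NXADDR), which is how the strategy routine below recognizes
        * extended attribute I/O.
        */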
 1341 /*
 1342  * Vnode extattr strategy routine for fifos.
 1343  *
 1344  * We need to check for a read or write of the external attributes.
 1345  * Otherwise we just fall through and do the usual thing.
 1346  */
 1347 static int
 1348 ffsext_strategy(struct vop_strategy_args *ap)
 1349 /*
 1350 struct vop_strategy_args {
 1351         struct vnodeop_desc *a_desc;
 1352         struct vnode *a_vp;
 1353         struct buf *a_bp;
 1354 };
 1355 */
 1356 {
 1357         struct vnode *vp;
 1358         daddr_t lbn;
 1359 
 1360         vp = ap->a_vp;
 1361         lbn = ap->a_bp->b_lblkno;
 1362         if (VTOI(vp)->i_fs->fs_magic == FS_UFS2_MAGIC &&
 1363             lbn < 0 && lbn >= -NXADDR)
 1364                 return (VOP_STRATEGY_APV(&ufs_vnodeops, ap));
 1365         if (vp->v_type == VFIFO)
 1366                 return (VOP_STRATEGY_APV(&ufs_fifoops, ap));
 1367         panic("spec nodes went here");
 1368 }
 1369 
  1370 /*
  1371  * Vnode extattr transaction start (open).
  1372  */
 1373 static int
 1374 ffs_openextattr(struct vop_openextattr_args *ap)
 1375 /*
 1376 struct vop_openextattr_args {
 1377         struct vnodeop_desc *a_desc;
 1378         struct vnode *a_vp;
 1379         IN struct ucred *a_cred;
 1380         IN struct thread *a_td;
 1381 };
 1382 */
 1383 {
 1384         struct inode *ip;
 1385         struct fs *fs;
 1386 
 1387         ip = VTOI(ap->a_vp);
 1388         fs = ip->i_fs;
 1389 
 1390         if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
 1391                 return (EOPNOTSUPP);
 1392 
 1393         return (ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td));
 1394 }
 1395 
 1396 
 1397 /*
 1398  * Vnode extattr transaction commit/abort
 1399  */
 1400 static int
 1401 ffs_closeextattr(struct vop_closeextattr_args *ap)
 1402 /*
 1403 struct vop_closeextattr_args {
 1404         struct vnodeop_desc *a_desc;
 1405         struct vnode *a_vp;
 1406         int a_commit;
 1407         IN struct ucred *a_cred;
 1408         IN struct thread *a_td;
 1409 };
 1410 */
 1411 {
 1412         struct inode *ip;
 1413         struct fs *fs;
 1414 
 1415         ip = VTOI(ap->a_vp);
 1416         fs = ip->i_fs;
 1417 
 1418         if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
 1419                 return (EOPNOTSUPP);
 1420 
 1421         if (ap->a_commit && (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY))
 1422                 return (EROFS);
 1423 
 1424         return (ffs_close_ea(ap->a_vp, ap->a_commit, ap->a_cred, ap->a_td));
 1425 }
 1426 
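       /*
        * Each record in the external attribute area has the layout that
        * ffs_setextattr() constructs below:
        *
        *      uint32_t  total record length (header, name, value, padding)
        *      uint8_t   attribute namespace
        *      uint8_t   value padding length (eapad2)
        *      uint8_t   name length
        *                name, zero-padded (eapad1) to an 8-byte boundary
        *                value, zero-padded (eapad2) to an 8-byte boundary
        */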
 1427 /*
 1428  * Vnode operation to remove a named attribute.
 1429  */
 1430 static int
 1431 ffs_deleteextattr(struct vop_deleteextattr_args *ap)
 1432 /*
 1433 vop_deleteextattr {
 1434         IN struct vnode *a_vp;
 1435         IN int a_attrnamespace;
 1436         IN const char *a_name;
 1437         IN struct ucred *a_cred;
 1438         IN struct thread *a_td;
 1439 };
 1440 */
 1441 {
 1442         struct inode *ip;
 1443         struct fs *fs;
 1444         uint32_t ealength, ul;
 1445         int ealen, olen, eapad1, eapad2, error, i, easize;
 1446         u_char *eae, *p;
 1447 
 1448         ip = VTOI(ap->a_vp);
 1449         fs = ip->i_fs;
 1450 
 1451         if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
 1452                 return (EOPNOTSUPP);
 1453 
 1454         if (strlen(ap->a_name) == 0)
 1455                 return (EINVAL);
 1456 
 1457         if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
 1458                 return (EROFS);
 1459 
 1460         error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
 1461             ap->a_cred, ap->a_td, VWRITE);
 1462         if (error) {
 1463 
 1464                 /*
  1465                  * ffs_lock_ea is not needed here, because the vnode
 1466                  * must be exclusively locked.
 1467                  */
 1468                 if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
 1469                         ip->i_ea_error = error;
 1470                 return (error);
 1471         }
 1472 
 1473         error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
 1474         if (error)
 1475                 return (error);
 1476 
 1477         ealength = eapad1 = ealen = eapad2 = 0;
 1478 
 1479         eae = malloc(ip->i_ea_len, M_TEMP, M_WAITOK);
 1480         bcopy(ip->i_ea_area, eae, ip->i_ea_len);
 1481         easize = ip->i_ea_len;
 1482 
 1483         olen = ffs_findextattr(eae, easize, ap->a_attrnamespace, ap->a_name,
 1484             &p, NULL);
 1485         if (olen == -1) {
 1486                 /* delete but nonexistent */
 1487                 free(eae, M_TEMP);
 1488                 ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
 1489                 return(ENOATTR);
 1490         }
 1491         bcopy(p, &ul, sizeof ul);
 1492         i = p - eae + ul;
 1493         if (ul != ealength) {
 1494                 bcopy(p + ul, p + ealength, easize - i);
 1495                 easize += (ealength - ul);
 1496         }
 1497         if (easize > NXADDR * fs->fs_bsize) {
 1498                 free(eae, M_TEMP);
 1499                 ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
 1500                 if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
 1501                         ip->i_ea_error = ENOSPC;
 1502                 return(ENOSPC);
 1503         }
 1504         p = ip->i_ea_area;
 1505         ip->i_ea_area = eae;
 1506         ip->i_ea_len = easize;
 1507         free(p, M_TEMP);
 1508         error = ffs_close_ea(ap->a_vp, 1, ap->a_cred, ap->a_td);
 1509         return(error);
 1510 }
 1511 
 1512 /*
 1513  * Vnode operation to retrieve a named extended attribute.
 1514  */
 1515 static int
 1516 ffs_getextattr(struct vop_getextattr_args *ap)
 1517 /*
 1518 vop_getextattr {
 1519         IN struct vnode *a_vp;
 1520         IN int a_attrnamespace;
 1521         IN const char *a_name;
 1522         INOUT struct uio *a_uio;
 1523         OUT size_t *a_size;
 1524         IN struct ucred *a_cred;
 1525         IN struct thread *a_td;
 1526 };
 1527 */
 1528 {
 1529         struct inode *ip;
 1530         struct fs *fs;
 1531         u_char *eae, *p;
 1532         unsigned easize;
 1533         int error, ealen;
 1534 
 1535         ip = VTOI(ap->a_vp);
 1536         fs = ip->i_fs;
 1537 
 1538         if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
 1539                 return (EOPNOTSUPP);
 1540 
 1541         error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
 1542             ap->a_cred, ap->a_td, VREAD);
 1543         if (error)
 1544                 return (error);
 1545 
 1546         error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
 1547         if (error)
 1548                 return (error);
 1549 
 1550         eae = ip->i_ea_area;
 1551         easize = ip->i_ea_len;
 1552 
 1553         ealen = ffs_findextattr(eae, easize, ap->a_attrnamespace, ap->a_name,
 1554             NULL, &p);
 1555         if (ealen >= 0) {
 1556                 error = 0;
 1557                 if (ap->a_size != NULL)
 1558                         *ap->a_size = ealen;
 1559                 else if (ap->a_uio != NULL)
 1560                         error = uiomove(p, ealen, ap->a_uio);
 1561         } else
 1562                 error = ENOATTR;
 1563 
 1564         ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
 1565         return(error);
 1566 }
 1567 
  1568 /*
  1569  * Vnode operation to list the extended attribute names on a vnode.
  1570  */
 1571 static int
 1572 ffs_listextattr(struct vop_listextattr_args *ap)
 1573 /*
 1574 vop_listextattr {
 1575         IN struct vnode *a_vp;
 1576         IN int a_attrnamespace;
 1577         INOUT struct uio *a_uio;
 1578         OUT size_t *a_size;
 1579         IN struct ucred *a_cred;
 1580         IN struct thread *a_td;
 1581 };
 1582 */
 1583 {
 1584         struct inode *ip;
 1585         struct fs *fs;
 1586         u_char *eae, *p, *pe, *pn;
 1587         unsigned easize;
 1588         uint32_t ul;
 1589         int error, ealen;
 1590 
 1591         ip = VTOI(ap->a_vp);
 1592         fs = ip->i_fs;
 1593 
 1594         if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
 1595                 return (EOPNOTSUPP);
 1596 
 1597         error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
 1598             ap->a_cred, ap->a_td, VREAD);
 1599         if (error)
 1600                 return (error);
 1601 
 1602         error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
 1603         if (error)
 1604                 return (error);
 1605         eae = ip->i_ea_area;
 1606         easize = ip->i_ea_len;
 1607 
 1608         error = 0;
 1609         if (ap->a_size != NULL)
 1610                 *ap->a_size = 0;
 1611         pe = eae + easize;
 1612         for(p = eae; error == 0 && p < pe; p = pn) {
 1613                 bcopy(p, &ul, sizeof(ul));
 1614                 pn = p + ul;
 1615                 if (pn > pe)
 1616                         break;
 1617                 p += sizeof(ul);
 1618                 if (*p++ != ap->a_attrnamespace)
 1619                         continue;
 1620                 p++;    /* pad2 */
 1621                 ealen = *p;
 1622                 if (ap->a_size != NULL) {
 1623                         *ap->a_size += ealen + 1;
 1624                 } else if (ap->a_uio != NULL) {
 1625                         error = uiomove(p, ealen + 1, ap->a_uio);
 1626                 }
 1627         }
 1628         ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
 1629         return(error);
 1630 }
 1631 
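       /*
        * Record sizing example: for the 4-byte name "test" the header is
        * sizeof(uint32_t) + 3 + 4 = 11 bytes, so eapad1 = 5 rounds the
        * header and name up to 16 bytes; a 10-byte value gets eapad2 = 6,
        * for a total record length of 11 + 5 + 10 + 6 = 32 bytes.
        */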
 1632 /*
 1633  * Vnode operation to set a named attribute.
 1634  */
 1635 static int
 1636 ffs_setextattr(struct vop_setextattr_args *ap)
 1637 /*
 1638 vop_setextattr {
 1639         IN struct vnode *a_vp;
 1640         IN int a_attrnamespace;
 1641         IN const char *a_name;
 1642         INOUT struct uio *a_uio;
 1643         IN struct ucred *a_cred;
 1644         IN struct thread *a_td;
 1645 };
 1646 */
 1647 {
 1648         struct inode *ip;
 1649         struct fs *fs;
 1650         uint32_t ealength, ul;
 1651         ssize_t ealen;
 1652         int olen, eapad1, eapad2, error, i, easize;
 1653         u_char *eae, *p;
 1654 
 1655         ip = VTOI(ap->a_vp);
 1656         fs = ip->i_fs;
 1657 
 1658         if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
 1659                 return (EOPNOTSUPP);
 1660 
 1661         if (strlen(ap->a_name) == 0)
 1662                 return (EINVAL);
 1663 
 1664         /* XXX Now unsupported API to delete EAs using NULL uio. */
 1665         if (ap->a_uio == NULL)
 1666                 return (EOPNOTSUPP);
 1667 
 1668         if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
 1669                 return (EROFS);
 1670 
 1671         ealen = ap->a_uio->uio_resid;
 1672         if (ealen < 0 || ealen > lblktosize(fs, NXADDR))
 1673                 return (EINVAL);
 1674 
 1675         error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
 1676             ap->a_cred, ap->a_td, VWRITE);
 1677         if (error) {
 1678 
 1679                 /*
  1680                  * ffs_lock_ea is not needed here, because the vnode
 1681                  * must be exclusively locked.
 1682                  */
 1683                 if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
 1684                         ip->i_ea_error = error;
 1685                 return (error);
 1686         }
 1687 
 1688         error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
 1689         if (error)
 1690                 return (error);
 1691 
 1692         ealength = sizeof(uint32_t) + 3 + strlen(ap->a_name);
 1693         eapad1 = 8 - (ealength % 8);
 1694         if (eapad1 == 8)
 1695                 eapad1 = 0;
 1696         eapad2 = 8 - (ealen % 8);
 1697         if (eapad2 == 8)
 1698                 eapad2 = 0;
 1699         ealength += eapad1 + ealen + eapad2;
 1700 
 1701         eae = malloc(ip->i_ea_len + ealength, M_TEMP, M_WAITOK);
 1702         bcopy(ip->i_ea_area, eae, ip->i_ea_len);
 1703         easize = ip->i_ea_len;
 1704 
 1705         olen = ffs_findextattr(eae, easize,
 1706             ap->a_attrnamespace, ap->a_name, &p, NULL);
 1707         if (olen == -1) {
 1708                 /* new, append at end */
 1709                 p = eae + easize;
 1710                 easize += ealength;
 1711         } else {
 1712                 bcopy(p, &ul, sizeof ul);
 1713                 i = p - eae + ul;
 1714                 if (ul != ealength) {
 1715                         bcopy(p + ul, p + ealength, easize - i);
 1716                         easize += (ealength - ul);
 1717                 }
 1718         }
 1719         if (easize > lblktosize(fs, NXADDR)) {
 1720                 free(eae, M_TEMP);
 1721                 ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
 1722                 if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
 1723                         ip->i_ea_error = ENOSPC;
 1724                 return(ENOSPC);
 1725         }
 1726         bcopy(&ealength, p, sizeof(ealength));
 1727         p += sizeof(ealength);
 1728         *p++ = ap->a_attrnamespace;
 1729         *p++ = eapad2;
 1730         *p++ = strlen(ap->a_name);
 1731         strcpy(p, ap->a_name);
 1732         p += strlen(ap->a_name);
 1733         bzero(p, eapad1);
 1734         p += eapad1;
 1735         error = uiomove(p, ealen, ap->a_uio);
 1736         if (error) {
 1737                 free(eae, M_TEMP);
 1738                 ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
 1739                 if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
 1740                         ip->i_ea_error = error;
 1741                 return(error);
 1742         }
 1743         p += ealen;
 1744         bzero(p, eapad2);
 1745 
 1746         p = ip->i_ea_area;
 1747         ip->i_ea_area = eae;
 1748         ip->i_ea_len = easize;
 1749         free(p, M_TEMP);
 1750         error = ffs_close_ea(ap->a_vp, 1, ap->a_cred, ap->a_td);
 1751         return(error);
 1752 }
 1753 
 1754 /*
 1755  * Vnode pointer to File handle
 1756  */
 1757 static int
 1758 ffs_vptofh(struct vop_vptofh_args *ap)
 1759 /*
 1760 vop_vptofh {
 1761         IN struct vnode *a_vp;
 1762         IN struct fid *a_fhp;
 1763 };
 1764 */
 1765 {
 1766         struct inode *ip;
 1767         struct ufid *ufhp;
 1768 
 1769         ip = VTOI(ap->a_vp);
 1770         ufhp = (struct ufid *)ap->a_fhp;
 1771         ufhp->ufid_len = sizeof(struct ufid);
 1772         ufhp->ufid_ino = ip->i_number;
 1773         ufhp->ufid_gen = ip->i_gen;
 1774         return (0);
 1775 }
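
The extended attribute vnode operations above back the extattr_get_file(2)
family of system calls.  The following is a minimal userland sketch, not part
of the kernel source, assuming a writable UFS2 file named ./testfile and the
user attribute namespace:

    #include <sys/types.h>
    #include <sys/extattr.h>

    #include <err.h>
    #include <stdio.h>

    int
    main(void)
    {
            const char *path = "./testfile";        /* assumed test file */
            const char value[] = "hello";
            char buf[64];
            ssize_t len;

            /* Exercises ffs_setextattr(): create or replace "user.test". */
            if (extattr_set_file(path, EXTATTR_NAMESPACE_USER, "test",
                value, sizeof(value)) < 0)
                    err(1, "extattr_set_file");

            /* Exercises ffs_getextattr(): read the value back. */
            len = extattr_get_file(path, EXTATTR_NAMESPACE_USER, "test",
                buf, sizeof(buf));
            if (len < 0)
                    err(1, "extattr_get_file");
            printf("user.test = %.*s\n", (int)len, buf);

            /* Exercises ffs_deleteextattr(): remove the attribute. */
            if (extattr_delete_file(path, EXTATTR_NAMESPACE_USER, "test") < 0)
                    err(1, "extattr_delete_file");
            return (0);
    }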
