FreeBSD/Linux Kernel Cross Reference
sys/ufs/ffs/ffs_alloc.c

/*-
 * SPDX-License-Identifier: (BSD-2-Clause-FreeBSD AND BSD-3-Clause)
 *
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Marshall
 * Kirk McKusick and Network Associates Laboratories, the Security
 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
 * research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)ffs_alloc.c 8.18 (Berkeley) 5/26/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_quota.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/capsicum.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/gsb_crc32.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/taskqueue.h>
#include <sys/vnode.h>

#include <security/audit/audit.h>

#include <geom/geom.h>
#include <geom/geom_vfs.h>

#include <ufs/ufs/dir.h>
#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>
#include <ufs/ufs/ufsmount.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>
#include <ufs/ffs/softdep.h>

typedef ufs2_daddr_t allocfcn_t(struct inode *ip, u_int cg, ufs2_daddr_t bpref,
                                  int size, int rsize);
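
/*
 * allocfcn_t is the pluggable per-cylinder-group allocator type:
 * ffs_hashalloc() below takes a pointer to one (ffs_alloccg for blocks,
 * ffs_nodealloccg for inodes) so that a single cylinder-group search
 * strategy can drive both block and inode allocation.
 */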

static ufs2_daddr_t ffs_alloccg(struct inode *, u_int, ufs2_daddr_t, int, int);
static ufs2_daddr_t
              ffs_alloccgblk(struct inode *, struct buf *, ufs2_daddr_t, int);
static void     ffs_blkfree_cg(struct ufsmount *, struct fs *,
                    struct vnode *, ufs2_daddr_t, long, ino_t,
                    struct workhead *);
#ifdef INVARIANTS
static int      ffs_checkblk(struct inode *, ufs2_daddr_t, long);
#endif
static ufs2_daddr_t ffs_clusteralloc(struct inode *, u_int, ufs2_daddr_t, int);
static ino_t    ffs_dirpref(struct inode *);
static ufs2_daddr_t ffs_fragextend(struct inode *, u_int, ufs2_daddr_t,
                    int, int);
static ufs2_daddr_t     ffs_hashalloc
                (struct inode *, u_int, ufs2_daddr_t, int, int, allocfcn_t *);
static ufs2_daddr_t ffs_nodealloccg(struct inode *, u_int, ufs2_daddr_t, int,
                    int);
static ufs1_daddr_t ffs_mapsearch(struct fs *, struct cg *, ufs2_daddr_t, int);
static int      ffs_reallocblks_ufs1(struct vop_reallocblks_args *);
static int      ffs_reallocblks_ufs2(struct vop_reallocblks_args *);
static void     ffs_ckhash_cg(struct buf *);

/*
 * Allocate a block in the filesystem.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *      inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 */
int
ffs_alloc(struct inode *ip,
        ufs2_daddr_t lbn,
        ufs2_daddr_t bpref,
        int size,
        int flags,
        struct ucred *cred,
        ufs2_daddr_t *bnp)
{
        struct fs *fs;
        struct ufsmount *ump;
        ufs2_daddr_t bno;
        u_int cg, reclaimed;
        int64_t delta;
#ifdef QUOTA
        int error;
#endif

        *bnp = 0;
        ump = ITOUMP(ip);
        fs = ump->um_fs;
        mtx_assert(UFS_MTX(ump), MA_OWNED);
#ifdef INVARIANTS
        if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
                printf("dev = %s, bsize = %ld, size = %d, fs = %s\n",
                    devtoname(ump->um_dev), (long)fs->fs_bsize, size,
                    fs->fs_fsmnt);
                panic("ffs_alloc: bad size");
        }
        if (cred == NOCRED)
                panic("ffs_alloc: missing credential");
#endif /* INVARIANTS */
        reclaimed = 0;
retry:
#ifdef QUOTA
        UFS_UNLOCK(ump);
        error = chkdq(ip, btodb(size), cred, 0);
        if (error)
                return (error);
        UFS_LOCK(ump);
#endif
        if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
                goto nospace;
        if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE) &&
            freespace(fs, fs->fs_minfree) - numfrags(fs, size) < 0)
                goto nospace;
        if (bpref >= fs->fs_size)
                bpref = 0;
        if (bpref == 0)
                cg = ino_to_cg(fs, ip->i_number);
        else
                cg = dtog(fs, bpref);
        bno = ffs_hashalloc(ip, cg, bpref, size, size, ffs_alloccg);
        if (bno > 0) {
                delta = btodb(size);
                DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
                if (flags & IO_EXT)
                        UFS_INODE_SET_FLAG(ip, IN_CHANGE);
                else
                        UFS_INODE_SET_FLAG(ip, IN_CHANGE | IN_UPDATE);
                *bnp = bno;
                return (0);
        }
nospace:
#ifdef QUOTA
        UFS_UNLOCK(ump);
        /*
         * Restore user's disk quota because allocation failed.
         */
        (void) chkdq(ip, -btodb(size), cred, FORCE);
        UFS_LOCK(ump);
#endif
        if (reclaimed == 0 && (flags & IO_BUFLOCKED) == 0) {
                reclaimed = 1;
                softdep_request_cleanup(fs, ITOV(ip), cred, FLUSH_BLOCKS_WAIT);
                goto retry;
        }
        if (ffs_fsfail_cleanup_locked(ump, 0)) {
                UFS_UNLOCK(ump);
                return (ENXIO);
        }
        if (reclaimed > 0 &&
            ppsratecheck(&ump->um_last_fullmsg, &ump->um_secs_fullmsg, 1)) {
                UFS_UNLOCK(ump);
                ffs_fserr(fs, ip->i_number, "filesystem full");
                uprintf("\n%s: write failed, filesystem is full\n",
                    fs->fs_fsmnt);
        } else {
                UFS_UNLOCK(ump);
        }
        return (ENOSPC);
}
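
/*
 * Sketch of the search policy behind the ffs_hashalloc() call above.
 * Its body lies outside this excerpt, so this is a reconstruction of
 * the documented behavior rather than a copy: try the preferred
 * cylinder group, then quadratically rehash across the others, and
 * finally fall back to a brute-force scan of every group (icg below
 * denotes the cylinder group originally passed in):
 *
 *	result = (*allocator)(ip, cg, pref, size, rsize);
 *	if (result)
 *		return (result);
 *	for (i = 1; i < fs->fs_ncg; i *= 2) {	(quadratic rehash)
 *		cg += i;
 *		if (cg >= fs->fs_ncg)
 *			cg -= fs->fs_ncg;
 *		result = (*allocator)(ip, cg, 0, size, rsize);
 *		if (result)
 *			return (result);
 *	}
 *	cg = (icg + 2) % fs->fs_ncg;		(brute-force scan)
 *	for (i = 2; i < fs->fs_ncg; i++) {
 *		result = (*allocator)(ip, cg, 0, size, rsize);
 *		if (result)
 *			return (result);
 *		if (++cg == fs->fs_ncg)
 *			cg = 0;
 *	}
 *	return (0);
 */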

/*
 * Reallocate a fragment to a bigger size.
 *
 * The number and size of the old block are given, and a preference
 * and new size are also specified. The allocator attempts to extend
 * the original block. Failing that, the regular block allocator is
 * invoked to get an appropriate block.
 */
int
ffs_realloccg(struct inode *ip,
        ufs2_daddr_t lbprev,
        ufs2_daddr_t bprev,
        ufs2_daddr_t bpref,
        int osize,
        int nsize,
        int flags,
        struct ucred *cred,
        struct buf **bpp)
{
        struct vnode *vp;
        struct fs *fs;
        struct buf *bp;
        struct ufsmount *ump;
        u_int cg, request, reclaimed;
        int error, gbflags;
        ufs2_daddr_t bno;
        int64_t delta;

        vp = ITOV(ip);
        ump = ITOUMP(ip);
        fs = ump->um_fs;
        bp = NULL;
        gbflags = (flags & BA_UNMAPPED) != 0 ? GB_UNMAPPED : 0;
#ifdef WITNESS
        gbflags |= IS_SNAPSHOT(ip) ? GB_NOWITNESS : 0;
#endif

        mtx_assert(UFS_MTX(ump), MA_OWNED);
#ifdef INVARIANTS
        if (vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED)
                panic("ffs_realloccg: allocation on suspended filesystem");
        if ((u_int)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
            (u_int)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
                printf(
                "dev = %s, bsize = %ld, osize = %d, nsize = %d, fs = %s\n",
                    devtoname(ump->um_dev), (long)fs->fs_bsize, osize,
                    nsize, fs->fs_fsmnt);
                panic("ffs_realloccg: bad size");
        }
        if (cred == NOCRED)
                panic("ffs_realloccg: missing credential");
#endif /* INVARIANTS */
        reclaimed = 0;
retry:
        if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE) &&
            freespace(fs, fs->fs_minfree) - numfrags(fs, nsize - osize) < 0) {
                goto nospace;
        }
        if (bprev == 0) {
                printf("dev = %s, bsize = %ld, bprev = %jd, fs = %s\n",
                    devtoname(ump->um_dev), (long)fs->fs_bsize, (intmax_t)bprev,
                    fs->fs_fsmnt);
                panic("ffs_realloccg: bad bprev");
        }
        UFS_UNLOCK(ump);
        /*
         * Allocate the extra space in the buffer.
         */
        error = bread_gb(vp, lbprev, osize, NOCRED, gbflags, &bp);
        if (error) {
                return (error);
        }

        if (bp->b_blkno == bp->b_lblkno) {
                if (lbprev >= UFS_NDADDR)
                        panic("ffs_realloccg: lbprev out of range");
                bp->b_blkno = fsbtodb(fs, bprev);
        }

#ifdef QUOTA
        error = chkdq(ip, btodb(nsize - osize), cred, 0);
        if (error) {
                brelse(bp);
                return (error);
        }
#endif
        /*
         * Check for extension in the existing location.
         */
        *bpp = NULL;
        cg = dtog(fs, bprev);
        UFS_LOCK(ump);
        bno = ffs_fragextend(ip, cg, bprev, osize, nsize);
        if (bno) {
                if (bp->b_blkno != fsbtodb(fs, bno))
                        panic("ffs_realloccg: bad blockno");
                delta = btodb(nsize - osize);
                DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
                if (flags & IO_EXT)
                        UFS_INODE_SET_FLAG(ip, IN_CHANGE);
                else
                        UFS_INODE_SET_FLAG(ip, IN_CHANGE | IN_UPDATE);
                allocbuf(bp, nsize);
                bp->b_flags |= B_DONE;
                vfs_bio_bzero_buf(bp, osize, nsize - osize);
                if ((bp->b_flags & (B_MALLOC | B_VMIO)) == B_VMIO)
                        vfs_bio_set_valid(bp, osize, nsize - osize);
                *bpp = bp;
                return (0);
        }
        /*
         * Allocate a new disk location.
         */
        if (bpref >= fs->fs_size)
                bpref = 0;
        switch ((int)fs->fs_optim) {
        case FS_OPTSPACE:
                /*
                 * Allocate an exact sized fragment. Although this makes
                 * best use of space, we will waste time relocating it if
                 * the file continues to grow. If the fragmentation is
                 * less than half of the minimum free reserve, we choose
                 * to begin optimizing for time.
                 */
                request = nsize;
                if (fs->fs_minfree <= 5 ||
                    fs->fs_cstotal.cs_nffree >
                    (off_t)fs->fs_dsize * fs->fs_minfree / (2 * 100))
                        break;
                log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n",
                        fs->fs_fsmnt);
                fs->fs_optim = FS_OPTTIME;
                break;
        case FS_OPTTIME:
                /*
                 * At this point we have discovered a file that is trying to
                 * grow a small fragment to a larger fragment. To save time,
                 * we allocate a full sized block, then free the unused portion.
                 * If the file continues to grow, the `ffs_fragextend' call
                 * above will be able to grow it in place without further
                 * copying. If aberrant programs cause disk fragmentation to
                 * grow within 2% of the free reserve, we choose to begin
                 * optimizing for space.
                 */
                request = fs->fs_bsize;
                if (fs->fs_cstotal.cs_nffree <
                    (off_t)fs->fs_dsize * (fs->fs_minfree - 2) / 100)
                        break;
                log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n",
                        fs->fs_fsmnt);
                fs->fs_optim = FS_OPTSPACE;
                break;
        default:
                printf("dev = %s, optim = %ld, fs = %s\n",
                    devtoname(ump->um_dev), (long)fs->fs_optim, fs->fs_fsmnt);
                panic("ffs_realloccg: bad optim");
                /* NOTREACHED */
        }
        bno = ffs_hashalloc(ip, cg, bpref, request, nsize, ffs_alloccg);
        if (bno > 0) {
                bp->b_blkno = fsbtodb(fs, bno);
                if (!DOINGSOFTDEP(vp))
                        /*
                         * The usual case is that a smaller fragment that
                         * was just allocated has been replaced with a bigger
                         * fragment or a full-size block. If it is marked as
                         * B_DELWRI, the current contents have not been written
                         * to disk. It is possible that the block was written
                         * earlier, but very uncommon. If the block has never
                         * been written, there is no need to send a BIO_DELETE
                         * for it when it is freed. The gain from avoiding the
                         * TRIMs for the common case of unwritten blocks far
                         * exceeds the cost of the write amplification for the
                         * uncommon case of failing to send a TRIM for a block
                         * that had been written.
                         */
                        ffs_blkfree(ump, fs, ump->um_devvp, bprev, (long)osize,
                            ip->i_number, vp->v_type, NULL,
                            (bp->b_flags & B_DELWRI) != 0 ?
                            NOTRIM_KEY : SINGLETON_KEY);
                delta = btodb(nsize - osize);
                DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
                if (flags & IO_EXT)
                        UFS_INODE_SET_FLAG(ip, IN_CHANGE);
                else
                        UFS_INODE_SET_FLAG(ip, IN_CHANGE | IN_UPDATE);
                allocbuf(bp, nsize);
                bp->b_flags |= B_DONE;
                vfs_bio_bzero_buf(bp, osize, nsize - osize);
                if ((bp->b_flags & (B_MALLOC | B_VMIO)) == B_VMIO)
                        vfs_bio_set_valid(bp, osize, nsize - osize);
                *bpp = bp;
                return (0);
        }
#ifdef QUOTA
        UFS_UNLOCK(ump);
        /*
         * Restore user's disk quota because allocation failed.
         */
        (void) chkdq(ip, -btodb(nsize - osize), cred, FORCE);
        UFS_LOCK(ump);
#endif
nospace:
        /*
         * No space available.
         */
        if (reclaimed == 0 && (flags & IO_BUFLOCKED) == 0) {
                reclaimed = 1;
                UFS_UNLOCK(ump);
                if (bp) {
                        brelse(bp);
                        bp = NULL;
                }
                UFS_LOCK(ump);
                softdep_request_cleanup(fs, vp, cred, FLUSH_BLOCKS_WAIT);
                goto retry;
        }
        if (bp)
                brelse(bp);
        if (ffs_fsfail_cleanup_locked(ump, 0)) {
                UFS_UNLOCK(ump);
                return (ENXIO);
        }
        if (reclaimed > 0 &&
            ppsratecheck(&ump->um_last_fullmsg, &ump->um_secs_fullmsg, 1)) {
                UFS_UNLOCK(ump);
                ffs_fserr(fs, ip->i_number, "filesystem full");
                uprintf("\n%s: write failed, filesystem is full\n",
                    fs->fs_fsmnt);
        } else {
                UFS_UNLOCK(ump);
        }
        return (ENOSPC);
}
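
/*
 * The two thresholds in the switch above form a hysteresis band around
 * the minfree reserve. Worked example with fs_minfree = 8% and
 * fs_dsize = 1,000,000 fragments: running FS_OPTSPACE, the filesystem
 * flips to FS_OPTTIME once cs_nffree drops to
 * dsize * minfree / (2 * 100) = 40,000 fragments or fewer (less than
 * half the reserve left in fragments); running FS_OPTTIME, it flips
 * back to FS_OPTSPACE once cs_nffree reaches
 * dsize * (minfree - 2) / 100 = 60,000 fragments (fragmentation within
 * 2% of the reserve). The 20,000-fragment gap keeps the policy from
 * oscillating on every reallocation. With fs_minfree <= 5, the
 * SPACE-to-TIME switch is disabled entirely.
 */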

/*
 * Reallocate a sequence of blocks into a contiguous sequence of blocks.
 *
 * The vnode and an array of buffer pointers for a range of sequential
 * logical blocks to be made contiguous are given. The allocator attempts
 * to find a range of sequential blocks starting as close as possible
 * to the end of the allocation for the logical block immediately
 * preceding the current range. If successful, the physical block numbers
 * in the buffer pointers and in the inode are changed to reflect the new
 * allocation. If unsuccessful, the allocation is left unchanged. Whether
 * the reallocation succeeded is returned. Note that the error return is
 * not reflected back to the user. Rather the previous block allocation
 * will be used.
 */

SYSCTL_NODE(_vfs, OID_AUTO, ffs, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "FFS filesystem");

static int doasyncfree = 1;
SYSCTL_INT(_vfs_ffs, OID_AUTO, doasyncfree, CTLFLAG_RW, &doasyncfree, 0,
"do not force synchronous writes when blocks are reallocated");

static int doreallocblks = 1;
SYSCTL_INT(_vfs_ffs, OID_AUTO, doreallocblks, CTLFLAG_RW, &doreallocblks, 0,
"enable block reallocation");

static int dotrimcons = 1;
SYSCTL_INT(_vfs_ffs, OID_AUTO, dotrimcons, CTLFLAG_RWTUN, &dotrimcons, 0,
"enable BIO_DELETE / TRIM consolidation");

static int maxclustersearch = 10;
SYSCTL_INT(_vfs_ffs, OID_AUTO, maxclustersearch, CTLFLAG_RW, &maxclustersearch,
0, "max number of cylinder groups to search for contiguous blocks");

#ifdef DIAGNOSTIC
static int prtrealloc = 0;
SYSCTL_INT(_debug, OID_AUTO, ffs_prtrealloc, CTLFLAG_RW, &prtrealloc, 0,
        "print out FFS filesystem block reallocation operations");
#endif
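
/*
 * The four knobs above are live sysctls under vfs.ffs (prtrealloc
 * lives under debug.ffs_prtrealloc and only exists in DIAGNOSTIC
 * kernels), and dotrimcons is additionally a loader tunable
 * (CTLFLAG_RWTUN). For example, block reallocation can be disabled
 * system-wide at runtime with:
 *
 *	sysctl vfs.ffs.doreallocblks=0
 *
 * and the current TRIM-consolidation setting inspected with:
 *
 *	sysctl vfs.ffs.dotrimcons
 */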

int
ffs_reallocblks(
        struct vop_reallocblks_args /* {
                struct vnode *a_vp;
                struct cluster_save *a_buflist;
        } */ *ap)
{
        struct ufsmount *ump;
        int error;

        /*
         * We used to skip reallocating the blocks of a file into a
         * contiguous sequence if the underlying flash device requested
         * BIO_DELETE notifications, because devices that benefit from
         * BIO_DELETE also benefit from not moving the data. However,
         * the destination for the data is usually moved before the data
         * is written to the initially allocated location, so we rarely
         * suffer the penalty of extra writes. With the addition of the
         * consolidation of contiguous blocks into single BIO_DELETE
         * operations, having fewer but larger contiguous blocks reduces
         * the number of (slow and expensive) BIO_DELETE operations. So
         * when doing BIO_DELETE consolidation, we do block reallocation.
         *
         * Skip if reallocblks has been disabled globally.
         */
        ump = ap->a_vp->v_mount->mnt_data;
        if ((((ump->um_flags) & UM_CANDELETE) != 0 && dotrimcons == 0) ||
            doreallocblks == 0)
                return (ENOSPC);

        /*
         * We can't wait in softdep prealloc as it may fsync and recurse
         * here.  Instead we simply fail to reallocate blocks if this
         * rare condition arises.
         */
        if (DOINGSUJ(ap->a_vp))
                if (softdep_prealloc(ap->a_vp, MNT_NOWAIT) != 0)
                        return (ENOSPC);
        vn_seqc_write_begin(ap->a_vp);
        error = ump->um_fstype == UFS1 ? ffs_reallocblks_ufs1(ap) :
            ffs_reallocblks_ufs2(ap);
        vn_seqc_write_end(ap->a_vp);
        return (error);
}

static int
ffs_reallocblks_ufs1(
        struct vop_reallocblks_args /* {
                struct vnode *a_vp;
                struct cluster_save *a_buflist;
        } */ *ap)
{
        struct fs *fs;
        struct inode *ip;
        struct vnode *vp;
        struct buf *sbp, *ebp, *bp;
        ufs1_daddr_t *bap, *sbap, *ebap;
        struct cluster_save *buflist;
        struct ufsmount *ump;
        ufs_lbn_t start_lbn, end_lbn;
        ufs1_daddr_t soff, newblk, blkno;
        ufs2_daddr_t pref;
        struct indir start_ap[UFS_NIADDR + 1], end_ap[UFS_NIADDR + 1], *idp;
        int i, cg, len, start_lvl, end_lvl, ssize;

        vp = ap->a_vp;
        ip = VTOI(vp);
        ump = ITOUMP(ip);
        fs = ump->um_fs;
        /*
         * If we are not tracking block clusters or if we have less than 4%
         * free blocks left, then do not attempt to cluster. Running with
         * less than 5% free block reserve is not recommended and those that
         * choose to do so do not expect to have good file layout.
         */
        if (fs->fs_contigsumsize <= 0 || freespace(fs, 4) < 0)
                return (ENOSPC);
        buflist = ap->a_buflist;
        len = buflist->bs_nchildren;
        start_lbn = buflist->bs_children[0]->b_lblkno;
        end_lbn = start_lbn + len - 1;
#ifdef INVARIANTS
        for (i = 0; i < len; i++)
                if (!ffs_checkblk(ip,
                   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
                        panic("ffs_reallocblks: unallocated block 1");
        for (i = 1; i < len; i++)
                if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
                        panic("ffs_reallocblks: non-logical cluster");
        blkno = buflist->bs_children[0]->b_blkno;
        ssize = fsbtodb(fs, fs->fs_frag);
        for (i = 1; i < len - 1; i++)
                if (buflist->bs_children[i]->b_blkno != blkno + (i * ssize))
                        panic("ffs_reallocblks: non-physical cluster %d", i);
#endif
        /*
         * If the cluster crosses the boundary for the first indirect
         * block, leave space for the indirect block. Indirect blocks
         * are initially laid out in a position after the last direct
         * block. Block reallocation would usually destroy locality by
         * moving the indirect block out of the way to make room for
         * data blocks if we didn't compensate here. We should also do
         * this for other indirect block boundaries, but it is only
         * important for the first one.
         */
        if (start_lbn < UFS_NDADDR && end_lbn >= UFS_NDADDR)
                return (ENOSPC);
        /*
         * If the latest allocation is in a new cylinder group, assume that
         * the filesystem has decided to move and do not force it back to
         * the previous cylinder group.
         */
        if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
            dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
                return (ENOSPC);
        if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
            ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
                return (ENOSPC);
        /*
         * Get the starting offset and block map for the first block.
         */
        if (start_lvl == 0) {
                sbap = &ip->i_din1->di_db[0];
                soff = start_lbn;
        } else {
                idp = &start_ap[start_lvl - 1];
                if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
                        brelse(sbp);
                        return (ENOSPC);
                }
                sbap = (ufs1_daddr_t *)sbp->b_data;
                soff = idp->in_off;
        }
        /*
         * If the block range spans two block maps, get the second map.
         */
        ebap = NULL;
        if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
                ssize = len;
        } else {
#ifdef INVARIANTS
                if (start_lvl > 0 &&
                    start_ap[start_lvl - 1].in_lbn == idp->in_lbn)
                        panic("ffs_reallocblk: start == end");
#endif
                ssize = len - (idp->in_off + 1);
                if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
                        goto fail;
                ebap = (ufs1_daddr_t *)ebp->b_data;
        }
        /*
         * Find the preferred location for the cluster. If we have not
         * previously failed at this endeavor, then follow our standard
         * preference calculation. If we have failed at it, then pick up
         * where we last ended our search.
         */
        UFS_LOCK(ump);
        if (ip->i_nextclustercg == -1)
                pref = ffs_blkpref_ufs1(ip, start_lbn, soff, sbap);
        else
                pref = cgdata(fs, ip->i_nextclustercg);
        /*
         * Search the block map looking for an allocation of the desired size.
         * To avoid wasting too much time, we limit the number of cylinder
         * groups that we will search.
         */
        cg = dtog(fs, pref);
        for (i = min(maxclustersearch, fs->fs_ncg); i > 0; i--) {
                if ((newblk = ffs_clusteralloc(ip, cg, pref, len)) != 0)
                        break;
                cg += 1;
                if (cg >= fs->fs_ncg)
                        cg = 0;
        }
        /*
         * If we have failed in our search, record where we gave up for
         * next time. Otherwise, fall back to our usual search criterion.
         */
        if (newblk == 0) {
                ip->i_nextclustercg = cg;
                UFS_UNLOCK(ump);
                goto fail;
        }
        ip->i_nextclustercg = -1;
        /*
         * We have found a new contiguous block.
         *
         * First we have to replace the old block pointers with the new
         * block pointers in the inode and indirect blocks associated
         * with the file.
         */
#ifdef DIAGNOSTIC
        if (prtrealloc)
                printf("realloc: ino %ju, lbns %jd-%jd\n\told:",
                    (uintmax_t)ip->i_number,
                    (intmax_t)start_lbn, (intmax_t)end_lbn);
#endif
        blkno = newblk;
        for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
                if (i == ssize) {
                        bap = ebap;
                        soff = -i;
                }
#ifdef INVARIANTS
                if (!ffs_checkblk(ip,
                   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
                        panic("ffs_reallocblks: unallocated block 2");
                if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != *bap)
                        panic("ffs_reallocblks: alloc mismatch");
#endif
#ifdef DIAGNOSTIC
                if (prtrealloc)
                        printf(" %d,", *bap);
#endif
                if (DOINGSOFTDEP(vp)) {
                        if (sbap == &ip->i_din1->di_db[0] && i < ssize)
                                softdep_setup_allocdirect(ip, start_lbn + i,
                                    blkno, *bap, fs->fs_bsize, fs->fs_bsize,
                                    buflist->bs_children[i]);
                        else
                                softdep_setup_allocindir_page(ip, start_lbn + i,
                                    i < ssize ? sbp : ebp, soff + i, blkno,
                                    *bap, buflist->bs_children[i]);
                }
                *bap++ = blkno;
        }
        /*
         * Next we must write out the modified inode and indirect blocks.
         * For strict correctness, the writes should be synchronous since
         * the old block values may have been written to disk. In practice
         * they are almost never written, but if we are concerned about
         * strict correctness, the `doasyncfree' flag should be set to zero.
         *
         * The test on `doasyncfree' should be changed to test a flag
         * that shows whether the associated buffers and inodes have
         * been written. The flag should be set when the cluster is
         * started and cleared whenever the buffer or inode is flushed.
         * We can then check below to see if it is set, and do the
         * synchronous write only when it has been cleared.
         */
        if (sbap != &ip->i_din1->di_db[0]) {
                if (doasyncfree)
                        bdwrite(sbp);
                else
                        bwrite(sbp);
        } else {
                UFS_INODE_SET_FLAG(ip, IN_CHANGE | IN_UPDATE);
                if (!doasyncfree)
                        ffs_update(vp, 1);
        }
        if (ssize < len) {
                if (doasyncfree)
                        bdwrite(ebp);
                else
                        bwrite(ebp);
        }
        /*
         * Last, free the old blocks and assign the new blocks to the buffers.
         */
#ifdef DIAGNOSTIC
        if (prtrealloc)
                printf("\n\tnew:");
#endif
        for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
                bp = buflist->bs_children[i];
                if (!DOINGSOFTDEP(vp))
                        /*
                         * The usual case is that a set of N-contiguous blocks
                         * that was just allocated has been replaced with a
                         * set of N+1-contiguous blocks. If they are marked as
                         * B_DELWRI, the current contents have not been written
                         * to disk. It is possible that the blocks were written
                         * earlier, but very uncommon. If the blocks have never
                         * been written, there is no need to send a BIO_DELETE
                         * for them when they are freed. The gain from avoiding
                         * the TRIMs for the common case of unwritten blocks
                         * far exceeds the cost of the write amplification for
                         * the uncommon case of failing to send a TRIM for the
                         * blocks that had been written.
                         */
                        ffs_blkfree(ump, fs, ump->um_devvp,
                            dbtofsb(fs, bp->b_blkno),
                            fs->fs_bsize, ip->i_number, vp->v_type, NULL,
                            (bp->b_flags & B_DELWRI) != 0 ?
                            NOTRIM_KEY : SINGLETON_KEY);
                bp->b_blkno = fsbtodb(fs, blkno);
#ifdef INVARIANTS
                if (!ffs_checkblk(ip, dbtofsb(fs, bp->b_blkno), fs->fs_bsize))
                        panic("ffs_reallocblks: unallocated block 3");
#endif
#ifdef DIAGNOSTIC
                if (prtrealloc)
                        printf(" %d,", blkno);
#endif
        }
#ifdef DIAGNOSTIC
        if (prtrealloc) {
                prtrealloc--;
                printf("\n");
        }
#endif
        return (0);

fail:
        if (ssize < len)
                brelse(ebp);
        if (sbap != &ip->i_din1->di_db[0])
                brelse(sbp);
        return (ENOSPC);
}
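
/*
 * The sbap/ebap handling in ffs_reallocblks_ufs1() above (and in its
 * UFS2 twin below) is easiest to see with a picture. When a cluster of
 * len blocks spans an indirect-block boundary, its block pointers live
 * in two maps: ssize of them in the first map sbap, starting at offset
 * soff, and the remaining len - ssize at the start of the second map
 * ebap:
 *
 *	sbap: [ ... soff ... soff + ssize - 1 ]
 *	ebap: [ 0 ... len - ssize - 1 ... ]
 *
 * The copy loop switches maps when i == ssize and resets soff to -i,
 * so that soff + i is always the offset into the current map.
 */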

static int
ffs_reallocblks_ufs2(
        struct vop_reallocblks_args /* {
                struct vnode *a_vp;
                struct cluster_save *a_buflist;
        } */ *ap)
{
        struct fs *fs;
        struct inode *ip;
        struct vnode *vp;
        struct buf *sbp, *ebp, *bp;
        ufs2_daddr_t *bap, *sbap, *ebap;
        struct cluster_save *buflist;
        struct ufsmount *ump;
        ufs_lbn_t start_lbn, end_lbn;
        ufs2_daddr_t soff, newblk, blkno, pref;
        struct indir start_ap[UFS_NIADDR + 1], end_ap[UFS_NIADDR + 1], *idp;
        int i, cg, len, start_lvl, end_lvl, ssize;

        vp = ap->a_vp;
        ip = VTOI(vp);
        ump = ITOUMP(ip);
        fs = ump->um_fs;
        /*
         * If we are not tracking block clusters or if we have less than 4%
         * free blocks left, then do not attempt to cluster. Running with
         * less than 5% free block reserve is not recommended and those that
         * choose to do so do not expect to have good file layout.
         */
        if (fs->fs_contigsumsize <= 0 || freespace(fs, 4) < 0)
                return (ENOSPC);
        buflist = ap->a_buflist;
        len = buflist->bs_nchildren;
        start_lbn = buflist->bs_children[0]->b_lblkno;
        end_lbn = start_lbn + len - 1;
#ifdef INVARIANTS
        for (i = 0; i < len; i++)
                if (!ffs_checkblk(ip,
                   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
                        panic("ffs_reallocblks: unallocated block 1");
        for (i = 1; i < len; i++)
                if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
                        panic("ffs_reallocblks: non-logical cluster");
        blkno = buflist->bs_children[0]->b_blkno;
        ssize = fsbtodb(fs, fs->fs_frag);
        for (i = 1; i < len - 1; i++)
                if (buflist->bs_children[i]->b_blkno != blkno + (i * ssize))
                        panic("ffs_reallocblks: non-physical cluster %d", i);
#endif
        /*
         * If the cluster crosses the boundary for the first indirect
         * block, do not move anything in it. Indirect blocks are
         * usually initially laid out in a position between the data
         * blocks. Block reallocation would usually destroy locality by
         * moving the indirect block out of the way to make room for
         * data blocks if we didn't compensate here. We should also do
         * this for other indirect block boundaries, but it is only
         * important for the first one.
         */
        if (start_lbn < UFS_NDADDR && end_lbn >= UFS_NDADDR)
                return (ENOSPC);
        /*
         * If the latest allocation is in a new cylinder group, assume that
         * the filesystem has decided to move and do not force it back to
         * the previous cylinder group.
         */
        if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
            dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
                return (ENOSPC);
        if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
            ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
                return (ENOSPC);
        /*
         * Get the starting offset and block map for the first block.
         */
        if (start_lvl == 0) {
                sbap = &ip->i_din2->di_db[0];
                soff = start_lbn;
        } else {
                idp = &start_ap[start_lvl - 1];
                if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
                        brelse(sbp);
                        return (ENOSPC);
                }
                sbap = (ufs2_daddr_t *)sbp->b_data;
                soff = idp->in_off;
        }
        /*
         * If the block range spans two block maps, get the second map.
         */
        ebap = NULL;
        if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
                ssize = len;
        } else {
#ifdef INVARIANTS
                if (start_lvl > 0 &&
                    start_ap[start_lvl - 1].in_lbn == idp->in_lbn)
                        panic("ffs_reallocblk: start == end");
#endif
                ssize = len - (idp->in_off + 1);
                if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
                        goto fail;
                ebap = (ufs2_daddr_t *)ebp->b_data;
        }
        /*
         * Find the preferred location for the cluster. If we have not
         * previously failed at this endeavor, then follow our standard
         * preference calculation. If we have failed at it, then pick up
         * where we last ended our search.
         */
        UFS_LOCK(ump);
        if (ip->i_nextclustercg == -1)
                pref = ffs_blkpref_ufs2(ip, start_lbn, soff, sbap);
        else
                pref = cgdata(fs, ip->i_nextclustercg);
        /*
         * Search the block map looking for an allocation of the desired size.
         * To avoid wasting too much time, we limit the number of cylinder
         * groups that we will search.
         */
        cg = dtog(fs, pref);
        for (i = min(maxclustersearch, fs->fs_ncg); i > 0; i--) {
                if ((newblk = ffs_clusteralloc(ip, cg, pref, len)) != 0)
                        break;
                cg += 1;
                if (cg >= fs->fs_ncg)
                        cg = 0;
        }
        /*
         * If we have failed in our search, record where we gave up for
         * next time. Otherwise, fall back to our usual search criterion.
         */
        if (newblk == 0) {
                ip->i_nextclustercg = cg;
                UFS_UNLOCK(ump);
                goto fail;
        }
        ip->i_nextclustercg = -1;
        /*
         * We have found a new contiguous block.
         *
         * First we have to replace the old block pointers with the new
         * block pointers in the inode and indirect blocks associated
         * with the file.
         */
#ifdef DIAGNOSTIC
        if (prtrealloc)
                printf("realloc: ino %ju, lbns %jd-%jd\n\told:", (uintmax_t)ip->i_number,
                    (intmax_t)start_lbn, (intmax_t)end_lbn);
#endif
        blkno = newblk;
        for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
                if (i == ssize) {
                        bap = ebap;
                        soff = -i;
                }
#ifdef INVARIANTS
                if (!ffs_checkblk(ip,
                   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
                        panic("ffs_reallocblks: unallocated block 2");
                if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != *bap)
                        panic("ffs_reallocblks: alloc mismatch");
#endif
#ifdef DIAGNOSTIC
                if (prtrealloc)
                        printf(" %jd,", (intmax_t)*bap);
#endif
                if (DOINGSOFTDEP(vp)) {
                        if (sbap == &ip->i_din2->di_db[0] && i < ssize)
                                softdep_setup_allocdirect(ip, start_lbn + i,
                                    blkno, *bap, fs->fs_bsize, fs->fs_bsize,
                                    buflist->bs_children[i]);
                        else
                                softdep_setup_allocindir_page(ip, start_lbn + i,
                                    i < ssize ? sbp : ebp, soff + i, blkno,
                                    *bap, buflist->bs_children[i]);
                }
                *bap++ = blkno;
        }
        /*
         * Next we must write out the modified inode and indirect blocks.
         * For strict correctness, the writes should be synchronous since
         * the old block values may have been written to disk. In practice
         * they are almost never written, but if we are concerned about
         * strict correctness, the `doasyncfree' flag should be set to zero.
         *
         * The test on `doasyncfree' should be changed to test a flag
         * that shows whether the associated buffers and inodes have
         * been written. The flag should be set when the cluster is
         * started and cleared whenever the buffer or inode is flushed.
         * We can then check below to see if it is set, and do the
         * synchronous write only when it has been cleared.
         */
        if (sbap != &ip->i_din2->di_db[0]) {
                if (doasyncfree)
                        bdwrite(sbp);
                else
                        bwrite(sbp);
        } else {
                UFS_INODE_SET_FLAG(ip, IN_CHANGE | IN_UPDATE);
                if (!doasyncfree)
                        ffs_update(vp, 1);
        }
        if (ssize < len) {
                if (doasyncfree)
                        bdwrite(ebp);
                else
                        bwrite(ebp);
        }
        /*
         * Last, free the old blocks and assign the new blocks to the buffers.
         */
#ifdef DIAGNOSTIC
        if (prtrealloc)
                printf("\n\tnew:");
#endif
        for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
                bp = buflist->bs_children[i];
                if (!DOINGSOFTDEP(vp))
                        /*
                         * The usual case is that a set of N-contiguous blocks
                         * that was just allocated has been replaced with a
                         * set of N+1-contiguous blocks. If they are marked as
                         * B_DELWRI, the current contents have not been written
                         * to disk. It is possible that the blocks were written
                         * earlier, but very uncommon. If the blocks have never
                         * been written, there is no need to send a BIO_DELETE
                         * for them when they are freed. The gain from avoiding
                         * the TRIMs for the common case of unwritten blocks
                         * far exceeds the cost of the write amplification for
                         * the uncommon case of failing to send a TRIM for the
                         * blocks that had been written.
                         */
                        ffs_blkfree(ump, fs, ump->um_devvp,
                            dbtofsb(fs, bp->b_blkno),
                            fs->fs_bsize, ip->i_number, vp->v_type, NULL,
                            (bp->b_flags & B_DELWRI) != 0 ?
                            NOTRIM_KEY : SINGLETON_KEY);
                bp->b_blkno = fsbtodb(fs, blkno);
#ifdef INVARIANTS
                if (!ffs_checkblk(ip, dbtofsb(fs, bp->b_blkno), fs->fs_bsize))
                        panic("ffs_reallocblks: unallocated block 3");
#endif
#ifdef DIAGNOSTIC
                if (prtrealloc)
                        printf(" %jd,", (intmax_t)blkno);
#endif
        }
#ifdef DIAGNOSTIC
        if (prtrealloc) {
                prtrealloc--;
                printf("\n");
        }
#endif
        return (0);

fail:
        if (ssize < len)
                brelse(ebp);
        if (sbap != &ip->i_din2->di_db[0])
                brelse(sbp);
        return (ENOSPC);
}

/*
 * Allocate an inode in the filesystem.
 *
 * If allocating a directory, use ffs_dirpref to select the inode.
 * If allocating in a directory, the following hierarchy is followed:
 *   1) allocate the preferred inode.
 *   2) allocate an inode in the same cylinder group.
 *   3) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 * If no inode preference is given the following hierarchy is used
 * to allocate an inode:
 *   1) allocate an inode in cylinder group 0.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 */
int
ffs_valloc(struct vnode *pvp,
        int mode,
        struct ucred *cred,
        struct vnode **vpp)
{
        struct inode *pip;
        struct fs *fs;
        struct inode *ip;
        struct timespec ts;
        struct ufsmount *ump;
        ino_t ino, ipref;
        u_int cg;
        int error, reclaimed;

        *vpp = NULL;
        pip = VTOI(pvp);
        ump = ITOUMP(pip);
        fs = ump->um_fs;

        UFS_LOCK(ump);
        reclaimed = 0;
retry:
        if (fs->fs_cstotal.cs_nifree == 0)
                goto noinodes;

        if ((mode & IFMT) == IFDIR)
                ipref = ffs_dirpref(pip);
        else
                ipref = pip->i_number;
        if (ipref >= fs->fs_ncg * fs->fs_ipg)
                ipref = 0;
        cg = ino_to_cg(fs, ipref);
        /*
         * Track the number of dirs created one after another in the
         * same cg without intervening creation of files.
         */
        if ((mode & IFMT) == IFDIR) {
                if (fs->fs_contigdirs[cg] < 255)
                        fs->fs_contigdirs[cg]++;
        } else {
                if (fs->fs_contigdirs[cg] > 0)
                        fs->fs_contigdirs[cg]--;
        }
        ino = (ino_t)ffs_hashalloc(pip, cg, ipref, mode, 0,
                                        (allocfcn_t *)ffs_nodealloccg);
        if (ino == 0)
                goto noinodes;
        /*
         * Get rid of the cached old vnode, force allocation of a new vnode
         * for this inode. If this fails, release the allocated ino and
         * return the error.
         */
        if ((error = ffs_vgetf(pvp->v_mount, ino, LK_EXCLUSIVE, vpp,
            FFSV_FORCEINSMQ | FFSV_REPLACE | FFSV_NEWINODE)) != 0) {
                ffs_vfree(pvp, ino, mode);
                return (error);
        }
        /*
         * We got an inode, so check mode and panic if it is already allocated.
         */
        ip = VTOI(*vpp);
        if (ip->i_mode) {
                printf("mode = 0%o, inum = %ju, fs = %s\n",
                    ip->i_mode, (uintmax_t)ip->i_number, fs->fs_fsmnt);
                panic("ffs_valloc: dup alloc");
        }
        if (DIP(ip, i_blocks) && (fs->fs_flags & FS_UNCLEAN) == 0) {  /* XXX */
                printf("free inode %s/%lu had %ld blocks\n",
                    fs->fs_fsmnt, (u_long)ino, (long)DIP(ip, i_blocks));
                DIP_SET(ip, i_blocks, 0);
        }
        ip->i_flags = 0;
        DIP_SET(ip, i_flags, 0);
        /*
         * Set up a new generation number for this inode.
         */
        while (ip->i_gen == 0 || ++ip->i_gen == 0)
                ip->i_gen = arc4random();
        DIP_SET(ip, i_gen, ip->i_gen);
        if (fs->fs_magic == FS_UFS2_MAGIC) {
                vfs_timestamp(&ts);
                ip->i_din2->di_birthtime = ts.tv_sec;
                ip->i_din2->di_birthnsec = ts.tv_nsec;
        }
        ip->i_flag = 0;
        (*vpp)->v_vflag = 0;
        (*vpp)->v_type = VNON;
        if (fs->fs_magic == FS_UFS2_MAGIC) {
                (*vpp)->v_op = &ffs_vnodeops2;
                UFS_INODE_SET_FLAG(ip, IN_UFS2);
        } else {
                (*vpp)->v_op = &ffs_vnodeops1;
        }
        return (0);
noinodes:
        if (reclaimed == 0) {
                reclaimed = 1;
                softdep_request_cleanup(fs, pvp, cred, FLUSH_INODES_WAIT);
                goto retry;
        }
        if (ffs_fsfail_cleanup_locked(ump, 0)) {
                UFS_UNLOCK(ump);
                return (ENXIO);
        }
        if (ppsratecheck(&ump->um_last_fullmsg, &ump->um_secs_fullmsg, 1)) {
                UFS_UNLOCK(ump);
                ffs_fserr(fs, pip->i_number, "out of inodes");
                uprintf("\n%s: create/symlink failed, no inodes free\n",
                    fs->fs_fsmnt);
        } else {
                UFS_UNLOCK(ump);
        }
        return (ENOSPC);
}
 1223 
 1224 /*
 1225  * Find a cylinder group to place a directory.
 1226  *
 1227  * The policy implemented by this algorithm is to allocate a
 1228  * directory inode in the same cylinder group as its parent
 1229          * directory, but also to reserve space for its files' inodes
 1230  * and data. Restrict the number of directories which may be
 1231  * allocated one after another in the same cylinder group
 1232  * without intervening allocation of files.
 1233  *
 1234          * If we are allocating a first-level directory, force allocation
 1235          * in another cylinder group.
 1236  */
 1237 static ino_t
 1238 ffs_dirpref(struct inode *pip)
 1239 {
 1240         struct fs *fs;
 1241         int cg, prefcg, dirsize, cgsize;
 1242         u_int avgifree, avgbfree, avgndir, curdirsize;
 1243         u_int minifree, minbfree, maxndir;
 1244         u_int mincg, minndir;
 1245         u_int maxcontigdirs;
 1246 
 1247         mtx_assert(UFS_MTX(ITOUMP(pip)), MA_OWNED);
 1248         fs = ITOFS(pip);
 1249 
 1250         avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
 1251         avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
 1252         avgndir = fs->fs_cstotal.cs_ndir / fs->fs_ncg;
 1253 
 1254         /*
 1255          * Force allocation in another cg if creating a first level dir.
 1256          */
 1257         ASSERT_VOP_LOCKED(ITOV(pip), "ffs_dirpref");
 1258         if (ITOV(pip)->v_vflag & VV_ROOT) {
 1259                 prefcg = arc4random() % fs->fs_ncg;
 1260                 mincg = prefcg;
 1261                 minndir = fs->fs_ipg;
 1262                 for (cg = prefcg; cg < fs->fs_ncg; cg++)
 1263                         if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
 1264                             fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
 1265                             fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
 1266                                 mincg = cg;
 1267                                 minndir = fs->fs_cs(fs, cg).cs_ndir;
 1268                         }
 1269                 for (cg = 0; cg < prefcg; cg++)
 1270                         if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
 1271                             fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
 1272                             fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
 1273                                 mincg = cg;
 1274                                 minndir = fs->fs_cs(fs, cg).cs_ndir;
 1275                         }
 1276                 return ((ino_t)(fs->fs_ipg * mincg));
 1277         }
 1278 
 1279         /*
 1280          * Compute the various limits used for
 1281          * optimal allocation of a directory inode.
 1282          */
 1283         maxndir = min(avgndir + fs->fs_ipg / 16, fs->fs_ipg);
 1284         minifree = avgifree - avgifree / 4;
 1285         if (minifree < 1)
 1286                 minifree = 1;
 1287         minbfree = avgbfree - avgbfree / 4;
 1288         if (minbfree < 1)
 1289                 minbfree = 1;
 1290         cgsize = fs->fs_fsize * fs->fs_fpg;
 1291         dirsize = fs->fs_avgfilesize * fs->fs_avgfpdir;
 1292         curdirsize = avgndir ? (cgsize - avgbfree * fs->fs_bsize) / avgndir : 0;
 1293         if (dirsize < curdirsize)
 1294                 dirsize = curdirsize;
 1295         if (dirsize <= 0)
 1296                 maxcontigdirs = 0;              /* dirsize overflowed */
 1297         else
 1298                 maxcontigdirs = min((avgbfree * fs->fs_bsize) / dirsize, 255);
 1299         if (fs->fs_avgfpdir > 0)
 1300                 maxcontigdirs = min(maxcontigdirs,
 1301                                     fs->fs_ipg / fs->fs_avgfpdir);
 1302         if (maxcontigdirs == 0)
 1303                 maxcontigdirs = 1;
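        /*
         * Worked example, assuming the common defaults of
         * fs_avgfilesize == 16384 and fs_avgfpdir == 64 on a filesystem
         * with 32768-byte blocks: dirsize starts at 16384 * 64 == 1MB,
         * so a cg with 2048 free blocks (64MB) yields
         * maxcontigdirs = min(64, 255) == 64, further clamped to
         * fs_ipg / 64.
         */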
 1304 
 1305         /*
 1306          * Limit the number of dirs in one cg and reserve space for
 1307          * regular files, but only if we have no deficit in
 1308          * inodes or space.
 1309          *
 1310          * We are trying to find a suitable cylinder group nearby
 1311          * our preferred cylinder group to place a new directory.
 1312          * We scan from our preferred cylinder group forward looking
 1313          * for a cylinder group that meets our criterion. If we get
 1314          * to the final cylinder group and do not find anything,
 1315          * we start scanning forwards from the beginning of the
 1316          * filesystem. While it might seem sensible to start scanning
 1317          * backwards or even to alternate looking forward and backward,
 1318          * this approach fails badly when the filesystem is nearly full.
 1319          * Specifically, we first search all the areas that have no space
 1320          * and finally try the one preceding that. We repeat this on
 1321          * every request and in the case of the final block end up
 1322          * searching the entire filesystem. By jumping to the front
 1323          * of the filesystem, our future forward searches always look
 1324          * in new cylinder groups, so we find every possible block after
 1325          * one pass over the filesystem.
 1326          */
 1327         prefcg = ino_to_cg(fs, pip->i_number);
 1328         for (cg = prefcg; cg < fs->fs_ncg; cg++)
 1329                 if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
 1330                     fs->fs_cs(fs, cg).cs_nifree >= minifree &&
 1331                     fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
 1332                         if (fs->fs_contigdirs[cg] < maxcontigdirs)
 1333                                 return ((ino_t)(fs->fs_ipg * cg));
 1334                 }
 1335         for (cg = 0; cg < prefcg; cg++)
 1336                 if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
 1337                     fs->fs_cs(fs, cg).cs_nifree >= minifree &&
 1338                     fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
 1339                         if (fs->fs_contigdirs[cg] < maxcontigdirs)
 1340                                 return ((ino_t)(fs->fs_ipg * cg));
 1341                 }
 1342         /*
 1343          * This is a backstop when we have a deficit in space.
 1344          */
 1345         for (cg = prefcg; cg < fs->fs_ncg; cg++)
 1346                 if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
 1347                         return ((ino_t)(fs->fs_ipg * cg));
 1348         for (cg = 0; cg < prefcg; cg++)
 1349                 if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
 1350                         break;
 1351         return ((ino_t)(fs->fs_ipg * cg));
 1352 }
 1353 
 1354 /*
 1355  * Select the desired position for the next block in a file.  The file is
 1356  * logically divided into sections. The first section is composed of the
 1357  * direct blocks and the next fs_maxbpg blocks. Each additional section
 1358  * contains fs_maxbpg blocks.
 1359  *
 1360  * If no blocks have been allocated in the first section, the policy is to
 1361  * request a block in the same cylinder group as the inode that describes
 1362  * the file. The first indirect is allocated immediately following the last
 1363  * direct block and the data blocks for the first indirect immediately
 1364  * follow it.
 1365  *
 1366  * If no blocks have been allocated in any other section, the indirect
 1367  * block(s) are allocated in the same cylinder group as the file's inode in an
 1368  * area reserved immediately following the inode blocks. The policy for
 1369  * the data blocks is to place them in a cylinder group with a greater than
 1370  * average number of free blocks. An appropriate cylinder group is found
 1371  * by using a rotor that sweeps the cylinder groups. When a new group of
 1372  * blocks is needed, the sweep begins in the cylinder group following the
 1373  * cylinder group from which the previous allocation was made. The sweep
 1374  * continues until a cylinder group with greater than the average number
 1375  * of free blocks is found. If the allocation is for the first block in an
 1376  * indirect block or the previous block is a hole, then the information on
 1377  * the previous allocation is unavailable; here a best guess is made based
 1378  * on the logical block number being allocated.
 1379  *
 1380  * If a section is already partially allocated, the policy is to
 1381  * allocate blocks contiguously within the section if possible.
 1382  */
 1383 ufs2_daddr_t
 1384 ffs_blkpref_ufs1(struct inode *ip,
 1385         ufs_lbn_t lbn,
 1386         int indx,
 1387         ufs1_daddr_t *bap)
 1388 {
 1389         struct fs *fs;
 1390         u_int cg, inocg;
 1391         u_int avgbfree, startcg;
 1392         ufs2_daddr_t pref, prevbn;
 1393 
 1394         KASSERT(indx <= 0 || bap != NULL, ("need non-NULL bap"));
 1395         mtx_assert(UFS_MTX(ITOUMP(ip)), MA_OWNED);
 1396         fs = ITOFS(ip);
 1397         /*
 1398          * Allocation of indirect blocks is indicated by passing negative
 1399          * values in indx: -1 for single indirect, -2 for double indirect,
 1400          * -3 for triple indirect. As noted below, we attempt to allocate
 1401          * the first indirect inline with the file data. For all later
 1402          * indirect blocks, the data is often allocated in other cylinder
 1403          * groups. However to speed random file access and to speed up
 1404          * fsck, the filesystem reserves the first fs_metaspace blocks
 1405          * (typically half of fs_minfree) of the data area of each cylinder
 1406          * group to hold these later indirect blocks.
 1407          */
 1408         inocg = ino_to_cg(fs, ip->i_number);
 1409         if (indx < 0) {
 1410                 /*
 1411                  * Our preference for indirect blocks is the zone at the
 1412                  * beginning of the inode's cylinder group data area that
 1413                  * we try to reserve for indirect blocks.
 1414                  */
 1415                 pref = cgmeta(fs, inocg);
 1416                 /*
 1417                  * If we are allocating the first indirect block, try to
 1418                  * place it immediately following the last direct block.
 1419                  */
 1420                 if (indx == -1 && lbn < UFS_NDADDR + NINDIR(fs) &&
 1421                     ip->i_din1->di_db[UFS_NDADDR - 1] != 0)
 1422                         pref = ip->i_din1->di_db[UFS_NDADDR - 1] + fs->fs_frag;
 1423                 return (pref);
 1424         }
 1425         /*
 1426          * If we are allocating the first data block in the first indirect
 1427          * block and the indirect has been allocated in the data block area,
 1428          * try to place it immediately following the indirect block.
 1429          */
 1430         if (lbn == UFS_NDADDR) {
 1431                 pref = ip->i_din1->di_ib[0];
 1432                 if (pref != 0 && pref >= cgdata(fs, inocg) &&
 1433                     pref < cgbase(fs, inocg + 1))
 1434                         return (pref + fs->fs_frag);
 1435         }
 1436         /*
 1437          * If we are at the beginning of a file, or we have already allocated
 1438          * the maximum number of blocks per cylinder group, or we do not
 1439          * have a block allocated immediately preceding us, then we need
 1440          * to decide where to start allocating new blocks.
 1441          */
 1442         if (indx == 0) {
 1443                 prevbn = 0;
 1444         } else {
 1445                 prevbn = bap[indx - 1];
 1446                 if (UFS_CHECK_BLKNO(ITOVFS(ip), ip->i_number, prevbn,
 1447                     fs->fs_bsize) != 0)
 1448                         prevbn = 0;
 1449         }
 1450         if (indx % fs->fs_maxbpg == 0 || prevbn == 0) {
 1451                 /*
 1452                  * If we are allocating a directory data block, we want
 1453                  * to place it in the metadata area.
 1454                  */
 1455                 if ((ip->i_mode & IFMT) == IFDIR)
 1456                         return (cgmeta(fs, inocg));
 1457                 /*
 1458                  * Until we fill all the direct and all the first indirect's
 1459                  * blocks, we try to allocate in the data area of the inode's
 1460                  * cylinder group.
 1461                  */
 1462                 if (lbn < UFS_NDADDR + NINDIR(fs))
 1463                         return (cgdata(fs, inocg));
 1464                 /*
 1465                  * Find a cylinder with greater than average number of
 1466                  * unused data blocks.
 1467                  */
 1468                 if (indx == 0 || prevbn == 0)
 1469                         startcg = inocg + lbn / fs->fs_maxbpg;
 1470                 else
 1471                         startcg = dtog(fs, prevbn) + 1;
 1472                 startcg %= fs->fs_ncg;
 1473                 avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
 1474                 for (cg = startcg; cg < fs->fs_ncg; cg++)
 1475                         if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
 1476                                 fs->fs_cgrotor = cg;
 1477                                 return (cgdata(fs, cg));
 1478                         }
 1479                 for (cg = 0; cg <= startcg; cg++)
 1480                         if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
 1481                                 fs->fs_cgrotor = cg;
 1482                                 return (cgdata(fs, cg));
 1483                         }
 1484                 return (0);
 1485         }
 1486         /*
 1487          * Otherwise, we always try to lay things out contiguously.
 1488          */
 1489         return (prevbn + fs->fs_frag);
 1490 }
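
/*
 * Illustrative numbers for the policy above, assuming a filesystem
 * where NINDIR(fs) == 4096 and fs_maxbpg == 4096: the inode's own
 * cylinder group is preferred for lbn 0 through UFS_NDADDR + 4095,
 * after which every later run of 4096 blocks restarts the rotor
 * sweep in search of a cg with more free blocks than average.
 */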
 1491 
 1492 /*
 1493  * Same as above, but for UFS2
 1494  */
 1495 ufs2_daddr_t
 1496 ffs_blkpref_ufs2(struct inode *ip,
 1497         ufs_lbn_t lbn,
 1498         int indx,
 1499         ufs2_daddr_t *bap)
 1500 {
 1501         struct fs *fs;
 1502         u_int cg, inocg;
 1503         u_int avgbfree, startcg;
 1504         ufs2_daddr_t pref, prevbn;
 1505 
 1506         KASSERT(indx <= 0 || bap != NULL, ("need non-NULL bap"));
 1507         mtx_assert(UFS_MTX(ITOUMP(ip)), MA_OWNED);
 1508         fs = ITOFS(ip);
 1509         /*
 1510          * Allocation of indirect blocks is indicated by passing negative
 1511          * values in indx: -1 for single indirect, -2 for double indirect,
 1512          * -3 for triple indirect. As noted below, we attempt to allocate
 1513          * the first indirect inline with the file data. For all later
 1514          * indirect blocks, the data is often allocated in other cylinder
 1515          * groups. However to speed random file access and to speed up
 1516          * fsck, the filesystem reserves the first fs_metaspace blocks
 1517          * (typically half of fs_minfree) of the data area of each cylinder
 1518          * group to hold these later indirect blocks.
 1519          */
 1520         inocg = ino_to_cg(fs, ip->i_number);
 1521         if (indx < 0) {
 1522                 /*
 1523                  * Our preference for indirect blocks is the zone at the
 1524                  * beginning of the inode's cylinder group data area that
 1525                  * we try to reserve for indirect blocks.
 1526                  */
 1527                 pref = cgmeta(fs, inocg);
 1528                 /*
 1529                  * If we are allocating the first indirect block, try to
 1530                  * place it immediately following the last direct block.
 1531                  */
 1532                 if (indx == -1 && lbn < UFS_NDADDR + NINDIR(fs) &&
 1533                     ip->i_din2->di_db[UFS_NDADDR - 1] != 0)
 1534                         pref = ip->i_din2->di_db[UFS_NDADDR - 1] + fs->fs_frag;
 1535                 return (pref);
 1536         }
 1537         /*
 1538          * If we are allocating the first data block in the first indirect
 1539          * block and the indirect has been allocated in the data block area,
 1540          * try to place it immediately following the indirect block.
 1541          */
 1542         if (lbn == UFS_NDADDR) {
 1543                 pref = ip->i_din2->di_ib[0];
 1544                 if (pref != 0 && pref >= cgdata(fs, inocg) &&
 1545                     pref < cgbase(fs, inocg + 1))
 1546                         return (pref + fs->fs_frag);
 1547         }
 1548         /*
 1549          * If we are at the beginning of a file, or we have already allocated
 1550          * the maximum number of blocks per cylinder group, or we do not
 1551          * have a block allocated immediately preceding us, then we need
 1552          * to decide where to start allocating new blocks.
 1553          */
 1554         if (indx == 0) {
 1555                 prevbn = 0;
 1556         } else {
 1557                 prevbn = bap[indx - 1];
 1558                 if (UFS_CHECK_BLKNO(ITOVFS(ip), ip->i_number, prevbn,
 1559                     fs->fs_bsize) != 0)
 1560                         prevbn = 0;
 1561         }
 1562         if (indx % fs->fs_maxbpg == 0 || prevbn == 0) {
 1563                 /*
 1564                  * If we are allocating a directory data block, we want
 1565                  * to place it in the metadata area.
 1566                  */
 1567                 if ((ip->i_mode & IFMT) == IFDIR)
 1568                         return (cgmeta(fs, inocg));
 1569                 /*
 1570                  * Until we fill all the direct and all the first indirect's
 1571                  * blocks, we try to allocate in the data area of the inode's
 1572                  * cylinder group.
 1573                  */
 1574                 if (lbn < UFS_NDADDR + NINDIR(fs))
 1575                         return (cgdata(fs, inocg));
 1576                 /*
 1577                  * Find a cylinder with greater than average number of
 1578                  * unused data blocks.
 1579                  */
 1580                 if (indx == 0 || prevbn == 0)
 1581                         startcg = inocg + lbn / fs->fs_maxbpg;
 1582                 else
 1583                         startcg = dtog(fs, prevbn) + 1;
 1584                 startcg %= fs->fs_ncg;
 1585                 avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
 1586                 for (cg = startcg; cg < fs->fs_ncg; cg++)
 1587                         if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
 1588                                 fs->fs_cgrotor = cg;
 1589                                 return (cgdata(fs, cg));
 1590                         }
 1591                 for (cg = 0; cg <= startcg; cg++)
 1592                         if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
 1593                                 fs->fs_cgrotor = cg;
 1594                                 return (cgdata(fs, cg));
 1595                         }
 1596                 return (0);
 1597         }
 1598         /*
 1599          * Otherwise, we always try to lay things out contiguously.
 1600          */
 1601         return (prevbn + fs->fs_frag);
 1602 }
 1603 
 1604 /*
 1605  * Implement the cylinder overflow algorithm.
 1606  *
 1607  * The policy implemented by this algorithm is:
 1608  *   1) allocate the block in its requested cylinder group.
 1609  *   2) quadratically rehash on the cylinder group number.
 1610  *   3) brute force search for a free block.
 1611  *
 1612  * Must be called with the UFS lock held.  Will release the lock on success
 1613  * and return with it held on failure.
 1614  */
 1615 /*VARARGS5*/
 1616 static ufs2_daddr_t
 1617 ffs_hashalloc(struct inode *ip,
 1618         u_int cg,
 1619         ufs2_daddr_t pref,
 1620         int size,       /* Search size for data blocks, mode for inodes */
 1621         int rsize,      /* Real allocated size. */
 1622         allocfcn_t *allocator)
 1623 {
 1624         struct fs *fs;
 1625         ufs2_daddr_t result;
 1626         u_int i, icg = cg;
 1627 
 1628         mtx_assert(UFS_MTX(ITOUMP(ip)), MA_OWNED);
 1629 #ifdef INVARIANTS
 1630         if (ITOV(ip)->v_mount->mnt_kern_flag & MNTK_SUSPENDED)
 1631                 panic("ffs_hashalloc: allocation on suspended filesystem");
 1632 #endif
 1633         fs = ITOFS(ip);
 1634         /*
 1635          * 1: preferred cylinder group
 1636          */
 1637         result = (*allocator)(ip, cg, pref, size, rsize);
 1638         if (result)
 1639                 return (result);
 1640         /*
 1641          * 2: quadratic rehash
 1642          */
 1643         for (i = 1; i < fs->fs_ncg; i *= 2) {
 1644                 cg += i;
 1645                 if (cg >= fs->fs_ncg)
 1646                         cg -= fs->fs_ncg;
 1647                 result = (*allocator)(ip, cg, 0, size, rsize);
 1648                 if (result)
 1649                         return (result);
 1650         }
 1651         /*
 1652          * 3: brute force search
 1653          * Note that we start at i == 2, since 0 was checked initially,
 1654          * and 1 is always checked in the quadratic rehash.
 1655          */
 1656         cg = (icg + 2) % fs->fs_ncg;
 1657         for (i = 2; i < fs->fs_ncg; i++) {
 1658                 result = (*allocator)(ip, cg, 0, size, rsize);
 1659                 if (result)
 1660                         return (result);
 1661                 cg++;
 1662                 if (cg == fs->fs_ncg)
 1663                         cg = 0;
 1664         }
 1665         return (0);
 1666 }
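
/*
 * Example probe sequence, assuming icg == 3 and fs_ncg == 8: pass 1
 * tries cg 3; the quadratic rehash (i = 1, 2, 4) tries cgs 4, 6, and
 * 2; the brute force pass then starts at (3 + 2) % 8 == 5 and walks
 * 5, 6, 7, 0, 1, 2 until every group has been tried.
 */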
 1667 
 1668 /*
 1669  * Determine whether a fragment can be extended.
 1670  *
 1671  * Check to see if the necessary fragments are available, and
 1672  * if they are, allocate them.
 1673  */
 1674 static ufs2_daddr_t
 1675 ffs_fragextend(struct inode *ip,
 1676         u_int cg,
 1677         ufs2_daddr_t bprev,
 1678         int osize,
 1679         int nsize)
 1680 {
 1681         struct fs *fs;
 1682         struct cg *cgp;
 1683         struct buf *bp;
 1684         struct ufsmount *ump;
 1685         int nffree;
 1686         long bno;
 1687         int frags, bbase;
 1688         int i, error;
 1689         u_int8_t *blksfree;
 1690 
 1691         ump = ITOUMP(ip);
 1692         fs = ump->um_fs;
 1693         if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
 1694                 return (0);
 1695         frags = numfrags(fs, nsize);
 1696         bbase = fragnum(fs, bprev);
 1697         if (bbase > fragnum(fs, (bprev + frags - 1))) {
 1698                 /* cannot extend across a block boundary */
 1699                 return (0);
 1700         }
 1701         UFS_UNLOCK(ump);
 1702         if ((error = ffs_getcg(fs, ump->um_devvp, cg, 0, &bp, &cgp)) != 0)
 1703                 goto fail;
 1704         bno = dtogd(fs, bprev);
 1705         blksfree = cg_blksfree(cgp);
 1706         for (i = numfrags(fs, osize); i < frags; i++)
 1707                 if (isclr(blksfree, bno + i))
 1708                         goto fail;
 1709         /*
 1710          * The current fragment can be extended:
 1711          * deduct the count on the fragment being extended into,
 1712          * increase the count on the remaining fragment (if any),
 1713          * and allocate the extended piece.
 1714          */
 1715         for (i = frags; i < fs->fs_frag - bbase; i++)
 1716                 if (isclr(blksfree, bno + i))
 1717                         break;
 1718         cgp->cg_frsum[i - numfrags(fs, osize)]--;
 1719         if (i != frags)
 1720                 cgp->cg_frsum[i - frags]++;
 1721         for (i = numfrags(fs, osize), nffree = 0; i < frags; i++) {
 1722                 clrbit(blksfree, bno + i);
 1723                 cgp->cg_cs.cs_nffree--;
 1724                 nffree++;
 1725         }
 1726         UFS_LOCK(ump);
 1727         fs->fs_cstotal.cs_nffree -= nffree;
 1728         fs->fs_cs(fs, cg).cs_nffree -= nffree;
 1729         fs->fs_fmod = 1;
 1730         ACTIVECLEAR(fs, cg);
 1731         UFS_UNLOCK(ump);
 1732         if (DOINGSOFTDEP(ITOV(ip)))
 1733                 softdep_setup_blkmapdep(bp, UFSTOVFS(ump), bprev,
 1734                     frags, numfrags(fs, osize));
 1735         bdwrite(bp);
 1736         return (bprev);
 1737 
 1738 fail:
 1739         brelse(bp);
 1740         UFS_LOCK(ump);
 1741         return (0);
 1742 
 1743 }
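
/*
 * Example, assuming fs_frag == 8: extending a 2-fragment piece that
 * begins at fragment 2 of its block to 5 fragments requires fragments
 * 4 through 6 of that block to be free; frags == 5 and bbase == 2,
 * and fragnum(fs, bprev + 4) == 6 >= bbase, so the piece does not
 * cross a block boundary and the bitmap scan above decides the
 * outcome.
 */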
 1744 
 1745 /*
 1746  * Determine whether a block can be allocated.
 1747  *
 1748  * Check to see if a block of the appropriate size is available,
 1749  * and if it is, allocate it.
 1750  */
 1751 static ufs2_daddr_t
 1752 ffs_alloccg(struct inode *ip,
 1753         u_int cg,
 1754         ufs2_daddr_t bpref,
 1755         int size,
 1756         int rsize)
 1757 {
 1758         struct fs *fs;
 1759         struct cg *cgp;
 1760         struct buf *bp;
 1761         struct ufsmount *ump;
 1762         ufs1_daddr_t bno;
 1763         ufs2_daddr_t blkno;
 1764         int i, allocsiz, error, frags;
 1765         u_int8_t *blksfree;
 1766 
 1767         ump = ITOUMP(ip);
 1768         fs = ump->um_fs;
 1769         if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
 1770                 return (0);
 1771         UFS_UNLOCK(ump);
 1772         if ((error = ffs_getcg(fs, ump->um_devvp, cg, 0, &bp, &cgp)) != 0 ||
 1773            (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize))
 1774                 goto fail;
 1775         if (size == fs->fs_bsize) {
 1776                 UFS_LOCK(ump);
 1777                 blkno = ffs_alloccgblk(ip, bp, bpref, rsize);
 1778                 ACTIVECLEAR(fs, cg);
 1779                 UFS_UNLOCK(ump);
 1780                 bdwrite(bp);
 1781                 return (blkno);
 1782         }
 1783         /*
 1784          * check to see if any fragments are already available
 1785          * allocsiz is the size which will be allocated, hacking
 1786          * it down to a smaller size if necessary
 1787          */
 1788         blksfree = cg_blksfree(cgp);
 1789         frags = numfrags(fs, size);
 1790         for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
 1791                 if (cgp->cg_frsum[allocsiz] != 0)
 1792                         break;
 1793         if (allocsiz == fs->fs_frag) {
 1794                 /*
 1795                  * no fragments were available, so a block will be
 1796                  * allocated, and hacked up
 1797                  */
 1798                 if (cgp->cg_cs.cs_nbfree == 0)
 1799                         goto fail;
 1800                 UFS_LOCK(ump);
 1801                 blkno = ffs_alloccgblk(ip, bp, bpref, rsize);
 1802                 ACTIVECLEAR(fs, cg);
 1803                 UFS_UNLOCK(ump);
 1804                 bdwrite(bp);
 1805                 return (blkno);
 1806         }
 1807         KASSERT(size == rsize,
 1808             ("ffs_alloccg: size(%d) != rsize(%d)", size, rsize));
 1809         bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
 1810         if (bno < 0)
 1811                 goto fail;
 1812         for (i = 0; i < frags; i++)
 1813                 clrbit(blksfree, bno + i);
 1814         cgp->cg_cs.cs_nffree -= frags;
 1815         cgp->cg_frsum[allocsiz]--;
 1816         if (frags != allocsiz)
 1817                 cgp->cg_frsum[allocsiz - frags]++;
 1818         UFS_LOCK(ump);
 1819         fs->fs_cstotal.cs_nffree -= frags;
 1820         fs->fs_cs(fs, cg).cs_nffree -= frags;
 1821         fs->fs_fmod = 1;
 1822         blkno = cgbase(fs, cg) + bno;
 1823         ACTIVECLEAR(fs, cg);
 1824         UFS_UNLOCK(ump);
 1825         if (DOINGSOFTDEP(ITOV(ip)))
 1826                 softdep_setup_blkmapdep(bp, UFSTOVFS(ump), blkno, frags, 0);
 1827         bdwrite(bp);
 1828         return (blkno);
 1829 
 1830 fail:
 1831         brelse(bp);
 1832         UFS_LOCK(ump);
 1833         return (0);
 1834 }
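
/*
 * Example of the cg_frsum search above: a request for 3 fragments when
 * cg_frsum[3] and cg_frsum[4] are zero but cg_frsum[5] is nonzero is
 * satisfied by carving 3 fragments out of a free 5-fragment piece;
 * cg_frsum[5] is decremented and cg_frsum[2] incremented for the
 * 2-fragment remainder.
 */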
 1835 
 1836 /*
 1837  * Allocate a block in a cylinder group.
 1838  *
 1839  * This algorithm implements the following policy:
 1840  *   1) allocate the requested block.
 1841  *   2) allocate a rotationally optimal block in the same cylinder.
 1842  *   3) allocate the next available block on the block rotor for the
 1843  *      specified cylinder group.
 1844  * Note that this routine only allocates fs_bsize blocks; these
 1845  * blocks may be fragmented by the routine that allocates them.
 1846  */
 1847 static ufs2_daddr_t
 1848 ffs_alloccgblk(struct inode *ip,
 1849         struct buf *bp,
 1850         ufs2_daddr_t bpref,
 1851         int size)
 1852 {
 1853         struct fs *fs;
 1854         struct cg *cgp;
 1855         struct ufsmount *ump;
 1856         ufs1_daddr_t bno;
 1857         ufs2_daddr_t blkno;
 1858         u_int8_t *blksfree;
 1859         int i, cgbpref;
 1860 
 1861         ump = ITOUMP(ip);
 1862         fs = ump->um_fs;
 1863         mtx_assert(UFS_MTX(ump), MA_OWNED);
 1864         cgp = (struct cg *)bp->b_data;
 1865         blksfree = cg_blksfree(cgp);
 1866         if (bpref == 0) {
 1867                 bpref = cgbase(fs, cgp->cg_cgx) + cgp->cg_rotor + fs->fs_frag;
 1868         } else if ((cgbpref = dtog(fs, bpref)) != cgp->cg_cgx) {
 1869                 /* map bpref to correct zone in this cg */
 1870                 if (bpref < cgdata(fs, cgbpref))
 1871                         bpref = cgmeta(fs, cgp->cg_cgx);
 1872                 else
 1873                         bpref = cgdata(fs, cgp->cg_cgx);
 1874         }
 1875         /*
 1876          * if the requested block is available, use it
 1877          */
 1878         bno = dtogd(fs, blknum(fs, bpref));
 1879         if (ffs_isblock(fs, blksfree, fragstoblks(fs, bno)))
 1880                 goto gotit;
 1881         /*
 1882          * Take the next available block in this cylinder group.
 1883          */
 1884         bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
 1885         if (bno < 0)
 1886                 return (0);
 1887         /* Update cg_rotor only if allocated from the data zone */
 1888         if (bno >= dtogd(fs, cgdata(fs, cgp->cg_cgx)))
 1889                 cgp->cg_rotor = bno;
 1890 gotit:
 1891         blkno = fragstoblks(fs, bno);
 1892         ffs_clrblock(fs, blksfree, (long)blkno);
 1893         ffs_clusteracct(fs, cgp, blkno, -1);
 1894         cgp->cg_cs.cs_nbfree--;
 1895         fs->fs_cstotal.cs_nbfree--;
 1896         fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
 1897         fs->fs_fmod = 1;
 1898         blkno = cgbase(fs, cgp->cg_cgx) + bno;
 1899         /*
 1900          * If the caller didn't want the whole block, free the frags here.
 1901          */
 1902         size = numfrags(fs, size);
 1903         if (size != fs->fs_frag) {
 1904                 bno = dtogd(fs, blkno);
 1905                 for (i = size; i < fs->fs_frag; i++)
 1906                         setbit(blksfree, bno + i);
 1907                 i = fs->fs_frag - size;
 1908                 cgp->cg_cs.cs_nffree += i;
 1909                 fs->fs_cstotal.cs_nffree += i;
 1910                 fs->fs_cs(fs, cgp->cg_cgx).cs_nffree += i;
 1911                 fs->fs_fmod = 1;
 1912                 cgp->cg_frsum[i]++;
 1913         }
 1914         /* XXX Fixme. */
 1915         UFS_UNLOCK(ump);
 1916         if (DOINGSOFTDEP(ITOV(ip)))
 1917                 softdep_setup_blkmapdep(bp, UFSTOVFS(ump), blkno, size, 0);
 1918         UFS_LOCK(ump);
 1919         return (blkno);
 1920 }
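
/*
 * Example of the tail accounting above, assuming fs_frag == 8: if the
 * caller asked for only 3 fragments, fragments 3 through 7 of the
 * newly allocated block are returned to the free map and cg_frsum[5]
 * is bumped for the 5-fragment remainder.
 */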
 1921 
 1922 /*
 1923  * Determine whether a cluster can be allocated.
 1924  *
 1925  * We do not currently check for optimal rotational layout if there
 1926  * are multiple choices in the same cylinder group. Instead we just
 1927  * take the first one that we find following bpref.
 1928  */
 1929 static ufs2_daddr_t
 1930 ffs_clusteralloc(struct inode *ip,
 1931         u_int cg,
 1932         ufs2_daddr_t bpref,
 1933         int len)
 1934 {
 1935         struct fs *fs;
 1936         struct cg *cgp;
 1937         struct buf *bp;
 1938         struct ufsmount *ump;
 1939         int i, run, bit, map, got, error;
 1940         ufs2_daddr_t bno;
 1941         u_char *mapp;
 1942         int32_t *lp;
 1943         u_int8_t *blksfree;
 1944 
 1945         ump = ITOUMP(ip);
 1946         fs = ump->um_fs;
 1947         if (fs->fs_maxcluster[cg] < len)
 1948                 return (0);
 1949         UFS_UNLOCK(ump);
 1950         if ((error = ffs_getcg(fs, ump->um_devvp, cg, 0, &bp, &cgp)) != 0) {
 1951                 UFS_LOCK(ump);
 1952                 return (0);
 1953         }
 1954         /*
 1955          * Check to see if a cluster of the needed size (or bigger) is
 1956          * available in this cylinder group.
 1957          */
 1958         lp = &cg_clustersum(cgp)[len];
 1959         for (i = len; i <= fs->fs_contigsumsize; i++)
 1960                 if (*lp++ > 0)
 1961                         break;
 1962         if (i > fs->fs_contigsumsize) {
 1963                 /*
 1964                  * This is the first time looking for a cluster in this
 1965                  * cylinder group. Update the cluster summary information
 1966          * to reflect the true maximum-sized cluster so that
 1967                  * future cluster allocation requests can avoid reading
 1968                  * the cylinder group map only to find no clusters.
 1969                  */
 1970                 lp = &cg_clustersum(cgp)[len - 1];
 1971                 for (i = len - 1; i > 0; i--)
 1972                         if (*lp-- > 0)
 1973                                 break;
 1974                 UFS_LOCK(ump);
 1975                 fs->fs_maxcluster[cg] = i;
 1976                 brelse(bp);
 1977                 return (0);
 1978         }
 1979         /*
 1980          * Search the cluster map to find a big enough cluster.
 1981          * We take the first one that we find, even if it is larger
 1982          * than we need as we prefer to get one close to the previous
 1983          * block allocation. We do not search before the current
 1984          * preference point as we do not want to allocate a block
 1985          * that is allocated before the previous one (as we will
 1986          * then have to wait for another pass of the elevator
 1987          * algorithm before it will be read). We prefer to fail and
 1988          * be recalled to try an allocation in the next cylinder group.
 1989          */
 1990         if (dtog(fs, bpref) != cg)
 1991                 bpref = cgdata(fs, cg);
 1992         else
 1993                 bpref = blknum(fs, bpref);
 1994         bpref = fragstoblks(fs, dtogd(fs, bpref));
 1995         mapp = &cg_clustersfree(cgp)[bpref / NBBY];
 1996         map = *mapp++;
 1997         bit = 1 << (bpref % NBBY);
 1998         for (run = 0, got = bpref; got < cgp->cg_nclusterblks; got++) {
 1999                 if ((map & bit) == 0) {
 2000                         run = 0;
 2001                 } else {
 2002                         run++;
 2003                         if (run == len)
 2004                                 break;
 2005                 }
 2006                 if ((got & (NBBY - 1)) != (NBBY - 1)) {
 2007                         bit <<= 1;
 2008                 } else {
 2009                         map = *mapp++;
 2010                         bit = 1;
 2011                 }
 2012         }
 2013         if (got >= cgp->cg_nclusterblks) {
 2014                 UFS_LOCK(ump);
 2015                 brelse(bp);
 2016                 return (0);
 2017         }
 2018         /*
 2019          * Allocate the cluster that we have found.
 2020          */
 2021         blksfree = cg_blksfree(cgp);
 2022         for (i = 1; i <= len; i++)
 2023                 if (!ffs_isblock(fs, blksfree, got - run + i))
 2024                         panic("ffs_clusteralloc: map mismatch");
 2025         bno = cgbase(fs, cg) + blkstofrags(fs, got - run + 1);
 2026         if (dtog(fs, bno) != cg)
 2027                 panic("ffs_clusteralloc: allocated out of group");
 2028         len = blkstofrags(fs, len);
 2029         UFS_LOCK(ump);
 2030         for (i = 0; i < len; i += fs->fs_frag)
 2031                 if (ffs_alloccgblk(ip, bp, bno + i, fs->fs_bsize) != bno + i)
 2032                         panic("ffs_clusteralloc: lost block");
 2033         ACTIVECLEAR(fs, cg);
 2034         UFS_UNLOCK(ump);
 2035         bdwrite(bp);
 2036         return (bno);
 2037 }
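
/*
 * Example of the run scan above, assuming len == 3: starting at bpref,
 * each set bit in cg_clustersfree extends the current run and each
 * clear bit resets it to zero; the first position where run reaches 3
 * ends the scan, and the cluster starts at block (got - run + 1).
 */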
 2038 
 2039 static inline struct buf *
 2040 getinobuf(struct inode *ip,
 2041         u_int cg,
 2042         u_int32_t cginoblk,
 2043         int gbflags)
 2044 {
 2045         struct fs *fs;
 2046 
 2047         fs = ITOFS(ip);
 2048         return (getblk(ITODEVVP(ip), fsbtodb(fs, ino_to_fsba(fs,
 2049             cg * fs->fs_ipg + cginoblk)), (int)fs->fs_bsize, 0, 0,
 2050             gbflags));
 2051 }
 2052 
 2053 /*
 2054  * Synchronous inode initialization is needed only when barrier writes do not
 2055  * work as advertised, and will impose a heavy cost on file creation in a newly
 2056  * created filesystem.
 2057  */
 2058 static int doasyncinodeinit = 1;
 2059 SYSCTL_INT(_vfs_ffs, OID_AUTO, doasyncinodeinit, CTLFLAG_RWTUN,
 2060     &doasyncinodeinit, 0,
 2061     "Perform inode block initialization using asynchronous writes");
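
/*
 * The knob is exposed as vfs.ffs.doasyncinodeinit; for example,
 * "sysctl vfs.ffs.doasyncinodeinit=0" forces the synchronous bwrite()
 * path below when a device's barrier writes cannot be trusted.
 */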
 2062 
 2063 /*
 2064  * Determine whether an inode can be allocated.
 2065  *
 2066  * Check to see if an inode is available, and if it is,
 2067  * allocate it using the following policy:
 2068  *   1) allocate the requested inode.
 2069  *   2) allocate the next available inode after the requested
 2070  *      inode in the specified cylinder group.
 2071  */
 2072 static ufs2_daddr_t
 2073 ffs_nodealloccg(struct inode *ip,
 2074         u_int cg,
 2075         ufs2_daddr_t ipref,
 2076         int mode,
 2077         int unused)
 2078 {
 2079         struct fs *fs;
 2080         struct cg *cgp;
 2081         struct buf *bp, *ibp;
 2082         struct ufsmount *ump;
 2083         u_int8_t *inosused, *loc;
 2084         struct ufs2_dinode *dp2;
 2085         int error, start, len, i;
 2086         u_int32_t old_initediblk;
 2087 
 2088         ump = ITOUMP(ip);
 2089         fs = ump->um_fs;
 2090 check_nifree:
 2091         if (fs->fs_cs(fs, cg).cs_nifree == 0)
 2092                 return (0);
 2093         UFS_UNLOCK(ump);
 2094         if ((error = ffs_getcg(fs, ump->um_devvp, cg, 0, &bp, &cgp)) != 0) {
 2095                 UFS_LOCK(ump);
 2096                 return (0);
 2097         }
 2098 restart:
 2099         if (cgp->cg_cs.cs_nifree == 0) {
 2100                 brelse(bp);
 2101                 UFS_LOCK(ump);
 2102                 return (0);
 2103         }
 2104         inosused = cg_inosused(cgp);
 2105         if (ipref) {
 2106                 ipref %= fs->fs_ipg;
 2107                 if (isclr(inosused, ipref))
 2108                         goto gotit;
 2109         }
 2110         start = cgp->cg_irotor / NBBY;
 2111         len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY);
 2112         loc = memcchr(&inosused[start], 0xff, len);
 2113         if (loc == NULL) {
 2114                 len = start + 1;
 2115                 start = 0;
 2116                 loc = memcchr(&inosused[start], 0xff, len);
 2117                 if (loc == NULL) {
 2118                         printf("cg = %d, irotor = %ld, fs = %s\n",
 2119                             cg, (long)cgp->cg_irotor, fs->fs_fsmnt);
 2120                         panic("ffs_nodealloccg: map corrupted");
 2121                         /* NOTREACHED */
 2122                 }
 2123         }
 2124         ipref = (loc - inosused) * NBBY + ffs(~*loc) - 1;
 2125 gotit:
 2126         /*
 2127          * Check to see if we need to initialize more inodes.
 2128          */
 2129         if (fs->fs_magic == FS_UFS2_MAGIC &&
 2130             ipref + INOPB(fs) > cgp->cg_initediblk &&
 2131             cgp->cg_initediblk < cgp->cg_niblk) {
 2132                 old_initediblk = cgp->cg_initediblk;
 2133 
 2134                 /*
 2135                  * Free the cylinder group lock before writing the
 2136                  * initialized inode block.  Entering
 2137                  * babarrierwrite() with the cylinder group lock
 2138                  * held causes a lock order violation between the lock and
 2139                  * snaplk.
 2140                  *
 2141                  * Another thread can decide to initialize the same
 2142                  * inode block, but whichever thread first gets the
 2143                  * cylinder group lock after writing the newly
 2144                  * allocated inode block will update it and the other
 2145                  * will realize that it has lost and leave the
 2146                  * cylinder group unchanged.
 2147                  */
 2148                 ibp = getinobuf(ip, cg, old_initediblk, GB_LOCK_NOWAIT);
 2149                 brelse(bp);
 2150                 if (ibp == NULL) {
 2151                         /*
 2152                          * The inode block buffer is already owned by
 2153                          * another thread, which must initialize it.
 2154                          * Wait on the buffer to allow another thread
 2155                          * to finish the updates, with dropped cg
 2156                          * to finish the updates with the cg buffer
 2157                          * lock dropped, then retry.
 2158                         ibp = getinobuf(ip, cg, old_initediblk, 0);
 2159                         brelse(ibp);
 2160                         UFS_LOCK(ump);
 2161                         goto check_nifree;
 2162                 }
 2163                 bzero(ibp->b_data, (int)fs->fs_bsize);
 2164                 dp2 = (struct ufs2_dinode *)(ibp->b_data);
 2165                 for (i = 0; i < INOPB(fs); i++) {
 2166                         while (dp2->di_gen == 0)
 2167                                 dp2->di_gen = arc4random();
 2168                         dp2++;
 2169                 }
 2170 
 2171                 /*
 2172                  * Rather than adding a soft updates dependency to ensure
 2173                  * that the new inode block is written before it is claimed
 2174                  * by the cylinder group map, we just do a barrier write
 2175                  * here. The barrier write will ensure that the inode block
 2176                  * gets written before the updated cylinder group map can be
 2177                  * written. The barrier write should only slow down bulk
 2178                  * loading of newly created filesystems.
 2179                  */
 2180                 if (doasyncinodeinit)
 2181                         babarrierwrite(ibp);
 2182                 else
 2183                         bwrite(ibp);
 2184 
 2185                 /*
 2186                  * After the inode block is written, try to update the
 2187                  * cg initediblk pointer.  If another thread beat us
 2188                  * to it, then leave it unchanged as the other thread
 2189                  * has already set it correctly.
 2190                  */
 2191                 error = ffs_getcg(fs, ump->um_devvp, cg, 0, &bp, &cgp);
 2192                 UFS_LOCK(ump);
 2193                 ACTIVECLEAR(fs, cg);
 2194                 UFS_UNLOCK(ump);
 2195                 if (error != 0)
 2196                         return (error);
 2197                 if (cgp->cg_initediblk == old_initediblk)
 2198                         cgp->cg_initediblk += INOPB(fs);
 2199                 goto restart;
 2200         }
 2201         cgp->cg_irotor = ipref;
 2202         UFS_LOCK(ump);
 2203         ACTIVECLEAR(fs, cg);
 2204         setbit(inosused, ipref);
 2205         cgp->cg_cs.cs_nifree--;
 2206         fs->fs_cstotal.cs_nifree--;
 2207         fs->fs_cs(fs, cg).cs_nifree--;
 2208         fs->fs_fmod = 1;
 2209         if ((mode & IFMT) == IFDIR) {
 2210                 cgp->cg_cs.cs_ndir++;
 2211                 fs->fs_cstotal.cs_ndir++;
 2212                 fs->fs_cs(fs, cg).cs_ndir++;
 2213         }
 2214         UFS_UNLOCK(ump);
 2215         if (DOINGSOFTDEP(ITOV(ip)))
 2216                 softdep_setup_inomapdep(bp, ip, cg * fs->fs_ipg + ipref, mode);
 2217         bdwrite(bp);
 2218         return ((ino_t)(cg * fs->fs_ipg + ipref));
 2219 }
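
/*
 * Example of the inosused scan above: memcchr() returns the first byte
 * that is not 0xff, i.e. the first byte with a free inode; if that
 * byte is 0xef (bit 4 clear), then ffs(~0xef) == ffs(0x10) == 5, so
 * ipref indexes bit 4 of that byte.
 */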
 2220 
 2221 /*
 2222  * Free a block or fragment.
 2223  *
 2224  * The specified block or fragment is placed back in the
 2225  * free map. If a fragment is deallocated, a possible
 2226  * block reassembly is checked.
 2227  */
 2228 static void
 2229 ffs_blkfree_cg(struct ufsmount *ump,
 2230         struct fs *fs,
 2231         struct vnode *devvp,
 2232         ufs2_daddr_t bno,
 2233         long size,
 2234         ino_t inum,
 2235         struct workhead *dephd)
 2236 {
 2237         struct mount *mp;
 2238         struct cg *cgp;
 2239         struct buf *bp;
 2240         daddr_t dbn;
 2241         ufs1_daddr_t fragno, cgbno;
 2242         int i, blk, frags, bbase, error;
 2243         u_int cg;
 2244         u_int8_t *blksfree;
 2245         struct cdev *dev;
 2246 
 2247         cg = dtog(fs, bno);
 2248         if (devvp->v_type == VREG) {
 2249                 /* devvp is a snapshot */
 2250                 MPASS(devvp->v_mount->mnt_data == ump);
 2251                 dev = ump->um_devvp->v_rdev;
 2252         } else if (devvp->v_type == VCHR) {
 2253                 /*
 2254                  * devvp is a normal disk device
 2255                  * XXXKIB: devvp is not locked here; v_rdev access depends on
 2256                  * busy mount, which prevents mntfs devvp from reclamation.
 2257                  */
 2258                 dev = devvp->v_rdev;
 2259         } else
 2260                 return;
 2261 #ifdef INVARIANTS
 2262         if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0 ||
 2263             fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) {
 2264                 printf("dev=%s, bno = %jd, bsize = %ld, size = %ld, fs = %s\n",
 2265                     devtoname(dev), (intmax_t)bno, (long)fs->fs_bsize,
 2266                     size, fs->fs_fsmnt);
 2267                 panic("ffs_blkfree_cg: bad size");
 2268         }
 2269 #endif
 2270         if ((u_int)bno >= fs->fs_size) {
 2271                 printf("bad block %jd, ino %lu\n", (intmax_t)bno,
 2272                     (u_long)inum);
 2273                 ffs_fserr(fs, inum, "bad block");
 2274                 return;
 2275         }
 2276         if ((error = ffs_getcg(fs, devvp, cg, GB_CVTENXIO, &bp, &cgp)) != 0) {
 2277                 if (!ffs_fsfail_cleanup(ump, error) ||
 2278                     !MOUNTEDSOFTDEP(UFSTOVFS(ump)) || devvp->v_type != VCHR)
 2279                         return;
 2280                 if (devvp->v_type == VREG)
 2281                         dbn = fragstoblks(fs, cgtod(fs, cg));
 2282                 else
 2283                         dbn = fsbtodb(fs, cgtod(fs, cg));
 2284                 error = getblkx(devvp, dbn, dbn, fs->fs_cgsize, 0, 0, 0, &bp);
 2285                 KASSERT(error == 0, ("getblkx failed"));
 2286                 softdep_setup_blkfree(UFSTOVFS(ump), bp, bno,
 2287                     numfrags(fs, size), dephd);
 2288                 bp->b_flags |= B_RELBUF | B_NOCACHE;
 2289                 bp->b_flags &= ~B_CACHE;
 2290                 bawrite(bp);
 2291                 return;
 2292         }
 2293         cgbno = dtogd(fs, bno);
 2294         blksfree = cg_blksfree(cgp);
 2295         UFS_LOCK(ump);
 2296         if (size == fs->fs_bsize) {
 2297                 fragno = fragstoblks(fs, cgbno);
 2298                 if (!ffs_isfreeblock(fs, blksfree, fragno)) {
 2299                         if (devvp->v_type == VREG) {
 2300                                 UFS_UNLOCK(ump);
 2301                                 /* devvp is a snapshot */
 2302                                 brelse(bp);
 2303                                 return;
 2304                         }
 2305                         printf("dev = %s, block = %jd, fs = %s\n",
 2306                             devtoname(dev), (intmax_t)bno, fs->fs_fsmnt);
 2307                         panic("ffs_blkfree_cg: freeing free block");
 2308                 }
 2309                 ffs_setblock(fs, blksfree, fragno);
 2310                 ffs_clusteracct(fs, cgp, fragno, 1);
 2311                 cgp->cg_cs.cs_nbfree++;
 2312                 fs->fs_cstotal.cs_nbfree++;
 2313                 fs->fs_cs(fs, cg).cs_nbfree++;
 2314         } else {
 2315                 bbase = cgbno - fragnum(fs, cgbno);
 2316                 /*
 2317                  * decrement the counts associated with the old frags
 2318                  */
 2319                 blk = blkmap(fs, blksfree, bbase);
 2320                 ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
 2321                 /*
 2322                  * deallocate the fragment
 2323                  */
 2324                 frags = numfrags(fs, size);
 2325                 for (i = 0; i < frags; i++) {
 2326                         if (isset(blksfree, cgbno + i)) {
 2327                                 printf("dev = %s, block = %jd, fs = %s\n",
 2328                                     devtoname(dev), (intmax_t)(bno + i),
 2329                                     fs->fs_fsmnt);
 2330                                 panic("ffs_blkfree_cg: freeing free frag");
 2331                         }
 2332                         setbit(blksfree, cgbno + i);
 2333                 }
 2334                 cgp->cg_cs.cs_nffree += i;
 2335                 fs->fs_cstotal.cs_nffree += i;
 2336                 fs->fs_cs(fs, cg).cs_nffree += i;
 2337                 /*
 2338                  * add back in counts associated with the new frags
 2339                  */
 2340                 blk = blkmap(fs, blksfree, bbase);
 2341                 ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
 2342                 /*
 2343                  * if a complete block has been reassembled, account for it
 2344                  */
 2345                 fragno = fragstoblks(fs, bbase);
 2346                 if (ffs_isblock(fs, blksfree, fragno)) {
 2347                         cgp->cg_cs.cs_nffree -= fs->fs_frag;
 2348                         fs->fs_cstotal.cs_nffree -= fs->fs_frag;
 2349                         fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
 2350                         ffs_clusteracct(fs, cgp, fragno, 1);
 2351                         cgp->cg_cs.cs_nbfree++;
 2352                         fs->fs_cstotal.cs_nbfree++;
 2353                         fs->fs_cs(fs, cg).cs_nbfree++;
 2354                 }
 2355         }
 2356         fs->fs_fmod = 1;
 2357         ACTIVECLEAR(fs, cg);
 2358         UFS_UNLOCK(ump);
 2359         mp = UFSTOVFS(ump);
 2360         if (MOUNTEDSOFTDEP(mp) && devvp->v_type == VCHR)
 2361                 softdep_setup_blkfree(UFSTOVFS(ump), bp, bno,
 2362                     numfrags(fs, size), dephd);
 2363         bdwrite(bp);
 2364 }
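
/*
 * Example of fragment reassembly above, assuming fs_frag == 8: freeing
 * the last 2 busy fragments of a block whose other 6 are already free
 * makes ffs_isblock() true for the block, so all 8 fragments are
 * removed from the nffree counts and the block is credited to nbfree
 * instead.
 */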
 2365 
 2366 /*
 2367  * Structures and routines associated with trim management.
 2368  *
 2369  * The following requests are passed to trim_lookup to indicate
 2370  * the actions that should be taken.
 2371  */
 2372 #define NEW     1       /* if found, error else allocate and hash it */
 2373 #define OLD     2       /* if not found, error, else return it */
 2374 #define REPLACE 3       /* if not found, error else unhash and reallocate it */
 2375 #define DONE    4       /* if not found, error else unhash and return it */
 2376 #define SINGLE  5       /* don't look up, just allocate it and don't hash it */
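
/*
 * Sketch of the intended lifecycle (the NEW and DONE transitions are
 * visible in ffs_blkrelease_start() and ffs_blkrelease_finish() below;
 * OLD and REPLACE are used by the block-freeing paths elsewhere in
 * this file): a key is hashed with NEW, looked up again with OLD or
 * REPLACE while block frees accumulate, and retired with DONE, while
 * SINGLE bypasses the hash for one-shot frees.
 */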
 2377 
 2378 MALLOC_DEFINE(M_TRIM, "ufs_trim", "UFS trim structures");
 2379 
 2380 #define TRIMLIST_HASH(ump, key) \
 2381         (&(ump)->um_trimhash[(key) & (ump)->um_trimlisthashsize])
 2382 
 2383 /*
 2384  * These structures describe each of the block free requests aggregated
 2385  * together to make up a trim request.
 2386  */
 2387 struct trim_blkreq {
 2388         TAILQ_ENTRY(trim_blkreq) blkreqlist;
 2389         ufs2_daddr_t bno;
 2390         long size;
 2391         struct workhead *pdephd;
 2392         struct workhead dephd;
 2393 };
 2394 
 2395 /*
 2396  * Description of a trim request.
 2397  */
 2398 struct ffs_blkfree_trim_params {
 2399         TAILQ_HEAD(, trim_blkreq) blklist;
 2400         LIST_ENTRY(ffs_blkfree_trim_params) hashlist;
 2401         struct task task;
 2402         struct ufsmount *ump;
 2403         struct vnode *devvp;
 2404         ino_t inum;
 2405         ufs2_daddr_t bno;
 2406         long size;
 2407         long key;
 2408 };
 2409 
 2410 static void     ffs_blkfree_trim_completed(struct buf *);
 2411 static void     ffs_blkfree_trim_task(void *ctx, int pending __unused);
 2412 static struct   ffs_blkfree_trim_params *trim_lookup(struct ufsmount *,
 2413                     struct vnode *, ufs2_daddr_t, long, ino_t, u_long, int);
 2414 static void     ffs_blkfree_sendtrim(struct ffs_blkfree_trim_params *);
 2415 
 2416 /*
 2417  * Called on trim completion to start a task to free the associated block(s).
 2418  */
 2419 static void
 2420 ffs_blkfree_trim_completed(struct buf *bp)
 2421 {
 2422         struct ffs_blkfree_trim_params *tp;
 2423 
 2424         tp = bp->b_fsprivate1;
 2425         free(bp, M_TRIM);
 2426         TASK_INIT(&tp->task, 0, ffs_blkfree_trim_task, tp);
 2427         taskqueue_enqueue(tp->ump->um_trim_tq, &tp->task);
 2428 }
 2429 
 2430 /*
 2431  * Trim completion task that frees the associated block(s).
 2432  */
 2433 static void
 2434 ffs_blkfree_trim_task(void *ctx, int pending)
 2435 {
 2436         struct ffs_blkfree_trim_params *tp;
 2437         struct trim_blkreq *blkelm;
 2438         struct ufsmount *ump;
 2439 
 2440         tp = ctx;
 2441         ump = tp->ump;
 2442         while ((blkelm = TAILQ_FIRST(&tp->blklist)) != NULL) {
 2443                 ffs_blkfree_cg(ump, ump->um_fs, tp->devvp, blkelm->bno,
 2444                     blkelm->size, tp->inum, blkelm->pdephd);
 2445                 TAILQ_REMOVE(&tp->blklist, blkelm, blkreqlist);
 2446                 free(blkelm, M_TRIM);
 2447         }
 2448         vn_finished_secondary_write(UFSTOVFS(ump));
 2449         UFS_LOCK(ump);
 2450         ump->um_trim_inflight -= 1;
 2451         ump->um_trim_inflight_blks -= numfrags(ump->um_fs, tp->size);
 2452         UFS_UNLOCK(ump);
 2453         free(tp, M_TRIM);
 2454 }
 2455 
 2456 /*
 2457  * Look up a trim request by its key.
 2458  * Allocate if requested (NEW, REPLACE, SINGLE).
 2459  */
 2460 static struct ffs_blkfree_trim_params *
 2461 trim_lookup(struct ufsmount *ump,
 2462         struct vnode *devvp,
 2463         ufs2_daddr_t bno,
 2464         long size,
 2465         ino_t inum,
 2466         u_long key,
 2467         int alloctype)
 2468 {
 2469         struct trimlist_hashhead *tphashhead;
 2470         struct ffs_blkfree_trim_params *tp, *ntp;
 2471 
 2472         ntp = malloc(sizeof(struct ffs_blkfree_trim_params), M_TRIM, M_WAITOK);
 2473         if (alloctype != SINGLE) {
 2474                 KASSERT(key >= FIRST_VALID_KEY, ("trim_lookup: invalid key"));
 2475                 UFS_LOCK(ump);
 2476                 tphashhead = TRIMLIST_HASH(ump, key);
 2477                 LIST_FOREACH(tp, tphashhead, hashlist)
 2478                         if (key == tp->key)
 2479                                 break;
 2480         }
 2481         switch (alloctype) {
 2482         case NEW:
 2483                 KASSERT(tp == NULL, ("trim_lookup: found trim"));
 2484                 break;
 2485         case OLD:
 2486                 KASSERT(tp != NULL,
 2487                     ("trim_lookup: missing call to ffs_blkrelease_start()"));
 2488                 UFS_UNLOCK(ump);
 2489                 free(ntp, M_TRIM);
 2490                 return (tp);
 2491         case REPLACE:
 2492                 KASSERT(tp != NULL, ("trim_lookup: missing REPLACE trim"));
 2493                 LIST_REMOVE(tp, hashlist);
 2494                 /* tp will be freed by caller */
 2495                 break;
 2496         case DONE:
 2497                 KASSERT(tp != NULL, ("trim_lookup: missing DONE trim"));
 2498                 LIST_REMOVE(tp, hashlist);
 2499                 UFS_UNLOCK(ump);
 2500                 free(ntp, M_TRIM);
 2501                 return (tp);
 2502         }
 2503         TAILQ_INIT(&ntp->blklist);
 2504         ntp->ump = ump;
 2505         ntp->devvp = devvp;
 2506         ntp->bno = bno;
 2507         ntp->size = size;
 2508         ntp->inum = inum;
 2509         ntp->key = key;
 2510         if (alloctype != SINGLE) {
 2511                 LIST_INSERT_HEAD(tphashhead, ntp, hashlist);
 2512                 UFS_UNLOCK(ump);
 2513         }
 2514         return (ntp);
 2515 }
 2516 
 2517 /*
 2518  * Dispatch a trim request.
 2519  */
 2520 static void
 2521 ffs_blkfree_sendtrim(struct ffs_blkfree_trim_params *tp)
 2522 { 
 2523         struct ufsmount *ump;
 2524         struct mount *mp;
 2525         struct buf *bp;
 2526 
 2527         /*
 2528          * Postpone setting the free bit in the cg bitmap until the
 2529          * BIO_DELETE completes.  Otherwise, due to disk queue
 2530          * reordering, the TRIM might be issued after we reuse the
 2531          * block and write some new data into it.
 2532          */
 2533         ump = tp->ump;
 2534         bp = malloc(sizeof(*bp), M_TRIM, M_WAITOK | M_ZERO);
 2535         bp->b_iocmd = BIO_DELETE;
 2536         bp->b_iooffset = dbtob(fsbtodb(ump->um_fs, tp->bno));
 2537         bp->b_iodone = ffs_blkfree_trim_completed;
 2538         bp->b_bcount = tp->size;
 2539         bp->b_fsprivate1 = tp;
 2540         UFS_LOCK(ump);
 2541         ump->um_trim_total += 1;
 2542         ump->um_trim_inflight += 1;
 2543         ump->um_trim_inflight_blks += numfrags(ump->um_fs, tp->size);
 2544         ump->um_trim_total_blks += numfrags(ump->um_fs, tp->size);
 2545         UFS_UNLOCK(ump);
 2546 
 2547         mp = UFSTOVFS(ump);
 2548         vn_start_secondary_write(NULL, &mp, 0);
 2549         g_vfs_strategy(ump->um_bo, bp);
 2550 }
 2551 
 2552 /*
 2553  * Allocate a new key to identify a range of blocks.
 2554  */
 2555 u_long
 2556 ffs_blkrelease_start(struct ufsmount *ump,
 2557         struct vnode *devvp,
 2558         ino_t inum)
 2559 {
 2560         static u_long masterkey;
 2561         u_long key;
 2562 
 2563         if (((ump->um_flags & UM_CANDELETE) == 0) || dotrimcons == 0)
 2564                 return (SINGLETON_KEY);
 2565         do {
 2566                 key = atomic_fetchadd_long(&masterkey, 1);
 2567         } while (key < FIRST_VALID_KEY);
 2568         (void) trim_lookup(ump, devvp, 0, 0, inum, key, NEW);
 2569         return (key);
 2570 }
 2571 
 2572 /*
 2573  * Deallocate a key that has been used to identify a range of blocks.
 2574  */
 2575 void
 2576 ffs_blkrelease_finish(struct ufsmount *ump, u_long key)
 2577 {
 2578         struct ffs_blkfree_trim_params *tp;
 2579 
 2580         if (((ump->um_flags & UM_CANDELETE) == 0) || dotrimcons == 0)
 2581                 return;
 2582         /*
 2583          * If the vfs.ffs.dotrimcons sysctl option is enabled while
 2584          * a file deletion is active, specifically after a call
 2585          * to ffs_blkrelease_start() but before the call to
 2586          * ffs_blkrelease_finish(), ffs_blkrelease_start() will
 2587          * have handed out SINGLETON_KEY rather than starting a
 2588          * collection sequence. Thus if we get a SINGLETON_KEY
 2589          * passed to ffs_blkrelease_finish(), we just return rather
 2590          * than trying to finish the nonexistent sequence.
 2591          */
 2592         if (key == SINGLETON_KEY) {
 2593 #ifdef INVARIANTS
 2594                 printf("%s: vfs.ffs.dotrimcons enabled on active filesystem\n",
 2595                     ump->um_mountp->mnt_stat.f_mntonname);
 2596 #endif
 2597                 return;
 2598         }
 2599         /*
 2600          * We are done with sending blocks using this key. Look up the key
 2601          * using the DONE alloctype (in tp) to request that it be unhashed
 2602          * as we will not be adding to it. If the key has never been used,
 2603          * tp->size will be zero, so we can just free tp. Otherwise the call
 2604          * to ffs_blkfree_sendtrim(tp) causes the block range described by
 2605          * tp to be issued (and then tp to be freed).
 2606          */
 2607         tp = trim_lookup(ump, NULL, 0, 0, 0, key, DONE);
 2608         if (tp->size == 0)
 2609                 free(tp, M_TRIM);
 2610         else
 2611                 ffs_blkfree_sendtrim(tp);
 2612 }
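
/*
 * A minimal usage sketch (mirroring the FFS_BLK_FREE sysctl handler
 * later in this file): obtain a key, free any number of blocks against
 * it so contiguous runs can be batched into single trim requests, then
 * finish the key to flush the last pending range.  The names blks,
 * nblks, and inum here are illustrative, not from this file.
 */
#if 0
	u_long key;
	int i;

	key = ffs_blkrelease_start(ump, ump->um_devvp, inum);
	for (i = 0; i < nblks; i++)
		ffs_blkfree(ump, ump->um_fs, ump->um_devvp, blks[i],
		    ump->um_fs->fs_bsize, inum, VREG, NULL, key);
	ffs_blkrelease_finish(ump, key);
#endif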
 2613 
 2614 /*
 2615  * Set up to free a block or fragment.
 2616  *
 2617  * Check for snapshots that might want to claim the block.
 2618  * If trims are requested, prepare a trim request. Attempt to
 2619  * aggregate consecutive blocks into a single trim request.
 2620  */
 2621 void
 2622 ffs_blkfree(struct ufsmount *ump,
 2623         struct fs *fs,
 2624         struct vnode *devvp,
 2625         ufs2_daddr_t bno,
 2626         long size,
 2627         ino_t inum,
 2628         enum vtype vtype,
 2629         struct workhead *dephd,
 2630         u_long key)
 2631 {
 2632         struct ffs_blkfree_trim_params *tp, *ntp;
 2633         struct trim_blkreq *blkelm;
 2634 
 2635         /*
 2636          * Check to see if a snapshot wants to claim the block.
 2637          * The block is handed off only if devvp is a normal disk device
 2638          * (not itself a snapshot), it has snapshot(s) associated with it,
 2639          * and one of those snapshots claims the block.
 2640          */
 2641         if (devvp->v_type == VCHR &&
 2642             (devvp->v_vflag & VV_COPYONWRITE) &&
 2643             ffs_snapblkfree(fs, devvp, bno, size, inum, vtype, dephd)) {
 2644                 return;
 2645         }
 2646         /*
 2647          * Nothing to delay if TRIM is not required for this block or TRIM
 2648          * is disabled or the operation is performed on a snapshot.
 2649          */
 2650         if (key == NOTRIM_KEY || ((ump->um_flags & UM_CANDELETE) == 0) ||
 2651             devvp->v_type == VREG) {
 2652                 ffs_blkfree_cg(ump, fs, devvp, bno, size, inum, dephd);
 2653                 return;
 2654         }
 2655         blkelm = malloc(sizeof(struct trim_blkreq), M_TRIM, M_WAITOK);
 2656         blkelm->bno = bno;
 2657         blkelm->size = size;
 2658         if (dephd == NULL) {
 2659                 blkelm->pdephd = NULL;
 2660         } else {
 2661                 LIST_INIT(&blkelm->dephd);
 2662                 LIST_SWAP(dephd, &blkelm->dephd, worklist, wk_list);
 2663                 blkelm->pdephd = &blkelm->dephd;
 2664         }
 2665         if (key == SINGLETON_KEY) {
 2666                 /*
 2667                  * Just a single non-contiguous piece. Use the SINGLE
 2668                  * alloctype to return a trim request that will not be
 2669                  * hashed for future lookup.
 2670                  */
 2671                 tp = trim_lookup(ump, devvp, bno, size, inum, key, SINGLE);
 2672                 TAILQ_INSERT_HEAD(&tp->blklist, blkelm, blkreqlist);
 2673                 ffs_blkfree_sendtrim(tp);
 2674                 return;
 2675         }
 2676         /*
 2677          * The callers of this function are not tracking whether or not
 2678          * the blocks are contiguous. They are just saying that they
 2679          * are freeing a set of blocks. It is this code that determines
 2680          * the pieces of that range that are actually contiguous.
 2681          *
 2682          * Calling ffs_blkrelease_start() will have created an entry
 2683          * that we will use.
 2684          */
 2685         tp = trim_lookup(ump, devvp, bno, size, inum, key, OLD);
 2686         if (tp->size == 0) {
 2687                 /*
 2688                  * First block of a potential range, set block and size
 2689                  * for the trim block.
 2690                  */
 2691                 tp->bno = bno;
 2692                 tp->size = size;
 2693                 TAILQ_INSERT_HEAD(&tp->blklist, blkelm, blkreqlist);
 2694                 return;
 2695         }
 2696         /*
 2697          * If this block is a continuation of the range (it either
 2698          * follows at the end or precedes at the front), then we
 2699          * add it to the front or back of the list and return.
 2700          *
 2701          * If it is not a continuation of the trim that we were
 2702          * building, using the REPLACE alloctype, we request that
 2703          * the old trim request (still in tp) be unhashed and a
 2704          * new range started (in ntp). The ffs_blkfree_sendtrim(tp)
 2705          * call causes the block range described by tp to be issued
 2706          * (and then tp to be freed).
 2707          */
 2708         if (bno + numfrags(fs, size) == tp->bno) {
 2709                 TAILQ_INSERT_HEAD(&tp->blklist, blkelm, blkreqlist);
 2710                 tp->bno = bno;
 2711                 tp->size += size;
 2712                 return;
 2713         } else if (bno == tp->bno + numfrags(fs, tp->size)) {
 2714                 TAILQ_INSERT_TAIL(&tp->blklist, blkelm, blkreqlist);
 2715                 tp->size += size;
 2716                 return;
 2717         }
 2718         ntp = trim_lookup(ump, devvp, bno, size, inum, key, REPLACE);
 2719         TAILQ_INSERT_HEAD(&ntp->blklist, blkelm, blkreqlist);
 2720         ffs_blkfree_sendtrim(tp);
 2721 }
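
/*
 * Worked example of the aggregation above: with a single key, freeing
 * three consecutive full blocks one after another grows a single pending
 * request covering all three, whichever end each new block attaches to.
 * A later free that is not adjacent to the pending range takes the
 * REPLACE path: the accumulated range is unhashed and issued as one
 * BIO_DELETE by ffs_blkfree_sendtrim(), and a new range anchored at the
 * non-adjacent block begins accumulating under the same key.
 */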
 2722 
 2723 #ifdef INVARIANTS
 2724 /*
 2725  * Verify allocation of a block or fragment. Returns true if block or
 2726  * fragment is allocated, false if it is free.
 2727  */
 2728 static int
 2729 ffs_checkblk(struct inode *ip,
 2730         ufs2_daddr_t bno,
 2731         long size)
 2732 {
 2733         struct fs *fs;
 2734         struct cg *cgp;
 2735         struct buf *bp;
 2736         ufs1_daddr_t cgbno;
 2737         int i, error, frags, free;
 2738         u_int8_t *blksfree;
 2739 
 2740         fs = ITOFS(ip);
 2741         if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
 2742                 printf("bsize = %ld, size = %ld, fs = %s\n",
 2743                     (long)fs->fs_bsize, size, fs->fs_fsmnt);
 2744                 panic("ffs_checkblk: bad size");
 2745         }
 2746         if ((u_int)bno >= fs->fs_size)
 2747                 panic("ffs_checkblk: bad block %jd", (intmax_t)bno);
 2748         error = ffs_getcg(fs, ITODEVVP(ip), dtog(fs, bno), 0, &bp, &cgp);
 2749         if (error)
 2750                 panic("ffs_checkblk: cylinder group read failed");
 2751         blksfree = cg_blksfree(cgp);
 2752         cgbno = dtogd(fs, bno);
 2753         if (size == fs->fs_bsize) {
 2754                 free = ffs_isblock(fs, blksfree, fragstoblks(fs, cgbno));
 2755         } else {
 2756                 frags = numfrags(fs, size);
 2757                 for (free = 0, i = 0; i < frags; i++)
 2758                         if (isset(blksfree, cgbno + i))
 2759                                 free++;
 2760                 if (free != 0 && free != frags)
 2761                         panic("ffs_checkblk: partially free fragment");
 2762         }
 2763         brelse(bp);
 2764         return (!free);
 2765 }
 2766 #endif /* INVARIANTS */
 2767 
 2768 /*
 2769  * Free an inode.
 2770  */
 2771 int
 2772 ffs_vfree(struct vnode *pvp,
 2773         ino_t ino,
 2774         int mode)
 2775 {
 2776         struct ufsmount *ump;
 2777 
 2778         if (DOINGSOFTDEP(pvp)) {
 2779                 softdep_freefile(pvp, ino, mode);
 2780                 return (0);
 2781         }
 2782         ump = VFSTOUFS(pvp->v_mount);
 2783         return (ffs_freefile(ump, ump->um_fs, ump->um_devvp, ino, mode, NULL));
 2784 }
 2785 
 2786 /*
 2787  * Do the actual free operation.
 2788  * The specified inode is placed back in the free map.
 2789  */
 2790 int
 2791 ffs_freefile(struct ufsmount *ump,
 2792         struct fs *fs,
 2793         struct vnode *devvp,
 2794         ino_t ino,
 2795         int mode,
 2796         struct workhead *wkhd)
 2797 {
 2798         struct cg *cgp;
 2799         struct buf *bp;
 2800         daddr_t dbn;
 2801         int error;
 2802         u_int cg;
 2803         u_int8_t *inosused;
 2804         struct cdev *dev;
 2805         ino_t cgino;
 2806 
 2807         cg = ino_to_cg(fs, ino);
 2808         if (devvp->v_type == VREG) {
 2809                 /* devvp is a snapshot */
 2810                 MPASS(devvp->v_mount->mnt_data == ump);
 2811                 dev = ump->um_devvp->v_rdev;
 2812         } else if (devvp->v_type == VCHR) {
 2813                 /* devvp is a normal disk device */
 2814                 dev = devvp->v_rdev;
 2815         } else {
 2816                 bp = NULL;
 2817                 return (0);
 2818         }
 2819         if (ino >= fs->fs_ipg * fs->fs_ncg)
 2820                 panic("ffs_freefile: range: dev = %s, ino = %ju, fs = %s",
 2821                     devtoname(dev), (uintmax_t)ino, fs->fs_fsmnt);
 2822         if ((error = ffs_getcg(fs, devvp, cg, GB_CVTENXIO, &bp, &cgp)) != 0) {
 2823                 if (!ffs_fsfail_cleanup(ump, error) ||
 2824                     !MOUNTEDSOFTDEP(UFSTOVFS(ump)) || devvp->v_type != VCHR)
 2825                         return (error);
 2826                 if (devvp->v_type == VREG)
 2827                         dbn = fragstoblks(fs, cgtod(fs, cg));
 2828                 else
 2829                         dbn = fsbtodb(fs, cgtod(fs, cg));
 2830                 error = getblkx(devvp, dbn, dbn, fs->fs_cgsize, 0, 0, 0, &bp);
 2831                 KASSERT(error == 0, ("getblkx failed"));
 2832                 softdep_setup_inofree(UFSTOVFS(ump), bp, ino, wkhd);
 2833                 bp->b_flags |= B_RELBUF | B_NOCACHE;
 2834                 bp->b_flags &= ~B_CACHE;
 2835                 bawrite(bp);
 2836                 return (error);
 2837         }
 2838         inosused = cg_inosused(cgp);
 2839         cgino = ino % fs->fs_ipg;
 2840         if (isclr(inosused, cgino)) {
 2841                 printf("dev = %s, ino = %ju, fs = %s\n", devtoname(dev),
 2842                     (uintmax_t)ino, fs->fs_fsmnt);
 2843                 if (fs->fs_ronly == 0)
 2844                         panic("ffs_freefile: freeing free inode");
 2845         }
 2846         clrbit(inosused, cgino);
 2847         if (cgino < cgp->cg_irotor)
 2848                 cgp->cg_irotor = cgino;
 2849         cgp->cg_cs.cs_nifree++;
 2850         UFS_LOCK(ump);
 2851         fs->fs_cstotal.cs_nifree++;
 2852         fs->fs_cs(fs, cg).cs_nifree++;
 2853         if ((mode & IFMT) == IFDIR) {
 2854                 cgp->cg_cs.cs_ndir--;
 2855                 fs->fs_cstotal.cs_ndir--;
 2856                 fs->fs_cs(fs, cg).cs_ndir--;
 2857         }
 2858         fs->fs_fmod = 1;
 2859         ACTIVECLEAR(fs, cg);
 2860         UFS_UNLOCK(ump);
 2861         if (MOUNTEDSOFTDEP(UFSTOVFS(ump)) && devvp->v_type == VCHR)
 2862                 softdep_setup_inofree(UFSTOVFS(ump), bp, ino, wkhd);
 2863         bdwrite(bp);
 2864         return (0);
 2865 }
 2866 
 2867 /*
 2868  * Check to see if a file is free.
 2869  * Used to check for allocated files in snapshots.
 2870  */
 2871 int
 2872 ffs_checkfreefile(struct fs *fs,
 2873         struct vnode *devvp,
 2874         ino_t ino)
 2875 {
 2876         struct cg *cgp;
 2877         struct buf *bp;
 2878         int ret, error;
 2879         u_int cg;
 2880         u_int8_t *inosused;
 2881 
 2882         cg = ino_to_cg(fs, ino);
 2883         if ((devvp->v_type != VREG) && (devvp->v_type != VCHR))
 2884                 return (1);
 2885         if (ino >= fs->fs_ipg * fs->fs_ncg)
 2886                 return (1);
 2887         if ((error = ffs_getcg(fs, devvp, cg, 0, &bp, &cgp)) != 0)
 2888                 return (1);
 2889         inosused = cg_inosused(cgp);
 2890         ino %= fs->fs_ipg;
 2891         ret = isclr(inosused, ino);
 2892         brelse(bp);
 2893         return (ret);
 2894 }
 2895 
 2896 /*
 2897  * Find a block of the specified size in the specified cylinder group.
 2898  *
 2899  * It is a panic if a request is made to find a block when none are
 2900  * available.
 2901  */
 2902 static ufs1_daddr_t
 2903 ffs_mapsearch(struct fs *fs,
 2904         struct cg *cgp,
 2905         ufs2_daddr_t bpref,
 2906         int allocsiz)
 2907 {
 2908         ufs1_daddr_t bno;
 2909         int start, len, loc, i;
 2910         int blk, field, subfield, pos;
 2911         u_int8_t *blksfree;
 2912 
 2913         /*
 2914          * find the fragment by searching through the free block
 2915          * map for an appropriate bit pattern
 2916          */
 2917         if (bpref)
 2918                 start = dtogd(fs, bpref) / NBBY;
 2919         else
 2920                 start = cgp->cg_frotor / NBBY;
 2921         blksfree = cg_blksfree(cgp);
 2922         len = howmany(fs->fs_fpg, NBBY) - start;
 2923         loc = scanc((u_int)len, (u_char *)&blksfree[start],
 2924                 fragtbl[fs->fs_frag],
 2925                 (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
 2926         if (loc == 0) {
 2927                 len = start + 1;
 2928                 start = 0;
 2929                 loc = scanc((u_int)len, (u_char *)&blksfree[0],
 2930                         fragtbl[fs->fs_frag],
 2931                         (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
 2932                 if (loc == 0) {
 2933                         printf("start = %d, len = %d, fs = %s\n",
 2934                             start, len, fs->fs_fsmnt);
 2935                         panic("ffs_alloccg: map corrupted");
 2936                         /* NOTREACHED */
 2937                 }
 2938         }
 2939         bno = (start + len - loc) * NBBY;
 2940         cgp->cg_frotor = bno;
 2941         /*
 2942          * found the byte in the map
 2943          * sift through the bits to find the selected frag
 2944          */
 2945         for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
 2946                 blk = blkmap(fs, blksfree, bno);
 2947                 blk <<= 1;
 2948                 field = around[allocsiz];
 2949                 subfield = inside[allocsiz];
 2950                 for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
 2951                         if ((blk & field) == subfield)
 2952                                 return (bno + pos);
 2953                         field <<= 1;
 2954                         subfield <<= 1;
 2955                 }
 2956         }
 2957         printf("bno = %lu, fs = %s\n", (u_long)bno, fs->fs_fsmnt);
 2958         panic("ffs_alloccg: block not in map");
 2959         return (-1);
 2960 }
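
/*
 * A minimal standalone sketch of the fit test in the loop above,
 * assuming the around[] and inside[] tables from ffs_tables.c, where
 * around[n] is a mask of n + 2 bits and inside[n] is n one-bits shifted
 * left by one (e.g. around[2] = 0xf, inside[2] = 0x6).  Shifting blk
 * left by one supplies a virtual allocated fragment below position
 * zero, so the test matches a run of exactly allocsiz free fragments
 * bounded by allocated (or out-of-block) fragments on both sides.  For
 * map = 0x0c (fragments 2 and 3 free, low bit first), fs_frag = 8, and
 * allocsiz = 2, the sketch returns pos = 2.
 */
#if 0
static int
fragfit(int map, int fs_frag, int allocsiz)
{
	int blk, field, subfield, pos;

	blk = map << 1;
	field = around[allocsiz];
	subfield = inside[allocsiz];
	for (pos = 0; pos <= fs_frag - allocsiz; pos++) {
		if ((blk & field) == subfield)
			return (pos);
		field <<= 1;
		subfield <<= 1;
	}
	return (-1);
}
#endif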
 2961 
 2962 static const struct statfs *
 2963 ffs_getmntstat(struct vnode *devvp)
 2964 {
 2965 
 2966         if (devvp->v_type == VCHR)
 2967                 return (&devvp->v_rdev->si_mountpt->mnt_stat);
 2968         return (ffs_getmntstat(VFSTOUFS(devvp->v_mount)->um_devvp));
 2969 }
 2970 
 2971 /*
 2972  * Fetch and verify a cylinder group.
 2973  */
 2974 int
 2975 ffs_getcg(struct fs *fs,
 2976         struct vnode *devvp,
 2977         u_int cg,
 2978         int flags,
 2979         struct buf **bpp,
 2980         struct cg **cgpp)
 2981 {
 2982         struct buf *bp;
 2983         struct cg *cgp;
 2984         const struct statfs *sfs;
 2985         daddr_t blkno;
 2986         int error;
 2987 
 2988         *bpp = NULL;
 2989         *cgpp = NULL;
 2990         if ((fs->fs_metackhash & CK_CYLGRP) != 0)
 2991                 flags |= GB_CKHASH;
 2992         if (devvp->v_type == VREG)
 2993                 blkno = fragstoblks(fs, cgtod(fs, cg));
 2994         else
 2995                 blkno = fsbtodb(fs, cgtod(fs, cg));
 2996         error = breadn_flags(devvp, blkno, blkno, (int)fs->fs_cgsize, NULL,
 2997             NULL, 0, NOCRED, flags, ffs_ckhash_cg, &bp);
 2998         if (error != 0)
 2999                 return (error);
 3000         cgp = (struct cg *)bp->b_data;
 3001         if ((fs->fs_metackhash & CK_CYLGRP) != 0 &&
 3002             (bp->b_flags & B_CKHASH) != 0 &&
 3003             cgp->cg_ckhash != bp->b_ckhash) {
 3004                 sfs = ffs_getmntstat(devvp);
 3005                 printf("UFS %s%s (%s) cylinder checksum failed: cg %u, cgp: "
 3006                     "0x%x != bp: 0x%jx\n",
 3007                     devvp->v_type == VCHR ? "" : "snapshot of ",
 3008                     sfs->f_mntfromname, sfs->f_mntonname,
 3009                     cg, cgp->cg_ckhash, (uintmax_t)bp->b_ckhash);
 3010                 bp->b_flags &= ~B_CKHASH;
 3011                 bp->b_flags |= B_INVAL | B_NOCACHE;
 3012                 brelse(bp);
 3013                 return (EIO);
 3014         }
 3015         if (!cg_chkmagic(cgp) || cgp->cg_cgx != cg) {
 3016                 sfs = ffs_getmntstat(devvp);
 3017                 printf("UFS %s%s (%s)",
 3018                     devvp->v_type == VCHR ? "" : "snapshot of ",
 3019                     sfs->f_mntfromname, sfs->f_mntonname);
 3020                 if (!cg_chkmagic(cgp))
 3021                         printf(" cg %u: bad magic number 0x%x should be 0x%x\n",
 3022                             cg, cgp->cg_magic, CG_MAGIC);
 3023                 else
 3024                         printf(": wrong cylinder group cg %u != cgx %u\n", cg,
 3025                             cgp->cg_cgx);
 3026                 bp->b_flags &= ~B_CKHASH;
 3027                 bp->b_flags |= B_INVAL | B_NOCACHE;
 3028                 brelse(bp);
 3029                 return (EIO);
 3030         }
 3031         bp->b_flags &= ~B_CKHASH;
 3032         bp->b_xflags |= BX_BKGRDWRITE;
 3033         /*
 3034          * If we are using check hashes on the cylinder group then we want
 3035          * to limit changing the cylinder group time to when we are actually
 3036          * going to write it to disk so that its check hash remains correct
 3037          * in memory. If the CK_CYLGRP flag is set the time is updated in
 3038          * ffs_bufwrite() as the buffer is queued for writing. Otherwise we
 3039          * update the time here as we have done historically.
 3040          */
 3041         if ((fs->fs_metackhash & CK_CYLGRP) != 0)
 3042                 bp->b_xflags |= BX_CYLGRP;
 3043         else
 3044                 cgp->cg_old_time = cgp->cg_time = time_second;
 3045         *bpp = bp;
 3046         *cgpp = cgp;
 3047         return (0);
 3048 }
 3049 
 3050 static void
 3051 ffs_ckhash_cg(struct buf *bp)
 3052 {
 3053         uint32_t ckhash;
 3054         struct cg *cgp;
 3055 
 3056         cgp = (struct cg *)bp->b_data;
 3057         ckhash = cgp->cg_ckhash;
 3058         cgp->cg_ckhash = 0;
 3059         bp->b_ckhash = calculate_crc32c(~0L, bp->b_data, bp->b_bcount);
 3060         cgp->cg_ckhash = ckhash;
 3061 }
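
/*
 * A minimal sketch of the matching verification step, assuming the
 * convention established by ffs_ckhash_cg() above: the stored hash is
 * computed with the cg_ckhash field itself zeroed, so a checker must
 * zero the field before recomputing and restore it afterward.  The
 * cg_ckhash_ok() name is illustrative, not from this file.
 */
#if 0
static int
cg_ckhash_ok(struct buf *bp)
{
	struct cg *cgp;
	uint32_t saved, computed;

	cgp = (struct cg *)bp->b_data;
	saved = cgp->cg_ckhash;
	cgp->cg_ckhash = 0;
	computed = calculate_crc32c(~0L, bp->b_data, bp->b_bcount);
	cgp->cg_ckhash = saved;
	return (computed == saved);
}
#endif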
 3062 
 3063 /*
 3064  * Fserr prints the name of a filesystem with an error diagnostic.
 3065  *
 3066  * The form of the error message is:
 3067  *      fs: error message
 3068  */
 3069 void
 3070 ffs_fserr(struct fs *fs,
 3071         ino_t inum,
 3072         char *cp)
 3073 {
 3074         struct thread *td = curthread;  /* XXX */
 3075         struct proc *p = td->td_proc;
 3076 
 3077         log(LOG_ERR, "pid %d (%s), uid %d inumber %ju on %s: %s\n",
 3078             p->p_pid, p->p_comm, td->td_ucred->cr_uid, (uintmax_t)inum,
 3079             fs->fs_fsmnt, cp);
 3080 }
 3081 
 3082 /*
 3083  * This function provides the capability for the fsck program to
 3084  * update an active filesystem. Fifteen operations are provided:
 3085  *
 3086  * adjrefcnt(inode, amt) - adjusts the reference count on the
 3087  *      specified inode by the specified amount. Under normal
 3088  *      operation the count should always go down. Decrementing
 3089  *      the count to zero will cause the inode to be freed.
 3090  * adjblkcnt(inode, amt) - adjust the number of blocks used by the
 3091  *      inode by the specified amount.
 3092  * setsize(inode, size) - set the size of the inode to the
 3093  *      specified size.
 3094  * adjndir, adjnbfree, adjnifree, adjnffree, adjnumclusters(amt) -
 3095  *      adjust the superblock summary.
 3096  * freedirs(inode, count) - directory inodes [inode..inode + count - 1]
 3097  *      are marked as free. Inodes should never have to be marked
 3098  *      as in use.
 3099  * freefiles(inode, count) - file inodes [inode..inode + count - 1]
 3100  *      are marked as free. Inodes should never have to be marked
 3101  *      as in use.
 3102  * freeblks(blockno, size) - blocks [blockno..blockno + size - 1]
 3103  *      are marked as free. Blocks should never have to be marked
 3104  *      as in use.
 3105  * setflags(flags, set/clear) - the fs_flags field has the specified
 3106  *      flags set (second parameter +1) or cleared (second parameter -1).
 3107  * setcwd(dirinode) - set the current directory to dirinode in the
 3108  *      filesystem associated with the snapshot.
 3109  * setdotdot(oldvalue, newvalue) - Verify that the inode number for ".."
 3110  *      in the current directory is oldvalue then change it to newvalue.
 3111  * unlink(nameptr, oldvalue) - Verify that the inode number associated
 3112  *      with nameptr in the current directory is oldvalue then unlink it.
 3113  */
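
/*
 * A hedged userland sketch of driving one of these operations, assuming
 * the struct fsck_cmd layout and FFS_CMD_VERSION from <ufs/ffs/fs.h>.
 * fsck_ffs(8) passes a descriptor opened on the mounted filesystem as
 * the handle so the kernel can locate the mount; the adjrefcnt_example()
 * name and error handling are illustrative, not from this file.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <ufs/ffs/fs.h>
#include <fcntl.h>
#include <unistd.h>

static int
adjrefcnt_example(const char *mntpt, ino_t ino, int delta)
{
	struct fsck_cmd cmd = { 0 };
	int error;

	if ((cmd.handle = open(mntpt, O_RDONLY)) < 0)
		return (-1);
	cmd.version = FFS_CMD_VERSION;
	cmd.value = ino;	/* inode whose link count is adjusted */
	cmd.size = delta;	/* signed adjustment to apply */
	error = sysctlbyname("vfs.ffs.adjrefcnt", NULL, NULL,
	    &cmd, sizeof(cmd));
	close(cmd.handle);
	return (error);
}
#endif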
 3114 
 3115 static int sysctl_ffs_fsck(SYSCTL_HANDLER_ARGS);
 3116 
 3117 SYSCTL_PROC(_vfs_ffs, FFS_ADJ_REFCNT, adjrefcnt,
 3118     CTLFLAG_WR | CTLTYPE_STRUCT | CTLFLAG_NEEDGIANT,
 3119     0, 0, sysctl_ffs_fsck, "S,fsck",
 3120     "Adjust Inode Reference Count");
 3121 
 3122 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_BLKCNT, adjblkcnt,
 3123     CTLFLAG_WR | CTLFLAG_NEEDGIANT, sysctl_ffs_fsck,
 3124     "Adjust Inode Used Blocks Count");
 3125 
 3126 static SYSCTL_NODE(_vfs_ffs, FFS_SET_SIZE, setsize,
 3127     CTLFLAG_WR | CTLFLAG_NEEDGIANT, sysctl_ffs_fsck,
 3128     "Set the inode size");
 3129 
 3130 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NDIR, adjndir,
 3131     CTLFLAG_WR | CTLFLAG_NEEDGIANT, sysctl_ffs_fsck,
 3132     "Adjust number of directories");
 3133 
 3134 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NBFREE, adjnbfree,
 3135     CTLFLAG_WR | CTLFLAG_NEEDGIANT, sysctl_ffs_fsck,
 3136     "Adjust number of free blocks");
 3137 
 3138 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NIFREE, adjnifree,
 3139     CTLFLAG_WR | CTLFLAG_NEEDGIANT, sysctl_ffs_fsck,
 3140     "Adjust number of free inodes");
 3141 
 3142 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NFFREE, adjnffree,
 3143     CTLFLAG_WR | CTLFLAG_NEEDGIANT, sysctl_ffs_fsck,
 3144     "Adjust number of free frags");
 3145 
 3146 static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NUMCLUSTERS, adjnumclusters,
 3147     CTLFLAG_WR | CTLFLAG_NEEDGIANT, sysctl_ffs_fsck,
 3148     "Adjust number of free clusters");
 3149 
 3150 static SYSCTL_NODE(_vfs_ffs, FFS_DIR_FREE, freedirs,
 3151     CTLFLAG_WR | CTLFLAG_NEEDGIANT, sysctl_ffs_fsck,
 3152     "Free Range of Directory Inodes");
 3153 
 3154 static SYSCTL_NODE(_vfs_ffs, FFS_FILE_FREE, freefiles,
 3155     CTLFLAG_WR | CTLFLAG_NEEDGIANT, sysctl_ffs_fsck,
 3156     "Free Range of File Inodes");
 3157 
 3158 static SYSCTL_NODE(_vfs_ffs, FFS_BLK_FREE, freeblks,
 3159     CTLFLAG_WR | CTLFLAG_NEEDGIANT, sysctl_ffs_fsck,
 3160     "Free Range of Blocks");
 3161 
 3162 static SYSCTL_NODE(_vfs_ffs, FFS_SET_FLAGS, setflags,
 3163     CTLFLAG_WR | CTLFLAG_NEEDGIANT, sysctl_ffs_fsck,
 3164     "Change Filesystem Flags");
 3165 
 3166 static SYSCTL_NODE(_vfs_ffs, FFS_SET_CWD, setcwd,
 3167     CTLFLAG_WR | CTLFLAG_NEEDGIANT, sysctl_ffs_fsck,
 3168     "Set Current Working Directory");
 3169 
 3170 static SYSCTL_NODE(_vfs_ffs, FFS_SET_DOTDOT, setdotdot,
 3171     CTLFLAG_WR | CTLFLAG_NEEDGIANT, sysctl_ffs_fsck,
 3172     "Change Value of .. Entry");
 3173 
 3174 static SYSCTL_NODE(_vfs_ffs, FFS_UNLINK, unlink,
 3175     CTLFLAG_WR | CTLFLAG_NEEDGIANT, sysctl_ffs_fsck,
 3176     "Unlink a Duplicate Name");
 3177 
 3178 #ifdef DIAGNOSTIC
 3179 static int fsckcmds = 0;
 3180 SYSCTL_INT(_debug, OID_AUTO, ffs_fsckcmds, CTLFLAG_RW, &fsckcmds, 0,
 3181         "print out fsck_ffs-based filesystem update commands");
 3182 #endif /* DIAGNOSTIC */
 3183 
 3184 static int
 3185 sysctl_ffs_fsck(SYSCTL_HANDLER_ARGS)
 3186 {
 3187         struct thread *td = curthread;
 3188         struct fsck_cmd cmd;
 3189         struct ufsmount *ump;
 3190         struct vnode *vp, *dvp, *fdvp;
 3191         struct inode *ip, *dp;
 3192         struct mount *mp;
 3193         struct fs *fs;
 3194         struct pwd *pwd;
 3195         ufs2_daddr_t blkno;
 3196         long blkcnt, blksize;
 3197         u_long key;
 3198         struct file *fp;
 3199         cap_rights_t rights;
 3200         int filetype, error;
 3201 
 3202         if (req->newptr == NULL || req->newlen > sizeof(cmd))
 3203                 return (EBADRPC);
 3204         if ((error = SYSCTL_IN(req, &cmd, sizeof(cmd))) != 0)
 3205                 return (error);
 3206         if (cmd.version != FFS_CMD_VERSION)
 3207                 return (ERPCMISMATCH);
 3208         if ((error = getvnode(td, cmd.handle,
 3209             cap_rights_init_one(&rights, CAP_FSCK), &fp)) != 0)
 3210                 return (error);
 3211         vp = fp->f_vnode;
 3212         if (vp->v_type != VREG && vp->v_type != VDIR) {
 3213                 fdrop(fp, td);
 3214                 return (EINVAL);
 3215         }
 3216         vn_start_write(vp, &mp, V_WAIT);
 3217         if (mp == NULL ||
 3218             strncmp(mp->mnt_stat.f_fstypename, "ufs", MFSNAMELEN)) {
 3219                 vn_finished_write(mp);
 3220                 fdrop(fp, td);
 3221                 return (EINVAL);
 3222         }
 3223         ump = VFSTOUFS(mp);
 3224         if (mp->mnt_flag & MNT_RDONLY) {
 3225                 vn_finished_write(mp);
 3226                 fdrop(fp, td);
 3227                 return (EROFS);
 3228         }
 3229         fs = ump->um_fs;
 3230         filetype = IFREG;
 3231 
 3232         switch (oidp->oid_number) {
 3233         case FFS_SET_FLAGS:
 3234 #ifdef DIAGNOSTIC
 3235                 if (fsckcmds)
 3236                         printf("%s: %s flags\n", mp->mnt_stat.f_mntonname,
 3237                             cmd.size > 0 ? "set" : "clear");
 3238 #endif /* DIAGNOSTIC */
 3239                 if (cmd.size > 0)
 3240                         fs->fs_flags |= (long)cmd.value;
 3241                 else
 3242                         fs->fs_flags &= ~(long)cmd.value;
 3243                 break;
 3244 
 3245         case FFS_ADJ_REFCNT:
 3246 #ifdef DIAGNOSTIC
 3247                 if (fsckcmds) {
 3248                         printf("%s: adjust inode %jd link count by %jd\n",
 3249                             mp->mnt_stat.f_mntonname, (intmax_t)cmd.value,
 3250                             (intmax_t)cmd.size);
 3251                 }
 3252 #endif /* DIAGNOSTIC */
 3253                 if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp)))
 3254                         break;
 3255                 ip = VTOI(vp);
 3256                 ip->i_nlink += cmd.size;
 3257                 DIP_SET(ip, i_nlink, ip->i_nlink);
 3258                 ip->i_effnlink += cmd.size;
 3259                 UFS_INODE_SET_FLAG(ip, IN_CHANGE | IN_MODIFIED);
 3260                 error = ffs_update(vp, 1);
 3261                 if (DOINGSOFTDEP(vp))
 3262                         softdep_change_linkcnt(ip);
 3263                 vput(vp);
 3264                 break;
 3265 
 3266         case FFS_ADJ_BLKCNT:
 3267 #ifdef DIAGNOSTIC
 3268                 if (fsckcmds) {
 3269                         printf("%s: adjust inode %jd block count by %jd\n",
 3270                             mp->mnt_stat.f_mntonname, (intmax_t)cmd.value,
 3271                             (intmax_t)cmd.size);
 3272                 }
 3273 #endif /* DIAGNOSTIC */
 3274                 if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp)))
 3275                         break;
 3276                 ip = VTOI(vp);
 3277                 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + cmd.size);
 3278                 UFS_INODE_SET_FLAG(ip, IN_CHANGE | IN_MODIFIED);
 3279                 error = ffs_update(vp, 1);
 3280                 vput(vp);
 3281                 break;
 3282 
 3283         case FFS_SET_SIZE:
 3284 #ifdef DIAGNOSTIC
 3285                 if (fsckcmds) {
 3286                         printf("%s: set inode %jd size to %jd\n",
 3287                             mp->mnt_stat.f_mntonname, (intmax_t)cmd.value,
 3288                             (intmax_t)cmd.size);
 3289                 }
 3290 #endif /* DIAGNOSTIC */
 3291                 if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp)))
 3292                         break;
 3293                 ip = VTOI(vp);
 3294                 DIP_SET(ip, i_size, cmd.size);
 3295                 UFS_INODE_SET_FLAG(ip, IN_SIZEMOD | IN_CHANGE | IN_MODIFIED);
 3296                 error = ffs_update(vp, 1);
 3297                 vput(vp);
 3298                 break;
 3299 
 3300         case FFS_DIR_FREE:
 3301                 filetype = IFDIR;
 3302                 /* fall through */
 3303 
 3304         case FFS_FILE_FREE:
 3305 #ifdef DIAGNOSTIC
 3306                 if (fsckcmds) {
 3307                         if (cmd.size == 1)
 3308                                 printf("%s: free %s inode %ju\n",
 3309                                     mp->mnt_stat.f_mntonname,
 3310                                     filetype == IFDIR ? "directory" : "file",
 3311                                     (uintmax_t)cmd.value);
 3312                         else
 3313                                 printf("%s: free %s inodes %ju-%ju\n",
 3314                                     mp->mnt_stat.f_mntonname,
 3315                                     filetype == IFDIR ? "directory" : "file",
 3316                                     (uintmax_t)cmd.value,
 3317                                     (uintmax_t)(cmd.value + cmd.size - 1));
 3318                 }
 3319 #endif /* DIAGNOSTIC */
 3320                 while (cmd.size > 0) {
 3321                         if ((error = ffs_freefile(ump, fs, ump->um_devvp,
 3322                             cmd.value, filetype, NULL)))
 3323                                 break;
 3324                         cmd.size -= 1;
 3325                         cmd.value += 1;
 3326                 }
 3327                 break;
 3328 
 3329         case FFS_BLK_FREE:
 3330 #ifdef DIAGNOSTIC
 3331                 if (fsckcmds) {
 3332                         if (cmd.size == 1)
 3333                                 printf("%s: free block %jd\n",
 3334                                     mp->mnt_stat.f_mntonname,
 3335                                     (intmax_t)cmd.value);
 3336                         else
 3337                                 printf("%s: free blocks %jd-%jd\n",
 3338                                     mp->mnt_stat.f_mntonname, 
 3339                                     (intmax_t)cmd.value,
 3340                                     (intmax_t)cmd.value + cmd.size - 1);
 3341                 }
 3342 #endif /* DIAGNOSTIC */
 3343                 blkno = cmd.value;
 3344                 blkcnt = cmd.size;
 3345                 blksize = fs->fs_frag - (blkno % fs->fs_frag);
 3346                 key = ffs_blkrelease_start(ump, ump->um_devvp, UFS_ROOTINO);
 3347                 while (blkcnt > 0) {
 3348                         if (blkcnt < blksize)
 3349                                 blksize = blkcnt;
 3350                         ffs_blkfree(ump, fs, ump->um_devvp, blkno,
 3351                             blksize * fs->fs_fsize, UFS_ROOTINO, 
 3352                             VDIR, NULL, key);
 3353                         blkno += blksize;
 3354                         blkcnt -= blksize;
 3355                         blksize = fs->fs_frag;
 3356                 }
 3357                 ffs_blkrelease_finish(ump, key);
 3358                 break;
 3359 
 3360         /*
 3361          * Adjust superblock summaries.  fsck(8) is expected to
 3362          * submit deltas when necessary.
 3363          */
 3364         case FFS_ADJ_NDIR:
 3365 #ifdef DIAGNOSTIC
 3366                 if (fsckcmds) {
 3367                         printf("%s: adjust number of directories by %jd\n",
 3368                             mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
 3369                 }
 3370 #endif /* DIAGNOSTIC */
 3371                 fs->fs_cstotal.cs_ndir += cmd.value;
 3372                 break;
 3373 
 3374         case FFS_ADJ_NBFREE:
 3375 #ifdef DIAGNOSTIC
 3376                 if (fsckcmds) {
 3377                         printf("%s: adjust number of free blocks by %+jd\n",
 3378                             mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
 3379                 }
 3380 #endif /* DIAGNOSTIC */
 3381                 fs->fs_cstotal.cs_nbfree += cmd.value;
 3382                 break;
 3383 
 3384         case FFS_ADJ_NIFREE:
 3385 #ifdef DIAGNOSTIC
 3386                 if (fsckcmds) {
 3387                         printf("%s: adjust number of free inodes by %+jd\n",
 3388                             mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
 3389                 }
 3390 #endif /* DIAGNOSTIC */
 3391                 fs->fs_cstotal.cs_nifree += cmd.value;
 3392                 break;
 3393 
 3394         case FFS_ADJ_NFFREE:
 3395 #ifdef DIAGNOSTIC
 3396                 if (fsckcmds) {
 3397                         printf("%s: adjust number of free frags by %+jd\n",
 3398                             mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
 3399                 }
 3400 #endif /* DIAGNOSTIC */
 3401                 fs->fs_cstotal.cs_nffree += cmd.value;
 3402                 break;
 3403 
 3404         case FFS_ADJ_NUMCLUSTERS:
 3405 #ifdef DIAGNOSTIC
 3406                 if (fsckcmds) {
 3407                         printf("%s: adjust number of free clusters by %+jd\n",
 3408                             mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
 3409                 }
 3410 #endif /* DIAGNOSTIC */
 3411                 fs->fs_cstotal.cs_numclusters += cmd.value;
 3412                 break;
 3413 
 3414         case FFS_SET_CWD:
 3415 #ifdef DIAGNOSTIC
 3416                 if (fsckcmds) {
 3417                         printf("%s: set current directory to inode %jd\n",
 3418                             mp->mnt_stat.f_mntonname, (intmax_t)cmd.value);
 3419                 }
 3420 #endif /* DIAGNOSTIC */
 3421                 if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_SHARED, &vp)))
 3422                         break;
 3423                 AUDIT_ARG_VNODE1(vp);
 3424                 if ((error = change_dir(vp, td)) != 0) {
 3425                         vput(vp);
 3426                         break;
 3427                 }
 3428                 VOP_UNLOCK(vp);
 3429                 pwd_chdir(td, vp);
 3430                 break;
 3431 
 3432         case FFS_SET_DOTDOT:
 3433 #ifdef DIAGNOSTIC
 3434                 if (fsckcmds) {
 3435                         printf("%s: change .. in cwd from %jd to %jd\n",
 3436                             mp->mnt_stat.f_mntonname, (intmax_t)cmd.value,
 3437                             (intmax_t)cmd.size);
 3438                 }
 3439 #endif /* DIAGNOSTIC */
 3440                 /*
 3441                  * First we have to get and lock the parent directory
 3442                  * to which ".." points.
 3443                  */
 3444                 error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &fdvp);
 3445                 if (error)
 3446                         break;
 3447                 /*
 3448                  * Now we get and lock the child directory containing "..".
 3449                  */
 3450                 pwd = pwd_hold(td);
 3451                 dvp = pwd->pwd_cdir;
 3452                 if ((error = vget(dvp, LK_EXCLUSIVE)) != 0) {
 3453                         vput(fdvp);
 3454                         pwd_drop(pwd);
 3455                         break;
 3456                 }
 3457                 dp = VTOI(dvp);
 3458                 SET_I_OFFSET(dp, 12);   /* XXX mastertemplate.dot_reclen */
 3459                 error = ufs_dirrewrite(dp, VTOI(fdvp), (ino_t)cmd.size,
 3460                     DT_DIR, 0);
 3461                 cache_purge(fdvp);
 3462                 cache_purge(dvp);
 3463                 vput(dvp);
 3464                 vput(fdvp);
 3465                 pwd_drop(pwd);
 3466                 break;
 3467 
 3468         case FFS_UNLINK:
 3469 #ifdef DIAGNOSTIC
 3470                 if (fsckcmds) {
 3471                         char buf[32];
 3472 
 3473                         if (copyinstr((char *)(intptr_t)cmd.value, buf,32,NULL))
 3474                                 strncpy(buf, "Name_too_long", 32);
 3475                         printf("%s: unlink %s (inode %jd)\n",
 3476                             mp->mnt_stat.f_mntonname, buf, (intmax_t)cmd.size);
 3477                 }
 3478 #endif /* DIAGNOSTIC */
 3479                 /*
 3480                  * kern_funlinkat will do its own start/finish writes and
 3481                  * they do not nest, so drop ours here. Setting mp == NULL
 3482                  * indicates that vn_finished_write is not needed down below.
 3483                  */
 3484                 vn_finished_write(mp);
 3485                 mp = NULL;
 3486                 error = kern_funlinkat(td, AT_FDCWD,
 3487                     (char *)(intptr_t)cmd.value, FD_NONE, UIO_USERSPACE,
 3488                     0, (ino_t)cmd.size);
 3489                 break;
 3490 
 3491         default:
 3492 #ifdef DIAGNOSTIC
 3493                 if (fsckcmds) {
 3494                         printf("Invalid request %d from fsck\n",
 3495                             oidp->oid_number);
 3496                 }
 3497 #endif /* DIAGNOSTIC */
 3498                 error = EINVAL;
 3499                 break;
 3500         }
 3501         fdrop(fp, td);
 3502         vn_finished_write(mp);
 3503         return (error);
 3504 }
