FreeBSD/Linux Kernel Cross Reference
sys/kern/vfs_cluster.c


    1 /*-
    2  * Copyright (c) 1993
    3  *      The Regents of the University of California.  All rights reserved.
    4  * Modifications/enhancements:
    5  *      Copyright (c) 1995 John S. Dyson.  All rights reserved.
    6  *      Copyright (c) 2012-2013 Matthew Dillon.  All rights reserved.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  * 3. Neither the name of the University nor the names of its contributors
   17  *    may be used to endorse or promote products derived from this software
   18  *    without specific prior written permission.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   30  * SUCH DAMAGE.
   31  */
   32 
   33 #include "opt_debug_cluster.h"
   34 
   35 #include <sys/param.h>
   36 #include <sys/systm.h>
   37 #include <sys/kernel.h>
   38 #include <sys/proc.h>
   39 #include <sys/buf.h>
   40 #include <sys/vnode.h>
   41 #include <sys/malloc.h>
   42 #include <sys/mount.h>
   43 #include <sys/resourcevar.h>
   44 #include <sys/vmmeter.h>
   45 #include <vm/vm.h>
   46 #include <vm/vm_object.h>
   47 #include <vm/vm_page.h>
   48 #include <sys/sysctl.h>
   49 
   50 #include <sys/buf2.h>
   51 #include <vm/vm_page2.h>
   52 
   53 #include <machine/limits.h>
   54 
   55 /*
   56  * Cluster tracking cache - replaces the original vnode v_* fields which had
   57  * limited utility and were not MP safe.
   58  *
   59  * The cluster tracking cache is a simple 4-way set-associative non-chained
   60  * cache.  It is capable of tracking up to four zones separated by 1MB or
   61  * more per vnode.
   62  *
   63  * NOTE: We want this structure to be cache-line friendly so the iterator
   64  *       is embedded rather than in a separate array.
   65  *
   66  * NOTE: A cluster cache entry can become stale when a vnode is recycled.
   67  *       For now we treat the values as heuristic but also self-consistent,
   68  *       i.e. the values cannot be completely random and cannot be SMP unsafe
   69  *       or the cluster code might end up clustering non-contiguous buffers
   70  *       at the wrong offsets.
   71  */
   72 struct cluster_cache {
   73         struct vnode *vp;
   74         u_int   locked;
   75         off_t   v_lastw;                /* last write (write cluster) */
   76         off_t   v_cstart;               /* start block of cluster */
   77         off_t   v_lasta;                /* last allocation */
   78         u_int   v_clen;                 /* length of current cluster */
   79         u_int   iterator;
   80 } __cachealign;
   81 
   82 typedef struct cluster_cache cluster_cache_t;
   83 
   84 #define CLUSTER_CACHE_SIZE      512
   85 #define CLUSTER_CACHE_MASK      (CLUSTER_CACHE_SIZE - 1)
   86 
   87 #define CLUSTER_ZONE            ((off_t)(1024 * 1024))
   88 
   89 cluster_cache_t cluster_array[CLUSTER_CACHE_SIZE];
   90 
   91 #if defined(CLUSTERDEBUG)
   92 #include <sys/sysctl.h>
   93 static int      rcluster = 0;
   94 SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0, "");
   95 #endif
   96 
   97 static MALLOC_DEFINE(M_SEGMENT, "cluster_save", "cluster_save buffer");
   98 
   99 static struct cluster_save *
  100         cluster_collectbufs (cluster_cache_t *cc, struct vnode *vp,
  101                                 struct buf *last_bp, int blksize);
  102 static struct buf *
  103         cluster_rbuild (struct vnode *vp, off_t filesize, off_t loffset,
  104                             off_t doffset, int blksize, int run, 
  105                             struct buf *fbp);
  106 static void cluster_callback (struct bio *);
  107 static void cluster_setram (struct buf *);
  108 static int cluster_wbuild(struct vnode *vp, struct buf **bpp, int blksize,
  109                             off_t start_loffset, int bytes);
  110 
  111 static int write_behind = 1;
  112 SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0,
  113     "Cluster write-behind setting");
  114 static quad_t write_behind_minfilesize = 10 * 1024 * 1024;
  115 SYSCTL_QUAD(_vfs, OID_AUTO, write_behind_minfilesize, CTLFLAG_RW,
  116     &write_behind_minfilesize, 0, "Cluster write-behind minimum file size");
  117 static int max_readahead = 2 * 1024 * 1024;
  118 SYSCTL_INT(_vfs, OID_AUTO, max_readahead, CTLFLAG_RW, &max_readahead, 0,
  119     "Limit in bytes for desired cluster read-ahead");
  120 
  121 extern vm_page_t        bogus_page;
  122 
  123 extern int cluster_pbuf_freecnt;
  124 
  125 /*
  126  * Acquire/release cluster cache (can return dummy entry)
  127  */
  128 static
  129 cluster_cache_t *
  130 cluster_getcache(cluster_cache_t *dummy, struct vnode *vp, off_t loffset)
  131 {
  132         cluster_cache_t *cc;
  133         size_t hv;
  134         int i;
  135         int xact;
  136 
  137         hv = (size_t)(intptr_t)vp ^ (size_t)(intptr_t)vp / sizeof(*vp);
  138         hv &= CLUSTER_CACHE_MASK & ~3;
  139         cc = &cluster_array[hv];
  140 
  141         xact = -1;
  142         for (i = 0; i < 4; ++i) {
  143                 if (cc[i].vp != vp)
  144                         continue;
  145                 if (((cc[i].v_cstart ^ loffset) & ~(CLUSTER_ZONE - 1)) == 0) {
  146                         xact = i;
  147                         break;
  148                 }
  149         }
  150         if (xact >= 0 && atomic_swap_int(&cc[xact].locked, 1) == 0) {
  151                 if (cc[xact].vp == vp &&
  152                     ((cc[xact].v_cstart ^ loffset) & ~(CLUSTER_ZONE - 1)) == 0) {
  153                         return(&cc[xact]);
  154                 }
  155                 atomic_swap_int(&cc[xact].locked, 0);
  156         }
  157 
  158         /*
  159          * New entry.  If we can't acquire the cache line then use the
  160          * passed-in dummy element and reset all fields.
  161          *
  162          * When we are able to acquire the cache line we only clear the
  163          * fields if the vp does not match.  This allows us to multi-zone
  164          * a vp and for excessive zones / partial clusters to be retired.
  165          */
  166         i = cc->iterator++ & 3;
  167         cc += i;
  168         if (atomic_swap_int(&cc->locked, 1) != 0) {
  169                 cc = dummy;
  170                 cc->locked = 1;
  171                 cc->vp = NULL;
  172         }
  173         if (cc->vp != vp) {
  174                 cc->vp = vp;
  175                 cc->v_lasta = 0;
  176                 cc->v_clen = 0;
  177                 cc->v_cstart = 0;
  178                 cc->v_lastw = 0;
  179         }
  180         return(cc);
  181 }
  182 
  183 static
  184 void
  185 cluster_putcache(cluster_cache_t *cc)
  186 {
  187         atomic_swap_int(&cc->locked, 0);
  188 }
  189 
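/*
 * Illustrative usage sketch (hypothetical function name): callers are
 * expected to bracket access to the per-vnode clustering heuristics with
 * cluster_getcache()/cluster_putcache(), supplying a stack 'dummy' entry
 * that absorbs the case where the hashed cache line cannot be locked.
 * cluster_write() below follows this pattern.
 */
#if 0
static void
example_cluster_cache_usage(struct vnode *vp, off_t loffset)
{
        cluster_cache_t dummy;
        cluster_cache_t *cc;

        cc = cluster_getcache(&dummy, vp, loffset);
        /* ... consult/update cc->v_cstart, cc->v_clen, cc->v_lastw ... */
        cc->v_lastw = loffset;
        cluster_putcache(cc);
}
#endif
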
  190 /*
  191  * This replaces bread(), providing a synchronous read of the requested
  192  * buffer plus asynchronous read-ahead within the specified bounds.
  193  *
  194  * The caller may pre-populate *bpp if it already has the requested buffer
  195  * in-hand, else must set *bpp to NULL.  Note that the cluster_read() inline
  196  * sets *bpp to NULL and then calls cluster_readx() for compatibility.
  197  *
  198  * filesize     - read-ahead @ blksize will not cross this boundary
  199  * loffset      - loffset for returned *bpp
  200  * blksize      - blocksize for returned *bpp and read-ahead bps
  201  * minreq       - minimum (not a hard minimum) in bytes, typically reflects
  202  *                a higher level uio resid.
  203  * maxreq       - maximum (sequential heuristic) in bytes (highest typically ~2MB)
  204  * bpp          - return buffer (*bpp) for (loffset,blksize)
  205  */
  206 int
  207 cluster_readx(struct vnode *vp, off_t filesize, off_t loffset,
  208              int blksize, size_t minreq, size_t maxreq, struct buf **bpp)
  209 {
  210         struct buf *bp, *rbp, *reqbp;
  211         off_t origoffset;
  212         off_t doffset;
  213         int error;
  214         int i;
  215         int maxra;
  216         int maxrbuild;
  217 
  218         error = 0;
  219 
  220         /*
  221          * Calculate the desired read-ahead in blksize'd blocks (maxra).
  222          * To do this we calculate maxreq.
  223          *
  224          * maxreq typically starts out as a sequential heuristic.  If the
  225          * high level uio/resid is bigger (minreq), we pop maxreq up to
  226          * minreq.  This represents the case where random I/O is being
  227          * performed by the userland issuing big read()'s.
  228          *
  229          * Then we limit maxreq to max_readahead to ensure it is a reasonable
  230          * value.
  231          *
  232          * Finally we must ensure that (loffset + maxreq) does not cross the
  233          * boundary (filesize) for the current blocksize.  If we allowed it
  234          * to cross we could end up with buffers past the boundary with the
  235          * wrong block size (HAMMER large-data areas use mixed block sizes).
  236          * minreq is also absolutely limited to filesize.
  237          */
  238         if (maxreq < minreq)
  239                 maxreq = minreq;
  240         /* minreq not used beyond this point */
  241 
  242         if (maxreq > max_readahead) {
  243                 maxreq = max_readahead;
  244                 if (maxreq > 16 * 1024 * 1024)
  245                         maxreq = 16 * 1024 * 1024;
  246         }
  247         if (maxreq < blksize)
  248                 maxreq = blksize;
  249         if (loffset + maxreq > filesize) {
  250                 if (loffset > filesize)
  251                         maxreq = 0;
  252                 else
  253                         maxreq = filesize - loffset;
  254         }
  255 
  256         maxra = (int)(maxreq / blksize);
  257 
  258         /*
  259          * Get the requested block.
  260          */
  261         if (*bpp)
  262                 reqbp = bp = *bpp;
  263         else
  264                 *bpp = reqbp = bp = getblk(vp, loffset, blksize, 0, 0);
  265         origoffset = loffset;
  266 
  267         /*
  268          * Calculate the maximum cluster size for a single I/O, used
  269          * by cluster_rbuild().
  270          */
  271         maxrbuild = vmaxiosize(vp) / blksize;
  272 
  273         /*
  274          * if it is in the cache, then check to see if the reads have been
  275          * sequential.  If they have, then try some read-ahead, otherwise
  276          * back-off on prospective read-aheads.
  277          */
  278         if (bp->b_flags & B_CACHE) {
  279                 /*
  280                  * Not sequential, do not do any read-ahead
  281                  */
  282                 if (maxra <= 1)
  283                         return 0;
  284 
  285                 /*
  286                  * No read-ahead mark, do not do any read-ahead
  287                  * yet.
  288                  */
  289                 if ((bp->b_flags & B_RAM) == 0)
  290                         return 0;
  291 
  292                 /*
  293                  * We hit a read-ahead-mark, figure out how much read-ahead
  294                  * to do (maxra) and where to start (loffset).
  295                  *
  296                  * Shortcut the scan.  Typically the way this works is that
  297                  * we've built up all the blocks in between except for the
  298                  * last in previous iterations, so if the second-to-last
  299                  * block is present we just skip ahead to it.
  300                  *
  301                  * This algorithm has O(1) cpu in the steady state no
  302                  * matter how large maxra is.
  303                  */
  304                 bp->b_flags &= ~B_RAM;
  305 
  306                 if (findblk(vp, loffset + (maxra - 2) * blksize, FINDBLK_TEST))
  307                         i = maxra - 1;
  308                 else
  309                         i = 1;
  310                 while (i < maxra) {
  311                         if (findblk(vp, loffset + i * blksize,
  312                                     FINDBLK_TEST) == NULL) {
  313                                 break;
  314                         }
  315                         ++i;
  316                 }
  317 
  318                 /*
  319                  * We got everything or everything is in the cache, no
  320                  * point continuing.
  321                  */
  322                 if (i >= maxra)
  323                         return 0;
  324 
  325                 /*
  326                  * Calculate where to start the read-ahead and how much
  327                  * to do.  Generally speaking we want to read-ahead by
  328                  * (maxra) when we've found a read-ahead mark.  We do
  329                  * not want to reduce maxra here as it will cause
  330                  * successive read-ahead I/O's to be smaller and smaller.
  331                  *
  332                  * However, we have to make sure we don't break the
  333                  * filesize limitation for the clustered operation.
  334                  */
  335                 loffset += i * blksize;
  336                 reqbp = bp = NULL;
  337 
  338                 if (loffset >= filesize)
  339                         return 0;
  340                 if (loffset + maxra * blksize > filesize) {
  341                         maxreq = filesize - loffset;
  342                         maxra = (int)(maxreq / blksize);
  343                 }
  344         } else {
  345                 __debugvar off_t firstread = bp->b_loffset;
  346                 int nblks;
  347 
  348                 /*
  349                  * Set up synchronous read for bp.
  350                  */
  351                 bp->b_cmd = BUF_CMD_READ;
  352                 bp->b_bio1.bio_done = biodone_sync;
  353                 bp->b_bio1.bio_flags |= BIO_SYNC;
  354 
  355                 KASSERT(firstread != NOOFFSET, 
  356                         ("cluster_read: no buffer offset"));
  357 
  358                 /*
  359                  * nblks is our cluster_rbuild request size, limited
  360                  * primarily by the device.
  361                  */
  362                 if ((nblks = maxra) > maxrbuild)
  363                         nblks = maxrbuild;
  364 
  365                 if (nblks > 1) {
  366                         int burstbytes;
  367 
  368                         error = VOP_BMAP(vp, loffset, &doffset,
  369                                          &burstbytes, NULL, BUF_CMD_READ);
  370                         if (error)
  371                                 goto single_block_read;
  372                         if (nblks > burstbytes / blksize)
  373                                 nblks = burstbytes / blksize;
  374                         if (doffset == NOOFFSET)
  375                                 goto single_block_read;
  376                         if (nblks <= 1)
  377                                 goto single_block_read;
  378 
  379                         bp = cluster_rbuild(vp, filesize, loffset,
  380                                             doffset, blksize, nblks, bp);
  381                         loffset += bp->b_bufsize;
  382                         maxra -= bp->b_bufsize / blksize;
  383                 } else {
  384 single_block_read:
  385                         /*
  386                          * If it isn't in the cache, then get a chunk from
  387                          * disk if sequential, otherwise just get the block.
  388                          */
  389                         cluster_setram(bp);
  390                         loffset += blksize;
  391                         --maxra;
  392                 }
  393         }
  394 
  395         /*
  396          * If B_CACHE was not set issue bp.  bp will either be an
  397          * asynchronous cluster buf or a synchronous single-buf.
  398          * If it is a single buf it will be the same as reqbp.
  399          *
  400          * NOTE: Once an async cluster buf is issued bp becomes invalid.
  401          */
  402         if (bp) {
  403 #if defined(CLUSTERDEBUG)
  404                 if (rcluster)
  405                         kprintf("S(%012jx,%d,%d)\n",
  406                             (intmax_t)bp->b_loffset, bp->b_bcount, maxra);
  407 #endif
  408                 if ((bp->b_flags & B_CLUSTER) == 0)
  409                         vfs_busy_pages(vp, bp);
  410                 bp->b_flags &= ~(B_ERROR|B_INVAL);
  411                 vn_strategy(vp, &bp->b_bio1);
  412                 error = 0;
  413                 /* bp invalid now */
  414                 bp = NULL;
  415         }
  416 
  417         /*
  418          * If we have been doing sequential I/O, then do some read-ahead.
  419          * The code above us should have positioned us at the next likely
  420          * offset.
  421          *
  422          * Only mess with buffers which we can immediately lock.  HAMMER
  423          * will do device-readahead irrespective of what the blocks
  424          * represent.
  425          */
  426         while (error == 0 && maxra > 0) {
  427                 int burstbytes;
  428                 int tmp_error;
  429                 int nblks;
  430 
  431                 rbp = getblk(vp, loffset, blksize,
  432                              GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
  433                 if (rbp == NULL)
  434                         goto no_read_ahead;
  435                 if ((rbp->b_flags & B_CACHE)) {
  436                         bqrelse(rbp);
  437                         goto no_read_ahead;
  438                 }
  439 
  440                 /*
  441                  * An error from the read-ahead bmap has nothing to do
  442                  * with the caller's original request.
  443                  */
  444                 tmp_error = VOP_BMAP(vp, loffset, &doffset,
  445                                      &burstbytes, NULL, BUF_CMD_READ);
  446                 if (tmp_error || doffset == NOOFFSET) {
  447                         rbp->b_flags |= B_INVAL;
  448                         brelse(rbp);
  449                         rbp = NULL;
  450                         goto no_read_ahead;
  451                 }
  452                 if ((nblks = maxra) > maxrbuild)
  453                         nblks = maxrbuild;
  454                 if (nblks > burstbytes / blksize)
  455                         nblks = burstbytes / blksize;
  456 
  457                 /*
  458                  * rbp: async read
  459                  */
  460                 rbp->b_cmd = BUF_CMD_READ;
  461                 /*rbp->b_flags |= B_AGE*/;
  462                 cluster_setram(rbp);
  463 
  464                 if (nblks > 1) {
  465                         rbp = cluster_rbuild(vp, filesize, loffset,
  466                                              doffset, blksize, 
  467                                              nblks, rbp);
  468                 } else {
  469                         rbp->b_bio2.bio_offset = doffset;
  470                 }
  471 
  472                 rbp->b_flags &= ~(B_ERROR|B_INVAL);
  473 
  474                 if ((rbp->b_flags & B_CLUSTER) == 0)
  475                         vfs_busy_pages(vp, rbp);
  476                 BUF_KERNPROC(rbp);
  477                 loffset += rbp->b_bufsize;
  478                 maxra -= rbp->b_bufsize / blksize;
  479                 vn_strategy(vp, &rbp->b_bio1);
  480                 /* rbp invalid now */
  481         }
  482 
  483         /*
  484          * Wait for our original buffer to complete its I/O.  reqbp will
  485          * be NULL if the original buffer was B_CACHE.  We are returning
  486          * (*bpp) which is the same as reqbp when reqbp != NULL.
  487          */
  488 no_read_ahead:
  489         if (reqbp) {
  490                 KKASSERT(reqbp->b_bio1.bio_flags & BIO_SYNC);
  491                 error = biowait(&reqbp->b_bio1, "clurd");
  492         }
  493         return (error);
  494 }
  495 
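/*
 * Caller sketch (hypothetical): a filesystem read path would typically use
 * cluster_readx() as a bread() replacement, passing its uio resid as minreq
 * and its sequential-access heuristic as maxreq.  The function and variable
 * names here are illustrative assumptions, not taken from any particular
 * filesystem.
 */
#if 0
static int
example_fs_read_block(struct vnode *vp, off_t filesize, off_t loffset,
                      int blksize, size_t resid, size_t seq_maxreq)
{
        struct buf *bp = NULL;          /* let cluster_readx() allocate it */
        int error;

        error = cluster_readx(vp, filesize, loffset, blksize,
                              resid, seq_maxreq, &bp);
        if (error == 0) {
                /* ... copy the data out of bp->b_data ... */
                bqrelse(bp);
        } else if (bp) {
                brelse(bp);
        }
        return (error);
}
#endif
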
  496 /*
  497  * This replaces breadcb(), providing an asynchronous read of the requested
  498  * buffer with a callback, plus an asynchronous read-ahead within the
  499  * specified bounds.
  500  *
  501  * The callback must check whether BIO_DONE is set in the bio and issue
  502  * the bpdone(bp, 0) if it isn't.  The callback is responsible for clearing
  503  * BIO_DONE and disposing of the I/O (bqrelse()ing it).
  504  *
  505  * filesize     - read-ahead @ blksize will not cross this boundary
  506  * loffset      - loffset for returned *bpp
  507  * blksize      - blocksize for returned *bpp and read-ahead bps
  508  * minreq       - minimum (not a hard minimum) in bytes, typically reflects
  509  *                a higher level uio resid.
  510  * maxreq       - maximum (sequential heuristic) in bytes (highest typically ~2MB)
  511  * bpp          - return buffer (*bpp) for (loffset,blksize)
  512  */
  513 void
  514 cluster_readcb(struct vnode *vp, off_t filesize, off_t loffset,
  515              int blksize, size_t minreq, size_t maxreq,
  516              void (*func)(struct bio *), void *arg)
  517 {
  518         struct buf *bp, *rbp, *reqbp;
  519         off_t origoffset;
  520         off_t doffset;
  521         int i;
  522         int maxra;
  523         int maxrbuild;
  524 
  525         /*
  526          * Calculate the desired read-ahead in blksize'd blocks (maxra).
  527          * To do this we calculate maxreq.
  528          *
  529          * maxreq typically starts out as a sequential heuristic.  If the
  530          * high level uio/resid is bigger (minreq), we pop maxreq up to
  531          * minreq.  This represents the case where random I/O is being
  532          * performed by the userland issuing big read()'s.
  533          *
  534          * Then we limit maxreq to max_readahead to ensure it is a reasonable
  535          * value.
  536          *
  537          * Finally we must ensure that (loffset + maxreq) does not cross the
  538          * boundary (filesize) for the current blocksize.  If we allowed it
  539          * to cross we could end up with buffers past the boundary with the
  540          * wrong block size (HAMMER large-data areas use mixed block sizes).
  541          * minreq is also absolutely limited to filesize.
  542          */
  543         if (maxreq < minreq)
  544                 maxreq = minreq;
  545         /* minreq not used beyond this point */
  546 
  547         if (maxreq > max_readahead) {
  548                 maxreq = max_readahead;
  549                 if (maxreq > 16 * 1024 * 1024)
  550                         maxreq = 16 * 1024 * 1024;
  551         }
  552         if (maxreq < blksize)
  553                 maxreq = blksize;
  554         if (loffset + maxreq > filesize) {
  555                 if (loffset > filesize)
  556                         maxreq = 0;
  557                 else
  558                         maxreq = filesize - loffset;
  559         }
  560 
  561         maxra = (int)(maxreq / blksize);
  562 
  563         /*
  564          * Get the requested block.
  565          */
  566         reqbp = bp = getblk(vp, loffset, blksize, 0, 0);
  567         origoffset = loffset;
  568 
  569         /*
  570          * Calculate the maximum cluster size for a single I/O, used
  571          * by cluster_rbuild().
  572          */
  573         maxrbuild = vmaxiosize(vp) / blksize;
  574 
  575         /*
  576          * if it is in the cache, then check to see if the reads have been
  577          * sequential.  If they have, then try some read-ahead, otherwise
  578          * back-off on prospective read-aheads.
  579          */
  580         if (bp->b_flags & B_CACHE) {
  581                 /*
  582                  * Setup for func() call whether we do read-ahead or not.
  583                  */
  584                 bp->b_bio1.bio_caller_info1.ptr = arg;
  585                 bp->b_bio1.bio_flags |= BIO_DONE;
  586 
  587                 /*
  588                  * Not sequential, do not do any read-ahead
  589                  */
  590                 if (maxra <= 1)
  591                         goto no_read_ahead;
  592 
  593                 /*
  594                  * No read-ahead mark, do not do any read-ahead
  595                  * yet.
  596                  */
  597                 if ((bp->b_flags & B_RAM) == 0)
  598                         goto no_read_ahead;
  599                 bp->b_flags &= ~B_RAM;
  600 
  601                 /*
  602                  * We hit a read-ahead-mark, figure out how much read-ahead
  603                  * to do (maxra) and where to start (loffset).
  604                  *
  605                  * Shortcut the scan.  Typically the way this works is that
  606                  * we've built up all the blocks in between except for the
  607                  * last in previous iterations, so if the second-to-last
  608                  * block is present we just skip ahead to it.
  609                  *
  610                  * This algorithm has O(1) cpu in the steady state no
  611                  * matter how large maxra is.
  612                  */
  613                 if (findblk(vp, loffset + (maxra - 2) * blksize, FINDBLK_TEST))
  614                         i = maxra - 1;
  615                 else
  616                         i = 1;
  617                 while (i < maxra) {
  618                         if (findblk(vp, loffset + i * blksize,
  619                                     FINDBLK_TEST) == NULL) {
  620                                 break;
  621                         }
  622                         ++i;
  623                 }
  624 
  625                 /*
  626                  * We got everything or everything is in the cache, no
  627                  * point continuing.
  628                  */
  629                 if (i >= maxra)
  630                         goto no_read_ahead;
  631 
  632                 /*
  633                  * Calculate where to start the read-ahead and how much
  634                  * to do.  Generally speaking we want to read-ahead by
  635                  * (maxra) when we've found a read-ahead mark.  We do
  636                  * not want to reduce maxra here as it will cause
  637                  * successive read-ahead I/O's to be smaller and smaller.
  638                  *
  639                  * However, we have to make sure we don't break the
  640                  * filesize limitation for the clustered operation.
  641                  */
  642                 loffset += i * blksize;
  643                 bp = NULL;
  644                 /* leave reqbp intact to force function callback */
  645 
  646                 if (loffset >= filesize)
  647                         goto no_read_ahead;
  648                 if (loffset + maxra * blksize > filesize) {
  649                         maxreq = filesize - loffset;
  650                         maxra = (int)(maxreq / blksize);
  651                 }
  652         } else {
  653                 __debugvar off_t firstread = bp->b_loffset;
  654                 int nblks;
  655                 int tmp_error;
  656 
  657                 /*
  658                  * Set up asynchronous read for bp; func() is called on completion.
  659                  */
  660                 bp->b_flags &= ~(B_ERROR | B_EINTR | B_INVAL);
  661                 bp->b_cmd = BUF_CMD_READ;
  662                 bp->b_bio1.bio_done = func;
  663                 bp->b_bio1.bio_caller_info1.ptr = arg;
  664                 BUF_KERNPROC(bp);
  665                 reqbp = NULL;   /* don't func() reqbp, it's running async */
  666 
  667                 KASSERT(firstread != NOOFFSET,
  668                         ("cluster_read: no buffer offset"));
  669 
  670                 /*
  671                  * nblks is our cluster_rbuild request size, limited
  672                  * primarily by the device.
  673                  */
  674                 if ((nblks = maxra) > maxrbuild)
  675                         nblks = maxrbuild;
  676 
  677                 if (nblks > 1) {
  678                         int burstbytes;
  679 
  680                         tmp_error = VOP_BMAP(vp, loffset, &doffset,
  681                                              &burstbytes, NULL, BUF_CMD_READ);
  682                         if (tmp_error)
  683                                 goto single_block_read;
  684                         if (nblks > burstbytes / blksize)
  685                                 nblks = burstbytes / blksize;
  686                         if (doffset == NOOFFSET)
  687                                 goto single_block_read;
  688                         if (nblks <= 1)
  689                                 goto single_block_read;
  690 
  691                         bp = cluster_rbuild(vp, filesize, loffset,
  692                                             doffset, blksize, nblks, bp);
  693                         loffset += bp->b_bufsize;
  694                         maxra -= bp->b_bufsize / blksize;
  695                 } else {
  696 single_block_read:
  697                         /*
  698                          * If it isn't in the cache, then get a chunk from
  699                          * disk if sequential, otherwise just get the block.
  700                          */
  701                         cluster_setram(bp);
  702                         loffset += blksize;
  703                         --maxra;
  704                 }
  705         }
  706 
  707         /*
  708          * If bp != NULL then B_CACHE was *NOT* set and bp must be issued.
  709          * bp will either be an asynchronous cluster buf or an asynchronous
  710          * single-buf.
  711          *
  712          * NOTE: Once an async cluster buf is issued bp becomes invalid.
  713          */
  714         if (bp) {
  715 #if defined(CLUSTERDEBUG)
  716                 if (rcluster)
  717                         kprintf("S(%012jx,%d,%d)\n",
  718                             (intmax_t)bp->b_loffset, bp->b_bcount, maxra);
  719 #endif
  720                 if ((bp->b_flags & B_CLUSTER) == 0)
  721                         vfs_busy_pages(vp, bp);
  722                 bp->b_flags &= ~(B_ERROR|B_INVAL);
  723                 vn_strategy(vp, &bp->b_bio1);
  724                 /* bp invalid now */
  725                 bp = NULL;
  726         }
  727 
  728         /*
  729          * If we have been doing sequential I/O, then do some read-ahead.
  730          * The code above us should have positioned us at the next likely
  731          * offset.
  732          *
  733          * Only mess with buffers which we can immediately lock.  HAMMER
  734          * will do device-readahead irrespective of what the blocks
  735          * represent.
  736          */
  737         while (maxra > 0) {
  738                 int burstbytes;
  739                 int tmp_error;
  740                 int nblks;
  741 
  742                 rbp = getblk(vp, loffset, blksize,
  743                              GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
  744                 if (rbp == NULL)
  745                         goto no_read_ahead;
  746                 if ((rbp->b_flags & B_CACHE)) {
  747                         bqrelse(rbp);
  748                         goto no_read_ahead;
  749                 }
  750 
  751                 /*
  752                  * An error from the read-ahead bmap has nothing to do
  753                  * with the caller's original request.
  754                  */
  755                 tmp_error = VOP_BMAP(vp, loffset, &doffset,
  756                                      &burstbytes, NULL, BUF_CMD_READ);
  757                 if (tmp_error || doffset == NOOFFSET) {
  758                         rbp->b_flags |= B_INVAL;
  759                         brelse(rbp);
  760                         rbp = NULL;
  761                         goto no_read_ahead;
  762                 }
  763                 if ((nblks = maxra) > maxrbuild)
  764                         nblks = maxrbuild;
  765                 if (nblks > burstbytes / blksize)
  766                         nblks = burstbytes / blksize;
  767 
  768                 /*
  769                  * rbp: async read
  770                  */
  771                 rbp->b_cmd = BUF_CMD_READ;
  772                 /*rbp->b_flags |= B_AGE*/;
  773                 cluster_setram(rbp);
  774 
  775                 if (nblks > 1) {
  776                         rbp = cluster_rbuild(vp, filesize, loffset,
  777                                              doffset, blksize,
  778                                              nblks, rbp);
  779                 } else {
  780                         rbp->b_bio2.bio_offset = doffset;
  781                 }
  782 
  783                 rbp->b_flags &= ~(B_ERROR|B_INVAL);
  784 
  785                 if ((rbp->b_flags & B_CLUSTER) == 0)
  786                         vfs_busy_pages(vp, rbp);
  787                 BUF_KERNPROC(rbp);
  788                 loffset += rbp->b_bufsize;
  789                 maxra -= rbp->b_bufsize / blksize;
  790                 vn_strategy(vp, &rbp->b_bio1);
  791                 /* rbp invalid now */
  792         }
  793 
  794         /*
  795          * If reqbp is non-NULL it had B_CACHE set and we issue the
  796          * function callback synchronously.
  797          *
  798          * Note that we may start additional asynchronous I/O before doing
  799          * the func() callback for the B_CACHE case
  800          */
  801 no_read_ahead:
  802         if (reqbp)
  803                 func(&reqbp->b_bio1);
  804 }
  805 
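/*
 * Callback sketch (hypothetical): per the contract described above, the
 * function handed to cluster_readcb() issues bpdone(bp, 0) when the bio is
 * not already marked BIO_DONE, clears BIO_DONE (which is pre-set in the
 * B_CACHE path), consumes the data, and disposes of the buffer.  The names
 * here are illustrative only.
 */
#if 0
static void
example_read_callback(struct bio *bio)
{
        struct buf *bp = bio->bio_buf;

        if ((bio->bio_flags & BIO_DONE) == 0)
                bpdone(bp, 0);
        bio->bio_flags &= ~BIO_DONE;
        /* ... consume bp->b_data ... */
        bqrelse(bp);
}
#endif
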
  806 /*
  807  * If blocks are contiguous on disk, use this to provide clustered
  808  * read ahead.  We will read as many blocks as possible sequentially
  809  * and then parcel them up into logical blocks in the buffer hash table.
  810  *
  811  * This function either returns a cluster buf or it returns fbp.  fbp is
  812  * already expected to be set up as a synchronous or asynchronous request.
  813  *
  814  * If a cluster buf is returned it will always be async.
  815  */
  816 static struct buf *
  817 cluster_rbuild(struct vnode *vp, off_t filesize, off_t loffset, off_t doffset,
  818                int blksize, int run, struct buf *fbp)
  819 {
  820         struct buf *bp, *tbp;
  821         off_t boffset;
  822         int i, j;
  823         int maxiosize = vmaxiosize(vp);
  824 
  825         /*
  826          * avoid a division
  827          */
  828         while (loffset + run * blksize > filesize) {
  829                 --run;
  830         }
  831 
  832         tbp = fbp;
  833         tbp->b_bio2.bio_offset = doffset;
  834         if ((tbp->b_flags & B_MALLOC) ||
  835             ((tbp->b_flags & B_VMIO) == 0) || (run <= 1)) {
  836                 return tbp;
  837         }
  838 
  839         bp = trypbuf_kva(&cluster_pbuf_freecnt);
  840         if (bp == NULL) {
  841                 return tbp;
  842         }
  843 
  844         /*
  845          * We are synthesizing a buffer out of vm_page_t's, but
  846          * if the block size is not page aligned then the starting
  847          * address may not be either.  Inherit the b_data offset
  848          * from the original buffer.
  849          */
  850         bp->b_data = (char *)((vm_offset_t)bp->b_data |
  851             ((vm_offset_t)tbp->b_data & PAGE_MASK));
  852         bp->b_flags |= B_CLUSTER | B_VMIO;
  853         bp->b_cmd = BUF_CMD_READ;
  854         bp->b_bio1.bio_done = cluster_callback;         /* default to async */
  855         bp->b_bio1.bio_caller_info1.cluster_head = NULL;
  856         bp->b_bio1.bio_caller_info2.cluster_tail = NULL;
  857         bp->b_loffset = loffset;
  858         bp->b_bio2.bio_offset = doffset;
  859         KASSERT(bp->b_loffset != NOOFFSET,
  860                 ("cluster_rbuild: no buffer offset"));
  861 
  862         bp->b_bcount = 0;
  863         bp->b_bufsize = 0;
  864         bp->b_xio.xio_npages = 0;
  865 
  866         for (boffset = doffset, i = 0; i < run; ++i, boffset += blksize) {
  867                 if (i) {
  868                         if ((bp->b_xio.xio_npages * PAGE_SIZE) +
  869                             round_page(blksize) > maxiosize) {
  870                                 break;
  871                         }
  872 
  873                         /*
  874                          * Shortcut some checks and try to avoid buffers that
  875                          * would block in the lock.  The same checks have to
  876                          * be made again after we officially get the buffer.
  877                          */
  878                         tbp = getblk(vp, loffset + i * blksize, blksize,
  879                                      GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
  880                         if (tbp == NULL)
  881                                 break;
  882                         for (j = 0; j < tbp->b_xio.xio_npages; j++) {
  883                                 if (tbp->b_xio.xio_pages[j]->valid)
  884                                         break;
  885                         }
  886                         if (j != tbp->b_xio.xio_npages) {
  887                                 bqrelse(tbp);
  888                                 break;
  889                         }
  890 
  891                         /*
  892                          * Stop scanning if the buffer is fully valid
  893                          * (marked B_CACHE), or locked (may be doing a
  894                          * background write), or if the buffer is not
  895                          * VMIO backed.  The clustering code can only deal
  896                          * with VMIO-backed buffers.
  897                          */
  898                         if ((tbp->b_flags & (B_CACHE|B_LOCKED)) ||
  899                             (tbp->b_flags & B_VMIO) == 0 ||
  900                             (LIST_FIRST(&tbp->b_dep) != NULL &&
  901                              buf_checkread(tbp))
  902                         ) {
  903                                 bqrelse(tbp);
  904                                 break;
  905                         }
  906 
  907                         /*
  908                          * The buffer must be completely invalid in order to
  909                          * take part in the cluster.  If it is partially valid
  910                          * then we stop.
  911                          */
  912                         for (j = 0; j < tbp->b_xio.xio_npages; j++) {
  913                                 if (tbp->b_xio.xio_pages[j]->valid)
  914                                         break;
  915                         }
  916                         if (j != tbp->b_xio.xio_npages) {
  917                                 bqrelse(tbp);
  918                                 break;
  919                         }
  920 
  921                         /*
  922                          * Set a read-ahead mark as appropriate.  Always
  923                          * set the read-ahead mark at (run - 1).  It is
  924                          * unclear why we were also setting it at i == 1.
  925                          */
  926                         if (/*i == 1 ||*/ i == (run - 1))
  927                                 cluster_setram(tbp);
  928 
  929                         /*
  930                          * Depress the priority of buffers not explicitly
  931                          * requested.
  932                          */
  933                         /* tbp->b_flags |= B_AGE; */
  934 
  935                         /*
  936                          * Set the block number if it isn't set, otherwise
  937                          * if it is make sure it matches the block number we
  938                          * expect.
  939                          */
  940                         if (tbp->b_bio2.bio_offset == NOOFFSET) {
  941                                 tbp->b_bio2.bio_offset = boffset;
  942                         } else if (tbp->b_bio2.bio_offset != boffset) {
  943                                 brelse(tbp);
  944                                 break;
  945                         }
  946                 }
  947 
  948                 /*
  949                  * The passed-in tbp (i == 0) will already be set up for
  950                  * async or sync operation.  All other tbp's acquired in
  951                  * our loop are set up for async operation.
  952                  */
  953                 tbp->b_cmd = BUF_CMD_READ;
  954                 BUF_KERNPROC(tbp);
  955                 cluster_append(&bp->b_bio1, tbp);
  956                 for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
  957                         vm_page_t m;
  958 
  959                         m = tbp->b_xio.xio_pages[j];
  960                         vm_page_busy_wait(m, FALSE, "clurpg");
  961                         vm_page_io_start(m);
  962                         vm_page_wakeup(m);
  963                         vm_object_pip_add(m->object, 1);
  964                         if ((bp->b_xio.xio_npages == 0) ||
  965                                 (bp->b_xio.xio_pages[bp->b_xio.xio_npages-1] != m)) {
  966                                 bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
  967                                 bp->b_xio.xio_npages++;
  968                         }
  969                         if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL)
  970                                 tbp->b_xio.xio_pages[j] = bogus_page;
  971                 }
  972                 /*
  973                  * XXX shouldn't this be += size for both, like in 
  974                  * cluster_wbuild()?
  975                  *
  976                  * Don't inherit tbp->b_bufsize as it may be larger due to
  977                  * a non-page-aligned size.  Instead just aggregate using
  978                  * 'size'.
  979                  */
  980                 if (tbp->b_bcount != blksize)
  981                     kprintf("warning: tbp->b_bcount wrong %d vs %d\n", tbp->b_bcount, blksize);
  982                 if (tbp->b_bufsize != blksize)
  983                     kprintf("warning: tbp->b_bufsize wrong %d vs %d\n", tbp->b_bufsize, blksize);
  984                 bp->b_bcount += blksize;
  985                 bp->b_bufsize += blksize;
  986         }
  987 
  988         /*
  989          * Fully valid pages in the cluster are already good and do not need
  990          * to be re-read from disk.  Replace the page with bogus_page
  991          */
  992         for (j = 0; j < bp->b_xio.xio_npages; j++) {
  993                 if ((bp->b_xio.xio_pages[j]->valid & VM_PAGE_BITS_ALL) ==
  994                     VM_PAGE_BITS_ALL) {
  995                         bp->b_xio.xio_pages[j] = bogus_page;
  996                 }
  997         }
  998         if (bp->b_bufsize > bp->b_kvasize) {
  999                 panic("cluster_rbuild: b_bufsize(%d) > b_kvasize(%d)",
 1000                     bp->b_bufsize, bp->b_kvasize);
 1001         }
 1002         pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
 1003                 (vm_page_t *)bp->b_xio.xio_pages, bp->b_xio.xio_npages);
 1004         BUF_KERNPROC(bp);
 1005         return (bp);
 1006 }
 1007 
 1008 /*
 1009  * Cleanup after a clustered read or write.
 1010  * This is complicated by the fact that any of the buffers might have
 1011  * extra memory (if there were no empty buffer headers at allocbuf time)
 1012  * that we will need to shift around.
 1013  *
 1014  * The returned bio is &bp->b_bio1
 1015  */
 1016 void
 1017 cluster_callback(struct bio *bio)
 1018 {
 1019         struct buf *bp = bio->bio_buf;
 1020         struct buf *tbp;
 1021         int error = 0;
 1022 
 1023         /*
 1024          * Must propagate errors to all the components.  A short read (EOF)
 1025          * is a critical error.
 1026          */
 1027         if (bp->b_flags & B_ERROR) {
 1028                 error = bp->b_error;
 1029         } else if (bp->b_bcount != bp->b_bufsize) {
 1030                 panic("cluster_callback: unexpected EOF on cluster %p!", bio);
 1031         }
 1032 
 1033         pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_xio.xio_npages);
 1034         /*
 1035          * Move memory from the large cluster buffer into the component
 1036          * buffers and mark IO as done on these.  Since the memory map
 1037          * is the same, no actual copying is required.
 1038          */
 1039         while ((tbp = bio->bio_caller_info1.cluster_head) != NULL) {
 1040                 bio->bio_caller_info1.cluster_head = tbp->b_cluster_next;
 1041                 if (error) {
 1042                         tbp->b_flags |= B_ERROR | B_IODEBUG;
 1043                         tbp->b_error = error;
 1044                 } else {
 1045                         tbp->b_dirtyoff = tbp->b_dirtyend = 0;
 1046                         tbp->b_flags &= ~(B_ERROR|B_INVAL);
 1047                         tbp->b_flags |= B_IODEBUG;
 1048                         /*
 1049                          * XXX the bdwrite()/bqrelse() issued during
 1050                          * cluster building clears B_RELBUF (see bqrelse()
 1051                          * comment).  If direct I/O was specified, we have
 1052                          * to restore it here to allow the buffer and VM
 1053                          * to be freed.
 1054                          */
 1055                         if (tbp->b_flags & B_DIRECT)
 1056                                 tbp->b_flags |= B_RELBUF;
 1057                 }
 1058                 biodone(&tbp->b_bio1);
 1059         }
 1060         relpbuf(bp, &cluster_pbuf_freecnt);
 1061 }
 1062 
 1063 /*
 1064  * Implement modified write build for cluster.
 1065  *
 1066  *      write_behind = 0        write behind disabled
 1067  *      write_behind = 1        write behind normal (default)
 1068  *      write_behind = 2        write behind backed-off
 1069  *
 1070  * In addition, write_behind is only activated for files that have
 1071  * grown past a certain size (default 10MB).  Otherwise temporary files
 1072  * wind up generating a lot of unnecessary disk I/O.
 1073  */
 1074 static __inline int
 1075 cluster_wbuild_wb(struct vnode *vp, int blksize, off_t start_loffset, int len)
 1076 {
 1077         int r = 0;
 1078 
 1079         switch(write_behind) {
 1080         case 2:
 1081                 if (start_loffset < len)
 1082                         break;
 1083                 start_loffset -= len;
 1084                 /* fall through */
 1085         case 1:
 1086                 if (vp->v_filesize >= write_behind_minfilesize) {
 1087                         r = cluster_wbuild(vp, NULL, blksize,
 1088                                            start_loffset, len);
 1089                 }
 1090                 /* fall through */
 1091         default:
 1092                 /* fall through */
 1093                 break;
 1094         }
 1095         return(r);
 1096 }
 1097 
 1098 /*
 1099  * Do clustered write for FFS.
 1100  *
 1101  * Four cases:
 1102  *      1. Write is not sequential (write asynchronously)
 1103  *      Write is sequential:
 1104  *      2.      beginning of cluster - begin cluster
 1105  *      3.      middle of a cluster - add to cluster
 1106  *      4.      end of a cluster - asynchronously write cluster
 1107  *
 1108  * WARNING! vnode fields are not locked and must ONLY be used heuristically.
 1109  */
 1110 void
 1111 cluster_write(struct buf *bp, off_t filesize, int blksize, int seqcount)
 1112 {
 1113         struct vnode *vp;
 1114         off_t loffset;
 1115         int maxclen, cursize;
 1116         int async;
 1117         cluster_cache_t dummy;
 1118         cluster_cache_t *cc;
 1119 
 1120         vp = bp->b_vp;
 1121         if (vp->v_type == VREG)
 1122                 async = vp->v_mount->mnt_flag & MNT_ASYNC;
 1123         else
 1124                 async = 0;
 1125         loffset = bp->b_loffset;
 1126         KASSERT(bp->b_loffset != NOOFFSET, 
 1127                 ("cluster_write: no buffer offset"));
 1128 
 1129         cc = cluster_getcache(&dummy, vp, loffset);
 1130 
 1131         /*
 1132          * Initialize vnode to beginning of file.
 1133          */
 1134         if (loffset == 0)
 1135                 cc->v_lasta = cc->v_clen = cc->v_cstart = cc->v_lastw = 0;
 1136 
 1137         if (cc->v_clen == 0 || loffset != cc->v_lastw + blksize ||
 1138             bp->b_bio2.bio_offset == NOOFFSET ||
 1139             (bp->b_bio2.bio_offset != cc->v_lasta + blksize)) {
 1140                 maxclen = vmaxiosize(vp);
 1141                 if (cc->v_clen != 0) {
 1142                         /*
 1143                          * Next block is not sequential.
 1144                          *
 1145                          * If we are not writing at end of file, the process
 1146                          * seeked to another point in the file since its last
 1147                          * write, or we have reached our maximum cluster size,
 1148                          * then push the previous cluster. Otherwise try
 1149                          * reallocating to make it sequential.
 1150                          *
 1151                          * Change to algorithm: only push previous cluster if
 1152                          * it was sequential from the point of view of the
 1153                          * seqcount heuristic, otherwise leave the buffer 
 1154                          * intact so we can potentially optimize the I/O
 1155                          * later on in the buf_daemon or update daemon
 1156                          * flush.
 1157                          */
 1158                         cursize = cc->v_lastw - cc->v_cstart + blksize;
 1159                         if (bp->b_loffset + blksize < filesize ||
 1160                             loffset != cc->v_lastw + blksize ||
 1161                             cc->v_clen <= cursize) {
 1162                                 if (!async && seqcount > 0) {
 1163                                         cluster_wbuild_wb(vp, blksize,
 1164                                                 cc->v_cstart, cursize);
 1165                                 }
 1166                         } else {
 1167                                 struct buf **bpp, **endbp;
 1168                                 struct cluster_save *buflist;
 1169 
 1170                                 buflist = cluster_collectbufs(cc, vp,
 1171                                                               bp, blksize);
 1172                                 endbp = &buflist->bs_children
 1173                                     [buflist->bs_nchildren - 1];
 1174                                 if (VOP_REALLOCBLKS(vp, buflist)) {
 1175                                         /*
 1176                                          * Failed, push the previous cluster
 1177                                          * if *really* writing sequentially
 1178                                          * in the logical file (seqcount > 1),
 1179                                          * otherwise delay it in the hopes that
 1180                                          * the low level disk driver can
 1181                                          * optimize the write ordering.
 1182                                          *
 1183                                          * NOTE: We do not brelse the last
 1184                                          *       element which is bp, and we
 1185                                          *       do not return here.
 1186                                          */
 1187                                         for (bpp = buflist->bs_children;
 1188                                              bpp < endbp; bpp++)
 1189                                                 brelse(*bpp);
 1190                                         kfree(buflist, M_SEGMENT);
 1191                                         if (seqcount > 1) {
 1192                                                 cluster_wbuild_wb(vp, 
 1193                                                     blksize, cc->v_cstart,
 1194                                                     cursize);
 1195                                         }
 1196                                 } else {
 1197                                         /*
 1198                                          * Succeeded, keep building cluster.
 1199                                          */
 1200                                         for (bpp = buflist->bs_children;
 1201                                              bpp <= endbp; bpp++)
 1202                                                 bdwrite(*bpp);
 1203                                         kfree(buflist, M_SEGMENT);
 1204                                         cc->v_lastw = loffset;
 1205                                         cc->v_lasta = bp->b_bio2.bio_offset;
 1206                                         cluster_putcache(cc);
 1207                                         return;
 1208                                 }
 1209                         }
 1210                 }
 1211                 /*
 1212                  * Consider beginning a cluster. If at end of file, make
 1213                  * cluster as large as possible, otherwise find size of
 1214                  * existing cluster.
 1215                  */
 1216                 if ((vp->v_type == VREG) &&
 1217                     bp->b_loffset + blksize < filesize &&
 1218                     (bp->b_bio2.bio_offset == NOOFFSET) &&
 1219                     (VOP_BMAP(vp, loffset, &bp->b_bio2.bio_offset, &maxclen, NULL, BUF_CMD_WRITE) ||
 1220                      bp->b_bio2.bio_offset == NOOFFSET)) {
 1221                         bdwrite(bp);
 1222                         cc->v_clen = 0;
 1223                         cc->v_lasta = bp->b_bio2.bio_offset;
 1224                         cc->v_cstart = loffset + blksize;
 1225                         cc->v_lastw = loffset;
 1226                         cluster_putcache(cc);
 1227                         return;
 1228                 }
 1229                 if (maxclen > blksize)
 1230                         cc->v_clen = maxclen - blksize;
 1231                 else
 1232                         cc->v_clen = 0;
 1233                 if (!async && cc->v_clen == 0) { /* I/O not contiguous */
 1234                         cc->v_cstart = loffset + blksize;
 1235                         bdwrite(bp);
 1236                 } else {        /* Wait for rest of cluster */
 1237                         cc->v_cstart = loffset;
 1238                         bdwrite(bp);
 1239                 }
 1240         } else if (loffset == cc->v_cstart + cc->v_clen) {
 1241                 /*
 1242                  * At end of cluster, write it out if seqcount tells us we
 1243                  * are operating sequentially, otherwise let the buf or
 1244                  * update daemon handle it.
 1245                  */
 1246                 bdwrite(bp);
 1247                 if (seqcount > 1)
 1248                         cluster_wbuild_wb(vp, blksize, cc->v_cstart,
 1249                                           cc->v_clen + blksize);
 1250                 cc->v_clen = 0;
 1251                 cc->v_cstart = loffset + blksize;
 1252         } else if (vm_page_count_severe() &&
 1253                    bp->b_loffset + blksize < filesize) {
 1254                 /*
 1255                  * We are low on memory, get it going NOW.  However, do not
 1256                  * try to push out a partial block at the end of the file
 1257                  * as this could lead to extremely non-optimal write activity.
 1258                  */
 1259                 bawrite(bp);
 1260         } else {
 1261                 /*
 1262                  * In the middle of a cluster, so just delay the I/O for now.
 1263                  */
 1264                 bdwrite(bp);
 1265         }
 1266         cc->v_lastw = loffset;
 1267         cc->v_lasta = bp->b_bio2.bio_offset;
 1268         cluster_putcache(cc);
 1269 }
 1270 
 1271 /*
 1272  * This is the clustered version of bawrite().  It works similarly to
 1273  * cluster_write() except I/O on the buffer is guaranteed to occur.
 1274  */
 1275 int
 1276 cluster_awrite(struct buf *bp)
 1277 {
 1278         int total;
 1279 
 1280         /*
 1281          * Don't bother if it isn't clusterable.
 1282          */
 1283         if ((bp->b_flags & B_CLUSTEROK) == 0 ||
 1284             bp->b_vp == NULL ||
 1285             (bp->b_vp->v_flag & VOBJBUF) == 0) {
 1286                 total = bp->b_bufsize;
 1287                 bawrite(bp);
 1288                 return (total);
 1289         }
 1290 
 1291         total = cluster_wbuild(bp->b_vp, &bp, bp->b_bufsize,
 1292                                bp->b_loffset, vmaxiosize(bp->b_vp));
 1293         if (bp)
 1294                 bawrite(bp);
 1295 
 1296         return total;
 1297 }
 1298 
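      /*
       * Illustrative caller-side sketch (not part of the original source):
       * a hypothetical flush path that needs I/O started right away hands
       * the locked, dirty buffer to cluster_awrite() above.  The routine
       * falls back to a plain bawrite() when the buffer cannot be
       * clustered and returns the number of bytes for which write I/O was
       * initiated.
       *
       *      static void
       *      example_flush_buf(struct buf *bp)       hypothetical helper
       *      {
       *              int bytes_started;
       *
       *              bytes_started = cluster_awrite(bp);
       *              example_account_io(bytes_started);      hypothetical
       *      }
       */
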
 1299 /*
 1300  * This is an awful lot like cluster_rbuild...wish they could be combined.
 1301  * start_loffset is the logical file offset at which to begin building
 1302  * clusters and bytes is the maximum number of bytes to scan forward
 1303  * for dirty, clusterable buffers to coalesce.
 1304  *
 1305  * cluster_wbuild() normally does not guarantee anything.  If bpp is
 1306  * non-NULL and cluster_wbuild() is able to incorporate it into the
 1307  * I/O it will set *bpp to NULL, otherwise it will leave it alone and
 1308  * the caller must dispose of *bpp.
 1309  */
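      /*
       * Hedged caller-side sketch of the *bpp contract described above
       * (illustrative, not part of the original source); cluster_awrite()
       * above follows the same pattern:
       *
       *      total = cluster_wbuild(vp, &bp, blksize, loffset, bytes);
       *      if (bp != NULL)         bp was not absorbed into a cluster,
       *              bawrite(bp);    so the caller still has to write it.
       */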
 1310 static int
 1311 cluster_wbuild(struct vnode *vp, struct buf **bpp,
 1312                int blksize, off_t start_loffset, int bytes)
 1313 {
 1314         struct buf *bp, *tbp;
 1315         int i, j;
 1316         int totalwritten = 0;
 1317         int must_initiate;
 1318         int maxiosize = vmaxiosize(vp);
 1319 
 1320         while (bytes > 0) {
 1321                 /*
 1322                  * If the buffer matches the passed locked & removed buffer
 1323                  * we use the passed buffer (which might not be B_DELWRI).
 1324                  *
 1325                  * Otherwise locate the buffer and determine if it is
 1326                  * compatible.
 1327                  */
 1328                 if (bpp && (*bpp)->b_loffset == start_loffset) {
 1329                         tbp = *bpp;
 1330                         *bpp = NULL;
 1331                         bpp = NULL;
 1332                 } else {
 1333                         tbp = findblk(vp, start_loffset, FINDBLK_NBLOCK);
 1334                         if (tbp == NULL ||
 1335                             (tbp->b_flags & (B_LOCKED | B_INVAL | B_DELWRI)) !=
 1336                              B_DELWRI ||
 1337                             (LIST_FIRST(&tbp->b_dep) && buf_checkwrite(tbp))) {
 1338                                 if (tbp)
 1339                                         BUF_UNLOCK(tbp);
 1340                                 start_loffset += blksize;
 1341                                 bytes -= blksize;
 1342                                 continue;
 1343                         }
 1344                         bremfree(tbp);
 1345                 }
 1346                 KKASSERT(tbp->b_cmd == BUF_CMD_DONE);
 1347 
 1348                 /*
 1349                  * Extra memory in the buffer, punt on this buffer.
 1350                  * XXX we could handle this in most cases, but we would
 1351                  * have to push the extra memory down to after our max
 1352                  * possible cluster size and then potentially pull it back
 1353                  * up if the cluster was terminated prematurely--too much
 1354                  * hassle.
 1355                  */
 1356                 if (((tbp->b_flags & (B_CLUSTEROK|B_MALLOC)) != B_CLUSTEROK) ||
 1357                     (tbp->b_bcount != tbp->b_bufsize) ||
 1358                     (tbp->b_bcount != blksize) ||
 1359                     (bytes == blksize) ||
 1360                     ((bp = getpbuf_kva(&cluster_pbuf_freecnt)) == NULL)) {
 1361                         totalwritten += tbp->b_bufsize;
 1362                         bawrite(tbp);
 1363                         start_loffset += blksize;
 1364                         bytes -= blksize;
 1365                         continue;
 1366                 }
 1367 
 1368                 /*
 1369                  * Set up the pbuf.  Track our append point with b_bcount
 1370                  * and b_bufsize.  b_bufsize is not used by the device but
 1371                  * our caller uses it to loop clusters and we use it to
 1372                  * detect a premature EOF on the block device.
 1373                  */
 1374                 bp->b_bcount = 0;
 1375                 bp->b_bufsize = 0;
 1376                 bp->b_xio.xio_npages = 0;
 1377                 bp->b_loffset = tbp->b_loffset;
 1378                 bp->b_bio2.bio_offset = tbp->b_bio2.bio_offset;
 1379 
 1380                 /*
 1381                  * We are synthesizing a buffer out of vm_page_t's, but
 1382                  * if the block size is not page aligned then the starting
 1383                  * address may not be either.  Inherit the b_data offset
 1384                  * from the original buffer.
 1385                  */
 1386                 bp->b_data = (char *)((vm_offset_t)bp->b_data |
 1387                     ((vm_offset_t)tbp->b_data & PAGE_MASK));
 1388                 bp->b_flags &= ~B_ERROR;
 1389                 bp->b_flags |= B_CLUSTER | B_BNOCLIP |
 1390                         (tbp->b_flags & (B_VMIO | B_NEEDCOMMIT));
 1391                 bp->b_bio1.bio_caller_info1.cluster_head = NULL;
 1392                 bp->b_bio1.bio_caller_info2.cluster_tail = NULL;
 1393 
 1394                 /*
 1395                  * From this location in the file, scan forward to see
 1396                  * if there are buffers with adjacent data that need to
 1397                  * be written as well.
 1398                  *
 1399                  * IO *must* be initiated on index 0 at this point
 1400                  * (particularly when called from cluster_awrite()).
 1401                  */
 1402                 for (i = 0; i < bytes; (i += blksize), (start_loffset += blksize)) {
 1403                         if (i == 0) {
 1404                                 must_initiate = 1;
 1405                         } else {
 1406                                 /*
 1407                                  * Not first buffer.
 1408                                  */
 1409                                 must_initiate = 0;
 1410                                 tbp = findblk(vp, start_loffset,
 1411                                               FINDBLK_NBLOCK);
 1412                                 /*
 1413                                  * Buffer not found or could not be locked
 1414                                  * non-blocking.
 1415                                  */
 1416                                 if (tbp == NULL)
 1417                                         break;
 1418 
 1419                                 /*
 1420                                  * If it IS in core, but has different
 1421                                  * characteristics, then don't cluster
 1422                                  * with it.
 1423                                  */
 1424                                 if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
 1425                                      B_INVAL | B_DELWRI | B_NEEDCOMMIT))
 1426                                     != (B_DELWRI | B_CLUSTEROK |
 1427                                      (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
 1428                                     (tbp->b_flags & B_LOCKED)
 1429                                 ) {
 1430                                         BUF_UNLOCK(tbp);
 1431                                         break;
 1432                                 }
 1433 
 1434                                 /*
 1435                                  * Check that the combined cluster
 1436                                  * would make sense with regard to pages
 1437                                  * and would not be too large.
 1438                                  *
 1439                                  * WARNING! buf_checkwrite() must be the last
 1440                                  *          check made.  If it returns 0 then
 1441                                  *          we must initiate the I/O.
 1442                                  */
 1443                                 if ((tbp->b_bcount != blksize) ||
 1444                                   ((bp->b_bio2.bio_offset + i) !=
 1445                                     tbp->b_bio2.bio_offset) ||
 1446                                   ((tbp->b_xio.xio_npages + bp->b_xio.xio_npages) >
 1447                                     (maxiosize / PAGE_SIZE)) ||
 1448                                   (LIST_FIRST(&tbp->b_dep) &&
 1449                                    buf_checkwrite(tbp))
 1450                                 ) {
 1451                                         BUF_UNLOCK(tbp);
 1452                                         break;
 1453                                 }
 1454                                 if (LIST_FIRST(&tbp->b_dep))
 1455                                         must_initiate = 1;
 1456                                 /*
 1457                                  * Ok, it's passed all the tests,
 1458                                  * so remove it from the free list
 1459                                  * and mark it busy. We will use it.
 1460                                  */
 1461                                 bremfree(tbp);
 1462                                 KKASSERT(tbp->b_cmd == BUF_CMD_DONE);
 1463                         }
 1464 
 1465                         /*
 1466                          * If the IO is via the VM then we do some
 1467                          * special VM hackery (yuck).  Since the buffer's
 1468                          * block size may not be page-aligned it is possible
 1469                          * for a page to be shared between two buffers.  We
 1470                          * have to get rid of the duplication when building
 1471                          * the cluster.
 1472                          */
 1473                         if (tbp->b_flags & B_VMIO) {
 1474                                 vm_page_t m;
 1475 
 1476                                 /*
 1477                                  * Try to avoid deadlocks with the VM system.
 1478                                  * However, we cannot abort the I/O if
 1479                                  * must_initiate is non-zero.
 1480                                  */
 1481                                 if (must_initiate == 0) {
 1482                                         for (j = 0;
 1483                                              j < tbp->b_xio.xio_npages;
 1484                                              ++j) {
 1485                                                 m = tbp->b_xio.xio_pages[j];
 1486                                                 if (m->flags & PG_BUSY) {
 1487                                                         bqrelse(tbp);
 1488                                                         goto finishcluster;
 1489                                                 }
 1490                                         }
 1491                                 }
 1492 
 1493                                 for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
 1494                                         m = tbp->b_xio.xio_pages[j];
 1495                                         vm_page_busy_wait(m, FALSE, "clurpg");
 1496                                         vm_page_io_start(m);
 1497                                         vm_page_wakeup(m);
 1498                                         vm_object_pip_add(m->object, 1);
 1499                                         if ((bp->b_xio.xio_npages == 0) ||
 1500                                           (bp->b_xio.xio_pages[bp->b_xio.xio_npages - 1] != m)) {
 1501                                                 bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
 1502                                                 bp->b_xio.xio_npages++;
 1503                                         }
 1504                                 }
 1505                         }
 1506                         bp->b_bcount += blksize;
 1507                         bp->b_bufsize += blksize;
 1508 
 1509                         bundirty(tbp);
 1510                         tbp->b_flags &= ~B_ERROR;
 1511                         tbp->b_cmd = BUF_CMD_WRITE;
 1512                         BUF_KERNPROC(tbp);
 1513                         cluster_append(&bp->b_bio1, tbp);
 1514 
 1515                         /*
 1516                          * Check for latent dependencies that need to be handled.
 1517                          */
 1518                         if (LIST_FIRST(&tbp->b_dep) != NULL)
 1519                                 buf_start(tbp);
 1520                 }
 1521         finishcluster:
 1522                 pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
 1523                             (vm_page_t *)bp->b_xio.xio_pages,
 1524                             bp->b_xio.xio_npages);
 1525                 if (bp->b_bufsize > bp->b_kvasize) {
 1526                         panic("cluster_wbuild: b_bufsize(%d) "
 1527                               "> b_kvasize(%d)\n",
 1528                               bp->b_bufsize, bp->b_kvasize);
 1529                 }
 1530                 totalwritten += bp->b_bufsize;
 1531                 bp->b_dirtyoff = 0;
 1532                 bp->b_dirtyend = bp->b_bufsize;
 1533                 bp->b_bio1.bio_done = cluster_callback;
 1534                 bp->b_cmd = BUF_CMD_WRITE;
 1535 
 1536                 vfs_busy_pages(vp, bp);
 1537                 bsetrunningbufspace(bp, bp->b_bufsize);
 1538                 BUF_KERNPROC(bp);
 1539                 vn_strategy(vp, &bp->b_bio1);
 1540 
 1541                 bytes -= i;
 1542         }
 1543         return totalwritten;
 1544 }
 1545 
 1546 /*
 1547  * Collect together all the buffers in a cluster, plus add one
 1548  * additional buffer passed-in.
 1549  *
 1550  * Only pre-existing buffers whose block size matches blksize are collected.
 1551  * (this is primarily because HAMMER1 uses varying block sizes and we don't
 1552  * want to override its choices).
 1553  */
 1554 static struct cluster_save *
 1555 cluster_collectbufs(cluster_cache_t *cc, struct vnode *vp,
 1556                     struct buf *last_bp, int blksize)
 1557 {
 1558         struct cluster_save *buflist;
 1559         struct buf *bp;
 1560         off_t loffset;
 1561         int i, len;
 1562         int j;
 1563         int k;
 1564 
 1565         len = (int)(cc->v_lastw - cc->v_cstart + blksize) / blksize;
 1566         KKASSERT(len > 0);
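              /*
               * A single allocation holds the cluster_save header followed
               * immediately by an array of len + 1 buffer pointers, and
               * bs_children is pointed just past the header.
               */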
 1567         buflist = kmalloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
 1568                          M_SEGMENT, M_WAITOK);
 1569         buflist->bs_nchildren = 0;
 1570         buflist->bs_children = (struct buf **) (buflist + 1);
 1571         for (loffset = cc->v_cstart, i = 0, j = 0;
 1572              i < len;
 1573              (loffset += blksize), i++) {
 1574                 bp = getcacheblk(vp, loffset,
 1575                                  last_bp->b_bcount, GETBLK_SZMATCH);
 1576                 buflist->bs_children[i] = bp;
 1577                 if (bp == NULL) {
 1578                         j = i + 1;
 1579                 } else if (bp->b_bio2.bio_offset == NOOFFSET) {
 1580                         VOP_BMAP(bp->b_vp, bp->b_loffset,
 1581                                  &bp->b_bio2.bio_offset,
 1582                                  NULL, NULL, BUF_CMD_WRITE);
 1583                 }
 1584         }
 1585 
 1586         /*
 1587          * Get rid of gaps: keep only the run after the last hole
 1588          */
 1589         for (k = 0; k < j; ++k) {
 1590                 if (buflist->bs_children[k]) {
 1591                         bqrelse(buflist->bs_children[k]);
 1592                         buflist->bs_children[k] = NULL;
 1593                 }
 1594         }
 1595         if (j != 0) {
 1596                 if (j != i) {
 1597                         bcopy(buflist->bs_children + j,
 1598                               buflist->bs_children + 0,
 1599                               sizeof(buflist->bs_children[0]) * (i - j));
 1600                 }
 1601                 i -= j;
 1602         }
 1603         buflist->bs_children[i] = bp = last_bp;
 1604         if (bp->b_bio2.bio_offset == NOOFFSET) {
 1605                 VOP_BMAP(bp->b_vp, bp->b_loffset, &bp->b_bio2.bio_offset,
 1606                          NULL, NULL, BUF_CMD_WRITE);
 1607         }
 1608         buflist->bs_nchildren = i + 1;
 1609         return (buflist);
 1610 }
 1611 
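      /*
       * Append tbp to the singly-linked list of component buffers hung off
       * the pseudo buffer's bio, using the cached tail pointer so the
       * append is O(1).  The cluster completion callback later walks this
       * list via b_cluster_next to complete each component buffer.
       */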
 1612 void
 1613 cluster_append(struct bio *bio, struct buf *tbp)
 1614 {
 1615         tbp->b_cluster_next = NULL;
 1616         if (bio->bio_caller_info1.cluster_head == NULL) {
 1617                 bio->bio_caller_info1.cluster_head = tbp;
 1618                 bio->bio_caller_info2.cluster_tail = tbp;
 1619         } else {
 1620                 bio->bio_caller_info2.cluster_tail->b_cluster_next = tbp;
 1621                 bio->bio_caller_info2.cluster_tail = tbp;
 1622         }
 1623 }
 1624 
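      /*
       * Mark the buffer as a read-ahead trigger: set B_RAM on the buffer
       * and PG_RAM on its first backing page so that a later hit on this
       * block can be detected and used to schedule further read-ahead.
       */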
 1625 static
 1626 void
 1627 cluster_setram (struct buf *bp)
 1628 {
 1629         bp->b_flags |= B_RAM;
 1630         if (bp->b_xio.xio_npages)
 1631                 vm_page_flag_set(bp->b_xio.xio_pages[0], PG_RAM);
 1632 }
