FreeBSD/Linux Kernel Cross Reference
sys/vm/vnode_pager.c


    1 /*-
    2  * Copyright (c) 1990 University of Utah.
    3  * Copyright (c) 1991 The Regents of the University of California.
    4  * All rights reserved.
    5  * Copyright (c) 1993, 1994 John S. Dyson
    6  * Copyright (c) 1995, David Greenman
    7  *
    8  * This code is derived from software contributed to Berkeley by
    9  * the Systems Programming Group of the University of Utah Computer
   10  * Science Department.
   11  *
   12  * Redistribution and use in source and binary forms, with or without
   13  * modification, are permitted provided that the following conditions
   14  * are met:
   15  * 1. Redistributions of source code must retain the above copyright
   16  *    notice, this list of conditions and the following disclaimer.
   17  * 2. Redistributions in binary form must reproduce the above copyright
   18  *    notice, this list of conditions and the following disclaimer in the
   19  *    documentation and/or other materials provided with the distribution.
   20  * 3. All advertising materials mentioning features or use of this software
   21  *    must display the following acknowledgement:
   22  *      This product includes software developed by the University of
   23  *      California, Berkeley and its contributors.
   24  * 4. Neither the name of the University nor the names of its contributors
   25  *    may be used to endorse or promote products derived from this software
   26  *    without specific prior written permission.
   27  *
   28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   38  * SUCH DAMAGE.
   39  *
   40  *      from: @(#)vnode_pager.c 7.5 (Berkeley) 4/20/91
   41  */
   42 
   43 /*
   44  * Page to/from files (vnodes).
   45  */
   46 
   47 /*
   48  * TODO:
   49  *      Implement VOP_GETPAGES/PUTPAGES interface for filesystems. Will
    50  *      greatly simplify the vnode_pager.
   51  */
   52 
   53 #include <sys/cdefs.h>
   54 __FBSDID("$FreeBSD$");
   55 
   56 #include "opt_vm.h"
   57 
   58 #include <sys/param.h>
   59 #include <sys/systm.h>
   60 #include <sys/proc.h>
   61 #include <sys/vnode.h>
   62 #include <sys/mount.h>
   63 #include <sys/bio.h>
   64 #include <sys/buf.h>
   65 #include <sys/vmmeter.h>
   66 #include <sys/limits.h>
   67 #include <sys/conf.h>
   68 #include <sys/rwlock.h>
   69 #include <sys/sf_buf.h>
   70 
   71 #include <machine/atomic.h>
   72 
   73 #include <vm/vm.h>
   74 #include <vm/vm_param.h>
   75 #include <vm/vm_object.h>
   76 #include <vm/vm_page.h>
   77 #include <vm/vm_pager.h>
   78 #include <vm/vm_map.h>
   79 #include <vm/vnode_pager.h>
   80 #include <vm/vm_extern.h>
   81 
   82 static int vnode_pager_addr(struct vnode *vp, vm_ooffset_t address,
   83     daddr_t *rtaddress, int *run);
   84 static int vnode_pager_input_smlfs(vm_object_t object, vm_page_t m);
   85 static int vnode_pager_input_old(vm_object_t object, vm_page_t m);
   86 static void vnode_pager_dealloc(vm_object_t);
   87 static int vnode_pager_getpages(vm_object_t, vm_page_t *, int, int *, int *);
   88 static int vnode_pager_getpages_async(vm_object_t, vm_page_t *, int, int *,
   89     int *, vop_getpages_iodone_t, void *);
   90 static void vnode_pager_putpages(vm_object_t, vm_page_t *, int, int, int *);
   91 static boolean_t vnode_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
   92 static vm_object_t vnode_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
   93     vm_ooffset_t, struct ucred *cred);
   94 static int vnode_pager_generic_getpages_done(struct buf *);
   95 static void vnode_pager_generic_getpages_done_async(struct buf *);
   96 
   97 struct pagerops vnodepagerops = {
   98         .pgo_alloc =    vnode_pager_alloc,
   99         .pgo_dealloc =  vnode_pager_dealloc,
  100         .pgo_getpages = vnode_pager_getpages,
  101         .pgo_getpages_async = vnode_pager_getpages_async,
  102         .pgo_putpages = vnode_pager_putpages,
  103         .pgo_haspage =  vnode_pager_haspage,
  104 };
  105 
  106 int vnode_pbuf_freecnt;
  107 int vnode_async_pbuf_freecnt;
  108 
  109 /* Create the VM system backing object for this vnode */
  110 int
  111 vnode_create_vobject(struct vnode *vp, off_t isize, struct thread *td)
  112 {
  113         vm_object_t object;
  114         vm_ooffset_t size = isize;
  115         struct vattr va;
  116 
  117         if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
  118                 return (0);
  119 
  120         while ((object = vp->v_object) != NULL) {
  121                 VM_OBJECT_WLOCK(object);
  122                 if (!(object->flags & OBJ_DEAD)) {
  123                         VM_OBJECT_WUNLOCK(object);
  124                         return (0);
  125                 }
  126                 VOP_UNLOCK(vp, 0);
  127                 vm_object_set_flag(object, OBJ_DISCONNECTWNT);
  128                 VM_OBJECT_SLEEP(object, object, PDROP | PVM, "vodead", 0);
  129                 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
  130         }
  131 
  132         if (size == 0) {
  133                 if (vn_isdisk(vp, NULL)) {
  134                         size = IDX_TO_OFF(INT_MAX);
  135                 } else {
  136                         if (VOP_GETATTR(vp, &va, td->td_ucred))
  137                                 return (0);
  138                         size = va.va_size;
  139                 }
  140         }
  141 
  142         object = vnode_pager_alloc(vp, size, 0, 0, td->td_ucred);
  143         /*
  144          * Dereference the reference we just created.  This assumes
  145          * that the object is associated with the vp.
  146          */
  147         VM_OBJECT_WLOCK(object);
  148         object->ref_count--;
  149         VM_OBJECT_WUNLOCK(object);
  150         vrele(vp);
  151 
  152         KASSERT(vp->v_object != NULL, ("vnode_create_vobject: NULL object"));
  153 
  154         return (0);
  155 }
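
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * a filesystem that wants its vnodes to be mmap()able could ensure the
 * backing object exists once the vnode is locked, e.g.:
 *
 *	error = vnode_create_vobject(vp, va.va_size, curthread);
 *
 * Passing an isize of 0 makes the routine look up the file size itself
 * with VOP_GETATTR(), as seen above.
 */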
  156 
  157 void
  158 vnode_destroy_vobject(struct vnode *vp)
  159 {
  160         struct vm_object *obj;
  161 
  162         obj = vp->v_object;
  163         if (obj == NULL)
  164                 return;
  165         ASSERT_VOP_ELOCKED(vp, "vnode_destroy_vobject");
  166         VM_OBJECT_WLOCK(obj);
  167         umtx_shm_object_terminated(obj);
  168         if (obj->ref_count == 0) {
  169                 /*
  170                  * don't double-terminate the object
  171                  */
  172                 if ((obj->flags & OBJ_DEAD) == 0) {
  173                         vm_object_terminate(obj);
  174                 } else {
  175                         /*
  176                          * Waiters were already handled during object
  177                          * termination.  The exclusive vnode lock hopefully
  178                          * prevented new waiters from referencing the dying
  179                          * object.
  180                          */
  181                         KASSERT((obj->flags & OBJ_DISCONNECTWNT) == 0,
  182                             ("OBJ_DISCONNECTWNT set obj %p flags %x",
  183                             obj, obj->flags));
  184                         vp->v_object = NULL;
  185                         VM_OBJECT_WUNLOCK(obj);
  186                 }
  187         } else {
  188                 /*
  189                  * Woe to the process that tries to page now :-).
  190                  */
  191                 vm_pager_deallocate(obj);
  192                 VM_OBJECT_WUNLOCK(obj);
  193         }
  194         KASSERT(vp->v_object == NULL, ("vp %p obj %p", vp, vp->v_object));
  195 }
  196 
  197 
  198 /*
  199  * Allocate (or lookup) pager for a vnode.
  200  * Handle is a vnode pointer.
  201  *
  202  * MPSAFE
  203  */
  204 vm_object_t
  205 vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
  206     vm_ooffset_t offset, struct ucred *cred)
  207 {
  208         vm_object_t object;
  209         struct vnode *vp;
  210 
  211         /*
  212          * Pageout to vnode, no can do yet.
  213          */
  214         if (handle == NULL)
  215                 return (NULL);
  216 
  217         vp = (struct vnode *) handle;
  218 
  219         /*
  220          * If the object is being terminated, wait for it to
  221          * go away.
  222          */
  223 retry:
  224         while ((object = vp->v_object) != NULL) {
  225                 VM_OBJECT_WLOCK(object);
  226                 if ((object->flags & OBJ_DEAD) == 0)
  227                         break;
  228                 vm_object_set_flag(object, OBJ_DISCONNECTWNT);
  229                 VM_OBJECT_SLEEP(object, object, PDROP | PVM, "vadead", 0);
  230         }
  231 
  232         KASSERT(vp->v_usecount != 0, ("vnode_pager_alloc: no vnode reference"));
  233 
  234         if (object == NULL) {
  235                 /*
  236                  * Add an object of the appropriate size
  237                  */
  238                 object = vm_object_allocate(OBJT_VNODE, OFF_TO_IDX(round_page(size)));
  239 
  240                 object->un_pager.vnp.vnp_size = size;
  241                 object->un_pager.vnp.writemappings = 0;
  242 
  243                 object->handle = handle;
  244                 VI_LOCK(vp);
  245                 if (vp->v_object != NULL) {
  246                         /*
  247                          * Object has been created while we were sleeping
  248                          */
  249                         VI_UNLOCK(vp);
  250                         VM_OBJECT_WLOCK(object);
  251                         KASSERT(object->ref_count == 1,
  252                             ("leaked ref %p %d", object, object->ref_count));
  253                         object->type = OBJT_DEAD;
  254                         object->ref_count = 0;
  255                         VM_OBJECT_WUNLOCK(object);
  256                         vm_object_destroy(object);
  257                         goto retry;
  258                 }
  259                 vp->v_object = object;
  260                 VI_UNLOCK(vp);
  261         } else {
  262                 object->ref_count++;
  263 #if VM_NRESERVLEVEL > 0
  264                 vm_object_color(object, 0);
  265 #endif
  266                 VM_OBJECT_WUNLOCK(object);
  267         }
  268         vrefact(vp);
  269         return (object);
  270 }
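
/*
 * A hedged sketch of how this allocator is normally reached: callers
 * go through the generic pager layer with OBJT_VNODE rather than
 * calling vnode_pager_alloc() directly, e.g.:
 *
 *	object = vm_pager_allocate(OBJT_VNODE, vp, size, VM_PROT_ALL,
 *	    0, cred);
 */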
  271 
  272 /*
  273  *      The object must be locked.
  274  */
  275 static void
  276 vnode_pager_dealloc(vm_object_t object)
  277 {
  278         struct vnode *vp;
  279         int refs;
  280 
  281         vp = object->handle;
  282         if (vp == NULL)
  283                 panic("vnode_pager_dealloc: pager already dealloced");
  284 
  285         VM_OBJECT_ASSERT_WLOCKED(object);
  286         vm_object_pip_wait(object, "vnpdea");
  287         refs = object->ref_count;
  288 
  289         object->handle = NULL;
  290         object->type = OBJT_DEAD;
  291         if (object->flags & OBJ_DISCONNECTWNT) {
  292                 vm_object_clear_flag(object, OBJ_DISCONNECTWNT);
  293                 wakeup(object);
  294         }
  295         ASSERT_VOP_ELOCKED(vp, "vnode_pager_dealloc");
  296         if (object->un_pager.vnp.writemappings > 0) {
  297                 object->un_pager.vnp.writemappings = 0;
  298                 VOP_ADD_WRITECOUNT(vp, -1);
  299                 CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d",
  300                     __func__, vp, vp->v_writecount);
  301         }
  302         vp->v_object = NULL;
  303         VOP_UNSET_TEXT(vp);
  304         VM_OBJECT_WUNLOCK(object);
  305         while (refs-- > 0)
  306                 vunref(vp);
  307         VM_OBJECT_WLOCK(object);
  308 }
  309 
  310 static boolean_t
  311 vnode_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
  312     int *after)
  313 {
  314         struct vnode *vp = object->handle;
  315         daddr_t bn;
  316         int err;
  317         daddr_t reqblock;
  318         int poff;
  319         int bsize;
  320         int pagesperblock, blocksperpage;
  321 
  322         VM_OBJECT_ASSERT_WLOCKED(object);
  323         /*
   324          * If there is no vp, or the vp is doomed or marked transparent
   325          * to VM, we do not have the page.
  326          */
  327         if (vp == NULL || vp->v_iflag & VI_DOOMED)
  328                 return FALSE;
  329         /*
  330          * If the offset is beyond end of file we do
  331          * not have the page.
  332          */
  333         if (IDX_TO_OFF(pindex) >= object->un_pager.vnp.vnp_size)
  334                 return FALSE;
  335 
  336         bsize = vp->v_mount->mnt_stat.f_iosize;
  337         pagesperblock = bsize / PAGE_SIZE;
  338         blocksperpage = 0;
  339         if (pagesperblock > 0) {
  340                 reqblock = pindex / pagesperblock;
  341         } else {
  342                 blocksperpage = (PAGE_SIZE / bsize);
  343                 reqblock = pindex * blocksperpage;
  344         }
  345         VM_OBJECT_WUNLOCK(object);
  346         err = VOP_BMAP(vp, reqblock, NULL, &bn, after, before);
  347         VM_OBJECT_WLOCK(object);
  348         if (err)
  349                 return TRUE;
  350         if (bn == -1)
  351                 return FALSE;
  352         if (pagesperblock > 0) {
  353                 poff = pindex - (reqblock * pagesperblock);
  354                 if (before) {
  355                         *before *= pagesperblock;
  356                         *before += poff;
  357                 }
  358                 if (after) {
  359                         /*
  360                          * The BMAP vop can report a partial block in the
  361                          * 'after', but must not report blocks after EOF.
  362                          * Assert the latter, and truncate 'after' in case
  363                          * of the former.
  364                          */
  365                         KASSERT((reqblock + *after) * pagesperblock <
  366                             roundup2(object->size, pagesperblock),
  367                             ("%s: reqblock %jd after %d size %ju", __func__,
  368                             (intmax_t )reqblock, *after,
  369                             (uintmax_t )object->size));
  370                         *after *= pagesperblock;
  371                         *after += pagesperblock - (poff + 1);
  372                         if (pindex + *after >= object->size)
  373                                 *after = object->size - 1 - pindex;
  374                 }
  375         } else {
  376                 if (before) {
  377                         *before /= blocksperpage;
  378                 }
  379 
  380                 if (after) {
  381                         *after /= blocksperpage;
  382                 }
  383         }
  384         return TRUE;
  385 }
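
/*
 * Worked example for the conversion above (illustrative numbers,
 * assuming the file extends well past the block): on a filesystem with
 * 32K blocks (pagesperblock = 8), a query for the fifth page of a
 * resident block (poff = 4) for which VOP_BMAP() reports no runs of
 * neighboring blocks yields *before = 4 and *after = 3, i.e. the rest
 * of the block can be paged in as a single run.
 */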
  386 
  387 /*
  388  * Lets the VM system know about a change in size for a file.
  389  * We adjust our own internal size and flush any cached pages in
  390  * the associated object that are affected by the size change.
  391  *
  392  * Note: this routine may be invoked as a result of a pager put
  393  * operation (possibly at object termination time), so we must be careful.
  394  */
  395 void
  396 vnode_pager_setsize(struct vnode *vp, vm_ooffset_t nsize)
  397 {
  398         vm_object_t object;
  399         vm_page_t m;
  400         vm_pindex_t nobjsize;
  401 
  402         if ((object = vp->v_object) == NULL)
  403                 return;
  404 /*      ASSERT_VOP_ELOCKED(vp, "vnode_pager_setsize and not locked vnode"); */
  405         VM_OBJECT_WLOCK(object);
  406         if (object->type == OBJT_DEAD) {
  407                 VM_OBJECT_WUNLOCK(object);
  408                 return;
  409         }
  410         KASSERT(object->type == OBJT_VNODE,
  411             ("not vnode-backed object %p", object));
  412         if (nsize == object->un_pager.vnp.vnp_size) {
  413                 /*
  414                  * Hasn't changed size
  415                  */
  416                 VM_OBJECT_WUNLOCK(object);
  417                 return;
  418         }
  419         nobjsize = OFF_TO_IDX(nsize + PAGE_MASK);
  420         if (nsize < object->un_pager.vnp.vnp_size) {
  421                 /*
  422                  * File has shrunk. Toss any cached pages beyond the new EOF.
  423                  */
  424                 if (nobjsize < object->size)
  425                         vm_object_page_remove(object, nobjsize, object->size,
  426                             0);
  427                 /*
  428                  * this gets rid of garbage at the end of a page that is now
  429                  * only partially backed by the vnode.
  430                  *
  431                  * XXX for some reason (I don't know yet), if we take a
  432                  * completely invalid page and mark it partially valid
  433                  * it can screw up NFS reads, so we don't allow the case.
  434                  */
  435                 if ((nsize & PAGE_MASK) &&
  436                     (m = vm_page_lookup(object, OFF_TO_IDX(nsize))) != NULL &&
  437                     m->valid != 0) {
  438                         int base = (int)nsize & PAGE_MASK;
  439                         int size = PAGE_SIZE - base;
  440 
  441                         /*
  442                          * Clear out partial-page garbage in case
  443                          * the page has been mapped.
  444                          */
  445                         pmap_zero_page_area(m, base, size);
  446 
  447                         /*
  448                          * Update the valid bits to reflect the blocks that
  449                          * have been zeroed.  Some of these valid bits may
  450                          * have already been set.
  451                          */
  452                         vm_page_set_valid_range(m, base, size);
  453 
  454                         /*
  455                          * Round "base" to the next block boundary so that the
  456                          * dirty bit for a partially zeroed block is not
  457                          * cleared.
  458                          */
  459                         base = roundup2(base, DEV_BSIZE);
  460 
  461                         /*
  462                          * Clear out partial-page dirty bits.
  463                          *
  464                          * note that we do not clear out the valid
  465                          * bits.  This would prevent bogus_page
  466                          * replacement from working properly.
  467                          */
  468                         vm_page_clear_dirty(m, base, PAGE_SIZE - base);
  469                 }
  470         }
  471         object->un_pager.vnp.vnp_size = nsize;
  472         object->size = nobjsize;
  473         VM_OBJECT_WUNLOCK(object);
  474 }
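
/*
 * Illustrative call site (hypothetical; concrete filesystems differ):
 * a truncate routine would update its own notion of the file size and
 * then notify the VM system, e.g.:
 *
 *	ip->i_size = length;
 *	vnode_pager_setsize(vp, length);
 */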
  475 
  476 /*
  477  * calculate the linear (byte) disk address of specified virtual
  478  * file address
  479  */
  480 static int
  481 vnode_pager_addr(struct vnode *vp, vm_ooffset_t address, daddr_t *rtaddress,
  482     int *run)
  483 {
  484         int bsize;
  485         int err;
  486         daddr_t vblock;
  487         daddr_t voffset;
  488 
  489         if (address < 0)
  490                 return -1;
  491 
  492         if (vp->v_iflag & VI_DOOMED)
  493                 return -1;
  494 
  495         bsize = vp->v_mount->mnt_stat.f_iosize;
  496         vblock = address / bsize;
  497         voffset = address % bsize;
  498 
  499         err = VOP_BMAP(vp, vblock, NULL, rtaddress, run, NULL);
  500         if (err == 0) {
  501                 if (*rtaddress != -1)
  502                         *rtaddress += voffset / DEV_BSIZE;
  503                 if (run) {
  504                         *run += 1;
  505                         *run *= bsize/PAGE_SIZE;
  506                         *run -= voffset/PAGE_SIZE;
  507                 }
  508         }
  509 
  510         return (err);
  511 }
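
/*
 * Worked example (illustrative numbers): with bsize = 16384 and
 * address = 70000, vblock = 4 and voffset = 4464, so the returned
 * disk address is the block's address plus 4464 / DEV_BSIZE = 8
 * 512-byte sectors.
 */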
  512 
  513 /*
  514  * small block filesystem vnode pager input
  515  */
  516 static int
  517 vnode_pager_input_smlfs(vm_object_t object, vm_page_t m)
  518 {
  519         struct vnode *vp;
  520         struct bufobj *bo;
  521         struct buf *bp;
  522         struct sf_buf *sf;
  523         daddr_t fileaddr;
  524         vm_offset_t bsize;
  525         vm_page_bits_t bits;
  526         int error, i;
  527 
  528         error = 0;
  529         vp = object->handle;
  530         if (vp->v_iflag & VI_DOOMED)
  531                 return VM_PAGER_BAD;
  532 
  533         bsize = vp->v_mount->mnt_stat.f_iosize;
  534 
  535         VOP_BMAP(vp, 0, &bo, 0, NULL, NULL);
  536 
  537         sf = sf_buf_alloc(m, 0);
  538 
  539         for (i = 0; i < PAGE_SIZE / bsize; i++) {
  540                 vm_ooffset_t address;
  541 
  542                 bits = vm_page_bits(i * bsize, bsize);
  543                 if (m->valid & bits)
  544                         continue;
  545 
  546                 address = IDX_TO_OFF(m->pindex) + i * bsize;
  547                 if (address >= object->un_pager.vnp.vnp_size) {
  548                         fileaddr = -1;
  549                 } else {
  550                         error = vnode_pager_addr(vp, address, &fileaddr, NULL);
  551                         if (error)
  552                                 break;
  553                 }
  554                 if (fileaddr != -1) {
  555                         bp = getpbuf(&vnode_pbuf_freecnt);
  556 
  557                         /* build a minimal buffer header */
  558                         bp->b_iocmd = BIO_READ;
  559                         bp->b_iodone = bdone;
  560                         KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
  561                         KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
  562                         bp->b_rcred = crhold(curthread->td_ucred);
  563                         bp->b_wcred = crhold(curthread->td_ucred);
  564                         bp->b_data = (caddr_t)sf_buf_kva(sf) + i * bsize;
  565                         bp->b_blkno = fileaddr;
  566                         pbgetbo(bo, bp);
  567                         bp->b_vp = vp;
  568                         bp->b_bcount = bsize;
  569                         bp->b_bufsize = bsize;
  570                         bp->b_runningbufspace = bp->b_bufsize;
  571                         atomic_add_long(&runningbufspace, bp->b_runningbufspace);
  572 
  573                         /* do the input */
  574                         bp->b_iooffset = dbtob(bp->b_blkno);
  575                         bstrategy(bp);
  576 
  577                         bwait(bp, PVM, "vnsrd");
  578 
  579                         if ((bp->b_ioflags & BIO_ERROR) != 0)
  580                                 error = EIO;
  581 
  582                         /*
  583                          * free the buffer header back to the swap buffer pool
  584                          */
  585                         bp->b_vp = NULL;
  586                         pbrelbo(bp);
  587                         relpbuf(bp, &vnode_pbuf_freecnt);
  588                         if (error)
  589                                 break;
  590                 } else
  591                         bzero((caddr_t)sf_buf_kva(sf) + i * bsize, bsize);
  592                 KASSERT((m->dirty & bits) == 0,
  593                     ("vnode_pager_input_smlfs: page %p is dirty", m));
  594                 VM_OBJECT_WLOCK(object);
  595                 m->valid |= bits;
  596                 VM_OBJECT_WUNLOCK(object);
  597         }
  598         sf_buf_free(sf);
  599         if (error) {
  600                 return VM_PAGER_ERROR;
  601         }
  602         return VM_PAGER_OK;
  603 }
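
/*
 * Illustrative arithmetic for the loop above: with PAGE_SIZE = 4096
 * and bsize = 1024, a page is filled by up to four single-block reads;
 * 1K chunks that are already valid are skipped and chunks past EOF are
 * simply zeroed.
 */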
  604 
  605 /*
  606  * old style vnode pager input routine
  607  */
  608 static int
  609 vnode_pager_input_old(vm_object_t object, vm_page_t m)
  610 {
  611         struct uio auio;
  612         struct iovec aiov;
  613         int error;
  614         int size;
  615         struct sf_buf *sf;
  616         struct vnode *vp;
  617 
  618         VM_OBJECT_ASSERT_WLOCKED(object);
  619         error = 0;
  620 
  621         /*
  622          * Return failure if beyond current EOF
  623          */
  624         if (IDX_TO_OFF(m->pindex) >= object->un_pager.vnp.vnp_size) {
  625                 return VM_PAGER_BAD;
  626         } else {
  627                 size = PAGE_SIZE;
  628                 if (IDX_TO_OFF(m->pindex) + size > object->un_pager.vnp.vnp_size)
  629                         size = object->un_pager.vnp.vnp_size - IDX_TO_OFF(m->pindex);
  630                 vp = object->handle;
  631                 VM_OBJECT_WUNLOCK(object);
  632 
  633                 /*
  634                  * Allocate a kernel virtual address and initialize so that
  635                  * we can use VOP_READ/WRITE routines.
  636                  */
  637                 sf = sf_buf_alloc(m, 0);
  638 
  639                 aiov.iov_base = (caddr_t)sf_buf_kva(sf);
  640                 aiov.iov_len = size;
  641                 auio.uio_iov = &aiov;
  642                 auio.uio_iovcnt = 1;
  643                 auio.uio_offset = IDX_TO_OFF(m->pindex);
  644                 auio.uio_segflg = UIO_SYSSPACE;
  645                 auio.uio_rw = UIO_READ;
  646                 auio.uio_resid = size;
  647                 auio.uio_td = curthread;
  648 
  649                 error = VOP_READ(vp, &auio, 0, curthread->td_ucred);
  650                 if (!error) {
  651                         int count = size - auio.uio_resid;
  652 
  653                         if (count == 0)
  654                                 error = EINVAL;
  655                         else if (count != PAGE_SIZE)
  656                                 bzero((caddr_t)sf_buf_kva(sf) + count,
  657                                     PAGE_SIZE - count);
  658                 }
  659                 sf_buf_free(sf);
  660 
  661                 VM_OBJECT_WLOCK(object);
  662         }
  663         KASSERT(m->dirty == 0, ("vnode_pager_input_old: page %p is dirty", m));
  664         if (!error)
  665                 m->valid = VM_PAGE_BITS_ALL;
  666         return error ? VM_PAGER_ERROR : VM_PAGER_OK;
  667 }
  668 
  669 /*
  670  * generic vnode pager input routine
  671  */
  672 
  673 /*
  674  * Local media VFS's that do not implement their own VOP_GETPAGES
   675  * should have their VOP_GETPAGES call vnode_pager_generic_getpages()
  676  * to implement the previous behaviour.
  677  *
  678  * All other FS's should use the bypass to get to the local media
  679  * backing vp's VOP_GETPAGES.
  680  */
  681 static int
  682 vnode_pager_getpages(vm_object_t object, vm_page_t *m, int count, int *rbehind,
  683     int *rahead)
  684 {
  685         struct vnode *vp;
  686         int rtval;
  687 
  688         vp = object->handle;
  689         VM_OBJECT_WUNLOCK(object);
  690         rtval = VOP_GETPAGES(vp, m, count, rbehind, rahead);
  691         KASSERT(rtval != EOPNOTSUPP,
  692             ("vnode_pager: FS getpages not implemented\n"));
  693         VM_OBJECT_WLOCK(object);
  694         return rtval;
  695 }
  696 
  697 static int
  698 vnode_pager_getpages_async(vm_object_t object, vm_page_t *m, int count,
  699     int *rbehind, int *rahead, vop_getpages_iodone_t iodone, void *arg)
  700 {
  701         struct vnode *vp;
  702         int rtval;
  703 
  704         vp = object->handle;
  705         VM_OBJECT_WUNLOCK(object);
  706         rtval = VOP_GETPAGES_ASYNC(vp, m, count, rbehind, rahead, iodone, arg);
  707         KASSERT(rtval != EOPNOTSUPP,
  708             ("vnode_pager: FS getpages_async not implemented\n"));
  709         VM_OBJECT_WLOCK(object);
  710         return (rtval);
  711 }
  712 
  713 /*
  714  * The implementation of VOP_GETPAGES() and VOP_GETPAGES_ASYNC() for
  715  * local filesystems, where partially valid pages can only occur at
  716  * the end of file.
  717  */
  718 int
  719 vnode_pager_local_getpages(struct vop_getpages_args *ap)
  720 {
  721 
  722         return (vnode_pager_generic_getpages(ap->a_vp, ap->a_m, ap->a_count,
  723             ap->a_rbehind, ap->a_rahead, NULL, NULL));
  724 }
  725 
  726 int
  727 vnode_pager_local_getpages_async(struct vop_getpages_async_args *ap)
  728 {
  729 
  730         return (vnode_pager_generic_getpages(ap->a_vp, ap->a_m, ap->a_count,
  731             ap->a_rbehind, ap->a_rahead, ap->a_iodone, ap->a_arg));
  732 }
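
/*
 * A hedged wiring sketch (hypothetical filesystem "myfs"): a local
 * media filesystem that does not roll its own pagein path can point
 * its vnode operations at the wrappers above, e.g.:
 *
 *	struct vop_vector myfs_vnodeops = {
 *		.vop_default		= &default_vnodeops,
 *		.vop_getpages		= vnode_pager_local_getpages,
 *		.vop_getpages_async	= vnode_pager_local_getpages_async,
 *	};
 */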
  733 
  734 /*
  735  * This is now called from local media FS's to operate against their
  736  * own vnodes if they fail to implement VOP_GETPAGES.
  737  */
  738 int
  739 vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int count,
  740     int *a_rbehind, int *a_rahead, vop_getpages_iodone_t iodone, void *arg)
  741 {
  742         vm_object_t object;
  743         struct bufobj *bo;
  744         struct buf *bp;
  745         off_t foff;
  746         int bsize, pagesperblock, *freecnt;
  747         int error, before, after, rbehind, rahead, poff, i;
  748         int bytecount, secmask;
  749 
  750         KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
  751             ("%s does not support devices", __func__));
  752 
  753         if (vp->v_iflag & VI_DOOMED)
  754                 return (VM_PAGER_BAD);
  755 
  756         object = vp->v_object;
  757         foff = IDX_TO_OFF(m[0]->pindex);
  758         bsize = vp->v_mount->mnt_stat.f_iosize;
  759         pagesperblock = bsize / PAGE_SIZE;
  760 
  761         KASSERT(foff < object->un_pager.vnp.vnp_size,
  762             ("%s: page %p offset beyond vp %p size", __func__, m[0], vp));
   763         KASSERT(count <= nitems(bp->b_pages),
  764             ("%s: requested %d pages", __func__, count));
  765 
  766         /*
  767          * The last page has valid blocks.  Invalid part can only
  768          * exist at the end of file, and the page is made fully valid
  769          * by zeroing in vm_pager_get_pages().
  770          */
  771         if (m[count - 1]->valid != 0 && --count == 0) {
  772                 if (iodone != NULL)
  773                         iodone(arg, m, 1, 0);
  774                 return (VM_PAGER_OK);
  775         }
  776 
  777         /*
  778          * Synchronous and asynchronous paging operations use different
   779          * free pbuf counters.  This is done to prevent asynchronous
   780          * requests from consuming all pbufs.
   781          * Allocate the pbuf at the very beginning of the function, so that
   782          * if we are low on a certain kind of pbuf we don't even proceed to
   783          * BMAP, but sleep instead.
  784          */
  785         freecnt = iodone != NULL ?
  786             &vnode_async_pbuf_freecnt : &vnode_pbuf_freecnt;
  787         bp = getpbuf(freecnt);
  788 
  789         /*
  790          * Get the underlying device blocks for the file with VOP_BMAP().
   791          * If the file system doesn't support VOP_BMAP, use the old way of
   792          * getting pages via VOP_READ.
  793          */
  794         error = VOP_BMAP(vp, foff / bsize, &bo, &bp->b_blkno, &after, &before);
  795         if (error == EOPNOTSUPP) {
  796                 relpbuf(bp, freecnt);
  797                 VM_OBJECT_WLOCK(object);
  798                 for (i = 0; i < count; i++) {
  799                         PCPU_INC(cnt.v_vnodein);
  800                         PCPU_INC(cnt.v_vnodepgsin);
  801                         error = vnode_pager_input_old(object, m[i]);
  802                         if (error)
  803                                 break;
  804                 }
  805                 VM_OBJECT_WUNLOCK(object);
  806                 return (error);
  807         } else if (error != 0) {
  808                 relpbuf(bp, freecnt);
  809                 return (VM_PAGER_ERROR);
  810         }
  811 
  812         /*
  813          * If the file system supports BMAP, but blocksize is smaller
  814          * than a page size, then use special small filesystem code.
  815          */
  816         if (pagesperblock == 0) {
  817                 relpbuf(bp, freecnt);
  818                 for (i = 0; i < count; i++) {
  819                         PCPU_INC(cnt.v_vnodein);
  820                         PCPU_INC(cnt.v_vnodepgsin);
  821                         error = vnode_pager_input_smlfs(object, m[i]);
  822                         if (error)
  823                                 break;
  824                 }
  825                 return (error);
  826         }
  827 
  828         /*
  829          * A sparse file can be encountered only for a single page request,
   830          * which may not be preceded by a call to vm_pager_haspage().
  831          */
  832         if (bp->b_blkno == -1) {
  833                 KASSERT(count == 1,
  834                     ("%s: array[%d] request to a sparse file %p", __func__,
  835                     count, vp));
  836                 relpbuf(bp, freecnt);
  837                 pmap_zero_page(m[0]);
  838                 KASSERT(m[0]->dirty == 0, ("%s: page %p is dirty",
  839                     __func__, m[0]));
  840                 VM_OBJECT_WLOCK(object);
  841                 m[0]->valid = VM_PAGE_BITS_ALL;
  842                 VM_OBJECT_WUNLOCK(object);
  843                 return (VM_PAGER_OK);
  844         }
  845 
  846         bp->b_blkno += (foff % bsize) / DEV_BSIZE;
  847 
   848         /* Recalculate the blocks available after/before in pages. */
  849         poff = (foff % bsize) / PAGE_SIZE;
  850         before *= pagesperblock;
  851         before += poff;
  852         after *= pagesperblock;
  853         after += pagesperblock - (poff + 1);
  854         if (m[0]->pindex + after >= object->size)
  855                 after = object->size - 1 - m[0]->pindex;
  856         KASSERT(count <= after + 1, ("%s: %d pages asked, can do only %d",
  857             __func__, count, after + 1));
  858         after -= count - 1;
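
        /*
         * Example with illustrative numbers: for bsize = 32768
         * (pagesperblock = 8), poff = 3, and VOP_BMAP() reporting
         * before = 1 and after = 2, the conversion above yields
         * before = 11 pages and, prior to the EOF clipping and the
         * "after -= count - 1" adjustment, after = 20 pages.
         */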
  859 
  860         /* Trim requested rbehind/rahead to possible values. */   
  861         rbehind = a_rbehind ? *a_rbehind : 0;
  862         rahead = a_rahead ? *a_rahead : 0;
  863         rbehind = min(rbehind, before);
  864         rbehind = min(rbehind, m[0]->pindex);
  865         rahead = min(rahead, after);
  866         rahead = min(rahead, object->size - m[count - 1]->pindex);
   867         KASSERT(rbehind + rahead + count <= nitems(bp->b_pages),
  868             ("%s: behind %d ahead %d count %d", __func__,
  869             rbehind, rahead, count));
  870 
  871         /*
   872          * Fill in the bp->b_pages[] array with requested and optional
   873          * read behind or read ahead pages.  Read behind pages are looked
   874          * up in a backward direction, down to the first cached page.  The
   875          * same is done for read ahead pages, but there is no need to shift
   876          * the array if a cached page is encountered.
  877          */
  878         i = bp->b_npages = 0;
  879         if (rbehind) {
  880                 vm_pindex_t startpindex, tpindex;
  881                 vm_page_t p;
  882 
  883                 VM_OBJECT_WLOCK(object);
  884                 startpindex = m[0]->pindex - rbehind;
  885                 if ((p = TAILQ_PREV(m[0], pglist, listq)) != NULL &&
  886                     p->pindex >= startpindex)
  887                         startpindex = p->pindex + 1;
  888 
  889                 /* tpindex is unsigned; beware of numeric underflow. */
  890                 for (tpindex = m[0]->pindex - 1;
  891                     tpindex >= startpindex && tpindex < m[0]->pindex;
  892                     tpindex--, i++) {
  893                         p = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
  894                         if (p == NULL) {
  895                                 /* Shift the array. */
  896                                 for (int j = 0; j < i; j++)
  897                                         bp->b_pages[j] = bp->b_pages[j + 
  898                                             tpindex + 1 - startpindex]; 
  899                                 break;
  900                         }
  901                         bp->b_pages[tpindex - startpindex] = p;
  902                 }
  903 
  904                 bp->b_pgbefore = i;
  905                 bp->b_npages += i;
  906                 bp->b_blkno -= IDX_TO_OFF(i) / DEV_BSIZE;
  907         } else
  908                 bp->b_pgbefore = 0;
  909 
  910         /* Requested pages. */
  911         for (int j = 0; j < count; j++, i++)
  912                 bp->b_pages[i] = m[j];
  913         bp->b_npages += count;
  914 
  915         if (rahead) {
  916                 vm_pindex_t endpindex, tpindex;
  917                 vm_page_t p;
  918 
  919                 if (!VM_OBJECT_WOWNED(object))
  920                         VM_OBJECT_WLOCK(object);
  921                 endpindex = m[count - 1]->pindex + rahead + 1;
  922                 if ((p = TAILQ_NEXT(m[count - 1], listq)) != NULL &&
  923                     p->pindex < endpindex)
  924                         endpindex = p->pindex;
  925                 if (endpindex > object->size)
  926                         endpindex = object->size;
  927 
  928                 for (tpindex = m[count - 1]->pindex + 1;
  929                     tpindex < endpindex; i++, tpindex++) {
  930                         p = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
  931                         if (p == NULL)
  932                                 break;
  933                         bp->b_pages[i] = p;
  934                 }
  935 
  936                 bp->b_pgafter = i - bp->b_npages;
  937                 bp->b_npages = i;
  938         } else
  939                 bp->b_pgafter = 0;
  940 
  941         if (VM_OBJECT_WOWNED(object))
  942                 VM_OBJECT_WUNLOCK(object);
  943 
  944         /* Report back actual behind/ahead read. */
  945         if (a_rbehind)
  946                 *a_rbehind = bp->b_pgbefore;
  947         if (a_rahead)
  948                 *a_rahead = bp->b_pgafter;
  949 
   950         KASSERT(bp->b_npages <= nitems(bp->b_pages),
  951             ("%s: buf %p overflowed", __func__, bp));
  952 
  953         /*
   954          * Recalculate first offset and bytecount with regard to read behind.
  955          * Truncate bytecount to vnode real size and round up physical size
  956          * for real devices.
  957          */
  958         foff = IDX_TO_OFF(bp->b_pages[0]->pindex);
  959         bytecount = bp->b_npages << PAGE_SHIFT;
  960         if ((foff + bytecount) > object->un_pager.vnp.vnp_size)
  961                 bytecount = object->un_pager.vnp.vnp_size - foff;
  962         secmask = bo->bo_bsize - 1;
  963         KASSERT(secmask < PAGE_SIZE && secmask > 0,
  964             ("%s: sector size %d too large", __func__, secmask + 1));
  965         bytecount = (bytecount + secmask) & ~secmask;
  966 
  967         /*
  968          * And map the pages to be read into the kva, if the filesystem
  969          * requires mapped buffers.
  970          */
  971         if ((vp->v_mount->mnt_kern_flag & MNTK_UNMAPPED_BUFS) != 0 &&
  972             unmapped_buf_allowed) {
  973                 bp->b_data = unmapped_buf;
  974                 bp->b_offset = 0;
  975         } else {
  976                 bp->b_data = bp->b_kvabase;
  977                 pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages, bp->b_npages);
  978         }
  979 
  980         /* Build a minimal buffer header. */
  981         bp->b_iocmd = BIO_READ;
  982         KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
  983         KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
  984         bp->b_rcred = crhold(curthread->td_ucred);
  985         bp->b_wcred = crhold(curthread->td_ucred);
  986         pbgetbo(bo, bp);
  987         bp->b_vp = vp;
  988         bp->b_bcount = bp->b_bufsize = bp->b_runningbufspace = bytecount;
  989         bp->b_iooffset = dbtob(bp->b_blkno);
  990 
  991         atomic_add_long(&runningbufspace, bp->b_runningbufspace);
  992         PCPU_INC(cnt.v_vnodein);
  993         PCPU_ADD(cnt.v_vnodepgsin, bp->b_npages);
  994 
  995         if (iodone != NULL) { /* async */
  996                 bp->b_pgiodone = iodone;
  997                 bp->b_caller1 = arg;
  998                 bp->b_iodone = vnode_pager_generic_getpages_done_async;
  999                 bp->b_flags |= B_ASYNC;
 1000                 BUF_KERNPROC(bp);
 1001                 bstrategy(bp);
 1002                 return (VM_PAGER_OK);
 1003         } else {
 1004                 bp->b_iodone = bdone;
 1005                 bstrategy(bp);
 1006                 bwait(bp, PVM, "vnread");
 1007                 error = vnode_pager_generic_getpages_done(bp);
 1008                 for (i = 0; i < bp->b_npages; i++)
 1009                         bp->b_pages[i] = NULL;
 1010                 bp->b_vp = NULL;
 1011                 pbrelbo(bp);
 1012                 relpbuf(bp, &vnode_pbuf_freecnt);
 1013                 return (error != 0 ? VM_PAGER_ERROR : VM_PAGER_OK);
 1014         }
 1015 }
 1016 
 1017 static void
 1018 vnode_pager_generic_getpages_done_async(struct buf *bp)
 1019 {
 1020         int error;
 1021 
 1022         error = vnode_pager_generic_getpages_done(bp);
 1023         /* Run the iodone upon the requested range. */
 1024         bp->b_pgiodone(bp->b_caller1, bp->b_pages + bp->b_pgbefore,
 1025             bp->b_npages - bp->b_pgbefore - bp->b_pgafter, error);
 1026         for (int i = 0; i < bp->b_npages; i++)
 1027                 bp->b_pages[i] = NULL;
 1028         bp->b_vp = NULL;
 1029         pbrelbo(bp);
 1030         relpbuf(bp, &vnode_async_pbuf_freecnt);
 1031 }
 1032 
 1033 static int
 1034 vnode_pager_generic_getpages_done(struct buf *bp)
 1035 {
 1036         vm_object_t object;
 1037         off_t tfoff, nextoff;
 1038         int i, error;
 1039 
 1040         error = (bp->b_ioflags & BIO_ERROR) != 0 ? EIO : 0;
 1041         object = bp->b_vp->v_object;
 1042 
 1043         if (error == 0 && bp->b_bcount != bp->b_npages * PAGE_SIZE) {
 1044                 if (!buf_mapped(bp)) {
 1045                         bp->b_data = bp->b_kvabase;
 1046                         pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages,
 1047                             bp->b_npages);
 1048                 }
 1049                 bzero(bp->b_data + bp->b_bcount,
 1050                     PAGE_SIZE * bp->b_npages - bp->b_bcount);
 1051         }
 1052         if (buf_mapped(bp)) {
 1053                 pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages);
 1054                 bp->b_data = unmapped_buf;
 1055         }
 1056 
 1057         VM_OBJECT_WLOCK(object);
 1058         for (i = 0, tfoff = IDX_TO_OFF(bp->b_pages[0]->pindex);
 1059             i < bp->b_npages; i++, tfoff = nextoff) {
 1060                 vm_page_t mt;
 1061 
 1062                 nextoff = tfoff + PAGE_SIZE;
 1063                 mt = bp->b_pages[i];
 1064 
 1065                 if (nextoff <= object->un_pager.vnp.vnp_size) {
 1066                         /*
 1067                          * Read filled up entire page.
 1068                          */
 1069                         mt->valid = VM_PAGE_BITS_ALL;
 1070                         KASSERT(mt->dirty == 0,
 1071                             ("%s: page %p is dirty", __func__, mt));
 1072                         KASSERT(!pmap_page_is_mapped(mt),
 1073                             ("%s: page %p is mapped", __func__, mt));
 1074                 } else {
 1075                         /*
 1076                          * Read did not fill up entire page.
 1077                          *
 1078                          * Currently we do not set the entire page valid,
 1079                          * we just try to clear the piece that we couldn't
 1080                          * read.
 1081                          */
 1082                         vm_page_set_valid_range(mt, 0,
 1083                             object->un_pager.vnp.vnp_size - tfoff);
 1084                         KASSERT((mt->dirty & vm_page_bits(0,
 1085                             object->un_pager.vnp.vnp_size - tfoff)) == 0,
 1086                             ("%s: page %p is dirty", __func__, mt));
 1087                 }
 1088 
 1089                 if (i < bp->b_pgbefore || i >= bp->b_npages - bp->b_pgafter)
 1090                         vm_page_readahead_finish(mt);
 1091         }
 1092         VM_OBJECT_WUNLOCK(object);
 1093         if (error != 0)
 1094                 printf("%s: I/O read error %d\n", __func__, error);
 1095 
 1096         return (error);
 1097 }
 1098 
 1099 /*
 1100  * EOPNOTSUPP is no longer legal.  For local media VFS's that do not
  1101  * implement their own VOP_PUTPAGES, their VOP_PUTPAGES should call
 1102  * vnode_pager_generic_putpages() to implement the previous behaviour.
 1103  *
 1104  * All other FS's should use the bypass to get to the local media
 1105  * backing vp's VOP_PUTPAGES.
 1106  */
 1107 static void
 1108 vnode_pager_putpages(vm_object_t object, vm_page_t *m, int count,
 1109     int flags, int *rtvals)
 1110 {
 1111         int rtval;
 1112         struct vnode *vp;
 1113         int bytes = count * PAGE_SIZE;
 1114 
 1115         /*
 1116          * Force synchronous operation if we are extremely low on memory
 1117          * to prevent a low-memory deadlock.  VOP operations often need to
 1118          * allocate more memory to initiate the I/O ( i.e. do a BMAP
 1119          * operation ).  The swapper handles the case by limiting the amount
 1120          * of asynchronous I/O, but that sort of solution doesn't scale well
 1121          * for the vnode pager without a lot of work.
 1122          *
 1123          * Also, the backing vnode's iodone routine may not wake the pageout
  1124          * daemon up.  This should probably be addressed.  XXX
 1125          */
 1126 
 1127         if (vm_cnt.v_free_count < vm_cnt.v_pageout_free_min)
 1128                 flags |= VM_PAGER_PUT_SYNC;
 1129 
 1130         /*
 1131          * Call device-specific putpages function
 1132          */
 1133         vp = object->handle;
 1134         VM_OBJECT_WUNLOCK(object);
 1135         rtval = VOP_PUTPAGES(vp, m, bytes, flags, rtvals);
 1136         KASSERT(rtval != EOPNOTSUPP, 
 1137             ("vnode_pager: stale FS putpages\n"));
 1138         VM_OBJECT_WLOCK(object);
 1139 }
 1140 
 1141 static int
 1142 vn_off2bidx(vm_ooffset_t offset)
 1143 {
 1144 
 1145         return ((offset & PAGE_MASK) / DEV_BSIZE);
 1146 }
 1147 
 1148 static bool
 1149 vn_dirty_blk(vm_page_t m, vm_ooffset_t offset)
 1150 {
 1151 
 1152         KASSERT(IDX_TO_OFF(m->pindex) <= offset &&
 1153             offset < IDX_TO_OFF(m->pindex + 1),
 1154             ("page %p pidx %ju offset %ju", m, (uintmax_t)m->pindex,
 1155             (uintmax_t)offset));
 1156         return ((m->dirty & ((vm_page_bits_t)1 << vn_off2bidx(offset))) != 0);
 1157 }
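
/*
 * Worked example: with PAGE_SIZE = 4096 and DEV_BSIZE = 512 a page
 * carries eight dirty bits, and vn_off2bidx() maps a file offset of
 * 0x1600 (0x600 into its page) to dirty bit 0x600 / 512 = 3.
 */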
 1158 
 1159 /*
 1160  * This is now called from local media FS's to operate against their
 1161  * own vnodes if they fail to implement VOP_PUTPAGES.
 1162  *
 1163  * This is typically called indirectly via the pageout daemon and
  1164  * clustering has typically already occurred, so in general we ask the
  1165  * underlying filesystem to write the data out asynchronously rather
  1166  * than delayed.
 1167  */
 1168 int
 1169 vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *ma, int bytecount,
 1170     int flags, int *rtvals)
 1171 {
 1172         vm_object_t object;
 1173         vm_page_t m;
 1174         vm_ooffset_t maxblksz, next_offset, poffset, prev_offset;
 1175         struct uio auio;
 1176         struct iovec aiov;
 1177         off_t prev_resid, wrsz;
 1178         int count, error, i, maxsize, ncount, pgoff, ppscheck;
 1179         bool in_hole;
 1180         static struct timeval lastfail;
 1181         static int curfail;
 1182 
 1183         object = vp->v_object;
 1184         count = bytecount / PAGE_SIZE;
 1185 
 1186         for (i = 0; i < count; i++)
 1187                 rtvals[i] = VM_PAGER_ERROR;
 1188 
 1189         if ((int64_t)ma[0]->pindex < 0) {
 1190                 printf("vnode_pager_generic_putpages: "
 1191                     "attempt to write meta-data 0x%jx(%lx)\n",
 1192                     (uintmax_t)ma[0]->pindex, (u_long)ma[0]->dirty);
 1193                 rtvals[0] = VM_PAGER_BAD;
 1194                 return (VM_PAGER_BAD);
 1195         }
 1196 
 1197         maxsize = count * PAGE_SIZE;
 1198         ncount = count;
 1199 
 1200         poffset = IDX_TO_OFF(ma[0]->pindex);
 1201 
 1202         /*
  1203          * If the page-aligned write is larger than the actual file we
  1204          * have to invalidate pages occurring beyond the file EOF.  However,
  1205          * there is an edge case where a file may not be page-aligned and
  1206          * the last page is partially invalid.  In this case the filesystem
 1207          * may not properly clear the dirty bits for the entire page (which
 1208          * could be VM_PAGE_BITS_ALL due to the page having been mmap()d).
 1209          * With the page locked we are free to fix-up the dirty bits here.
 1210          *
 1211          * We do not under any circumstances truncate the valid bits, as
 1212          * this will screw up bogus page replacement.
 1213          */
 1214         VM_OBJECT_RLOCK(object);
 1215         if (maxsize + poffset > object->un_pager.vnp.vnp_size) {
 1216                 if (!VM_OBJECT_TRYUPGRADE(object)) {
 1217                         VM_OBJECT_RUNLOCK(object);
 1218                         VM_OBJECT_WLOCK(object);
 1219                         if (maxsize + poffset <= object->un_pager.vnp.vnp_size)
 1220                                 goto downgrade;
 1221                 }
 1222                 if (object->un_pager.vnp.vnp_size > poffset) {
 1223                         maxsize = object->un_pager.vnp.vnp_size - poffset;
 1224                         ncount = btoc(maxsize);
 1225                         if ((pgoff = (int)maxsize & PAGE_MASK) != 0) {
 1226                                 pgoff = roundup2(pgoff, DEV_BSIZE);
 1227 
 1228                                 /*
 1229                                  * If the object is locked and the following
 1230                                  * conditions hold, then the page's dirty
 1231                                  * field cannot be concurrently changed by a
 1232                                  * pmap operation.
 1233                                  */
 1234                                 m = ma[ncount - 1];
 1235                                 vm_page_assert_sbusied(m);
 1236                                 KASSERT(!pmap_page_is_write_mapped(m),
 1237                 ("vnode_pager_generic_putpages: page %p is not read-only", m));
 1238                                 MPASS(m->dirty != 0);
 1239                                 vm_page_clear_dirty(m, pgoff, PAGE_SIZE -
 1240                                     pgoff);
 1241                         }
 1242                 } else {
 1243                         maxsize = 0;
 1244                         ncount = 0;
 1245                 }
 1246                 for (i = ncount; i < count; i++)
 1247                         rtvals[i] = VM_PAGER_BAD;
 1248 downgrade:
 1249                 VM_OBJECT_LOCK_DOWNGRADE(object);
 1250         }
 1251 
 1252         auio.uio_iov = &aiov;
 1253         auio.uio_segflg = UIO_NOCOPY;
 1254         auio.uio_rw = UIO_WRITE;
 1255         auio.uio_td = NULL;
 1256         maxblksz = roundup2(poffset + maxsize, DEV_BSIZE);
 1257 
 1258         for (prev_offset = poffset; prev_offset < maxblksz;) {
 1259                 /* Skip clean blocks. */
 1260                 for (in_hole = true; in_hole && prev_offset < maxblksz;) {
 1261                         m = ma[OFF_TO_IDX(prev_offset - poffset)];
 1262                         for (i = vn_off2bidx(prev_offset);
 1263                             i < sizeof(vm_page_bits_t) * NBBY &&
 1264                             prev_offset < maxblksz; i++) {
 1265                                 if (vn_dirty_blk(m, prev_offset)) {
 1266                                         in_hole = false;
 1267                                         break;
 1268                                 }
 1269                                 prev_offset += DEV_BSIZE;
 1270                         }
 1271                 }
 1272                 if (in_hole)
 1273                         goto write_done;
 1274 
 1275                 /* Find longest run of dirty blocks. */
 1276                 for (next_offset = prev_offset; next_offset < maxblksz;) {
 1277                         m = ma[OFF_TO_IDX(next_offset - poffset)];
 1278                         for (i = vn_off2bidx(next_offset);
 1279                             i < sizeof(vm_page_bits_t) * NBBY &&
 1280                             next_offset < maxblksz; i++) {
 1281                                 if (!vn_dirty_blk(m, next_offset))
 1282                                         goto start_write;
 1283                                 next_offset += DEV_BSIZE;
 1284                         }
 1285                 }
 1286 start_write:
 1287                 if (next_offset > poffset + maxsize)
 1288                         next_offset = poffset + maxsize;
 1289 
 1290                 /*
 1291                  * Getting here requires finding a dirty block in the
 1292                  * 'skip clean blocks' loop.
 1293                  */
 1294                 MPASS(prev_offset < next_offset);
 1295 
 1296                 VM_OBJECT_RUNLOCK(object);
 1297                 aiov.iov_base = NULL;
 1298                 auio.uio_iovcnt = 1;
 1299                 auio.uio_offset = prev_offset;
 1300                 prev_resid = auio.uio_resid = aiov.iov_len = next_offset -
 1301                     prev_offset;
 1302                 error = VOP_WRITE(vp, &auio,
 1303                     vnode_pager_putpages_ioflags(flags), curthread->td_ucred);
 1304 
 1305                 wrsz = prev_resid - auio.uio_resid;
 1306                 if (wrsz == 0) {
 1307                         if (ppsratecheck(&lastfail, &curfail, 1) != 0) {
 1308                                 vn_printf(vp, "vnode_pager_putpages: "
 1309                                     "zero-length write at %ju resid %zd\n",
 1310                                     auio.uio_offset, auio.uio_resid);
 1311                         }
 1312                         VM_OBJECT_RLOCK(object);
 1313                         break;
 1314                 }
 1315 
 1316                 /* Adjust the starting offset for next iteration. */
 1317                 prev_offset += wrsz;
 1318                 MPASS(auio.uio_offset == prev_offset);
 1319 
 1320                 ppscheck = 0;
 1321                 if (error != 0 && (ppscheck = ppsratecheck(&lastfail,
 1322                     &curfail, 1)) != 0)
 1323                         vn_printf(vp, "vnode_pager_putpages: I/O error %d\n",
 1324                             error);
 1325                 if (auio.uio_resid != 0 && (ppscheck != 0 ||
 1326                     ppsratecheck(&lastfail, &curfail, 1) != 0))
 1327                         vn_printf(vp, "vnode_pager_putpages: residual I/O %zd "
 1328                             "at %ju\n", auio.uio_resid,
 1329                             (uintmax_t)ma[0]->pindex);
 1330                 VM_OBJECT_RLOCK(object);
 1331                 if (error != 0 || auio.uio_resid != 0)
 1332                         break;
 1333         }
 1334 write_done:
 1335         /* Mark completely processed pages. */
 1336         for (i = 0; i < OFF_TO_IDX(prev_offset - poffset); i++)
 1337                 rtvals[i] = VM_PAGER_OK;
 1338         /* Mark partial EOF page. */
 1339         if (prev_offset == poffset + maxsize && (prev_offset & PAGE_MASK) != 0)
 1340                 rtvals[i++] = VM_PAGER_OK;
 1341         /* Unwritten pages in the range; report clean ones as a free success. */
 1342         for (; i < ncount; i++)
 1343                 rtvals[i] = ma[i]->dirty == 0 ? VM_PAGER_OK : VM_PAGER_ERROR;
 1344         VM_OBJECT_RUNLOCK(object);
 1345         PCPU_ADD(cnt.v_vnodepgsout, i);
 1346         PCPU_INC(cnt.v_vnodeout);
 1347         return (rtvals[0]);
 1348 }
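
The pageout loop above alternates between two scans of the per-page dirty
bitmaps: the first skips clean DEV_BSIZE blocks, the second extends the
current dirty run, and each resulting maximal run is pushed out with a
single VOP_WRITE().  The userland sketch below models the same idea; the
names blk_dirty() and find_dirty_run(), the uint8_t bitmap, and the fixed
4096/512 sizes are illustrative assumptions, not the kernel's
vn_off2bidx()/vn_dirty_blk() machinery.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE   4096
#define DEV_BSIZE   512
#define BLKS_PER_PG (PAGE_SIZE / DEV_BSIZE)     /* 8 dirty bits per page */

static bool
blk_dirty(const uint8_t *dirty, size_t blk)
{

        return ((dirty[blk / BLKS_PER_PG] >> (blk % BLKS_PER_PG)) & 1) != 0;
}

/*
 * Find the next maximal run of dirty blocks at or after *startp.
 * Returns false when no dirty block remains; otherwise [*startp, *endp)
 * is the run, in DEV_BSIZE block units.
 */
static bool
find_dirty_run(const uint8_t *dirty, size_t nblks, size_t *startp,
    size_t *endp)
{
        size_t b;

        for (b = *startp; b < nblks && !blk_dirty(dirty, b); b++)
                ;                               /* skip clean blocks */
        if (b == nblks)
                return (false);
        *startp = b;
        while (b < nblks && blk_dirty(dirty, b))
                b++;                            /* extend the dirty run */
        *endp = b;
        return (true);
}

int
main(void)
{
        /* Two pages: blocks 1-3 of page 0 and block 9 of page 1 are dirty. */
        uint8_t dirty[2] = { 0x0e, 0x02 };
        size_t start, end;

        for (start = 0; find_dirty_run(dirty, 16, &start, &end); start = end)
                printf("write blocks [%zu, %zu)\n", start, end);
        return (0);
}

Run against the sample bitmap this prints two runs, [1, 4) and [9, 10),
matching the two writes the kernel loop would issue.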
 1349 
 1350 int
 1351 vnode_pager_putpages_ioflags(int pager_flags)
 1352 {
 1353         int ioflags;
 1354 
 1355         /*
 1356          * Pageouts are already clustered, use IO_ASYNC to force a
 1357          * bawrite() rather than a bdwrite() to prevent paging I/O
 1358          * from saturating the buffer cache.  Dummy-up the sequential
 1359          * heuristic to cause large ranges to cluster.  If neither
 1360          * IO_SYNC or IO_ASYNC is set, the system decides how to
 1361          * IO_SYNC nor IO_ASYNC is set, the system decides how to
 1362          */
 1363         ioflags = IO_VMIO;
 1364         if ((pager_flags & (VM_PAGER_PUT_SYNC | VM_PAGER_PUT_INVAL)) != 0)
 1365                 ioflags |= IO_SYNC;
 1366         else if ((pager_flags & VM_PAGER_CLUSTER_OK) == 0)
 1367                 ioflags |= IO_ASYNC;
 1368         ioflags |= (pager_flags & VM_PAGER_PUT_INVAL) != 0 ? IO_INVAL : 0;
 1369         ioflags |= (pager_flags & VM_PAGER_PUT_NOREUSE) != 0 ? IO_NOREUSE : 0;
 1370         ioflags |= IO_SEQMAX << IO_SEQSHIFT;
 1371         return (ioflags);
 1372 }
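
The flag mapping above is easiest to read as a handful of invariants.  The
sketch below is a hypothetical self-check, assuming the usual FreeBSD
definitions of the VM_PAGER_* and IO_* constants are in scope; it is not
part of this file.

#include <assert.h>

static void
check_putpages_ioflags(void)
{
        int f;

        /* Synchronous pageouts force IO_SYNC... */
        f = vnode_pager_putpages_ioflags(VM_PAGER_PUT_SYNC);
        assert((f & IO_SYNC) != 0);

        /* ...and so do invalidating ones, which also set IO_INVAL. */
        f = vnode_pager_putpages_ioflags(VM_PAGER_PUT_INVAL);
        assert((f & (IO_SYNC | IO_INVAL)) == (IO_SYNC | IO_INVAL));

        /* Without VM_PAGER_CLUSTER_OK the write goes async (bawrite()). */
        f = vnode_pager_putpages_ioflags(0);
        assert((f & IO_ASYNC) != 0);

        /* With VM_PAGER_CLUSTER_OK neither is set: the system decides. */
        f = vnode_pager_putpages_ioflags(VM_PAGER_CLUSTER_OK);
        assert((f & (IO_SYNC | IO_ASYNC)) == 0);

        /* Every pageout carries IO_VMIO and a maximal sequential hint. */
        assert((f & IO_VMIO) != 0);
        assert(((f >> IO_SEQSHIFT) & IO_SEQMAX) == IO_SEQMAX);
}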
 1373 
 1374 /*
 1375  * vnode_pager_undirty_pages().
 1376  *
 1377  * A helper to mark pages clean after a pageout that may have been
 1378  * completed with a short write.  The lpos argument specifies the
 1379  * page run length in bytes, and the written argument specifies how
 1380  * many bytes were actually written.  The eof argument is the offset
 1381  * just past the last valid byte in the vnode, measured relative to
 1382  * the absolute file position of the first byte in the run.
 1383  */
 1384 void
 1385 vnode_pager_undirty_pages(vm_page_t *ma, int *rtvals, int written, off_t eof,
 1386     int lpos)
 1387 {
 1388         vm_object_t obj;
 1389         int i, pos, pos_devb;
 1390 
 1391         if (written == 0 && eof >= lpos)
 1392                 return;
 1393         obj = ma[0]->object;
 1394         VM_OBJECT_WLOCK(obj);
 1395         for (i = 0, pos = 0; pos < written; i++, pos += PAGE_SIZE) {
 1396                 if (pos < trunc_page(written)) {
 1397                         rtvals[i] = VM_PAGER_OK;
 1398                         vm_page_undirty(ma[i]);
 1399                 } else {
 1400                         /* Partially written page. */
 1401                         rtvals[i] = VM_PAGER_AGAIN;
 1402                         vm_page_clear_dirty(ma[i], 0, written & PAGE_MASK);
 1403                 }
 1404         }
 1405         if (eof >= lpos) /* avoid truncation */
 1406                 goto done;
 1407         for (pos = eof, i = OFF_TO_IDX(trunc_page(pos)); pos < lpos; i++) {
 1408                 if (pos != trunc_page(pos)) {
 1409                         /*
 1410                          * The page contains the last valid byte in
 1411                          * the vnode, mark the rest of the page as
 1412                          * clean, potentially making the whole page
 1413                          * clean.
 1414                          */
 1415                         pos_devb = roundup2(pos & PAGE_MASK, DEV_BSIZE);
 1416                         vm_page_clear_dirty(ma[i], pos_devb, PAGE_SIZE -
 1417                             pos_devb);
 1418 
 1419                         /*
 1420                          * If the page was cleaned, report the pageout
 1421                          * on it as successful.  msync() no longer
 1422                          * needs to write out the page, endlessly
 1423                          * creating write requests and dirty buffers.
 1424                          */
 1425                         if (ma[i]->dirty == 0)
 1426                                 rtvals[i] = VM_PAGER_OK;
 1427 
 1428                         pos = round_page(pos);
 1429                 } else {
 1430                         /* vm_pageout_flush() clears dirty */
 1431                         rtvals[i] = VM_PAGER_BAD;
 1432                         pos += PAGE_SIZE;
 1433                 }
 1434         }
 1435 done:
 1436         VM_OBJECT_WUNLOCK(obj);
 1437 }
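
The arithmetic above is easiest to follow with concrete numbers.  This
standalone sketch uses simplified stand-ins for the kernel's trunc_page()
and roundup2() macros and assumes 4096-byte pages with 512-byte device
blocks; the values chosen (a 10000-byte short write, valid data ending at
run offset 9000) are hypothetical.

#include <stdio.h>

#define PAGE_SIZE       4096L
#define PAGE_MASK       (PAGE_SIZE - 1)
#define DEV_BSIZE       512L
#define trunc_page(x)   ((x) & ~PAGE_MASK)
#define roundup2(x, y)  (((x) + ((y) - 1)) & ~((y) - 1))

int
main(void)
{
        long written = 10000, eof = 9000;

        /* Pages wholly covered by the write are undirtied outright. */
        printf("fully written pages: [0, %ld)\n",
            trunc_page(written) / PAGE_SIZE);
        /* The trailing partial page has only its written prefix cleared. */
        printf("partial page: dirty bytes [0, %ld) cleared\n",
            written & PAGE_MASK);
        /* The EOF pass cleans from the next block boundary to page end. */
        printf("EOF page %ld: cleaned from byte %ld to the page end\n",
            trunc_page(eof) / PAGE_SIZE,
            roundup2(eof & PAGE_MASK, DEV_BSIZE));
        return (0);
}

With these inputs the output is pages [0, 2) fully cleaned, dirty bytes
[0, 1808) of the partial page cleared, and page 2 cleaned from byte 1024
onward, i.e. roundup2(9000 & PAGE_MASK, DEV_BSIZE).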
 1438 
 1439 void
 1440 vnode_pager_update_writecount(vm_object_t object, vm_offset_t start,
 1441     vm_offset_t end)
 1442 {
 1443         struct vnode *vp;
 1444         vm_ooffset_t old_wm;
 1445 
 1446         VM_OBJECT_WLOCK(object);
 1447         if (object->type != OBJT_VNODE) {
 1448                 VM_OBJECT_WUNLOCK(object);
 1449                 return;
 1450         }
 1451         old_wm = object->un_pager.vnp.writemappings;
 1452         object->un_pager.vnp.writemappings += (vm_ooffset_t)end - start;
 1453         vp = object->handle;
 1454         if (old_wm == 0 && object->un_pager.vnp.writemappings != 0) {
 1455                 ASSERT_VOP_ELOCKED(vp, "v_writecount inc");
 1456                 VOP_ADD_WRITECOUNT(vp, 1);
 1457                 CTR3(KTR_VFS, "%s: vp %p v_writecount increased to %d",
 1458                     __func__, vp, vp->v_writecount);
 1459         } else if (old_wm != 0 && object->un_pager.vnp.writemappings == 0) {
 1460                 ASSERT_VOP_ELOCKED(vp, "v_writecount dec");
 1461                 VOP_ADD_WRITECOUNT(vp, -1);
 1462                 CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d",
 1463                     __func__, vp, vp->v_writecount);
 1464         }
 1465         VM_OBJECT_WUNLOCK(object);
 1466 }
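
The writemappings field counts writable-mapped bytes, but the vnode only
cares whether that count is zero, so v_writecount moves only on the
0 <-> nonzero edges.  A minimal generic sketch of this edge-triggered
pattern (hypothetical names, locking elided):

#include <assert.h>

struct wm_obj {
        long writemappings;     /* total bytes of writable mappings */
        int vnode_writecount;   /* at most one contribution per object */
};

static void
wm_adjust(struct wm_obj *o, long delta)
{
        long old = o->writemappings;

        o->writemappings += delta;
        if (old == 0 && o->writemappings != 0)
                o->vnode_writecount++;  /* first writable mapping appeared */
        else if (old != 0 && o->writemappings == 0)
                o->vnode_writecount--;  /* last writable mapping went away */
}

int
main(void)
{
        struct wm_obj o = { 0, 0 };

        wm_adjust(&o, 4096);            /* 0 -> nonzero: take a reference */
        wm_adjust(&o, 8192);            /* still nonzero: no change */
        assert(o.vnode_writecount == 1);
        wm_adjust(&o, -12288);          /* nonzero -> 0: drop the reference */
        assert(o.vnode_writecount == 0);
        return (0);
}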
 1467 
 1468 void
 1469 vnode_pager_release_writecount(vm_object_t object, vm_offset_t start,
 1470     vm_offset_t end)
 1471 {
 1472         struct vnode *vp;
 1473         struct mount *mp;
 1474         vm_offset_t inc;
 1475 
 1476         VM_OBJECT_WLOCK(object);
 1477 
 1478         /*
 1479          * First, recheck the object type to account for the race when
 1480          * the vnode is reclaimed.
 1481          */
 1482         if (object->type != OBJT_VNODE) {
 1483                 VM_OBJECT_WUNLOCK(object);
 1484                 return;
 1485         }
 1486 
 1487         /*
 1488          * Optimize for the case when writemappings is not going to
 1489          * zero.
 1490          */
 1491         inc = end - start;
 1492         if (object->un_pager.vnp.writemappings != inc) {
 1493                 object->un_pager.vnp.writemappings -= inc;
 1494                 VM_OBJECT_WUNLOCK(object);
 1495                 return;
 1496         }
 1497 
 1498         vp = object->handle;
 1499         vhold(vp);
 1500         VM_OBJECT_WUNLOCK(object);
 1501         mp = NULL;
 1502         vn_start_write(vp, &mp, V_WAIT);
 1503         vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 1504 
 1505         /*
 1506          * Decrement the object's writemappings, by swapping the start
 1507          * and end arguments for vnode_pager_update_writecount().  If
 1508          * there was no race with vnode reclamation, then the
 1509          * vnode's v_writecount is decremented.
 1510          */
 1511         vnode_pager_update_writecount(object, end, start);
 1512         VOP_UNLOCK(vp, 0);
 1513         vdrop(vp);
 1514         if (mp != NULL)
 1515                 vn_finished_write(mp);
 1516 }
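
Two details here are worth spelling out.  First, the vhold()/vdrop() pair
keeps the vnode from being recycled while the object lock is dropped to
acquire the vnode lock, since VOP_ADD_WRITECOUNT() must be called with the
vnode exclusively locked; the object type is then rechecked inside
vnode_pager_update_writecount().  Second, passing (end, start) relies on
the signed delta in the update path: (vm_ooffset_t)end - start becomes
start - end, the negative of the mapping size.  A standalone sketch of
that arithmetic, with hypothetical names:

#include <assert.h>
#include <stdint.h>

/* Mirrors: writemappings += (vm_ooffset_t)end - start. */
static void
wm_update(int64_t *writemappings, uint64_t start, uint64_t end)
{

        *writemappings += (int64_t)(end - start);
}

int
main(void)
{
        int64_t wm = 0;

        wm_update(&wm, 0x1000, 0x3000); /* map 8 KB: adds 0x2000 */
        assert(wm == 0x2000);
        wm_update(&wm, 0x3000, 0x1000); /* swapped args: subtracts 0x2000 */
        assert(wm == 0);
        return (0);
}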
