FreeBSD/Linux Kernel Cross Reference
sys/vm/vnode_pager.c


/*-
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1993, 1994 John S. Dyson
 * Copyright (c) 1995, David Greenman
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)vnode_pager.c 7.5 (Berkeley) 4/20/91
 */

/*
 * Page to/from files (vnodes).
 */

/*
 * TODO:
 *      Implement VOP_GETPAGES/PUTPAGES interface for filesystems. Will
 *      greatly simplify the vnode_pager.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vmmeter.h>
#include <sys/limits.h>
#include <sys/conf.h>
#include <sys/sf_buf.h>

#include <machine/atomic.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <vm/vnode_pager.h>
#include <vm/vm_extern.h>

static int vnode_pager_addr(struct vnode *vp, vm_ooffset_t address,
    daddr_t *rtaddress, int *run);
static int vnode_pager_input_smlfs(vm_object_t object, vm_page_t m);
static int vnode_pager_input_old(vm_object_t object, vm_page_t m);
static void vnode_pager_dealloc(vm_object_t);
static int vnode_pager_getpages(vm_object_t, vm_page_t *, int, int);
static void vnode_pager_putpages(vm_object_t, vm_page_t *, int, boolean_t, int *);
static boolean_t vnode_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
static vm_object_t vnode_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
    vm_ooffset_t, struct ucred *cred);

struct pagerops vnodepagerops = {
        .pgo_alloc =    vnode_pager_alloc,
        .pgo_dealloc =  vnode_pager_dealloc,
        .pgo_getpages = vnode_pager_getpages,
        .pgo_putpages = vnode_pager_putpages,
        .pgo_haspage =  vnode_pager_haspage,
};

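/*
 * Count of free physical buffers available to the vnode pager; getpbuf()
 * sleeps on this counter when it reaches zero.  (Its initial value is set
 * up during pager initialization, outside this file.)
 */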
int vnode_pbuf_freecnt;

/* Create the VM system backing object for this vnode */
int
vnode_create_vobject(struct vnode *vp, off_t isize, struct thread *td)
{
        vm_object_t object;
        vm_ooffset_t size = isize;
        struct vattr va;

        if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
                return (0);

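        /*
         * If a dead object is still attached to the vnode, drop the vnode
         * lock and wait for it to be fully disconnected before continuing.
         */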
        while ((object = vp->v_object) != NULL) {
                VM_OBJECT_LOCK(object);
                if (!(object->flags & OBJ_DEAD)) {
                        VM_OBJECT_UNLOCK(object);
                        return (0);
                }
                VOP_UNLOCK(vp, 0);
                vm_object_set_flag(object, OBJ_DISCONNECTWNT);
                msleep(object, VM_OBJECT_MTX(object), PDROP | PVM, "vodead", 0);
                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
        }

        if (size == 0) {
                if (vn_isdisk(vp, NULL)) {
                        size = IDX_TO_OFF(INT_MAX);
                } else {
                        if (VOP_GETATTR(vp, &va, td->td_ucred))
                                return (0);
                        size = va.va_size;
                }
        }

        object = vnode_pager_alloc(vp, size, 0, 0, td->td_ucred);
        /*
         * Dereference the reference we just created.  This assumes
         * that the object is associated with the vp.
         */
        VM_OBJECT_LOCK(object);
        object->ref_count--;
        VM_OBJECT_UNLOCK(object);
        vrele(vp);

        KASSERT(vp->v_object != NULL, ("vnode_create_vobject: NULL object"));

        return (0);
}

void
vnode_destroy_vobject(struct vnode *vp)
{
        struct vm_object *obj;

        obj = vp->v_object;
        if (obj == NULL)
                return;
        ASSERT_VOP_ELOCKED(vp, "vnode_destroy_vobject");
        VM_OBJECT_LOCK(obj);
        if (obj->ref_count == 0) {
                /*
                 * vclean() may be called twice. The first time
                 * removes the primary reference to the object,
                 * the second time goes one further and is a
                 * special-case to terminate the object.
                 *
                 * don't double-terminate the object
                 */
                if ((obj->flags & OBJ_DEAD) == 0)
                        vm_object_terminate(obj);
                else
                        VM_OBJECT_UNLOCK(obj);
        } else {
                /*
                 * Woe to the process that tries to page now :-).
                 */
                vm_pager_deallocate(obj);
                VM_OBJECT_UNLOCK(obj);
        }
        vp->v_object = NULL;
}


/*
 * Allocate (or lookup) pager for a vnode.
 * Handle is a vnode pointer.
 *
 * MPSAFE
 */
vm_object_t
vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t offset, struct ucred *cred)
{
        vm_object_t object;
        struct vnode *vp;

        /*
         * Pageout to vnode, no can do yet.
         */
        if (handle == NULL)
                return (NULL);

        vp = (struct vnode *) handle;

        /*
         * If the object is being terminated, wait for it to
         * go away.
         */
retry:
        while ((object = vp->v_object) != NULL) {
                VM_OBJECT_LOCK(object);
                if ((object->flags & OBJ_DEAD) == 0)
                        break;
                vm_object_set_flag(object, OBJ_DISCONNECTWNT);
                msleep(object, VM_OBJECT_MTX(object), PDROP | PVM, "vadead", 0);
        }

        KASSERT(vp->v_usecount != 0, ("vnode_pager_alloc: no vnode reference"));

        if (object == NULL) {
                /*
                 * Add an object of the appropriate size
                 */
                object = vm_object_allocate(OBJT_VNODE, OFF_TO_IDX(round_page(size)));

                object->un_pager.vnp.vnp_size = size;
                object->un_pager.vnp.writemappings = 0;

                object->handle = handle;
                VI_LOCK(vp);
                if (vp->v_object != NULL) {
                        /*
                         * Object has been created while we were sleeping
                         */
                        VI_UNLOCK(vp);
                        VM_OBJECT_LOCK(object);
                        KASSERT(object->ref_count == 1,
                            ("leaked ref %p %d", object, object->ref_count));
                        object->type = OBJT_DEAD;
                        object->ref_count = 0;
                        VM_OBJECT_UNLOCK(object);
                        vm_object_destroy(object);
                        goto retry;
                }
                vp->v_object = object;
                VI_UNLOCK(vp);
        } else {
                object->ref_count++;
                VM_OBJECT_UNLOCK(object);
        }
        vref(vp);
        return (object);
}

/*
 *      The object must be locked.
 */
static void
vnode_pager_dealloc(vm_object_t object)
{
        struct vnode *vp;
        int refs;

        vp = object->handle;
        if (vp == NULL)
                panic("vnode_pager_dealloc: pager already dealloced");

        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
        vm_object_pip_wait(object, "vnpdea");
        refs = object->ref_count;

        object->handle = NULL;
        object->type = OBJT_DEAD;
        if (object->flags & OBJ_DISCONNECTWNT) {
                vm_object_clear_flag(object, OBJ_DISCONNECTWNT);
                wakeup(object);
        }
        ASSERT_VOP_ELOCKED(vp, "vnode_pager_dealloc");
        if (object->un_pager.vnp.writemappings > 0) {
                object->un_pager.vnp.writemappings = 0;
                VOP_ADD_WRITECOUNT(vp, -1);
        }
        vp->v_object = NULL;
        VOP_UNSET_TEXT(vp);
        VM_OBJECT_UNLOCK(object);
        while (refs-- > 0)
                vunref(vp);
        VM_OBJECT_LOCK(object);
}

static boolean_t
vnode_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
    int *after)
{
        struct vnode *vp = object->handle;
        daddr_t bn;
        int err;
        daddr_t reqblock;
        int poff;
        int bsize;
        int pagesperblock, blocksperpage;
        int vfslocked;

        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
        /*
         * If there is no vp, or the vp is doomed, we do not have the page.
         */
        if (vp == NULL || vp->v_iflag & VI_DOOMED)
                return (FALSE);
        /*
         * If the offset is beyond end of file we do
         * not have the page.
         */
        if (IDX_TO_OFF(pindex) >= object->un_pager.vnp.vnp_size)
                return (FALSE);

        bsize = vp->v_mount->mnt_stat.f_iosize;
        pagesperblock = bsize / PAGE_SIZE;
        blocksperpage = 0;
        if (pagesperblock > 0) {
                reqblock = pindex / pagesperblock;
        } else {
                blocksperpage = (PAGE_SIZE / bsize);
                reqblock = pindex * blocksperpage;
        }
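        /*
         * Example (assuming 4 KB pages): with an 8 KB f_iosize,
         * pagesperblock is 2 and page index 5 lives in FS block 2; with a
         * 512-byte f_iosize, blocksperpage is 8 and page index 5 starts at
         * FS block 40.
         */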
        VM_OBJECT_UNLOCK(object);
        vfslocked = VFS_LOCK_GIANT(vp->v_mount);
        err = VOP_BMAP(vp, reqblock, NULL, &bn, after, before);
        VFS_UNLOCK_GIANT(vfslocked);
        VM_OBJECT_LOCK(object);
        if (err)
                return (TRUE);
        if (bn == -1)
                return (FALSE);
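        /*
         * VOP_BMAP returned *before and *after in FS blocks; convert them
         * to pages, accounting for the requested page's offset within its
         * block and clipping the readahead count at the object's EOF.
         */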
        if (pagesperblock > 0) {
                poff = pindex - (reqblock * pagesperblock);
                if (before) {
                        *before *= pagesperblock;
                        *before += poff;
                }
                if (after) {
                        int numafter;
                        *after *= pagesperblock;
                        numafter = pagesperblock - (poff + 1);
                        if (IDX_TO_OFF(pindex + numafter) >
                            object->un_pager.vnp.vnp_size) {
                                numafter =
                                    OFF_TO_IDX(object->un_pager.vnp.vnp_size) -
                                    pindex;
                        }
                        *after += numafter;
                }
        } else {
                if (before) {
                        *before /= blocksperpage;
                }

                if (after) {
                        *after /= blocksperpage;
                }
        }
        return (TRUE);
}

/*
 * Lets the VM system know about a change in size for a file.
 * We adjust our own internal size and flush any cached pages in
 * the associated object that are affected by the size change.
 *
 * Note: this routine may be invoked as a result of a pager put
 * operation (possibly at object termination time), so we must be careful.
 */
void
vnode_pager_setsize(struct vnode *vp, vm_ooffset_t nsize)
{
        vm_object_t object;
        vm_page_t m;
        vm_pindex_t nobjsize;

        if ((object = vp->v_object) == NULL)
                return;
/*      ASSERT_VOP_ELOCKED(vp, "vnode_pager_setsize and not locked vnode"); */
        VM_OBJECT_LOCK(object);
        if (object->type == OBJT_DEAD) {
                VM_OBJECT_UNLOCK(object);
                return;
        }
        KASSERT(object->type == OBJT_VNODE,
            ("not vnode-backed object %p", object));
        if (nsize == object->un_pager.vnp.vnp_size) {
                /*
                 * Hasn't changed size
                 */
                VM_OBJECT_UNLOCK(object);
                return;
        }
        nobjsize = OFF_TO_IDX(nsize + PAGE_MASK);
        if (nsize < object->un_pager.vnp.vnp_size) {
                /*
                 * File has shrunk. Toss any cached pages beyond the new EOF.
                 */
                if (nobjsize < object->size)
                        vm_object_page_remove(object, nobjsize, object->size,
                            0);
                /*
                 * this gets rid of garbage at the end of a page that is now
                 * only partially backed by the vnode.
                 *
                 * XXX for some reason (I don't know yet), if we take a
                 * completely invalid page and mark it partially valid
                 * it can screw up NFS reads, so we don't allow the case.
                 */
                if ((nsize & PAGE_MASK) &&
                    (m = vm_page_lookup(object, OFF_TO_IDX(nsize))) != NULL &&
                    m->valid != 0) {
                        int base = (int)nsize & PAGE_MASK;
                        int size = PAGE_SIZE - base;

                        /*
                         * Clear out partial-page garbage in case
                         * the page has been mapped.
                         */
                        pmap_zero_page_area(m, base, size);

                        /*
                         * Update the valid bits to reflect the blocks that
                         * have been zeroed.  Some of these valid bits may
                         * have already been set.
                         */
                        vm_page_set_valid(m, base, size);

                        /*
                         * Round "base" to the next block boundary so that the
                         * dirty bit for a partially zeroed block is not
                         * cleared.
                         */
                        base = roundup2(base, DEV_BSIZE);

                        /*
                         * Clear out partial-page dirty bits.
                         *
                         * note that we do not clear out the valid
                         * bits.  This would prevent bogus_page
                         * replacement from working properly.
                         */
                        vm_page_clear_dirty(m, base, PAGE_SIZE - base);
                } else if ((nsize & PAGE_MASK) &&
                    __predict_false(object->cache != NULL)) {
                        vm_page_cache_free(object, OFF_TO_IDX(nsize),
                            nobjsize);
                }
        }
        object->un_pager.vnp.vnp_size = nsize;
        object->size = nobjsize;
        VM_OBJECT_UNLOCK(object);
}

/*
 * calculate the linear (byte) disk address of specified virtual
 * file address
 */
static int
vnode_pager_addr(struct vnode *vp, vm_ooffset_t address, daddr_t *rtaddress,
    int *run)
{
        int bsize;
        int err;
        daddr_t vblock;
        daddr_t voffset;

        if (address < 0)
                return (-1);

        if (vp->v_iflag & VI_DOOMED)
                return (-1);

        bsize = vp->v_mount->mnt_stat.f_iosize;
        vblock = address / bsize;
        voffset = address % bsize;

        err = VOP_BMAP(vp, vblock, NULL, rtaddress, run, NULL);
        if (err == 0) {
                if (*rtaddress != -1)
                        *rtaddress += voffset / DEV_BSIZE;
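                /*
                 * VOP_BMAP reports the run as the number of extra
                 * contiguous FS blocks after vblock.  Convert it to pages:
                 * count the block itself (+1), scale by pages per block,
                 * then drop the pages of the run that precede the requested
                 * address.
                 */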
                if (run) {
                        *run += 1;
                        *run *= bsize/PAGE_SIZE;
                        *run -= voffset/PAGE_SIZE;
                }
        }

        return (err);
}

/*
 * small block filesystem vnode pager input
 */
static int
vnode_pager_input_smlfs(vm_object_t object, vm_page_t m)
{
        struct vnode *vp;
        struct bufobj *bo;
        struct buf *bp;
        struct sf_buf *sf;
        daddr_t fileaddr;
        vm_offset_t bsize;
        vm_page_bits_t bits;
        int error, i;

        error = 0;
        vp = object->handle;
        if (vp->v_iflag & VI_DOOMED)
                return (VM_PAGER_BAD);

        bsize = vp->v_mount->mnt_stat.f_iosize;

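        /*
         * This VOP_BMAP call is made only to look up the underlying
         * bufobj; the translated block number itself is not needed here.
         */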
        VOP_BMAP(vp, 0, &bo, NULL, NULL, NULL);

        sf = sf_buf_alloc(m, 0);

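        /*
         * Read the page one FS block at a time, skipping fragments that
         * are already valid; e.g. with 1 KB blocks and 4 KB pages, up to
         * four single-block reads are issued for one page.
         */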
        for (i = 0; i < PAGE_SIZE / bsize; i++) {
                vm_ooffset_t address;

                bits = vm_page_bits(i * bsize, bsize);
                if (m->valid & bits)
                        continue;

                address = IDX_TO_OFF(m->pindex) + i * bsize;
                if (address >= object->un_pager.vnp.vnp_size) {
                        fileaddr = -1;
                } else {
                        error = vnode_pager_addr(vp, address, &fileaddr, NULL);
                        if (error)
                                break;
                }
                if (fileaddr != -1) {
                        bp = getpbuf(&vnode_pbuf_freecnt);

                        /* build a minimal buffer header */
                        bp->b_iocmd = BIO_READ;
                        bp->b_iodone = bdone;
                        KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
                        KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
                        bp->b_rcred = crhold(curthread->td_ucred);
                        bp->b_wcred = crhold(curthread->td_ucred);
                        bp->b_data = (caddr_t)sf_buf_kva(sf) + i * bsize;
                        bp->b_blkno = fileaddr;
                        pbgetbo(bo, bp);
                        bp->b_vp = vp;
                        bp->b_bcount = bsize;
                        bp->b_bufsize = bsize;
                        bp->b_runningbufspace = bp->b_bufsize;
                        atomic_add_long(&runningbufspace, bp->b_runningbufspace);

                        /* do the input */
                        bp->b_iooffset = dbtob(bp->b_blkno);
                        bstrategy(bp);

                        bwait(bp, PVM, "vnsrd");

                        if ((bp->b_ioflags & BIO_ERROR) != 0)
                                error = EIO;

                        /*
                         * free the buffer header back to the swap buffer pool
                         */
                        bp->b_vp = NULL;
                        pbrelbo(bp);
                        relpbuf(bp, &vnode_pbuf_freecnt);
                        if (error)
                                break;
                } else
                        bzero((caddr_t)sf_buf_kva(sf) + i * bsize, bsize);
                KASSERT((m->dirty & bits) == 0,
                    ("vnode_pager_input_smlfs: page %p is dirty", m));
                VM_OBJECT_LOCK(object);
                m->valid |= bits;
                VM_OBJECT_UNLOCK(object);
        }
        sf_buf_free(sf);
        if (error) {
                return (VM_PAGER_ERROR);
        }
        return (VM_PAGER_OK);
}

/*
 * old style vnode pager input routine
 */
static int
vnode_pager_input_old(vm_object_t object, vm_page_t m)
{
        struct uio auio;
        struct iovec aiov;
        int error;
        int size;
        struct sf_buf *sf;
        struct vnode *vp;

        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
        error = 0;

        /*
         * Return failure if beyond current EOF
         */
        if (IDX_TO_OFF(m->pindex) >= object->un_pager.vnp.vnp_size) {
                return (VM_PAGER_BAD);
        } else {
                size = PAGE_SIZE;
                if (IDX_TO_OFF(m->pindex) + size > object->un_pager.vnp.vnp_size)
                        size = object->un_pager.vnp.vnp_size - IDX_TO_OFF(m->pindex);
                vp = object->handle;
                VM_OBJECT_UNLOCK(object);

                /*
                 * Allocate a kernel virtual address and initialize so that
                 * we can use VOP_READ/WRITE routines.
                 */
                sf = sf_buf_alloc(m, 0);

                aiov.iov_base = (caddr_t)sf_buf_kva(sf);
                aiov.iov_len = size;
                auio.uio_iov = &aiov;
                auio.uio_iovcnt = 1;
                auio.uio_offset = IDX_TO_OFF(m->pindex);
                auio.uio_segflg = UIO_SYSSPACE;
                auio.uio_rw = UIO_READ;
                auio.uio_resid = size;
                auio.uio_td = curthread;

                error = VOP_READ(vp, &auio, 0, curthread->td_ucred);
                if (!error) {
                        int count = size - auio.uio_resid;

                        if (count == 0)
                                error = EINVAL;
                        else if (count != PAGE_SIZE)
                                bzero((caddr_t)sf_buf_kva(sf) + count,
                                    PAGE_SIZE - count);
                }
                sf_buf_free(sf);

                VM_OBJECT_LOCK(object);
        }
        KASSERT(m->dirty == 0, ("vnode_pager_input_old: page %p is dirty", m));
        if (!error)
                m->valid = VM_PAGE_BITS_ALL;
        return (error ? VM_PAGER_ERROR : VM_PAGER_OK);
}

/*
 * generic vnode pager input routine
 */

/*
 * Local media VFS's that do not implement their own VOP_GETPAGES
 * should have their VOP_GETPAGES call vnode_pager_generic_getpages()
 * to implement the previous behaviour.
 *
 * All other FS's should use the bypass to get to the local media
 * backing vp's VOP_GETPAGES.
 */
static int
vnode_pager_getpages(vm_object_t object, vm_page_t *m, int count,
    int reqpage)
{
        int rtval;
        struct vnode *vp;
        int bytes = count * PAGE_SIZE;
        int vfslocked;

        vp = object->handle;
        VM_OBJECT_UNLOCK(object);
        vfslocked = VFS_LOCK_GIANT(vp->v_mount);
        rtval = VOP_GETPAGES(vp, m, bytes, reqpage, 0);
        KASSERT(rtval != EOPNOTSUPP,
            ("vnode_pager: FS getpages not implemented\n"));
        VFS_UNLOCK_GIANT(vfslocked);
        VM_OBJECT_LOCK(object);
        return (rtval);
}

/*
 * This is now called from local media FS's to operate against their
 * own vnodes if they fail to implement VOP_GETPAGES.
 */
int
vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int bytecount,
    int reqpage)
{
        vm_object_t object;
        vm_offset_t kva;
        off_t foff, tfoff, nextoff;
        int i, j, size, bsize, first;
        daddr_t firstaddr, reqblock;
        struct bufobj *bo;
        int runpg;
        int runend;
        struct buf *bp;
        struct mount *mp;
        int count;
        int error;

        object = vp->v_object;
        count = bytecount / PAGE_SIZE;

        KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
            ("vnode_pager_generic_getpages does not support devices"));
        if (vp->v_iflag & VI_DOOMED)
                return (VM_PAGER_BAD);

        bsize = vp->v_mount->mnt_stat.f_iosize;

        /* get the UNDERLYING device for the file with VOP_BMAP() */

        /*
         * originally, we did not check for an error return value -- assuming
         * an fs always has a bmap entry point -- that assumption is wrong!!!
         */
        foff = IDX_TO_OFF(m[reqpage]->pindex);

        /*
         * if we can't bmap, use old VOP code
         */
        error = VOP_BMAP(vp, foff / bsize, &bo, &reqblock, NULL, NULL);
        if (error == EOPNOTSUPP) {
                VM_OBJECT_LOCK(object);

                for (i = 0; i < count; i++)
                        if (i != reqpage) {
                                vm_page_lock(m[i]);
                                vm_page_free(m[i]);
                                vm_page_unlock(m[i]);
                        }
                PCPU_INC(cnt.v_vnodein);
                PCPU_INC(cnt.v_vnodepgsin);
                error = vnode_pager_input_old(object, m[reqpage]);
                VM_OBJECT_UNLOCK(object);
                return (error);
        } else if (error != 0) {
                VM_OBJECT_LOCK(object);
                for (i = 0; i < count; i++)
                        if (i != reqpage) {
                                vm_page_lock(m[i]);
                                vm_page_free(m[i]);
                                vm_page_unlock(m[i]);
                        }
                VM_OBJECT_UNLOCK(object);
                return (VM_PAGER_ERROR);

                /*
                 * if the blocksize is smaller than a page size, then use
                 * special small filesystem code.  NFS sometimes has a small
                 * blocksize, but it can handle large reads itself.
                 */
        } else if ((PAGE_SIZE / bsize) > 1 &&
            (vp->v_mount->mnt_stat.f_type != nfs_mount_type)) {
                VM_OBJECT_LOCK(object);
                for (i = 0; i < count; i++)
                        if (i != reqpage) {
                                vm_page_lock(m[i]);
                                vm_page_free(m[i]);
                                vm_page_unlock(m[i]);
                        }
                VM_OBJECT_UNLOCK(object);
                PCPU_INC(cnt.v_vnodein);
                PCPU_INC(cnt.v_vnodepgsin);
                return (vnode_pager_input_smlfs(object, m[reqpage]));
        }

        /*
         * If we have a completely valid page available to us, we can
         * clean up and return.  Otherwise we have to re-read the
         * media.
         */
        VM_OBJECT_LOCK(object);
        if (m[reqpage]->valid == VM_PAGE_BITS_ALL) {
                for (i = 0; i < count; i++)
                        if (i != reqpage) {
                                vm_page_lock(m[i]);
                                vm_page_free(m[i]);
                                vm_page_unlock(m[i]);
                        }
                VM_OBJECT_UNLOCK(object);
                return (VM_PAGER_OK);
        } else if (reqblock == -1) {
                pmap_zero_page(m[reqpage]);
                KASSERT(m[reqpage]->dirty == 0,
                    ("vnode_pager_generic_getpages: page %p is dirty",
                    m[reqpage]));
                m[reqpage]->valid = VM_PAGE_BITS_ALL;
                for (i = 0; i < count; i++)
                        if (i != reqpage) {
                                vm_page_lock(m[i]);
                                vm_page_free(m[i]);
                                vm_page_unlock(m[i]);
                        }
                VM_OBJECT_UNLOCK(object);
                return (VM_PAGER_OK);
        }
        m[reqpage]->valid = 0;
        VM_OBJECT_UNLOCK(object);

        /*
         * here on direct device I/O
         */
        firstaddr = -1;

        /*
         * calculate the run that includes the required page
         */
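        /*
         * Example (assuming 4 KB pages): if m[] holds 8 pages with
         * reqpage 3 and BMAP reports a 5-page contiguous run starting at
         * m[0], the loop below keeps pages 0-4 and frees the tail so that
         * a single contiguous read satisfies the request.
         */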
        for (first = 0, i = 0; i < count; i = runend) {
                if (vnode_pager_addr(vp, IDX_TO_OFF(m[i]->pindex), &firstaddr,
                    &runpg) != 0) {
                        VM_OBJECT_LOCK(object);
                        for (; i < count; i++)
                                if (i != reqpage) {
                                        vm_page_lock(m[i]);
                                        vm_page_free(m[i]);
                                        vm_page_unlock(m[i]);
                                }
                        VM_OBJECT_UNLOCK(object);
                        return (VM_PAGER_ERROR);
                }
                if (firstaddr == -1) {
                        VM_OBJECT_LOCK(object);
                        if (i == reqpage && foff < object->un_pager.vnp.vnp_size) {
                                panic("vnode_pager_getpages: unexpected missing page: firstaddr: %jd, foff: 0x%jx%08jx, vnp_size: 0x%jx%08jx",
                                    (intmax_t)firstaddr, (uintmax_t)(foff >> 32),
                                    (uintmax_t)foff,
                                    (uintmax_t)
                                    (object->un_pager.vnp.vnp_size >> 32),
                                    (uintmax_t)object->un_pager.vnp.vnp_size);
                        }
                        vm_page_lock(m[i]);
                        vm_page_free(m[i]);
                        vm_page_unlock(m[i]);
                        VM_OBJECT_UNLOCK(object);
                        runend = i + 1;
                        first = runend;
                        continue;
                }
                runend = i + runpg;
                if (runend <= reqpage) {
                        VM_OBJECT_LOCK(object);
                        for (j = i; j < runend; j++) {
                                vm_page_lock(m[j]);
                                vm_page_free(m[j]);
                                vm_page_unlock(m[j]);
                        }
                        VM_OBJECT_UNLOCK(object);
                } else {
                        if (runpg < (count - first)) {
                                VM_OBJECT_LOCK(object);
                                for (i = first + runpg; i < count; i++) {
                                        vm_page_lock(m[i]);
                                        vm_page_free(m[i]);
                                        vm_page_unlock(m[i]);
                                }
                                VM_OBJECT_UNLOCK(object);
                                count = first + runpg;
                        }
                        break;
                }
                first = runend;
        }

        /*
         * the first and last page have been calculated now, move input pages
         * to be zero based...
         */
        if (first != 0) {
                m += first;
                count -= first;
                reqpage -= first;
        }

        /*
         * calculate the file virtual address for the transfer
         */
        foff = IDX_TO_OFF(m[0]->pindex);

        /*
         * calculate the size of the transfer
         */
        size = count * PAGE_SIZE;
        KASSERT(count > 0, ("zero count"));
        if ((foff + size) > object->un_pager.vnp.vnp_size)
                size = object->un_pager.vnp.vnp_size - foff;
        KASSERT(size > 0, ("zero size"));

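        /*
         * Example: with a 512-byte device block size, secmask below is 511
         * and a 7000-byte transfer is rounded up to 7168 bytes.
         */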
        /*
         * round up physical size for real devices.
         */
        {
                int secmask = bo->bo_bsize - 1;
                KASSERT(secmask < PAGE_SIZE && secmask > 0,
                    ("vnode_pager_generic_getpages: sector size %d too large",
                    secmask + 1));
                size = (size + secmask) & ~secmask;
        }

        bp = getpbuf(&vnode_pbuf_freecnt);
        kva = (vm_offset_t)bp->b_data;

        /*
         * and map the pages to be read into the kva, if the filesystem
         * requires mapped buffers.
         */
        mp = vp->v_mount;
        if (mp != NULL && (mp->mnt_kern_flag & MNTK_UNMAPPED_BUFS) != 0 &&
            unmapped_buf_allowed) {
                bp->b_data = unmapped_buf;
                bp->b_kvabase = unmapped_buf;
                bp->b_offset = 0;
                bp->b_flags |= B_UNMAPPED;
                bp->b_npages = count;
                for (i = 0; i < count; i++)
                        bp->b_pages[i] = m[i];
        } else
                pmap_qenter(kva, m, count);

        /* build a minimal buffer header */
        bp->b_iocmd = BIO_READ;
        bp->b_iodone = bdone;
        KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
        KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
        bp->b_rcred = crhold(curthread->td_ucred);
        bp->b_wcred = crhold(curthread->td_ucred);
        bp->b_blkno = firstaddr;
        pbgetbo(bo, bp);
        bp->b_vp = vp;
        bp->b_bcount = size;
        bp->b_bufsize = size;
        bp->b_runningbufspace = bp->b_bufsize;
        atomic_add_long(&runningbufspace, bp->b_runningbufspace);

        PCPU_INC(cnt.v_vnodein);
        PCPU_ADD(cnt.v_vnodepgsin, count);

        /* do the input */
        bp->b_iooffset = dbtob(bp->b_blkno);
        bstrategy(bp);

        bwait(bp, PVM, "vnread");

        if ((bp->b_ioflags & BIO_ERROR) != 0)
                error = EIO;

        if (error == 0 && size != count * PAGE_SIZE) {
                if ((bp->b_flags & B_UNMAPPED) != 0) {
                        bp->b_flags &= ~B_UNMAPPED;
                        pmap_qenter(kva, m, count);
                }
                bzero((caddr_t)kva + size, PAGE_SIZE * count - size);
        }
        if ((bp->b_flags & B_UNMAPPED) == 0)
                pmap_qremove(kva, count);
        if (mp != NULL && (mp->mnt_kern_flag & MNTK_UNMAPPED_BUFS) != 0) {
                bp->b_data = (caddr_t)kva;
                bp->b_kvabase = (caddr_t)kva;
                bp->b_flags &= ~B_UNMAPPED;
                for (i = 0; i < count; i++)
                        bp->b_pages[i] = NULL;
        }

        /*
         * free the buffer header back to the swap buffer pool
         */
        bp->b_vp = NULL;
        pbrelbo(bp);
        relpbuf(bp, &vnode_pbuf_freecnt);

        VM_OBJECT_LOCK(object);
        for (i = 0, tfoff = foff; i < count; i++, tfoff = nextoff) {
                vm_page_t mt;

                nextoff = tfoff + PAGE_SIZE;
                mt = m[i];

                if (nextoff <= object->un_pager.vnp.vnp_size) {
                        /*
                         * Read filled up entire page.
                         */
                        mt->valid = VM_PAGE_BITS_ALL;
                        KASSERT(mt->dirty == 0,
                            ("vnode_pager_generic_getpages: page %p is dirty",
                            mt));
                        KASSERT(!pmap_page_is_mapped(mt),
                            ("vnode_pager_generic_getpages: page %p is mapped",
                            mt));
                } else {
                        /*
                         * Read did not fill up entire page.
                         *
                         * Currently we do not set the entire page valid,
                         * we just try to clear the piece that we couldn't
                         * read.
                         */
                        vm_page_set_valid(mt, 0,
                            object->un_pager.vnp.vnp_size - tfoff);
                        KASSERT((mt->dirty & vm_page_bits(0,
                            object->un_pager.vnp.vnp_size - tfoff)) == 0,
                            ("vnode_pager_generic_getpages: page %p is dirty",
                            mt));
                }

                if (i != reqpage)
                        vm_page_readahead_finish(mt);
        }
        VM_OBJECT_UNLOCK(object);
        if (error) {
                printf("vnode_pager_getpages: I/O read error\n");
        }
        return (error ? VM_PAGER_ERROR : VM_PAGER_OK);
}

/*
 * EOPNOTSUPP is no longer legal.  For local media VFS's that do not
 * implement their own VOP_PUTPAGES, their VOP_PUTPAGES should call
 * vnode_pager_generic_putpages() to implement the previous behaviour.
 *
 * All other FS's should use the bypass to get to the local media
 * backing vp's VOP_PUTPAGES.
 */
static void
vnode_pager_putpages(vm_object_t object, vm_page_t *m, int count,
    boolean_t sync, int *rtvals)
{
        int rtval;
        struct vnode *vp;
        int bytes = count * PAGE_SIZE;

        /*
         * Force synchronous operation if we are extremely low on memory
         * to prevent a low-memory deadlock.  VOP operations often need to
         * allocate more memory to initiate the I/O ( i.e. do a BMAP
         * operation ).  The swapper handles the case by limiting the amount
         * of asynchronous I/O, but that sort of solution doesn't scale well
         * for the vnode pager without a lot of work.
         *
         * Also, the backing vnode's iodone routine may not wake the pageout
         * daemon up.  This should probably be addressed. XXX
         */

        if ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min)
                sync |= OBJPC_SYNC;

        /*
         * Call device-specific putpages function
         */
        vp = object->handle;
        VM_OBJECT_UNLOCK(object);
        rtval = VOP_PUTPAGES(vp, m, bytes, sync, rtvals, 0);
        KASSERT(rtval != EOPNOTSUPP,
            ("vnode_pager: stale FS putpages\n"));
        VM_OBJECT_LOCK(object);
}


/*
 * This is now called from local media FS's to operate against their
 * own vnodes if they fail to implement VOP_PUTPAGES.
 *
 * This is typically called indirectly via the pageout daemon and
 * clustering has typically already occurred, so in general we ask the
 * underlying filesystem to write the data out asynchronously rather
 * than delayed.
 */
int
vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *ma, int bytecount,
    int flags, int *rtvals)
{
        int i;
        vm_object_t object;
        vm_page_t m;
        int count;

        int maxsize, ncount;
        vm_ooffset_t poffset;
        struct uio auio;
        struct iovec aiov;
        int error;
        int ioflags;
        int ppscheck = 0;
        static struct timeval lastfail;
        static int curfail;

        object = vp->v_object;
        count = bytecount / PAGE_SIZE;

        for (i = 0; i < count; i++)
                rtvals[i] = VM_PAGER_ERROR;

        if ((int64_t)ma[0]->pindex < 0) {
                printf("vnode_pager_putpages: attempt to write meta-data!!! -- 0x%lx(%lx)\n",
                    (long)ma[0]->pindex, (u_long)ma[0]->dirty);
                rtvals[0] = VM_PAGER_BAD;
                return (VM_PAGER_BAD);
        }

        maxsize = count * PAGE_SIZE;
        ncount = count;

        poffset = IDX_TO_OFF(ma[0]->pindex);

        /*
         * If the page-aligned write is larger than the actual file we
         * have to invalidate pages occurring beyond the file EOF.  However,
         * there is an edge case where a file may not be page-aligned where
         * the last page is partially invalid.  In this case the filesystem
         * may not properly clear the dirty bits for the entire page (which
         * could be VM_PAGE_BITS_ALL due to the page having been mmap()d).
         * With the page locked we are free to fix up the dirty bits here.
         *
         * We do not under any circumstances truncate the valid bits, as
         * this will screw up bogus page replacement.
         */
        VM_OBJECT_LOCK(object);
        if (maxsize + poffset > object->un_pager.vnp.vnp_size) {
                if (object->un_pager.vnp.vnp_size > poffset) {
                        int pgoff;

                        maxsize = object->un_pager.vnp.vnp_size - poffset;
                        ncount = btoc(maxsize);
                        if ((pgoff = (int)maxsize & PAGE_MASK) != 0) {
                                /*
                                 * If the object is locked and the following
                                 * conditions hold, then the page's dirty
                                 * field cannot be concurrently changed by a
                                 * pmap operation.
                                 */
                                m = ma[ncount - 1];
                                KASSERT(m->busy > 0,
                ("vnode_pager_generic_putpages: page %p is not busy", m));
                                KASSERT(!pmap_page_is_write_mapped(m),
                ("vnode_pager_generic_putpages: page %p is not read-only", m));
                                vm_page_clear_dirty(m, pgoff, PAGE_SIZE -
                                    pgoff);
                        }
                } else {
                        maxsize = 0;
                        ncount = 0;
                }
                if (ncount < count) {
                        for (i = ncount; i < count; i++) {
                                rtvals[i] = VM_PAGER_BAD;
                        }
                }
        }
        VM_OBJECT_UNLOCK(object);

        /*
         * pageouts are already clustered, use IO_ASYNC to force a bawrite()
         * rather than a bdwrite() to prevent paging I/O from saturating
         * the buffer cache.  Dummy-up the sequential heuristic to cause
         * large ranges to cluster.  If neither IO_SYNC nor IO_ASYNC is set,
         * the system decides how to cluster.
         */
        ioflags = IO_VMIO;
        if (flags & (VM_PAGER_PUT_SYNC | VM_PAGER_PUT_INVAL))
                ioflags |= IO_SYNC;
        else if ((flags & VM_PAGER_CLUSTER_OK) == 0)
                ioflags |= IO_ASYNC;
        ioflags |= (flags & VM_PAGER_PUT_INVAL) ? IO_INVAL: 0;
        ioflags |= IO_SEQMAX << IO_SEQSHIFT;

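        /*
         * The write below uses UIO_NOCOPY: the filesystem takes the data
         * directly from the VM object's pages, so no data buffer is
         * supplied (iov_base is NULL).
         */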
        aiov.iov_base = NULL;
        aiov.iov_len = maxsize;
        auio.uio_iov = &aiov;
        auio.uio_iovcnt = 1;
        auio.uio_offset = poffset;
        auio.uio_segflg = UIO_NOCOPY;
        auio.uio_rw = UIO_WRITE;
        auio.uio_resid = maxsize;
        auio.uio_td = NULL;
        error = VOP_WRITE(vp, &auio, ioflags, curthread->td_ucred);
        PCPU_INC(cnt.v_vnodeout);
        PCPU_ADD(cnt.v_vnodepgsout, ncount);

        if (error) {
                if ((ppscheck = ppsratecheck(&lastfail, &curfail, 1)))
                        printf("vnode_pager_putpages: I/O error %d\n", error);
        }
        if (auio.uio_resid) {
                if (ppscheck || ppsratecheck(&lastfail, &curfail, 1))
                        printf("vnode_pager_putpages: residual I/O %zd at %lu\n",
                            auio.uio_resid, (u_long)ma[0]->pindex);
        }
        for (i = 0; i < ncount; i++) {
                rtvals[i] = VM_PAGER_OK;
        }
        return (rtvals[0]);
}

void
vnode_pager_undirty_pages(vm_page_t *ma, int *rtvals, int written)
{
        vm_object_t obj;
        int i, pos;

        if (written == 0)
                return;
        obj = ma[0]->object;
        VM_OBJECT_LOCK(obj);
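        /*
         * Pages fully covered by the written byte count are marked clean;
         * a page that is only partially covered keeps its remaining dirty
         * bits and is reported as VM_PAGER_AGAIN so the caller retries it.
         */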
        for (i = 0, pos = 0; pos < written; i++, pos += PAGE_SIZE) {
                if (pos < trunc_page(written)) {
                        rtvals[i] = VM_PAGER_OK;
                        vm_page_undirty(ma[i]);
                } else {
                        /* Partially written page. */
                        rtvals[i] = VM_PAGER_AGAIN;
                        vm_page_clear_dirty(ma[i], 0, written & PAGE_MASK);
                }
        }
        VM_OBJECT_UNLOCK(obj);
}

void
vnode_pager_update_writecount(vm_object_t object, vm_offset_t start,
    vm_offset_t end)
{
        struct vnode *vp;
        vm_ooffset_t old_wm;

        VM_OBJECT_LOCK(object);
        if (object->type != OBJT_VNODE) {
                VM_OBJECT_UNLOCK(object);
                return;
        }
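        /*
         * The vnode's v_writecount only tracks whether any writeable
         * mappings exist: it is incremented when writemappings goes from
         * zero to non-zero and decremented on the transition back to zero.
         */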
        old_wm = object->un_pager.vnp.writemappings;
        object->un_pager.vnp.writemappings += (vm_ooffset_t)end - start;
        vp = object->handle;
        if (old_wm == 0 && object->un_pager.vnp.writemappings != 0) {
                ASSERT_VOP_ELOCKED(vp, "v_writecount inc");
                VOP_ADD_WRITECOUNT(vp, 1);
        } else if (old_wm != 0 && object->un_pager.vnp.writemappings == 0) {
                ASSERT_VOP_ELOCKED(vp, "v_writecount dec");
                VOP_ADD_WRITECOUNT(vp, -1);
        }
        VM_OBJECT_UNLOCK(object);
}

void
vnode_pager_release_writecount(vm_object_t object, vm_offset_t start,
    vm_offset_t end)
{
        struct vnode *vp;
        struct mount *mp;
        vm_offset_t inc;
        int vfslocked;

        VM_OBJECT_LOCK(object);

        /*
         * First, recheck the object type to account for the race when
         * the vnode is reclaimed.
         */
        if (object->type != OBJT_VNODE) {
                VM_OBJECT_UNLOCK(object);
                return;
        }

        /*
         * Optimize for the case when writemappings is not going to
         * zero.
         */
        inc = end - start;
        if (object->un_pager.vnp.writemappings != inc) {
                object->un_pager.vnp.writemappings -= inc;
                VM_OBJECT_UNLOCK(object);
                return;
        }

        vp = object->handle;
        vhold(vp);
        VM_OBJECT_UNLOCK(object);
        vfslocked = VFS_LOCK_GIANT(vp->v_mount);
        mp = NULL;
        vn_start_write(vp, &mp, V_WAIT);
        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

        /*
         * Decrement the object's writemappings by swapping the start
         * and end arguments for vnode_pager_update_writecount().  If
         * there was not a race with vnode reclamation, then the
         * vnode's v_writecount is decremented.
         */
        vnode_pager_update_writecount(object, end, start);
        VOP_UNLOCK(vp, 0);
        vdrop(vp);
        if (mp != NULL)
                vn_finished_write(mp);
        VFS_UNLOCK_GIANT(vfslocked);
}
