FreeBSD/Linux Kernel Cross Reference
sys/vm/vnode_pager.c


    1 /*-
    2  * Copyright (c) 1990 University of Utah.
    3  * Copyright (c) 1991 The Regents of the University of California.
    4  * All rights reserved.
    5  * Copyright (c) 1993, 1994 John S. Dyson
    6  * Copyright (c) 1995, David Greenman
    7  *
    8  * This code is derived from software contributed to Berkeley by
    9  * the Systems Programming Group of the University of Utah Computer
   10  * Science Department.
   11  *
   12  * Redistribution and use in source and binary forms, with or without
   13  * modification, are permitted provided that the following conditions
   14  * are met:
   15  * 1. Redistributions of source code must retain the above copyright
   16  *    notice, this list of conditions and the following disclaimer.
   17  * 2. Redistributions in binary form must reproduce the above copyright
   18  *    notice, this list of conditions and the following disclaimer in the
   19  *    documentation and/or other materials provided with the distribution.
   20  * 3. All advertising materials mentioning features or use of this software
   21  *    must display the following acknowledgement:
   22  *      This product includes software developed by the University of
   23  *      California, Berkeley and its contributors.
   24  * 4. Neither the name of the University nor the names of its contributors
   25  *    may be used to endorse or promote products derived from this software
   26  *    without specific prior written permission.
   27  *
   28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   38  * SUCH DAMAGE.
   39  *
   40  *      from: @(#)vnode_pager.c 7.5 (Berkeley) 4/20/91
   41  */
   42 
   43 /*
   44  * Page to/from files (vnodes).
   45  */
   46 
   47 /*
   48  * TODO:
   49  *      Implement VOP_GETPAGES/PUTPAGES interface for filesystems. Will
    50  *      greatly simplify the vnode_pager.
   51  */
   52 
   53 #include <sys/cdefs.h>
   54 __FBSDID("$FreeBSD: stable/10/sys/vm/vnode_pager.c 291454 2015-11-29 14:44:40Z kib $");
   55 
   56 #include <sys/param.h>
   57 #include <sys/systm.h>
   58 #include <sys/proc.h>
   59 #include <sys/vnode.h>
   60 #include <sys/mount.h>
   61 #include <sys/bio.h>
   62 #include <sys/buf.h>
   63 #include <sys/vmmeter.h>
   64 #include <sys/limits.h>
   65 #include <sys/conf.h>
   66 #include <sys/rwlock.h>
   67 #include <sys/sf_buf.h>
   68 
   69 #include <machine/atomic.h>
   70 
   71 #include <vm/vm.h>
   72 #include <vm/vm_param.h>
   73 #include <vm/vm_object.h>
   74 #include <vm/vm_page.h>
   75 #include <vm/vm_pager.h>
   76 #include <vm/vm_map.h>
   77 #include <vm/vnode_pager.h>
   78 #include <vm/vm_extern.h>
   79 
   80 static int vnode_pager_addr(struct vnode *vp, vm_ooffset_t address,
   81     daddr_t *rtaddress, int *run);
   82 static int vnode_pager_input_smlfs(vm_object_t object, vm_page_t m);
   83 static int vnode_pager_input_old(vm_object_t object, vm_page_t m);
   84 static void vnode_pager_dealloc(vm_object_t);
   85 static int vnode_pager_getpages(vm_object_t, vm_page_t *, int, int);
   86 static void vnode_pager_putpages(vm_object_t, vm_page_t *, int, int, int *);
   87 static boolean_t vnode_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
   88 static vm_object_t vnode_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
   89     vm_ooffset_t, struct ucred *cred);
   90 
   91 struct pagerops vnodepagerops = {
   92         .pgo_alloc =    vnode_pager_alloc,
   93         .pgo_dealloc =  vnode_pager_dealloc,
   94         .pgo_getpages = vnode_pager_getpages,
   95         .pgo_putpages = vnode_pager_putpages,
   96         .pgo_haspage =  vnode_pager_haspage,
   97 };
   98 
   99 int vnode_pbuf_freecnt;
  100 
  101 /* Create the VM system backing object for this vnode */
  102 int
  103 vnode_create_vobject(struct vnode *vp, off_t isize, struct thread *td)
  104 {
  105         vm_object_t object;
  106         vm_ooffset_t size = isize;
  107         struct vattr va;
  108 
  109         if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
  110                 return (0);
  111 
  112         while ((object = vp->v_object) != NULL) {
  113                 VM_OBJECT_WLOCK(object);
  114                 if (!(object->flags & OBJ_DEAD)) {
  115                         VM_OBJECT_WUNLOCK(object);
  116                         return (0);
  117                 }
  118                 VOP_UNLOCK(vp, 0);
  119                 vm_object_set_flag(object, OBJ_DISCONNECTWNT);
  120                 VM_OBJECT_SLEEP(object, object, PDROP | PVM, "vodead", 0);
  121                 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
  122         }
  123 
  124         if (size == 0) {
  125                 if (vn_isdisk(vp, NULL)) {
  126                         size = IDX_TO_OFF(INT_MAX);
  127                 } else {
  128                         if (VOP_GETATTR(vp, &va, td->td_ucred))
  129                                 return (0);
  130                         size = va.va_size;
  131                 }
  132         }
  133 
  134         object = vnode_pager_alloc(vp, size, 0, 0, td->td_ucred);
  135         /*
   136          * Drop the reference we just acquired.  This assumes
  137          * that the object is associated with the vp.
  138          */
  139         VM_OBJECT_WLOCK(object);
  140         object->ref_count--;
  141         VM_OBJECT_WUNLOCK(object);
  142         vrele(vp);
  143 
  144         KASSERT(vp->v_object != NULL, ("vnode_create_vobject: NULL object"));
  145 
  146         return (0);
  147 }
  148 
  149 void
  150 vnode_destroy_vobject(struct vnode *vp)
  151 {
  152         struct vm_object *obj;
  153 
  154         obj = vp->v_object;
  155         if (obj == NULL)
  156                 return;
  157         ASSERT_VOP_ELOCKED(vp, "vnode_destroy_vobject");
  158         VM_OBJECT_WLOCK(obj);
  159         if (obj->ref_count == 0) {
  160                 /*
  161                  * don't double-terminate the object
  162                  */
  163                 if ((obj->flags & OBJ_DEAD) == 0)
  164                         vm_object_terminate(obj);
  165                 else
  166                         VM_OBJECT_WUNLOCK(obj);
  167         } else {
  168                 /*
  169                  * Woe to the process that tries to page now :-).
  170                  */
  171                 vm_pager_deallocate(obj);
  172                 VM_OBJECT_WUNLOCK(obj);
  173         }
  174         vp->v_object = NULL;
  175 }
  176 
  177 
  178 /*
  179  * Allocate (or lookup) pager for a vnode.
  180  * Handle is a vnode pointer.
  181  *
  182  * MPSAFE
  183  */
  184 vm_object_t
  185 vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
  186     vm_ooffset_t offset, struct ucred *cred)
  187 {
  188         vm_object_t object;
  189         struct vnode *vp;
  190 
  191         /*
  192          * Pageout to vnode, no can do yet.
  193          */
  194         if (handle == NULL)
  195                 return (NULL);
  196 
  197         vp = (struct vnode *) handle;
  198 
  199         /*
  200          * If the object is being terminated, wait for it to
  201          * go away.
  202          */
  203 retry:
  204         while ((object = vp->v_object) != NULL) {
  205                 VM_OBJECT_WLOCK(object);
  206                 if ((object->flags & OBJ_DEAD) == 0)
  207                         break;
  208                 vm_object_set_flag(object, OBJ_DISCONNECTWNT);
  209                 VM_OBJECT_SLEEP(object, object, PDROP | PVM, "vadead", 0);
  210         }
  211 
  212         KASSERT(vp->v_usecount != 0, ("vnode_pager_alloc: no vnode reference"));
  213 
  214         if (object == NULL) {
  215                 /*
  216                  * Add an object of the appropriate size
  217                  */
  218                 object = vm_object_allocate(OBJT_VNODE, OFF_TO_IDX(round_page(size)));
  219 
  220                 object->un_pager.vnp.vnp_size = size;
  221                 object->un_pager.vnp.writemappings = 0;
  222 
  223                 object->handle = handle;
  224                 VI_LOCK(vp);
  225                 if (vp->v_object != NULL) {
  226                         /*
  227                          * Object has been created while we were sleeping
  228                          */
  229                         VI_UNLOCK(vp);
  230                         VM_OBJECT_WLOCK(object);
  231                         KASSERT(object->ref_count == 1,
  232                             ("leaked ref %p %d", object, object->ref_count));
  233                         object->type = OBJT_DEAD;
  234                         object->ref_count = 0;
  235                         VM_OBJECT_WUNLOCK(object);
  236                         vm_object_destroy(object);
  237                         goto retry;
  238                 }
  239                 vp->v_object = object;
  240                 VI_UNLOCK(vp);
  241         } else {
  242                 object->ref_count++;
  243                 VM_OBJECT_WUNLOCK(object);
  244         }
  245         vref(vp);
  246         return (object);
  247 }
  248 
  249 /*
  250  *      The object must be locked.
  251  */
  252 static void
   253 vnode_pager_dealloc(vm_object_t object)
  255 {
  256         struct vnode *vp;
  257         int refs;
  258 
  259         vp = object->handle;
  260         if (vp == NULL)
  261                 panic("vnode_pager_dealloc: pager already dealloced");
  262 
  263         VM_OBJECT_ASSERT_WLOCKED(object);
  264         vm_object_pip_wait(object, "vnpdea");
  265         refs = object->ref_count;
  266 
  267         object->handle = NULL;
  268         object->type = OBJT_DEAD;
  269         if (object->flags & OBJ_DISCONNECTWNT) {
  270                 vm_object_clear_flag(object, OBJ_DISCONNECTWNT);
  271                 wakeup(object);
  272         }
  273         ASSERT_VOP_ELOCKED(vp, "vnode_pager_dealloc");
  274         if (object->un_pager.vnp.writemappings > 0) {
  275                 object->un_pager.vnp.writemappings = 0;
  276                 VOP_ADD_WRITECOUNT(vp, -1);
  277                 CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d",
  278                     __func__, vp, vp->v_writecount);
  279         }
  280         vp->v_object = NULL;
  281         VOP_UNSET_TEXT(vp);
  282         VM_OBJECT_WUNLOCK(object);
  283         while (refs-- > 0)
  284                 vunref(vp);
  285         VM_OBJECT_WLOCK(object);
  286 }
  287 
  288 static boolean_t
   289 vnode_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
   290     int *after)
  294 {
  295         struct vnode *vp = object->handle;
  296         daddr_t bn;
  297         int err;
  298         daddr_t reqblock;
  299         int poff;
  300         int bsize;
  301         int pagesperblock, blocksperpage;
  302 
  303         VM_OBJECT_ASSERT_WLOCKED(object);
  304         /*
   305          * If there is no vp, or the vnode has been doomed, we do
   306          * not have the page.
  307          */
  308         if (vp == NULL || vp->v_iflag & VI_DOOMED)
  309                 return FALSE;
  310         /*
  311          * If the offset is beyond end of file we do
  312          * not have the page.
  313          */
  314         if (IDX_TO_OFF(pindex) >= object->un_pager.vnp.vnp_size)
  315                 return FALSE;
  316 
  317         bsize = vp->v_mount->mnt_stat.f_iosize;
  318         pagesperblock = bsize / PAGE_SIZE;
  319         blocksperpage = 0;
  320         if (pagesperblock > 0) {
  321                 reqblock = pindex / pagesperblock;
  322         } else {
  323                 blocksperpage = (PAGE_SIZE / bsize);
  324                 reqblock = pindex * blocksperpage;
  325         }
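               /*
                * A worked example of the conversion above (illustrative
                * numbers, assuming a 4096-byte PAGE_SIZE): with f_iosize
                * 16384, pagesperblock is 4 and pindex 10 maps to file
                * block 10 / 4 = 2; with f_iosize 1024, blocksperpage is 4
                * and pindex 10 maps to file block 10 * 4 = 40.
                */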
  326         VM_OBJECT_WUNLOCK(object);
  327         err = VOP_BMAP(vp, reqblock, NULL, &bn, after, before);
  328         VM_OBJECT_WLOCK(object);
  329         if (err)
  330                 return TRUE;
  331         if (bn == -1)
  332                 return FALSE;
  333         if (pagesperblock > 0) {
  334                 poff = pindex - (reqblock * pagesperblock);
  335                 if (before) {
  336                         *before *= pagesperblock;
  337                         *before += poff;
  338                 }
  339                 if (after) {
  340                         int numafter;
  341                         *after *= pagesperblock;
  342                         numafter = pagesperblock - (poff + 1);
  343                         if (IDX_TO_OFF(pindex + numafter) >
  344                             object->un_pager.vnp.vnp_size) {
  345                                 numafter =
  346                                     OFF_TO_IDX(object->un_pager.vnp.vnp_size) -
  347                                     pindex;
  348                         }
  349                         *after += numafter;
  350                 }
  351         } else {
  352                 if (before) {
  353                         *before /= blocksperpage;
  354                 }
  355 
  356                 if (after) {
  357                         *after /= blocksperpage;
  358                 }
  359         }
  360         return TRUE;
  361 }
  362 
  363 /*
  364  * Lets the VM system know about a change in size for a file.
  365  * We adjust our own internal size and flush any cached pages in
  366  * the associated object that are affected by the size change.
  367  *
  368  * Note: this routine may be invoked as a result of a pager put
  369  * operation (possibly at object termination time), so we must be careful.
  370  */
  371 void
   372 vnode_pager_setsize(struct vnode *vp, vm_ooffset_t nsize)
  375 {
  376         vm_object_t object;
  377         vm_page_t m;
  378         vm_pindex_t nobjsize;
  379 
  380         if ((object = vp->v_object) == NULL)
  381                 return;
  382 /*      ASSERT_VOP_ELOCKED(vp, "vnode_pager_setsize and not locked vnode"); */
  383         VM_OBJECT_WLOCK(object);
  384         if (object->type == OBJT_DEAD) {
  385                 VM_OBJECT_WUNLOCK(object);
  386                 return;
  387         }
  388         KASSERT(object->type == OBJT_VNODE,
  389             ("not vnode-backed object %p", object));
  390         if (nsize == object->un_pager.vnp.vnp_size) {
  391                 /*
  392                  * Hasn't changed size
  393                  */
  394                 VM_OBJECT_WUNLOCK(object);
  395                 return;
  396         }
  397         nobjsize = OFF_TO_IDX(nsize + PAGE_MASK);
  398         if (nsize < object->un_pager.vnp.vnp_size) {
  399                 /*
  400                  * File has shrunk. Toss any cached pages beyond the new EOF.
  401                  */
  402                 if (nobjsize < object->size)
  403                         vm_object_page_remove(object, nobjsize, object->size,
  404                             0);
  405                 /*
  406                  * this gets rid of garbage at the end of a page that is now
  407                  * only partially backed by the vnode.
  408                  *
  409                  * XXX for some reason (I don't know yet), if we take a
  410                  * completely invalid page and mark it partially valid
  411                  * it can screw up NFS reads, so we don't allow the case.
  412                  */
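               /*
                * Worked example (assumes PAGE_SIZE 4096 and DEV_BSIZE 512):
                * truncating a 10000-byte file to nsize 6000 gives base =
                * 6000 & PAGE_MASK = 1904, so bytes 1904..4095 of the last
                * page are zeroed and marked valid below, while dirty bits
                * are only cleared from roundup2(1904, 512) = 2048 onward.
                */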
  413                 if ((nsize & PAGE_MASK) &&
  414                     (m = vm_page_lookup(object, OFF_TO_IDX(nsize))) != NULL &&
  415                     m->valid != 0) {
  416                         int base = (int)nsize & PAGE_MASK;
  417                         int size = PAGE_SIZE - base;
  418 
  419                         /*
  420                          * Clear out partial-page garbage in case
  421                          * the page has been mapped.
  422                          */
  423                         pmap_zero_page_area(m, base, size);
  424 
  425                         /*
  426                          * Update the valid bits to reflect the blocks that
  427                          * have been zeroed.  Some of these valid bits may
  428                          * have already been set.
  429                          */
  430                         vm_page_set_valid_range(m, base, size);
  431 
  432                         /*
  433                          * Round "base" to the next block boundary so that the
  434                          * dirty bit for a partially zeroed block is not
  435                          * cleared.
  436                          */
  437                         base = roundup2(base, DEV_BSIZE);
  438 
  439                         /*
  440                          * Clear out partial-page dirty bits.
  441                          *
  442                          * note that we do not clear out the valid
  443                          * bits.  This would prevent bogus_page
  444                          * replacement from working properly.
  445                          */
  446                         vm_page_clear_dirty(m, base, PAGE_SIZE - base);
  447                 } else if ((nsize & PAGE_MASK) &&
  448                     vm_page_is_cached(object, OFF_TO_IDX(nsize))) {
  449                         vm_page_cache_free(object, OFF_TO_IDX(nsize),
  450                             nobjsize);
  451                 }
  452         }
  453         object->un_pager.vnp.vnp_size = nsize;
  454         object->size = nobjsize;
  455         VM_OBJECT_WUNLOCK(object);
  456 }
  457 
  458 /*
  459  * calculate the linear (byte) disk address of specified virtual
  460  * file address
  461  */
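       /*
        * Illustrative example (values assumed, not taken from a real
        * mount): with f_iosize 8192 and DEV_BSIZE 512, file address
        * 20000 yields vblock 2 and voffset 3616.  If VOP_BMAP() maps
        * block 2 to disk block 1000, *rtaddress becomes 1000 +
        * 3616 / 512 = 1007; a returned run of 3 further blocks becomes
        * (3 + 1) * (8192 / 4096) - 3616 / 4096 = 8 contiguous pages for
        * a 4096-byte page size.
        */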
  462 static int
  463 vnode_pager_addr(struct vnode *vp, vm_ooffset_t address, daddr_t *rtaddress,
  464     int *run)
  465 {
  466         int bsize;
  467         int err;
  468         daddr_t vblock;
  469         daddr_t voffset;
  470 
  471         if (address < 0)
  472                 return -1;
  473 
  474         if (vp->v_iflag & VI_DOOMED)
  475                 return -1;
  476 
  477         bsize = vp->v_mount->mnt_stat.f_iosize;
  478         vblock = address / bsize;
  479         voffset = address % bsize;
  480 
  481         err = VOP_BMAP(vp, vblock, NULL, rtaddress, run, NULL);
  482         if (err == 0) {
  483                 if (*rtaddress != -1)
  484                         *rtaddress += voffset / DEV_BSIZE;
  485                 if (run) {
  486                         *run += 1;
  487                         *run *= bsize/PAGE_SIZE;
  488                         *run -= voffset/PAGE_SIZE;
  489                 }
  490         }
  491 
  492         return (err);
  493 }
  494 
  495 /*
  496  * small block filesystem vnode pager input
  497  */
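       /*
        * For example (assuming a 4096-byte page), a filesystem with a
        * 1024-byte block size needs up to four block-sized reads to fill
        * one page; sub-blocks that lie beyond EOF or have no backing
        * store (a bmap result of -1) are simply zeroed instead.
        */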
  498 static int
   499 vnode_pager_input_smlfs(vm_object_t object, vm_page_t m)
  502 {
  503         struct vnode *vp;
  504         struct bufobj *bo;
  505         struct buf *bp;
  506         struct sf_buf *sf;
  507         daddr_t fileaddr;
  508         vm_offset_t bsize;
  509         vm_page_bits_t bits;
  510         int error, i;
  511 
  512         error = 0;
  513         vp = object->handle;
  514         if (vp->v_iflag & VI_DOOMED)
  515                 return VM_PAGER_BAD;
  516 
  517         bsize = vp->v_mount->mnt_stat.f_iosize;
  518 
  519         VOP_BMAP(vp, 0, &bo, 0, NULL, NULL);
  520 
  521         sf = sf_buf_alloc(m, 0);
  522 
  523         for (i = 0; i < PAGE_SIZE / bsize; i++) {
  524                 vm_ooffset_t address;
  525 
  526                 bits = vm_page_bits(i * bsize, bsize);
  527                 if (m->valid & bits)
  528                         continue;
  529 
  530                 address = IDX_TO_OFF(m->pindex) + i * bsize;
  531                 if (address >= object->un_pager.vnp.vnp_size) {
  532                         fileaddr = -1;
  533                 } else {
  534                         error = vnode_pager_addr(vp, address, &fileaddr, NULL);
  535                         if (error)
  536                                 break;
  537                 }
  538                 if (fileaddr != -1) {
  539                         bp = getpbuf(&vnode_pbuf_freecnt);
  540 
  541                         /* build a minimal buffer header */
  542                         bp->b_iocmd = BIO_READ;
  543                         bp->b_iodone = bdone;
  544                         KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
  545                         KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
  546                         bp->b_rcred = crhold(curthread->td_ucred);
  547                         bp->b_wcred = crhold(curthread->td_ucred);
  548                         bp->b_data = (caddr_t)sf_buf_kva(sf) + i * bsize;
  549                         bp->b_blkno = fileaddr;
  550                         pbgetbo(bo, bp);
  551                         bp->b_vp = vp;
  552                         bp->b_bcount = bsize;
  553                         bp->b_bufsize = bsize;
  554                         bp->b_runningbufspace = bp->b_bufsize;
  555                         atomic_add_long(&runningbufspace, bp->b_runningbufspace);
  556 
  557                         /* do the input */
  558                         bp->b_iooffset = dbtob(bp->b_blkno);
  559                         bstrategy(bp);
  560 
  561                         bwait(bp, PVM, "vnsrd");
  562 
  563                         if ((bp->b_ioflags & BIO_ERROR) != 0)
  564                                 error = EIO;
  565 
  566                         /*
  567                          * free the buffer header back to the swap buffer pool
  568                          */
  569                         bp->b_vp = NULL;
  570                         pbrelbo(bp);
  571                         relpbuf(bp, &vnode_pbuf_freecnt);
  572                         if (error)
  573                                 break;
  574                 } else
  575                         bzero((caddr_t)sf_buf_kva(sf) + i * bsize, bsize);
  576                 KASSERT((m->dirty & bits) == 0,
  577                     ("vnode_pager_input_smlfs: page %p is dirty", m));
  578                 VM_OBJECT_WLOCK(object);
  579                 m->valid |= bits;
  580                 VM_OBJECT_WUNLOCK(object);
  581         }
  582         sf_buf_free(sf);
  583         if (error) {
  584                 return VM_PAGER_ERROR;
  585         }
  586         return VM_PAGER_OK;
  587 }
  588 
  589 /*
  590  * old style vnode pager input routine
  591  */
  592 static int
   593 vnode_pager_input_old(vm_object_t object, vm_page_t m)
  596 {
  597         struct uio auio;
  598         struct iovec aiov;
  599         int error;
  600         int size;
  601         struct sf_buf *sf;
  602         struct vnode *vp;
  603 
  604         VM_OBJECT_ASSERT_WLOCKED(object);
  605         error = 0;
  606 
  607         /*
  608          * Return failure if beyond current EOF
  609          */
  610         if (IDX_TO_OFF(m->pindex) >= object->un_pager.vnp.vnp_size) {
  611                 return VM_PAGER_BAD;
  612         } else {
  613                 size = PAGE_SIZE;
  614                 if (IDX_TO_OFF(m->pindex) + size > object->un_pager.vnp.vnp_size)
  615                         size = object->un_pager.vnp.vnp_size - IDX_TO_OFF(m->pindex);
  616                 vp = object->handle;
  617                 VM_OBJECT_WUNLOCK(object);
  618 
  619                 /*
  620                  * Allocate a kernel virtual address and initialize so that
  621                  * we can use VOP_READ/WRITE routines.
  622                  */
  623                 sf = sf_buf_alloc(m, 0);
  624 
  625                 aiov.iov_base = (caddr_t)sf_buf_kva(sf);
  626                 aiov.iov_len = size;
  627                 auio.uio_iov = &aiov;
  628                 auio.uio_iovcnt = 1;
  629                 auio.uio_offset = IDX_TO_OFF(m->pindex);
  630                 auio.uio_segflg = UIO_SYSSPACE;
  631                 auio.uio_rw = UIO_READ;
  632                 auio.uio_resid = size;
  633                 auio.uio_td = curthread;
  634 
  635                 error = VOP_READ(vp, &auio, 0, curthread->td_ucred);
  636                 if (!error) {
  637                         int count = size - auio.uio_resid;
  638 
  639                         if (count == 0)
  640                                 error = EINVAL;
  641                         else if (count != PAGE_SIZE)
  642                                 bzero((caddr_t)sf_buf_kva(sf) + count,
  643                                     PAGE_SIZE - count);
  644                 }
  645                 sf_buf_free(sf);
  646 
  647                 VM_OBJECT_WLOCK(object);
  648         }
  649         KASSERT(m->dirty == 0, ("vnode_pager_input_old: page %p is dirty", m));
  650         if (!error)
  651                 m->valid = VM_PAGE_BITS_ALL;
  652         return error ? VM_PAGER_ERROR : VM_PAGER_OK;
  653 }
  654 
  655 /*
  656  * generic vnode pager input routine
  657  */
  658 
  659 /*
  660  * Local media VFS's that do not implement their own VOP_GETPAGES
   661  * should have their VOP_GETPAGES call vnode_pager_generic_getpages()
  662  * to implement the previous behaviour.
  663  *
  664  * All other FS's should use the bypass to get to the local media
  665  * backing vp's VOP_GETPAGES.
  666  */
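       /*
        * A minimal sketch of such a VOP_GETPAGES implementation for a
        * hypothetical local filesystem ("myfs" is illustrative, not a
        * real FS; the argument layout matches this FreeBSD version):
        *
        *      static int
        *      myfs_getpages(struct vop_getpages_args *ap)
        *      {
        *              return (vnode_pager_generic_getpages(ap->a_vp,
        *                  ap->a_m, ap->a_count, ap->a_reqpage));
        *      }
        */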
  667 static int
   668 vnode_pager_getpages(vm_object_t object, vm_page_t *m, int count,
   669     int reqpage)
  673 {
  674         int rtval;
  675         struct vnode *vp;
  676         int bytes = count * PAGE_SIZE;
  677 
  678         vp = object->handle;
  679         VM_OBJECT_WUNLOCK(object);
  680         rtval = VOP_GETPAGES(vp, m, bytes, reqpage, 0);
  681         KASSERT(rtval != EOPNOTSUPP,
  682             ("vnode_pager: FS getpages not implemented\n"));
  683         VM_OBJECT_WLOCK(object);
  684         return rtval;
  685 }
  686 
  687 /*
  688  * This is now called from local media FS's to operate against their
  689  * own vnodes if they fail to implement VOP_GETPAGES.
  690  */
  691 int
   692 vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int bytecount,
   693     int reqpage)
  697 {
  698         vm_object_t object;
  699         struct bufobj *bo;
  700         struct buf *bp;
  701         struct mount *mp;
  702         vm_offset_t kva;
  703         daddr_t firstaddr, reqblock;
  704         off_t foff, nextoff, tfoff, pib;
  705         int pbefore, pafter, i, size, bsize, first, last;
  706         int count, error, before, after, secmask;
  707 
  708         KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
  709             ("vnode_pager_generic_getpages does not support devices"));
  710         if (vp->v_iflag & VI_DOOMED)
  711                 return (VM_PAGER_BAD);
  712 
  713         object = vp->v_object;
  714         count = bytecount / PAGE_SIZE;
  715         bsize = vp->v_mount->mnt_stat.f_iosize;
  716 
  717         /* get the UNDERLYING device for the file with VOP_BMAP() */
  718 
  719         /*
   720          * Originally we did not check for an error return value,
   721          * assuming a filesystem always has a bmap entry point.  Wrong!
  722          */
  723         foff = IDX_TO_OFF(m[reqpage]->pindex);
  724 
  725         /*
  726          * if we can't bmap, use old VOP code
  727          */
  728         error = VOP_BMAP(vp, IDX_TO_OFF(m[reqpage]->pindex) / bsize, &bo,
  729             &reqblock, &after, &before);
  730         if (error == EOPNOTSUPP) {
  731                 VM_OBJECT_WLOCK(object);
  732                 
  733                 for (i = 0; i < count; i++)
  734                         if (i != reqpage) {
  735                                 vm_page_lock(m[i]);
  736                                 vm_page_free(m[i]);
  737                                 vm_page_unlock(m[i]);
  738                         }
  739                 PCPU_INC(cnt.v_vnodein);
  740                 PCPU_INC(cnt.v_vnodepgsin);
  741                 error = vnode_pager_input_old(object, m[reqpage]);
  742                 VM_OBJECT_WUNLOCK(object);
  743                 return (error);
  744         } else if (error != 0) {
  745                 VM_OBJECT_WLOCK(object);
  746                 for (i = 0; i < count; i++)
  747                         if (i != reqpage) {
  748                                 vm_page_lock(m[i]);
  749                                 vm_page_free(m[i]);
  750                                 vm_page_unlock(m[i]);
  751                         }
  752                 VM_OBJECT_WUNLOCK(object);
  753                 return (VM_PAGER_ERROR);
  754 
  755                 /*
  756                  * if the blocksize is smaller than a page size, then use
  757                  * special small filesystem code.  NFS sometimes has a small
  758                  * blocksize, but it can handle large reads itself.
  759                  */
  760         } else if ((PAGE_SIZE / bsize) > 1 &&
  761             (vp->v_mount->mnt_stat.f_type != nfs_mount_type)) {
  762                 VM_OBJECT_WLOCK(object);
  763                 for (i = 0; i < count; i++)
  764                         if (i != reqpage) {
  765                                 vm_page_lock(m[i]);
  766                                 vm_page_free(m[i]);
  767                                 vm_page_unlock(m[i]);
  768                         }
  769                 VM_OBJECT_WUNLOCK(object);
  770                 PCPU_INC(cnt.v_vnodein);
  771                 PCPU_INC(cnt.v_vnodepgsin);
  772                 return (vnode_pager_input_smlfs(object, m[reqpage]));
  773         }
  774 
  775         /*
  776          * If we have a completely valid page available to us, we can
  777          * clean up and return.  Otherwise we have to re-read the
  778          * media.
  779          */
  780         VM_OBJECT_WLOCK(object);
  781         if (m[reqpage]->valid == VM_PAGE_BITS_ALL) {
  782                 for (i = 0; i < count; i++)
  783                         if (i != reqpage) {
  784                                 vm_page_lock(m[i]);
  785                                 vm_page_free(m[i]);
  786                                 vm_page_unlock(m[i]);
  787                         }
  788                 VM_OBJECT_WUNLOCK(object);
  789                 return VM_PAGER_OK;
  790         } else if (reqblock == -1) {
  791                 pmap_zero_page(m[reqpage]);
  792                 KASSERT(m[reqpage]->dirty == 0,
   793                     ("vnode_pager_generic_getpages: page %p is dirty", m[reqpage]));
  794                 m[reqpage]->valid = VM_PAGE_BITS_ALL;
  795                 for (i = 0; i < count; i++)
  796                         if (i != reqpage) {
  797                                 vm_page_lock(m[i]);
  798                                 vm_page_free(m[i]);
  799                                 vm_page_unlock(m[i]);
  800                         }
  801                 VM_OBJECT_WUNLOCK(object);
  802                 return (VM_PAGER_OK);
  803         }
  804         m[reqpage]->valid = 0;
  805         VM_OBJECT_WUNLOCK(object);
  806 
  807         pib = IDX_TO_OFF(m[reqpage]->pindex) % bsize;
  808         pbefore = ((daddr_t)before * bsize + pib) / PAGE_SIZE;
  809         pafter = ((daddr_t)(after + 1) * bsize - pib) / PAGE_SIZE - 1;
  810         first = reqpage < pbefore ? 0 : reqpage - pbefore;
  811         last = reqpage + pafter >= count ? count - 1 : reqpage + pafter;
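               /*
                * Worked example (illustrative, 4096-byte PAGE_SIZE): with
                * bsize 16384 a request page at pindex 5 has pib = 20480 %
                * 16384 = 4096.  If VOP_BMAP() reported before = 1 and
                * after = 0, then pbefore = (16384 + 4096) / 4096 = 5 and
                * pafter = (16384 - 4096) / 4096 - 1 = 2, i.e. up to five
                * preceding and two following pages are contiguous on disk.
                */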
  812         if (first > 0 || last + 1 < count) {
  813                 VM_OBJECT_WLOCK(object);
  814                 for (i = 0; i < first; i++) {
  815                         vm_page_lock(m[i]);
  816                         vm_page_free(m[i]);
  817                         vm_page_unlock(m[i]);
  818                 }
  819                 for (i = last + 1; i < count; i++) {
  820                         vm_page_lock(m[i]);
  821                         vm_page_free(m[i]);
  822                         vm_page_unlock(m[i]);
  823                 }
  824                 VM_OBJECT_WUNLOCK(object);
  825         }
  826 
  827         /*
  828          * here on direct device I/O
  829          */
  830         firstaddr = reqblock;
  831         firstaddr += pib / DEV_BSIZE;
  832         firstaddr -= IDX_TO_OFF(reqpage - first) / DEV_BSIZE;
  833 
  834         /*
  835          * The first and last page have been calculated now, move
  836          * input pages to be zero based, and adjust the count.
  837          */
  838         m += first;
  839         reqpage -= first;
  840         count = last - first + 1;
  841 
  842         /*
  843          * calculate the file virtual address for the transfer
  844          */
  845         foff = IDX_TO_OFF(m[0]->pindex);
  846 
  847         /*
  848          * calculate the size of the transfer
  849          */
  850         size = count * PAGE_SIZE;
  851         KASSERT(count > 0, ("zero count"));
  852         if ((foff + size) > object->un_pager.vnp.vnp_size)
  853                 size = object->un_pager.vnp.vnp_size - foff;
  854         KASSERT(size > 0, ("zero size"));
  855 
  856         /*
  857          * round up physical size for real devices.
  858          */
  859         secmask = bo->bo_bsize - 1;
  860         KASSERT(secmask < PAGE_SIZE && secmask > 0,
  861             ("vnode_pager_generic_getpages: sector size %d too large",
  862             secmask + 1));
  863         size = (size + secmask) & ~secmask;
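               /*
                * E.g. (assumed values): a device with 512-byte sectors
                * gives secmask 511, so a 10000-byte transfer rounds up to
                * (10000 + 511) & ~511 = 10240 bytes.
                */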
  864 
  865         bp = getpbuf(&vnode_pbuf_freecnt);
  866         kva = (vm_offset_t)bp->b_data;
  867 
  868         /*
  869          * and map the pages to be read into the kva, if the filesystem
  870          * requires mapped buffers.
  871          */
  872         mp = vp->v_mount;
  873         if (mp != NULL && (mp->mnt_kern_flag & MNTK_UNMAPPED_BUFS) != 0 &&
  874             unmapped_buf_allowed) {
  875                 bp->b_data = unmapped_buf;
  876                 bp->b_kvabase = unmapped_buf;
  877                 bp->b_offset = 0;
  878                 bp->b_flags |= B_UNMAPPED;
  879                 bp->b_npages = count;
  880                 for (i = 0; i < count; i++)
  881                         bp->b_pages[i] = m[i];
  882         } else
  883                 pmap_qenter(kva, m, count);
  884 
  885         /* build a minimal buffer header */
  886         bp->b_iocmd = BIO_READ;
  887         bp->b_iodone = bdone;
  888         KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
  889         KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
  890         bp->b_rcred = crhold(curthread->td_ucred);
  891         bp->b_wcred = crhold(curthread->td_ucred);
  892         bp->b_blkno = firstaddr;
  893         pbgetbo(bo, bp);
  894         bp->b_vp = vp;
  895         bp->b_bcount = size;
  896         bp->b_bufsize = size;
  897         bp->b_runningbufspace = bp->b_bufsize;
  898         atomic_add_long(&runningbufspace, bp->b_runningbufspace);
  899 
  900         PCPU_INC(cnt.v_vnodein);
  901         PCPU_ADD(cnt.v_vnodepgsin, count);
  902 
  903         /* do the input */
  904         bp->b_iooffset = dbtob(bp->b_blkno);
  905         bstrategy(bp);
  906 
  907         bwait(bp, PVM, "vnread");
  908 
  909         if ((bp->b_ioflags & BIO_ERROR) != 0)
  910                 error = EIO;
  911 
  912         if (error == 0 && size != count * PAGE_SIZE) {
  913                 if ((bp->b_flags & B_UNMAPPED) != 0) {
  914                         bp->b_flags &= ~B_UNMAPPED;
  915                         pmap_qenter(kva, m, count);
  916                 }
  917                 bzero((caddr_t)kva + size, PAGE_SIZE * count - size);
  918         }
  919         if ((bp->b_flags & B_UNMAPPED) == 0)
  920                 pmap_qremove(kva, count);
  921         if (mp != NULL && (mp->mnt_kern_flag & MNTK_UNMAPPED_BUFS) != 0) {
  922                 bp->b_data = (caddr_t)kva;
  923                 bp->b_kvabase = (caddr_t)kva;
  924                 bp->b_flags &= ~B_UNMAPPED;
  925                 for (i = 0; i < count; i++)
  926                         bp->b_pages[i] = NULL;
  927         }
  928 
  929         /*
  930          * free the buffer header back to the swap buffer pool
  931          */
  932         bp->b_vp = NULL;
  933         pbrelbo(bp);
  934         relpbuf(bp, &vnode_pbuf_freecnt);
  935 
  936         VM_OBJECT_WLOCK(object);
  937         for (i = 0, tfoff = foff; i < count; i++, tfoff = nextoff) {
  938                 vm_page_t mt;
  939 
  940                 nextoff = tfoff + PAGE_SIZE;
  941                 mt = m[i];
  942 
  943                 if (nextoff <= object->un_pager.vnp.vnp_size) {
  944                         /*
  945                          * Read filled up entire page.
  946                          */
  947                         mt->valid = VM_PAGE_BITS_ALL;
  948                         KASSERT(mt->dirty == 0,
  949                             ("vnode_pager_generic_getpages: page %p is dirty",
  950                             mt));
  951                         KASSERT(!pmap_page_is_mapped(mt),
  952                             ("vnode_pager_generic_getpages: page %p is mapped",
  953                             mt));
  954                 } else {
  955                         /*
  956                          * Read did not fill up entire page.
  957                          *
  958                          * Currently we do not set the entire page valid,
  959                          * we just try to clear the piece that we couldn't
  960                          * read.
  961                          */
  962                         vm_page_set_valid_range(mt, 0,
  963                             object->un_pager.vnp.vnp_size - tfoff);
  964                         KASSERT((mt->dirty & vm_page_bits(0,
  965                             object->un_pager.vnp.vnp_size - tfoff)) == 0,
  966                             ("vnode_pager_generic_getpages: page %p is dirty",
  967                             mt));
  968                 }
  969                 
  970                 if (i != reqpage)
  971                         vm_page_readahead_finish(mt);
  972         }
  973         VM_OBJECT_WUNLOCK(object);
  974         if (error) {
  975                 printf("vnode_pager_getpages: I/O read error\n");
  976         }
  977         return (error ? VM_PAGER_ERROR : VM_PAGER_OK);
  978 }
  979 
  980 /*
  981  * EOPNOTSUPP is no longer legal.  For local media VFS's that do not
   982  * implement their own VOP_PUTPAGES, their VOP_PUTPAGES should call
  983  * vnode_pager_generic_putpages() to implement the previous behaviour.
  984  *
  985  * All other FS's should use the bypass to get to the local media
  986  * backing vp's VOP_PUTPAGES.
  987  */
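       /*
        * Sketch of the corresponding bypass for a hypothetical local
        * filesystem (names are illustrative; the argument layout matches
        * this FreeBSD version):
        *
        *      static int
        *      myfs_putpages(struct vop_putpages_args *ap)
        *      {
        *              return (vnode_pager_generic_putpages(ap->a_vp,
        *                  ap->a_m, ap->a_count, ap->a_sync, ap->a_rtvals));
        *      }
        */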
  988 static void
  989 vnode_pager_putpages(vm_object_t object, vm_page_t *m, int count,
  990     int flags, int *rtvals)
  991 {
  992         int rtval;
  993         struct vnode *vp;
  994         int bytes = count * PAGE_SIZE;
  995 
  996         /*
  997          * Force synchronous operation if we are extremely low on memory
  998          * to prevent a low-memory deadlock.  VOP operations often need to
   999          * allocate more memory to initiate the I/O (i.e. do a BMAP
  1000          * operation).  The swapper handles the case by limiting the amount
 1001          * of asynchronous I/O, but that sort of solution doesn't scale well
 1002          * for the vnode pager without a lot of work.
 1003          *
 1004          * Also, the backing vnode's iodone routine may not wake the pageout
  1005          * daemon up.  This should probably be addressed.  XXX
 1006          */
 1007 
 1008         if (cnt.v_free_count + cnt.v_cache_count < cnt.v_pageout_free_min)
 1009                 flags |= VM_PAGER_PUT_SYNC;
 1010 
 1011         /*
 1012          * Call device-specific putpages function
 1013          */
 1014         vp = object->handle;
 1015         VM_OBJECT_WUNLOCK(object);
 1016         rtval = VOP_PUTPAGES(vp, m, bytes, flags, rtvals, 0);
 1017         KASSERT(rtval != EOPNOTSUPP, 
 1018             ("vnode_pager: stale FS putpages\n"));
 1019         VM_OBJECT_WLOCK(object);
 1020 }
 1021 
 1022 
 1023 /*
 1024  * This is now called from local media FS's to operate against their
 1025  * own vnodes if they fail to implement VOP_PUTPAGES.
 1026  *
 1027  * This is typically called indirectly via the pageout daemon and
  1028  * clustering has typically already occurred, so in general we ask the
  1029  * underlying filesystem to write the data out asynchronously rather
  1030  * than delayed.
 1031  */
 1032 int
 1033 vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *ma, int bytecount,
 1034     int flags, int *rtvals)
 1035 {
 1036         int i;
 1037         vm_object_t object;
 1038         vm_page_t m;
 1039         int count;
 1040 
 1041         int maxsize, ncount;
 1042         vm_ooffset_t poffset;
 1043         struct uio auio;
 1044         struct iovec aiov;
 1045         int error;
 1046         int ioflags;
 1047         int ppscheck = 0;
 1048         static struct timeval lastfail;
 1049         static int curfail;
 1050 
 1051         object = vp->v_object;
 1052         count = bytecount / PAGE_SIZE;
 1053 
 1054         for (i = 0; i < count; i++)
 1055                 rtvals[i] = VM_PAGER_ERROR;
 1056 
 1057         if ((int64_t)ma[0]->pindex < 0) {
 1058                 printf("vnode_pager_putpages: attempt to write meta-data!!! -- 0x%lx(%lx)\n",
 1059                     (long)ma[0]->pindex, (u_long)ma[0]->dirty);
 1060                 rtvals[0] = VM_PAGER_BAD;
 1061                 return VM_PAGER_BAD;
 1062         }
 1063 
 1064         maxsize = count * PAGE_SIZE;
 1065         ncount = count;
 1066 
 1067         poffset = IDX_TO_OFF(ma[0]->pindex);
 1068 
 1069         /*
  1070          * If the page-aligned write is larger than the actual file, we
  1071          * have to invalidate pages occurring beyond the file EOF.  However,
  1072          * there is an edge case where a file may not be page-aligned, in
  1073          * which case the last page is partially invalid; the filesystem
 1074          * may not properly clear the dirty bits for the entire page (which
 1075          * could be VM_PAGE_BITS_ALL due to the page having been mmap()d).
 1076          * With the page locked we are free to fix-up the dirty bits here.
 1077          *
 1078          * We do not under any circumstances truncate the valid bits, as
 1079          * this will screw up bogus page replacement.
 1080          */
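               /*
                * Worked example (assumes a 4096-byte PAGE_SIZE): with
                * vnp_size 10000 and poffset 8192, maxsize becomes 1808
                * and ncount 1; pgoff = 1808 then directs the code below
                * to clear the dirty bits for bytes 1808..4095 of the
                * final page.
                */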
 1081         VM_OBJECT_WLOCK(object);
 1082         if (maxsize + poffset > object->un_pager.vnp.vnp_size) {
 1083                 if (object->un_pager.vnp.vnp_size > poffset) {
 1084                         int pgoff;
 1085 
 1086                         maxsize = object->un_pager.vnp.vnp_size - poffset;
 1087                         ncount = btoc(maxsize);
 1088                         if ((pgoff = (int)maxsize & PAGE_MASK) != 0) {
 1089                                 /*
 1090                                  * If the object is locked and the following
 1091                                  * conditions hold, then the page's dirty
 1092                                  * field cannot be concurrently changed by a
 1093                                  * pmap operation.
 1094                                  */
 1095                                 m = ma[ncount - 1];
 1096                                 vm_page_assert_sbusied(m);
 1097                                 KASSERT(!pmap_page_is_write_mapped(m),
 1098                 ("vnode_pager_generic_putpages: page %p is not read-only", m));
 1099                                 vm_page_clear_dirty(m, pgoff, PAGE_SIZE -
 1100                                     pgoff);
 1101                         }
 1102                 } else {
 1103                         maxsize = 0;
 1104                         ncount = 0;
 1105                 }
 1106                 if (ncount < count) {
 1107                         for (i = ncount; i < count; i++) {
 1108                                 rtvals[i] = VM_PAGER_BAD;
 1109                         }
 1110                 }
 1111         }
 1112         VM_OBJECT_WUNLOCK(object);
 1113 
 1114         /*
  1115          * pageouts are already clustered, use IO_ASYNC to force a bawrite()
  1116          * rather than a bdwrite() to prevent paging I/O from saturating
  1117          * the buffer cache.  Dummy-up the sequential heuristic to cause
  1118          * large ranges to cluster.  If neither IO_SYNC nor IO_ASYNC is set,
 1119          * the system decides how to cluster.
 1120          */
 1121         ioflags = IO_VMIO;
 1122         if (flags & (VM_PAGER_PUT_SYNC | VM_PAGER_PUT_INVAL))
 1123                 ioflags |= IO_SYNC;
 1124         else if ((flags & VM_PAGER_CLUSTER_OK) == 0)
 1125                 ioflags |= IO_ASYNC;
 1126         ioflags |= (flags & VM_PAGER_PUT_INVAL) ? IO_INVAL: 0;
 1127         ioflags |= IO_SEQMAX << IO_SEQSHIFT;
 1128 
 1129         aiov.iov_base = (caddr_t) 0;
 1130         aiov.iov_len = maxsize;
 1131         auio.uio_iov = &aiov;
 1132         auio.uio_iovcnt = 1;
 1133         auio.uio_offset = poffset;
 1134         auio.uio_segflg = UIO_NOCOPY;
 1135         auio.uio_rw = UIO_WRITE;
 1136         auio.uio_resid = maxsize;
 1137         auio.uio_td = (struct thread *) 0;
 1138         error = VOP_WRITE(vp, &auio, ioflags, curthread->td_ucred);
 1139         PCPU_INC(cnt.v_vnodeout);
 1140         PCPU_ADD(cnt.v_vnodepgsout, ncount);
 1141 
 1142         if (error) {
 1143                 if ((ppscheck = ppsratecheck(&lastfail, &curfail, 1)))
 1144                         printf("vnode_pager_putpages: I/O error %d\n", error);
 1145         }
 1146         if (auio.uio_resid) {
 1147                 if (ppscheck || ppsratecheck(&lastfail, &curfail, 1))
 1148                         printf("vnode_pager_putpages: residual I/O %zd at %lu\n",
 1149                             auio.uio_resid, (u_long)ma[0]->pindex);
 1150         }
 1151         for (i = 0; i < ncount; i++) {
 1152                 rtvals[i] = VM_PAGER_OK;
 1153         }
 1154         return rtvals[0];
 1155 }
 1156 
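       /*
        * Mark the pages covered by a completed pageout clean.  "written"
        * is the number of bytes actually written; a page that was only
        * partially covered keeps the dirty bits for its tail and reports
        * VM_PAGER_AGAIN so that the remainder is written again.
        */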
 1157 void
 1158 vnode_pager_undirty_pages(vm_page_t *ma, int *rtvals, int written)
 1159 {
 1160         vm_object_t obj;
 1161         int i, pos;
 1162 
 1163         if (written == 0)
 1164                 return;
 1165         obj = ma[0]->object;
 1166         VM_OBJECT_WLOCK(obj);
 1167         for (i = 0, pos = 0; pos < written; i++, pos += PAGE_SIZE) {
 1168                 if (pos < trunc_page(written)) {
 1169                         rtvals[i] = VM_PAGER_OK;
 1170                         vm_page_undirty(ma[i]);
 1171                 } else {
 1172                         /* Partially written page. */
 1173                         rtvals[i] = VM_PAGER_AGAIN;
 1174                         vm_page_clear_dirty(ma[i], 0, written & PAGE_MASK);
 1175                 }
 1176         }
 1177         VM_OBJECT_WUNLOCK(obj);
 1178 }
 1179 
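       /*
        * Account for the addition of writable user mappings covering the
        * byte range [start, end).  When writemappings goes from zero to
        * non-zero, the vnode's v_writecount is bumped so the filesystem
        * treats the vnode as open for writing.
        */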
 1180 void
 1181 vnode_pager_update_writecount(vm_object_t object, vm_offset_t start,
 1182     vm_offset_t end)
 1183 {
 1184         struct vnode *vp;
 1185         vm_ooffset_t old_wm;
 1186 
 1187         VM_OBJECT_WLOCK(object);
 1188         if (object->type != OBJT_VNODE) {
 1189                 VM_OBJECT_WUNLOCK(object);
 1190                 return;
 1191         }
 1192         old_wm = object->un_pager.vnp.writemappings;
 1193         object->un_pager.vnp.writemappings += (vm_ooffset_t)end - start;
 1194         vp = object->handle;
 1195         if (old_wm == 0 && object->un_pager.vnp.writemappings != 0) {
 1196                 ASSERT_VOP_ELOCKED(vp, "v_writecount inc");
 1197                 VOP_ADD_WRITECOUNT(vp, 1);
 1198                 CTR3(KTR_VFS, "%s: vp %p v_writecount increased to %d",
 1199                     __func__, vp, vp->v_writecount);
 1200         } else if (old_wm != 0 && object->un_pager.vnp.writemappings == 0) {
 1201                 ASSERT_VOP_ELOCKED(vp, "v_writecount dec");
 1202                 VOP_ADD_WRITECOUNT(vp, -1);
 1203                 CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d",
 1204                     __func__, vp, vp->v_writecount);
 1205         }
 1206         VM_OBJECT_WUNLOCK(object);
 1207 }
 1208 
 1209 void
 1210 vnode_pager_release_writecount(vm_object_t object, vm_offset_t start,
 1211     vm_offset_t end)
 1212 {
 1213         struct vnode *vp;
 1214         struct mount *mp;
 1215         vm_offset_t inc;
 1216 
 1217         VM_OBJECT_WLOCK(object);
 1218 
 1219         /*
 1220          * First, recheck the object type to account for the race when
 1221          * the vnode is reclaimed.
 1222          */
 1223         if (object->type != OBJT_VNODE) {
 1224                 VM_OBJECT_WUNLOCK(object);
 1225                 return;
 1226         }
 1227 
 1228         /*
 1229          * Optimize for the case when writemappings is not going to
 1230          * zero.
 1231          */
 1232         inc = end - start;
 1233         if (object->un_pager.vnp.writemappings != inc) {
 1234                 object->un_pager.vnp.writemappings -= inc;
 1235                 VM_OBJECT_WUNLOCK(object);
 1236                 return;
 1237         }
 1238 
 1239         vp = object->handle;
 1240         vhold(vp);
 1241         VM_OBJECT_WUNLOCK(object);
 1242         mp = NULL;
 1243         vn_start_write(vp, &mp, V_WAIT);
 1244         vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 1245 
 1246         /*
 1247          * Decrement the object's writemappings, by swapping the start
 1248          * and end arguments for vnode_pager_update_writecount().  If
  1249          * there was no race with vnode reclamation, then the
 1250          * vnode's v_writecount is decremented.
 1251          */
 1252         vnode_pager_update_writecount(object, end, start);
 1253         VOP_UNLOCK(vp, 0);
 1254         vdrop(vp);
 1255         if (mp != NULL)
 1256                 vn_finished_write(mp);
 1257 }
