
FreeBSD/Linux Kernel Cross Reference
sys/vm/vnode_pager.c


    1 /*-
    2  * Copyright (c) 1990 University of Utah.
    3  * Copyright (c) 1991 The Regents of the University of California.
    4  * All rights reserved.
    5  * Copyright (c) 1993, 1994 John S. Dyson
    6  * Copyright (c) 1995, David Greenman
    7  *
    8  * This code is derived from software contributed to Berkeley by
    9  * the Systems Programming Group of the University of Utah Computer
   10  * Science Department.
   11  *
   12  * Redistribution and use in source and binary forms, with or without
   13  * modification, are permitted provided that the following conditions
   14  * are met:
   15  * 1. Redistributions of source code must retain the above copyright
   16  *    notice, this list of conditions and the following disclaimer.
   17  * 2. Redistributions in binary form must reproduce the above copyright
   18  *    notice, this list of conditions and the following disclaimer in the
   19  *    documentation and/or other materials provided with the distribution.
   20  * 3. All advertising materials mentioning features or use of this software
   21  *    must display the following acknowledgement:
   22  *      This product includes software developed by the University of
   23  *      California, Berkeley and its contributors.
   24  * 4. Neither the name of the University nor the names of its contributors
   25  *    may be used to endorse or promote products derived from this software
   26  *    without specific prior written permission.
   27  *
   28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   38  * SUCH DAMAGE.
   39  *
   40  *      from: @(#)vnode_pager.c 7.5 (Berkeley) 4/20/91
   41  */
   42 
   43 /*
   44  * Page to/from files (vnodes).
   45  */
   46 
   47 /*
   48  * TODO:
   49  *      Implement VOP_GETPAGES/PUTPAGES interface for filesystems. Will
    50  *      greatly simplify the vnode_pager.
   51  */
   52 
   53 #include <sys/cdefs.h>
   54 __FBSDID("$FreeBSD: releng/6.2/sys/vm/vnode_pager.c 163340 2006-10-14 06:04:32Z alc $");
   55 
   56 #include <sys/param.h>
   57 #include <sys/systm.h>
   58 #include <sys/proc.h>
   59 #include <sys/vnode.h>
   60 #include <sys/mount.h>
   61 #include <sys/bio.h>
   62 #include <sys/buf.h>
   63 #include <sys/vmmeter.h>
   64 #include <sys/limits.h>
   65 #include <sys/conf.h>
   66 #include <sys/sf_buf.h>
   67 
   68 #include <machine/atomic.h>
   69 
   70 #include <vm/vm.h>
   71 #include <vm/vm_object.h>
   72 #include <vm/vm_page.h>
   73 #include <vm/vm_pager.h>
   74 #include <vm/vm_map.h>
   75 #include <vm/vnode_pager.h>
   76 #include <vm/vm_extern.h>
   77 
   78 static daddr_t vnode_pager_addr(struct vnode *vp, vm_ooffset_t address,
   79                                          int *run);
   80 static int vnode_pager_input_smlfs(vm_object_t object, vm_page_t m);
   81 static int vnode_pager_input_old(vm_object_t object, vm_page_t m);
   82 static void vnode_pager_dealloc(vm_object_t);
   83 static int vnode_pager_getpages(vm_object_t, vm_page_t *, int, int);
   84 static void vnode_pager_putpages(vm_object_t, vm_page_t *, int, boolean_t, int *);
   85 static boolean_t vnode_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
   86 static vm_object_t vnode_pager_alloc(void *, vm_ooffset_t, vm_prot_t, vm_ooffset_t);
   87 
   88 struct pagerops vnodepagerops = {
   89         .pgo_alloc =    vnode_pager_alloc,
   90         .pgo_dealloc =  vnode_pager_dealloc,
   91         .pgo_getpages = vnode_pager_getpages,
   92         .pgo_putpages = vnode_pager_putpages,
   93         .pgo_haspage =  vnode_pager_haspage,
   94 };
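
       /*
        * Illustrative sketch (not from this file): the generic pager layer
        * reaches the hooks above through the pagertab[] table, indexed by
        * object type.  Modeled on vm/vm_pager.h of the same era; treat the
        * exact spelling as an assumption:
        *
        *      static __inline int
        *      example_pager_get_pages(vm_object_t object, vm_page_t *m,
        *          int count, int reqpage)
        *      {
        *
        *              return ((*pagertab[object->type]->pgo_getpages)(object,
        *                  m, count, reqpage));
        *      }
        */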
   95 
   96 int vnode_pbuf_freecnt;
   97 
   98 /*
   99  * Compatibility function for RELENG_6, in which vnode_create_vobject()
   100  * takes the file size as size_t due to an oversight.  The type cannot
   101  * simply be changed to off_t because the ABI to third-party modules
   102  * must be preserved for the lifetime of RELENG_6.
  103  */
  104 int
  105 vnode_create_vobject(struct vnode *vp, size_t isize __unused, struct thread *td)
  106 {
  107 
  108         /*
   109          * A size of 0 indicates to vnode_create_vobject_off() that
   110          * VOP_GETATTR() must be called to obtain the actual size.
  111          */
  112         return (vnode_create_vobject_off(vp, 0, td));
  113 }
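
       /*
        * Hypothetical usage sketch: a caller that holds the vnode lock can
        * create the backing object with a size of 0 and let VOP_GETATTR()
        * supply the real size, per the comment above:
        *
        *      error = vnode_create_vobject(vp, 0, td);
        */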
  114 
  115 /*
  116  * Create the VM system backing object for this vnode -- for RELENG_6 only.
  117  * In HEAD, vnode_create_vobject() has been fixed to take file size as off_t
  118  * and so it can be used as is.
  119  */
  120 int
  121 vnode_create_vobject_off(struct vnode *vp, off_t isize, struct thread *td)
  122 {
  123         vm_object_t object;
  124         vm_ooffset_t size = isize;
  125         struct vattr va;
  126 
  127         if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
  128                 return (0);
  129 
  130         while ((object = vp->v_object) != NULL) {
  131                 VM_OBJECT_LOCK(object);
  132                 if (!(object->flags & OBJ_DEAD)) {
  133                         VM_OBJECT_UNLOCK(object);
  134                         return (0);
  135                 }
  136                 VOP_UNLOCK(vp, 0, td);
  137                 vm_object_set_flag(object, OBJ_DISCONNECTWNT);
  138                 msleep(object, VM_OBJECT_MTX(object), PDROP | PVM, "vodead", 0);
  139                 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
  140         }
  141 
  142         if (size == 0) {
  143                 if (vn_isdisk(vp, NULL)) {
  144                         size = IDX_TO_OFF(INT_MAX);
  145                 } else {
  146                         if (VOP_GETATTR(vp, &va, td->td_ucred, td) != 0)
  147                                 return (0);
  148                         size = va.va_size;
  149                 }
  150         }
  151 
  152         object = vnode_pager_alloc(vp, size, 0, 0);
  153         /*
  154          * Dereference the reference we just created.  This assumes
  155          * that the object is associated with the vp.
  156          */
  157         VM_OBJECT_LOCK(object);
  158         object->ref_count--;
  159         VM_OBJECT_UNLOCK(object);
  160         vrele(vp);
  161 
  162         KASSERT(vp->v_object != NULL, ("vnode_create_vobject: NULL object"));
  163 
  164         return (0);
  165 }
  166 
  167 void
  168 vnode_destroy_vobject(struct vnode *vp)
  169 {
  170         struct vm_object *obj;
  171 
  172         obj = vp->v_object;
  173         if (obj == NULL)
  174                 return;
  175         ASSERT_VOP_LOCKED(vp, "vnode_destroy_vobject");
  176         VM_OBJECT_LOCK(obj);
  177         if (obj->ref_count == 0) {
  178                 /*
  179                  * vclean() may be called twice. The first time
  180                  * removes the primary reference to the object,
  181                  * the second time goes one further and is a
  182                  * special-case to terminate the object.
  183                  *
   184          * Don't double-terminate the object.
  185                  */
  186                 if ((obj->flags & OBJ_DEAD) == 0)
  187                         vm_object_terminate(obj);
  188                 else
  189                         VM_OBJECT_UNLOCK(obj);
  190         } else {
  191                 /*
  192                  * Woe to the process that tries to page now :-).
  193                  */
  194                 vm_pager_deallocate(obj);
  195                 VM_OBJECT_UNLOCK(obj);
  196         }
  197         vp->v_object = NULL;
  198 }
  199 
  200 
  201 /*
  202  * Allocate (or lookup) pager for a vnode.
  203  * Handle is a vnode pointer.
  204  *
  205  * MPSAFE
  206  */
  207 vm_object_t
  208 vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
  209                   vm_ooffset_t offset)
  210 {
  211         vm_object_t object;
  212         struct vnode *vp;
  213 
  214         /*
   215          * Pageout without a vnode handle is not supported yet.
  216          */
  217         if (handle == NULL)
  218                 return (NULL);
  219 
  220         vp = (struct vnode *) handle;
  221 
  222         ASSERT_VOP_LOCKED(vp, "vnode_pager_alloc");
  223 
  224         /*
  225          * If the object is being terminated, wait for it to
  226          * go away.
  227          */
  228         while ((object = vp->v_object) != NULL) {
  229                 VM_OBJECT_LOCK(object);
  230                 if ((object->flags & OBJ_DEAD) == 0)
  231                         break;
  232                 vm_object_set_flag(object, OBJ_DISCONNECTWNT);
  233                 msleep(object, VM_OBJECT_MTX(object), PDROP | PVM, "vadead", 0);
  234         }
  235 
  236         if (vp->v_usecount == 0)
  237                 panic("vnode_pager_alloc: no vnode reference");
  238 
  239         if (object == NULL) {
  240                 /*
   241                  * Allocate an object of the appropriate size.
  242                  */
  243                 object = vm_object_allocate(OBJT_VNODE, OFF_TO_IDX(round_page(size)));
  244 
  245                 object->un_pager.vnp.vnp_size = size;
  246 
  247                 object->handle = handle;
  248                 if (VFS_NEEDSGIANT(vp->v_mount))
  249                         vm_object_set_flag(object, OBJ_NEEDGIANT);
  250                 vp->v_object = object;
  251         } else {
  252                 object->ref_count++;
  253                 VM_OBJECT_UNLOCK(object);
  254         }
  255         vref(vp);
  256         return (object);
  257 }
  258 
  259 /*
  260  *      The object must be locked.
  261  */
  262 static void
  263 vnode_pager_dealloc(object)
  264         vm_object_t object;
  265 {
  266         struct vnode *vp = object->handle;
  267 
  268         if (vp == NULL)
  269                 panic("vnode_pager_dealloc: pager already dealloced");
  270 
  271         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
  272         vm_object_pip_wait(object, "vnpdea");
  273 
  274         object->handle = NULL;
  275         object->type = OBJT_DEAD;
  276         if (object->flags & OBJ_DISCONNECTWNT) {
  277                 vm_object_clear_flag(object, OBJ_DISCONNECTWNT);
  278                 wakeup(object);
  279         }
  280         ASSERT_VOP_LOCKED(vp, "vnode_pager_dealloc");
  281         vp->v_object = NULL;
  282         vp->v_vflag &= ~VV_TEXT;
  283 }
  284 
  285 static boolean_t
  286 vnode_pager_haspage(object, pindex, before, after)
  287         vm_object_t object;
  288         vm_pindex_t pindex;
  289         int *before;
  290         int *after;
  291 {
  292         struct vnode *vp = object->handle;
  293         daddr_t bn;
  294         int err;
  295         daddr_t reqblock;
  296         int poff;
  297         int bsize;
  298         int pagesperblock, blocksperpage;
  299         int vfslocked;
  300 
  301         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
  302         /*
   303          * If there is no vp, or the vp has been doomed, we do not
  304          * have the page.
  305          */
  306         if (vp == NULL || vp->v_iflag & VI_DOOMED)
  307                 return FALSE;
  308         /*
   309          * If the offset is beyond the end of the file, we do
  310          * not have the page.
  311          */
  312         if (IDX_TO_OFF(pindex) >= object->un_pager.vnp.vnp_size)
  313                 return FALSE;
  314 
  315         bsize = vp->v_mount->mnt_stat.f_iosize;
  316         pagesperblock = bsize / PAGE_SIZE;
  317         blocksperpage = 0;
  318         if (pagesperblock > 0) {
  319                 reqblock = pindex / pagesperblock;
  320         } else {
  321                 blocksperpage = (PAGE_SIZE / bsize);
  322                 reqblock = pindex * blocksperpage;
  323         }
  324         VM_OBJECT_UNLOCK(object);
  325         vfslocked = VFS_LOCK_GIANT(vp->v_mount);
  326         err = VOP_BMAP(vp, reqblock, NULL, &bn, after, before);
  327         VFS_UNLOCK_GIANT(vfslocked);
  328         VM_OBJECT_LOCK(object);
  329         if (err)
  330                 return TRUE;
  331         if (bn == -1)
  332                 return FALSE;
  333         if (pagesperblock > 0) {
  334                 poff = pindex - (reqblock * pagesperblock);
  335                 if (before) {
  336                         *before *= pagesperblock;
  337                         *before += poff;
  338                 }
  339                 if (after) {
  340                         int numafter;
  341                         *after *= pagesperblock;
  342                         numafter = pagesperblock - (poff + 1);
  343                         if (IDX_TO_OFF(pindex + numafter) >
  344                             object->un_pager.vnp.vnp_size) {
  345                                 numafter =
  346                                     OFF_TO_IDX(object->un_pager.vnp.vnp_size) -
  347                                     pindex;
  348                         }
  349                         *after += numafter;
  350                 }
  351         } else {
  352                 if (before) {
  353                         *before /= blocksperpage;
  354                 }
  355 
  356                 if (after) {
  357                         *after /= blocksperpage;
  358                 }
  359         }
  360         return TRUE;
  361 }
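
       /*
        * Worked example for the conversion above (assumed sizes): with
        * PAGE_SIZE = 4096 and a 16384-byte filesystem block,
        * pagesperblock = 4.  For pindex = 9, reqblock = 9 / 4 = 2 and
        * poff = 9 - 2 * 4 = 1.  If VOP_BMAP() reports before = 1 and
        * after = 1 (in filesystem blocks), the page counts become
        * before = 1 * 4 + 1 = 5 and, with numafter = 4 - (1 + 1) = 2,
        * after = 1 * 4 + 2 = 6, subject to the EOF clamp above.
        */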
  362 
  363 /*
  364  * Lets the VM system know about a change in size for a file.
  365  * We adjust our own internal size and flush any cached pages in
  366  * the associated object that are affected by the size change.
  367  *
  368  * Note: this routine may be invoked as a result of a pager put
  369  * operation (possibly at object termination time), so we must be careful.
  370  */
  371 void
  372 vnode_pager_setsize(vp, nsize)
  373         struct vnode *vp;
  374         vm_ooffset_t nsize;
  375 {
  376         vm_object_t object;
  377         vm_page_t m;
  378         vm_pindex_t nobjsize;
  379 
  380         if ((object = vp->v_object) == NULL)
  381                 return;
  382         VM_OBJECT_LOCK(object);
  383         if (nsize == object->un_pager.vnp.vnp_size) {
  384                 /*
  385                  * Hasn't changed size
  386                  */
  387                 VM_OBJECT_UNLOCK(object);
  388                 return;
  389         }
  390         nobjsize = OFF_TO_IDX(nsize + PAGE_MASK);
  391         if (nsize < object->un_pager.vnp.vnp_size) {
  392                 /*
  393                  * File has shrunk. Toss any cached pages beyond the new EOF.
  394                  */
  395                 if (nobjsize < object->size)
  396                         vm_object_page_remove(object, nobjsize, object->size,
  397                             FALSE);
  398                 /*
   399                  * This gets rid of garbage at the end of a page that is now
  400                  * only partially backed by the vnode.
  401                  *
  402                  * XXX for some reason (I don't know yet), if we take a
  403                  * completely invalid page and mark it partially valid
  404                  * it can screw up NFS reads, so we don't allow the case.
  405                  */
  406                 if ((nsize & PAGE_MASK) &&
  407                     (m = vm_page_lookup(object, OFF_TO_IDX(nsize))) != NULL &&
  408                     m->valid != 0) {
  409                         int base = (int)nsize & PAGE_MASK;
  410                         int size = PAGE_SIZE - base;
  411 
  412                         /*
  413                          * Clear out partial-page garbage in case
  414                          * the page has been mapped.
  415                          */
  416                         pmap_zero_page_area(m, base, size);
  417 
  418                         /*
  419                          * XXX work around SMP data integrity race
  420                          * by unmapping the page from user processes.
  421                          * The garbage we just cleared may be mapped
  422                          * to a user process running on another cpu
  423                          * and this code is not running through normal
  424                          * I/O channels which handle SMP issues for
  425                          * us, so unmap page to synchronize all cpus.
  426                          *
  427                          * XXX should vm_pager_unmap_page() have
  428                          * dealt with this?
  429                          */
  430                         vm_page_lock_queues();
  431                         pmap_remove_all(m);
  432 
  433                         /*
  434                          * Clear out partial-page dirty bits.  This
  435                          * has the side effect of setting the valid
  436                          * bits, but that is ok.  There are a bunch
   437                          * of places in the VM system where we expect
  438                          * m->dirty == VM_PAGE_BITS_ALL.  The file EOF
  439                          * case is one of them.  If the page is still
  440                          * partially dirty, make it fully dirty.
  441                          *
   442                          * Note that we do not clear out the valid
  443                          * bits.  This would prevent bogus_page
  444                          * replacement from working properly.
  445                          */
  446                         vm_page_set_validclean(m, base, size);
  447                         if (m->dirty != 0)
  448                                 m->dirty = VM_PAGE_BITS_ALL;
  449                         vm_page_unlock_queues();
  450                 }
  451         }
  452         object->un_pager.vnp.vnp_size = nsize;
  453         object->size = nobjsize;
  454         VM_OBJECT_UNLOCK(object);
  455 }
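
       /*
        * Worked example for the partial-page handling above (assumed
        * sizes): truncating to nsize = 6656 with PAGE_SIZE = 4096 gives
        * base = 6656 & PAGE_MASK = 2560 and size = 4096 - 2560 = 1536,
        * so the final 1536 bytes of the last page are zeroed and their
        * dirty bits cleared.
        */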
  456 
  457 /*
   458  * Calculate the disk address (in DEV_BSIZE units) that backs the
   459  * specified byte offset within the file.
  460  */
  461 static daddr_t
  462 vnode_pager_addr(vp, address, run)
  463         struct vnode *vp;
  464         vm_ooffset_t address;
  465         int *run;
  466 {
  467         daddr_t rtaddress;
  468         int bsize;
  469         daddr_t block;
  470         int err;
  471         daddr_t vblock;
  472         daddr_t voffset;
  473 
  474         if (address < 0)
  475                 return -1;
  476 
  477         if (vp->v_iflag & VI_DOOMED)
  478                 return -1;
  479 
  480         bsize = vp->v_mount->mnt_stat.f_iosize;
  481         vblock = address / bsize;
  482         voffset = address % bsize;
  483 
  484         err = VOP_BMAP(vp, vblock, NULL, &block, run, NULL);
  485 
  486         if (err || (block == -1))
  487                 rtaddress = -1;
  488         else {
  489                 rtaddress = block + voffset / DEV_BSIZE;
  490                 if (run) {
  491                         *run += 1;
  492                         *run *= bsize/PAGE_SIZE;
  493                         *run -= voffset/PAGE_SIZE;
  494                 }
  495         }
  496 
  497         return rtaddress;
  498 }
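
       /*
        * Worked example (assumed sizes): with bsize = 8192, PAGE_SIZE =
        * 4096 and DEV_BSIZE = 512, address = 20480 gives vblock = 2 and
        * voffset = 4096.  If VOP_BMAP() returns block = 1000 with
        * *run = 3 (filesystem blocks), then rtaddress = 1000 +
        * 4096 / 512 = 1008 and *run becomes (3 + 1) * 2 - 1 = 7 pages.
        */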
  499 
  500 /*
  501  * small block filesystem vnode pager input
  502  */
  503 static int
  504 vnode_pager_input_smlfs(object, m)
  505         vm_object_t object;
  506         vm_page_t m;
  507 {
  508         int i;
  509         struct vnode *vp;
  510         struct bufobj *bo;
  511         struct buf *bp;
  512         struct sf_buf *sf;
  513         daddr_t fileaddr;
  514         vm_offset_t bsize;
  515         int error = 0;
  516 
  517         vp = object->handle;
  518         if (vp->v_iflag & VI_DOOMED)
  519                 return VM_PAGER_BAD;
  520 
  521         bsize = vp->v_mount->mnt_stat.f_iosize;
  522 
  523         VOP_BMAP(vp, 0, &bo, 0, NULL, NULL);
  524 
  525         sf = sf_buf_alloc(m, 0);
  526 
  527         for (i = 0; i < PAGE_SIZE / bsize; i++) {
  528                 vm_ooffset_t address;
  529 
  530                 if (vm_page_bits(i * bsize, bsize) & m->valid)
  531                         continue;
  532 
  533                 address = IDX_TO_OFF(m->pindex) + i * bsize;
  534                 if (address >= object->un_pager.vnp.vnp_size) {
  535                         fileaddr = -1;
  536                 } else {
  537                         fileaddr = vnode_pager_addr(vp, address, NULL);
  538                 }
  539                 if (fileaddr != -1) {
  540                         bp = getpbuf(&vnode_pbuf_freecnt);
  541 
  542                         /* build a minimal buffer header */
  543                         bp->b_iocmd = BIO_READ;
  544                         bp->b_iodone = bdone;
  545                         KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
  546                         KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
  547                         bp->b_rcred = crhold(curthread->td_ucred);
  548                         bp->b_wcred = crhold(curthread->td_ucred);
  549                         bp->b_data = (caddr_t)sf_buf_kva(sf) + i * bsize;
  550                         bp->b_blkno = fileaddr;
  551                         pbgetbo(bo, bp);
  552                         bp->b_bcount = bsize;
  553                         bp->b_bufsize = bsize;
  554                         bp->b_runningbufspace = bp->b_bufsize;
  555                         atomic_add_int(&runningbufspace, bp->b_runningbufspace);
  556 
  557                         /* do the input */
  558                         bp->b_iooffset = dbtob(bp->b_blkno);
  559                         bstrategy(bp);
  560 
  561                         bwait(bp, PVM, "vnsrd");
  562 
  563                         if ((bp->b_ioflags & BIO_ERROR) != 0)
  564                                 error = EIO;
  565 
  566                         /*
  567                          * free the buffer header back to the swap buffer pool
  568                          */
  569                         pbrelbo(bp);
  570                         relpbuf(bp, &vnode_pbuf_freecnt);
  571                         if (error)
  572                                 break;
  573 
  574                         VM_OBJECT_LOCK(object);
  575                         vm_page_lock_queues();
  576                         vm_page_set_validclean(m, (i * bsize) & PAGE_MASK, bsize);
  577                         vm_page_unlock_queues();
  578                         VM_OBJECT_UNLOCK(object);
  579                 } else {
  580                         VM_OBJECT_LOCK(object);
  581                         vm_page_lock_queues();
  582                         vm_page_set_validclean(m, (i * bsize) & PAGE_MASK, bsize);
  583                         vm_page_unlock_queues();
  584                         VM_OBJECT_UNLOCK(object);
  585                         bzero((caddr_t)sf_buf_kva(sf) + i * bsize, bsize);
  586                 }
  587         }
  588         sf_buf_free(sf);
  589         vm_page_lock_queues();
  590         pmap_clear_modify(m);
  591         vm_page_unlock_queues();
  592         if (error) {
  593                 return VM_PAGER_ERROR;
  594         }
  595         return VM_PAGER_OK;
  596 
  597 }
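
       /*
        * Worked example (assumed sizes): with PAGE_SIZE = 4096 and
        * bsize = 1024, the loop above runs four times, issuing one
        * pbuf-based BIO_READ per 1K fragment that is not already valid
        * and bzero()ing any fragment that lies beyond EOF or has no
        * backing disk block.
        */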
  598 
  599 
  600 /*
  601  * old style vnode pager input routine
  602  */
  603 static int
  604 vnode_pager_input_old(object, m)
  605         vm_object_t object;
  606         vm_page_t m;
  607 {
  608         struct uio auio;
  609         struct iovec aiov;
  610         int error;
  611         int size;
  612         struct sf_buf *sf;
  613         struct vnode *vp;
  614 
  615         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
  616         error = 0;
  617 
  618         /*
  619          * Return failure if beyond current EOF
  620          */
  621         if (IDX_TO_OFF(m->pindex) >= object->un_pager.vnp.vnp_size) {
  622                 return VM_PAGER_BAD;
  623         } else {
  624                 size = PAGE_SIZE;
  625                 if (IDX_TO_OFF(m->pindex) + size > object->un_pager.vnp.vnp_size)
  626                         size = object->un_pager.vnp.vnp_size - IDX_TO_OFF(m->pindex);
  627                 vp = object->handle;
  628                 VM_OBJECT_UNLOCK(object);
  629 
  630                 /*
  631                  * Allocate a kernel virtual address and initialize so that
  632                  * we can use VOP_READ/WRITE routines.
  633                  */
  634                 sf = sf_buf_alloc(m, 0);
  635 
  636                 aiov.iov_base = (caddr_t)sf_buf_kva(sf);
  637                 aiov.iov_len = size;
  638                 auio.uio_iov = &aiov;
  639                 auio.uio_iovcnt = 1;
  640                 auio.uio_offset = IDX_TO_OFF(m->pindex);
  641                 auio.uio_segflg = UIO_SYSSPACE;
  642                 auio.uio_rw = UIO_READ;
  643                 auio.uio_resid = size;
  644                 auio.uio_td = curthread;
  645 
  646                 error = VOP_READ(vp, &auio, 0, curthread->td_ucred);
  647                 if (!error) {
  648                         int count = size - auio.uio_resid;
  649 
  650                         if (count == 0)
  651                                 error = EINVAL;
  652                         else if (count != PAGE_SIZE)
  653                                 bzero((caddr_t)sf_buf_kva(sf) + count,
  654                                     PAGE_SIZE - count);
  655                 }
  656                 sf_buf_free(sf);
  657 
  658                 VM_OBJECT_LOCK(object);
  659         }
  660         vm_page_lock_queues();
  661         pmap_clear_modify(m);
  662         vm_page_undirty(m);
  663         vm_page_unlock_queues();
  664         if (!error)
  665                 m->valid = VM_PAGE_BITS_ALL;
  666         return error ? VM_PAGER_ERROR : VM_PAGER_OK;
  667 }
  668 
  669 /*
  670  * generic vnode pager input routine
  671  */
  672 
  673 /*
  674  * Local media VFS's that do not implement their own VOP_GETPAGES
   675  * should have their VOP_GETPAGES call vnode_pager_generic_getpages()
  676  * to implement the previous behaviour.
  677  *
  678  * All other FS's should use the bypass to get to the local media
  679  * backing vp's VOP_GETPAGES.
  680  */
  681 static int
  682 vnode_pager_getpages(object, m, count, reqpage)
  683         vm_object_t object;
  684         vm_page_t *m;
  685         int count;
  686         int reqpage;
  687 {
  688         int rtval;
  689         struct vnode *vp;
  690         int bytes = count * PAGE_SIZE;
  691         int vfslocked;
  692 
  693         vp = object->handle;
  694         VM_OBJECT_UNLOCK(object);
  695         vfslocked = VFS_LOCK_GIANT(vp->v_mount);
  696         rtval = VOP_GETPAGES(vp, m, bytes, reqpage, 0);
  697         KASSERT(rtval != EOPNOTSUPP,
  698             ("vnode_pager: FS getpages not implemented\n"));
  699         VFS_UNLOCK_GIANT(vfslocked);
  700         VM_OBJECT_LOCK(object);
  701         return rtval;
  702 }
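
       /*
        * Illustrative sketch: a local-media filesystem with no special
        * getpages handling can forward to the generic routine below, in
        * the style of vop_stdgetpages() in kern/vfs_default.c (a sketch
        * reconstructed from memory, not a verbatim copy):
        *
        *      static int
        *      example_getpages(struct vop_getpages_args *ap)
        *      {
        *
        *              return (vnode_pager_generic_getpages(ap->a_vp,
        *                  ap->a_m, ap->a_count, ap->a_reqpage));
        *      }
        */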
  703 
  704 /*
  705  * This is now called from local media FS's to operate against their
  706  * own vnodes if they fail to implement VOP_GETPAGES.
  707  */
  708 int
  709 vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
  710         struct vnode *vp;
  711         vm_page_t *m;
  712         int bytecount;
  713         int reqpage;
  714 {
  715         vm_object_t object;
  716         vm_offset_t kva;
  717         off_t foff, tfoff, nextoff;
  718         int i, j, size, bsize, first;
  719         daddr_t firstaddr, reqblock;
  720         struct bufobj *bo;
  721         int runpg;
  722         int runend;
  723         struct buf *bp;
  724         int count;
  725         int error = 0;
  726 
  727         object = vp->v_object;
  728         count = bytecount / PAGE_SIZE;
  729 
  730         KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
  731             ("vnode_pager_generic_getpages does not support devices"));
  732         if (vp->v_iflag & VI_DOOMED)
  733                 return VM_PAGER_BAD;
  734 
  735         bsize = vp->v_mount->mnt_stat.f_iosize;
  736 
  737         /* get the UNDERLYING device for the file with VOP_BMAP() */
  738 
  739         /*
   740          * Originally, we did not check for an error return value, assuming
   741          * a filesystem always has a bmap entry point; that assumption is wrong!
  742          */
  743         foff = IDX_TO_OFF(m[reqpage]->pindex);
  744 
  745         /*
   746          * If we can't bmap, fall back to the old VOP_READ code.
  747          */
  748         if (VOP_BMAP(vp, foff / bsize, &bo, &reqblock, NULL, NULL)) {
  749                 VM_OBJECT_LOCK(object);
  750                 vm_page_lock_queues();
  751                 for (i = 0; i < count; i++)
  752                         if (i != reqpage)
  753                                 vm_page_free(m[i]);
  754                 vm_page_unlock_queues();
  755                 cnt.v_vnodein++;
  756                 cnt.v_vnodepgsin++;
  757                 error = vnode_pager_input_old(object, m[reqpage]);
  758                 VM_OBJECT_UNLOCK(object);
  759                 return (error);
  760 
  761                 /*
  762                  * if the blocksize is smaller than a page size, then use
  763                  * special small filesystem code.  NFS sometimes has a small
  764                  * blocksize, but it can handle large reads itself.
  765                  */
  766         } else if ((PAGE_SIZE / bsize) > 1 &&
  767             (vp->v_mount->mnt_stat.f_type != nfs_mount_type)) {
  768                 VM_OBJECT_LOCK(object);
  769                 vm_page_lock_queues();
  770                 for (i = 0; i < count; i++)
  771                         if (i != reqpage)
  772                                 vm_page_free(m[i]);
  773                 vm_page_unlock_queues();
  774                 VM_OBJECT_UNLOCK(object);
  775                 cnt.v_vnodein++;
  776                 cnt.v_vnodepgsin++;
  777                 return vnode_pager_input_smlfs(object, m[reqpage]);
  778         }
  779 
  780         /*
  781          * If we have a completely valid page available to us, we can
  782          * clean up and return.  Otherwise we have to re-read the
  783          * media.
  784          */
  785         VM_OBJECT_LOCK(object);
  786         if (m[reqpage]->valid == VM_PAGE_BITS_ALL) {
  787                 vm_page_lock_queues();
  788                 for (i = 0; i < count; i++)
  789                         if (i != reqpage)
  790                                 vm_page_free(m[i]);
  791                 vm_page_unlock_queues();
  792                 VM_OBJECT_UNLOCK(object);
  793                 return VM_PAGER_OK;
  794         } else if (reqblock == -1) {
  795                 pmap_zero_page(m[reqpage]);
  796                 vm_page_undirty(m[reqpage]);
  797                 m[reqpage]->valid = VM_PAGE_BITS_ALL;
  798                 vm_page_lock_queues();
  799                 for (i = 0; i < count; i++)
  800                         if (i != reqpage)
  801                                 vm_page_free(m[i]);
  802                 vm_page_unlock_queues();
  803                 VM_OBJECT_UNLOCK(object);
  804                 return (VM_PAGER_OK);
  805         }
  806         m[reqpage]->valid = 0;
  807         VM_OBJECT_UNLOCK(object);
  808 
  809         /*
  810          * here on direct device I/O
  811          */
  812         firstaddr = -1;
  813 
  814         /*
  815          * calculate the run that includes the required page
  816          */
  817         for (first = 0, i = 0; i < count; i = runend) {
  818                 firstaddr = vnode_pager_addr(vp,
  819                         IDX_TO_OFF(m[i]->pindex), &runpg);
  820                 if (firstaddr == -1) {
  821                         VM_OBJECT_LOCK(object);
  822                         if (i == reqpage && foff < object->un_pager.vnp.vnp_size) {
  823                                 panic("vnode_pager_getpages: unexpected missing page: firstaddr: %jd, foff: 0x%jx%08jx, vnp_size: 0x%jx%08jx",
  824                                     (intmax_t)firstaddr, (uintmax_t)(foff >> 32),
  825                                     (uintmax_t)foff,
  826                                     (uintmax_t)
  827                                     (object->un_pager.vnp.vnp_size >> 32),
  828                                     (uintmax_t)object->un_pager.vnp.vnp_size);
  829                         }
  830                         vm_page_lock_queues();
  831                         vm_page_free(m[i]);
  832                         vm_page_unlock_queues();
  833                         VM_OBJECT_UNLOCK(object);
  834                         runend = i + 1;
  835                         first = runend;
  836                         continue;
  837                 }
  838                 runend = i + runpg;
  839                 if (runend <= reqpage) {
  840                         VM_OBJECT_LOCK(object);
  841                         vm_page_lock_queues();
  842                         for (j = i; j < runend; j++)
  843                                 vm_page_free(m[j]);
  844                         vm_page_unlock_queues();
  845                         VM_OBJECT_UNLOCK(object);
  846                 } else {
  847                         if (runpg < (count - first)) {
  848                                 VM_OBJECT_LOCK(object);
  849                                 vm_page_lock_queues();
  850                                 for (i = first + runpg; i < count; i++)
  851                                         vm_page_free(m[i]);
  852                                 vm_page_unlock_queues();
  853                                 VM_OBJECT_UNLOCK(object);
  854                                 count = first + runpg;
  855                         }
  856                         break;
  857                 }
  858                 first = runend;
  859         }
  860 
  861         /*
   862          * The first and last pages have now been calculated; shift the
   863          * input pages so that the array is zero-based.
  864          */
  865         if (first != 0) {
  866                 for (i = first; i < count; i++) {
  867                         m[i - first] = m[i];
  868                 }
  869                 count -= first;
  870                 reqpage -= first;
  871         }
  872 
  873         /*
  874          * calculate the file virtual address for the transfer
  875          */
  876         foff = IDX_TO_OFF(m[0]->pindex);
  877 
  878         /*
  879          * calculate the size of the transfer
  880          */
  881         size = count * PAGE_SIZE;
  882         KASSERT(count > 0, ("zero count"));
  883         if ((foff + size) > object->un_pager.vnp.vnp_size)
  884                 size = object->un_pager.vnp.vnp_size - foff;
  885         KASSERT(size > 0, ("zero size"));
  886 
  887         /*
   888          * Round up the physical size to a multiple of the device block size.
  889          */
   890         {
  891                 int secmask = bo->bo_bsize - 1;
  892                 KASSERT(secmask < PAGE_SIZE && secmask > 0,
  893                     ("vnode_pager_generic_getpages: sector size %d too large",
  894                     secmask + 1));
  895                 size = (size + secmask) & ~secmask;
  896         }
  897 
  898         bp = getpbuf(&vnode_pbuf_freecnt);
  899         kva = (vm_offset_t) bp->b_data;
  900 
  901         /*
  902          * and map the pages to be read into the kva
  903          */
  904         pmap_qenter(kva, m, count);
  905 
  906         /* build a minimal buffer header */
  907         bp->b_iocmd = BIO_READ;
  908         bp->b_iodone = bdone;
  909         KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
  910         KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
  911         bp->b_rcred = crhold(curthread->td_ucred);
  912         bp->b_wcred = crhold(curthread->td_ucred);
  913         bp->b_blkno = firstaddr;
  914         pbgetbo(bo, bp);
  915         bp->b_bcount = size;
  916         bp->b_bufsize = size;
  917         bp->b_runningbufspace = bp->b_bufsize;
  918         atomic_add_int(&runningbufspace, bp->b_runningbufspace);
  919 
  920         cnt.v_vnodein++;
  921         cnt.v_vnodepgsin += count;
  922 
  923         /* do the input */
  924         bp->b_iooffset = dbtob(bp->b_blkno);
  925         bstrategy(bp);
  926 
  927         bwait(bp, PVM, "vnread");
  928 
  929         if ((bp->b_ioflags & BIO_ERROR) != 0)
  930                 error = EIO;
  931 
  932         if (!error) {
  933                 if (size != count * PAGE_SIZE)
  934                         bzero((caddr_t) kva + size, PAGE_SIZE * count - size);
  935         }
  936         pmap_qremove(kva, count);
  937 
  938         /*
  939          * free the buffer header back to the swap buffer pool
  940          */
  941         pbrelbo(bp);
  942         relpbuf(bp, &vnode_pbuf_freecnt);
  943 
  944         VM_OBJECT_LOCK(object);
  945         vm_page_lock_queues();
  946         for (i = 0, tfoff = foff; i < count; i++, tfoff = nextoff) {
  947                 vm_page_t mt;
  948 
  949                 nextoff = tfoff + PAGE_SIZE;
  950                 mt = m[i];
  951 
  952                 if (nextoff <= object->un_pager.vnp.vnp_size) {
  953                         /*
  954                          * Read filled up entire page.
  955                          */
  956                         mt->valid = VM_PAGE_BITS_ALL;
  957                         vm_page_undirty(mt);    /* should be an assert? XXX */
  958                         pmap_clear_modify(mt);
  959                 } else {
  960                         /*
  961                          * Read did not fill up entire page.  Since this
  962                          * is getpages, the page may be mapped, so we have
  963                          * to zero the invalid portions of the page even
  964                          * though we aren't setting them valid.
  965                          *
  966                          * Currently we do not set the entire page valid,
  967                          * we just try to clear the piece that we couldn't
  968                          * read.
  969                          */
  970                         vm_page_set_validclean(mt, 0,
  971                             object->un_pager.vnp.vnp_size - tfoff);
  972                         /* handled by vm_fault now */
  973                         /* vm_page_zero_invalid(mt, FALSE); */
  974                 }
  975                 
  976                 if (i != reqpage) {
  977 
  978                         /*
   979                          * Whether or not to leave the page activated is up
   980                          * in the air, but we should put the page on a page
   981                          * queue somewhere (it already is in the object).
   982                          * Empirical results show that deactivating
   983                          * pages is best.
  984                          */
  985 
  986                         /*
   987                          * In case someone was waiting for this page, we
   988                          * now tell them that it is ok to use.
  989                          */
  990                         if (!error) {
  991                                 if (mt->flags & PG_WANTED)
  992                                         vm_page_activate(mt);
  993                                 else
  994                                         vm_page_deactivate(mt);
  995                                 vm_page_wakeup(mt);
  996                         } else {
  997                                 vm_page_free(mt);
  998                         }
  999                 }
 1000         }
 1001         vm_page_unlock_queues();
 1002         VM_OBJECT_UNLOCK(object);
 1003         if (error) {
 1004                 printf("vnode_pager_getpages: I/O read error\n");
 1005         }
 1006         return (error ? VM_PAGER_ERROR : VM_PAGER_OK);
 1007 }
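
       /*
        * Worked example for the transfer sizing above (assumed sizes):
        * with count = 4, foff = 0 and vnp_size = 13000, size = 16384 is
        * clamped to 13000 and then rounded up to the 512-byte sector
        * mask: (13000 + 511) & ~511 = 13312.  After the read completes,
        * bytes 13312..16383 of the mapped kva are bzero()'d and the last
        * page is marked valid only up to EOF.
        */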
 1008 
 1009 /*
 1010  * EOPNOTSUPP is no longer legal.  For local media VFS's that do not
  1011  * implement their own VOP_PUTPAGES, their VOP_PUTPAGES should call
 1012  * vnode_pager_generic_putpages() to implement the previous behaviour.
 1013  *
 1014  * All other FS's should use the bypass to get to the local media
 1015  * backing vp's VOP_PUTPAGES.
 1016  */
 1017 static void
 1018 vnode_pager_putpages(object, m, count, sync, rtvals)
 1019         vm_object_t object;
 1020         vm_page_t *m;
 1021         int count;
 1022         boolean_t sync;
 1023         int *rtvals;
 1024 {
 1025         int rtval;
 1026         struct vnode *vp;
 1027         struct mount *mp;
 1028         int bytes = count * PAGE_SIZE;
 1029 
 1030         /*
 1031          * Force synchronous operation if we are extremely low on memory
 1032          * to prevent a low-memory deadlock.  VOP operations often need to
 1033          * allocate more memory to initiate the I/O ( i.e. do a BMAP 
 1034          * operation ).  The swapper handles the case by limiting the amount
 1035          * of asynchronous I/O, but that sort of solution doesn't scale well
 1036          * for the vnode pager without a lot of work.
 1037          *
 1038          * Also, the backing vnode's iodone routine may not wake the pageout
  1039          * daemon up.  This should probably be addressed.  XXX
 1040          */
 1041 
 1042         if ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min)
 1043                 sync |= OBJPC_SYNC;
 1044 
 1045         /*
 1046          * Call device-specific putpages function
 1047          */
 1048         vp = object->handle;
 1049         VM_OBJECT_UNLOCK(object);
 1050         if (vp->v_type != VREG)
 1051                 mp = NULL;
 1052         rtval = VOP_PUTPAGES(vp, m, bytes, sync, rtvals, 0);
 1053         KASSERT(rtval != EOPNOTSUPP, 
 1054             ("vnode_pager: stale FS putpages\n"));
 1055         VM_OBJECT_LOCK(object);
 1056 }
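
       /*
        * Illustrative sketch: the matching putpages forwarder, in the
        * style of vop_stdputpages() in kern/vfs_default.c (a sketch
        * reconstructed from memory, not a verbatim copy):
        *
        *      static int
        *      example_putpages(struct vop_putpages_args *ap)
        *      {
        *
        *              return (vnode_pager_generic_putpages(ap->a_vp,
        *                  ap->a_m, ap->a_count, ap->a_sync, ap->a_rtvals));
        *      }
        */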
 1057 
 1058 
 1059 /*
 1060  * This is now called from local media FS's to operate against their
 1061  * own vnodes if they fail to implement VOP_PUTPAGES.
 1062  *
 1063  * This is typically called indirectly via the pageout daemon and
  1064  * clustering has typically already occurred, so in general we ask the
  1065  * underlying filesystem to write the data out asynchronously rather
  1066  * than delayed.
 1067  */
 1068 int
 1069 vnode_pager_generic_putpages(vp, m, bytecount, flags, rtvals)
 1070         struct vnode *vp;
 1071         vm_page_t *m;
 1072         int bytecount;
 1073         int flags;
 1074         int *rtvals;
 1075 {
 1076         int i;
 1077         vm_object_t object;
 1078         int count;
 1079 
 1080         int maxsize, ncount;
 1081         vm_ooffset_t poffset;
 1082         struct uio auio;
 1083         struct iovec aiov;
 1084         int error;
 1085         int ioflags;
 1086         int ppscheck = 0;
 1087         static struct timeval lastfail;
 1088         static int curfail;
 1089 
 1090         object = vp->v_object;
 1091         count = bytecount / PAGE_SIZE;
 1092 
 1093         for (i = 0; i < count; i++)
 1094                 rtvals[i] = VM_PAGER_AGAIN;
 1095 
 1096         if ((int64_t)m[0]->pindex < 0) {
 1097                 printf("vnode_pager_putpages: attempt to write meta-data!!! -- 0x%lx(%lx)\n",
 1098                         (long)m[0]->pindex, (u_long)m[0]->dirty);
 1099                 rtvals[0] = VM_PAGER_BAD;
 1100                 return VM_PAGER_BAD;
 1101         }
 1102 
 1103         maxsize = count * PAGE_SIZE;
 1104         ncount = count;
 1105 
 1106         poffset = IDX_TO_OFF(m[0]->pindex);
 1107 
 1108         /*
  1109          * If the page-aligned write is larger than the actual file, we
  1110          * have to invalidate pages occurring beyond the file EOF.  However,
 1111          * there is an edge case where a file may not be page-aligned where
 1112          * the last page is partially invalid.  In this case the filesystem
 1113          * may not properly clear the dirty bits for the entire page (which
 1114          * could be VM_PAGE_BITS_ALL due to the page having been mmap()d).
 1115          * With the page locked we are free to fix-up the dirty bits here.
 1116          *
 1117          * We do not under any circumstances truncate the valid bits, as
 1118          * this will screw up bogus page replacement.
 1119          */
 1120         if (maxsize + poffset > object->un_pager.vnp.vnp_size) {
 1121                 if (object->un_pager.vnp.vnp_size > poffset) {
 1122                         int pgoff;
 1123 
 1124                         maxsize = object->un_pager.vnp.vnp_size - poffset;
 1125                         ncount = btoc(maxsize);
 1126                         if ((pgoff = (int)maxsize & PAGE_MASK) != 0) {
 1127                                 vm_page_lock_queues();
 1128                                 vm_page_clear_dirty(m[ncount - 1], pgoff,
 1129                                         PAGE_SIZE - pgoff);
 1130                                 vm_page_unlock_queues();
 1131                         }
 1132                 } else {
 1133                         maxsize = 0;
 1134                         ncount = 0;
 1135                 }
 1136                 if (ncount < count) {
 1137                         for (i = ncount; i < count; i++) {
 1138                                 rtvals[i] = VM_PAGER_BAD;
 1139                         }
 1140                 }
 1141         }
 1142 
 1143         /*
  1144          * Pageouts are already clustered, so use IO_ASYNC to force a
  1145          * bawrite() rather than a bdwrite() to prevent paging I/O from
  1146          * saturating the buffer cache.  Dummy-up the sequential heuristic
  1147          * to cause large ranges to cluster.  If neither IO_SYNC nor
  1148          * IO_ASYNC is set, the system decides how to cluster.
 1149          */
 1150         ioflags = IO_VMIO;
 1151         if (flags & (VM_PAGER_PUT_SYNC | VM_PAGER_PUT_INVAL))
 1152                 ioflags |= IO_SYNC;
 1153         else if ((flags & VM_PAGER_CLUSTER_OK) == 0)
 1154                 ioflags |= IO_ASYNC;
 1155         ioflags |= (flags & VM_PAGER_PUT_INVAL) ? IO_INVAL: 0;
 1156         ioflags |= IO_SEQMAX << IO_SEQSHIFT;
 1157 
 1158         aiov.iov_base = (caddr_t) 0;
 1159         aiov.iov_len = maxsize;
 1160         auio.uio_iov = &aiov;
 1161         auio.uio_iovcnt = 1;
 1162         auio.uio_offset = poffset;
 1163         auio.uio_segflg = UIO_NOCOPY;
 1164         auio.uio_rw = UIO_WRITE;
 1165         auio.uio_resid = maxsize;
 1166         auio.uio_td = (struct thread *) 0;
 1167         error = VOP_WRITE(vp, &auio, ioflags, curthread->td_ucred);
 1168         cnt.v_vnodeout++;
 1169         cnt.v_vnodepgsout += ncount;
 1170 
 1171         if (error) {
 1172                 if ((ppscheck = ppsratecheck(&lastfail, &curfail, 1)))
 1173                         printf("vnode_pager_putpages: I/O error %d\n", error);
 1174         }
 1175         if (auio.uio_resid) {
 1176                 if (ppscheck || ppsratecheck(&lastfail, &curfail, 1))
 1177                         printf("vnode_pager_putpages: residual I/O %d at %lu\n",
 1178                             auio.uio_resid, (u_long)m[0]->pindex);
 1179         }
 1180         for (i = 0; i < ncount; i++) {
 1181                 rtvals[i] = VM_PAGER_OK;
 1182         }
 1183         return rtvals[0];
 1184 }
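
       /*
        * Worked example for the EOF clamp above (assumed sizes): writing
        * count = 2 pages at poffset = 8192 against vnp_size = 10000 gives
        * maxsize = 10000 - 8192 = 1808 and ncount = btoc(1808) = 1, so
        * pgoff = 1808 and the dirty bits for bytes 1808..4095 of the last
        * in-range page are cleared; rtvals[1] becomes VM_PAGER_BAD.
        */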
 1185 
 1186 struct vnode *
 1187 vnode_pager_lock(vm_object_t first_object)
 1188 {
 1189         struct vnode *vp;
 1190         vm_object_t backing_object, object;
 1191 
 1192         VM_OBJECT_LOCK_ASSERT(first_object, MA_OWNED);
 1193         for (object = first_object; object != NULL; object = backing_object) {
 1194                 if (object->type != OBJT_VNODE) {
 1195                         if ((backing_object = object->backing_object) != NULL)
 1196                                 VM_OBJECT_LOCK(backing_object);
 1197                         if (object != first_object)
 1198                                 VM_OBJECT_UNLOCK(object);
 1199                         continue;
 1200                 }
 1201         retry:
 1202                 if (object->flags & OBJ_DEAD) {
 1203                         if (object != first_object)
 1204                                 VM_OBJECT_UNLOCK(object);
 1205                         return NULL;
 1206                 }
 1207                 vp = object->handle;
 1208                 VI_LOCK(vp);
 1209                 VM_OBJECT_UNLOCK(object);
 1210                 if (first_object != object)
 1211                         VM_OBJECT_UNLOCK(first_object);
 1212                 VFS_ASSERT_GIANT(vp->v_mount);
 1213                 if (vget(vp, LK_CANRECURSE | LK_INTERLOCK |
 1214                     LK_RETRY | LK_SHARED, curthread)) {
 1215                         VM_OBJECT_LOCK(first_object);
 1216                         if (object != first_object)
 1217                                 VM_OBJECT_LOCK(object);
 1218                         if (object->type != OBJT_VNODE) {
 1219                                 if (object != first_object)
 1220                                         VM_OBJECT_UNLOCK(object);
 1221                                 return NULL;
 1222                         }
 1223                         printf("vnode_pager_lock: retrying\n");
 1224                         goto retry;
 1225                 }
 1226                 VM_OBJECT_LOCK(first_object);
 1227                 return (vp);
 1228         }
 1229         return NULL;
 1230 }
