FreeBSD/Linux Kernel Cross Reference
sys/vm/vnode_pager.c


/*-
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1993, 1994 John S. Dyson
 * Copyright (c) 1995, David Greenman
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)vnode_pager.c 7.5 (Berkeley) 4/20/91
 */

/*
 * Page to/from files (vnodes).
 */

/*
 * TODO:
 *      Implement VOP_GETPAGES/PUTPAGES interface for filesystems. Will
 *      greatly simplify the vnode_pager.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vmmeter.h>
#include <sys/limits.h>
#include <sys/conf.h>
#include <sys/sf_buf.h>

#include <machine/atomic.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <vm/vnode_pager.h>
#include <vm/vm_extern.h>

static int vnode_pager_addr(struct vnode *vp, vm_ooffset_t address,
    daddr_t *rtaddress, int *run);
static int vnode_pager_input_smlfs(vm_object_t object, vm_page_t m);
static int vnode_pager_input_old(vm_object_t object, vm_page_t m);
static void vnode_pager_dealloc(vm_object_t);
static int vnode_pager_getpages(vm_object_t, vm_page_t *, int, int);
static void vnode_pager_putpages(vm_object_t, vm_page_t *, int, boolean_t, int *);
static boolean_t vnode_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
static vm_object_t vnode_pager_alloc(void *, vm_ooffset_t, vm_prot_t, vm_ooffset_t);

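/*
 * Method table for the vnode pager.  The VM system reaches these routines
 * through the generic pager interface; e.g. vm_pager_get_pages() on an
 * OBJT_VNODE object dispatches to the .pgo_getpages entry below.
 */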
struct pagerops vnodepagerops = {
        .pgo_alloc =    vnode_pager_alloc,
        .pgo_dealloc =  vnode_pager_dealloc,
        .pgo_getpages = vnode_pager_getpages,
        .pgo_putpages = vnode_pager_putpages,
        .pgo_haspage =  vnode_pager_haspage,
};

int vnode_pbuf_freecnt;

/* Create the VM system backing object for this vnode */
int
vnode_create_vobject(struct vnode *vp, off_t isize, struct thread *td)
{
        vm_object_t object;
        vm_ooffset_t size = isize;
        struct vattr va;

        if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
                return (0);

        while ((object = vp->v_object) != NULL) {
                VM_OBJECT_LOCK(object);
                if (!(object->flags & OBJ_DEAD)) {
                        VM_OBJECT_UNLOCK(object);
                        return (0);
                }
                VOP_UNLOCK(vp, 0, td);
                vm_object_set_flag(object, OBJ_DISCONNECTWNT);
                msleep(object, VM_OBJECT_MTX(object), PDROP | PVM, "vodead", 0);
                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
        }

        if (size == 0) {
                if (vn_isdisk(vp, NULL)) {
                        size = IDX_TO_OFF(INT_MAX);
                } else {
                        if (VOP_GETATTR(vp, &va, td->td_ucred, td) != 0)
                                return (0);
                        size = va.va_size;
                }
        }

        object = vnode_pager_alloc(vp, size, 0, 0);
        /*
         * Dereference the reference we just created.  This assumes
         * that the object is associated with the vp.
         */
        VM_OBJECT_LOCK(object);
        object->ref_count--;
        VM_OBJECT_UNLOCK(object);
        vrele(vp);

        KASSERT(vp->v_object != NULL, ("vnode_create_vobject: NULL object"));

        return (0);
}

void
vnode_destroy_vobject(struct vnode *vp)
{
        struct vm_object *obj;

        obj = vp->v_object;
        if (obj == NULL)
                return;
        ASSERT_VOP_ELOCKED(vp, "vnode_destroy_vobject");
        VM_OBJECT_LOCK(obj);
        if (obj->ref_count == 0) {
                /*
                 * vclean() may be called twice. The first time
                 * removes the primary reference to the object,
                 * the second time goes one further and is a
                 * special-case to terminate the object.
                 *
                 * Don't double-terminate the object.
                 */
                if ((obj->flags & OBJ_DEAD) == 0)
                        vm_object_terminate(obj);
                else
                        VM_OBJECT_UNLOCK(obj);
        } else {
                /*
                 * Woe to the process that tries to page now :-).
                 */
                vm_pager_deallocate(obj);
                VM_OBJECT_UNLOCK(obj);
        }
        vp->v_object = NULL;
}

/*
 * Allocate (or lookup) pager for a vnode.
 * Handle is a vnode pointer.
 *
 * MPSAFE
 */
vm_object_t
vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
                  vm_ooffset_t offset)
{
        vm_object_t object;
        struct vnode *vp;

        /*
         * Pageout to vnode, no can do yet.
         */
        if (handle == NULL)
                return (NULL);

        vp = (struct vnode *) handle;

        /*
         * If the object is being terminated, wait for it to
         * go away.
         */
retry:
        while ((object = vp->v_object) != NULL) {
                VM_OBJECT_LOCK(object);
                if ((object->flags & OBJ_DEAD) == 0)
                        break;
                vm_object_set_flag(object, OBJ_DISCONNECTWNT);
                msleep(object, VM_OBJECT_MTX(object), PDROP | PVM, "vadead", 0);
        }

        if (vp->v_usecount == 0)
                panic("vnode_pager_alloc: no vnode reference");

        if (object == NULL) {
                /*
                 * Add an object of the appropriate size.
                 */
                object = vm_object_allocate(OBJT_VNODE, OFF_TO_IDX(round_page(size)));

                object->un_pager.vnp.vnp_size = size;

                object->handle = handle;
                if (VFS_NEEDSGIANT(vp->v_mount))
                        vm_object_set_flag(object, OBJ_NEEDGIANT);
                VI_LOCK(vp);
                if (vp->v_object != NULL) {
                        /*
                         * Object has been created while we were sleeping.
                         */
                        VI_UNLOCK(vp);
                        vm_object_destroy(object);
                        goto retry;
                }
                vp->v_object = object;
                VI_UNLOCK(vp);
        } else {
                object->ref_count++;
                VM_OBJECT_UNLOCK(object);
        }
        vref(vp);
        return (object);
}

/*
 *      The object must be locked.
 */
static void
vnode_pager_dealloc(vm_object_t object)
{
        struct vnode *vp = object->handle;

        if (vp == NULL)
                panic("vnode_pager_dealloc: pager already dealloced");

        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
        vm_object_pip_wait(object, "vnpdea");

        object->handle = NULL;
        object->type = OBJT_DEAD;
        if (object->flags & OBJ_DISCONNECTWNT) {
                vm_object_clear_flag(object, OBJ_DISCONNECTWNT);
                wakeup(object);
        }
        ASSERT_VOP_ELOCKED(vp, "vnode_pager_dealloc");
        vp->v_object = NULL;
        vp->v_vflag &= ~VV_TEXT;
}

static boolean_t
vnode_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
    int *after)
{
        struct vnode *vp = object->handle;
        daddr_t bn;
        int err;
        daddr_t reqblock;
        int poff;
        int bsize;
        int pagesperblock, blocksperpage;
        int vfslocked;

        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
        /*
         * If there is no vp, or the vp is doomed, we do not
         * have the page.
         */
        if (vp == NULL || vp->v_iflag & VI_DOOMED)
                return FALSE;
        /*
         * If the offset is beyond end of file we do
         * not have the page.
         */
        if (IDX_TO_OFF(pindex) >= object->un_pager.vnp.vnp_size)
                return FALSE;

        bsize = vp->v_mount->mnt_stat.f_iosize;
        pagesperblock = bsize / PAGE_SIZE;
        blocksperpage = 0;
        if (pagesperblock > 0) {
                reqblock = pindex / pagesperblock;
        } else {
                blocksperpage = (PAGE_SIZE / bsize);
                reqblock = pindex * blocksperpage;
        }
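        /*
         * Worked example of the arithmetic above: with 4K pages and a
         * 16K filesystem block size, pagesperblock is 4, so page index 9
         * falls in filesystem block 9 / 4 = 2.  Conversely, with a 1K
         * block size, blocksperpage is 4 and page index 9 starts at
         * filesystem block 9 * 4 = 36.
         */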
        VM_OBJECT_UNLOCK(object);
        vfslocked = VFS_LOCK_GIANT(vp->v_mount);
        err = VOP_BMAP(vp, reqblock, NULL, &bn, after, before);
        VFS_UNLOCK_GIANT(vfslocked);
        VM_OBJECT_LOCK(object);
        if (err)
                return TRUE;
        if (bn == -1)
                return FALSE;
        if (pagesperblock > 0) {
                poff = pindex - (reqblock * pagesperblock);
                if (before) {
                        *before *= pagesperblock;
                        *before += poff;
                }
                if (after) {
                        int numafter;
                        *after *= pagesperblock;
                        numafter = pagesperblock - (poff + 1);
                        if (IDX_TO_OFF(pindex + numafter) >
                            object->un_pager.vnp.vnp_size) {
                                numafter =
                                    OFF_TO_IDX(object->un_pager.vnp.vnp_size) -
                                    pindex;
                        }
                        *after += numafter;
                }
        } else {
                if (before) {
                        *before /= blocksperpage;
                }

                if (after) {
                        *after /= blocksperpage;
                }
        }
        return TRUE;
}

/*
 * Lets the VM system know about a change in size for a file.
 * We adjust our own internal size and flush any cached pages in
 * the associated object that are affected by the size change.
 *
 * Note: this routine may be invoked as a result of a pager put
 * operation (possibly at object termination time), so we must be careful.
 */
void
vnode_pager_setsize(struct vnode *vp, vm_ooffset_t nsize)
{
        vm_object_t object;
        vm_page_t m;
        vm_pindex_t nobjsize;

        if ((object = vp->v_object) == NULL)
                return;
        VM_OBJECT_LOCK(object);
        if (nsize == object->un_pager.vnp.vnp_size) {
                /*
                 * Hasn't changed size.
                 */
                VM_OBJECT_UNLOCK(object);
                return;
        }
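        /*
         * Round the new size up to a page boundary to get the new object
         * size in pages; e.g. with 4K pages, nsize = 10000 yields
         * OFF_TO_IDX(10000 + 4095) = 3 pages.
         */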
        nobjsize = OFF_TO_IDX(nsize + PAGE_MASK);
        if (nsize < object->un_pager.vnp.vnp_size) {
                /*
                 * File has shrunk. Toss any cached pages beyond the new EOF.
                 */
                if (nobjsize < object->size)
                        vm_object_page_remove(object, nobjsize, object->size,
                            FALSE);
                /*
                 * This gets rid of garbage at the end of a page that is now
                 * only partially backed by the vnode.
                 *
                 * XXX for some reason (I don't know yet), if we take a
                 * completely invalid page and mark it partially valid
                 * it can screw up NFS reads, so we don't allow the case.
                 */
                if ((nsize & PAGE_MASK) &&
                    (m = vm_page_lookup(object, OFF_TO_IDX(nsize))) != NULL &&
                    m->valid != 0) {
                        int base = (int)nsize & PAGE_MASK;
                        int size = PAGE_SIZE - base;

                        /*
                         * Clear out partial-page garbage in case
                         * the page has been mapped.
                         */
                        pmap_zero_page_area(m, base, size);

                        /*
                         * XXX work around SMP data integrity race
                         * by unmapping the page from user processes.
                         * The garbage we just cleared may be mapped
                         * to a user process running on another cpu
                         * and this code is not running through normal
                         * I/O channels which handle SMP issues for
                         * us, so unmap page to synchronize all cpus.
                         *
                         * XXX should vm_pager_unmap_page() have
                         * dealt with this?
                         */
                        vm_page_lock_queues();
                        pmap_remove_all(m);

                        /*
                         * Clear out partial-page dirty bits.  This
                         * has the side effect of setting the valid
                         * bits, but that is ok.  There are a bunch
                         * of places in the VM system where we expect
                         * m->dirty == VM_PAGE_BITS_ALL.  The file EOF
                         * case is one of them.  If the page is still
                         * partially dirty, make it fully dirty.
                         *
                         * Note that we do not clear out the valid
                         * bits.  This would prevent bogus_page
                         * replacement from working properly.
                         */
                        vm_page_set_validclean(m, base, size);
                        if (m->dirty != 0)
                                m->dirty = VM_PAGE_BITS_ALL;
                        vm_page_unlock_queues();
                } else if ((nsize & PAGE_MASK) &&
                    __predict_false(object->cache != NULL)) {
                        vm_page_cache_free(object, OFF_TO_IDX(nsize),
                            nobjsize);
                }
        }
        object->un_pager.vnp.vnp_size = nsize;
        object->size = nobjsize;
        VM_OBJECT_UNLOCK(object);
}

/*
 * Map a byte offset within the file to a disk block address (in DEV_BSIZE
 * units) via VOP_BMAP(), and convert the returned block run to pages.
 */
static int
vnode_pager_addr(struct vnode *vp, vm_ooffset_t address, daddr_t *rtaddress,
    int *run)
{
        int bsize;
        int err;
        daddr_t vblock;
        daddr_t voffset;

        if (address < 0)
                return -1;

        if (vp->v_iflag & VI_DOOMED)
                return -1;

        bsize = vp->v_mount->mnt_stat.f_iosize;
        vblock = address / bsize;
        voffset = address % bsize;

        err = VOP_BMAP(vp, vblock, NULL, rtaddress, run, NULL);
        if (err == 0) {
                if (*rtaddress != -1)
                        *rtaddress += voffset / DEV_BSIZE;
                if (run) {
                        *run += 1;
                        *run *= bsize/PAGE_SIZE;
                        *run -= voffset/PAGE_SIZE;
                }
        }

        return (err);
}
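
/*
 * Example of the run conversion in vnode_pager_addr(): with 4K pages, an
 * 8K block size, and address = 20480, vblock is 2 and voffset is 4096.
 * If VOP_BMAP() reports 3 further contiguous blocks (*run == 3), the run
 * becomes (3 + 1) * 2 = 8 pages, minus 4096 / 4096 = 1 page already
 * consumed by the offset, leaving 7 contiguous readable pages.
 */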

/*
 * Small block filesystem vnode pager input.
 */
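/*
 * E.g. with 4K pages on a filesystem using 1K blocks, each page is
 * assembled from up to four individually bmapped block reads; holes
 * (fileaddr == -1) are zero-filled instead of read.
 */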
static int
vnode_pager_input_smlfs(vm_object_t object, vm_page_t m)
{
        int i;
        struct vnode *vp;
        struct bufobj *bo;
        struct buf *bp;
        struct sf_buf *sf;
        daddr_t fileaddr;
        vm_offset_t bsize;
        int error = 0;

        vp = object->handle;
        if (vp->v_iflag & VI_DOOMED)
                return VM_PAGER_BAD;

        bsize = vp->v_mount->mnt_stat.f_iosize;

        VOP_BMAP(vp, 0, &bo, NULL, NULL, NULL);

        sf = sf_buf_alloc(m, 0);

        for (i = 0; i < PAGE_SIZE / bsize; i++) {
                vm_ooffset_t address;

                if (vm_page_bits(i * bsize, bsize) & m->valid)
                        continue;

                address = IDX_TO_OFF(m->pindex) + i * bsize;
                if (address >= object->un_pager.vnp.vnp_size) {
                        fileaddr = -1;
                } else {
                        error = vnode_pager_addr(vp, address, &fileaddr, NULL);
                        if (error)
                                break;
                }
                if (fileaddr != -1) {
                        bp = getpbuf(&vnode_pbuf_freecnt);

                        /* build a minimal buffer header */
                        bp->b_iocmd = BIO_READ;
                        bp->b_iodone = bdone;
                        KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
                        KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
                        bp->b_rcred = crhold(curthread->td_ucred);
                        bp->b_wcred = crhold(curthread->td_ucred);
                        bp->b_data = (caddr_t)sf_buf_kva(sf) + i * bsize;
                        bp->b_blkno = fileaddr;
                        pbgetbo(bo, bp);
                        bp->b_bcount = bsize;
                        bp->b_bufsize = bsize;
                        bp->b_runningbufspace = bp->b_bufsize;
                        atomic_add_int(&runningbufspace, bp->b_runningbufspace);

                        /* do the input */
                        bp->b_iooffset = dbtob(bp->b_blkno);
                        bstrategy(bp);

                        bwait(bp, PVM, "vnsrd");

                        if ((bp->b_ioflags & BIO_ERROR) != 0)
                                error = EIO;

                        /*
                         * free the buffer header back to the swap buffer pool
                         */
                        pbrelbo(bp);
                        relpbuf(bp, &vnode_pbuf_freecnt);
                        if (error)
                                break;

                        VM_OBJECT_LOCK(object);
                        vm_page_lock_queues();
                        vm_page_set_validclean(m, (i * bsize) & PAGE_MASK, bsize);
                        vm_page_unlock_queues();
                        VM_OBJECT_UNLOCK(object);
                } else {
                        VM_OBJECT_LOCK(object);
                        vm_page_lock_queues();
                        vm_page_set_validclean(m, (i * bsize) & PAGE_MASK, bsize);
                        vm_page_unlock_queues();
                        VM_OBJECT_UNLOCK(object);
                        bzero((caddr_t)sf_buf_kva(sf) + i * bsize, bsize);
                }
        }
        sf_buf_free(sf);
        vm_page_lock_queues();
        pmap_clear_modify(m);
        vm_page_unlock_queues();
        if (error) {
                return VM_PAGER_ERROR;
        }
        return VM_PAGER_OK;
}

/*
 * Old style vnode pager input routine (using VOP_READ), used when the
 * filesystem does not support VOP_BMAP().
 */
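/*
 * The target page is temporarily mapped into kernel virtual address space
 * with sf_buf_alloc() so that an ordinary VOP_READ() through a uio can
 * fill it.
 */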
static int
vnode_pager_input_old(vm_object_t object, vm_page_t m)
{
        struct uio auio;
        struct iovec aiov;
        int error;
        int size;
        struct sf_buf *sf;
        struct vnode *vp;

        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
        error = 0;

        /*
         * Return failure if beyond current EOF
         */
        if (IDX_TO_OFF(m->pindex) >= object->un_pager.vnp.vnp_size) {
                return VM_PAGER_BAD;
        } else {
                size = PAGE_SIZE;
                if (IDX_TO_OFF(m->pindex) + size > object->un_pager.vnp.vnp_size)
                        size = object->un_pager.vnp.vnp_size - IDX_TO_OFF(m->pindex);
                vp = object->handle;
                VM_OBJECT_UNLOCK(object);

                /*
                 * Allocate a kernel virtual address and initialize so that
                 * we can use VOP_READ/WRITE routines.
                 */
                sf = sf_buf_alloc(m, 0);

                aiov.iov_base = (caddr_t)sf_buf_kva(sf);
                aiov.iov_len = size;
                auio.uio_iov = &aiov;
                auio.uio_iovcnt = 1;
                auio.uio_offset = IDX_TO_OFF(m->pindex);
                auio.uio_segflg = UIO_SYSSPACE;
                auio.uio_rw = UIO_READ;
                auio.uio_resid = size;
                auio.uio_td = curthread;

                error = VOP_READ(vp, &auio, 0, curthread->td_ucred);
                if (!error) {
                        int count = size - auio.uio_resid;

                        if (count == 0)
                                error = EINVAL;
                        else if (count != PAGE_SIZE)
                                bzero((caddr_t)sf_buf_kva(sf) + count,
                                    PAGE_SIZE - count);
                }
                sf_buf_free(sf);

                VM_OBJECT_LOCK(object);
        }
        vm_page_lock_queues();
        pmap_clear_modify(m);
        vm_page_undirty(m);
        vm_page_unlock_queues();
        if (!error)
                m->valid = VM_PAGE_BITS_ALL;
        return error ? VM_PAGER_ERROR : VM_PAGER_OK;
}

/*
 * Generic vnode pager input routine.
 */

/*
 * Local media VFS's that do not implement their own VOP_GETPAGES
 * should have their VOP_GETPAGES call vnode_pager_generic_getpages()
 * to implement the previous behaviour.
 *
 * All other FS's should use the bypass to get to the local media
 * backing vp's VOP_GETPAGES.
 */
static int
vnode_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
{
        int rtval;
        struct vnode *vp;
        int bytes = count * PAGE_SIZE;
        int vfslocked;

        vp = object->handle;
        VM_OBJECT_UNLOCK(object);
        vfslocked = VFS_LOCK_GIANT(vp->v_mount);
        rtval = VOP_GETPAGES(vp, m, bytes, reqpage, 0);
        KASSERT(rtval != EOPNOTSUPP,
            ("vnode_pager: FS getpages not implemented\n"));
        VFS_UNLOCK_GIANT(vfslocked);
        VM_OBJECT_LOCK(object);
        return rtval;
}

/*
 * This is now called from local media FS's to operate against their
 * own vnodes if they fail to implement VOP_GETPAGES.
 */
int
vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int bytecount,
    int reqpage)
{
        vm_object_t object;
        vm_offset_t kva;
        off_t foff, tfoff, nextoff;
        int i, j, size, bsize, first;
        daddr_t firstaddr, reqblock;
        struct bufobj *bo;
        int runpg;
        int runend;
        struct buf *bp;
        int count;
        int error;

        object = vp->v_object;
        count = bytecount / PAGE_SIZE;

        KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
            ("vnode_pager_generic_getpages does not support devices"));
        if (vp->v_iflag & VI_DOOMED)
                return VM_PAGER_BAD;

        bsize = vp->v_mount->mnt_stat.f_iosize;

        /* get the UNDERLYING device for the file with VOP_BMAP() */

        /*
         * Originally, we did not check for an error return value -- assuming
         * a filesystem always has a bmap entry point -- but that assumption
         * is wrong!
         */
        foff = IDX_TO_OFF(m[reqpage]->pindex);

        /*
         * If we can't bmap, use old VOP code.
         */
        error = VOP_BMAP(vp, foff / bsize, &bo, &reqblock, NULL, NULL);
        if (error == EOPNOTSUPP) {
                VM_OBJECT_LOCK(object);
                vm_page_lock_queues();
                for (i = 0; i < count; i++)
                        if (i != reqpage)
                                vm_page_free(m[i]);
                vm_page_unlock_queues();
                PCPU_INC(cnt.v_vnodein);
                PCPU_INC(cnt.v_vnodepgsin);
                error = vnode_pager_input_old(object, m[reqpage]);
                VM_OBJECT_UNLOCK(object);
                return (error);
        } else if (error != 0) {
                VM_OBJECT_LOCK(object);
                vm_page_lock_queues();
                for (i = 0; i < count; i++)
                        if (i != reqpage)
                                vm_page_free(m[i]);
                vm_page_unlock_queues();
                VM_OBJECT_UNLOCK(object);
                return (VM_PAGER_ERROR);

                /*
                 * If the blocksize is smaller than a page size, then use
                 * special small filesystem code.  NFS sometimes has a small
                 * blocksize, but it can handle large reads itself.
                 */
        } else if ((PAGE_SIZE / bsize) > 1 &&
            (vp->v_mount->mnt_stat.f_type != nfs_mount_type)) {
                VM_OBJECT_LOCK(object);
                vm_page_lock_queues();
                for (i = 0; i < count; i++)
                        if (i != reqpage)
                                vm_page_free(m[i]);
                vm_page_unlock_queues();
                VM_OBJECT_UNLOCK(object);
                PCPU_INC(cnt.v_vnodein);
                PCPU_INC(cnt.v_vnodepgsin);
                return vnode_pager_input_smlfs(object, m[reqpage]);
        }

        /*
         * If we have a completely valid page available to us, we can
         * clean up and return.  Otherwise we have to re-read the
         * media.
         */
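        /*
         * A reqblock of -1 from the VOP_BMAP() call above means the
         * requested page lies over a hole in a sparse file: no I/O is
         * needed, so the page is simply zero-filled and marked fully
         * valid below.
         */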
        VM_OBJECT_LOCK(object);
        if (m[reqpage]->valid == VM_PAGE_BITS_ALL) {
                vm_page_lock_queues();
                for (i = 0; i < count; i++)
                        if (i != reqpage)
                                vm_page_free(m[i]);
                vm_page_unlock_queues();
                VM_OBJECT_UNLOCK(object);
                return VM_PAGER_OK;
        } else if (reqblock == -1) {
                pmap_zero_page(m[reqpage]);
                vm_page_undirty(m[reqpage]);
                m[reqpage]->valid = VM_PAGE_BITS_ALL;
                vm_page_lock_queues();
                for (i = 0; i < count; i++)
                        if (i != reqpage)
                                vm_page_free(m[i]);
                vm_page_unlock_queues();
                VM_OBJECT_UNLOCK(object);
                return (VM_PAGER_OK);
        }
        m[reqpage]->valid = 0;
        VM_OBJECT_UNLOCK(object);

        /*
         * here on direct device I/O
         */
        firstaddr = -1;

        /*
         * calculate the run that includes the required page
         */
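        /*
         * Example: for count = 4 pages with reqpage = 2, each iteration
         * bmaps m[i] and learns how many pages (runpg) are contiguous on
         * disk starting there.  Runs that end at or before reqpage are
         * freed and skipped; the run containing reqpage is kept, and any
         * pages beyond that run are freed and trimmed from count.
         */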
        for (first = 0, i = 0; i < count; i = runend) {
                if (vnode_pager_addr(vp, IDX_TO_OFF(m[i]->pindex), &firstaddr,
                    &runpg) != 0) {
                        VM_OBJECT_LOCK(object);
                        vm_page_lock_queues();
                        for (; i < count; i++)
                                if (i != reqpage)
                                        vm_page_free(m[i]);
                        vm_page_unlock_queues();
                        VM_OBJECT_UNLOCK(object);
                        return (VM_PAGER_ERROR);
                }
                if (firstaddr == -1) {
                        VM_OBJECT_LOCK(object);
                        if (i == reqpage && foff < object->un_pager.vnp.vnp_size) {
                                panic("vnode_pager_getpages: unexpected missing page: firstaddr: %jd, foff: 0x%jx%08jx, vnp_size: 0x%jx%08jx",
                                    (intmax_t)firstaddr, (uintmax_t)(foff >> 32),
                                    (uintmax_t)foff,
                                    (uintmax_t)
                                    (object->un_pager.vnp.vnp_size >> 32),
                                    (uintmax_t)object->un_pager.vnp.vnp_size);
                        }
                        vm_page_lock_queues();
                        vm_page_free(m[i]);
                        vm_page_unlock_queues();
                        VM_OBJECT_UNLOCK(object);
                        runend = i + 1;
                        first = runend;
                        continue;
                }
                runend = i + runpg;
                if (runend <= reqpage) {
                        VM_OBJECT_LOCK(object);
                        vm_page_lock_queues();
                        for (j = i; j < runend; j++)
                                vm_page_free(m[j]);
                        vm_page_unlock_queues();
                        VM_OBJECT_UNLOCK(object);
                } else {
                        if (runpg < (count - first)) {
                                VM_OBJECT_LOCK(object);
                                vm_page_lock_queues();
                                for (i = first + runpg; i < count; i++)
                                        vm_page_free(m[i]);
                                vm_page_unlock_queues();
                                VM_OBJECT_UNLOCK(object);
                                count = first + runpg;
                        }
                        break;
                }
                first = runend;
        }

        /*
         * The first and last page have been calculated now; move input pages
         * to be zero based...
         */
        if (first != 0) {
                m += first;
                count -= first;
                reqpage -= first;
        }

        /*
         * Calculate the file virtual address for the transfer.
         */
        foff = IDX_TO_OFF(m[0]->pindex);

        /*
         * Calculate the size of the transfer.
         */
        size = count * PAGE_SIZE;
        KASSERT(count > 0, ("zero count"));
        if ((foff + size) > object->un_pager.vnp.vnp_size)
                size = object->un_pager.vnp.vnp_size - foff;
        KASSERT(size > 0, ("zero size"));

        /*
         * Round up the physical size to a multiple of the device sector
         * size for real devices.
         */
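        /*
         * E.g. with bo_bsize = 512, secmask is 511 and a 3000-byte
         * transfer is rounded up to 3072 bytes.
         */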
        {
                int secmask = bo->bo_bsize - 1;
                KASSERT(secmask < PAGE_SIZE && secmask > 0,
                    ("vnode_pager_generic_getpages: sector size %d too large",
                    secmask + 1));
                size = (size + secmask) & ~secmask;
        }

        bp = getpbuf(&vnode_pbuf_freecnt);
        kva = (vm_offset_t) bp->b_data;

        /*
         * and map the pages to be read into the kva
         */
        pmap_qenter(kva, m, count);

        /* build a minimal buffer header */
        bp->b_iocmd = BIO_READ;
        bp->b_iodone = bdone;
        KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
        KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
        bp->b_rcred = crhold(curthread->td_ucred);
        bp->b_wcred = crhold(curthread->td_ucred);
        bp->b_blkno = firstaddr;
        pbgetbo(bo, bp);
        bp->b_bcount = size;
        bp->b_bufsize = size;
        bp->b_runningbufspace = bp->b_bufsize;
        atomic_add_int(&runningbufspace, bp->b_runningbufspace);

        PCPU_INC(cnt.v_vnodein);
        PCPU_ADD(cnt.v_vnodepgsin, count);

        /* do the input */
        bp->b_iooffset = dbtob(bp->b_blkno);
        bstrategy(bp);

        bwait(bp, PVM, "vnread");

        if ((bp->b_ioflags & BIO_ERROR) != 0)
                error = EIO;

        if (!error) {
                if (size != count * PAGE_SIZE)
                        bzero((caddr_t) kva + size, PAGE_SIZE * count - size);
        }
        pmap_qremove(kva, count);

        /*
         * free the buffer header back to the swap buffer pool
         */
        pbrelbo(bp);
        relpbuf(bp, &vnode_pbuf_freecnt);

        VM_OBJECT_LOCK(object);
        vm_page_lock_queues();
        for (i = 0, tfoff = foff; i < count; i++, tfoff = nextoff) {
                vm_page_t mt;

                nextoff = tfoff + PAGE_SIZE;
                mt = m[i];

                if (nextoff <= object->un_pager.vnp.vnp_size) {
                        /*
                         * Read filled up entire page.
                         */
                        mt->valid = VM_PAGE_BITS_ALL;
                        vm_page_undirty(mt);    /* should be an assert? XXX */
                        pmap_clear_modify(mt);
                } else {
                        /*
                         * Read did not fill up entire page.  Since this
                         * is getpages, the page may be mapped, so we have
                         * to zero the invalid portions of the page even
                         * though we aren't setting them valid.
                         *
                         * Currently we do not set the entire page valid,
                         * we just try to clear the piece that we couldn't
                         * read.
                         */
                        vm_page_set_validclean(mt, 0,
                            object->un_pager.vnp.vnp_size - tfoff);
                        /* handled by vm_fault now */
                        /* vm_page_zero_invalid(mt, FALSE); */
                }

                if (i != reqpage) {
                        /*
                         * Whether or not to leave the page activated is up in
                         * the air, but we should put the page on a page queue
                         * somewhere (it already is in the object).  Empirical
                         * results show that deactivating the pages is best.
                         */

                        /*
                         * Just in case someone was asking for this page we
                         * now tell them that it is ok to use.
                         */
                        if (!error) {
                                if (mt->oflags & VPO_WANTED)
                                        vm_page_activate(mt);
                                else
                                        vm_page_deactivate(mt);
                                vm_page_wakeup(mt);
                        } else {
                                vm_page_free(mt);
                        }
                }
        }
        vm_page_unlock_queues();
        VM_OBJECT_UNLOCK(object);
        if (error) {
                printf("vnode_pager_getpages: I/O read error\n");
        }
        return (error ? VM_PAGER_ERROR : VM_PAGER_OK);
}

/*
 * EOPNOTSUPP is no longer legal.  Local media VFS's that do not
 * implement their own VOP_PUTPAGES should have their VOP_PUTPAGES call
 * vnode_pager_generic_putpages() to implement the previous behaviour.
 *
 * All other FS's should use the bypass to get to the local media
 * backing vp's VOP_PUTPAGES.
 */
static void
vnode_pager_putpages(vm_object_t object, vm_page_t *m, int count,
    boolean_t sync, int *rtvals)
{
        int rtval;
        struct vnode *vp;
        int bytes = count * PAGE_SIZE;

        /*
         * Force synchronous operation if we are extremely low on memory
         * to prevent a low-memory deadlock.  VOP operations often need to
         * allocate more memory to initiate the I/O ( i.e. do a BMAP
         * operation ).  The swapper handles the case by limiting the amount
         * of asynchronous I/O, but that sort of solution doesn't scale well
         * for the vnode pager without a lot of work.
         *
         * Also, the backing vnode's iodone routine may not wake the pageout
         * daemon up.  This should probably be addressed.  XXX
         */
        if ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min)
                sync |= OBJPC_SYNC;

        /*
         * Call device-specific putpages function.
         */
        vp = object->handle;
        VM_OBJECT_UNLOCK(object);
        rtval = VOP_PUTPAGES(vp, m, bytes, sync, rtvals, 0);
        KASSERT(rtval != EOPNOTSUPP,
            ("vnode_pager: stale FS putpages\n"));
        VM_OBJECT_LOCK(object);
}

/*
 * This is now called from local media FS's to operate against their
 * own vnodes if they fail to implement VOP_PUTPAGES.
 *
 * This is typically called indirectly via the pageout daemon and
 * clustering has already typically occurred, so in general we ask the
 * underlying filesystem to write the data out asynchronously rather
 * than delayed.
 */
int
vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *m, int bytecount,
    int flags, int *rtvals)
{
        int i;
        vm_object_t object;
        int count;

        int maxsize, ncount;
        vm_ooffset_t poffset;
        struct uio auio;
        struct iovec aiov;
        int error;
        int ioflags;
        int ppscheck = 0;
        static struct timeval lastfail;
        static int curfail;

        object = vp->v_object;
        count = bytecount / PAGE_SIZE;

        for (i = 0; i < count; i++)
                rtvals[i] = VM_PAGER_AGAIN;

        if ((int64_t)m[0]->pindex < 0) {
                printf("vnode_pager_putpages: attempt to write meta-data!!! -- 0x%lx(%lx)\n",
                        (long)m[0]->pindex, (u_long)m[0]->dirty);
                rtvals[0] = VM_PAGER_BAD;
                return VM_PAGER_BAD;
        }

        maxsize = count * PAGE_SIZE;
        ncount = count;

        poffset = IDX_TO_OFF(m[0]->pindex);

        /*
         * If the page-aligned write is larger than the actual file we
         * have to invalidate pages occurring beyond the file EOF.  However,
         * there is an edge case where a file may not be page-aligned where
         * the last page is partially invalid.  In this case the filesystem
         * may not properly clear the dirty bits for the entire page (which
         * could be VM_PAGE_BITS_ALL due to the page having been mmap()d).
         * With the page locked we are free to fix-up the dirty bits here.
         *
         * We do not under any circumstances truncate the valid bits, as
         * this will screw up bogus page replacement.
         */
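        /*
         * Example: with 4K pages, count = 4 (maxsize = 16384) and
         * vnp_size - poffset = 10000, maxsize is clamped to 10000 and
         * ncount to btoc(10000) = 3 pages; pgoff = 10000 & 4095 = 1808,
         * so the final 4096 - 1808 bytes of dirty bits in m[2] are
         * cleared, and rtvals[3] is set to VM_PAGER_BAD.
         */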
        if (maxsize + poffset > object->un_pager.vnp.vnp_size) {
                if (object->un_pager.vnp.vnp_size > poffset) {
                        int pgoff;

                        maxsize = object->un_pager.vnp.vnp_size - poffset;
                        ncount = btoc(maxsize);
                        if ((pgoff = (int)maxsize & PAGE_MASK) != 0) {
                                vm_page_lock_queues();
                                vm_page_clear_dirty(m[ncount - 1], pgoff,
                                        PAGE_SIZE - pgoff);
                                vm_page_unlock_queues();
                        }
                } else {
                        maxsize = 0;
                        ncount = 0;
                }
                if (ncount < count) {
                        for (i = ncount; i < count; i++) {
                                rtvals[i] = VM_PAGER_BAD;
                        }
                }
        }

        /*
         * Pageouts are already clustered, so use IO_ASYNC to force a
         * bawrite() rather than a bdwrite() to prevent paging I/O from
         * saturating the buffer cache.  Dummy-up the sequential heuristic
         * to cause large ranges to cluster.  If neither IO_SYNC nor
         * IO_ASYNC is set, the system decides how to cluster.
         */
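        /*
         * Resulting ioflags for the common cases: a plain pageout gets
         * IO_VMIO | IO_ASYNC; VM_PAGER_PUT_SYNC or VM_PAGER_PUT_INVAL gets
         * IO_VMIO | IO_SYNC (plus IO_INVAL for PUT_INVAL); and
         * VM_PAGER_CLUSTER_OK alone yields IO_VMIO, letting the filesystem
         * choose.  IO_SEQMAX << IO_SEQSHIFT maximizes the sequential hint.
         */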
        ioflags = IO_VMIO;
        if (flags & (VM_PAGER_PUT_SYNC | VM_PAGER_PUT_INVAL))
                ioflags |= IO_SYNC;
        else if ((flags & VM_PAGER_CLUSTER_OK) == 0)
                ioflags |= IO_ASYNC;
        ioflags |= (flags & VM_PAGER_PUT_INVAL) ? IO_INVAL: 0;
        ioflags |= IO_SEQMAX << IO_SEQSHIFT;

        aiov.iov_base = (caddr_t) 0;
        aiov.iov_len = maxsize;
        auio.uio_iov = &aiov;
        auio.uio_iovcnt = 1;
        auio.uio_offset = poffset;
        auio.uio_segflg = UIO_NOCOPY;
        auio.uio_rw = UIO_WRITE;
        auio.uio_resid = maxsize;
        auio.uio_td = (struct thread *) 0;
        error = VOP_WRITE(vp, &auio, ioflags, curthread->td_ucred);
        PCPU_INC(cnt.v_vnodeout);
        PCPU_ADD(cnt.v_vnodepgsout, ncount);

        if (error) {
                if ((ppscheck = ppsratecheck(&lastfail, &curfail, 1)))
                        printf("vnode_pager_putpages: I/O error %d\n", error);
        }
        if (auio.uio_resid) {
                if (ppscheck || ppsratecheck(&lastfail, &curfail, 1))
                        printf("vnode_pager_putpages: residual I/O %d at %lu\n",
                            auio.uio_resid, (u_long)m[0]->pindex);
        }
        for (i = 0; i < ncount; i++) {
                rtvals[i] = VM_PAGER_OK;
        }
        return rtvals[0];
}

struct vnode *
vnode_pager_lock(vm_object_t first_object)
{
        struct vnode *vp;
        vm_object_t backing_object, object;

        VM_OBJECT_LOCK_ASSERT(first_object, MA_OWNED);
        for (object = first_object; object != NULL; object = backing_object) {
                if (object->type != OBJT_VNODE) {
                        if ((backing_object = object->backing_object) != NULL)
                                VM_OBJECT_LOCK(backing_object);
                        if (object != first_object)
                                VM_OBJECT_UNLOCK(object);
                        continue;
                }
        retry:
                if (object->flags & OBJ_DEAD) {
                        if (object != first_object)
                                VM_OBJECT_UNLOCK(object);
                        return NULL;
                }
                vp = object->handle;
                VI_LOCK(vp);
                VM_OBJECT_UNLOCK(object);
                if (first_object != object)
                        VM_OBJECT_UNLOCK(first_object);
                VFS_ASSERT_GIANT(vp->v_mount);
                if (vget(vp, LK_CANRECURSE | LK_INTERLOCK |
                    LK_RETRY | LK_SHARED, curthread)) {
                        VM_OBJECT_LOCK(first_object);
                        if (object != first_object)
                                VM_OBJECT_LOCK(object);
                        if (object->type != OBJT_VNODE) {
                                if (object != first_object)
                                        VM_OBJECT_UNLOCK(object);
                                return NULL;
                        }
                        printf("vnode_pager_lock: retrying\n");
                        goto retry;
                }
                VM_OBJECT_LOCK(first_object);
                return (vp);
        }
        return NULL;
}
