FreeBSD/Linux Kernel Cross Reference
sys/vm/vnode_pager.c


/*
 * (MPSAFE)
 *
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1993, 1994 John S. Dyson
 * Copyright (c) 1995, David Greenman
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)vnode_pager.c 7.5 (Berkeley) 4/20/91
 * $FreeBSD: src/sys/vm/vnode_pager.c,v 1.116.2.7 2002/12/31 09:34:51 dillon Exp $
 */

/*
 * Page to/from files (vnodes).
 */

/*
 * TODO:
 *      Implement VOP_GETPAGES/PUTPAGES interface for filesystems. Will
 *      greatly simplify the vnode_pager.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/vmmeter.h>
#include <sys/conf.h>

#include <cpu/lwbuf.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <vm/vnode_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <vm/vm_page2.h>

static void vnode_pager_dealloc (vm_object_t);
static int vnode_pager_getpage (vm_object_t, vm_page_t *, int);
static void vnode_pager_putpages (vm_object_t, vm_page_t *, int, boolean_t, int *);
static boolean_t vnode_pager_haspage (vm_object_t, vm_pindex_t);

struct pagerops vnodepagerops = {
        vnode_pager_dealloc,
        vnode_pager_getpage,
        vnode_pager_putpages,
        vnode_pager_haspage
};
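
The vnodepagerops table above is how the vnode pager plugs into the generic pager layer: the VM system never calls these functions by name, it dispatches through an ops table selected by the object's type. Below is a minimal standalone sketch of that dispatch idiom; the mini_* names are invented for illustration, and the kernel's real table carries additional entries and context.

    #include <stdio.h>

    /* Invented, simplified mirror of the kernel's pager dispatch. */
    struct mini_object;
    struct mini_page { int pindex; };

    struct mini_pagerops {
            int (*pgo_getpage)(struct mini_object *, struct mini_page **, int);
    };

    struct mini_object {
            const struct mini_pagerops *ops;    /* chosen by object type */
    };

    static int
    demo_getpage(struct mini_object *obj, struct mini_page **mpp, int seqaccess)
    {
            (void)obj;
            (void)seqaccess;
            printf("getpage for pindex %d\n", (*mpp)->pindex);
            return 0;
    }

    static const struct mini_pagerops demo_ops = { demo_getpage };

    int
    main(void)
    {
            struct mini_object obj = { &demo_ops };
            struct mini_page pg = { 42 };
            struct mini_page *mpp = &pg;

            /* The generic layer calls through the table, never the
             * implementation directly. */
            return obj.ops->pgo_getpage(&obj, &mpp, 0);
    }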

static struct krate vbadrate = { 1 };
static struct krate vresrate = { 1 };

long vnode_pbuf_freecnt = -1;   /* start out unlimited */

/*
 * Allocate a VM object for a vnode, typically a regular file vnode.
 *
 * Some additional information is required to generate a properly sized
 * object which covers the entire buffer cache buffer straddling the file
 * EOF.  Userland does not see the extra pages as the VM fault code tests
 * against v_filesize.
 */
vm_object_t
vnode_pager_alloc(void *handle, off_t length, vm_prot_t prot, off_t offset,
                  int blksize, int boff)
{
        vm_object_t object;
        struct vnode *vp;
        off_t loffset;
        vm_pindex_t lsize;

        /*
         * Pageout to vnode, no can do yet.
         */
        if (handle == NULL)
                return (NULL);

        /*
         * XXX hack - This initialization should be put somewhere else.
         */
        if (vnode_pbuf_freecnt < 0) {
            vnode_pbuf_freecnt = nswbuf / 2 + 1;
        }

        /*
         * Serialize potential vnode/object teardowns and interlocks
         */
        vp = (struct vnode *)handle;
        lwkt_gettoken(&vp->v_token);

        /*
         * If the object is being terminated, wait for it to
         * go away.
         */
        object = vp->v_object;
        if (object) {
                vm_object_hold(object);
                KKASSERT((object->flags & OBJ_DEAD) == 0);
        }

        if (VREFCNT(vp) <= 0)
                panic("vnode_pager_alloc: no vnode reference");

        /*
         * Round up to the *next* block, then destroy the buffers in question.
         * Since we are only removing some of the buffers we must rely on the
         * scan count to determine whether a loop is necessary.
         *
         * Destroy any pages beyond the last buffer.
         */
        if (boff < 0)
                boff = (int)(length % blksize);
        if (boff)
                loffset = length + (blksize - boff);
        else
                loffset = length;
        lsize = OFF_TO_IDX(round_page64(loffset));
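        /*
         * Worked example, assuming PAGE_SIZE = 4096: with blksize = 16384,
         * length = 20000, and boff passed in as -1, we get
         * boff = 20000 % 16384 = 3616, loffset = 20000 + (16384 - 3616) =
         * 32768, and lsize = 8 pages.  The object covers the entire 16K
         * buffer straddling EOF, not just the 5 pages backing the
         * 20000-byte file.
         */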

        if (object == NULL) {
                /*
                 * Allocate an object of the appropriate size
                 */
                object = vm_object_allocate_hold(OBJT_VNODE, lsize);
                object->handle = handle;
                vp->v_object = object;
                vp->v_filesize = length;
                if (vp->v_mount && (vp->v_mount->mnt_kern_flag & MNTK_NOMSYNC))
                        vm_object_set_flag(object, OBJ_NOMSYNC);
                vref(vp);
        } else {
                vm_object_reference_quick(object);      /* also vref's */
                if (object->size != lsize) {
                        kprintf("vnode_pager_alloc: Warning, objsize "
                                "mismatch %jd/%jd vp=%p obj=%p\n",
                                (intmax_t)object->size,
                                (intmax_t)lsize,
                                vp, object);
                }
                if (vp->v_filesize != length) {
                        kprintf("vnode_pager_alloc: Warning, filesize "
                                "mismatch %jd/%jd vp=%p obj=%p\n",
                                (intmax_t)vp->v_filesize,
                                (intmax_t)length,
                                vp, object);
                }
        }
        vm_object_drop(object);
        lwkt_reltoken(&vp->v_token);

        return (object);
}

/*
 * Add a ref to a vnode's existing VM object, return the object or
 * NULL if the vnode did not have one.  This does not create the
 * object (we can't since we don't know what the proper blocksize/boff
 * is to match the VFS's use of the buffer cache).
 *
 * The vnode must be referenced and is typically open.  The object should
 * be stable in this situation.
 *
 * Returns the object with an additional reference but not locked.
 */
vm_object_t
vnode_pager_reference(struct vnode *vp)
{
        vm_object_t object;

        if ((object = vp->v_object) != NULL)
                vm_object_reference_quick(object); /* also vref's vnode */
        return (object);
}

static void
vnode_pager_dealloc(vm_object_t object)
{
        struct vnode *vp = object->handle;

        if (vp == NULL)
                panic("vnode_pager_dealloc: pager already dealloced");

        vm_object_pip_wait(object, "vnpdea");

        object->handle = NULL;
        object->type = OBJT_DEAD;
        vp->v_object = NULL;
        vp->v_filesize = NOOFFSET;
        vclrflags(vp, VTEXT | VOBJBUF);
        swap_pager_freespace_all(object);
}

/*
 * Return whether the vnode pager has the requested page.  Return the
 * number of disk-contiguous pages before and after the requested page,
 * not including the requested page.
 */
static boolean_t
vnode_pager_haspage(vm_object_t object, vm_pindex_t pindex)
{
        struct vnode *vp = object->handle;
        off_t loffset;
        off_t doffset;
        int voff;
        int bsize;
        int error;

        /*
         * If no vp or vp is doomed or marked transparent to VM, we do not
         * have the page.
         */
        if ((vp == NULL) || (vp->v_flag & VRECLAIMED))
                return FALSE;

        /*
         * If filesystem no longer mounted or offset beyond end of file we do
         * not have the page.
         */
        loffset = IDX_TO_OFF(pindex);

        if (vp->v_mount == NULL || loffset >= vp->v_filesize)
                return FALSE;

        bsize = vp->v_mount->mnt_stat.f_iosize;
        voff = loffset % bsize;

        /*
         * XXX
         *
         * BMAP returns byte counts before and after, where after
         * is inclusive of the base page.  haspage must return page
         * counts before and after where after does not include the
         * base page.
         *
         * BMAP is allowed to return a *after of 0 for backwards
         * compatibility.  The base page is still considered valid if
         * no error is returned.
         */
        error = VOP_BMAP(vp, loffset - voff, &doffset, NULL, NULL, 0);
        if (error)
                return TRUE;
        if (doffset == NOOFFSET)
                return FALSE;
        return TRUE;
}
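
The XXX block above flags a unit mismatch that is easy to get wrong: BMAP's run lengths are byte counts with "after" including the base page, while haspage's contract is page counts with "after" excluding it. This function sidesteps the issue by passing NULL for the run pointers and using only the translated block address, but a fuller implementation would need a conversion along the lines sketched below (a hypothetical, standalone illustration; names and values are invented).

    #include <stdio.h>

    #define PAGE_SIZE       4096

    /*
     * Convert BMAP-style byte runs ("after" includes the base page) to
     * haspage-style page runs ("after" excludes the base page).
     */
    static void
    bmap_runs_to_pages(long bytes_after, long bytes_before,
                       int *pages_after, int *pages_before)
    {
            *pages_after = (int)(bytes_after / PAGE_SIZE);
            if (*pages_after > 0)
                    --*pages_after;         /* drop the base page */
            *pages_before = (int)(bytes_before / PAGE_SIZE);
    }

    int
    main(void)
    {
            int after, before;

            bmap_runs_to_pages(16384, 8192, &after, &before);
            printf("after=%d before=%d\n", after, before);  /* 3, 2 */
            return 0;
    }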

/*
 * Lets the VM system know about a change in size for a file.
 * We adjust our own internal size and flush any cached pages in
 * the associated object that are affected by the size change.
 *
 * NOTE: This routine may be invoked as a result of a pager put
 * operation (possibly at object termination time), so we must be careful.
 *
 * NOTE: vp->v_filesize is initialized to NOOFFSET (-1), be sure that
 * we do not blow up on the case.  nsize will always be >= 0, however.
 */
void
vnode_pager_setsize(struct vnode *vp, vm_ooffset_t nsize)
{
        vm_pindex_t nobjsize;
        vm_pindex_t oobjsize;
        vm_object_t object;

        object = vp->v_object;
        if (object == NULL)
                return;
        vm_object_hold(object);
        KKASSERT(vp->v_object == object);

        /*
         * Hasn't changed size
         */
        if (nsize == vp->v_filesize) {
                vm_object_drop(object);
                return;
        }

        /*
         * Has changed size.  Adjust the VM object's size and v_filesize
         * before we start scanning pages to prevent new pages from being
         * allocated during the scan.
         */
        nobjsize = OFF_TO_IDX(nsize + PAGE_MASK);
        oobjsize = object->size;
        object->size = nobjsize;

        /*
         * File has shrunk. Toss any cached pages beyond the new EOF.
         */
        if (nsize < vp->v_filesize) {
                vp->v_filesize = nsize;
                if (nobjsize < oobjsize) {
                        vm_object_page_remove(object, nobjsize, oobjsize,
                                              FALSE);
                }
                /*
                 * This gets rid of garbage at the end of a page that is now
                 * only partially backed by the vnode.  Since we are setting
                 * the entire page valid & clean after we are done we have
                 * to be sure that the portion of the page within the file
                 * bounds is already valid.  If it isn't then making it
                 * valid would create a corrupt block.
                 */
                if (nsize & PAGE_MASK) {
                        vm_offset_t kva;
                        vm_page_t m;

                        m = vm_page_lookup_busy_wait(object, OFF_TO_IDX(nsize),
                                                     TRUE, "vsetsz");

                        if (m && m->valid) {
                                int base = (int)nsize & PAGE_MASK;
                                int size = PAGE_SIZE - base;
                                struct lwbuf *lwb;
                                struct lwbuf lwb_cache;

                                /*
                                 * Clear out partial-page garbage in case
                                 * the page has been mapped.
                                 *
                                 * This is byte aligned.
                                 */
                                lwb = lwbuf_alloc(m, &lwb_cache);
                                kva = lwbuf_kva(lwb);
                                bzero((caddr_t)kva + base, size);
                                lwbuf_free(lwb);

                                /*
                                 * XXX work around SMP data integrity race
                                 * by unmapping the page from user processes.
                                 * The garbage we just cleared may be mapped
                                 * to a user process running on another cpu
                                 * and this code is not running through normal
                                 * I/O channels which handle SMP issues for
                                 * us, so unmap page to synchronize all cpus.
                                 *
                                 * XXX should vm_pager_unmap_page() have
                                 * dealt with this?
                                 */
                                vm_page_protect(m, VM_PROT_NONE);

                                /*
                                 * Clear out partial-page dirty bits.  This
                                 * has the side effect of setting the valid
                                 * bits, but that is ok.  There are a bunch
                                 * of places in the VM system where we expect
                                 * m->dirty == VM_PAGE_BITS_ALL.  The file EOF
                                 * case is one of them.  If the page is still
                                 * partially dirty, make it fully dirty.
                                 *
                                 * NOTE: We do not clear out the valid
                                 * bits.  This would prevent bogus_page
                                 * replacement from working properly.
                                 *
                                 * NOTE: We do not want to clear the dirty
                                 * bit for a partial DEV_BSIZE'd truncation!
                                 * This is DEV_BSIZE aligned!
                                 */
                                vm_page_clear_dirty_beg_nonincl(m, base, size);
                                if (m->dirty != 0)
                                        m->dirty = VM_PAGE_BITS_ALL;
                                vm_page_wakeup(m);
                        } else if (m) {
                                vm_page_wakeup(m);
                        }
                }
        } else {
                vp->v_filesize = nsize;
        }
        vm_object_drop(object);
}
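
The partial-page zeroing done above is observable from userland. The small POSIX program below (an illustration, not part of this file; the filename is arbitrary) shrinks a file so that EOF lands mid-page and then reads past EOF through a pre-existing mapping. On BSD-derived kernels the tail bytes read back as zero precisely because the truncation path cleared them.

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int
    main(void)
    {
            int fd = open("demo.dat", O_RDWR | O_CREAT | O_TRUNC, 0644);
            char *p;

            if (fd < 0)
                    return 1;
            if (ftruncate(fd, 4096) < 0)            /* one full page */
                    return 1;
            p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            if (p == MAP_FAILED)
                    return 1;
            memset(p, 'A', 4096);                   /* dirty the whole page */

            /*
             * Shrink so EOF falls mid-page; the kernel must now zero
             * the tail of the partially-backed page.
             */
            if (ftruncate(fd, 100) < 0)
                    return 1;
            printf("byte 99 = '%c', byte 100 = %d (expect 'A', then 0)\n",
                   p[99], p[100]);

            munmap(p, 4096);
            close(fd);
            unlink("demo.dat");
            return 0;
    }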

/*
 * Release a page busied for a getpages operation.  The page may have become
 * wired (typically due to being used by the buffer cache) or otherwise been
 * soft-busied and cannot be freed in that case.  A held page can still be
 * freed.
 */
void
vnode_pager_freepage(vm_page_t m)
{
        if (m->busy || m->wire_count || (m->flags & PG_NEED_COMMIT)) {
                vm_page_activate(m);
                vm_page_wakeup(m);
        } else {
                vm_page_free(m);
        }
}

/*
 * EOPNOTSUPP is no longer legal.  For local media VFS's that do not
 * implement their own VOP_GETPAGES, their VOP_GETPAGES should call
 * vnode_pager_generic_getpages() to implement the previous behaviour.
 *
 * All other FS's should use the bypass to get to the local media
 * backing vp's VOP_GETPAGES.
 */
static int
vnode_pager_getpage(vm_object_t object, vm_page_t *mpp, int seqaccess)
{
        int rtval;
        struct vnode *vp;

        vp = object->handle;
        rtval = VOP_GETPAGES(vp, mpp, PAGE_SIZE, 0, 0, seqaccess);
        if (rtval == EOPNOTSUPP)
                panic("vnode_pager: vfs's must implement vop_getpages");
        return rtval;
}

/*
 * This is now called from local media FS's to operate against their
 * own vnodes if they fail to implement VOP_GETPAGES.
 *
 * With all the caching local media devices do these days there is really
 * very little point in attempting to restrict the I/O size to contiguous
 * blocks on-disk, especially if our caller thinks we need all the specified
 * pages.  Just construct and issue a READ.
 */
int
vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *mpp, int bytecount,
                             int reqpage, int seqaccess)
{
        struct iovec aiov;
        struct uio auio;
        off_t foff;
        int error;
        int count;
        int i;
        int ioflags;

        /*
         * Do not do anything if the vnode is bad.
         */
        if (vp->v_mount == NULL)
                return VM_PAGER_BAD;

        /*
         * Calculate the number of pages.  Since we are paging in whole
         * pages, adjust bytecount to be an integral multiple of the page
         * size.  It will be clipped to the file EOF later on.
         */
        bytecount = round_page(bytecount);
        count = bytecount / PAGE_SIZE;

        /*
         * We could check m[reqpage]->valid here and shortcut the operation,
         * but doing so breaks read-ahead.  Instead assume that the VM
         * system has already done at least the check, don't worry about
         * any races, and issue the VOP_READ to allow read-ahead to function.
         *
         * This keeps the pipeline full for I/O bound sequentially scanned
         * mmap()'s.
         */
        /* don't shortcut */

        /*
         * Discard pages past the file EOF.  If the requested page is past
         * the file EOF we just leave its valid bits set to 0, the caller
         * expects to maintain ownership of the requested page.  If the
         * entire range is past file EOF discard everything and generate
         * a pagein error.
         */
        foff = IDX_TO_OFF(mpp[0]->pindex);
        if (foff >= vp->v_filesize) {
                for (i = 0; i < count; i++) {
                        if (i != reqpage)
                                vnode_pager_freepage(mpp[i]);
                }
                return VM_PAGER_ERROR;
        }

        if (foff + bytecount > vp->v_filesize) {
                bytecount = vp->v_filesize - foff;
                i = round_page(bytecount) / PAGE_SIZE;
                while (count > i) {
                        --count;
                        if (count != reqpage)
                                vnode_pager_freepage(mpp[count]);
                }
        }

        /*
         * The size of the transfer is bytecount.  bytecount will be an
         * integral multiple of the page size unless it has been clipped
         * to the file EOF.  The transfer cannot exceed the file EOF.
         *
         * When dealing with real devices we must round-up to the device
         * sector size.
         */
        if (vp->v_type == VBLK || vp->v_type == VCHR) {
                int secmask = vp->v_rdev->si_bsize_phys - 1;
                KASSERT(secmask < PAGE_SIZE, ("vnode_pager_generic_getpages: sector size %d too large", secmask + 1));
                bytecount = (bytecount + secmask) & ~secmask;
        }
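        /*
         * Example of the round-up above: with a 512-byte physical
         * sector, secmask = 0x1ff, so a bytecount of 1000 becomes
         * (1000 + 511) & ~511 = 1024.  The mask trick requires
         * si_bsize_phys to be a power of two.
         */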

        /*
         * Severe hack to avoid deadlocks with the buffer cache
         */
        for (i = 0; i < count; ++i) {
                vm_page_t mt = mpp[i];

                vm_page_io_start(mt);
                vm_page_wakeup(mt);
        }

        /*
         * Issue the I/O with some read-ahead if bytecount > PAGE_SIZE
         */
        ioflags = IO_VMIO;
        if (seqaccess)
                ioflags |= IO_SEQMAX << IO_SEQSHIFT;

        aiov.iov_base = NULL;
        aiov.iov_len = bytecount;
        auio.uio_iov = &aiov;
        auio.uio_iovcnt = 1;
        auio.uio_offset = foff;
        auio.uio_segflg = UIO_NOCOPY;
        auio.uio_rw = UIO_READ;
        auio.uio_resid = bytecount;
        auio.uio_td = NULL;
        mycpu->gd_cnt.v_vnodein++;
        mycpu->gd_cnt.v_vnodepgsin += count;

        error = VOP_READ(vp, &auio, ioflags, proc0.p_ucred);

        /*
         * Severe hack to avoid deadlocks with the buffer cache
         */
        for (i = 0; i < count; ++i) {
                vm_page_busy_wait(mpp[i], FALSE, "getpgs");
                vm_page_io_finish(mpp[i]);
        }

        /*
         * Calculate the actual number of bytes read and clean up the
         * page list.
         */
        bytecount -= auio.uio_resid;

        for (i = 0; i < count; ++i) {
                vm_page_t mt = mpp[i];

                if (i != reqpage) {
                        if (error == 0 && mt->valid) {
                                if (mt->flags & PG_REFERENCED)
                                        vm_page_activate(mt);
                                else
                                        vm_page_deactivate(mt);
                                vm_page_wakeup(mt);
                        } else {
                                vnode_pager_freepage(mt);
                        }
                } else if (mt->valid == 0) {
                        if (error == 0) {
                                kprintf("page failed but no I/O error page "
                                        "%p object %p pindex %d\n",
                                        mt, mt->object, (int) mt->pindex);
                                /* whoops, something happened */
                                error = EINVAL;
                        }
                } else if (mt->valid != VM_PAGE_BITS_ALL) {
                        /*
                         * Zero-extend the requested page if necessary (if
                         * the filesystem is using a small block size).
                         */
                        vm_page_zero_invalid(mt, TRUE);
                }
        }
        if (error) {
                kprintf("vnode_pager_getpage: I/O read error\n");
        }
        return (error ? VM_PAGER_ERROR : VM_PAGER_OK);
}
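
For orientation, the usual way into this code is a page fault on a mmap()'d file: vm_fault() finds an invalid page in an OBJT_VNODE object and asks the pager to fill it, which lands in vnode_pager_getpage() above. A minimal userland trigger (assuming /etc/services exists and is at least one byte long) is:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int
    main(void)
    {
            int fd = open("/etc/services", O_RDONLY);
            char *p;

            if (fd < 0)
                    return 1;
            p = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 0);
            if (p == MAP_FAILED)
                    return 1;

            /*
             * The first touch faults; the fault handler resolves it
             * through the object's pager, i.e. the getpages path above.
             */
            printf("first byte: %c\n", p[0]);

            munmap(p, 4096);
            close(fd);
            return 0;
    }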

/*
 * EOPNOTSUPP is no longer legal.  For local media VFS's that do not
 * implement their own VOP_PUTPAGES, their VOP_PUTPAGES should call
 * vnode_pager_generic_putpages() to implement the previous behaviour.
 *
 * Caller has already cleared the pmap modified bits, if any.
 *
 * All other FS's should use the bypass to get to the local media
 * backing vp's VOP_PUTPAGES.
 */
static void
vnode_pager_putpages(vm_object_t object, vm_page_t *m, int count,
                     boolean_t sync, int *rtvals)
{
        int rtval;
        struct vnode *vp;
        int bytes = count * PAGE_SIZE;

        /*
         * Force synchronous operation if we are extremely low on memory
         * to prevent a low-memory deadlock.  VOP operations often need to
         * allocate more memory to initiate the I/O (i.e. do a BMAP
         * operation).  The swapper handles the case by limiting the amount
         * of asynchronous I/O, but that sort of solution doesn't scale well
         * for the vnode pager without a lot of work.
         *
         * Also, the backing vnode's iodone routine may not wake the pageout
         * daemon up.  This should probably be addressed. XXX
         */

        if ((vmstats.v_free_count + vmstats.v_cache_count) <
            vmstats.v_pageout_free_min) {
                sync |= OBJPC_SYNC;
        }

        /*
         * Call device-specific putpages function
         */
        vp = object->handle;
        rtval = VOP_PUTPAGES(vp, m, bytes, sync, rtvals, 0);
        if (rtval == EOPNOTSUPP) {
            kprintf("vnode_pager: *** WARNING *** stale FS putpages\n");
            rtval = vnode_pager_generic_putpages(vp, m, bytes, sync, rtvals);
        }
}


/*
 * This is now called from local media FS's to operate against their
 * own vnodes if they fail to implement VOP_PUTPAGES.
 *
 * This is typically called indirectly via the pageout daemon and
 * clustering has typically already occurred, so in general we ask the
 * underlying filesystem to write the data out asynchronously rather
 * than delayed.
 */
int
vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *m, int bytecount,
                             int flags, int *rtvals)
{
        int i;
        int maxsize, ncount, count;
        vm_ooffset_t poffset;
        struct uio auio;
        struct iovec aiov;
        int error;
        int ioflags;

        count = bytecount / PAGE_SIZE;

        for (i = 0; i < count; i++)
                rtvals[i] = VM_PAGER_AGAIN;

        if ((int) m[0]->pindex < 0) {
                kprintf("vnode_pager_putpages: attempt to write meta-data!!! -- 0x%lx(%x)\n",
                        (long)m[0]->pindex, m[0]->dirty);
                rtvals[0] = VM_PAGER_BAD;
                return VM_PAGER_BAD;
        }

        maxsize = count * PAGE_SIZE;
        ncount = count;

        poffset = IDX_TO_OFF(m[0]->pindex);

        /*
         * If the page-aligned write is larger than the actual file we
         * have to invalidate pages occurring beyond the file EOF.
         *
         * If the file EOF resides in the middle of a page we still clear
         * all of that page's dirty bits later on.  If we didn't it would
         * endlessly re-write.
         *
         * We do not under any circumstances truncate the valid bits, as
         * this will screw up bogus page replacement.
         *
         * The caller has already read-protected the pages.  The VFS must
         * use the buffer cache to wrap the pages.  The pages might not
         * be immediately flushed by the buffer cache but once under its
         * control the pages themselves can wind up being marked clean
         * and their covering buffer cache buffer can be marked dirty.
         */
        if (poffset + maxsize > vp->v_filesize) {
                if (poffset < vp->v_filesize) {
                        maxsize = vp->v_filesize - poffset;
                        ncount = btoc(maxsize);
                } else {
                        maxsize = 0;
                        ncount = 0;
                }
                if (ncount < count) {
                        for (i = ncount; i < count; i++) {
                                rtvals[i] = VM_PAGER_BAD;
                        }
                }
        }

        /*
         * Pageouts are already clustered; use IO_ASYNC to force a bawrite()
         * rather than a bdwrite() to prevent paging I/O from saturating
         * the buffer cache.  Dummy-up the sequential heuristic to cause
         * large ranges to cluster.  If neither IO_SYNC nor IO_ASYNC is set,
         * the system decides how to cluster.
         */
        ioflags = IO_VMIO;
        if (flags & (VM_PAGER_PUT_SYNC | VM_PAGER_PUT_INVAL))
                ioflags |= IO_SYNC;
        else if ((flags & VM_PAGER_CLUSTER_OK) == 0)
                ioflags |= IO_ASYNC;
        ioflags |= (flags & VM_PAGER_PUT_INVAL) ? IO_INVAL: 0;
        ioflags |= IO_SEQMAX << IO_SEQSHIFT;

        aiov.iov_base = (caddr_t) 0;
        aiov.iov_len = maxsize;
        auio.uio_iov = &aiov;
        auio.uio_iovcnt = 1;
        auio.uio_offset = poffset;
        auio.uio_segflg = UIO_NOCOPY;
        auio.uio_rw = UIO_WRITE;
        auio.uio_resid = maxsize;
        auio.uio_td = NULL;
        error = VOP_WRITE(vp, &auio, ioflags, proc0.p_ucred);
        mycpu->gd_cnt.v_vnodeout++;
        mycpu->gd_cnt.v_vnodepgsout += ncount;

        if (error) {
                krateprintf(&vbadrate,
                            "vnode_pager_putpages: I/O error %d\n", error);
        }
        if (auio.uio_resid) {
                krateprintf(&vresrate,
                            "vnode_pager_putpages: residual I/O %zd at %lu\n",
                            auio.uio_resid, (u_long)m[0]->pindex);
        }
        if (error == 0) {
                for (i = 0; i < ncount; i++) {
                        rtvals[i] = VM_PAGER_OK;
                        vm_page_undirty(m[i]);
                }
        }
        return rtvals[0];
}
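
One userland way to drive this path synchronously is msync(MS_SYNC) on a dirty shared mapping, which cleans the object's pages through the pager's putpages path before returning. A small illustration (filename arbitrary, not taken from this file):

    #include <fcntl.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int
    main(void)
    {
            int fd = open("demo.out", O_RDWR | O_CREAT | O_TRUNC, 0644);
            char *p;

            if (fd < 0)
                    return 1;
            if (ftruncate(fd, 4096) < 0)
                    return 1;
            p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            if (p == MAP_FAILED)
                    return 1;

            strcpy(p, "dirty page");        /* dirties the shared page */

            /*
             * MS_SYNC forces the dirty page out through the pager
             * before msync() returns.
             */
            if (msync(p, 4096, MS_SYNC) < 0)
                    return 1;

            munmap(p, 4096);
            close(fd);
            unlink("demo.out");
            return 0;
    }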

/*
 * Run the chain and, if the bottom-most object is a vnode-type object,
 * lock the underlying vnode.  A locked vnode or NULL is returned.
 */
struct vnode *
vnode_pager_lock(vm_object_t object)
{
        struct vnode *vp = NULL;
        vm_object_t lobject;
        vm_object_t tobject;
        int error;

        if (object == NULL)
                return(NULL);

        ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
        lobject = object;

        while (lobject->type != OBJT_VNODE) {
                if (lobject->flags & OBJ_DEAD)
                        break;
                tobject = lobject->backing_object;
                if (tobject == NULL)
                        break;
                vm_object_hold_shared(tobject);
                if (tobject == lobject->backing_object) {
                        if (lobject != object) {
                                vm_object_lock_swap();
                                vm_object_drop(lobject);
                        }
                        lobject = tobject;
                } else {
                        vm_object_drop(tobject);
                }
        }
        while (lobject->type == OBJT_VNODE &&
               (lobject->flags & OBJ_DEAD) == 0) {
                /*
                 * Extract the vp
                 */
                vp = lobject->handle;
                error = vget(vp, LK_SHARED | LK_RETRY | LK_CANRECURSE);
                if (error == 0) {
                        if (lobject->handle == vp)
                                break;
                        vput(vp);
                } else {
                        kprintf("vnode_pager_lock: vp %p error %d "
                                "lockstatus %d, retrying\n",
                                vp, error,
                                lockstatus(&vp->v_lock, curthread));
                        tsleep(object->handle, 0, "vnpgrl", hz);
                }
                vp = NULL;
        }
        if (lobject != object)
                vm_object_drop(lobject);
        return (vp);
}
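
The vget()-then-recheck loop above is an instance of a general acquire-then-revalidate pattern: vget() can block, so lobject->handle must be re-read afterward to confirm the vnode is still the object's backing store, and the attempt is retried otherwise. A distilled sketch of the pattern follows (all names invented; the single-threaded stubs stand in for operations that can block and race in the kernel):

    #include <stdio.h>

    struct resource {
            int refs;
    };

    struct holder {
            struct resource *current;
    };

    static void
    take_ref(struct resource *res)
    {
            /* In the kernel this can block (cf. vget()), giving other
             * threads a chance to change holder->current underneath us. */
            res->refs++;
    }

    static void
    drop_ref(struct resource *res)
    {
            res->refs--;
    }

    static struct resource *
    acquire_stable(struct holder *h)
    {
            struct resource *res;

            while ((res = h->current) != NULL) {
                    take_ref(res);
                    if (h->current == res)          /* revalidate */
                            return res;
                    drop_ref(res);                  /* raced; retry */
            }
            return NULL;
    }

    int
    main(void)
    {
            struct resource r = { 0 };
            struct holder h = { &r };
            struct resource *res = acquire_stable(&h);

            printf("acquired %p, refs = %d\n", (void *)res, res ? res->refs : 0);
            return 0;
    }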
