FreeBSD/Linux Kernel Cross Reference
sys/vm/swap_pager.c

    1 /*
    2  * Copyright (c) 1998 Matthew Dillon,
    3  * Copyright (c) 1994 John S. Dyson
    4  * Copyright (c) 1990 University of Utah.
    5  * Copyright (c) 1991, 1993
    6  *      The Regents of the University of California.  All rights reserved.
    7  *
    8  * This code is derived from software contributed to Berkeley by
    9  * the Systems Programming Group of the University of Utah Computer
   10  * Science Department.
   11  *
   12  * Redistribution and use in source and binary forms, with or without
   13  * modification, are permitted provided that the following conditions
   14  * are met:
   15  * 1. Redistributions of source code must retain the above copyright
   16  *    notice, this list of conditions and the following disclaimer.
   17  * 2. Redistributions in binary form must reproduce the above copyright
   18  *    notice, this list of conditions and the following disclaimer in the
   19  *    documentation and/or other materials provided with the distribution.
   20  * 3. All advertising materials mentioning features or use of this software
   21  *    must display the following acknowledgement:
   22  *      This product includes software developed by the University of
   23  *      California, Berkeley and its contributors.
   24  * 4. Neither the name of the University nor the names of its contributors
   25  *    may be used to endorse or promote products derived from this software
   26  *    without specific prior written permission.
   27  *
   28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   38  * SUCH DAMAGE.
   39  *
   40  *                              New Swap System
   41  *                              Matthew Dillon
   42  *
   43  * Radix Bitmap 'blists'.
   44  *
   45  *      - The new swapper uses the new radix bitmap code.  This should scale
   46  *        to arbitrarily small or arbitrarily large swap spaces and an almost
   47  *        arbitrary degree of fragmentation.
   48  *
   49  * Features:
   50  *
   51  *      - on the fly reallocation of swap during putpages.  The new system
   52  *        does not try to keep previously allocated swap blocks for dirty
   53  *        pages.  
   54  *
   55  *      - on the fly deallocation of swap
   56  *
   57  *      - No more garbage collection required.  Unnecessarily allocated swap
   58  *        blocks only exist for dirty vm_page_t's now and these are already
   59  *        cycled (in a high-load system) by the pager.  We also do on-the-fly
   60  *        removal of invalidated swap blocks when a page is destroyed
   61  *        or renamed.
   62  *
   63  * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
   64  *
   65  *      @(#)swap_pager.c        8.9 (Berkeley) 3/21/94
   66  *
   67  * $FreeBSD: releng/5.0/sys/vm/swap_pager.c 107039 2002-11-18 04:05:22Z alc $
   68  */
   69 
   70 #include <sys/param.h>
   71 #include <sys/systm.h>
   72 #include <sys/conf.h>
   73 #include <sys/kernel.h>
   74 #include <sys/proc.h>
   75 #include <sys/bio.h>
   76 #include <sys/buf.h>
   77 #include <sys/vnode.h>
   78 #include <sys/malloc.h>
   79 #include <sys/sysctl.h>
   80 #include <sys/blist.h>
   81 #include <sys/lock.h>
   82 #include <sys/sx.h>
   83 #include <sys/vmmeter.h>
   84 
   85 #ifndef MAX_PAGEOUT_CLUSTER
   86 #define MAX_PAGEOUT_CLUSTER 16
   87 #endif
   88 
   89 #define SWB_NPAGES      MAX_PAGEOUT_CLUSTER
   90 
   91 #include "opt_swap.h"
   92 #include <vm/vm.h>
   93 #include <vm/pmap.h>
   94 #include <vm/vm_map.h>
   95 #include <vm/vm_kern.h>
   96 #include <vm/vm_object.h>
   97 #include <vm/vm_page.h>
   98 #include <vm/vm_pager.h>
   99 #include <vm/vm_pageout.h>
  100 #include <vm/swap_pager.h>
  101 #include <vm/vm_extern.h>
  102 #include <vm/uma.h>
  103 
  104 #define SWM_FREE        0x02    /* free, period                 */
  105 #define SWM_POP         0x04    /* pop out                      */
  106 
  107 /*
  108  * vm_swap_size is in page-sized chunks now.  It was DEV_BSIZE'd chunks
  109  * in the old system.
  110  */
  111 extern int vm_swap_size;        /* number of free swap blocks, in pages */
  112 
  113 int swap_pager_full;            /* swap space exhaustion (task killing) */
  114 static int swap_pager_almost_full; /* swap space exhaustion (w/ hysteresis)*/
  115 static int nsw_rcount;          /* free read buffers                    */
  116 static int nsw_wcount_sync;     /* limit write buffers / synchronous    */
  117 static int nsw_wcount_async;    /* limit write buffers / asynchronous   */
  118 static int nsw_wcount_async_max;/* assigned maximum                     */
  119 static int nsw_cluster_max;     /* maximum VOP I/O allowed              */
  120 
  121 struct blist *swapblist;
  122 static struct swblock **swhash;
  123 static int swhash_mask;
  124 static int swap_async_max = 4;  /* maximum in-progress async I/O's      */
  125 static struct sx sw_alloc_sx;
  126 
  127 /* from vm_swap.c */
  128 extern struct vnode *swapdev_vp;
  129 extern struct swdevt *swdevt;
  130 extern int nswdev;
  131 
  132 SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
  133         CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops");
  134 
  135 #define BLK2DEVIDX(blk) (nswdev > 1 ? blk / dmmax % nswdev : 0)
  136 
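
BLK2DEVIDX() interleaves swap across devices in dmmax-page stripes: block
numbers advance through one stripe on a device, then rotate round-robin to
the next device.  A minimal userland sketch of that mapping, assuming
hypothetical standalone values for dmmax and nswdev (the kernel computes
both at runtime):

    #include <stdio.h>

    static int nswdev = 2;      /* hypothetical: two swap devices */
    static int dmmax = 32;      /* hypothetical: 32-page stripe */

    #define BLK2DEVIDX(blk) (nswdev > 1 ? (blk) / dmmax % nswdev : 0)

    int
    main(void)
    {
            int blk;

            /* Blocks 0..31 land on device 0, 32..63 on device 1, ... */
            for (blk = 0; blk < 128; blk += dmmax)
                    printf("blk %3d -> swap device %d\n",
                        blk, BLK2DEVIDX(blk));
            return (0);
    }
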
  137 /*
  138  * "named" and "unnamed" anon region objects.  Try to reduce the overhead
  139  * of searching a named list by hashing it just a little.
  140  */
  141 
  142 #define NOBJLISTS               8
  143 
  144 #define NOBJLIST(handle)        \
  145         (&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])
  146 
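
NOBJLIST() drops the low four handle bits (which allocator alignment keeps
nearly constant) and masks the rest into one of the eight lists.  A small
sketch of the bucket selection, with a hypothetical nobjlist_index() helper
that returns the index instead of a list pointer:

    #include <stdint.h>
    #include <stdio.h>

    #define NOBJLISTS 8

    /* Same arithmetic as NOBJLIST(), returned as an index. */
    static int
    nobjlist_index(void *handle)
    {
            return (((int)(intptr_t)handle >> 4) & (NOBJLISTS - 1));
    }

    int
    main(void)
    {
            static long handles[4];
            int i;

            for (i = 0; i < 4; i++)
                    printf("handle %p -> list %d\n",
                        (void *)&handles[i], nobjlist_index(&handles[i]));
            return (0);
    }
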
  147 static struct mtx sw_alloc_mtx; /* protect list manipulation */ 
  148 static struct pagerlst  swap_pager_object_list[NOBJLISTS];
  149 struct pagerlst         swap_pager_un_object_list;
  150 uma_zone_t              swap_zone;
  151 
  152 /*
  153  * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
  154  * calls hooked from other parts of the VM system and do not appear here.
  155  * (see vm/swap_pager.h).
  156  */
  157 static vm_object_t
  158                 swap_pager_alloc(void *handle, vm_ooffset_t size,
  159                                       vm_prot_t prot, vm_ooffset_t offset);
  160 static void     swap_pager_dealloc(vm_object_t object);
  161 static int      swap_pager_getpages(vm_object_t, vm_page_t *, int, int);
  162 static void     swap_pager_init(void);
  163 static void     swap_pager_unswapped(vm_page_t);
  164 static void     swap_pager_strategy(vm_object_t, struct bio *);
  165 
  166 struct pagerops swappagerops = {
  167         swap_pager_init,        /* early system initialization of pager */
  168         swap_pager_alloc,       /* allocate an OBJT_SWAP object         */
  169         swap_pager_dealloc,     /* deallocate an OBJT_SWAP object       */
  170         swap_pager_getpages,    /* pagein                               */
  171         swap_pager_putpages,    /* pageout                              */
  172         swap_pager_haspage,     /* get backing store status for page    */
  173         swap_pager_unswapped,   /* remove swap related to page          */
  174         swap_pager_strategy     /* pager strategy call                  */
  175 };
  176 
  177 static struct buf *getchainbuf(struct bio *bp, struct vnode *vp, int flags);
  178 static void flushchainbuf(struct buf *nbp);
  179 static void waitchainbuf(struct bio *bp, int count, int done);
  180 
  181 /*
  182  * dmmax is in page-sized chunks with the new swap system.  It was
  183  * DEV_BSIZE'd chunks in the old system.  dmmax is always a power of 2.
  184  *
  185  * swap_*() routines are externally accessible.  swp_*() routines are
  186  * internal.
  187  */
  188 int dmmax;
  189 static int dmmax_mask;
  190 int nswap_lowat = 128;          /* in pages, swap_pager_almost_full warn */
  191 int nswap_hiwat = 512;          /* in pages, swap_pager_almost_full warn */
  192 
  193 SYSCTL_INT(_vm, OID_AUTO, dmmax,
  194         CTLFLAG_RD, &dmmax, 0, "Maximum size of a swap block");
  195 
  196 static __inline void    swp_sizecheck(void);
  197 static void     swp_pager_sync_iodone(struct buf *bp);
  198 static void     swp_pager_async_iodone(struct buf *bp);
  199 
  200 /*
  201  * Swap bitmap functions
  202  */
  203 static __inline void    swp_pager_freeswapspace(daddr_t blk, int npages);
  204 static __inline daddr_t swp_pager_getswapspace(int npages);
  205 
  206 /*
  207  * Metadata functions
  208  */
  209 static void swp_pager_meta_build(vm_object_t, vm_pindex_t, daddr_t);
  210 static void swp_pager_meta_free(vm_object_t, vm_pindex_t, daddr_t);
  211 static void swp_pager_meta_free_all(vm_object_t);
  212 static daddr_t swp_pager_meta_ctl(vm_object_t, vm_pindex_t, int);
  213 
  214 /*
  215  * SWP_SIZECHECK() -    update swap_pager_full indication
  216  *      
  217  *      update the swap_pager_almost_full indication and warn when we are
  218  *      about to run out of swap space, using lowat/hiwat hysteresis.
  219  *
  220  *      Clear swap_pager_full ( task killing ) indication when lowat is met.
  221  *
  222  *      No restrictions on call
  223  *      This routine may not block.
  224  *      This routine must be called at splvm()
  225  */
  226 static __inline void
  227 swp_sizecheck()
  228 {
  229         GIANT_REQUIRED;
  230 
  231         if (vm_swap_size < nswap_lowat) {
  232                 if (swap_pager_almost_full == 0) {
  233                         printf("swap_pager: out of swap space\n");
  234                         swap_pager_almost_full = 1;
  235                 }
  236         } else {
  237                 swap_pager_full = 0;
  238                 if (vm_swap_size > nswap_hiwat)
  239                         swap_pager_almost_full = 0;
  240         }
  241 }
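
The lowat/hiwat pair keeps the warning from flapping: the almost-full flag
is raised when free space drops below nswap_lowat, but is not cleared until
free space climbs back above nswap_hiwat.  A toy userland model of the same
hysteresis (hypothetical sizecheck() driven by a simulated free-page count):

    #include <stdio.h>

    static int nswap_lowat = 128;
    static int nswap_hiwat = 512;
    static int almost_full;

    /* Mirrors swp_sizecheck()'s hysteresis on a simulated counter. */
    static void
    sizecheck(int vm_swap_size)
    {
            if (vm_swap_size < nswap_lowat)
                    almost_full = 1;
            else if (vm_swap_size > nswap_hiwat)
                    almost_full = 0;
    }

    int
    main(void)
    {
            int sizes[] = { 1000, 300, 100, 300, 600 };
            int i;

            /* Note 300 pages: still below hiwat, so the flag stays up. */
            for (i = 0; i < 5; i++) {
                    sizecheck(sizes[i]);
                    printf("free %4d pages -> almost_full=%d\n",
                        sizes[i], almost_full);
            }
            return (0);
    }
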
  242 
  243 /*
  244  * SWAP_PAGER_INIT() -  initialize the swap pager!
  245  *
  246  *      Expected to be started from system init.  NOTE:  This code is run 
  247  *      before much else so be careful what you depend on.  Most of the VM
  248  *      system has yet to be initialized at this point.
  249  */
  250 static void
  251 swap_pager_init()
  252 {
  253         /*
  254          * Initialize object lists
  255          */
  256         int i;
  257 
  258         for (i = 0; i < NOBJLISTS; ++i)
  259                 TAILQ_INIT(&swap_pager_object_list[i]);
  260         TAILQ_INIT(&swap_pager_un_object_list);
  261         mtx_init(&sw_alloc_mtx, "swap_pager list", NULL, MTX_DEF);
  262 
  263         /*
  264          * Device Stripe, in PAGE_SIZE'd blocks
  265          */
  266         dmmax = SWB_NPAGES * 2;
  267         dmmax_mask = ~(dmmax - 1);
  268 }
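
With the default SWB_NPAGES of 16, dmmax is 32 pages and dmmax_mask is ~31,
so two block numbers lie in the same stripe exactly when their XOR masks to
zero; the same test reappears in strategy, getpages and putpages below.  A
quick sketch:

    #include <stdio.h>

    int
    main(void)
    {
            int dmmax = 32;                 /* SWB_NPAGES * 2 */
            int dmmax_mask = ~(dmmax - 1);

            /* Same stripe: the high bits agree, the XOR masks to zero. */
            printf("blk 30,31 same stripe: %s\n",
                ((30 ^ 31) & dmmax_mask) ? "no" : "yes");
            /* Crossing a stripe boundary flips a high bit. */
            printf("blk 31,32 same stripe: %s\n",
                ((31 ^ 32) & dmmax_mask) ? "no" : "yes");
            return (0);
    }
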
  269 
  270 /*
  271  * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
  272  *
  273  *      Expected to be started from pageout process once, prior to entering
  274  *      its main loop.
  275  */
  276 void
  277 swap_pager_swap_init()
  278 {
  279         int n, n2;
  280 
  281         /*
  282          * Number of in-transit swap bp operations.  Don't
  283          * exhaust the pbufs completely.  Make sure we
  284          * initialize workable values (0 will work for hysteresis
  285          * but it isn't very efficient).
  286          *
  287          * The nsw_cluster_max is constrained by the bp->b_pages[]
  288          * array (MAXPHYS/PAGE_SIZE) and our locally defined
  289          * MAX_PAGEOUT_CLUSTER.   Also be aware that swap ops are
  290          * constrained by the swap device interleave stripe size.
  291          *
  292          * Currently we hardwire nsw_wcount_async to 4.  This limit is 
  293          * designed to prevent other I/O from having high latencies due to
  294          * our pageout I/O.  The value 4 works well for one or two active swap
  295          * devices but is probably a little low if you have more.  Even so,
  296          * a higher value would probably generate only a limited improvement
  297          * with three or four active swap devices since the system does not
  298          * typically have to pageout at extreme bandwidths.   We will want
  299  *      at least 2 per swap device, and 4 is a pretty good value if you
  300          * have one NFS swap device due to the command/ack latency over NFS.
  301          * So it all works out pretty well.
  302          */
  303         nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);
  304 
  305         mtx_lock(&pbuf_mtx);
  306         nsw_rcount = (nswbuf + 1) / 2;
  307         nsw_wcount_sync = (nswbuf + 3) / 4;
  308         nsw_wcount_async = 4;
  309         nsw_wcount_async_max = nsw_wcount_async;
  310         mtx_unlock(&pbuf_mtx);
  311 
  312         /*
  313          * Initialize our zone.  Right now I'm just guessing on the number
  314          * we need based on the number of pages in the system.  Each swblock
  315          * can hold 16 pages, so this is probably overkill.  This reservation
  316          * is typically limited to around 32MB by default.
  317          */
  318         n = cnt.v_page_count / 2;
  319         if (maxswzone && n > maxswzone / sizeof(struct swblock))
  320                 n = maxswzone / sizeof(struct swblock);
  321         n2 = n;
  322         swap_zone = uma_zcreate("SWAPMETA", sizeof(struct swblock), NULL, NULL,
  323             NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
  324         do {
  325                 if (uma_zone_set_obj(swap_zone, NULL, n))
  326                         break;
  327                 /*
  328                  * if the allocation failed, try a zone two thirds the
  329                  * size of the previous attempt.
  330                  */
  331                 n -= ((n + 2) / 3);
  332         } while (n > 0);
  333         if (swap_zone == NULL)
  334                 panic("failed to create swap_zone.");
  335         if (n2 != n)
  336                 printf("Swap zone entries reduced from %d to %d.\n", n2, n);
  337         n2 = n;
  338 
  339         /*
  340          * Initialize our meta-data hash table.  The swapper does not need to
  341          * be quite as efficient as the VM system, so we do not use an 
  342          * oversized hash table.
  343          *
  344          *      n:              size of hash table, must be power of 2
  345          *      swhash_mask:    hash table index mask
  346          */
  347         for (n = 1; n < n2 / 8; n *= 2)
  348                 ;
  349         swhash = malloc(sizeof(struct swblock *) * n, M_VMPGDATA, M_WAITOK | M_ZERO);
  350         swhash_mask = n - 1;
  351 }
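
Two sizing tricks above are worth seeing in isolation: the zone reservation
retries at two-thirds of the previous size until an allocation succeeds, and
the hash table is sized to the smallest power of two at or above n/8.  A
standalone sketch, with a hypothetical try_reserve() standing in for
uma_zone_set_obj():

    #include <stdio.h>

    /* Hypothetical stand-in: pretend only reservations of at most
     * `limit' entries can succeed. */
    static int
    try_reserve(int n, int limit)
    {
            return (n <= limit);
    }

    int
    main(void)
    {
            int n = 100000, n2, limit = 30000;

            /* Two-thirds back-off, as in swap_pager_swap_init(). */
            while (n > 0 && !try_reserve(n, limit))
                    n -= (n + 2) / 3;
            printf("zone entries: %d\n", n);

            /* Hash size: power of 2, roughly n/8 buckets. */
            n2 = n;
            for (n = 1; n < n2 / 8; n *= 2)
                    ;
            printf("hash size: %d (mask 0x%x)\n", n, n - 1);
            return (0);
    }
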
  352 
  353 /*
  354  * SWAP_PAGER_ALLOC() - allocate a new OBJT_SWAP VM object and instantiate
  355  *                      its metadata structures.
  356  *
  357  *      This routine is called from the mmap and fork code to create a new
  358  *      OBJT_SWAP object.  We do this by creating an OBJT_DEFAULT object
  359  *      and then converting it with swp_pager_meta_build().
  360  *
  361  *      This routine may block in vm_object_allocate() and create a named
  362  *      object lookup race, so we must interlock.   We must also run at
  363  *      splvm() for the object lookup to handle races with interrupts, but
  364  *      we do not have to maintain splvm() in between the lookup and the
  365  *      add because (I believe) it is not possible to attempt to create
  366  *      a new swap object w/handle when a default object with that handle
  367  *      already exists.
  368  *
  369  * MPSAFE
  370  */
  371 static vm_object_t
  372 swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
  373                  vm_ooffset_t offset)
  374 {
  375         vm_object_t object;
  376 
  377         mtx_lock(&Giant);
  378         if (handle) {
  379                 /*
  380                  * Reference existing named region or allocate new one.  There
  381                  * should not be a race here against swp_pager_meta_build()
  382                  * as called from vm_page_remove() in regards to the lookup
  383                  * of the handle.
  384                  */
  385                 sx_xlock(&sw_alloc_sx);
  386                 object = vm_pager_object_lookup(NOBJLIST(handle), handle);
  387 
  388                 if (object != NULL) {
  389                         vm_object_reference(object);
  390                 } else {
  391                         object = vm_object_allocate(OBJT_DEFAULT,
  392                                 OFF_TO_IDX(offset + PAGE_MASK + size));
  393                         object->handle = handle;
  394 
  395                         swp_pager_meta_build(object, 0, SWAPBLK_NONE);
  396                 }
  397                 sx_xunlock(&sw_alloc_sx);
  398         } else {
  399                 object = vm_object_allocate(OBJT_DEFAULT,
  400                         OFF_TO_IDX(offset + PAGE_MASK + size));
  401 
  402                 swp_pager_meta_build(object, 0, SWAPBLK_NONE);
  403         }
  404         mtx_unlock(&Giant);
  405         return (object);
  406 }
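
The object size handed to vm_object_allocate() is
OFF_TO_IDX(offset + PAGE_MASK + size), i.e. the byte range rounded up to
whole pages.  A sketch of just that arithmetic, redefining OFF_TO_IDX and
PAGE_MASK locally for 4 KB pages:

    #include <stdio.h>

    #define PAGE_SHIFT 12                   /* 4 KB pages, for illustration */
    #define PAGE_SIZE  (1L << PAGE_SHIFT)
    #define PAGE_MASK  (PAGE_SIZE - 1)
    #define OFF_TO_IDX(off) ((off) >> PAGE_SHIFT)

    int
    main(void)
    {
            long offset = 0, size;

            /* One byte still costs a page; PAGE_SIZE + 1 costs two. */
            for (size = 1; size <= 3 * PAGE_SIZE; size += PAGE_SIZE + 1)
                    printf("size %5ld -> %ld page(s)\n",
                        size, OFF_TO_IDX(offset + PAGE_MASK + size));
            return (0);
    }
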
  407 
  408 /*
  409  * SWAP_PAGER_DEALLOC() -       remove swap metadata from object
  410  *
  411  *      The swap backing for the object is destroyed.  The code is 
  412  *      designed such that we can reinstantiate it later, but this
  413  *      routine is typically called only when the entire object is
  414  *      about to be destroyed.
  415  *
  416  *      This routine is allowed to block, but in practice it no longer does.
  417  *
  418  *      The object must be locked or unreferenceable.
  419  */
  420 static void
  421 swap_pager_dealloc(object)
  422         vm_object_t object;
  423 {
  424         int s;
  425 
  426         GIANT_REQUIRED;
  427 
  428         /*
  429          * Remove from list right away so lookups will fail if we block for
  430          * pageout completion.
  431          */
  432         mtx_lock(&sw_alloc_mtx);
  433         if (object->handle == NULL) {
  434                 TAILQ_REMOVE(&swap_pager_un_object_list, object, pager_object_list);
  435         } else {
  436                 TAILQ_REMOVE(NOBJLIST(object->handle), object, pager_object_list);
  437         }
  438         mtx_unlock(&sw_alloc_mtx);
  439 
  440         vm_object_pip_wait(object, "swpdea");
  441 
  442         /*
  443          * Free all remaining metadata.  We only bother to free it from 
  444          * the swap meta data.  We do not attempt to free swapblk's still
  445          * associated with vm_page_t's for this object.  We do not care
  446          * if paging is still in progress on some objects.
  447          */
  448         s = splvm();
  449         swp_pager_meta_free_all(object);
  450         splx(s);
  451 }
  452 
  453 /************************************************************************
  454  *                      SWAP PAGER BITMAP ROUTINES                      *
  455  ************************************************************************/
  456 
  457 /*
  458  * SWP_PAGER_GETSWAPSPACE() -   allocate raw swap space
  459  *
  460  *      Allocate swap for the requested number of pages.  The starting
  461  *      swap block number (a page index) is returned or SWAPBLK_NONE
  462  *      if the allocation failed.
  463  *
  464  *      Also has the side effect of advising that somebody made a mistake
  465  *      when they configured swap and didn't configure enough.
  466  *
  467  *      Must be called at splvm() to avoid races with bitmap frees from
  468  *      vm_page_remove() aka swap_pager_page_removed().
  469  *
  470  *      This routine may not block
  471  *      This routine must be called at splvm().
  472  */
  473 static __inline daddr_t
  474 swp_pager_getswapspace(npages)
  475         int npages;
  476 {
  477         daddr_t blk;
  478 
  479         GIANT_REQUIRED;
  480 
  481         if ((blk = blist_alloc(swapblist, npages)) == SWAPBLK_NONE) {
  482                 if (swap_pager_full != 2) {
  483                         printf("swap_pager_getswapspace: failed\n");
  484                         swap_pager_full = 2;
  485                         swap_pager_almost_full = 1;
  486                 }
  487         } else {
  488                 vm_swap_size -= npages;
  489                 /* per-swap area stats */
  490                 swdevt[BLK2DEVIDX(blk)].sw_used += npages;
  491                 swp_sizecheck();
  492         }
  493         return (blk);
  494 }
  495 
  496 /*
  497  * SWP_PAGER_FREESWAPSPACE() -  free raw swap space 
  498  *
  499  *      This routine returns the specified swap blocks back to the bitmap.
  500  *
  501  *      Note:  This routine may not block.  The old swap code could block
  502  *      here, but the new blist routines never do.
  503  *
  504  *      We must be called at splvm() to avoid races with bitmap frees from
  505  *      vm_page_remove() aka swap_pager_page_removed().
  506  *
  507  *      This routine may not block
  508  *      This routine must be called at splvm().
  509  */
  510 static __inline void
  511 swp_pager_freeswapspace(blk, npages)
  512         daddr_t blk;
  513         int npages;
  514 {
  515         GIANT_REQUIRED;
  516 
  517         blist_free(swapblist, blk, npages);
  518         vm_swap_size += npages;
  519         /* per-swap area stats */
  520         swdevt[BLK2DEVIDX(blk)].sw_used -= npages;
  521         swp_sizecheck();
  522 }
  523 
  524 /*
  525  * SWAP_PAGER_FREESPACE() -     frees swap blocks associated with a page
  526  *                              range within an object.
  527  *
  528  *      This is a globally accessible routine.
  529  *
  530  *      This routine removes swapblk assignments from swap metadata.
  531  *
  532  *      The external callers of this routine typically have already destroyed 
  533  *      or renamed vm_page_t's associated with this range in the object so 
  534  *      we should be ok.
  535  *
  536  *      This routine may be called at any spl.  We up our spl to splvm temporarily
  537  *      in order to perform the metadata removal.
  538  */
  539 void
  540 swap_pager_freespace(object, start, size)
  541         vm_object_t object;
  542         vm_pindex_t start;
  543         vm_size_t size;
  544 {
  545         int s = splvm();
  546 
  547         GIANT_REQUIRED;
  548         swp_pager_meta_free(object, start, size);
  549         splx(s);
  550 }
  551 
  552 /*
  553  * SWAP_PAGER_RESERVE() - reserve swap blocks in object
  554  *
  555  *      Assigns swap blocks to the specified range within the object.  The 
  556  *      swap blocks are not zeroed.  Any previous swap assignment is destroyed.
  557  *
  558  *      Returns 0 on success, -1 on failure.
  559  */
  560 int
  561 swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
  562 {
  563         int s;
  564         int n = 0;
  565         daddr_t blk = SWAPBLK_NONE;
  566         vm_pindex_t beg = start;        /* save start index */
  567 
  568         s = splvm();
  569         while (size) {
  570                 if (n == 0) {
  571                         n = BLIST_MAX_ALLOC;
  572                         while ((blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE) {
  573                                 n >>= 1;
  574                                 if (n == 0) {
  575                                         swp_pager_meta_free(object, beg, start - beg);
  576                                         splx(s);
  577                                         return (-1);
  578                                 }
  579                         }
  580                 }
  581                 swp_pager_meta_build(object, start, blk);
  582                 --size;
  583                 ++start;
  584                 ++blk;
  585                 --n;
  586         }
  587         swp_pager_meta_free(object, start, n);
  588         splx(s);
  589         return (0);
  590 }
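
The allocation loop asks for BLIST_MAX_ALLOC pages at a time and halves the
request on each failure, so a fragmented swap area is still filled with
progressively smaller contiguous runs.  The back-off in isolation, with a
hypothetical try_alloc() standing in for swp_pager_getswapspace():

    #include <stdio.h>

    /* Hypothetical stand-in: pretend the largest free run is 12 pages. */
    static int
    try_alloc(int n)
    {
            return (n <= 12);
    }

    int
    main(void)
    {
            int n = 32;             /* stand-in for BLIST_MAX_ALLOC */

            while (!try_alloc(n)) {
                    n >>= 1;        /* halve the request and retry */
                    if (n == 0) {
                            printf("allocation failed\n");
                            return (1);
                    }
            }
            printf("allocated a run of %d pages\n", n);
            return (0);
    }
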
  591 
  592 /*
  593  * SWAP_PAGER_COPY() -  copy blocks from source pager to destination pager
  594  *                      and destroy the source.
  595  *
  596  *      Copy any valid swapblks from the source to the destination.  In
  597  *      cases where both the source and destination have a valid swapblk,
  598  *      we keep the destination's.
  599  *
  600  *      This routine is allowed to block.  It may block allocating metadata
  601  *      indirectly through swp_pager_meta_build() or if paging is still in
  602  *      progress on the source. 
  603  *
  604  *      This routine can be called at any spl
  605  *
  606  *      XXX vm_page_collapse() kinda expects us not to block because we
  607  *      supposedly do not need to allocate memory, but for the moment we
  608  *      *may* have to get a little memory from the zone allocator; since
  609  *      that is taken from the interrupt memory, we should be ok.
  610  *
  611  *      The source object contains no vm_page_t's (which is just as well)
  612  *
  613  *      The source object is of type OBJT_SWAP.
  614  *
  615  *      The source and destination objects must be locked or 
  616  *      inaccessible (XXX are they ?)
  617  */
  618 void
  619 swap_pager_copy(srcobject, dstobject, offset, destroysource)
  620         vm_object_t srcobject;
  621         vm_object_t dstobject;
  622         vm_pindex_t offset;
  623         int destroysource;
  624 {
  625         vm_pindex_t i;
  626         int s;
  627 
  628         GIANT_REQUIRED;
  629 
  630         s = splvm();
  631         /*
  632          * If destroysource is set, we remove the source object from the 
  633          * swap_pager internal queue now. 
  634          */
  635         if (destroysource) {
  636                 mtx_lock(&sw_alloc_mtx);
  637                 if (srcobject->handle == NULL) {
  638                         TAILQ_REMOVE(
  639                             &swap_pager_un_object_list, 
  640                             srcobject, 
  641                             pager_object_list
  642                         );
  643                 } else {
  644                         TAILQ_REMOVE(
  645                             NOBJLIST(srcobject->handle),
  646                             srcobject,
  647                             pager_object_list
  648                         );
  649                 }
  650                 mtx_unlock(&sw_alloc_mtx);
  651         }
  652 
  653         /*
  654          * transfer source to destination.
  655          */
  656         for (i = 0; i < dstobject->size; ++i) {
  657                 daddr_t dstaddr;
  658 
  659                 /*
  660                  * Locate (without changing) the swapblk on the destination,
  661                  * unless it is invalid, in which case we free it silently, or
  662                  * if the destination is a resident page, in which case the
  663                  * source is thrown away.
  664                  */
  665                 dstaddr = swp_pager_meta_ctl(dstobject, i, 0);
  666 
  667                 if (dstaddr == SWAPBLK_NONE) {
  668                         /*
  669                          * Destination has no swapblk and is not resident,
  670                          * copy source.
  671                          */
  672                         daddr_t srcaddr;
  673 
  674                         srcaddr = swp_pager_meta_ctl(
  675                             srcobject, 
  676                             i + offset,
  677                             SWM_POP
  678                         );
  679 
  680                         if (srcaddr != SWAPBLK_NONE)
  681                                 swp_pager_meta_build(dstobject, i, srcaddr);
  682                 } else {
  683                         /*
  684                          * Destination has valid swapblk or it is represented
  685                          * by a resident page.  We destroy the sourceblock.
  686                          */
  687                         
  688                         swp_pager_meta_ctl(srcobject, i + offset, SWM_FREE);
  689                 }
  690         }
  691 
  692         /*
  693          * Free left over swap blocks in source.
  694          *
  695          * We have to revert the type to OBJT_DEFAULT so we do not accidentally
  696          * double-remove the object from the swap queues.
  697          */
  698         if (destroysource) {
  699                 swp_pager_meta_free_all(srcobject);
  700                 /*
  701                  * Reverting the type is not necessary, the caller is going
  702                  * to destroy srcobject directly, but I'm doing it here
  703                  * for consistency since we've removed the object from its
  704                  * queues.
  705                  */
  706                 srcobject->type = OBJT_DEFAULT;
  707         }
  708         splx(s);
  709 }
  710 
  711 /*
  712  * SWAP_PAGER_HASPAGE() -       determine if we have good backing store for
  713  *                              the requested page.
  714  *
  715  *      We determine whether good backing store exists for the requested
  716  *      page and return TRUE if it does, FALSE if it doesn't.
  717  *
  718  *      If TRUE, we also try to determine how much valid, contiguous backing
  719  *      store exists before and after the requested page within a reasonable
  720  *      distance.  We do not try to restrict it to the swap device stripe
  721  *      (that is handled in getpages/putpages).  It probably isn't worth
  722  *      doing here.
  723  */
  724 boolean_t
  725 swap_pager_haspage(object, pindex, before, after)
  726         vm_object_t object;
  727         vm_pindex_t pindex;
  728         int *before;
  729         int *after;
  730 {
  731         daddr_t blk0;
  732         int s;
  733 
  734         /*
  735          * do we have good backing store at the requested index ?
  736          */
  737         s = splvm();
  738         blk0 = swp_pager_meta_ctl(object, pindex, 0);
  739 
  740         if (blk0 == SWAPBLK_NONE) {
  741                 splx(s);
  742                 if (before)
  743                         *before = 0;
  744                 if (after)
  745                         *after = 0;
  746                 return (FALSE);
  747         }
  748 
  749         /*
  750          * find backwards-looking contiguous good backing store
  751          */
  752         if (before != NULL) {
  753                 int i;
  754 
  755                 for (i = 1; i < (SWB_NPAGES/2); ++i) {
  756                         daddr_t blk;
  757 
  758                         if (i > pindex)
  759                                 break;
  760                         blk = swp_pager_meta_ctl(object, pindex - i, 0);
  761                         if (blk != blk0 - i)
  762                                 break;
  763                 }
  764                 *before = (i - 1);
  765         }
  766 
  767         /*
  768          * find forward-looking contiguous good backing store
  769          */
  770         if (after != NULL) {
  771                 int i;
  772 
  773                 for (i = 1; i < (SWB_NPAGES/2); ++i) {
  774                         daddr_t blk;
  775 
  776                         blk = swp_pager_meta_ctl(object, pindex + i, 0);
  777                         if (blk != blk0 + i)
  778                                 break;
  779                 }
  780                 *after = (i - 1);
  781         }
  782         splx(s);
  783         return (TRUE);
  784 }
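
The before/after scans walk away from the requested index one page at a
time and stop at the first block that is not exactly blk0 - i (or blk0 + i).
A userland sketch over a small array of per-page block assignments, with
SWAPBLK_NONE modeled as -1:

    #include <stdio.h>

    #define SWAPBLK_NONE (-1L)

    int
    main(void)
    {
            /* Hypothetical swap blocks; index 3 is the requested page. */
            long blks[] = { 90, 101, 102, 103, 104, SWAPBLK_NONE, 200 };
            int pindex = 3, before, after, i;
            long blk0 = blks[pindex];

            for (i = 1; pindex - i >= 0; ++i)
                    if (blks[pindex - i] != blk0 - i)
                            break;
            before = i - 1;
            for (i = 1; pindex + i < 7; ++i)
                    if (blks[pindex + i] != blk0 + i)
                            break;
            after = i - 1;
            /* Prints before=2 after=1: 101,102 lead in; 104 follows. */
            printf("before=%d after=%d\n", before, after);
            return (0);
    }
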
  785 
  786 /*
  787  * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
  788  *
  789  *      This removes any associated swap backing store, whether valid or
  790  *      not, from the page.  
  791  *
  792  *      This routine is typically called when a page is made dirty, at
  793  *      which point any associated swap can be freed.  MADV_FREE also
  794  *      calls us in a special-case situation.
  795  *
  796  *      NOTE!!!  If the page is clean and the swap was valid, the caller
  797  *      should make the page dirty before calling this routine.  This routine
  798  *      does NOT change the m->dirty status of the page.  Also: MADV_FREE
  799  *      depends on it.
  800  *
  801  *      This routine may not block
  802  *      This routine must be called at splvm()
  803  */
  804 static void
  805 swap_pager_unswapped(m)
  806         vm_page_t m;
  807 {
  808         swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
  809 }
  810 
  811 /*
  812  * SWAP_PAGER_STRATEGY() - read, write, free blocks
  813  *
  814  *      This implements the vm_pager_strategy() interface to swap and allows
  815  *      other parts of the system to directly access swap as backing store
  816  *      through vm_objects of type OBJT_SWAP.  This is intended to be a 
  817  *      cacheless interface ( i.e. caching occurs at higher levels ).
  818  *      Therefore we do not maintain any resident pages.  All I/O goes
  819  *      directly to and from the swap device.
  820  *      
  821  *      Note that b_blkno is scaled for PAGE_SIZE
  822  *
  823  *      We currently attempt to run I/O synchronously or asynchronously as
  824  *      the caller requests.  This isn't perfect because we lose error
  825  *      sequencing when we run multiple ops in parallel to satisfy a request.
  826  *      But this is swap, so we let it all hang out.
  827  */
  828 static void     
  829 swap_pager_strategy(vm_object_t object, struct bio *bp)
  830 {
  831         vm_pindex_t start;
  832         int count;
  833         int s;
  834         char *data;
  835         struct buf *nbp = NULL;
  836 
  837         GIANT_REQUIRED;
  838 
  839         /* XXX: KASSERT instead ? */
  840         if (bp->bio_bcount & PAGE_MASK) {
  841                 biofinish(bp, NULL, EINVAL);
  842                 printf("swap_pager_strategy: bp %p blk %d size %d, not page bounded\n", bp, (int)bp->bio_pblkno, (int)bp->bio_bcount);
  843                 return;
  844         }
  845 
  846         /*
  847          * Clear error indication, initialize page index, count, data pointer.
  848          */
  849         bp->bio_error = 0;
  850         bp->bio_flags &= ~BIO_ERROR;
  851         bp->bio_resid = bp->bio_bcount;
  852         *(u_int *) &bp->bio_driver1 = 0;
  853 
  854         start = bp->bio_pblkno;
  855         count = howmany(bp->bio_bcount, PAGE_SIZE);
  856         data = bp->bio_data;
  857 
  858         s = splvm();
  859 
  860         /*
  861          * Deal with BIO_DELETE
  862          */
  863         if (bp->bio_cmd == BIO_DELETE) {
  864                 /*
  865                  * FREE PAGE(s) - destroy underlying swap that is no longer
  866                  *                needed.
  867                  */
  868                 swp_pager_meta_free(object, start, count);
  869                 splx(s);
  870                 bp->bio_resid = 0;
  871                 biodone(bp);
  872                 return;
  873         }
  874 
  875         /*
  876          * Execute read or write
  877          */
  878         while (count > 0) {
  879                 daddr_t blk;
  880 
  881                 /*
  882                  * Obtain block.  If block not found and writing, allocate a
  883                  * new block and build it into the object.
  884                  */
  885 
  886                 blk = swp_pager_meta_ctl(object, start, 0);
  887                 if ((blk == SWAPBLK_NONE) && (bp->bio_cmd == BIO_WRITE)) {
  888                         blk = swp_pager_getswapspace(1);
  889                         if (blk == SWAPBLK_NONE) {
  890                                 bp->bio_error = ENOMEM;
  891                                 bp->bio_flags |= BIO_ERROR;
  892                                 break;
  893                         }
  894                         swp_pager_meta_build(object, start, blk);
  895                 }
  896                         
  897                 /*
  898                  * Do we have to flush our current collection?  Yes if:
  899                  *
  900                  *      - no swap block at this index
  901                  *      - swap block is not contiguous
  902                  *      - we cross a physical disk boundary in the
  903                  *        stripe.
  904                  */
  905                 if (
  906                     nbp && (nbp->b_blkno + btoc(nbp->b_bcount) != blk ||
  907                      ((nbp->b_blkno ^ blk) & dmmax_mask)
  908                     )
  909                 ) {
  910                         splx(s);
  911                         if (bp->bio_cmd == BIO_READ) {
  912                                 ++cnt.v_swapin;
  913                                 cnt.v_swappgsin += btoc(nbp->b_bcount);
  914                         } else {
  915                                 ++cnt.v_swapout;
  916                                 cnt.v_swappgsout += btoc(nbp->b_bcount);
  917                                 nbp->b_dirtyend = nbp->b_bcount;
  918                         }
  919                         flushchainbuf(nbp);
  920                         s = splvm();
  921                         nbp = NULL;
  922                 }
  923 
  924                 /*
  925                  * Add new swapblk to nbp, instantiating nbp if necessary.
  926                  * Zero-fill reads are able to take a shortcut.
  927                  */
  928                 if (blk == SWAPBLK_NONE) {
  929                         /*
  930                          * We can only get here if we are reading.  Since
  931                          * we are at splvm() we can safely modify bio_resid,
  932                          * even if chain ops are in progress.
  933                          */
  934                         bzero(data, PAGE_SIZE);
  935                         bp->bio_resid -= PAGE_SIZE;
  936                 } else {
  937                         if (nbp == NULL) {
  938                                 nbp = getchainbuf(bp, swapdev_vp, B_ASYNC);
  939                                 nbp->b_blkno = blk;
  940                                 nbp->b_bcount = 0;
  941                                 nbp->b_data = data;
  942                         }
  943                         nbp->b_bcount += PAGE_SIZE;
  944                 }
  945                 --count;
  946                 ++start;
  947                 data += PAGE_SIZE;
  948         }
  949 
  950         /*
  951          *  Flush out last buffer
  952          */
  953         splx(s);
  954 
  955         if (nbp) {
  956                 if (nbp->b_iocmd == BIO_READ) {
  957                         ++cnt.v_swapin;
  958                         cnt.v_swappgsin += btoc(nbp->b_bcount);
  959                 } else {
  960                         ++cnt.v_swapout;
  961                         cnt.v_swappgsout += btoc(nbp->b_bcount);
  962                         nbp->b_dirtyend = nbp->b_bcount;
  963                 }
  964                 flushchainbuf(nbp);
  965                 /* nbp = NULL; */
  966         }
  967         /*
  968          * Wait for completion.
  969          */
  970         waitchainbuf(bp, 0, 1);
  971 }
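
The cluster buffer is flushed when the next swap block is not contiguous
with the buffer built so far, or when appending it would cross a device
stripe.  A sketch of just the stripe clause, with contiguous blocks and
hypothetical buffer geometry:

    #include <stdio.h>

    int
    main(void)
    {
            int dmmax_mask = ~(32 - 1);     /* 32-page stripes */
            long nbp_blkno = 28;            /* buffer starts at block 28 */
            long nbp_pages;

            for (nbp_pages = 1; nbp_pages <= 4; ++nbp_pages) {
                    long blk = nbp_blkno + nbp_pages;   /* contiguous */
                    int crosses = ((nbp_blkno ^ blk) & dmmax_mask) != 0;

                    /* Block 32 is contiguous but in the next stripe. */
                    printf("buf %ld..%ld, next %ld -> %s\n",
                        nbp_blkno, nbp_blkno + nbp_pages - 1, blk,
                        crosses ? "flush, start new buf" : "append");
            }
            return (0);
    }
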
  972 
  973 /*
  974  * SWAP_PAGER_GETPAGES() - bring pages in from swap
  975  *
  976  *      Attempt to retrieve (m, count) pages from backing store, but make
  977  *      sure we retrieve at least m[reqpage].  We try to load in as large
  978  *      a chunk surrounding m[reqpage] as is contiguous in swap and which
  979  *      belongs to the same object.
  980  *
  981  *      The code is designed for asynchronous operation and 
  982  *      immediate-notification of 'reqpage' but tends not to be
  983  *      used that way.  Please do not optimize-out this algorithmic
  984  *      feature, I intend to improve on it in the future.
  985  *
  986  *      The parent has a single vm_object_pip_add() reference prior to
  987  *      calling us and we should return with the same.
  988  *
  989  *      The parent has BUSY'd the pages.  We should return with 'm'
  990  *      left busy, but the others adjusted.
  991  */
  992 static int
  993 swap_pager_getpages(object, m, count, reqpage)
  994         vm_object_t object;
  995         vm_page_t *m;
  996         int count, reqpage;
  997 {
  998         struct buf *bp;
  999         vm_page_t mreq;
 1000         int s;
 1001         int i;
 1002         int j;
 1003         daddr_t blk;
 1004         vm_offset_t kva;
 1005         vm_pindex_t lastpindex;
 1006 
 1007         GIANT_REQUIRED;
 1008 
 1009         mreq = m[reqpage];
 1010 
 1011         if (mreq->object != object) {
 1012                 panic("swap_pager_getpages: object mismatch %p/%p", 
 1013                     object, 
 1014                     mreq->object
 1015                 );
 1016         }
 1017         /*
 1018          * Calculate range to retrieve.  The pages have already been assigned
 1019          * their swapblks.  We require a *contiguous* range that falls entirely
 1020          * within a single device stripe.   If we do not supply it, bad things
 1021          * happen.  Note that blk, iblk & jblk can be SWAPBLK_NONE, but the 
 1022          * loops are set up such that the case(s) are handled implicitly.
 1023          *
 1024          * The swp_*() calls must be made at splvm().  vm_page_free() does
 1025          * not need to be, but it will go a little faster if it is.
 1026          */
 1027         s = splvm();
 1028         blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0);
 1029 
 1030         for (i = reqpage - 1; i >= 0; --i) {
 1031                 daddr_t iblk;
 1032 
 1033                 iblk = swp_pager_meta_ctl(m[i]->object, m[i]->pindex, 0);
 1034                 if (blk != iblk + (reqpage - i))
 1035                         break;
 1036                 if ((blk ^ iblk) & dmmax_mask)
 1037                         break;
 1038         }
 1039         ++i;
 1040 
 1041         for (j = reqpage + 1; j < count; ++j) {
 1042                 daddr_t jblk;
 1043 
 1044                 jblk = swp_pager_meta_ctl(m[j]->object, m[j]->pindex, 0);
 1045                 if (blk != jblk - (j - reqpage))
 1046                         break;
 1047                 if ((blk ^ jblk) & dmmax_mask)
 1048                         break;
 1049         }
 1050 
 1051         /*
 1052          * free pages outside our collection range.   Note: we never free
 1053          * mreq, it must remain busy throughout.
 1054          */
 1055         vm_page_lock_queues();
 1056         {
 1057                 int k;
 1058 
 1059                 for (k = 0; k < i; ++k)
 1060                         vm_page_free(m[k]);
 1061                 for (k = j; k < count; ++k)
 1062                         vm_page_free(m[k]);
 1063         }
 1064         vm_page_unlock_queues();
 1065         splx(s);
 1066 
 1067 
 1068         /*
 1069          * Return VM_PAGER_FAIL if we have nothing to do.  Return mreq 
 1070          * still busy, but the others unbusied.
 1071          */
 1072         if (blk == SWAPBLK_NONE)
 1073                 return (VM_PAGER_FAIL);
 1074 
 1075         /*
 1076          * Get a swap buffer header to perform the IO
 1077          */
 1078         bp = getpbuf(&nsw_rcount);
 1079         kva = (vm_offset_t) bp->b_data;
 1080 
 1081         /*
 1082          * map our page(s) into kva for input
 1083          *
 1084          * NOTE: B_PAGING is set by pbgetvp()
 1085          */
 1086         pmap_qenter(kva, m + i, j - i);
 1087 
 1088         bp->b_iocmd = BIO_READ;
 1089         bp->b_iodone = swp_pager_async_iodone;
 1090         bp->b_rcred = crhold(thread0.td_ucred);
 1091         bp->b_wcred = crhold(thread0.td_ucred);
 1092         bp->b_data = (caddr_t) kva;
 1093         bp->b_blkno = blk - (reqpage - i);
 1094         bp->b_bcount = PAGE_SIZE * (j - i);
 1095         bp->b_bufsize = PAGE_SIZE * (j - i);
 1096         bp->b_pager.pg_reqpage = reqpage - i;
 1097 
 1098         {
 1099                 int k;
 1100 
 1101                 for (k = i; k < j; ++k) {
 1102                         bp->b_pages[k - i] = m[k];
 1103                         vm_page_flag_set(m[k], PG_SWAPINPROG);
 1104                 }
 1105         }
 1106         bp->b_npages = j - i;
 1107 
 1108         pbgetvp(swapdev_vp, bp);
 1109 
 1110         cnt.v_swapin++;
 1111         cnt.v_swappgsin += bp->b_npages;
 1112 
 1113         /*
 1114          * We still hold the lock on mreq, and our automatic completion routine
 1115          * does not remove it.
 1116          */
 1117         vm_object_pip_add(mreq->object, bp->b_npages);
 1118         lastpindex = m[j-1]->pindex;
 1119 
 1120         /*
 1121          * perform the I/O.  NOTE!!!  bp cannot be considered valid after
 1122          * this point because we automatically release it on completion.
 1123          * Instead, we look at the one page we are interested in which we
 1124          * still hold a lock on even through the I/O completion.
 1125          *
 1126          * The other pages in our m[] array are also released on completion,
 1127          * so we cannot assume they are valid anymore either.
 1128          *
 1129          * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
 1130          */
 1131         BUF_KERNPROC(bp);
 1132         BUF_STRATEGY(bp);
 1133 
 1134         /*
 1135          * wait for the page we want to complete.  PG_SWAPINPROG is always
 1136          * cleared on completion.  If an I/O error occurs, SWAPBLK_NONE
 1137          * is set in the meta-data.
 1138          */
 1139         s = splvm();
 1140         while ((mreq->flags & PG_SWAPINPROG) != 0) {
 1141                 vm_page_flag_set(mreq, PG_WANTED | PG_REFERENCED);
 1142                 cnt.v_intrans++;
 1143                 if (tsleep(mreq, PSWP, "swread", hz*20)) {
 1144                         printf(
 1145                             "swap_pager: indefinite wait buffer: device:"
 1146                                 " %s, blkno: %ld, size: %ld\n",
 1147                             devtoname(bp->b_dev), (long)bp->b_blkno,
 1148                             bp->b_bcount
 1149                         );
 1150                 }
 1151         }
 1152         splx(s);
 1153 
 1154         /*
 1155          * mreq is left busied after completion, but all the other pages
 1156          * are freed.  If we had an unrecoverable read error the page will
 1157          * not be valid.
 1158          */
 1159         if (mreq->valid != VM_PAGE_BITS_ALL) {
 1160                 return (VM_PAGER_ERROR);
 1161         } else {
 1162                 return (VM_PAGER_OK);
 1163         }
 1164 
 1165         /*
 1166          * A final note: in a low swap situation, we cannot deallocate swap
 1167          * and mark a page dirty here because the caller is likely to mark
 1168          * the page clean when we return, causing the page to possibly revert 
 1169          * to all-zero's later.
 1170          */
 1171 }
 1172 
 1173 /*
 1174  *      swap_pager_putpages: 
 1175  *
 1176  *      Assign swap (if necessary) and initiate I/O on the specified pages.
 1177  *
 1178  *      We support both OBJT_DEFAULT and OBJT_SWAP objects.  DEFAULT objects
 1179  *      are automatically converted to SWAP objects.
 1180  *
 1181  *      In a low memory situation we may block in VOP_STRATEGY(), but the new 
 1182  *      vm_page reservation system coupled with properly written VFS devices 
 1183  *      should ensure that no low-memory deadlock occurs.  This is an area
 1184  *      which needs work.
 1185  *
 1186  *      The parent has N vm_object_pip_add() references prior to
 1187  *      calling us and will remove references for rtvals[] that are
 1188  *      not set to VM_PAGER_PEND.  We need to remove the rest on I/O
 1189  *      completion.
 1190  *
 1191  *      The parent has soft-busy'd the pages it passes us and will unbusy
 1192  *      those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
 1193  *      We need to unbusy the rest on I/O completion.
 1194  */
 1195 void
 1196 swap_pager_putpages(object, m, count, sync, rtvals)
 1197         vm_object_t object;
 1198         vm_page_t *m;
 1199         int count;
 1200         boolean_t sync;
 1201         int *rtvals;
 1202 {
 1203         int i;
 1204         int n = 0;
 1205 
 1206         GIANT_REQUIRED;
 1207         if (count && m[0]->object != object) {
 1208                 panic("swap_pager_putpages: object mismatch %p/%p", 
 1209                     object, 
 1210                     m[0]->object
 1211                 );
 1212         }
 1213         /*
 1214          * Step 1
 1215          *
 1216          * Turn object into OBJT_SWAP
 1217          * check for bogus sysops
 1218          * force sync if not pageout process
 1219          */
 1220         if (object->type != OBJT_SWAP)
 1221                 swp_pager_meta_build(object, 0, SWAPBLK_NONE);
 1222 
 1223         if (curproc != pageproc)
 1224                 sync = TRUE;
 1225 
 1226         /*
 1227          * Step 2
 1228          *
 1229          * Update nsw parameters from swap_async_max sysctl values.  
 1230          * Do not let the sysop crash the machine with bogus numbers.
 1231          */
 1232         mtx_lock(&pbuf_mtx);
 1233         if (swap_async_max != nsw_wcount_async_max) {
 1234                 int n;
 1235                 int s;
 1236 
 1237                 /*
 1238                  * limit range
 1239                  */
 1240                 if ((n = swap_async_max) > nswbuf / 2)
 1241                         n = nswbuf / 2;
 1242                 if (n < 1)
 1243                         n = 1;
 1244                 swap_async_max = n;
 1245 
 1246                 /*
 1247                  * Adjust difference ( if possible ).  If the current async
 1248                  * count is too low, we may not be able to make the adjustment
 1249                  * at this time.
 1250                  */
 1251                 s = splvm();
 1252                 n -= nsw_wcount_async_max;
 1253                 if (nsw_wcount_async + n >= 0) {
 1254                         nsw_wcount_async += n;
 1255                         nsw_wcount_async_max += n;
 1256                         wakeup(&nsw_wcount_async);
 1257                 }
 1258                 splx(s);
 1259         }
 1260         mtx_unlock(&pbuf_mtx);
 1261 
 1262         /*
 1263          * Step 3
 1264          *
 1265          * Assign swap blocks and issue I/O.  We reallocate swap on the fly.
 1266          * The page is left dirty until the pageout operation completes
 1267          * successfully.
 1268          */
 1269         for (i = 0; i < count; i += n) {
 1270                 int s;
 1271                 int j;
 1272                 struct buf *bp;
 1273                 daddr_t blk;
 1274 
 1275                 /*
 1276                  * Maximum I/O size is limited by a number of factors.
 1277                  */
 1278                 n = min(BLIST_MAX_ALLOC, count - i);
 1279                 n = min(n, nsw_cluster_max);
 1280 
 1281                 s = splvm();
 1282 
 1283                 /*
 1284                  * Get biggest block of swap we can.  If we fail, fall
 1285                  * back and try to allocate a smaller block.  Don't go
 1286                  * overboard trying to allocate space if it would overly
 1287                  * fragment swap.
 1288                  */
 1289                 while (
 1290                     (blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE &&
 1291                     n > 4
 1292                 ) {
 1293                         n >>= 1;
 1294                 }
 1295                 if (blk == SWAPBLK_NONE) {
 1296                         for (j = 0; j < n; ++j)
 1297                                 rtvals[i+j] = VM_PAGER_FAIL;
 1298                         splx(s);
 1299                         continue;
 1300                 }
 1301 
 1302                 /*
 1303                  * The I/O we are constructing cannot cross a physical
 1304                  * disk boundary in the swap stripe.  Note: we are still
 1305                  * at splvm().
 1306                  */
 1307                 if ((blk ^ (blk + n)) & dmmax_mask) {
 1308                         j = ((blk + dmmax) & dmmax_mask) - blk;
 1309                         swp_pager_freeswapspace(blk + j, n - j);
 1310                         n = j;
 1311                 }
 1312 
 1313                 /*
 1314                  * All I/O parameters have been satisfied, build the I/O
 1315                  * request and assign the swap space.
 1316                  *
 1317                  * NOTE: B_PAGING is set by pbgetvp()
 1318                  */
 1319                 if (sync == TRUE) {
 1320                         bp = getpbuf(&nsw_wcount_sync);
 1321                 } else {
 1322                         bp = getpbuf(&nsw_wcount_async);
 1323                         bp->b_flags = B_ASYNC;
 1324                 }
 1325                 bp->b_iocmd = BIO_WRITE;
 1326                 bp->b_spc = NULL;       /* not used, but NULL-out anyway */
 1327 
 1328                 pmap_qenter((vm_offset_t)bp->b_data, &m[i], n);
 1329 
 1330                 bp->b_rcred = crhold(thread0.td_ucred);
 1331                 bp->b_wcred = crhold(thread0.td_ucred);
 1332                 bp->b_bcount = PAGE_SIZE * n;
 1333                 bp->b_bufsize = PAGE_SIZE * n;
 1334                 bp->b_blkno = blk;
 1335 
 1336                 pbgetvp(swapdev_vp, bp);
 1337 
 1338                 for (j = 0; j < n; ++j) {
 1339                         vm_page_t mreq = m[i+j];
 1340 
 1341                         swp_pager_meta_build(
 1342                             mreq->object, 
 1343                             mreq->pindex,
 1344                             blk + j
 1345                         );
 1346                         vm_page_dirty(mreq);
 1347                         rtvals[i+j] = VM_PAGER_OK;
 1348 
 1349                         vm_page_flag_set(mreq, PG_SWAPINPROG);
 1350                         bp->b_pages[j] = mreq;
 1351                 }
 1352                 bp->b_npages = n;
 1353                 /*
 1354                  * Must set dirty range for NFS to work.
 1355                  */
 1356                 bp->b_dirtyoff = 0;
 1357                 bp->b_dirtyend = bp->b_bcount;
 1358 
 1359                 cnt.v_swapout++;
 1360                 cnt.v_swappgsout += bp->b_npages;
 1361                 VI_LOCK(swapdev_vp);
 1362                 swapdev_vp->v_numoutput++;
 1363                 VI_UNLOCK(swapdev_vp);
 1364 
 1365                 splx(s);
 1366 
 1367                 /*
 1368                  * asynchronous
 1369                  *
 1370                  * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
 1371                  */
 1372                 if (sync == FALSE) {
 1373                         bp->b_iodone = swp_pager_async_iodone;
 1374                         BUF_KERNPROC(bp);
 1375                         BUF_STRATEGY(bp);
 1376 
 1377                         for (j = 0; j < n; ++j)
 1378                                 rtvals[i+j] = VM_PAGER_PEND;
 1379                         /* restart outer loop */
 1380                         continue;
 1381                 }
 1382 
 1383                 /*
 1384                  * synchronous
 1385                  *
 1386                  * NOTE: b_blkno is destroyed by the call to BUF_STRATEGY()
 1387                  */
 1388                 bp->b_iodone = swp_pager_sync_iodone;
 1389                 BUF_STRATEGY(bp);
 1390 
 1391                 /*
 1392                  * Wait for the sync I/O to complete, then update rtvals.
 1393                  * We just set the rtvals[] to VM_PAGER_PEND so we can call
 1394                  * our async completion routine at the end, thus avoiding a
 1395                  * double-free.
 1396                  */
 1397                 s = splbio();
 1398                 while ((bp->b_flags & B_DONE) == 0) {
 1399                         tsleep(bp, PVM, "swwrt", 0);
 1400                 }
 1401                 for (j = 0; j < n; ++j)
 1402                         rtvals[i+j] = VM_PAGER_PEND;
 1403                 /*
 1404                  * Now that we are through with the bp, we can call the
 1405                  * normal async completion, which frees everything up.
 1406                  */
 1407                 swp_pager_async_iodone(bp);
 1408                 splx(s);
 1409         }
 1410 }
 1411 
 1412 /*
 1413  *      swp_pager_sync_iodone:
 1414  *
 1415  *      Completion routine for synchronous reads and writes from/to swap.
 1416  *      We just mark the bp as complete and wake up anyone waiting on it.
 1417  *
 1418  *      This routine may not block.  This routine is called at splbio() or better.
 1419  */
 1420 static void
 1421 swp_pager_sync_iodone(bp)
 1422         struct buf *bp;
 1423 {
 1424         bp->b_flags |= B_DONE;
 1425         bp->b_flags &= ~B_ASYNC;
 1426         wakeup(bp);
 1427 }
 1428 
 1429 /*
 1430  *      swp_pager_async_iodone:
 1431  *
 1432  *      Completion routine for asynchronous reads and writes from/to swap.
 1433  *      Also called manually by synchronous code to finish up a bp.
 1434  *
 1435  *      For READ operations, the pages are PG_BUSY'd.  For WRITE operations, 
 1436  *      the pages are vm_page_t->busy'd.  For READ operations, we unbusy 
 1437  *      (PG_BUSY) all pages except the 'main' request page.  For WRITE 
 1438  *      operations, we unbusy (vm_page_t->busy) all pages ( we can do this 
 1439  *      because we marked them all VM_PAGER_PEND on return from putpages ).
 1440  *
 1441  *      This routine may not block.
 1442  *      This routine is called at splbio() or better
 1443  *
 1444  *      We up ourselves to splvm() as required for various vm_page related
 1445  *      calls.
 1446  */
 1447 static void
 1448 swp_pager_async_iodone(bp)
 1449         struct buf *bp;
 1450 {
 1451         int s;
 1452         int i;
 1453         vm_object_t object = NULL;
 1454 
 1455         GIANT_REQUIRED;
 1456         bp->b_flags |= B_DONE;
 1457 
 1458         /*
 1459          * report error
 1460          */
 1461         if (bp->b_ioflags & BIO_ERROR) {
 1462                 printf(
 1463                     "swap_pager: I/O error - %s failed; blkno %ld, "
 1464                         "size %ld, error %d\n",
 1465                     ((bp->b_iocmd == BIO_READ) ? "pagein" : "pageout"),
 1466                     (long)bp->b_blkno, 
 1467                     (long)bp->b_bcount,
 1468                     bp->b_error
 1469                 );
 1470         }
 1471 
 1472         /*
 1473          * set object, raise to splvm().
 1474          */
 1475         if (bp->b_npages)
 1476                 object = bp->b_pages[0]->object;
 1477         s = splvm();
 1478 
 1479         /*
 1480          * remove the mapping for kernel virtual
 1481          */
 1482         pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages);
 1483 
 1484         vm_page_lock_queues();
 1485         /*
 1486          * cleanup pages.  If an error occurs writing to swap, we are in
 1487          * very serious trouble.  If it happens to be a disk error, though,
 1488          * we may be able to recover by reassigning the swap later on.  So
 1489          * in this case we remove the m->swapblk assignment for the page 
 1490          * but do not free it back to the swap bitmap.  The erroneous
 1491          * block(s) are never reallocated as swap.  Redirty the page and continue.
 1492          */
 1493         for (i = 0; i < bp->b_npages; ++i) {
 1494                 vm_page_t m = bp->b_pages[i];
 1495 
 1496                 vm_page_flag_clear(m, PG_SWAPINPROG);
 1497 
 1498                 if (bp->b_ioflags & BIO_ERROR) {
 1499                         /*
 1500                          * If an error occurs I'd love to throw the swapblk
 1501                          * away without freeing it back to swapspace, so it
 1502                          * can never be used again.  But I can't from an 
 1503                          * interrupt.
 1504                          */
 1505                         if (bp->b_iocmd == BIO_READ) {
 1506                                 /*
 1507                                  * When reading, reqpage needs to stay
 1508                                  * locked for the parent, but all other
 1509                                  * pages can be freed.  We still want to
 1510                                  * wakeup the parent waiting on the page,
 1511                                  * though.  ( also: pg_reqpage can be -1 and 
 1512                                  * not match anything ).
 1513                                  *
 1514                                  * We have to wake specifically requested pages
 1515                                  * up too because we cleared PG_SWAPINPROG and
 1516                                  * someone may be waiting for that.
 1517                                  *
 1518                                  * NOTE: for reads, m->dirty will probably
 1519                                  * be overridden by the original caller of
 1520                                  * getpages so don't play cute tricks here.
 1521                                  *
 1522                                  * XXX IT IS NOT LEGAL TO FREE THE PAGE HERE
 1523                                  * AS THIS MESSES WITH object->memq, and it is
 1524                                  * not legal to mess with object->memq from an
 1525                                  * interrupt.
 1526                                  */
 1527                                 m->valid = 0;
 1528                                 vm_page_flag_clear(m, PG_ZERO);
 1529                                 if (i != bp->b_pager.pg_reqpage)
 1530                                         vm_page_free(m);
 1531                                 else
 1532                                         vm_page_flash(m);
 1533                                 /*
 1534                                  * If i == bp->b_pager.pg_reqpage, do not wake 
 1535                                  * the page up.  The caller needs to.
 1536                                  */
 1537                         } else {
 1538                                 /*
 1539                                  * If a write error occurs, reactivate page
 1540                                  * so it doesn't clog the inactive list,
 1541                                  * then finish the I/O.
 1542                                  */
 1543                                 vm_page_dirty(m);
 1544                                 vm_page_activate(m);
 1545                                 vm_page_io_finish(m);
 1546                         }
 1547                 } else if (bp->b_iocmd == BIO_READ) {
 1548                         /*
 1549                          * For read success, clear dirty bits.  Nobody should
 1550                          * have this page mapped but don't take any chances,
 1551                          * make sure the pmap modify bits are also cleared.
 1552                          *
 1553                          * NOTE: for reads, m->dirty will probably be 
 1554                          * overridden by the original caller of getpages so
 1555                          * we cannot set them in order to free the underlying
 1556                          * swap in a low-swap situation.  I don't think we'd
 1557                          * want to do that anyway, but it was an optimization
 1558                          * that existed in the old swapper for a time before
 1559                          * it got ripped out due to precisely this problem.
 1560                          *
 1561                          * clear PG_ZERO in page.
 1562                          *
 1563                          * If not the requested page then deactivate it.
 1564                          *
 1565                          * Note that the requested page, reqpage, is left
 1566                          * busied, but we still have to wake it up.  The
 1567                          * other pages are released (unbusied) by 
 1568                          * vm_page_wakeup().  We do not set reqpage's
 1569                          * valid bits here, it is up to the caller.
 1570                          */
 1571                         pmap_clear_modify(m);
 1572                         m->valid = VM_PAGE_BITS_ALL;
 1573                         vm_page_undirty(m);
 1574                         vm_page_flag_clear(m, PG_ZERO);
 1575 
 1576                         /*
 1577                          * We have to wake specifically requested pages
 1578                          * up too because we cleared PG_SWAPINPROG and
 1579                          * could be waiting for it in getpages.  However,
 1580                          * be sure to not unbusy getpages specifically
 1581                          * requested page - getpages expects it to be 
 1582                          * left busy.
 1583                          */
 1584                         if (i != bp->b_pager.pg_reqpage) {
 1585                                 vm_page_deactivate(m);
 1586                                 vm_page_wakeup(m);
 1587                         } else {
 1588                                 vm_page_flash(m);
 1589                         }
 1590                 } else {
 1591                         /*
 1592                          * For write success, clear the modify and dirty 
 1593                          * status, then finish the I/O ( which decrements the 
 1594          * busy count and possibly wakes waiters up ).
 1595                          */
 1596                         pmap_clear_modify(m);
 1597                         vm_page_undirty(m);
 1598                         vm_page_io_finish(m);
 1599                         if (!vm_page_count_severe() || !vm_page_try_to_cache(m))
 1600                                 pmap_page_protect(m, VM_PROT_READ);
 1601                 }
 1602         }
 1603         vm_page_unlock_queues();
 1604 
 1605         /*
 1606          * adjust pip.  NOTE: the original parent may still have its own
 1607          * pip refs on the object.
 1608          */
 1609         if (object)
 1610                 vm_object_pip_wakeupn(object, bp->b_npages);
 1611 
 1612         /*
 1613          * release the physical I/O buffer
 1614          */
 1615         relpbuf(
 1616             bp, 
 1617             ((bp->b_iocmd == BIO_READ) ? &nsw_rcount : 
 1618                 ((bp->b_flags & B_ASYNC) ? 
 1619                     &nsw_wcount_async : 
 1620                     &nsw_wcount_sync
 1621                 )
 1622             )
 1623         );
 1624         splx(s);
 1625 }
 1626 
 1627 /************************************************************************
 1628  *                              SWAP META DATA                          *
 1629  ************************************************************************
 1630  *
 1631  *      These routines manipulate the swap metadata stored in the 
 1632  *      OBJT_SWAP object.  All swp_*() routines must be called at
 1633  *      splvm() because swap can be freed up by the low level vm_page
 1634  *      code which might be called from interrupts beyond what splbio() covers.
 1635  *
 1636  *      Swap metadata is implemented with a global hash and not directly
 1637  *      linked into the object.  Instead the object simply contains
 1638  *      appropriate tracking counters.
 1639  */
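/*
 * Reference sketch (illustrative, inferred from the usage below; the
 * authoritative definition lives in the swap pager header):
 */
#if 0
struct swblock {
        struct swblock  *swb_hnext;     /* hash collision chain */
        vm_object_t     swb_object;     /* owning object */
        vm_pindex_t     swb_index;      /* first page index of this chunk */
        int             swb_count;      /* non-SWAPBLK_NONE entries below */
        daddr_t         swb_pages[SWAP_META_PAGES]; /* assigned swap blocks */
};
#endif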
 1640 
 1641 /*
 1642  * SWP_PAGER_HASH() -   hash swap meta data
 1643  *
 1644  *      This is an inline helper function which looks up the swblock hash
 1645  *      chain entry for the given object and page index.  It returns a
 1646  *      pointer to the pointer to the matching swblock, or a pointer to a
 1647  *      NULL pointer if no swblock exists for that chunk.
 1648  *
 1649  *      This routine must be called at splvm().
 1650  */
 1651 static __inline struct swblock **
 1652 swp_pager_hash(vm_object_t object, vm_pindex_t index)
 1653 {
 1654         struct swblock **pswap;
 1655         struct swblock *swap;
 1656 
 1657         index &= ~(vm_pindex_t)SWAP_META_MASK;
 1658         pswap = &swhash[(index ^ (int)(intptr_t)object) & swhash_mask];
 1659         while ((swap = *pswap) != NULL) {
 1660                 if (swap->swb_object == object &&
 1661                     swap->swb_index == index
 1662                 ) {
 1663                         break;
 1664                 }
 1665                 pswap = &swap->swb_hnext;
 1666         }
 1667         return (pswap);
 1668 }
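/*
 * Usage sketch (illustrative, not part of the original source): a
 * side-effect-free lookup through the hash, as swp_pager_meta_ctl()
 * does below.
 */
#if 0
        struct swblock **pswap, *swap;
        daddr_t blk = SWAPBLK_NONE;

        pswap = swp_pager_hash(object, pindex);
        if ((swap = *pswap) != NULL)
                blk = swap->swb_pages[pindex & SWAP_META_MASK];
#endif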
 1669 
 1670 /*
 1671  * SWP_PAGER_META_BUILD() -     add swap block to swap meta data for object
 1672  *
 1673  *      We first convert the object to a swap object if it is a default
 1674  *      object.
 1675  *
 1676  *      The specified swapblk is added to the object's swap metadata.  If
 1677  *      the swapblk is not valid, it is freed instead.  Any previously
 1678  *      assigned swapblk is freed.
 1679  *
 1680  *      This routine must be called at splvm(), except when used to convert
 1681  *      an OBJT_DEFAULT object into an OBJT_SWAP object.
 1682  */
 1683 static void
 1684 swp_pager_meta_build(
 1685         vm_object_t object, 
 1686         vm_pindex_t pindex,
 1687         daddr_t swapblk
 1688 ) {
 1689         struct swblock *swap;
 1690         struct swblock **pswap;
 1691         int idx;
 1692 
 1693         GIANT_REQUIRED;
 1694         /*
 1695          * Convert default object to swap object if necessary
 1696          */
 1697         if (object->type != OBJT_SWAP) {
 1698                 object->type = OBJT_SWAP;
 1699                 object->un_pager.swp.swp_bcount = 0;
 1700 
 1701                 mtx_lock(&sw_alloc_mtx);
 1702                 if (object->handle != NULL) {
 1703                         TAILQ_INSERT_TAIL(
 1704                             NOBJLIST(object->handle),
 1705                             object, 
 1706                             pager_object_list
 1707                         );
 1708                 } else {
 1709                         TAILQ_INSERT_TAIL(
 1710                             &swap_pager_un_object_list,
 1711                             object, 
 1712                             pager_object_list
 1713                         );
 1714                 }
 1715                 mtx_unlock(&sw_alloc_mtx);
 1716         }
 1717         
 1718         /*
 1719          * Locate the hash entry.  If not found, create one; but if we
 1720          * aren't adding anything, just return.  If we run out of space in
 1721          * the map, we wait and, since the hash table may have changed, retry.
 1722          */
 1723 retry:
 1724         pswap = swp_pager_hash(object, pindex);
 1725 
 1726         if ((swap = *pswap) == NULL) {
 1727                 int i;
 1728 
 1729                 if (swapblk == SWAPBLK_NONE)
 1730                         return;
 1731 
 1732                 swap = *pswap = uma_zalloc(swap_zone, M_NOWAIT);
 1733                 if (swap == NULL) {
 1734                         VM_WAIT;
 1735                         goto retry;
 1736                 }
 1737 
 1738                 swap->swb_hnext = NULL;
 1739                 swap->swb_object = object;
 1740                 swap->swb_index = pindex & ~(vm_pindex_t)SWAP_META_MASK;
 1741                 swap->swb_count = 0;
 1742 
 1743                 ++object->un_pager.swp.swp_bcount;
 1744 
 1745                 for (i = 0; i < SWAP_META_PAGES; ++i)
 1746                         swap->swb_pages[i] = SWAPBLK_NONE;
 1747         }
 1748 
 1749         /*
 1750          * Delete prior contents of metadata
 1751          */
 1752         idx = pindex & SWAP_META_MASK;
 1753 
 1754         if (swap->swb_pages[idx] != SWAPBLK_NONE) {
 1755                 swp_pager_freeswapspace(swap->swb_pages[idx], 1);
 1756                 --swap->swb_count;
 1757         }
 1758 
 1759         /*
 1760          * Enter block into metadata
 1761          */
 1762         swap->swb_pages[idx] = swapblk;
 1763         if (swapblk != SWAPBLK_NONE)
 1764                 ++swap->swb_count;
 1765 }
 1766 
 1767 /*
 1768  * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
 1769  *
 1770  *      The requested range of blocks is freed, with any associated swap 
 1771  *      returned to the swap bitmap.
 1772  *
 1773  *      This routine will free swap metadata structures as they are cleaned 
 1774  *      out.  This routine does *NOT* operate on swap metadata associated
 1775  *      with resident pages.
 1776  *
 1777  *      This routine must be called at splvm()
 1778  */
 1779 static void
 1780 swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count)
 1781 {
 1782         GIANT_REQUIRED;
 1783 
 1784         if (object->type != OBJT_SWAP)
 1785                 return;
 1786 
 1787         while (count > 0) {
 1788                 struct swblock **pswap;
 1789                 struct swblock *swap;
 1790 
 1791                 pswap = swp_pager_hash(object, index);
 1792 
 1793                 if ((swap = *pswap) != NULL) {
 1794                         daddr_t v = swap->swb_pages[index & SWAP_META_MASK];
 1795 
 1796                         if (v != SWAPBLK_NONE) {
 1797                                 swp_pager_freeswapspace(v, 1);
 1798                                 swap->swb_pages[index & SWAP_META_MASK] =
 1799                                         SWAPBLK_NONE;
 1800                                 if (--swap->swb_count == 0) {
 1801                                         *pswap = swap->swb_hnext;
 1802                                         uma_zfree(swap_zone, swap);
 1803                                         --object->un_pager.swp.swp_bcount;
 1804                                 }
 1805                         }
 1806                         --count;
 1807                         ++index;
 1808                 } else {
 1809                         int n = SWAP_META_PAGES - (index & SWAP_META_MASK);
 1810                         count -= n;
 1811                         index += n;
 1812                 }
 1813         }
 1814 }
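/*
 * Skip-ahead example (illustrative; SWAP_META_PAGES is assumed to be 16
 * here): freeing from index 21 when no swblock covers indices 16..31
 * computes n = 16 - (21 & 15) = 11, advancing index straight to 32 and
 * pruning the whole unpopulated chunk from the scan in one step.
 */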
 1815 
 1816 /*
 1817  * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
 1818  *
 1819  *      This routine locates and destroys all swap metadata associated with
 1820  *      an object.
 1821  *
 1822  *      This routine must be called at splvm()
 1823  */
 1824 static void
 1825 swp_pager_meta_free_all(vm_object_t object)
 1826 {
 1827         daddr_t index = 0;
 1828 
 1829         GIANT_REQUIRED;
 1830         
 1831         if (object->type != OBJT_SWAP)
 1832                 return;
 1833 
 1834         while (object->un_pager.swp.swp_bcount) {
 1835                 struct swblock **pswap;
 1836                 struct swblock *swap;
 1837 
 1838                 pswap = swp_pager_hash(object, index);
 1839                 if ((swap = *pswap) != NULL) {
 1840                         int i;
 1841 
 1842                         for (i = 0; i < SWAP_META_PAGES; ++i) {
 1843                                 daddr_t v = swap->swb_pages[i];
 1844                                 if (v != SWAPBLK_NONE) {
 1845                                         --swap->swb_count;
 1846                                         swp_pager_freeswapspace(v, 1);
 1847                                 }
 1848                         }
 1849                         if (swap->swb_count != 0)
 1850                                 panic("swp_pager_meta_free_all: swb_count != 0");
 1851                         *pswap = swap->swb_hnext;
 1852                         uma_zfree(swap_zone, swap);
 1853                         --object->un_pager.swp.swp_bcount;
 1854                 }
 1855                 index += SWAP_META_PAGES;
 1856                 if (index > 0x20000000)
 1857                         panic("swp_pager_meta_free_all: failed to locate all swap meta blocks");
 1858         }
 1859 }
 1860 
 1861 /*
 1862  * SWP_PAGER_METACTL() -  misc control of swap and vm_page_t meta data.
 1863  *
 1864  *      This routine is capable of looking up, popping, or freeing
 1865  *      swapblk assignments in the swap meta data or in the vm_page_t.
 1866  *      The routine typically returns the swapblk being looked up or popped,
 1867  *      or SWAPBLK_NONE if the block was freed or was invalid.
 1868  *      This routine will automatically free any invalid 
 1869  *      meta-data swapblks.
 1870  *
 1871  *      It is not possible to store invalid swapblks in the swap meta data
 1872  *      (other than a literal 'SWAPBLK_NONE'), so we don't bother checking.
 1873  *
 1874  *      When acting on a busy resident page and paging is in progress, we 
 1875  *      have to wait until paging is complete but otherwise can act on the 
 1876  *      busy page.
 1877  *
 1878  *      This routine must be called at splvm().
 1879  *
 1880  *      SWM_FREE        remove and free swap block from metadata
 1881  *      SWM_POP         remove from meta data but do not free it; pop it out
 1882  */
 1883 static daddr_t
 1884 swp_pager_meta_ctl(
 1885         vm_object_t object,
 1886         vm_pindex_t pindex,
 1887         int flags
 1888 ) {
 1889         struct swblock **pswap;
 1890         struct swblock *swap;
 1891         daddr_t r1;
 1892         int idx;
 1893 
 1894         GIANT_REQUIRED;
 1895         /*
 1896          * The meta data only exists if the object is OBJT_SWAP
 1897          * and even then might not be allocated yet.
 1898          */
 1899         if (object->type != OBJT_SWAP)
 1900                 return (SWAPBLK_NONE);
 1901 
 1902         r1 = SWAPBLK_NONE;
 1903         pswap = swp_pager_hash(object, pindex);
 1904 
 1905         if ((swap = *pswap) != NULL) {
 1906                 idx = pindex & SWAP_META_MASK;
 1907                 r1 = swap->swb_pages[idx];
 1908 
 1909                 if (r1 != SWAPBLK_NONE) {
 1910                         if (flags & SWM_FREE) {
 1911                                 swp_pager_freeswapspace(r1, 1);
 1912                                 r1 = SWAPBLK_NONE;
 1913                         }
 1914                         if (flags & (SWM_FREE|SWM_POP)) {
 1915                                 swap->swb_pages[idx] = SWAPBLK_NONE;
 1916                                 if (--swap->swb_count == 0) {
 1917                                         *pswap = swap->swb_hnext;
 1918                                         uma_zfree(swap_zone, swap);
 1919                                         --object->un_pager.swp.swp_bcount;
 1920                                 }
 1921                         } 
 1922                 }
 1923         }
 1924         return (r1);
 1925 }
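/*
 * Usage sketch (illustrative): the three modes of swp_pager_meta_ctl().
 */
#if 0
        daddr_t blk;

        blk = swp_pager_meta_ctl(object, pindex, 0);        /* look up only */
        blk = swp_pager_meta_ctl(object, pindex, SWM_POP);  /* remove, keep block */
        (void)swp_pager_meta_ctl(object, pindex, SWM_FREE); /* remove and free */
#endif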
 1926 
 1927 /********************************************************
 1928  *              CHAINING FUNCTIONS                      *
 1929  ********************************************************
 1930  *
 1931  *      These functions support recursion of I/O operations
 1932  *      on bp's, typically by chaining one or more 'child' bp's
 1933  *      to the parent.  Synchronous, asynchronous, and semi-synchronous
 1934  *      chaining is possible.
 1935  */
 1936 
 1937 /*
 1938  *      vm_pager_chain_iodone:
 1939  *
 1940  *      io completion routine for child bp.  Currently we fudge a bit
 1941  *      on dealing with b_resid.   Since users of these routines may issue
 1942  *      multiple children simultaneously, sequencing of the error can be lost.
 1943  */
 1944 static void
 1945 vm_pager_chain_iodone(struct buf *nbp)
 1946 {
 1947         struct bio *bp;
 1948         u_int *count;
 1949 
 1950         bp = nbp->b_caller1;
 1951         if (bp != NULL) {
 1952                 count = (u_int *)&(bp->bio_driver1);
 1953                 if (nbp->b_ioflags & BIO_ERROR) {
 1954                         bp->bio_flags |= BIO_ERROR;
 1955                         bp->bio_error = nbp->b_error;
 1956                 } else if (nbp->b_resid != 0) {
 1957                         bp->bio_flags |= BIO_ERROR;
 1958                         bp->bio_error = EINVAL;
 1959                 } else {
 1960                         bp->bio_resid -= nbp->b_bcount;
 1961                 }
 1962                 nbp->b_caller1 = NULL;
 1963                 --(*count);
 1964                 if (bp->bio_flags & BIO_FLAG1) {
 1965                         bp->bio_flags &= ~BIO_FLAG1;
 1966                         wakeup(bp);
 1967                 }
 1968         }
 1969         nbp->b_flags |= B_DONE;
 1970         nbp->b_flags &= ~B_ASYNC;
 1971         relpbuf(nbp, NULL);
 1972 }
 1973 
 1974 /*
 1975  *      getchainbuf:
 1976  *
 1977  *      Obtain a physical buffer and chain it to its parent buffer.  When
 1978  *      I/O completes, the parent buffer is signalled (a BIO_FLAG1 wakeup).
 1979  *      Errors are automatically propagated to the parent.
 1980  */
 1981 static struct buf *
 1982 getchainbuf(struct bio *bp, struct vnode *vp, int flags)
 1983 {
 1984         struct buf *nbp;
 1985         u_int *count;
 1986 
 1987         GIANT_REQUIRED;
 1988         nbp = getpbuf(NULL);
 1989         count = (u_int *)&(bp->bio_driver1);
 1990 
 1991         nbp->b_caller1 = bp;
 1992         ++(*count);
 1993 
 1994         if (*count > 4)
 1995                 waitchainbuf(bp, 4, 0);
 1996 
 1997         nbp->b_iocmd = bp->bio_cmd;
 1998         nbp->b_ioflags = 0;
 1999         nbp->b_flags = flags;
 2000         nbp->b_rcred = crhold(thread0.td_ucred);
 2001         nbp->b_wcred = crhold(thread0.td_ucred);
 2002         nbp->b_iodone = vm_pager_chain_iodone;
 2003 
 2004         if (vp)
 2005                 pbgetvp(vp, nbp);
 2006         return (nbp);
 2007 }
 2008 
 2009 static void
 2010 flushchainbuf(struct buf *nbp)
 2011 {
 2012         GIANT_REQUIRED;
 2013         if (nbp->b_bcount) {
 2014                 nbp->b_bufsize = nbp->b_bcount;
 2015                 if (nbp->b_iocmd == BIO_WRITE)
 2016                         nbp->b_dirtyend = nbp->b_bcount;
 2017                 BUF_KERNPROC(nbp);
 2018                 BUF_STRATEGY(nbp);
 2019         } else {
 2020                 bufdone(nbp);
 2021         }
 2022 }
 2023 
 2024 static void
 2025 waitchainbuf(struct bio *bp, int limit, int done)
 2026 {
 2027         int s;
 2028         u_int *count;
 2029 
 2030         GIANT_REQUIRED;
 2031         count = (u_int *)&(bp->bio_driver1);
 2032         s = splbio();
 2033         while (*count > limit) {
 2034                 bp->bio_flags |= BIO_FLAG1;
 2035                 tsleep(bp, PRIBIO + 4, "bpchain", 0);
 2036         }
 2037         if (done) {
 2038                 if (bp->bio_resid != 0 && !(bp->bio_flags & BIO_ERROR)) {
 2039                         bp->bio_flags |= BIO_ERROR;
 2040                         bp->bio_error = EINVAL;
 2041                 }
 2042                 biodone(bp);
 2043         }
 2044         splx(s);
 2045 }
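/*
 * Usage sketch (illustrative; the parent bio 'bp' and vnode 'vp' are
 * assumed to be set up by the caller): split work into child buffers,
 * start them, then drain the chain and complete the parent.
 */
#if 0
        struct buf *nbp;

        nbp = getchainbuf(bp, vp, B_ASYNC);
        /* ... fill in nbp->b_data, nbp->b_bcount, nbp->b_blkno ... */
        flushchainbuf(nbp);     /* issues BUF_STRATEGY() or bufdone() */
        waitchainbuf(bp, 0, 1); /* drain the chain, then biodone(bp) */
#endif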
 2046 

This page is part of the FreeBSD/Linux Kernel Cross-Reference and was automatically generated using a modified version of the LXR engine.