FreeBSD/Linux Kernel Cross Reference
sys/vm/swap_pager.c


    1 /*
    2  * Copyright (c) 1998 Matthew Dillon,
    3  * Copyright (c) 1994 John S. Dyson
    4  * Copyright (c) 1990 University of Utah.
    5  * Copyright (c) 1991, 1993
    6  *      The Regents of the University of California.  All rights reserved.
    7  *
    8  * This code is derived from software contributed to Berkeley by
    9  * the Systems Programming Group of the University of Utah Computer
   10  * Science Department.
   11  *
   12  * Redistribution and use in source and binary forms, with or without
   13  * modification, are permitted provided that the following conditions
   14  * are met:
   15  * 1. Redistributions of source code must retain the above copyright
   16  *    notice, this list of conditions and the following disclaimer.
   17  * 2. Redistributions in binary form must reproduce the above copyright
   18  *    notice, this list of conditions and the following disclaimer in the
   19  *    documentation and/or other materials provided with the distribution.
   20  * 3. All advertising materials mentioning features or use of this software
   21  *    must display the following acknowledgement:
   22  *      This product includes software developed by the University of
   23  *      California, Berkeley and its contributors.
   24  * 4. Neither the name of the University nor the names of its contributors
   25  *    may be used to endorse or promote products derived from this software
   26  *    without specific prior written permission.
   27  *
   28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   38  * SUCH DAMAGE.
   39  *
   40  *                              New Swap System
   41  *                              Matthew Dillon
   42  *
   43  * Radix Bitmap 'blists'.
   44  *
   45  *      - The new swapper uses the new radix bitmap code.  This should scale
   46  *        to arbitrarily small or arbitrarily large swap spaces and an almost
   47  *        arbitrary degree of fragmentation.
   48  *
   49  * Features:
   50  *
   51  *      - on the fly reallocation of swap during putpages.  The new system
   52  *        does not try to keep previously allocated swap blocks for dirty
   53  *        pages.  
   54  *
   55  *      - on the fly deallocation of swap
   56  *
   57  *      - No more garbage collection required.  Unnecessarily allocated swap
   58  *        blocks only exist for dirty vm_page_t's now and these are already
   59  *        cycled (in a high-load system) by the pager.  We also do on-the-fly
   60  *        removal of invalidated swap blocks when a page is destroyed
   61  *        or renamed.
   62  *
   63  * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
   64  *
   65  *      @(#)swap_pager.c        8.9 (Berkeley) 3/21/94
   66  *
   67  * $FreeBSD$
   68  */
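
      /*
       * Illustrative sketch, assuming the sys/blist.h interface of this
       * era: the radix bitmap allocator referenced above hands out and
       * reclaims contiguous runs of page-sized swap blocks with
       *
       *      daddr_t blk = blist_alloc(swapblist, npages);
       *      if (blk != SWAPBLK_NONE)
       *              blist_free(swapblist, blk, npages);
       *
       * swp_pager_getswapspace() and swp_pager_freeswapspace() below wrap
       * exactly these calls and keep vm_swap_size in sync.
       */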
   69 
   70 #include <sys/param.h>
   71 #include <sys/systm.h>
   72 #include <sys/conf.h>
   73 #include <sys/kernel.h>
   74 #include <sys/proc.h>
   75 #include <sys/buf.h>
   76 #include <sys/vnode.h>
   77 #include <sys/malloc.h>
   78 #include <sys/vmmeter.h>
   79 #include <sys/sysctl.h>
   80 #include <sys/blist.h>
   81 #include <sys/lock.h>
   83 
   84 #ifndef MAX_PAGEOUT_CLUSTER
   85 #define MAX_PAGEOUT_CLUSTER 16
   86 #endif
   87 
   88 #define SWB_NPAGES      MAX_PAGEOUT_CLUSTER
   89 
   90 #include "opt_swap.h"
   91 #include <vm/vm.h>
   92 #include <vm/vm_object.h>
   93 #include <vm/vm_page.h>
   94 #include <vm/vm_pager.h>
   95 #include <vm/vm_pageout.h>
   96 #include <vm/swap_pager.h>
   97 #include <vm/vm_extern.h>
   98 #include <vm/vm_zone.h>
   99 
  100 #define SWM_FREE        0x02    /* free, period                 */
  101 #define SWM_POP         0x04    /* pop out                      */
  102 
  103 /*
  104  * vm_swap_size is in page-sized chunks now.  It was DEV_BSIZE'd chunks
  105  * in the old system.
  106  */
  107 
  108 extern int vm_swap_size;        /* number of free swap blocks, in pages */
  109 
  110 int swap_pager_full;            /* swap space exhaustion (task killing) */
  111 static int swap_pager_almost_full; /* swap space exhaustion (w/ hysteresis)*/
  112 static int nsw_rcount;          /* free read buffers                    */
  113 static int nsw_wcount_sync;     /* limit write buffers / synchronous    */
  114 static int nsw_wcount_async;    /* limit write buffers / asynchronous   */
  115 static int nsw_wcount_async_max;/* assigned maximum                     */
  116 static int nsw_cluster_max;     /* maximum VOP I/O allowed              */
  117 static int sw_alloc_interlock;  /* swap pager allocation interlock      */
  118 
  119 struct blist *swapblist;
  120 static struct swblock **swhash;
  121 static int swhash_mask;
  122 static int swap_async_max = 4;  /* maximum in-progress async I/O's      */
  123 
  124 extern struct vnode *swapdev_vp;        /* from vm_swap.c */
  125 
  126 SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
  127         CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops");
  128 
  129 /*
  130  * "named" and "unnamed" anon region objects.  Try to reduce the overhead
  131  * of searching a named list by hashing it just a little.
  132  */
  133 
  134 #define NOBJLISTS               8
  135 
  136 #define NOBJLIST(handle)        \
  137         (&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])
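
      /*
       * Example: a handle of 0x12345680 selects list
       * (0x12345680 >> 4) & 7 == 0x1234568 & 7 == 0, so a lookup for that
       * handle only searches swap_pager_object_list[0].  The >> 4 discards
       * low bits that are identical for most aligned handles.
       */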
  138 
  139 static struct pagerlst  swap_pager_object_list[NOBJLISTS];
  140 struct pagerlst         swap_pager_un_object_list;
  141 vm_zone_t               swap_zone;
  142 
  143 /*
  144  * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
  145  * calls hooked from other parts of the VM system and do not appear here.
  146  * (see vm/swap_pager.h).
  147  */
  148 
  149 static vm_object_t
  150                 swap_pager_alloc __P((void *handle, vm_ooffset_t size,
  151                                       vm_prot_t prot, vm_ooffset_t offset));
  152 static void     swap_pager_dealloc __P((vm_object_t object));
  153 static int      swap_pager_getpages __P((vm_object_t, vm_page_t *, int, int));
  154 static void     swap_pager_init __P((void));
  155 static void     swap_pager_unswapped __P((vm_page_t));
  156 static void     swap_pager_strategy __P((vm_object_t, struct buf *));
  157 
  158 struct pagerops swappagerops = {
  159         swap_pager_init,        /* early system initialization of pager */
  160         swap_pager_alloc,       /* allocate an OBJT_SWAP object         */
  161         swap_pager_dealloc,     /* deallocate an OBJT_SWAP object       */
  162         swap_pager_getpages,    /* pagein                               */
  163         swap_pager_putpages,    /* pageout                              */
  164         swap_pager_haspage,     /* get backing store status for page    */
  165         swap_pager_unswapped,   /* remove swap related to page          */
  166         swap_pager_strategy     /* pager strategy call                  */
  167 };
  168 
  169 /*
  170  * dmmax is in page-sized chunks with the new swap system.  It was
  171  * dev-bsized chunks in the old.  dmmax is always a power of 2.
  172  *
  173  * swap_*() routines are externally accessible.  swp_*() routines are
  174  * internal.
  175  */
  176 
  177 int dmmax;
  178 static int dmmax_mask;
  179 int nswap_lowat = 128;          /* in pages, swap_pager_almost_full warn */
  180 int nswap_hiwat = 512;          /* in pages, swap_pager_almost_full warn */
  181 
  182 static __inline void    swp_sizecheck __P((void));
  183 static void     swp_pager_sync_iodone __P((struct buf *bp));
  184 static void     swp_pager_async_iodone __P((struct buf *bp));
  185 
  186 /*
  187  * Swap bitmap functions
  188  */
  189 
  190 static __inline void    swp_pager_freeswapspace __P((daddr_t blk, int npages));
  191 static __inline daddr_t swp_pager_getswapspace __P((int npages));
  192 
  193 /*
  194  * Metadata functions
  195  */
  196 
  197 static void swp_pager_meta_build __P((vm_object_t, vm_pindex_t, daddr_t));
  198 static void swp_pager_meta_free __P((vm_object_t, vm_pindex_t, daddr_t));
  199 static void swp_pager_meta_free_all __P((vm_object_t));
  200 static daddr_t swp_pager_meta_ctl __P((vm_object_t, vm_pindex_t, int));
  201 
  202 /*
  203  * SWP_SIZECHECK() -    update swap_pager_full indication
  204  *      
  205  *      update the swap_pager_almost_full indication and warn when we are
  206  *      about to run out of swap space, using lowat/hiwat hysteresis.
  207  *
  208  *      Clear swap_pager_full ( task killing ) indication when lowat is met.
  209  *
  210  *      No restrictions on call
  211  *      This routine may not block.
  212  *      This routine must be called at splvm()
  213  */
  214 
  215 static __inline void
  216 swp_sizecheck()
  217 {
  218         if (vm_swap_size < nswap_lowat) {
  219                 if (swap_pager_almost_full == 0) {
  220                         printf("swap_pager: out of swap space\n");
  221                         swap_pager_almost_full = 1;
  222                 }
  223         } else {
  224                 swap_pager_full = 0;
  225                 if (vm_swap_size > nswap_hiwat)
  226                         swap_pager_almost_full = 0;
  227         }
  228 }
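
      /*
       * Example of the hysteresis above: with nswap_lowat = 128 and
       * nswap_hiwat = 512, the first drop of vm_swap_size below 128 pages
       * prints the warning once and latches swap_pager_almost_full; the
       * flag is only cleared once vm_swap_size climbs back above 512, so
       * the warning cannot flap around a single threshold.
       */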
  229 
  230 /*
  231  * SWAP_PAGER_INIT() -  initialize the swap pager!
  232  *
  233  *      Expected to be started from system init.  NOTE:  This code is run 
  234  *      before much else so be careful what you depend on.  Most of the VM
  235  *      system has yet to be initialized at this point.
  236  */
  237 
  238 static void
  239 swap_pager_init()
  240 {
  241         /*
  242          * Initialize object lists
  243          */
  244         int i;
  245 
  246         for (i = 0; i < NOBJLISTS; ++i)
  247                 TAILQ_INIT(&swap_pager_object_list[i]);
  248         TAILQ_INIT(&swap_pager_un_object_list);
  249 
  250         /*
  251          * Device Stripe, in PAGE_SIZE'd blocks
  252          */
  253 
  254         dmmax = SWB_NPAGES * 2;
  255         dmmax_mask = ~(dmmax - 1);
  256 }
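
      /*
       * Example: with MAX_PAGEOUT_CLUSTER = 16 this gives dmmax = 32 and
       * dmmax_mask = ~31.  A 5-block request starting at block 30 would
       * span blocks 30..34 and cross the stripe boundary at block 32,
       * since (30 ^ 35) & ~31 != 0; putpages below clips such a request
       * to j = ((30 + 32) & ~31) - 30 = 2 blocks and frees the excess.
       */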
  257 
  258 /*
  259  * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
  260  *
  261  *      Expected to be started from pageout process once, prior to entering
  262  *      its main loop.
  263  */
  264 
  265 void
  266 swap_pager_swap_init()
  267 {
  268         int n, n2;
  269 
  270         /*
  271          * Number of in-transit swap bp operations.  Don't
  272          * exhaust the pbufs completely.  Make sure we
  273          * initialize workable values (0 will work for hysteresis
  274          * but it isn't very efficient).
  275          *
  276          * The nsw_cluster_max is constrained by the bp->b_pages[]
  277          * array (MAXPHYS/PAGE_SIZE) and our locally defined
  278          * MAX_PAGEOUT_CLUSTER.   Also be aware that swap ops are
  279          * constrained by the swap device interleave stripe size.
  280          *
  281          * Currently we hardwire nsw_wcount_async to 4.  This limit is 
  282          * designed to prevent other I/O from having high latencies due to
  283          * our pageout I/O.  The value 4 works well for one or two active swap
  284          * devices but is probably a little low if you have more.  Even so,
  285          * a higher value would probably generate only a limited improvement
  286          * with three or four active swap devices since the system does not
  287          * typically have to pageout at extreme bandwidths.   We will want
   288          * at least 2 per swap device, and 4 is a pretty good value if you
  289          * have one NFS swap device due to the command/ack latency over NFS.
  290          * So it all works out pretty well.
  291          */
  292 
  293         nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);
  294 
  295         nsw_rcount = (nswbuf + 1) / 2;
  296         nsw_wcount_sync = (nswbuf + 3) / 4;
  297         nsw_wcount_async = 4;
  298         nsw_wcount_async_max = nsw_wcount_async;
  299 
  300         /*
  301          * Initialize our zone.  Right now I'm just guessing on the number
  302          * we need based on the number of pages in the system.  Each swblock
  303          * can hold 16 pages, so this is probably overkill.  This reservation
  304          * is typically limited to around 32MB by default.  The initial
  305          * guess caps the swap space at 8 times the size of RAM.
  306          */
  307         n = cnt.v_page_count / 2;
  308         if (maxswzone && n > maxswzone / sizeof(struct swblock))
  309                 n = maxswzone / sizeof(struct swblock);
  310         n2 = n;
  311 
  312         do {
  313                 swap_zone = zinit(
  314                         "SWAPMETA", 
  315                         sizeof(struct swblock), 
  316                         n,
  317                         ZONE_INTERRUPT, 
  318                         1);
  319                 if (swap_zone != NULL)
  320                         break;
  321                 /*
  322                  * if the allocation failed, try a zone two thirds the
  323                  * size of the previous attempt.
  324                  */
  325                 n -= ((n + 2) / 3);
  326         } while (n > 0);
  327 
  328         if (swap_zone == NULL)
  329                 panic("swap_pager_swap_init: swap_zone == NULL");
  330         if (n2 != n)
  331                 printf("Swap zone entries reduced from %d to %d.\n", n2, n);
  332         n2 = n;
  333 
  334         /*
  335          * Initialize our meta-data hash table.  The swapper does not need to
  336          * be quite as efficient as the VM system, so we do not use an 
  337          * oversized hash table.
  338          *
  339          *      n:              size of hash table, must be power of 2
  340          *      swhash_mask:    hash table index mask
  341          */
  342 
  343         for (n = 1; n < n2 / 8; n *= 2)
  344                 ;
  345 
  346         swhash = malloc(sizeof(struct swblock *) * n, M_VMPGDATA, M_WAITOK);
  347         bzero(swhash, sizeof(struct swblock *) * n);
  348 
  349         swhash_mask = n - 1;
  350 }
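
      /*
       * Sizing example, assuming the zinit() above succeeded on the first
       * try and no maxswzone clamp: with 128MB of RAM (32768 4K pages),
       * n starts at 16384 swblocks, so the power-of-2 loop stops at the
       * first power of 2 >= 16384 / 8, i.e. n = 2048 hash buckets and
       * swhash_mask = 2047.
       */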
  351 
  352 /*
  353  * SWAP_PAGER_ALLOC() - allocate a new OBJT_SWAP VM object and instantiate
  354  *                      its metadata structures.
  355  *
  356  *      This routine is called from the mmap and fork code to create a new
  357  *      OBJT_SWAP object.  We do this by creating an OBJT_DEFAULT object
  358  *      and then converting it with swp_pager_meta_build().
  359  *
  360  *      This routine may block in vm_object_allocate() and create a named
  361  *      object lookup race, so we must interlock.   We must also run at
  362  *      splvm() for the object lookup to handle races with interrupts, but
  363  *      we do not have to maintain splvm() in between the lookup and the
  364  *      add because (I believe) it is not possible to attempt to create
  365  *      a new swap object w/handle when a default object with that handle
  366  *      already exists.
  367  */
  368 
  369 static vm_object_t
  370 swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
  371                  vm_ooffset_t offset)
  372 {
  373         vm_object_t object;
  374 
  375         if (handle) {
  376                 /*
  377                  * Reference existing named region or allocate new one.  There
  378                  * should not be a race here against swp_pager_meta_build()
  379                  * as called from vm_page_remove() in regards to the lookup
  380                  * of the handle.
  381                  */
  382 
  383                 while (sw_alloc_interlock) {
  384                         sw_alloc_interlock = -1;
  385                         tsleep(&sw_alloc_interlock, PVM, "swpalc", 0);
  386                 }
  387                 sw_alloc_interlock = 1;
  388 
  389                 object = vm_pager_object_lookup(NOBJLIST(handle), handle);
  390 
  391                 if (object != NULL) {
  392                         vm_object_reference(object);
  393                 } else {
  394                         object = vm_object_allocate(OBJT_DEFAULT,
  395                                 OFF_TO_IDX(offset + PAGE_MASK + size));
  396                         object->handle = handle;
  397 
  398                         swp_pager_meta_build(object, 0, SWAPBLK_NONE);
  399                 }
  400 
  401                 if (sw_alloc_interlock < 0)
  402                         wakeup(&sw_alloc_interlock);
  403 
  404                 sw_alloc_interlock = 0;
  405         } else {
  406                 object = vm_object_allocate(OBJT_DEFAULT,
  407                         OFF_TO_IDX(offset + PAGE_MASK + size));
  408 
  409                 swp_pager_meta_build(object, 0, SWAPBLK_NONE);
  410         }
  411 
  412         return (object);
  413 }
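
      /*
       * The interlock above is a minimal sleep lock: 1 means held with no
       * waiters, -1 means held with waiters, 0 means free.  A blocked
       * allocator stores -1 before sleeping so the releaser knows a
       * wakeup(&sw_alloc_interlock) is needed; an uncontended release
       * just stores 0.
       */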
  414 
  415 /*
  416  * SWAP_PAGER_DEALLOC() -       remove swap metadata from object
  417  *
  418  *      The swap backing for the object is destroyed.  The code is 
  419  *      designed such that we can reinstantiate it later, but this
  420  *      routine is typically called only when the entire object is
  421  *      about to be destroyed.
  422  *
   423  *      This routine used to block, but no longer does.
  424  *
  425  *      The object must be locked or unreferenceable.
  426  */
  427 
  428 static void
  429 swap_pager_dealloc(object)
  430         vm_object_t object;
  431 {
  432         int s;
  433 
  434         /*
  435          * Remove from list right away so lookups will fail if we block for
  436          * pageout completion.
  437          */
  438 
  439         if (object->handle == NULL) {
  440                 TAILQ_REMOVE(&swap_pager_un_object_list, object, pager_object_list);
  441         } else {
  442                 TAILQ_REMOVE(NOBJLIST(object->handle), object, pager_object_list);
  443         }
  444 
  445         vm_object_pip_wait(object, "swpdea");
  446 
  447         /*
  448          * Free all remaining metadata.  We only bother to free it from 
  449          * the swap meta data.  We do not attempt to free swapblk's still
  450          * associated with vm_page_t's for this object.  We do not care
  451          * if paging is still in progress on some objects.
  452          */
  453         s = splvm();
  454         swp_pager_meta_free_all(object);
  455         splx(s);
  456 }
  457 
  458 /************************************************************************
  459  *                      SWAP PAGER BITMAP ROUTINES                      *
  460  ************************************************************************/
  461 
  462 /*
  463  * SWP_PAGER_GETSWAPSPACE() -   allocate raw swap space
  464  *
  465  *      Allocate swap for the requested number of pages.  The starting
  466  *      swap block number (a page index) is returned or SWAPBLK_NONE
  467  *      if the allocation failed.
  468  *
  469  *      Also has the side effect of advising that somebody made a mistake
  470  *      when they configured swap and didn't configure enough.
  471  *
  472  *      Must be called at splvm() to avoid races with bitmap frees from
  473  *      vm_page_remove() aka swap_pager_page_removed().
  474  *
  475  *      This routine may not block
  476  *      This routine must be called at splvm().
  477  */
  478 
  479 static __inline daddr_t
  480 swp_pager_getswapspace(npages)
  481         int npages;
  482 {
  483         daddr_t blk;
  484 
  485         if ((blk = blist_alloc(swapblist, npages)) == SWAPBLK_NONE) {
  486                 if (swap_pager_full != 2) {
  487                         printf("swap_pager_getswapspace: failed\n");
  488                         swap_pager_full = 2;
  489                         swap_pager_almost_full = 1;
  490                 }
  491         } else {
  492                 vm_swap_size -= npages;
  493                 swp_sizecheck();
  494         }
  495         return(blk);
  496 }
  497 
  498 /*
  499  * SWP_PAGER_FREESWAPSPACE() -  free raw swap space 
  500  *
  501  *      This routine returns the specified swap blocks back to the bitmap.
  502  *
  503  *      Note:  This routine may not block (it could in the old swap code),
  504  *      and through the use of the new blist routines it does not block.
  505  *
  506  *      We must be called at splvm() to avoid races with bitmap frees from
  507  *      vm_page_remove() aka swap_pager_page_removed().
  508  *
  509  *      This routine may not block
  510  *      This routine must be called at splvm().
  511  */
  512 
  513 static __inline void
  514 swp_pager_freeswapspace(blk, npages)
  515         daddr_t blk;
  516         int npages;
  517 {
  518         blist_free(swapblist, blk, npages);
  519         vm_swap_size += npages;
  520         swp_sizecheck();
  521 }
  522 
  523 /*
  524  * SWAP_PAGER_FREESPACE() -     frees swap blocks associated with a page
  525  *                              range within an object.
  526  *
  527  *      This is a globally accessible routine.
  528  *
  529  *      This routine removes swapblk assignments from swap metadata.
  530  *
  531  *      The external callers of this routine typically have already destroyed 
  532  *      or renamed vm_page_t's associated with this range in the object so 
  533  *      we should be ok.
  534  *
  535  *      This routine may be called at any spl.  We up our spl to splvm temporarily
  536  *      in order to perform the metadata removal.
  537  */
  538 
  539 void
  540 swap_pager_freespace(object, start, size)
  541         vm_object_t object;
  542         vm_pindex_t start;
  543         vm_size_t size;
  544 {
  545         int s = splvm();
  546         swp_pager_meta_free(object, start, size);
  547         splx(s);
  548 }
  549 
  550 /*
  551  * SWAP_PAGER_RESERVE() - reserve swap blocks in object
  552  *
  553  *      Assigns swap blocks to the specified range within the object.  The 
   554  *      swap blocks are not zeroed.  Any previous swap assignment is destroyed.
  555  *
  556  *      Returns 0 on success, -1 on failure.
  557  */
  558 
  559 int
  560 swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
  561 {
  562         int s;
  563         int n = 0;
  564         daddr_t blk = SWAPBLK_NONE;
  565         vm_pindex_t beg = start;        /* save start index */
  566 
  567         s = splvm();
  568         while (size) {
  569                 if (n == 0) {
  570                         n = BLIST_MAX_ALLOC;
  571                         while ((blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE) {
  572                                 n >>= 1;
  573                                 if (n == 0) {
  574                                         swp_pager_meta_free(object, beg, start - beg);
  575                                         splx(s);
  576                                         return(-1);
  577                                 }
  578                         }
  579                 }
  580                 swp_pager_meta_build(object, start, blk);
  581                 --size;
  582                 ++start;
  583                 ++blk;
  584                 --n;
  585         }
   586         if (n) swp_pager_freeswapspace(blk, n); /* return unused blocks */
  587         splx(s);
  588         return(0);
  589 }
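
      /*
       * Example: a 768-page reservation first asks for
       * BLIST_MAX_ALLOC-sized runs and halves the request (n >>= 1) on
       * each failure, so fragmented swap degrades into more, smaller
       * runs.  Only when even a single block cannot be found is the
       * partial reservation torn down and -1 returned.
       */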
  590 
  591 /*
  592  * SWAP_PAGER_COPY() -  copy blocks from source pager to destination pager
  593  *                      and destroy the source.
  594  *
  595  *      Copy any valid swapblks from the source to the destination.  In
  596  *      cases where both the source and destination have a valid swapblk,
  597  *      we keep the destination's.
  598  *
  599  *      This routine is allowed to block.  It may block allocating metadata
  600  *      indirectly through swp_pager_meta_build() or if paging is still in
  601  *      progress on the source. 
  602  *
  603  *      This routine can be called at any spl
  604  *
   605  *      XXX vm_page_collapse() kinda expects us not to block because we
   606  *      supposedly do not need to allocate memory.  For the moment we
   607  *      *may* have to get a little memory from the zone allocator, but
   608  *      it is taken from the interrupt memory, so we should be ok.
  609  *
  610  *      The source object contains no vm_page_t's (which is just as well)
  611  *
  612  *      The source object is of type OBJT_SWAP.
  613  *
  614  *      The source and destination objects must be locked or 
  615  *      inaccessible (XXX are they ?)
  616  */
  617 
  618 void
  619 swap_pager_copy(srcobject, dstobject, offset, destroysource)
  620         vm_object_t srcobject;
  621         vm_object_t dstobject;
  622         vm_pindex_t offset;
  623         int destroysource;
  624 {
  625         vm_pindex_t i;
  626         int s;
  627 
  628         s = splvm();
  629 
  630         /*
  631          * If destroysource is set, we remove the source object from the 
  632          * swap_pager internal queue now. 
  633          */
  634 
  635         if (destroysource) {
  636                 if (srcobject->handle == NULL) {
  637                         TAILQ_REMOVE(
  638                             &swap_pager_un_object_list, 
  639                             srcobject, 
  640                             pager_object_list
  641                         );
  642                 } else {
  643                         TAILQ_REMOVE(
  644                             NOBJLIST(srcobject->handle),
  645                             srcobject,
  646                             pager_object_list
  647                         );
  648                 }
  649         }
  650 
  651         /*
  652          * transfer source to destination.
  653          */
  654 
  655         for (i = 0; i < dstobject->size; ++i) {
  656                 daddr_t dstaddr;
  657 
  658                 /*
  659                  * Locate (without changing) the swapblk on the destination,
  660                  * unless it is invalid in which case free it silently, or
  661                  * if the destination is a resident page, in which case the
  662                  * source is thrown away.
  663                  */
  664 
  665                 dstaddr = swp_pager_meta_ctl(dstobject, i, 0);
  666 
  667                 if (dstaddr == SWAPBLK_NONE) {
  668                         /*
  669                          * Destination has no swapblk and is not resident,
  670                          * copy source.
  671                          */
  672                         daddr_t srcaddr;
  673 
  674                         srcaddr = swp_pager_meta_ctl(
  675                             srcobject, 
  676                             i + offset,
  677                             SWM_POP
  678                         );
  679 
  680                         if (srcaddr != SWAPBLK_NONE)
  681                                 swp_pager_meta_build(dstobject, i, srcaddr);
  682                 } else {
  683                         /*
  684                          * Destination has valid swapblk or it is represented
  685                          * by a resident page.  We destroy the sourceblock.
  686                          */
  687                         
  688                         swp_pager_meta_ctl(srcobject, i + offset, SWM_FREE);
  689                 }
  690         }
  691 
  692         /*
  693          * Free left over swap blocks in source.
  694          *
   695  *      We have to revert the type to OBJT_DEFAULT so we do not accidentally
  696          * double-remove the object from the swap queues.
  697          */
  698 
  699         if (destroysource) {
  700                 swp_pager_meta_free_all(srcobject);
  701                 /*
  702                  * Reverting the type is not necessary, the caller is going
  703                  * to destroy srcobject directly, but I'm doing it here
  704                  * for consistency since we've removed the object from its
  705                  * queues.
  706                  */
  707                 srcobject->type = OBJT_DEFAULT;
  708         }
  709         splx(s);
  710 }
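
      /*
       * Example: for each index i, if dstobject already has a swapblk (or
       * a resident page) the source block at i + offset is simply freed
       * via SWM_FREE; only when the destination has neither does the
       * source block move over, popped out with SWM_POP and rebuilt into
       * dstobject with swp_pager_meta_build().
       */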
  711 
  712 /*
  713  * SWAP_PAGER_HASPAGE() -       determine if we have good backing store for
  714  *                              the requested page.
  715  *
  716  *      We determine whether good backing store exists for the requested
  717  *      page and return TRUE if it does, FALSE if it doesn't.
  718  *
  719  *      If TRUE, we also try to determine how much valid, contiguous backing
  720  *      store exists before and after the requested page within a reasonable
  721  *      distance.  We do not try to restrict it to the swap device stripe
  722  *      (that is handled in getpages/putpages).  It probably isn't worth
  723  *      doing here.
  724  */
  725 
  726 boolean_t
  727 swap_pager_haspage(object, pindex, before, after)
  728         vm_object_t object;
  729         vm_pindex_t pindex;
  730         int *before;
  731         int *after;
  732 {
  733         daddr_t blk0;
  734         int s;
  735 
  736         /*
  737          * do we have good backing store at the requested index ?
  738          */
  739 
  740         s = splvm();
  741         blk0 = swp_pager_meta_ctl(object, pindex, 0);
  742 
  743         if (blk0 == SWAPBLK_NONE) {
  744                 splx(s);
  745                 if (before)
  746                         *before = 0;
  747                 if (after)
  748                         *after = 0;
  749                 return (FALSE);
  750         }
  751 
  752         /*
  753          * find backwards-looking contiguous good backing store
  754          */
  755 
  756         if (before != NULL) {
  757                 int i;
  758 
  759                 for (i = 1; i < (SWB_NPAGES/2); ++i) {
  760                         daddr_t blk;
  761 
  762                         if (i > pindex)
  763                                 break;
  764                         blk = swp_pager_meta_ctl(object, pindex - i, 0);
  765                         if (blk != blk0 - i)
  766                                 break;
  767                 }
  768                 *before = (i - 1);
  769         }
  770 
  771         /*
  772          * find forward-looking contiguous good backing store
  773          */
  774 
  775         if (after != NULL) {
  776                 int i;
  777 
  778                 for (i = 1; i < (SWB_NPAGES/2); ++i) {
  779                         daddr_t blk;
  780 
  781                         blk = swp_pager_meta_ctl(object, pindex + i, 0);
  782                         if (blk != blk0 + i)
  783                                 break;
  784                 }
  785                 *after = (i - 1);
  786         }
  787         splx(s);
  788         return (TRUE);
  789 }
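
      /*
       * Example: if pindex 10 maps to swap block 1000 and blocks 998..1003
       * back pindexes 8..13 contiguously, the two scans above report
       * *before = 2 and *after = 3 (each bounded by SWB_NPAGES/2), letting
       * getpages build a single 6-page cluster around the request.
       */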
  790 
  791 /*
  792  * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
  793  *
  794  *      This removes any associated swap backing store, whether valid or
  795  *      not, from the page.  
  796  *
  797  *      This routine is typically called when a page is made dirty, at
  798  *      which point any associated swap can be freed.  MADV_FREE also
  799  *      calls us in a special-case situation
  800  *
  801  *      NOTE!!!  If the page is clean and the swap was valid, the caller
  802  *      should make the page dirty before calling this routine.  This routine
  803  *      does NOT change the m->dirty status of the page.  Also: MADV_FREE
  804  *      depends on it.
  805  *
  806  *      This routine may not block
  807  *      This routine must be called at splvm()
  808  */
  809 
  810 static void
  811 swap_pager_unswapped(m)
  812         vm_page_t m;
  813 {
  814         swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
  815 }
  816 
  817 /*
  818  * SWAP_PAGER_STRATEGY() - read, write, free blocks
  819  *
  820  *      This implements the vm_pager_strategy() interface to swap and allows
  821  *      other parts of the system to directly access swap as backing store
  822  *      through vm_objects of type OBJT_SWAP.  This is intended to be a 
  823  *      cacheless interface ( i.e. caching occurs at higher levels ).
  824  *      Therefore we do not maintain any resident pages.  All I/O goes
  825  *      directly to and from the swap device.
  826  *      
  827  *      Note that b_blkno is scaled for PAGE_SIZE
  828  *
  829  *      We currently attempt to run I/O synchronously or asynchronously as
   830  *      the caller requests.  This isn't perfect because we lose error
  831  *      sequencing when we run multiple ops in parallel to satisfy a request.
  832  *      But this is swap, so we let it all hang out.
  833  */
  834 
  835 static void     
  836 swap_pager_strategy(vm_object_t object, struct buf *bp)
  837 {
  838         vm_pindex_t start;
  839         int count;
  840         int s;
  841         char *data;
  842         struct buf *nbp = NULL;
  843 
  844         if (bp->b_bcount & PAGE_MASK) {
  845                 bp->b_error = EINVAL;
  846                 bp->b_flags |= B_ERROR | B_INVAL;
  847                 biodone(bp);
  848                 printf("swap_pager_strategy: bp %p b_vp %p blk %d size %d, not page bounded\n", bp, bp->b_vp, (int)bp->b_pblkno, (int)bp->b_bcount);
  849                 return;
  850         }
  851 
  852         /*
  853          * Clear error indication, initialize page index, count, data pointer.
  854          */
  855 
  856         bp->b_error = 0;
  857         bp->b_flags &= ~B_ERROR;
  858         bp->b_resid = bp->b_bcount;
  859 
  860         start = bp->b_pblkno;
  861         count = howmany(bp->b_bcount, PAGE_SIZE);
  862         data = bp->b_data;
  863 
  864         s = splvm();
  865 
  866         /*
  867          * Deal with B_FREEBUF
  868          */
  869 
  870         if (bp->b_flags & B_FREEBUF) {
  871                 /*
  872                  * FREE PAGE(s) - destroy underlying swap that is no longer
  873                  *                needed.
  874                  */
  875                 swp_pager_meta_free(object, start, count);
  876                 splx(s);
  877                 bp->b_resid = 0;
  878                 biodone(bp);
  879                 return;
  880         }
  881 
  882         /*
  883          * Execute read or write
  884          */
  885 
  886         while (count > 0) {
  887                 daddr_t blk;
  888 
  889                 /*
  890                  * Obtain block.  If block not found and writing, allocate a
  891                  * new block and build it into the object.
  892                  */
  893 
  894                 blk = swp_pager_meta_ctl(object, start, 0);
  895                 if ((blk == SWAPBLK_NONE) && (bp->b_flags & B_READ) == 0) {
  896                         blk = swp_pager_getswapspace(1);
  897                         if (blk == SWAPBLK_NONE) {
  898                                 bp->b_error = ENOMEM;
  899                                 bp->b_flags |= B_ERROR;
  900                                 break;
  901                         }
  902                         swp_pager_meta_build(object, start, blk);
  903                 }
  904                         
  905                 /*
  906                  * Do we have to flush our current collection?  Yes if:
  907                  *
  908                  *      - no swap block at this index
  909                  *      - swap block is not contiguous
   910  *      - we cross a physical disk boundary in the
  911                  *        stripe.
  912                  */
  913 
  914                 if (
  915                     nbp && (nbp->b_blkno + btoc(nbp->b_bcount) != blk ||
  916                      ((nbp->b_blkno ^ blk) & dmmax_mask)
  917                     )
  918                 ) {
  919                         splx(s);
  920                         if (bp->b_flags & B_READ) {
  921                                 ++cnt.v_swapin;
  922                                 cnt.v_swappgsin += btoc(nbp->b_bcount);
  923                         } else {
  924                                 ++cnt.v_swapout;
  925                                 cnt.v_swappgsout += btoc(nbp->b_bcount);
  926                                 nbp->b_dirtyend = nbp->b_bcount;
  927                         }
  928                         flushchainbuf(nbp);
  929                         s = splvm();
  930                         nbp = NULL;
  931                 }
  932 
  933                 /*
  934                  * Add new swapblk to nbp, instantiating nbp if necessary.
  935                  * Zero-fill reads are able to take a shortcut.
  936                  */
  937 
  938                 if (blk == SWAPBLK_NONE) {
  939                         /*
  940                          * We can only get here if we are reading.  Since
  941                          * we are at splvm() we can safely modify b_resid,
  942                          * even if chain ops are in progress.
  943                          */
  944                         bzero(data, PAGE_SIZE);
  945                         bp->b_resid -= PAGE_SIZE;
  946                 } else {
  947                         if (nbp == NULL) {
  948                                 nbp = getchainbuf(bp, swapdev_vp, (bp->b_flags & B_READ) | B_ASYNC);
  949                                 nbp->b_blkno = blk;
  950                                 nbp->b_bcount = 0;
  951                                 nbp->b_data = data;
  952                         }
  953                         nbp->b_bcount += PAGE_SIZE;
  954                 }
  955                 --count;
  956                 ++start;
  957                 data += PAGE_SIZE;
  958         }
  959 
  960         /*
  961          *  Flush out last buffer
  962          */
  963 
  964         splx(s);
  965 
  966         if (nbp) {
  967                 if ((bp->b_flags & B_ASYNC) == 0)
  968                         nbp->b_flags &= ~B_ASYNC;
  969                 if (nbp->b_flags & B_READ) {
  970                         ++cnt.v_swapin;
  971                         cnt.v_swappgsin += btoc(nbp->b_bcount);
  972                 } else {
  973                         ++cnt.v_swapout;
  974                         cnt.v_swappgsout += btoc(nbp->b_bcount);
  975                         nbp->b_dirtyend = nbp->b_bcount;
  976                 }
  977                 flushchainbuf(nbp);
  978                 /* nbp = NULL; */
  979         }
  980 
  981         /*
  982          * Wait for completion.
  983          */
  984 
  985         if (bp->b_flags & B_ASYNC) {
  986                 autochaindone(bp);
  987         } else {
  988                 waitchainbuf(bp, 0, 1);
  989         }
  990 }
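
      /*
       * Example: a 64KB read (16 pages) containing a hole and a stripe
       * crossing is split by the loop above into a bzero'd page for each
       * SWAPBLK_NONE index plus one chained child buf per contiguous,
       * stripe-local run.  A synchronous caller then collects every child
       * in waitchainbuf(bp, 0, 1); B_ASYNC callers hand completion off to
       * autochaindone(bp).
       */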
  991 
  992 /*
  993  * SWAP_PAGER_GETPAGES() - bring pages in from swap
  994  *
  995  *      Attempt to retrieve (m, count) pages from backing store, but make
  996  *      sure we retrieve at least m[reqpage].  We try to load in as large
  997  *      a chunk surrounding m[reqpage] as is contiguous in swap and which
  998  *      belongs to the same object.
  999  *
 1000  *      The code is designed for asynchronous operation and 
 1001  *      immediate-notification of 'reqpage' but tends not to be
 1002  *      used that way.  Please do not optimize-out this algorithmic
 1003  *      feature, I intend to improve on it in the future.
 1004  *
 1005  *      The parent has a single vm_object_pip_add() reference prior to
 1006  *      calling us and we should return with the same.
 1007  *
 1008  *      The parent has BUSY'd the pages.  We should return with 'm'
 1009  *      left busy, but the others adjusted.
 1010  */
 1011 
 1012 static int
 1013 swap_pager_getpages(object, m, count, reqpage)
 1014         vm_object_t object;
 1015         vm_page_t *m;
 1016         int count, reqpage;
 1017 {
 1018         struct buf *bp;
 1019         vm_page_t mreq;
 1020         int s;
 1021         int i;
 1022         int j;
 1023         daddr_t blk;
 1024         vm_offset_t kva;
 1025         vm_pindex_t lastpindex;
 1026 
 1027         mreq = m[reqpage];
 1028 
 1029         if (mreq->object != object) {
 1030                 panic("swap_pager_getpages: object mismatch %p/%p", 
 1031                     object, 
 1032                     mreq->object
 1033                 );
 1034         }
 1035         /*
 1036          * Calculate range to retrieve.  The pages have already been assigned
 1037          * their swapblks.  We require a *contiguous* range that falls entirely
 1038          * within a single device stripe.   If we do not supply it, bad things
 1039          * happen.  Note that blk, iblk & jblk can be SWAPBLK_NONE, but the 
 1040          * loops are set up such that the case(s) are handled implicitly.
 1041          *
 1042          * The swp_*() calls must be made at splvm().  vm_page_free() does
 1043          * not need to be, but it will go a little faster if it is.
 1044          */
 1045 
 1046         s = splvm();
 1047         blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0);
 1048 
 1049         for (i = reqpage - 1; i >= 0; --i) {
 1050                 daddr_t iblk;
 1051 
 1052                 iblk = swp_pager_meta_ctl(m[i]->object, m[i]->pindex, 0);
 1053                 if (blk != iblk + (reqpage - i))
 1054                         break;
 1055                 if ((blk ^ iblk) & dmmax_mask)
 1056                         break;
 1057         }
 1058         ++i;
 1059 
 1060         for (j = reqpage + 1; j < count; ++j) {
 1061                 daddr_t jblk;
 1062 
 1063                 jblk = swp_pager_meta_ctl(m[j]->object, m[j]->pindex, 0);
 1064                 if (blk != jblk - (j - reqpage))
 1065                         break;
 1066                 if ((blk ^ jblk) & dmmax_mask)
 1067                         break;
 1068         }
 1069 
 1070         /*
 1071          * free pages outside our collection range.   Note: we never free
 1072          * mreq, it must remain busy throughout.
 1073          */
 1074 
 1075         {
 1076                 int k;
 1077 
 1078                 for (k = 0; k < i; ++k)
 1079                         vm_page_free(m[k]);
 1080                 for (k = j; k < count; ++k)
 1081                         vm_page_free(m[k]);
 1082         }
 1083         splx(s);
 1084 
 1085 
 1086         /*
 1087          * Return VM_PAGER_FAIL if we have nothing to do.  Return mreq 
 1088          * still busy, but the others unbusied.
 1089          */
 1090 
 1091         if (blk == SWAPBLK_NONE)
 1092                 return(VM_PAGER_FAIL);
 1093 
 1094         /*
 1095          * Get a swap buffer header to perform the IO
 1096          */
 1097 
 1098         bp = getpbuf(&nsw_rcount);
 1099         kva = (vm_offset_t) bp->b_data;
 1100 
 1101         /*
 1102          * map our page(s) into kva for input
 1103          *
 1104          * NOTE: B_PAGING is set by pbgetvp()
 1105          */
 1106 
 1107         pmap_qenter(kva, m + i, j - i);
 1108 
 1109         bp->b_flags = B_READ | B_CALL;
 1110         bp->b_iodone = swp_pager_async_iodone;
 1111         bp->b_rcred = bp->b_wcred = proc0.p_ucred;
 1112         bp->b_data = (caddr_t) kva;
 1113         crhold(bp->b_rcred);
 1114         crhold(bp->b_wcred);
 1115         bp->b_blkno = blk - (reqpage - i);
 1116         bp->b_bcount = PAGE_SIZE * (j - i);
 1117         bp->b_bufsize = PAGE_SIZE * (j - i);
 1118         bp->b_pager.pg_reqpage = reqpage - i;
 1119 
 1120         {
 1121                 int k;
 1122 
 1123                 for (k = i; k < j; ++k) {
 1124                         bp->b_pages[k - i] = m[k];
 1125                         vm_page_flag_set(m[k], PG_SWAPINPROG);
 1126                 }
 1127         }
 1128         bp->b_npages = j - i;
 1129 
 1130         pbgetvp(swapdev_vp, bp);
 1131 
 1132         cnt.v_swapin++;
 1133         cnt.v_swappgsin += bp->b_npages;
 1134 
 1135         /*
 1136          * We still hold the lock on mreq, and our automatic completion routine
 1137          * does not remove it.
 1138          */
 1139 
 1140         vm_object_pip_add(mreq->object, bp->b_npages);
 1141         lastpindex = m[j-1]->pindex;
 1142 
 1143         /*
 1144          * perform the I/O.  NOTE!!!  bp cannot be considered valid after
 1145          * this point because we automatically release it on completion.
 1146          * Instead, we look at the one page we are interested in which we
 1147          * still hold a lock on even through the I/O completion.
 1148          *
 1149          * The other pages in our m[] array are also released on completion,
 1150          * so we cannot assume they are valid anymore either.
 1151          *
 1152          * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
 1153          */
 1154 
 1155         BUF_KERNPROC(bp);
 1156         VOP_STRATEGY(bp->b_vp, bp);
 1157 
 1158         /*
 1159          * wait for the page we want to complete.  PG_SWAPINPROG is always
 1160          * cleared on completion.  If an I/O error occurs, SWAPBLK_NONE
 1161          * is set in the meta-data.
 1162          */
 1163 
 1164         s = splvm();
 1165 
 1166         while ((mreq->flags & PG_SWAPINPROG) != 0) {
 1167                 vm_page_flag_set(mreq, PG_WANTED | PG_REFERENCED);
 1168                 cnt.v_intrans++;
 1169                 if (tsleep(mreq, PSWP, "swread", hz*20)) {
 1170                         printf(
 1171                             "swap_pager: indefinite wait buffer: device:"
 1172                                 " %s, blkno: %ld, size: %ld\n",
 1173                             devtoname(bp->b_dev), (long)bp->b_blkno,
 1174                             bp->b_bcount
 1175                         );
 1176                 }
 1177         }
 1178 
 1179         splx(s);
 1180 
 1181         /*
  1182          * mreq is left busied after completion, but all the other pages
 1183          * are freed.  If we had an unrecoverable read error the page will
 1184          * not be valid.
 1185          */
 1186 
 1187         if (mreq->valid != VM_PAGE_BITS_ALL) {
 1188                 return(VM_PAGER_ERROR);
 1189         } else {
 1190                 return(VM_PAGER_OK);
 1191         }
 1192 
 1193         /*
 1194          * A final note: in a low swap situation, we cannot deallocate swap
 1195          * and mark a page dirty here because the caller is likely to mark
 1196          * the page clean when we return, causing the page to possibly revert 
  1197          * to all-zeros later.
 1198          */
 1199 }
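
      /*
       * Example: a fault on m[reqpage] with count = 8 candidate pages
       * keeps only those whose swap blocks are contiguous with blk and in
       * the same stripe, frees the rest before the single VOP_STRATEGY
       * call, and waits only on mreq via PG_SWAPINPROG; the other pages
       * kept in the cluster are disposed of by swp_pager_async_iodone().
       */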
 1200 
 1201 /*
 1202  *      swap_pager_putpages: 
 1203  *
 1204  *      Assign swap (if necessary) and initiate I/O on the specified pages.
 1205  *
 1206  *      We support both OBJT_DEFAULT and OBJT_SWAP objects.  DEFAULT objects
 1207  *      are automatically converted to SWAP objects.
 1208  *
 1209  *      In a low memory situation we may block in VOP_STRATEGY(), but the new 
 1210  *      vm_page reservation system coupled with properly written VFS devices 
 1211  *      should ensure that no low-memory deadlock occurs.  This is an area
 1212  *      which needs work.
 1213  *
 1214  *      The parent has N vm_object_pip_add() references prior to
 1215  *      calling us and will remove references for rtvals[] that are
 1216  *      not set to VM_PAGER_PEND.  We need to remove the rest on I/O
 1217  *      completion.
 1218  *
 1219  *      The parent has soft-busy'd the pages it passes us and will unbusy
  1220  *      those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
 1221  *      We need to unbusy the rest on I/O completion.
 1222  */
 1223 
 1224 void
 1225 swap_pager_putpages(object, m, count, sync, rtvals)
 1226         vm_object_t object;
 1227         vm_page_t *m;
 1228         int count;
 1229         boolean_t sync;
 1230         int *rtvals;
 1231 {
 1232         int i;
 1233         int n = 0;
 1234 
 1235         if (count && m[0]->object != object) {
  1236                 panic("swap_pager_putpages: object mismatch %p/%p",
 1237                     object, 
 1238                     m[0]->object
 1239                 );
 1240         }
 1241         /*
 1242          * Step 1
 1243          *
 1244          * Turn object into OBJT_SWAP
 1245          * check for bogus sysops
 1246          * force sync if not pageout process
 1247          */
 1248 
 1249         if (object->type != OBJT_SWAP)
 1250                 swp_pager_meta_build(object, 0, SWAPBLK_NONE);
 1251 
 1252         if (curproc != pageproc)
 1253                 sync = TRUE;
 1254 
 1255         /*
 1256          * Step 2
 1257          *
 1258          * Update nsw parameters from swap_async_max sysctl values.  
 1259          * Do not let the sysop crash the machine with bogus numbers.
 1260          */
 1261 
 1262         if (swap_async_max != nsw_wcount_async_max) {
 1263                 int n;
 1264                 int s;
 1265 
 1266                 /*
 1267                  * limit range
 1268                  */
 1269                 if ((n = swap_async_max) > nswbuf / 2)
 1270                         n = nswbuf / 2;
 1271                 if (n < 1)
 1272                         n = 1;
 1273                 swap_async_max = n;
 1274 
 1275                 /*
 1276                  * Adjust difference ( if possible ).  If the current async
 1277                  * count is too low, we may not be able to make the adjustment
 1278                  * at this time.
 1279                  */
 1280                 s = splvm();
 1281                 n -= nsw_wcount_async_max;
 1282                 if (nsw_wcount_async + n >= 0) {
 1283                         nsw_wcount_async += n;
 1284                         nsw_wcount_async_max += n;
 1285                         wakeup(&nsw_wcount_async);
 1286                 }
 1287                 splx(s);
 1288         }
 1289 
 1290         /*
 1291          * Step 3
 1292          *
 1293          * Assign swap blocks and issue I/O.  We reallocate swap on the fly.
 1294          * The page is left dirty until the pageout operation completes
 1295          * successfully.
 1296          */
 1297 
 1298         for (i = 0; i < count; i += n) {
 1299                 int s;
 1300                 int j;
 1301                 struct buf *bp;
 1302                 daddr_t blk;
 1303 
 1304                 /*
 1305                  * Maximum I/O size is limited by a number of factors.
 1306                  */
 1307 
 1308                 n = min(BLIST_MAX_ALLOC, count - i);
 1309                 n = min(n, nsw_cluster_max);
 1310 
 1311                 s = splvm();
 1312 
 1313                 /*
 1314                  * Get biggest block of swap we can.  If we fail, fall
 1315                  * back and try to allocate a smaller block.  Don't go
 1316                  * overboard trying to allocate space if it would overly
 1317                  * fragment swap.
 1318                  */
 1319                 while (
 1320                     (blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE &&
 1321                     n > 4
 1322                 ) {
 1323                         n >>= 1;
 1324                 }
 1325                 if (blk == SWAPBLK_NONE) {
 1326                         for (j = 0; j < n; ++j)
 1327                                 rtvals[i+j] = VM_PAGER_FAIL;
 1328                         splx(s);
 1329                         continue;
 1330                 }
 1331 
 1332                 /*
 1333                  * The I/O we are constructing cannot cross a physical
  1334          * disk boundary in the swap stripe.  Note: we are still
 1335                  * at splvm().
 1336                  */
 1337                 if ((blk ^ (blk + n)) & dmmax_mask) {
 1338                         j = ((blk + dmmax) & dmmax_mask) - blk;
 1339                         swp_pager_freeswapspace(blk + j, n - j);
 1340                         n = j;
 1341                 }
 1342 
 1343                 /*
 1344                  * All I/O parameters have been satisfied, build the I/O
 1345                  * request and assign the swap space.
 1346                  *
 1347                  * NOTE: B_PAGING is set by pbgetvp()
 1348                  */
 1349 
 1350                 if (sync == TRUE) {
 1351                         bp = getpbuf(&nsw_wcount_sync);
 1352                         bp->b_flags = B_CALL;
 1353                 } else {
 1354                         bp = getpbuf(&nsw_wcount_async);
 1355                         bp->b_flags = B_CALL | B_ASYNC;
 1356                 }
 1357                 bp->b_spc = NULL;       /* not used, but NULL-out anyway */
 1358 
 1359                 pmap_qenter((vm_offset_t)bp->b_data, &m[i], n);
 1360 
 1361                 bp->b_rcred = bp->b_wcred = proc0.p_ucred;
 1362                 bp->b_bcount = PAGE_SIZE * n;
 1363                 bp->b_bufsize = PAGE_SIZE * n;
 1364                 bp->b_blkno = blk;
 1365 
 1366                 crhold(bp->b_rcred);
 1367                 crhold(bp->b_wcred);
 1368 
 1369                 pbgetvp(swapdev_vp, bp);
 1370 
 1371                 for (j = 0; j < n; ++j) {
 1372                         vm_page_t mreq = m[i+j];
 1373 
 1374                         swp_pager_meta_build(
 1375                             mreq->object, 
 1376                             mreq->pindex,
 1377                             blk + j
 1378                         );
 1379                         vm_page_dirty(mreq);
 1380                         rtvals[i+j] = VM_PAGER_OK;
 1381 
 1382                         vm_page_flag_set(mreq, PG_SWAPINPROG);
 1383                         bp->b_pages[j] = mreq;
 1384                 }
 1385                 bp->b_npages = n;
 1386                 /*
 1387                  * Must set dirty range for NFS to work.
 1388                  */
 1389                 bp->b_dirtyoff = 0;
 1390                 bp->b_dirtyend = bp->b_bcount;
 1391 
 1392                 cnt.v_swapout++;
 1393                 cnt.v_swappgsout += bp->b_npages;
 1394                 swapdev_vp->v_numoutput++;
 1395 
 1396                 splx(s);
 1397 
 1398                 /*
 1399                  * asynchronous
 1400                  *
 1401                  * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
 1402                  */
 1403 
 1404                 if (sync == FALSE) {
 1405                         bp->b_iodone = swp_pager_async_iodone;
 1406                         BUF_KERNPROC(bp);
 1407                         VOP_STRATEGY(bp->b_vp, bp);
 1408 
 1409                         for (j = 0; j < n; ++j)
 1410                                 rtvals[i+j] = VM_PAGER_PEND;
 1411                         continue;
 1412                 }
 1413 
 1414                 /*
 1415                  * synchronous
 1416                  *
 1417                  * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
 1418                  */
 1419 
 1420                 bp->b_iodone = swp_pager_sync_iodone;
 1421                 VOP_STRATEGY(bp->b_vp, bp);
 1422 
 1423                 /*
 1424                  * Wait for the sync I/O to complete, then update rtvals.
 1425                  * We just set the rtvals[] to VM_PAGER_PEND so we can call
 1426                  * our async completion routine at the end, thus avoiding a
 1427                  * double-free.
 1428                  */
 1429                 s = splbio();
 1430 
 1431                 while ((bp->b_flags & B_DONE) == 0) {
 1432                         tsleep(bp, PVM, "swwrt", 0);
 1433                 }
 1434 
 1435                 for (j = 0; j < n; ++j)
 1436                         rtvals[i+j] = VM_PAGER_PEND;
 1437 
 1438                 /*
 1439                  * Now that we are through with the bp, we can call the
 1440                  * normal async completion, which frees everything up.
 1441                  */
 1442 
 1443                 swp_pager_async_iodone(bp);
 1444 
 1445                 splx(s);
 1446         }
 1447 }
 1448 
 1449 /*
 1450  *      swp_pager_sync_iodone:
 1451  *
 1452  *      Completion routine for synchronous reads and writes from/to swap.
 1453  *      We just mark the bp as complete and wake up anyone waiting on it.
 1454  *
 1455  *      This routine may not block.  This routine is called at splbio() or better.
 1456  */
 1457 
 1458 static void
 1459 swp_pager_sync_iodone(bp)
 1460         struct buf *bp;
 1461 {
 1462         bp->b_flags |= B_DONE;
 1463         bp->b_flags &= ~B_ASYNC;
 1464         wakeup(bp);
 1465 }
 1466 
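/*
 * The wakeup() above pairs with the "swwrt" tsleep() loop in the
 * synchronous branch of putpages, which spins on B_DONE at splbio()
 * until swp_pager_sync_iodone() marks the buffer complete.
 */
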
 1467 /*
 1468  *      swp_pager_async_iodone:
 1469  *
 1470  *      Completion routine for asynchronous reads and writes from/to swap.
 1471  *      Also called manually by synchronous code to finish up a bp.
 1472  *
 1473  *      For READ operations, the pages are PG_BUSY'd.  For WRITE operations, 
 1474  *      the pages are vm_page_t->busy'd.  For READ operations, we clear 
 1475  *      PG_BUSY on all pages except the 'main' request page.  For WRITE 
 1476  *      operations, we drop the ->busy count on all pages ( we can do this 
 1477  *      because we marked them all VM_PAGER_PEND on return from putpages ).
 1478  *
 1479  *      This routine may not block.
 1480  *      This routine is called at splbio() or better.
 1481  *
 1482  *      We up ourselves to splvm() as required for various vm_page related
 1483  *      calls.
 1484  */
 1485 
 1486 static void
 1487 swp_pager_async_iodone(bp)
 1488         register struct buf *bp;
 1489 {
 1490         int s;
 1491         int i;
 1492         vm_object_t object = NULL;
 1493 
 1494         bp->b_flags |= B_DONE;
 1495 
 1496         /*
 1497          * report error
 1498          */
 1499 
 1500         if (bp->b_flags & B_ERROR) {
 1501                 printf(
 1502                     "swap_pager: I/O error - %s failed; blkno %ld, "
 1503                         "size %ld, error %d\n",
 1504                     ((bp->b_flags & B_READ) ? "pagein" : "pageout"),
 1505                     (long)bp->b_blkno, 
 1506                     (long)bp->b_bcount,
 1507                     bp->b_error
 1508                 );
 1509         }
 1510 
 1511         /*
 1512          * set object, raise to splvm().
 1513          */
 1514 
 1515         if (bp->b_npages)
 1516                 object = bp->b_pages[0]->object;
 1517         s = splvm();
 1518 
 1519         /*
 1520          * remove the mapping for kernel virtual
 1521          */
 1522 
 1523         pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages);
 1524 
 1525         /*
 1526          * cleanup pages.  If an error occurs writing to swap, we are in
 1527          * very serious trouble.  If it happens to be a disk error, though,
 1528          * we may be able to recover by reassigning the swap later on.  So
 1529          * in this case we remove the m->swapblk assignment for the page 
 1530                  * but do not free it in the rlist.  The erroneous block(s) are thus
 1531          * never reallocated as swap.  Redirty the page and continue.
 1532          */
 1533 
 1534         for (i = 0; i < bp->b_npages; ++i) {
 1535                 vm_page_t m = bp->b_pages[i];
 1536 
 1537                 vm_page_flag_clear(m, PG_SWAPINPROG);
 1538 
 1539                 if (bp->b_flags & B_ERROR) {
 1540                         /*
 1541                          * If an error occurs I'd love to throw the swapblk
 1542                          * away without freeing it back to swapspace, so it
 1543                          * can never be used again.  But I can't from an 
 1544                          * interrupt.
 1545                          */
 1546 
 1547                         if (bp->b_flags & B_READ) {
 1548                                 /*
 1549                                  * When reading, reqpage needs to stay
 1550                                  * locked for the parent, but all other
 1551                                  * pages can be freed.  We still want to
 1552                                  * wakeup the parent waiting on the page,
 1553                                  * though.  ( also: pg_reqpage can be -1 and 
 1554                                  * not match anything ).
 1555                                  *
 1556                                  * We have to wake specifically requested pages
 1557                                  * up too because we cleared PG_SWAPINPROG and
 1558                                  * someone may be waiting for that.
 1559                                  *
 1560                                  * NOTE: for reads, m->dirty will probably
 1561                                  * be overridden by the original caller of
 1562                                  * getpages so don't play cute tricks here.
 1563                                  *
 1564                                  * XXX IT IS NOT LEGAL TO FREE THE PAGE HERE
 1565                                  * AS THIS MESSES WITH object->memq, and it is
 1566                                  * not legal to mess with object->memq from an
 1567                                  * interrupt.
 1568                                  */
 1569 
 1570                                 m->valid = 0;
 1571                                 vm_page_flag_clear(m, PG_ZERO);
 1572 
 1573                                 if (i != bp->b_pager.pg_reqpage)
 1574                                         vm_page_free(m);
 1575                                 else
 1576                                         vm_page_flash(m);
 1577                                 /*
 1578                                  * If i == bp->b_pager.pg_reqpage, the page 
 1579                                  * is left busy; the caller must unbusy it.
 1580                                  */
 1581                         } else {
 1582                                 /*
 1583                                  * If a write error occurs, reactivate page
 1584                                  * so it doesn't clog the inactive list,
 1585                                  * then finish the I/O.
 1586                                  */
 1587                                 vm_page_dirty(m);
 1588                                 vm_page_activate(m);
 1589                                 vm_page_io_finish(m);
 1590                         }
 1591                 } else if (bp->b_flags & B_READ) {
 1592                         /*
 1593                          * For read success, clear dirty bits.  Nobody should
 1594                          * have this page mapped, but don't take any chances:
 1595                          * make sure the pmap modify bits are also cleared.
 1596                          *
 1597                          * NOTE: for reads, m->dirty will probably be 
 1598                          * overridden by the original caller of getpages so
 1599                          * we cannot set them in order to free the underlying
 1600                          * swap in a low-swap situation.  I don't think we'd
 1601                          * want to do that anyway, but it was an optimization
 1602                          * that existed in the old swapper for a time before
 1603                          * it got ripped out due to precisely this problem.
 1604                          *
 1605                          * clear PG_ZERO in page.
 1606                          *
 1607                          * If not the requested page then deactivate it.
 1608                          *
 1609                          * Note that the requested page, reqpage, is left
 1610                          * busied, but we still have to wake it up.  The
 1611                          * other pages are released (unbusied) by 
 1612                          * vm_page_wakeup().  We do not set reqpage's
 1613                          * valid bits here, it is up to the caller.
 1614                          */
 1615 
 1616                         pmap_clear_modify(m);
 1617                         m->valid = VM_PAGE_BITS_ALL;
 1618                         vm_page_undirty(m);
 1619                         vm_page_flag_clear(m, PG_ZERO);
 1620 
 1621                         /*
 1622                          * We have to wake specifically requested pages
 1623                          * up too because we cleared PG_SWAPINPROG and
 1624                          * someone could be waiting for it in getpages.
 1625                          * However, be sure not to unbusy the page getpages
 1626                          * specifically requested - it expects that page to 
 1627                          * be left busy.
 1628                          */
 1629                         if (i != bp->b_pager.pg_reqpage) {
 1630                                 vm_page_deactivate(m);
 1631                                 vm_page_wakeup(m);
 1632                         } else {
 1633                                 vm_page_flash(m);
 1634                         }
 1635                 } else {
 1636                         /*
 1637                          * For write success, clear the modify and dirty 
 1638                          * status, then finish the I/O ( which decrements the 
 1639                          * busy count and possibly wakes waiters up ).
 1640                          */
 1641                         pmap_clear_modify(m);
 1642                         vm_page_undirty(m);
 1643                         vm_page_io_finish(m);
 1644                         if (!vm_page_count_severe() || !vm_page_try_to_cache(m))
 1645                                 vm_page_protect(m, VM_PROT_READ);
 1646                 }
 1647         }
 1648 
 1649         /*
 1650          * adjust pip.  NOTE: the original parent may still have its own
 1651          * pip refs on the object.
 1652          */
 1653 
 1654         if (object)
 1655                 vm_object_pip_wakeupn(object, bp->b_npages);
 1656 
 1657         /*
 1658          * release the physical I/O buffer
 1659          */
 1660 
 1661         relpbuf(
 1662             bp, 
 1663             ((bp->b_flags & B_READ) ? &nsw_rcount : 
 1664                 ((bp->b_flags & B_ASYNC) ? 
 1665                     &nsw_wcount_async : 
 1666                     &nsw_wcount_sync
 1667                 )
 1668             )
 1669         );
 1670         splx(s);
 1671 }
 1672 
 1673 /************************************************************************
 1674  *                              SWAP META DATA                          *
 1675  ************************************************************************
 1676  *
 1677  *      These routines manipulate the swap metadata stored in the 
 1678  *      OBJT_SWAP object.  All swp_*() routines must be called at
 1679  *      splvm() because swap can be freed up by the low level vm_page
 1680  *      code which might be called from interrupts beyond what splbio() covers.
 1681  *
 1682  *      Swap metadata is implemented with a global hash and not directly
 1683  *      linked into the object.  Instead the object simply contains
 1684  *      appropriate tracking counters.
 1685  */
 1686 
 1687 /*
 1688  * SWP_PAGER_HASH() -   hash swap meta data
 1689  *
 1690  *      This is an inline helper function which hashes the swapblk given
 1691  *      the object and page index.  It returns a pointer to the pointer
 1692  *      that links the matching swblock into its hash chain, or a pointer
 1693  *      to a NULL pointer if it could not find a swapblk.
 1694  *
 1695  *      This routine must be called at splvm().
 1696  */
 1697 
 1698 static __inline struct swblock **
 1699 swp_pager_hash(vm_object_t object, vm_pindex_t index)
 1700 {
 1701         struct swblock **pswap;
 1702         struct swblock *swap;
 1703 
 1704         index &= ~SWAP_META_MASK;
 1705         pswap = &swhash[(index ^ (int)(intptr_t)object) & swhash_mask];
 1706 
 1707         while ((swap = *pswap) != NULL) {
 1708                 if (swap->swb_object == object &&
 1709                     swap->swb_index == index
 1710                 ) {
 1711                         break;
 1712                 }
 1713                 pswap = &swap->swb_hnext;
 1714         }
 1715         return(pswap);
 1716 }
 1717 
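/*
 * Usage sketch ( illustrative, not from this file ): returning the
 * address of the link rather than the swblock itself lets a caller
 * unlink an entry without re-walking the hash chain:
 *
 *      pswap = swp_pager_hash(object, index);
 *      if ((swap = *pswap) != NULL) {
 *              ...
 *              *pswap = swap->swb_hnext;       unlink from chain
 *              zfree(swap_zone, swap);
 *      }
 *
 * This is exactly the removal idiom used by swp_pager_meta_free() and
 * swp_pager_meta_ctl() below.
 */
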
 1718 /*
 1719  * SWP_PAGER_META_BUILD() -     add swap block to swap meta data for object
 1720  *
 1721  *      We first convert the object to a swap object if it is a default
 1722  *      object.
 1723  *
 1724  *      The specified swapblk is added to the object's swap metadata.  If
 1725  *      the swapblk is not valid, it is freed instead.  Any previously
 1726  *      assigned swapblk is freed.
 1727  *
 1728  *      This routine must be called at splvm(), except when used to convert
 1729  *      an OBJT_DEFAULT object into an OBJT_SWAP object.
 1730  *
 1731  */
 1732 
 1733 static void
 1734 swp_pager_meta_build(
 1735         vm_object_t object, 
 1736         vm_pindex_t index,
 1737         daddr_t swapblk
 1738 ) {
 1739         struct swblock *swap;
 1740         struct swblock **pswap;
 1741 
 1742         /*
 1743          * Convert default object to swap object if necessary
 1744          */
 1745 
 1746         if (object->type != OBJT_SWAP) {
 1747                 object->type = OBJT_SWAP;
 1748                 object->un_pager.swp.swp_bcount = 0;
 1749 
 1750                 if (object->handle != NULL) {
 1751                         TAILQ_INSERT_TAIL(
 1752                             NOBJLIST(object->handle),
 1753                             object, 
 1754                             pager_object_list
 1755                         );
 1756                 } else {
 1757                         TAILQ_INSERT_TAIL(
 1758                             &swap_pager_un_object_list,
 1759                             object, 
 1760                             pager_object_list
 1761                         );
 1762                 }
 1763         }
 1764         
 1765         /*
 1766          * Locate hash entry.  If not found create, but if we aren't adding
 1767          * anything just return.  If we run out of space in the map we wait
 1768          * and, since the hash table may have changed, retry.
 1769          */
 1770 
 1771 retry:
 1772         pswap = swp_pager_hash(object, index);
 1773 
 1774         if ((swap = *pswap) == NULL) {
 1775                 int i;
 1776 
 1777                 if (swapblk == SWAPBLK_NONE)
 1778                         return;
 1779 
 1780                 swap = *pswap = zalloc(swap_zone);
 1781                 if (swap == NULL) {
 1782                         if (swap_zone->zpagecount >= swap_zone->zpagemax)
 1783                                 printf("swap zone exhausted, increase kern.maxswzone\n");
 1784                         VM_WAIT;
 1785                         goto retry;
 1786                 }
 1787                 swap->swb_hnext = NULL;
 1788                 swap->swb_object = object;
 1789                 swap->swb_index = index & ~SWAP_META_MASK;
 1790                 swap->swb_count = 0;
 1791 
 1792                 ++object->un_pager.swp.swp_bcount;
 1793 
 1794                 for (i = 0; i < SWAP_META_PAGES; ++i)
 1795                         swap->swb_pages[i] = SWAPBLK_NONE;
 1796         }
 1797 
 1798         /*
 1799          * Delete prior contents of metadata
 1800          */
 1801 
 1802         index &= SWAP_META_MASK;
 1803 
 1804         if (swap->swb_pages[index] != SWAPBLK_NONE) {
 1805                 swp_pager_freeswapspace(swap->swb_pages[index], 1);
 1806                 --swap->swb_count;
 1807         }
 1808 
 1809         /*
 1810          * Enter block into metadata
 1811          */
 1812 
 1813         swap->swb_pages[index] = swapblk;
 1814         if (swapblk != SWAPBLK_NONE)
 1815                 ++swap->swb_count;
 1816 }
 1817 
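/*
 * Worked example ( illustrative, assuming SWAP_META_PAGES = 16 and thus
 * SWAP_META_MASK = 15 ): a page index of 37 lands in the swblock with
 * swb_index = 37 & ~15 = 32, and its swapblk is stored in slot
 * 37 & 15 = 5 of swb_pages[].  Pages 32..47 of the object therefore
 * share a single swblock.
 */
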
 1818 /*
 1819  * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
 1820  *
 1821  *      The requested range of blocks is freed, with any associated swap 
 1822  *      returned to the swap bitmap.
 1823  *
 1824  *      This routine will free swap metadata structures as they are cleaned 
 1825  *      out.  This routine does *NOT* operate on swap metadata associated
 1826  *      with resident pages.
 1827  *
 1828  *      This routine must be called at splvm()
 1829  */
 1830 
 1831 static void
 1832 swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count)
 1833 {
 1834         if (object->type != OBJT_SWAP)
 1835                 return;
 1836 
 1837         while (count > 0) {
 1838                 struct swblock **pswap;
 1839                 struct swblock *swap;
 1840 
 1841                 pswap = swp_pager_hash(object, index);
 1842 
 1843                 if ((swap = *pswap) != NULL) {
 1844                         daddr_t v = swap->swb_pages[index & SWAP_META_MASK];
 1845 
 1846                         if (v != SWAPBLK_NONE) {
 1847                                 swp_pager_freeswapspace(v, 1);
 1848                                 swap->swb_pages[index & SWAP_META_MASK] =
 1849                                         SWAPBLK_NONE;
 1850                                 if (--swap->swb_count == 0) {
 1851                                         *pswap = swap->swb_hnext;
 1852                                         zfree(swap_zone, swap);
 1853                                         --object->un_pager.swp.swp_bcount;
 1854                                 }
 1855                         }
 1856                         --count;
 1857                         ++index;
 1858                 } else {
 1859                         int n = SWAP_META_PAGES - (index & SWAP_META_MASK);
 1860                         count -= n;
 1861                         index += n;
 1862                 }
 1863         }
 1864 }
 1865 
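/*
 * Worked example of the skip-ahead ( illustrative, again assuming
 * SWAP_META_PAGES = 16 ): freeing from index = 37 when no swblock is
 * hashed for that group computes n = 16 - (37 & 15) = 11, advancing
 * index straight to 48, the next swblock boundary, instead of probing
 * the hash once for each of the missing pages.
 */
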
 1866 /*
 1867  * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
 1868  *
 1869  *      This routine locates and destroys all swap metadata associated with
 1870  *      an object.
 1871  *
 1872  *      This routine must be called at splvm()
 1873  */
 1874 
 1875 static void
 1876 swp_pager_meta_free_all(vm_object_t object)
 1877 {
 1878         daddr_t index = 0;
 1879 
 1880         if (object->type != OBJT_SWAP)
 1881                 return;
 1882 
 1883         while (object->un_pager.swp.swp_bcount) {
 1884                 struct swblock **pswap;
 1885                 struct swblock *swap;
 1886 
 1887                 pswap = swp_pager_hash(object, index);
 1888                 if ((swap = *pswap) != NULL) {
 1889                         int i;
 1890 
 1891                         for (i = 0; i < SWAP_META_PAGES; ++i) {
 1892                                 daddr_t v = swap->swb_pages[i];
 1893                                 if (v != SWAPBLK_NONE) {
 1894                                         --swap->swb_count;
 1895                                         swp_pager_freeswapspace(v, 1);
 1896                                 }
 1897                         }
 1898                         if (swap->swb_count != 0)
 1899                                 panic("swp_pager_meta_free_all: swb_count != 0");
 1900                         *pswap = swap->swb_hnext;
 1901                         zfree(swap_zone, swap);
 1902                         --object->un_pager.swp.swp_bcount;
 1903                 }
 1904                 index += SWAP_META_PAGES;
 1905                 if (index > 0x20000000)
 1906                         panic("swp_pager_meta_free_all: failed to locate all swap meta blocks");
 1907         }
 1908 }
 1909 
 1910 /*
 1911  * SWP_PAGER_META_CTL() -  misc control of swap and vm_page_t meta data.
 1912  *
 1913  *      This routine is capable of looking up, popping, or freeing
 1914  *      swapblk assignments in the swap meta data or in the vm_page_t.
 1915  *      The routine typically returns the swapblk being looked-up or popped,
 1916  *      or SWAPBLK_NONE if the block was freed or was invalid.  This routine 
 1917  *      will automatically free any invalid 
 1918  *      meta-data swapblks.
 1919  *
 1920  *      It is not possible to store invalid swapblks in the swap meta data
 1921  *      (other than a literal 'SWAPBLK_NONE'), so we don't bother checking.
 1922  *
 1923  *      When acting on a busy resident page and paging is in progress, we 
 1924  *      have to wait until paging is complete but otherwise can act on the 
 1925  *      busy page.
 1926  *
 1927  *      This routine must be called at splvm().
 1928  *
 1929  *      SWM_FREE        remove and free swap block from metadata
 1930  *      SWM_POP         remove from meta data but do not free it - pop it out
 1931  */
 1932 
 1933 static daddr_t
 1934 swp_pager_meta_ctl(
 1935         vm_object_t object,
 1936         vm_pindex_t index,
 1937         int flags
 1938 ) {
 1939         struct swblock **pswap;
 1940         struct swblock *swap;
 1941         daddr_t r1;
 1942 
 1943         /*
 1944          * The meta data only exists if the object is OBJT_SWAP 
 1945          * and even then might not be allocated yet.
 1946          */
 1947 
 1948         if (object->type != OBJT_SWAP)
 1949                 return(SWAPBLK_NONE);
 1950 
 1951         r1 = SWAPBLK_NONE;
 1952         pswap = swp_pager_hash(object, index);
 1953 
 1954         if ((swap = *pswap) != NULL) {
 1955                 index &= SWAP_META_MASK;
 1956                 r1 = swap->swb_pages[index];
 1957 
 1958                 if (r1 != SWAPBLK_NONE) {
 1959                         if (flags & SWM_FREE) {
 1960                                 swp_pager_freeswapspace(r1, 1);
 1961                                 r1 = SWAPBLK_NONE;
 1962                         }
 1963                         if (flags & (SWM_FREE|SWM_POP)) {
 1964                                 swap->swb_pages[index] = SWAPBLK_NONE;
 1965                                 if (--swap->swb_count == 0) {
 1966                                         *pswap = swap->swb_hnext;
 1967                                         zfree(swap_zone, swap);
 1968                                         --object->un_pager.swp.swp_bcount;
 1969                                 }
 1970                         } 
 1971                 }
 1972         }
 1973         return(r1);
 1974 }
 1975 
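/*
 * Caller sketch ( hypothetical, not from this file ): the flags select
 * among the three behaviors, all at splvm().
 *
 *      blk = swp_pager_meta_ctl(object, pindex, 0);            lookup only
 *      blk = swp_pager_meta_ctl(object, pindex, SWM_POP);      remove, keep block
 *      (void) swp_pager_meta_ctl(object, pindex, SWM_FREE);    remove and free
 *
 * With flags == 0 the metadata is left intact.  SWM_POP hands the
 * caller ownership of the returned swap block.  SWM_FREE returns the
 * block to the swap bitmap and yields SWAPBLK_NONE.
 */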
