FreeBSD/Linux Kernel Cross Reference
sys/vm/swap_pager.c


    1 /*-
    2  * SPDX-License-Identifier: BSD-4-Clause
    3  *
    4  * Copyright (c) 1998 Matthew Dillon,
    5  * Copyright (c) 1994 John S. Dyson
    6  * Copyright (c) 1990 University of Utah.
    7  * Copyright (c) 1982, 1986, 1989, 1993
    8  *      The Regents of the University of California.  All rights reserved.
    9  *
   10  * This code is derived from software contributed to Berkeley by
   11  * the Systems Programming Group of the University of Utah Computer
   12  * Science Department.
   13  *
   14  * Redistribution and use in source and binary forms, with or without
   15  * modification, are permitted provided that the following conditions
   16  * are met:
   17  * 1. Redistributions of source code must retain the above copyright
   18  *    notice, this list of conditions and the following disclaimer.
   19  * 2. Redistributions in binary form must reproduce the above copyright
   20  *    notice, this list of conditions and the following disclaimer in the
   21  *    documentation and/or other materials provided with the distribution.
   22  * 3. All advertising materials mentioning features or use of this software
   23  *    must display the following acknowledgement:
   24  *      This product includes software developed by the University of
   25  *      California, Berkeley and its contributors.
   26  * 4. Neither the name of the University nor the names of its contributors
   27  *    may be used to endorse or promote products derived from this software
   28  *    without specific prior written permission.
   29  *
   30  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   31  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   32  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   33  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   34  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   35  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   36  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   37  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   38  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   39  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   40  * SUCH DAMAGE.
   41  *
   42  *                              New Swap System
   43  *                              Matthew Dillon
   44  *
   45  * Radix Bitmap 'blists'.
   46  *
   47  *      - The new swapper uses the new radix bitmap code.  This should scale
   48  *        to arbitrarily small or arbitrarily large swap spaces and an almost
   49  *        arbitrary degree of fragmentation.
   50  *
   51  * Features:
   52  *
   53  *      - on the fly reallocation of swap during putpages.  The new system
   54  *        does not try to keep previously allocated swap blocks for dirty
   55  *        pages.
   56  *
   57  *      - on the fly deallocation of swap
   58  *
   59  *      - No more garbage collection required.  Unnecessarily allocated swap
   60  *        blocks only exist for dirty vm_page_t's now and these are already
   61  *        cycled (in a high-load system) by the pager.  We also do on-the-fly
   62  *        removal of invalidated swap blocks when a page is destroyed
   63  *        or renamed.
   64  *
   65  * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
   66  *
   67  *      @(#)swap_pager.c        8.9 (Berkeley) 3/21/94
   68  *      @(#)vm_swap.c   8.5 (Berkeley) 2/17/94
   69  */
   70 
   71 #include <sys/cdefs.h>
   72 __FBSDID("$FreeBSD$");
   73 
   74 #include "opt_vm.h"
   75 
   76 #include <sys/param.h>
   77 #include <sys/bio.h>
   78 #include <sys/blist.h>
   79 #include <sys/buf.h>
   80 #include <sys/conf.h>
   81 #include <sys/disk.h>
   82 #include <sys/disklabel.h>
   83 #include <sys/eventhandler.h>
   84 #include <sys/fcntl.h>
   85 #include <sys/lock.h>
   86 #include <sys/kernel.h>
   87 #include <sys/mount.h>
   88 #include <sys/namei.h>
   89 #include <sys/malloc.h>
   90 #include <sys/pctrie.h>
   91 #include <sys/priv.h>
   92 #include <sys/proc.h>
   93 #include <sys/racct.h>
   94 #include <sys/resource.h>
   95 #include <sys/resourcevar.h>
   96 #include <sys/rwlock.h>
   97 #include <sys/sbuf.h>
   98 #include <sys/sysctl.h>
   99 #include <sys/sysproto.h>
  100 #include <sys/systm.h>
  101 #include <sys/sx.h>
  102 #include <sys/vmmeter.h>
  103 #include <sys/vnode.h>
  104 
  105 #include <security/mac/mac_framework.h>
  106 
  107 #include <vm/vm.h>
  108 #include <vm/pmap.h>
  109 #include <vm/vm_map.h>
  110 #include <vm/vm_kern.h>
  111 #include <vm/vm_object.h>
  112 #include <vm/vm_page.h>
  113 #include <vm/vm_pager.h>
  114 #include <vm/vm_pageout.h>
  115 #include <vm/vm_param.h>
  116 #include <vm/swap_pager.h>
  117 #include <vm/vm_extern.h>
  118 #include <vm/uma.h>
  119 
  120 #include <geom/geom.h>
  121 
  122 /*
  123  * MAX_PAGEOUT_CLUSTER must be a power of 2 between 1 and 64.
  124  * The 64-page limit is due to the radix code (kern/subr_blist.c).
  125  */
  126 #ifndef MAX_PAGEOUT_CLUSTER
  127 #define MAX_PAGEOUT_CLUSTER     32
  128 #endif
  129 
  130 #if !defined(SWB_NPAGES)
  131 #define SWB_NPAGES      MAX_PAGEOUT_CLUSTER
  132 #endif
  133 
  134 #define SWAP_META_PAGES         PCTRIE_COUNT
  135 
  136 /*
  137  * A swblk structure maps each page index within a
  138  * SWAP_META_PAGES-aligned and sized range to the address of an
  139  * on-disk swap block (or SWAPBLK_NONE). The collection of these
  140  * mappings for an entire vm object is implemented as a pc-trie.
  141  */
  142 struct swblk {
  143         vm_pindex_t     p;
  144         daddr_t         d[SWAP_META_PAGES];
  145 };
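       /*
        * [Editor's note] A minimal lookup sketch, mirroring the pattern used by
        * swp_pager_meta_lookup() and swap_pager_unswapped() later in this file:
        * the page index is rounded down to a SWAP_META_PAGES boundary to find
        * the swblk in the object's trie, and the remainder indexes d[].  The
        * variable names are illustrative only.
        *
        *      struct swblk *sb;
        *      daddr_t blk;
        *
        *      sb = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks,
        *          rounddown(pindex, SWAP_META_PAGES));
        *      blk = (sb != NULL) ? sb->d[pindex % SWAP_META_PAGES] : SWAPBLK_NONE;
        */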
  146 
  147 static MALLOC_DEFINE(M_VMPGDATA, "vm_pgdata", "swap pager private data");
  148 static struct mtx sw_dev_mtx;
  149 static TAILQ_HEAD(, swdevt) swtailq = TAILQ_HEAD_INITIALIZER(swtailq);
  150 static struct swdevt *swdevhd;  /* Allocate from here next */
  151 static int nswapdev;            /* Number of swap devices */
  152 int swap_pager_avail;
  153 static struct sx swdev_syscall_lock;    /* serialize swap(on|off) */
  154 
  155 static __exclusive_cache_line u_long swap_reserved;
  156 static u_long swap_total;
  157 static int sysctl_page_shift(SYSCTL_HANDLER_ARGS);
  158 
  159 static SYSCTL_NODE(_vm_stats, OID_AUTO, swap, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
  160     "VM swap stats");
  161 
  162 SYSCTL_PROC(_vm, OID_AUTO, swap_reserved, CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
  163     &swap_reserved, 0, sysctl_page_shift, "A", 
  164     "Amount of swap storage needed to back all allocated anonymous memory.");
  165 SYSCTL_PROC(_vm, OID_AUTO, swap_total, CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
  166     &swap_total, 0, sysctl_page_shift, "A", 
  167     "Total amount of available swap storage.");
  168 
  169 static int overcommit = 0;
  170 SYSCTL_INT(_vm, VM_OVERCOMMIT, overcommit, CTLFLAG_RW, &overcommit, 0,
  171     "Configure virtual memory overcommit behavior. See tuning(7) "
  172     "for details.");
  173 static unsigned long swzone;
  174 SYSCTL_ULONG(_vm, OID_AUTO, swzone, CTLFLAG_RD, &swzone, 0,
  175     "Actual size of swap metadata zone");
  176 static unsigned long swap_maxpages;
  177 SYSCTL_ULONG(_vm, OID_AUTO, swap_maxpages, CTLFLAG_RD, &swap_maxpages, 0,
  178     "Maximum amount of swap supported");
  179 
  180 static COUNTER_U64_DEFINE_EARLY(swap_free_deferred);
  181 SYSCTL_COUNTER_U64(_vm_stats_swap, OID_AUTO, free_deferred,
  182     CTLFLAG_RD, &swap_free_deferred,
  183     "Number of pages that deferred freeing swap space");
  184 
  185 static COUNTER_U64_DEFINE_EARLY(swap_free_completed);
  186 SYSCTL_COUNTER_U64(_vm_stats_swap, OID_AUTO, free_completed,
  187     CTLFLAG_RD, &swap_free_completed,
  188     "Number of deferred frees completed");
  189 
  190 /* bits from overcommit */
  191 #define SWAP_RESERVE_FORCE_ON           (1 << 0)
  192 #define SWAP_RESERVE_RLIMIT_ON          (1 << 1)
  193 #define SWAP_RESERVE_ALLOW_NONWIRED     (1 << 2)
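       /*
        * [Editor's note] As consumed by swap_reserve_by_cred() below:
        * SWAP_RESERVE_ALLOW_NONWIRED lets non-wired physical memory above the
        * free reserve count toward the space available for reservations,
        * SWAP_RESERVE_FORCE_ON makes a reservation exceeding that space fail
        * unless the thread holds PRIV_VM_SWAP_NOQUOTA, and
        * SWAP_RESERVE_RLIMIT_ON additionally enforces the per-uid RLIMIT_SWAP
        * limit.  For example, vm.overcommit=3 sets bits 0 and 1, enabling both
        * the strict accounting and the per-uid limit; see tuning(7).
        */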
  194 
  195 static int
  196 sysctl_page_shift(SYSCTL_HANDLER_ARGS)
  197 {
  198         uint64_t newval;
  199         u_long value = *(u_long *)arg1;
  200 
  201         newval = ((uint64_t)value) << PAGE_SHIFT;
  202         return (sysctl_handle_64(oidp, &newval, 0, req));
  203 }
  204 
  205 static bool
  206 swap_reserve_by_cred_rlimit(u_long pincr, struct ucred *cred, int oc)
  207 {
  208         struct uidinfo *uip;
  209         u_long prev;
  210 
  211         uip = cred->cr_ruidinfo;
  212 
  213         prev = atomic_fetchadd_long(&uip->ui_vmsize, pincr);
  214         if ((oc & SWAP_RESERVE_RLIMIT_ON) != 0 &&
  215             prev + pincr > lim_cur(curthread, RLIMIT_SWAP) &&
  216             priv_check(curthread, PRIV_VM_SWAP_NORLIMIT) != 0) {
  217                 prev = atomic_fetchadd_long(&uip->ui_vmsize, -pincr);
  218                 KASSERT(prev >= pincr, ("negative vmsize for uid = %d\n", uip->ui_uid));
  219                 return (false);
  220         }
  221         return (true);
  222 }
  223 
  224 static void
  225 swap_release_by_cred_rlimit(u_long pdecr, struct ucred *cred)
  226 {
  227         struct uidinfo *uip;
  228 #ifdef INVARIANTS
  229         u_long prev;
  230 #endif
  231 
  232         uip = cred->cr_ruidinfo;
  233 
  234 #ifdef INVARIANTS
  235         prev = atomic_fetchadd_long(&uip->ui_vmsize, -pdecr);
  236         KASSERT(prev >= pdecr, ("negative vmsize for uid = %d\n", uip->ui_uid));
  237 #else
  238         atomic_subtract_long(&uip->ui_vmsize, pdecr);
  239 #endif
  240 }
  241 
  242 static void
  243 swap_reserve_force_rlimit(u_long pincr, struct ucred *cred)
  244 {
  245         struct uidinfo *uip;
  246 
  247         uip = cred->cr_ruidinfo;
  248         atomic_add_long(&uip->ui_vmsize, pincr);
  249 }
  250 
  251 bool
  252 swap_reserve(vm_ooffset_t incr)
  253 {
  254 
  255         return (swap_reserve_by_cred(incr, curthread->td_ucred));
  256 }
  257 
  258 bool
  259 swap_reserve_by_cred(vm_ooffset_t incr, struct ucred *cred)
  260 {
  261         u_long r, s, prev, pincr;
  262 #ifdef RACCT
  263         int error;
  264 #endif
  265         int oc;
  266         static int curfail;
  267         static struct timeval lastfail;
  268 
  269         KASSERT((incr & PAGE_MASK) == 0, ("%s: incr: %ju & PAGE_MASK", __func__,
  270             (uintmax_t)incr));
  271 
  272 #ifdef RACCT
  273         if (RACCT_ENABLED()) {
  274                 PROC_LOCK(curproc);
  275                 error = racct_add(curproc, RACCT_SWAP, incr);
  276                 PROC_UNLOCK(curproc);
  277                 if (error != 0)
  278                         return (false);
  279         }
  280 #endif
  281 
  282         pincr = atop(incr);
  283         prev = atomic_fetchadd_long(&swap_reserved, pincr);
  284         r = prev + pincr;
  285         s = swap_total;
  286         oc = atomic_load_int(&overcommit);
  287         if (r > s && (oc & SWAP_RESERVE_ALLOW_NONWIRED) != 0) {
  288                 s += vm_cnt.v_page_count - vm_cnt.v_free_reserved -
  289                     vm_wire_count();
  290         }
  291         if ((oc & SWAP_RESERVE_FORCE_ON) != 0 && r > s &&
  292             priv_check(curthread, PRIV_VM_SWAP_NOQUOTA) != 0) {
  293                 prev = atomic_fetchadd_long(&swap_reserved, -pincr);
  294                 KASSERT(prev >= pincr, ("swap_reserved < incr on overcommit fail"));
  295                 goto out_error;
  296         }
  297 
  298         if (!swap_reserve_by_cred_rlimit(pincr, cred, oc)) {
  299                 prev = atomic_fetchadd_long(&swap_reserved, -pincr);
  300                 KASSERT(prev >= pincr, ("swap_reserved < incr on overcommit fail"));
  301                 goto out_error;
  302         }
  303 
  304         return (true);
  305 
  306 out_error:
  307         if (ppsratecheck(&lastfail, &curfail, 1)) {
  308                 printf("uid %d, pid %d: swap reservation for %jd bytes failed\n",
  309                     cred->cr_ruidinfo->ui_uid, curproc->p_pid, incr);
  310         }
  311 #ifdef RACCT
  312         if (RACCT_ENABLED()) {
  313                 PROC_LOCK(curproc);
  314                 racct_sub(curproc, RACCT_SWAP, incr);
  315                 PROC_UNLOCK(curproc);
  316         }
  317 #endif
  318 
  319         return (false);
  320 }
  321 
  322 void
  323 swap_reserve_force(vm_ooffset_t incr)
  324 {
  325         u_long pincr;
  326 
  327         KASSERT((incr & PAGE_MASK) == 0, ("%s: incr: %ju & PAGE_MASK", __func__,
  328             (uintmax_t)incr));
  329 
  330 #ifdef RACCT
  331         if (RACCT_ENABLED()) {
  332                 PROC_LOCK(curproc);
  333                 racct_add_force(curproc, RACCT_SWAP, incr);
  334                 PROC_UNLOCK(curproc);
  335         }
  336 #endif
  337         pincr = atop(incr);
  338         atomic_add_long(&swap_reserved, pincr);
  339         swap_reserve_force_rlimit(pincr, curthread->td_ucred);
  340 }
  341 
  342 void
  343 swap_release(vm_ooffset_t decr)
  344 {
  345         struct ucred *cred;
  346 
  347         PROC_LOCK(curproc);
  348         cred = curproc->p_ucred;
  349         swap_release_by_cred(decr, cred);
  350         PROC_UNLOCK(curproc);
  351 }
  352 
  353 void
  354 swap_release_by_cred(vm_ooffset_t decr, struct ucred *cred)
  355 {
  356         u_long pdecr;
  357 #ifdef INVARIANTS
  358         u_long prev;
  359 #endif
  360 
  361         KASSERT((decr & PAGE_MASK) == 0, ("%s: decr: %ju & PAGE_MASK", __func__,
  362             (uintmax_t)decr));
  363 
  364         pdecr = atop(decr);
  365 #ifdef INVARIANTS
  366         prev = atomic_fetchadd_long(&swap_reserved, -pdecr);
  367         KASSERT(prev >= pdecr, ("swap_reserved < decr"));
  368 #else
  369         atomic_subtract_long(&swap_reserved, pdecr);
  370 #endif
  371 
  372         swap_release_by_cred_rlimit(pdecr, cred);
  373 #ifdef RACCT
  374         if (racct_enable)
  375                 racct_sub_cred(cred, RACCT_SWAP, decr);
  376 #endif
  377 }
  378 
  379 static int swap_pager_full = 2; /* swap space exhaustion (task killing) */
  380 static int swap_pager_almost_full = 1; /* swap space exhaustion (w/hysteresis)*/
  381 static struct mtx swbuf_mtx;    /* to sync nsw_wcount_async */
  382 static int nsw_wcount_async;    /* limit async write buffers */
  383 static int nsw_wcount_async_max;/* assigned maximum                     */
  384 static int nsw_cluster_max;     /* maximum VOP I/O allowed              */
  385 
  386 static int sysctl_swap_async_max(SYSCTL_HANDLER_ARGS);
  387 SYSCTL_PROC(_vm, OID_AUTO, swap_async_max, CTLTYPE_INT | CTLFLAG_RW |
  388     CTLFLAG_MPSAFE, NULL, 0, sysctl_swap_async_max, "I",
  389     "Maximum running async swap ops");
  390 static int sysctl_swap_fragmentation(SYSCTL_HANDLER_ARGS);
  391 SYSCTL_PROC(_vm, OID_AUTO, swap_fragmentation, CTLTYPE_STRING | CTLFLAG_RD |
  392     CTLFLAG_MPSAFE, NULL, 0, sysctl_swap_fragmentation, "A",
  393     "Swap Fragmentation Info");
  394 
  395 static struct sx sw_alloc_sx;
  396 
  397 /*
  398  * "named" and "unnamed" anon region objects.  Try to reduce the overhead
  399  * of searching a named list by hashing it just a little.
  400  */
  401 
  402 #define NOBJLISTS               8
  403 
  404 #define NOBJLIST(handle)        \
  405         (&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])
  406 
  407 static struct pagerlst  swap_pager_object_list[NOBJLISTS];
  408 static uma_zone_t swwbuf_zone;
  409 static uma_zone_t swrbuf_zone;
  410 static uma_zone_t swblk_zone;
  411 static uma_zone_t swpctrie_zone;
  412 
  413 /*
  414  * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
  415  * calls hooked from other parts of the VM system and do not appear here.
  416  * (see vm/swap_pager.h).
  417  */
  418 static vm_object_t
  419                 swap_pager_alloc(void *handle, vm_ooffset_t size,
  420                     vm_prot_t prot, vm_ooffset_t offset, struct ucred *);
  421 static void     swap_pager_dealloc(vm_object_t object);
  422 static int      swap_pager_getpages(vm_object_t, vm_page_t *, int, int *,
  423     int *);
  424 static int      swap_pager_getpages_async(vm_object_t, vm_page_t *, int, int *,
  425     int *, pgo_getpages_iodone_t, void *);
  426 static void     swap_pager_putpages(vm_object_t, vm_page_t *, int, boolean_t, int *);
  427 static boolean_t
  428                 swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before, int *after);
  429 static void     swap_pager_init(void);
  430 static void     swap_pager_unswapped(vm_page_t);
  431 static void     swap_pager_swapoff(struct swdevt *sp);
  432 static void     swap_pager_update_writecount(vm_object_t object,
  433     vm_offset_t start, vm_offset_t end);
  434 static void     swap_pager_release_writecount(vm_object_t object,
  435     vm_offset_t start, vm_offset_t end);
  436 
  437 struct pagerops swappagerops = {
  438         .pgo_init =     swap_pager_init,        /* early system initialization of pager */
  439         .pgo_alloc =    swap_pager_alloc,       /* allocate an OBJT_SWAP object         */
  440         .pgo_dealloc =  swap_pager_dealloc,     /* deallocate an OBJT_SWAP object       */
  441         .pgo_getpages = swap_pager_getpages,    /* pagein                               */
  442         .pgo_getpages_async = swap_pager_getpages_async, /* pagein (async)              */
  443         .pgo_putpages = swap_pager_putpages,    /* pageout                              */
  444         .pgo_haspage =  swap_pager_haspage,     /* get backing store status for page    */
  445         .pgo_pageunswapped = swap_pager_unswapped,      /* remove swap related to page          */
  446         .pgo_update_writecount = swap_pager_update_writecount,
  447         .pgo_release_writecount = swap_pager_release_writecount,
  448 };
  449 
  450 /*
  451  * swap_*() routines are externally accessible.  swp_*() routines are
  452  * internal.
  453  */
  454 static int nswap_lowat = 128;   /* in pages, swap_pager_almost_full warn */
  455 static int nswap_hiwat = 512;   /* in pages, swap_pager_almost_full warn */
  456 
  457 SYSCTL_INT(_vm, OID_AUTO, dmmax, CTLFLAG_RD, &nsw_cluster_max, 0,
  458     "Maximum size of a swap block in pages");
  459 
  460 static void     swp_sizecheck(void);
  461 static void     swp_pager_async_iodone(struct buf *bp);
  462 static bool     swp_pager_swblk_empty(struct swblk *sb, int start, int limit);
  463 static void     swp_pager_free_empty_swblk(vm_object_t, struct swblk *sb);
  464 static int      swapongeom(struct vnode *);
  465 static int      swaponvp(struct thread *, struct vnode *, u_long);
  466 static int      swapoff_one(struct swdevt *sp, struct ucred *cred);
  467 
  468 /*
  469  * Swap bitmap functions
  470  */
  471 static void     swp_pager_freeswapspace(daddr_t blk, daddr_t npages);
  472 static daddr_t  swp_pager_getswapspace(int *npages);
  473 
  474 /*
  475  * Metadata functions
  476  */
  477 static daddr_t swp_pager_meta_build(vm_object_t, vm_pindex_t, daddr_t);
  478 static void swp_pager_meta_free(vm_object_t, vm_pindex_t, vm_pindex_t);
  479 static void swp_pager_meta_transfer(vm_object_t src, vm_object_t dst,
  480     vm_pindex_t pindex, vm_pindex_t count);
  481 static void swp_pager_meta_free_all(vm_object_t);
  482 static daddr_t swp_pager_meta_lookup(vm_object_t, vm_pindex_t);
  483 
  484 static void
  485 swp_pager_init_freerange(daddr_t *start, daddr_t *num)
  486 {
  487 
  488         *start = SWAPBLK_NONE;
  489         *num = 0;
  490 }
  491 
  492 static void
  493 swp_pager_update_freerange(daddr_t *start, daddr_t *num, daddr_t addr)
  494 {
  495 
  496         if (*start + *num == addr) {
  497                 (*num)++;
  498         } else {
  499                 swp_pager_freeswapspace(*start, *num);
  500                 *start = addr;
  501                 *num = 1;
  502         }
  503 }
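       /*
        * [Editor's note] A hedged sketch of the caller pattern for these
        * freerange helpers, as used by swap_pager_reserve() and the metadata
        * code later in this file: contiguous runs of released blocks are
        * batched and flushed to swp_pager_freeswapspace() in one call.
        *
        *      daddr_t s_free, n_free;
        *
        *      swp_pager_init_freerange(&s_free, &n_free);
        *      for each swap block address 'addr' being released:
        *              swp_pager_update_freerange(&s_free, &n_free, addr);
        *      swp_pager_freeswapspace(s_free, n_free);
        */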
  504 
  505 static void *
  506 swblk_trie_alloc(struct pctrie *ptree)
  507 {
  508 
  509         return (uma_zalloc(swpctrie_zone, M_NOWAIT | (curproc == pageproc ?
  510             M_USE_RESERVE : 0)));
  511 }
  512 
  513 static void
  514 swblk_trie_free(struct pctrie *ptree, void *node)
  515 {
  516 
  517         uma_zfree(swpctrie_zone, node);
  518 }
  519 
  520 PCTRIE_DEFINE(SWAP, swblk, p, swblk_trie_alloc, swblk_trie_free);
  521 
  522 /*
  523  * SWP_SIZECHECK() -    update swap_pager_full indication
  524  *
  525  *      update the swap_pager_almost_full indication and warn when we are
  526  *      about to run out of swap space, using lowat/hiwat hysteresis.
  527  *
  528  *      Clear swap_pager_full ( task killing ) indication when lowat is met.
  529  *
  530  *      No restrictions on call
  531  *      This routine may not block.
  532  */
  533 static void
  534 swp_sizecheck(void)
  535 {
  536 
  537         if (swap_pager_avail < nswap_lowat) {
  538                 if (swap_pager_almost_full == 0) {
  539                         printf("swap_pager: out of swap space\n");
  540                         swap_pager_almost_full = 1;
  541                 }
  542         } else {
  543                 swap_pager_full = 0;
  544                 if (swap_pager_avail > nswap_hiwat)
  545                         swap_pager_almost_full = 0;
  546         }
  547 }
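       /*
        * [Editor's note] Worked example of the hysteresis above with the
        * default nswap_lowat/nswap_hiwat values of 128 and 512 pages: once
        * swap_pager_avail drops below 128 pages, the warning prints a single
        * time and swap_pager_almost_full is set; the indication is not cleared
        * again until free swap climbs back above 512 pages, so it does not
        * flap while the system hovers near the low-water mark.
        */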
  548 
  549 /*
  550  * SWAP_PAGER_INIT() -  initialize the swap pager!
  551  *
  552  *      Expected to be started from system init.  NOTE:  This code is run
  553  *      before much else so be careful what you depend on.  Most of the VM
  554  *      system has yet to be initialized at this point.
  555  */
  556 static void
  557 swap_pager_init(void)
  558 {
  559         /*
  560          * Initialize object lists
  561          */
  562         int i;
  563 
  564         for (i = 0; i < NOBJLISTS; ++i)
  565                 TAILQ_INIT(&swap_pager_object_list[i]);
  566         mtx_init(&sw_dev_mtx, "swapdev", NULL, MTX_DEF);
  567         sx_init(&sw_alloc_sx, "swspsx");
  568         sx_init(&swdev_syscall_lock, "swsysc");
  569 }
  570 
  571 /*
  572  * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
  573  *
  574  *      Expected to be started from pageout process once, prior to entering
  575  *      its main loop.
  576  */
  577 void
  578 swap_pager_swap_init(void)
  579 {
  580         unsigned long n, n2;
  581 
  582         /*
  583          * Number of in-transit swap bp operations.  Don't
  584          * exhaust the pbufs completely.  Make sure we
  585          * initialize workable values (0 will work for hysteresis
  586          * but it isn't very efficient).
  587          *
  588          * The nsw_cluster_max is constrained by the bp->b_pages[]
  589          * array, which has maxphys / PAGE_SIZE entries, and our locally
  590          * defined MAX_PAGEOUT_CLUSTER.   Also be aware that swap ops are
  591          * constrained by the swap device interleave stripe size.
  592          *
  593          * Currently we hardwire nsw_wcount_async to 4.  This limit is
  594          * designed to prevent other I/O from having high latencies due to
  595          * our pageout I/O.  The value 4 works well for one or two active swap
  596          * devices but is probably a little low if you have more.  Even so,
  597          * a higher value would probably generate only a limited improvement
  598          * with three or four active swap devices since the system does not
  599          * typically have to pageout at extreme bandwidths.   We will want
   600          * at least 2 per swap device, and 4 is a pretty good value if you
  601          * have one NFS swap device due to the command/ack latency over NFS.
  602          * So it all works out pretty well.
  603          */
  604         nsw_cluster_max = min(maxphys / PAGE_SIZE, MAX_PAGEOUT_CLUSTER);
  605 
  606         nsw_wcount_async = 4;
  607         nsw_wcount_async_max = nsw_wcount_async;
  608         mtx_init(&swbuf_mtx, "async swbuf mutex", NULL, MTX_DEF);
  609 
  610         swwbuf_zone = pbuf_zsecond_create("swwbuf", nswbuf / 4);
  611         swrbuf_zone = pbuf_zsecond_create("swrbuf", nswbuf / 2);
  612 
  613         /*
  614          * Initialize our zone, taking the user's requested size or
  615          * estimating the number we need based on the number of pages
  616          * in the system.
  617          */
  618         n = maxswzone != 0 ? maxswzone / sizeof(struct swblk) :
  619             vm_cnt.v_page_count / 2;
  620         swpctrie_zone = uma_zcreate("swpctrie", pctrie_node_size(), NULL, NULL,
  621             pctrie_zone_init, NULL, UMA_ALIGN_PTR, 0);
  622         if (swpctrie_zone == NULL)
  623                 panic("failed to create swap pctrie zone.");
  624         swblk_zone = uma_zcreate("swblk", sizeof(struct swblk), NULL, NULL,
  625             NULL, NULL, _Alignof(struct swblk) - 1, 0);
  626         if (swblk_zone == NULL)
  627                 panic("failed to create swap blk zone.");
  628         n2 = n;
  629         do {
  630                 if (uma_zone_reserve_kva(swblk_zone, n))
  631                         break;
  632                 /*
  633                  * if the allocation failed, try a zone two thirds the
  634                  * size of the previous attempt.
  635                  */
  636                 n -= ((n + 2) / 3);
  637         } while (n > 0);
  638 
  639         /*
  640          * Often uma_zone_reserve_kva() cannot reserve exactly the
  641          * requested size.  Account for the difference when
  642          * calculating swap_maxpages.
  643          */
  644         n = uma_zone_get_max(swblk_zone);
  645 
  646         if (n < n2)
  647                 printf("Swap blk zone entries changed from %lu to %lu.\n",
  648                     n2, n);
  649         /* absolute maximum we can handle assuming 100% efficiency */
  650         swap_maxpages = n * SWAP_META_PAGES;
  651         swzone = n * sizeof(struct swblk);
  652         if (!uma_zone_reserve_kva(swpctrie_zone, n))
  653                 printf("Cannot reserve swap pctrie zone, "
  654                     "reduce kern.maxswzone.\n");
  655 }
  656 
  657 static vm_object_t
  658 swap_pager_alloc_init(void *handle, struct ucred *cred, vm_ooffset_t size,
  659     vm_ooffset_t offset)
  660 {
  661         vm_object_t object;
  662 
  663         if (cred != NULL) {
  664                 if (!swap_reserve_by_cred(size, cred))
  665                         return (NULL);
  666                 crhold(cred);
  667         }
  668 
  669         /*
  670          * The un_pager.swp.swp_blks trie is initialized by
  671          * vm_object_allocate() to ensure the correct order of
  672          * visibility to other threads.
  673          */
  674         object = vm_object_allocate(OBJT_SWAP, OFF_TO_IDX(offset +
  675             PAGE_MASK + size));
  676 
  677         object->un_pager.swp.writemappings = 0;
  678         object->handle = handle;
  679         if (cred != NULL) {
  680                 object->cred = cred;
  681                 object->charge = size;
  682         }
  683         return (object);
  684 }
  685 
  686 /*
  687  * SWAP_PAGER_ALLOC() - allocate a new OBJT_SWAP VM object and instantiate
  688  *                      its metadata structures.
  689  *
  690  *      This routine is called from the mmap and fork code to create a new
  691  *      OBJT_SWAP object.
  692  *
  693  *      This routine must ensure that no live duplicate is created for
  694  *      the named object request, which is protected against by
  695  *      holding the sw_alloc_sx lock in case handle != NULL.
  696  */
  697 static vm_object_t
  698 swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
  699     vm_ooffset_t offset, struct ucred *cred)
  700 {
  701         vm_object_t object;
  702 
  703         if (handle != NULL) {
  704                 /*
  705                  * Reference existing named region or allocate new one.  There
  706                  * should not be a race here against swp_pager_meta_build()
  707                  * as called from vm_page_remove() in regards to the lookup
  708                  * of the handle.
  709                  */
  710                 sx_xlock(&sw_alloc_sx);
  711                 object = vm_pager_object_lookup(NOBJLIST(handle), handle);
  712                 if (object == NULL) {
  713                         object = swap_pager_alloc_init(handle, cred, size,
  714                             offset);
  715                         if (object != NULL) {
  716                                 TAILQ_INSERT_TAIL(NOBJLIST(object->handle),
  717                                     object, pager_object_list);
  718                         }
  719                 }
  720                 sx_xunlock(&sw_alloc_sx);
  721         } else {
  722                 object = swap_pager_alloc_init(handle, cred, size, offset);
  723         }
  724         return (object);
  725 }
  726 
  727 /*
  728  * SWAP_PAGER_DEALLOC() -       remove swap metadata from object
  729  *
  730  *      The swap backing for the object is destroyed.  The code is
  731  *      designed such that we can reinstantiate it later, but this
  732  *      routine is typically called only when the entire object is
  733  *      about to be destroyed.
  734  *
  735  *      The object must be locked.
  736  */
  737 static void
  738 swap_pager_dealloc(vm_object_t object)
  739 {
  740 
  741         VM_OBJECT_ASSERT_WLOCKED(object);
  742         KASSERT((object->flags & OBJ_DEAD) != 0, ("dealloc of reachable obj"));
  743 
  744         /*
  745          * Remove from list right away so lookups will fail if we block for
  746          * pageout completion.
  747          */
  748         if ((object->flags & OBJ_ANON) == 0 && object->handle != NULL) {
  749                 VM_OBJECT_WUNLOCK(object);
  750                 sx_xlock(&sw_alloc_sx);
  751                 TAILQ_REMOVE(NOBJLIST(object->handle), object,
  752                     pager_object_list);
  753                 sx_xunlock(&sw_alloc_sx);
  754                 VM_OBJECT_WLOCK(object);
  755         }
  756 
  757         vm_object_pip_wait(object, "swpdea");
  758 
  759         /*
  760          * Free all remaining metadata.  We only bother to free it from
  761          * the swap meta data.  We do not attempt to free swapblk's still
  762          * associated with vm_page_t's for this object.  We do not care
  763          * if paging is still in progress on some objects.
  764          */
  765         swp_pager_meta_free_all(object);
  766         object->handle = NULL;
  767         object->type = OBJT_DEAD;
  768 }
  769 
  770 /************************************************************************
  771  *                      SWAP PAGER BITMAP ROUTINES                      *
  772  ************************************************************************/
  773 
  774 /*
  775  * SWP_PAGER_GETSWAPSPACE() -   allocate raw swap space
  776  *
  777  *      Allocate swap for up to the requested number of pages.  The
  778  *      starting swap block number (a page index) is returned or
  779  *      SWAPBLK_NONE if the allocation failed.
  780  *
  781  *      Also has the side effect of advising that somebody made a mistake
  782  *      when they configured swap and didn't configure enough.
  783  *
  784  *      This routine may not sleep.
  785  *
  786  *      We allocate in round-robin fashion from the configured devices.
  787  */
  788 static daddr_t
  789 swp_pager_getswapspace(int *io_npages)
  790 {
  791         daddr_t blk;
  792         struct swdevt *sp;
  793         int mpages, npages;
  794 
  795         KASSERT(*io_npages >= 1,
  796             ("%s: npages not positive", __func__));
  797         blk = SWAPBLK_NONE;
  798         mpages = *io_npages;
  799         npages = imin(BLIST_MAX_ALLOC, mpages);
  800         mtx_lock(&sw_dev_mtx);
  801         sp = swdevhd;
  802         while (!TAILQ_EMPTY(&swtailq)) {
  803                 if (sp == NULL)
  804                         sp = TAILQ_FIRST(&swtailq);
  805                 if ((sp->sw_flags & SW_CLOSING) == 0)
  806                         blk = blist_alloc(sp->sw_blist, &npages, mpages);
  807                 if (blk != SWAPBLK_NONE)
  808                         break;
  809                 sp = TAILQ_NEXT(sp, sw_list);
  810                 if (swdevhd == sp) {
  811                         if (npages == 1)
  812                                 break;
  813                         mpages = npages - 1;
  814                         npages >>= 1;
  815                 }
  816         }
  817         if (blk != SWAPBLK_NONE) {
  818                 *io_npages = npages;
  819                 blk += sp->sw_first;
  820                 sp->sw_used += npages;
  821                 swap_pager_avail -= npages;
  822                 swp_sizecheck();
  823                 swdevhd = TAILQ_NEXT(sp, sw_list);
  824         } else {
  825                 if (swap_pager_full != 2) {
  826                         printf("swp_pager_getswapspace(%d): failed\n",
  827                             *io_npages);
  828                         swap_pager_full = 2;
  829                         swap_pager_almost_full = 1;
  830                 }
  831                 swdevhd = NULL;
  832         }
  833         mtx_unlock(&sw_dev_mtx);
  834         return (blk);
  835 }
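       /*
        * [Editor's note] The loop above degrades the request instead of
        * failing it outright: each time the round-robin pointer wraps back to
        * swdevhd without an allocation, the minimum acceptable run is halved
        * ("npages >>= 1") and the upper bound trimmed, so a fragmented swap
        * configuration hands back progressively smaller contiguous runs.  The
        * size actually granted is reported through *io_npages; only a request
        * that cannot be satisfied with even a single page falls through to the
        * "failed" path below the loop.
        */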
  836 
  837 static bool
  838 swp_pager_isondev(daddr_t blk, struct swdevt *sp)
  839 {
  840 
  841         return (blk >= sp->sw_first && blk < sp->sw_end);
  842 }
  843 
  844 static void
  845 swp_pager_strategy(struct buf *bp)
  846 {
  847         struct swdevt *sp;
  848 
  849         mtx_lock(&sw_dev_mtx);
  850         TAILQ_FOREACH(sp, &swtailq, sw_list) {
  851                 if (swp_pager_isondev(bp->b_blkno, sp)) {
  852                         mtx_unlock(&sw_dev_mtx);
  853                         if ((sp->sw_flags & SW_UNMAPPED) != 0 &&
  854                             unmapped_buf_allowed) {
  855                                 bp->b_data = unmapped_buf;
  856                                 bp->b_offset = 0;
  857                         } else {
  858                                 pmap_qenter((vm_offset_t)bp->b_data,
  859                                     &bp->b_pages[0], bp->b_bcount / PAGE_SIZE);
  860                         }
  861                         sp->sw_strategy(bp, sp);
  862                         return;
  863                 }
  864         }
  865         panic("Swapdev not found");
  866 }
  867 
  868 /*
  869  * SWP_PAGER_FREESWAPSPACE() -  free raw swap space
  870  *
  871  *      This routine returns the specified swap blocks back to the bitmap.
  872  *
  873  *      This routine may not sleep.
  874  */
  875 static void
  876 swp_pager_freeswapspace(daddr_t blk, daddr_t npages)
  877 {
  878         struct swdevt *sp;
  879 
  880         if (npages == 0)
  881                 return;
  882         mtx_lock(&sw_dev_mtx);
  883         TAILQ_FOREACH(sp, &swtailq, sw_list) {
  884                 if (swp_pager_isondev(blk, sp)) {
  885                         sp->sw_used -= npages;
  886                         /*
  887                          * If we are attempting to stop swapping on
  888                          * this device, we don't want to mark any
  889                          * blocks free lest they be reused.
  890                          */
  891                         if ((sp->sw_flags & SW_CLOSING) == 0) {
  892                                 blist_free(sp->sw_blist, blk - sp->sw_first,
  893                                     npages);
  894                                 swap_pager_avail += npages;
  895                                 swp_sizecheck();
  896                         }
  897                         mtx_unlock(&sw_dev_mtx);
  898                         return;
  899                 }
  900         }
  901         panic("Swapdev not found");
  902 }
  903 
  904 /*
  905  * SYSCTL_SWAP_FRAGMENTATION() -        produce raw swap space stats
  906  */
  907 static int
  908 sysctl_swap_fragmentation(SYSCTL_HANDLER_ARGS)
  909 {
  910         struct sbuf sbuf;
  911         struct swdevt *sp;
  912         const char *devname;
  913         int error;
  914 
  915         error = sysctl_wire_old_buffer(req, 0);
  916         if (error != 0)
  917                 return (error);
  918         sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
  919         mtx_lock(&sw_dev_mtx);
  920         TAILQ_FOREACH(sp, &swtailq, sw_list) {
  921                 if (vn_isdisk(sp->sw_vp))
  922                         devname = devtoname(sp->sw_vp->v_rdev);
  923                 else
  924                         devname = "[file]";
  925                 sbuf_printf(&sbuf, "\nFree space on device %s:\n", devname);
  926                 blist_stats(sp->sw_blist, &sbuf);
  927         }
  928         mtx_unlock(&sw_dev_mtx);
  929         error = sbuf_finish(&sbuf);
  930         sbuf_delete(&sbuf);
  931         return (error);
  932 }
  933 
  934 /*
  935  * SWAP_PAGER_FREESPACE() -     frees swap blocks associated with a page
  936  *                              range within an object.
  937  *
  938  *      This is a globally accessible routine.
  939  *
  940  *      This routine removes swapblk assignments from swap metadata.
  941  *
  942  *      The external callers of this routine typically have already destroyed
  943  *      or renamed vm_page_t's associated with this range in the object so
  944  *      we should be ok.
  945  *
  946  *      The object must be locked.
  947  */
  948 void
  949 swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_size_t size)
  950 {
  951 
  952         swp_pager_meta_free(object, start, size);
  953 }
  954 
  955 /*
  956  * SWAP_PAGER_RESERVE() - reserve swap blocks in object
  957  *
  958  *      Assigns swap blocks to the specified range within the object.  The
  959  *      swap blocks are not zeroed.  Any previous swap assignment is destroyed.
  960  *
  961  *      Returns 0 on success, -1 on failure.
  962  */
  963 int
  964 swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
  965 {
  966         daddr_t addr, blk, n_free, s_free;
  967         int i, j, n;
  968 
  969         swp_pager_init_freerange(&s_free, &n_free);
  970         VM_OBJECT_WLOCK(object);
  971         for (i = 0; i < size; i += n) {
  972                 n = size - i;
  973                 blk = swp_pager_getswapspace(&n);
  974                 if (blk == SWAPBLK_NONE) {
  975                         swp_pager_meta_free(object, start, i);
  976                         VM_OBJECT_WUNLOCK(object);
  977                         return (-1);
  978                 }
  979                 for (j = 0; j < n; ++j) {
  980                         addr = swp_pager_meta_build(object,
  981                             start + i + j, blk + j);
  982                         if (addr != SWAPBLK_NONE)
  983                                 swp_pager_update_freerange(&s_free, &n_free,
  984                                     addr);
  985                 }
  986         }
  987         swp_pager_freeswapspace(s_free, n_free);
  988         VM_OBJECT_WUNLOCK(object);
  989         return (0);
  990 }
  991 
  992 static bool
  993 swp_pager_xfer_source(vm_object_t srcobject, vm_object_t dstobject,
  994     vm_pindex_t pindex, daddr_t addr)
  995 {
  996         daddr_t dstaddr;
  997 
  998         KASSERT(srcobject->type == OBJT_SWAP,
  999             ("%s: Srcobject not swappable", __func__));
 1000         if (dstobject->type == OBJT_SWAP &&
 1001             swp_pager_meta_lookup(dstobject, pindex) != SWAPBLK_NONE) {
 1002                 /* Caller should destroy the source block. */
 1003                 return (false);
 1004         }
 1005 
 1006         /*
 1007          * Destination has no swapblk and is not resident, transfer source.
 1008          * swp_pager_meta_build() can sleep.
 1009          */
 1010         VM_OBJECT_WUNLOCK(srcobject);
 1011         dstaddr = swp_pager_meta_build(dstobject, pindex, addr);
 1012         KASSERT(dstaddr == SWAPBLK_NONE,
 1013             ("Unexpected destination swapblk"));
 1014         VM_OBJECT_WLOCK(srcobject);
 1015 
 1016         return (true);
 1017 }
 1018 
 1019 /*
 1020  * SWAP_PAGER_COPY() -  copy blocks from source pager to destination pager
 1021  *                      and destroy the source.
 1022  *
 1023  *      Copy any valid swapblks from the source to the destination.  In
 1024  *      cases where both the source and destination have a valid swapblk,
 1025  *      we keep the destination's.
 1026  *
 1027  *      This routine is allowed to sleep.  It may sleep allocating metadata
 1028  *      indirectly through swp_pager_meta_build().
 1029  *
 1030  *      The source object contains no vm_page_t's (which is just as well)
 1031  *
 1032  *      The source object is of type OBJT_SWAP.
 1033  *
 1034  *      The source and destination objects must be locked.
 1035  *      Both object locks may temporarily be released.
 1036  */
 1037 void
 1038 swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
 1039     vm_pindex_t offset, int destroysource)
 1040 {
 1041 
 1042         VM_OBJECT_ASSERT_WLOCKED(srcobject);
 1043         VM_OBJECT_ASSERT_WLOCKED(dstobject);
 1044 
 1045         /*
 1046          * If destroysource is set, we remove the source object from the
 1047          * swap_pager internal queue now.
 1048          */
 1049         if (destroysource && (srcobject->flags & OBJ_ANON) == 0 &&
 1050             srcobject->handle != NULL) {
 1051                 VM_OBJECT_WUNLOCK(srcobject);
 1052                 VM_OBJECT_WUNLOCK(dstobject);
 1053                 sx_xlock(&sw_alloc_sx);
 1054                 TAILQ_REMOVE(NOBJLIST(srcobject->handle), srcobject,
 1055                     pager_object_list);
 1056                 sx_xunlock(&sw_alloc_sx);
 1057                 VM_OBJECT_WLOCK(dstobject);
 1058                 VM_OBJECT_WLOCK(srcobject);
 1059         }
 1060 
 1061         /*
 1062          * Transfer source to destination.
 1063          */
 1064         swp_pager_meta_transfer(srcobject, dstobject, offset, dstobject->size);
 1065 
 1066         /*
 1067          * Free left over swap blocks in source.
 1068          *
 1069          * We have to revert the type to OBJT_DEFAULT so we do not accidentally
 1070          * double-remove the object from the swap queues.
 1071          */
 1072         if (destroysource) {
 1073                 swp_pager_meta_free_all(srcobject);
 1074                 /*
 1075                  * Reverting the type is not necessary, the caller is going
 1076                  * to destroy srcobject directly, but I'm doing it here
 1077                  * for consistency since we've removed the object from its
 1078                  * queues.
 1079                  */
 1080                 srcobject->type = OBJT_DEFAULT;
 1081         }
 1082 }
 1083 
 1084 /*
 1085  * SWAP_PAGER_HASPAGE() -       determine if we have good backing store for
 1086  *                              the requested page.
 1087  *
 1088  *      We determine whether good backing store exists for the requested
 1089  *      page and return TRUE if it does, FALSE if it doesn't.
 1090  *
 1091  *      If TRUE, we also try to determine how much valid, contiguous backing
 1092  *      store exists before and after the requested page.
 1093  */
 1094 static boolean_t
 1095 swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
 1096     int *after)
 1097 {
 1098         daddr_t blk, blk0;
 1099         int i;
 1100 
 1101         VM_OBJECT_ASSERT_LOCKED(object);
 1102         KASSERT(object->type == OBJT_SWAP,
 1103             ("%s: object not swappable", __func__));
 1104 
 1105         /*
 1106          * do we have good backing store at the requested index ?
 1107          */
 1108         blk0 = swp_pager_meta_lookup(object, pindex);
 1109         if (blk0 == SWAPBLK_NONE) {
 1110                 if (before)
 1111                         *before = 0;
 1112                 if (after)
 1113                         *after = 0;
 1114                 return (FALSE);
 1115         }
 1116 
 1117         /*
 1118          * find backwards-looking contiguous good backing store
 1119          */
 1120         if (before != NULL) {
 1121                 for (i = 1; i < SWB_NPAGES; i++) {
 1122                         if (i > pindex)
 1123                                 break;
 1124                         blk = swp_pager_meta_lookup(object, pindex - i);
 1125                         if (blk != blk0 - i)
 1126                                 break;
 1127                 }
 1128                 *before = i - 1;
 1129         }
 1130 
 1131         /*
 1132          * find forward-looking contiguous good backing store
 1133          */
 1134         if (after != NULL) {
 1135                 for (i = 1; i < SWB_NPAGES; i++) {
 1136                         blk = swp_pager_meta_lookup(object, pindex + i);
 1137                         if (blk != blk0 + i)
 1138                                 break;
 1139                 }
 1140                 *after = i - 1;
 1141         }
 1142         return (TRUE);
 1143 }
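       /*
        * [Editor's note] The before/after counts computed here bound the
        * clustering performed by swap_pager_getpages_locked() below: the
        * caller's rbehind/rahead requests are clipped so that readbehind and
        * readahead never extend past the contiguous run of swap blocks found
        * around the requested page, nor past already-resident neighbors.
        */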
 1144 
 1145 /*
 1146  * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
 1147  *
 1148  *      This removes any associated swap backing store, whether valid or
 1149  *      not, from the page.
 1150  *
 1151  *      This routine is typically called when a page is made dirty, at
 1152  *      which point any associated swap can be freed.  MADV_FREE also
 1153  *      calls us in a special-case situation
 1154  *
 1155  *      NOTE!!!  If the page is clean and the swap was valid, the caller
 1156  *      should make the page dirty before calling this routine.  This routine
 1157  *      does NOT change the m->dirty status of the page.  Also: MADV_FREE
 1158  *      depends on it.
 1159  *
 1160  *      This routine may not sleep.
 1161  *
 1162  *      The object containing the page may be locked.
 1163  */
 1164 static void
 1165 swap_pager_unswapped(vm_page_t m)
 1166 {
 1167         struct swblk *sb;
 1168         vm_object_t obj;
 1169 
 1170         /*
 1171          * Handle enqueing deferred frees first.  If we do not have the
 1172          * object lock we wait for the page daemon to clear the space.
 1173          */
 1174         obj = m->object;
 1175         if (!VM_OBJECT_WOWNED(obj)) {
 1176                 VM_PAGE_OBJECT_BUSY_ASSERT(m);
 1177                 /*
 1178                  * The caller is responsible for synchronization but we
 1179                  * will harmlessly handle races.  This is typically provided
 1180                  * by only calling unswapped() when a page transitions from
 1181                  * clean to dirty.
 1182                  */
 1183                 if ((m->a.flags & (PGA_SWAP_SPACE | PGA_SWAP_FREE)) ==
 1184                     PGA_SWAP_SPACE) {
 1185                         vm_page_aflag_set(m, PGA_SWAP_FREE);
 1186                         counter_u64_add(swap_free_deferred, 1);
 1187                 }
 1188                 return;
 1189         }
 1190         if ((m->a.flags & PGA_SWAP_FREE) != 0)
 1191                 counter_u64_add(swap_free_completed, 1);
 1192         vm_page_aflag_clear(m, PGA_SWAP_FREE | PGA_SWAP_SPACE);
 1193 
 1194         /*
 1195          * The meta data only exists if the object is OBJT_SWAP
 1196          * and even then might not be allocated yet.
 1197          */
 1198         KASSERT(m->object->type == OBJT_SWAP,
 1199             ("Free object not swappable"));
 1200 
 1201         sb = SWAP_PCTRIE_LOOKUP(&m->object->un_pager.swp.swp_blks,
 1202             rounddown(m->pindex, SWAP_META_PAGES));
 1203         if (sb == NULL)
 1204                 return;
 1205         if (sb->d[m->pindex % SWAP_META_PAGES] == SWAPBLK_NONE)
 1206                 return;
 1207         swp_pager_freeswapspace(sb->d[m->pindex % SWAP_META_PAGES], 1);
 1208         sb->d[m->pindex % SWAP_META_PAGES] = SWAPBLK_NONE;
 1209         swp_pager_free_empty_swblk(m->object, sb);
 1210 }
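       /*
        * [Editor's note] Two outcomes are possible above, tracked by the
        * vm.stats.swap counters defined near the top of this file: without the
        * object lock the page is merely flagged PGA_SWAP_FREE (free_deferred)
        * and the space is reclaimed later by the page daemon; with the lock
        * held the swap block is freed immediately and, if a deferred request
        * was pending on the page, free_completed is bumped.
        */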
 1211 
 1212 /*
 1213  * swap_pager_getpages() - bring pages in from swap
 1214  *
 1215  *      Attempt to page in the pages in array "ma" of length "count".  The
 1216  *      caller may optionally specify that additional pages preceding and
 1217  *      succeeding the specified range be paged in.  The number of such pages
 1218  *      is returned in the "rbehind" and "rahead" parameters, and they will
 1219  *      be in the inactive queue upon return.
 1220  *
 1221  *      The pages in "ma" must be busied and will remain busied upon return.
 1222  */
 1223 static int
 1224 swap_pager_getpages_locked(vm_object_t object, vm_page_t *ma, int count,
 1225     int *rbehind, int *rahead)
 1226 {
 1227         struct buf *bp;
 1228         vm_page_t bm, mpred, msucc, p;
 1229         vm_pindex_t pindex;
 1230         daddr_t blk;
 1231         int i, maxahead, maxbehind, reqcount;
 1232 
 1233         VM_OBJECT_ASSERT_WLOCKED(object);
 1234         reqcount = count;
 1235 
 1236         KASSERT(object->type == OBJT_SWAP,
 1237             ("%s: object not swappable", __func__));
 1238         if (!swap_pager_haspage(object, ma[0]->pindex, &maxbehind, &maxahead)) {
 1239                 VM_OBJECT_WUNLOCK(object);
 1240                 return (VM_PAGER_FAIL);
 1241         }
 1242 
 1243         KASSERT(reqcount - 1 <= maxahead,
 1244             ("page count %d extends beyond swap block", reqcount));
 1245 
 1246         /*
 1247          * Do not transfer any pages other than those that are xbusied
 1248          * when running during a split or collapse operation.  This
 1249          * prevents clustering from re-creating pages which are being
 1250          * moved into another object.
 1251          */
 1252         if ((object->flags & (OBJ_SPLIT | OBJ_DEAD)) != 0) {
 1253                 maxahead = reqcount - 1;
 1254                 maxbehind = 0;
 1255         }
 1256 
 1257         /*
 1258          * Clip the readahead and readbehind ranges to exclude resident pages.
 1259          */
 1260         if (rahead != NULL) {
 1261                 *rahead = imin(*rahead, maxahead - (reqcount - 1));
 1262                 pindex = ma[reqcount - 1]->pindex;
 1263                 msucc = TAILQ_NEXT(ma[reqcount - 1], listq);
 1264                 if (msucc != NULL && msucc->pindex - pindex - 1 < *rahead)
 1265                         *rahead = msucc->pindex - pindex - 1;
 1266         }
 1267         if (rbehind != NULL) {
 1268                 *rbehind = imin(*rbehind, maxbehind);
 1269                 pindex = ma[0]->pindex;
 1270                 mpred = TAILQ_PREV(ma[0], pglist, listq);
 1271                 if (mpred != NULL && pindex - mpred->pindex - 1 < *rbehind)
 1272                         *rbehind = pindex - mpred->pindex - 1;
 1273         }
 1274 
 1275         bm = ma[0];
 1276         for (i = 0; i < count; i++)
 1277                 ma[i]->oflags |= VPO_SWAPINPROG;
 1278 
 1279         /*
 1280          * Allocate readahead and readbehind pages.
 1281          */
 1282         if (rbehind != NULL) {
 1283                 for (i = 1; i <= *rbehind; i++) {
 1284                         p = vm_page_alloc(object, ma[0]->pindex - i,
 1285                             VM_ALLOC_NORMAL);
 1286                         if (p == NULL)
 1287                                 break;
 1288                         p->oflags |= VPO_SWAPINPROG;
 1289                         bm = p;
 1290                 }
 1291                 *rbehind = i - 1;
 1292         }
 1293         if (rahead != NULL) {
 1294                 for (i = 0; i < *rahead; i++) {
 1295                         p = vm_page_alloc(object,
 1296                             ma[reqcount - 1]->pindex + i + 1, VM_ALLOC_NORMAL);
 1297                         if (p == NULL)
 1298                                 break;
 1299                         p->oflags |= VPO_SWAPINPROG;
 1300                 }
 1301                 *rahead = i;
 1302         }
 1303         if (rbehind != NULL)
 1304                 count += *rbehind;
 1305         if (rahead != NULL)
 1306                 count += *rahead;
 1307 
 1308         vm_object_pip_add(object, count);
 1309 
 1310         pindex = bm->pindex;
 1311         blk = swp_pager_meta_lookup(object, pindex);
 1312         KASSERT(blk != SWAPBLK_NONE,
  1313             ("no swap block containing %p(%jx)", object, (uintmax_t)pindex));
 1314 
 1315         VM_OBJECT_WUNLOCK(object);
 1316         bp = uma_zalloc(swrbuf_zone, M_WAITOK);
 1317         MPASS((bp->b_flags & B_MAXPHYS) != 0);
 1318         /* Pages cannot leave the object while busy. */
 1319         for (i = 0, p = bm; i < count; i++, p = TAILQ_NEXT(p, listq)) {
 1320                 MPASS(p->pindex == bm->pindex + i);
 1321                 bp->b_pages[i] = p;
 1322         }
 1323 
 1324         bp->b_flags |= B_PAGING;
 1325         bp->b_iocmd = BIO_READ;
 1326         bp->b_iodone = swp_pager_async_iodone;
 1327         bp->b_rcred = crhold(thread0.td_ucred);
 1328         bp->b_wcred = crhold(thread0.td_ucred);
 1329         bp->b_blkno = blk;
 1330         bp->b_bcount = PAGE_SIZE * count;
 1331         bp->b_bufsize = PAGE_SIZE * count;
 1332         bp->b_npages = count;
 1333         bp->b_pgbefore = rbehind != NULL ? *rbehind : 0;
 1334         bp->b_pgafter = rahead != NULL ? *rahead : 0;
 1335 
 1336         VM_CNT_INC(v_swapin);
 1337         VM_CNT_ADD(v_swappgsin, count);
 1338 
 1339         /*
 1340          * perform the I/O.  NOTE!!!  bp cannot be considered valid after
 1341          * this point because we automatically release it on completion.
 1342          * Instead, we look at the one page we are interested in which we
 1343          * still hold a lock on even through the I/O completion.
 1344          *
 1345          * The other pages in our ma[] array are also released on completion,
 1346          * so we cannot assume they are valid anymore either.
 1347          *
 1348          * NOTE: b_blkno is destroyed by the call to swapdev_strategy
 1349          */
 1350         BUF_KERNPROC(bp);
 1351         swp_pager_strategy(bp);
 1352 
 1353         /*
 1354          * Wait for the pages we want to complete.  VPO_SWAPINPROG is always
 1355          * cleared on completion.  If an I/O error occurs, SWAPBLK_NONE
 1356          * is set in the metadata for each page in the request.
 1357          */
 1358         VM_OBJECT_WLOCK(object);
 1359         /* This could be implemented more efficiently with aflags */
 1360         while ((ma[0]->oflags & VPO_SWAPINPROG) != 0) {
 1361                 ma[0]->oflags |= VPO_SWAPSLEEP;
 1362                 VM_CNT_INC(v_intrans);
 1363                 if (VM_OBJECT_SLEEP(object, &object->handle, PSWP,
 1364                     "swread", hz * 20)) {
 1365                         printf(
 1366 "swap_pager: indefinite wait buffer: bufobj: %p, blkno: %jd, size: %ld\n",
 1367                             bp->b_bufobj, (intmax_t)bp->b_blkno, bp->b_bcount);
 1368                 }
 1369         }
 1370         VM_OBJECT_WUNLOCK(object);
 1371 
 1372         /*
 1373          * If we had an unrecoverable read error pages will not be valid.
 1374          */
 1375         for (i = 0; i < reqcount; i++)
 1376                 if (ma[i]->valid != VM_PAGE_BITS_ALL)
 1377                         return (VM_PAGER_ERROR);
 1378 
 1379         return (VM_PAGER_OK);
 1380 
 1381         /*
 1382          * A final note: in a low swap situation, we cannot deallocate swap
 1383          * and mark a page dirty here because the caller is likely to mark
 1384          * the page clean when we return, causing the page to possibly revert
 1385          * to all-zeros later.
 1386          */
 1387 }
 1388 
 1389 static int
 1390 swap_pager_getpages(vm_object_t object, vm_page_t *ma, int count,
 1391     int *rbehind, int *rahead)
 1392 {
 1393 
 1394         VM_OBJECT_WLOCK(object);
 1395         return (swap_pager_getpages_locked(object, ma, count, rbehind, rahead));
 1396 }
 1397 
 1398 /*
 1399  *      swap_pager_getpages_async():
 1400  *
 1401  *      Right now this is emulation of asynchronous operation on top of
 1402  *      swap_pager_getpages().
 1403  */
 1404 static int
 1405 swap_pager_getpages_async(vm_object_t object, vm_page_t *ma, int count,
 1406     int *rbehind, int *rahead, pgo_getpages_iodone_t iodone, void *arg)
 1407 {
 1408         int r, error;
 1409 
 1410         r = swap_pager_getpages(object, ma, count, rbehind, rahead);
 1411         switch (r) {
 1412         case VM_PAGER_OK:
 1413                 error = 0;
 1414                 break;
 1415         case VM_PAGER_ERROR:
 1416                 error = EIO;
 1417                 break;
 1418         case VM_PAGER_FAIL:
 1419                 error = EINVAL;
 1420                 break;
 1421         default:
 1422                 panic("unhandled swap_pager_getpages() error %d", r);
 1423         }
 1424         (iodone)(arg, ma, count, error);
 1425 
 1426         return (r);
 1427 }
 1428 
 1429 /*
 1430  *      swap_pager_putpages:
 1431  *
 1432  *      Assign swap (if necessary) and initiate I/O on the specified pages.
 1433  *
 1434  *      We support both OBJT_DEFAULT and OBJT_SWAP objects.  DEFAULT objects
 1435  *      are automatically converted to SWAP objects.
 1436  *
 1437  *      In a low memory situation we may block in VOP_STRATEGY(), but the new
 1438  *      vm_page reservation system coupled with properly written VFS devices
 1439  *      should ensure that no low-memory deadlock occurs.  This is an area
 1440  *      which needs work.
 1441  *
 1442  *      The parent has N vm_object_pip_add() references prior to
 1443  *      calling us and will remove references for rtvals[] that are
 1444  *      not set to VM_PAGER_PEND.  We need to remove the rest on I/O
 1445  *      completion.
 1446  *
 1447  *      The parent has soft-busy'd the pages it passes us and will unbusy
 1448  *      those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
 1449  *      We need to unbusy the rest on I/O completion.
 1450  */
 1451 static void
 1452 swap_pager_putpages(vm_object_t object, vm_page_t *ma, int count,
 1453     int flags, int *rtvals)
 1454 {
 1455         struct buf *bp;
 1456         daddr_t addr, blk, n_free, s_free;
 1457         vm_page_t mreq;
 1458         int i, j, n;
 1459         bool async;
 1460 
 1461         KASSERT(count == 0 || ma[0]->object == object,
 1462             ("%s: object mismatch %p/%p",
 1463             __func__, object, ma[0]->object));
 1464 
 1465         /*
 1466          * Step 1
 1467          *
 1468          * Turn object into OBJT_SWAP.  Force sync if not a pageout process.
 1469          */
 1470         if (object->type != OBJT_SWAP) {
 1471                 addr = swp_pager_meta_build(object, 0, SWAPBLK_NONE);
 1472                 KASSERT(addr == SWAPBLK_NONE,
 1473                     ("unexpected object swap block"));
 1474         }
 1475         VM_OBJECT_WUNLOCK(object);
 1476         async = curproc == pageproc && (flags & VM_PAGER_PUT_SYNC) == 0;
 1477         swp_pager_init_freerange(&s_free, &n_free);
 1478 
 1479         /*
 1480          * Step 2
 1481          *
 1482          * Assign swap blocks and issue I/O.  We reallocate swap on the fly.
 1483          * The page is left dirty until the pageout operation completes
 1484          * successfully.
 1485          */
 1486         for (i = 0; i < count; i += n) {
 1487                 /* Maximum I/O size is limited by maximum swap block size. */
 1488                 n = min(count - i, nsw_cluster_max);
 1489 
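                      /*
                       * Throttle asynchronous writes: nsw_wcount_async counts
                       * the async pbuf slots still available.  A slot is given
                       * back by swp_pager_async_iodone() when a B_ASYNC buffer
                       * completes, or immediately below if no swap space could
                       * be allocated for this cluster.
                       */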
 1490                 if (async) {
 1491                         mtx_lock(&swbuf_mtx);
 1492                         while (nsw_wcount_async == 0)
 1493                                 msleep(&nsw_wcount_async, &swbuf_mtx, PVM,
 1494                                     "swbufa", 0);
 1495                         nsw_wcount_async--;
 1496                         mtx_unlock(&swbuf_mtx);
 1497                 }
 1498 
 1499                 /* Get a block of swap of size up to n. */
 1500                 VM_OBJECT_WLOCK(object);
 1501                 blk = swp_pager_getswapspace(&n);
 1502                 if (blk == SWAPBLK_NONE) {
 1503                         VM_OBJECT_WUNLOCK(object);
 1504                         mtx_lock(&swbuf_mtx);
 1505                         if (++nsw_wcount_async == 1)
 1506                                 wakeup(&nsw_wcount_async);
 1507                         mtx_unlock(&swbuf_mtx);
 1508                         for (j = 0; j < n; ++j)
 1509                                 rtvals[i + j] = VM_PAGER_FAIL;
 1510                         continue;
 1511                 }
 1512                 for (j = 0; j < n; ++j) {
 1513                         mreq = ma[i + j];
 1514                         vm_page_aflag_clear(mreq, PGA_SWAP_FREE);
 1515                         addr = swp_pager_meta_build(mreq->object, mreq->pindex,
 1516                             blk + j);
 1517                         if (addr != SWAPBLK_NONE)
 1518                                 swp_pager_update_freerange(&s_free, &n_free,
 1519                                     addr);
 1520                         MPASS(mreq->dirty == VM_PAGE_BITS_ALL);
 1521                         mreq->oflags |= VPO_SWAPINPROG;
 1522                 }
 1523                 VM_OBJECT_WUNLOCK(object);
 1524 
 1525                 bp = uma_zalloc(swwbuf_zone, M_WAITOK);
 1526                 MPASS((bp->b_flags & B_MAXPHYS) != 0);
 1527                 if (async)
 1528                         bp->b_flags |= B_ASYNC;
 1529                 bp->b_flags |= B_PAGING;
 1530                 bp->b_iocmd = BIO_WRITE;
 1531 
 1532                 bp->b_rcred = crhold(thread0.td_ucred);
 1533                 bp->b_wcred = crhold(thread0.td_ucred);
 1534                 bp->b_bcount = PAGE_SIZE * n;
 1535                 bp->b_bufsize = PAGE_SIZE * n;
 1536                 bp->b_blkno = blk;
 1537                 for (j = 0; j < n; j++)
 1538                         bp->b_pages[j] = ma[i + j];
 1539                 bp->b_npages = n;
 1540 
 1541                 /*
 1542                  * Must set dirty range for NFS to work.
 1543                  */
 1544                 bp->b_dirtyoff = 0;
 1545                 bp->b_dirtyend = bp->b_bcount;
 1546 
 1547                 VM_CNT_INC(v_swapout);
 1548                 VM_CNT_ADD(v_swappgsout, bp->b_npages);
 1549 
 1550                 /*
 1551                  * We unconditionally set rtvals[] to VM_PAGER_PEND so that we
 1552                  * can call the async completion routine at the end of a
 1553                  * synchronous I/O operation.  Otherwise, our caller would
 1554                  * perform duplicate unbusy and wakeup operations on the page
 1555                  * and object, respectively.
 1556                  */
 1557                 for (j = 0; j < n; j++)
 1558                         rtvals[i + j] = VM_PAGER_PEND;
 1559 
 1560                 /*
 1561                  * asynchronous
 1562                  *
 1563                  * NOTE: b_blkno is destroyed by the call to swapdev_strategy.
 1564                  */
 1565                 if (async) {
 1566                         bp->b_iodone = swp_pager_async_iodone;
 1567                         BUF_KERNPROC(bp);
 1568                         swp_pager_strategy(bp);
 1569                         continue;
 1570                 }
 1571 
 1572                 /*
 1573                  * synchronous
 1574                  *
 1575                  * NOTE: b_blkno is destroyed by the call to swapdev_strategy.
 1576                  */
 1577                 bp->b_iodone = bdone;
 1578                 swp_pager_strategy(bp);
 1579 
 1580                 /*
 1581                  * Wait for the sync I/O to complete.
 1582                  */
 1583                 bwait(bp, PVM, "swwrt");
 1584 
 1585                 /*
 1586                  * Now that we are through with the bp, we can call the
 1587                  * normal async completion, which frees everything up.
 1588                  */
 1589                 swp_pager_async_iodone(bp);
 1590         }
 1591         swp_pager_freeswapspace(s_free, n_free);
 1592         VM_OBJECT_WLOCK(object);
 1593 }
 1594 
 1595 /*
 1596  *      swp_pager_async_iodone:
 1597  *
 1598  *      Completion routine for asynchronous reads and writes from/to swap.
 1599  *      Also called manually by synchronous code to finish up a bp.
 1600  *
 1601  *      This routine may not sleep.
 1602  */
 1603 static void
 1604 swp_pager_async_iodone(struct buf *bp)
 1605 {
 1606         int i;
 1607         vm_object_t object = NULL;
 1608 
 1609         /*
 1610          * Report error - unless we ran out of memory, in which case
 1611          * we've already logged it in swapgeom_strategy().
 1612          */
 1613         if (bp->b_ioflags & BIO_ERROR && bp->b_error != ENOMEM) {
 1614                 printf(
 1615                     "swap_pager: I/O error - %s failed; blkno %ld, "
 1616                     "size %ld, error %d\n",
 1617                     ((bp->b_iocmd == BIO_READ) ? "pagein" : "pageout"),
 1618                     (long)bp->b_blkno,
 1619                     (long)bp->b_bcount,
 1620                     bp->b_error
 1621                 );
 1622         }
 1623 
 1624         /*
 1625          * remove the mapping for kernel virtual
 1626          */
 1627         if (buf_mapped(bp))
 1628                 pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages);
 1629         else
 1630                 bp->b_data = bp->b_kvabase;
 1631 
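              /*
               * All pages in a swap buf belong to the same object, so the
               * first page suffices to find it.
               */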
 1632         if (bp->b_npages) {
 1633                 object = bp->b_pages[0]->object;
 1634                 VM_OBJECT_WLOCK(object);
 1635         }
 1636 
 1637         /*
 1638          * cleanup pages.  If an error occurs writing to swap, we are in
 1639          * very serious trouble.  If it happens to be a disk error, though,
 1640          * we may be able to recover by reassigning the swap later on.  So
 1641          * in this case we remove the m->swapblk assignment for the page
 1642          * but do not free it in the rlist.  The erroneous block(s) are thus
 1643          * never reallocated as swap.  Redirty the page and continue.
 1644          */
 1645         for (i = 0; i < bp->b_npages; ++i) {
 1646                 vm_page_t m = bp->b_pages[i];
 1647 
 1648                 m->oflags &= ~VPO_SWAPINPROG;
 1649                 if (m->oflags & VPO_SWAPSLEEP) {
 1650                         m->oflags &= ~VPO_SWAPSLEEP;
 1651                         wakeup(&object->handle);
 1652                 }
 1653 
 1654                 /* We always have space after I/O, successful or not. */
 1655                 vm_page_aflag_set(m, PGA_SWAP_SPACE);
 1656 
 1657                 if (bp->b_ioflags & BIO_ERROR) {
 1658                         /*
 1659                          * If an error occurs I'd love to throw the swapblk
 1660                          * away without freeing it back to swapspace, so it
 1661                          * can never be used again.  But I can't from an
 1662                          * interrupt.
 1663                          */
 1664                         if (bp->b_iocmd == BIO_READ) {
 1665                                 /*
 1666                                  * NOTE: for reads, m->dirty will probably
 1667                                  * be overridden by the original caller of
 1668                                  * getpages so don't play cute tricks here.
 1669                                  */
 1670                                 vm_page_invalid(m);
 1671                         } else {
 1672                                 /*
 1673                                  * If a write error occurs, reactivate page
 1674                                  * so it doesn't clog the inactive list,
 1675                                  * then finish the I/O.
 1676                                  */
 1677                                 MPASS(m->dirty == VM_PAGE_BITS_ALL);
 1678 
 1679                                 /* PQ_UNSWAPPABLE? */
 1680                                 vm_page_activate(m);
 1681                                 vm_page_sunbusy(m);
 1682                         }
 1683                 } else if (bp->b_iocmd == BIO_READ) {
 1684                         /*
 1685                          * NOTE: for reads, m->dirty will probably be
 1686                          * overridden by the original caller of getpages so
 1687                          * we cannot set them in order to free the underlying
 1688                          * swap in a low-swap situation.  I don't think we'd
 1689                          * want to do that anyway, but it was an optimization
 1690                          * that existed in the old swapper for a time before
 1691                          * it got ripped out due to precisely this problem.
 1692                          */
 1693                         KASSERT(!pmap_page_is_mapped(m),
 1694                             ("swp_pager_async_iodone: page %p is mapped", m));
 1695                         KASSERT(m->dirty == 0,
 1696                             ("swp_pager_async_iodone: page %p is dirty", m));
 1697 
 1698                         vm_page_valid(m);
 1699                         if (i < bp->b_pgbefore ||
 1700                             i >= bp->b_npages - bp->b_pgafter)
 1701                                 vm_page_readahead_finish(m);
 1702                 } else {
 1703                         /*
 1704                          * For write success, clear the dirty
 1705                          * status, then finish the I/O (which decrements the
 1706                          * busy count and possibly wakes waiters up).
 1707                          * A page is only written to swap after a period of
 1708                          * inactivity.  Therefore, we do not expect it to be
 1709                          * reused.
 1710                          */
 1711                         KASSERT(!pmap_page_is_write_mapped(m),
 1712                             ("swp_pager_async_iodone: page %p is not write"
 1713                             " protected", m));
 1714                         vm_page_undirty(m);
 1715                         vm_page_deactivate_noreuse(m);
 1716                         vm_page_sunbusy(m);
 1717                 }
 1718         }
 1719 
 1720         /*
 1721          * adjust pip.  NOTE: the original parent may still have its own
 1722          * pip refs on the object.
 1723          */
 1724         if (object != NULL) {
 1725                 vm_object_pip_wakeupn(object, bp->b_npages);
 1726                 VM_OBJECT_WUNLOCK(object);
 1727         }
 1728 
 1729         /*
 1730          * swapdev_strategy() manually sets b_vp and b_bufobj before calling
 1731          * bstrategy().  Set them back to NULL now that we're done with it, or we'll
 1732          * trigger a KASSERT in relpbuf().
 1733          */
 1734         if (bp->b_vp) {
 1735                 bp->b_vp = NULL;
 1736                 bp->b_bufobj = NULL;
 1737         }
 1738         /*
 1739          * release the physical I/O buffer
 1740          */
 1741         if (bp->b_flags & B_ASYNC) {
 1742                 mtx_lock(&swbuf_mtx);
 1743                 if (++nsw_wcount_async == 1)
 1744                         wakeup(&nsw_wcount_async);
 1745                 mtx_unlock(&swbuf_mtx);
 1746         }
 1747         uma_zfree((bp->b_iocmd == BIO_READ) ? swrbuf_zone : swwbuf_zone, bp);
 1748 }
 1749 
 1750 int
 1751 swap_pager_nswapdev(void)
 1752 {
 1753 
 1754         return (nswapdev);
 1755 }
 1756 
 1757 static void
 1758 swp_pager_force_dirty(vm_page_t m)
 1759 {
 1760 
 1761         vm_page_dirty(m);
 1762         swap_pager_unswapped(m);
 1763         vm_page_launder(m);
 1764 }
 1765 
 1766 /*
 1767  *      swap_pager_swapoff_object:
 1768  *
 1769  *      Page in all of the pages that have been paged out for an object
 1770  *      to a swap device.
 1771  */
 1772 static void
 1773 swap_pager_swapoff_object(struct swdevt *sp, vm_object_t object)
 1774 {
 1775         struct swblk *sb;
 1776         vm_page_t m;
 1777         vm_pindex_t pi;
 1778         daddr_t blk;
 1779         int i, nv, rahead, rv;
 1780 
 1781         KASSERT(object->type == OBJT_SWAP,
 1782             ("%s: Object not swappable", __func__));
 1783 
 1784         for (pi = 0; (sb = SWAP_PCTRIE_LOOKUP_GE(
 1785             &object->un_pager.swp.swp_blks, pi)) != NULL; ) {
 1786                 if ((object->flags & OBJ_DEAD) != 0) {
 1787                         /*
 1788                          * Make sure that pending writes finish before
 1789                          * returning.
 1790                          */
 1791                         vm_object_pip_wait(object, "swpoff");
 1792                         swp_pager_meta_free_all(object);
 1793                         break;
 1794                 }
 1795                 for (i = 0; i < SWAP_META_PAGES; i++) {
 1796                         /*
 1797                          * Count the number of contiguous valid blocks.
 1798                          */
 1799                         for (nv = 0; nv < SWAP_META_PAGES - i; nv++) {
 1800                                 blk = sb->d[i + nv];
 1801                                 if (!swp_pager_isondev(blk, sp) ||
 1802                                     blk == SWAPBLK_NONE)
 1803                                         break;
 1804                         }
 1805                         if (nv == 0)
 1806                                 continue;
 1807 
 1808                         /*
 1809                          * Look for a page corresponding to the first
 1810                          * valid block and ensure that any pending paging
 1811                          * operations on it are complete.  If the page is valid,
 1812                          * mark it dirty and free the swap block.  Try to batch
 1813                          * this operation since it may cause sb to be freed,
 1814                          * meaning that we must restart the scan.  Avoid busying
 1815                          * valid pages since we may block forever on kernel
 1816                          * stack pages.
 1817                          */
 1818                         m = vm_page_lookup(object, sb->p + i);
 1819                         if (m == NULL) {
 1820                                 m = vm_page_alloc(object, sb->p + i,
 1821                                     VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
 1822                                 if (m == NULL)
 1823                                         break;
 1824                         } else {
 1825                                 if ((m->oflags & VPO_SWAPINPROG) != 0) {
 1826                                         m->oflags |= VPO_SWAPSLEEP;
 1827                                         VM_OBJECT_SLEEP(object, &object->handle,
 1828                                             PSWP, "swpoff", 0);
 1829                                         break;
 1830                                 }
 1831                                 if (vm_page_all_valid(m)) {
 1832                                         do {
 1833                                                 swp_pager_force_dirty(m);
 1834                                         } while (--nv > 0 &&
 1835                                             (m = vm_page_next(m)) != NULL &&
 1836                                             vm_page_all_valid(m) &&
 1837                                             (m->oflags & VPO_SWAPINPROG) == 0);
 1838                                         break;
 1839                                 }
 1840                                 if (!vm_page_busy_acquire(m, VM_ALLOC_WAITFAIL))
 1841                                         break;
 1842                         }
 1843 
 1844                         vm_object_pip_add(object, 1);
 1845                         rahead = SWAP_META_PAGES;
 1846                         rv = swap_pager_getpages_locked(object, &m, 1, NULL,
 1847                             &rahead);
 1848                         if (rv != VM_PAGER_OK)
 1849                                 panic("%s: read from swap failed: %d",
 1850                                     __func__, rv);
 1851                         vm_object_pip_wakeupn(object, 1);
 1852                         VM_OBJECT_WLOCK(object);
 1853                         vm_page_xunbusy(m);
 1854 
 1855                         /*
 1856                          * The object lock was dropped so we must restart the
 1857                          * scan of this swap block.  Pages paged in during this
 1858                          * iteration will be marked dirty in a future iteration.
 1859                          */
 1860                         break;
 1861                 }
 1862                 if (i == SWAP_META_PAGES)
 1863                         pi = sb->p + SWAP_META_PAGES;
 1864         }
 1865 }
 1866 
 1867 /*
 1868  *      swap_pager_swapoff:
 1869  *
 1870  *      Page in all of the pages that have been paged out to the
 1871  *      given device.  The corresponding blocks in the bitmap must be
 1872  *      marked as allocated and the device must be flagged SW_CLOSING.
 1873  *      There may be no processes swapped out to the device.
 1874  *
 1875  *      This routine may block.
 1876  */
 1877 static void
 1878 swap_pager_swapoff(struct swdevt *sp)
 1879 {
 1880         vm_object_t object;
 1881         int retries;
 1882 
 1883         sx_assert(&swdev_syscall_lock, SA_XLOCKED);
 1884 
 1885         retries = 0;
 1886 full_rescan:
 1887         mtx_lock(&vm_object_list_mtx);
 1888         TAILQ_FOREACH(object, &vm_object_list, object_list) {
 1889                 if (object->type != OBJT_SWAP)
 1890                         continue;
 1891                 mtx_unlock(&vm_object_list_mtx);
 1892                 /* Depends on type-stability. */
 1893                 VM_OBJECT_WLOCK(object);
 1894 
 1895                 /*
 1896                  * Dead objects are eventually terminated on their own.
 1897                  */
 1898                 if ((object->flags & OBJ_DEAD) != 0)
 1899                         goto next_obj;
 1900 
 1901                 /*
 1902                  * Sync with fences placed after pctrie
 1903                  * initialization.  We must not access pctrie below
 1904                  * unless we checked that our object is swap and not
 1905                  * dead.
 1906                  */
 1907                 atomic_thread_fence_acq();
 1908                 if (object->type != OBJT_SWAP)
 1909                         goto next_obj;
 1910 
 1911                 swap_pager_swapoff_object(sp, object);
 1912 next_obj:
 1913                 VM_OBJECT_WUNLOCK(object);
 1914                 mtx_lock(&vm_object_list_mtx);
 1915         }
 1916         mtx_unlock(&vm_object_list_mtx);
 1917 
 1918         if (sp->sw_used) {
 1919                 /*
 1920                  * Objects may be locked or paging to the device being
 1921                  * removed, so we will miss their pages and need to
 1922                  * make another pass.  We have marked this device as
 1923                  * SW_CLOSING, so the activity should finish soon.
 1924                  */
 1925                 retries++;
 1926                 if (retries > 100) {
 1927                         panic("swapoff: failed to locate %d swap blocks",
 1928                             sp->sw_used);
 1929                 }
 1930                 pause("swpoff", hz / 20);
 1931                 goto full_rescan;
 1932         }
 1933         EVENTHANDLER_INVOKE(swapoff, sp);
 1934 }
 1935 
 1936 /************************************************************************
 1937  *                              SWAP META DATA                          *
 1938  ************************************************************************
 1939  *
 1940  *      These routines manipulate the swap metadata stored in the
 1941  *      OBJT_SWAP object.
 1942  *
 1943  *      Swap metadata is kept in a per-object radix trie (pctrie) of
 1944  *      struct swblk entries rooted at un_pager.swp.swp_blks.  Each swblk
 1945  *      maps SWAP_META_PAGES consecutive page indices to swap block addresses.
 1946  */
 1947 
 1948 /*
 1949  * SWP_PAGER_SWBLK_EMPTY() - is a range of blocks free?
 1950  */
 1951 static bool
 1952 swp_pager_swblk_empty(struct swblk *sb, int start, int limit)
 1953 {
 1954         int i;
 1955 
 1956         MPASS(0 <= start && start <= limit && limit <= SWAP_META_PAGES);
 1957         for (i = start; i < limit; i++) {
 1958                 if (sb->d[i] != SWAPBLK_NONE)
 1959                         return (false);
 1960         }
 1961         return (true);
 1962 }
 1963 
 1964 /*
 1965  * SWP_PAGER_FREE_EMPTY_SWBLK() - frees if a block is free
 1966  *
 1967  *  Nothing is done if the block is still in use.
 1968  */
 1969 static void
 1970 swp_pager_free_empty_swblk(vm_object_t object, struct swblk *sb)
 1971 {
 1972 
 1973         if (swp_pager_swblk_empty(sb, 0, SWAP_META_PAGES)) {
 1974                 SWAP_PCTRIE_REMOVE(&object->un_pager.swp.swp_blks, sb->p);
 1975                 uma_zfree(swblk_zone, sb);
 1976         }
 1977 }
 1978
 1979 /*
 1980  * SWP_PAGER_META_BUILD() -     add swap block to swap meta data for object
 1981  *
 1982  *      We first convert the object to a swap object if it is a default
 1983  *      object.
 1984  *
 1985  *      The specified swapblk is added to the object's swap metadata.  If
 1986  *      the swapblk is not valid, it is freed instead.  Any previously
 1987  *      assigned swapblk is returned.
 1988  */
 1989 static daddr_t
 1990 swp_pager_meta_build(vm_object_t object, vm_pindex_t pindex, daddr_t swapblk)
 1991 {
 1992         static volatile int swblk_zone_exhausted, swpctrie_zone_exhausted;
 1993         struct swblk *sb, *sb1;
 1994         vm_pindex_t modpi, rdpi;
 1995         daddr_t prev_swapblk;
 1996         int error, i;
 1997 
 1998         VM_OBJECT_ASSERT_WLOCKED(object);
 1999 
 2000         /*
 2001          * Convert default object to swap object if necessary
 2002          */
 2003         if (object->type != OBJT_SWAP) {
 2004                 pctrie_init(&object->un_pager.swp.swp_blks);
 2005 
 2006                 /*
 2007                  * Ensure that swap_pager_swapoff()'s iteration over
 2008                  * object_list does not see a garbage pctrie.
 2009                  */
 2010                 atomic_thread_fence_rel();
 2011 
 2012                 object->type = OBJT_SWAP;
 2013                 object->un_pager.swp.writemappings = 0;
 2014                 KASSERT((object->flags & OBJ_ANON) != 0 ||
 2015                     object->handle == NULL,
 2016                     ("default pager %p with handle %p",
 2017                     object, object->handle));
 2018         }
 2019 
 2020         rdpi = rounddown(pindex, SWAP_META_PAGES);
 2021         sb = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks, rdpi);
 2022         if (sb == NULL) {
 2023                 if (swapblk == SWAPBLK_NONE)
 2024                         return (SWAPBLK_NONE);
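                      /*
                       * No swblk covers this index yet; allocate one.  On zone
                       * exhaustion the object lock is dropped while waiting for
                       * memory, so the lookup is retried afterwards in case
                       * another thread installed a swblk at rdpi meanwhile.
                       */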
 2025                 for (;;) {
 2026                         sb = uma_zalloc(swblk_zone, M_NOWAIT | (curproc ==
 2027                             pageproc ? M_USE_RESERVE : 0));
 2028                         if (sb != NULL) {
 2029                                 sb->p = rdpi;
 2030                                 for (i = 0; i < SWAP_META_PAGES; i++)
 2031                                         sb->d[i] = SWAPBLK_NONE;
 2032                                 if (atomic_cmpset_int(&swblk_zone_exhausted,
 2033                                     1, 0))
 2034                                         printf("swblk zone ok\n");
 2035                                 break;
 2036                         }
 2037                         VM_OBJECT_WUNLOCK(object);
 2038                         if (uma_zone_exhausted(swblk_zone)) {
 2039                                 if (atomic_cmpset_int(&swblk_zone_exhausted,
 2040                                     0, 1))
 2041                                         printf("swap blk zone exhausted, "
 2042                                             "increase kern.maxswzone\n");
 2043                                 vm_pageout_oom(VM_OOM_SWAPZ);
 2044                                 pause("swzonxb", 10);
 2045                         } else
 2046                                 uma_zwait(swblk_zone);
 2047                         VM_OBJECT_WLOCK(object);
 2048                         sb = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks,
 2049                             rdpi);
 2050                         if (sb != NULL)
 2051                                 /*
 2052                                  * Somebody swapped out a nearby page,
 2053                                  * allocating swblk at the rdpi index,
 2054                                  * while we dropped the object lock.
 2055                                  */
 2056                                 goto allocated;
 2057                 }
 2058                 for (;;) {
 2059                         error = SWAP_PCTRIE_INSERT(
 2060                             &object->un_pager.swp.swp_blks, sb);
 2061                         if (error == 0) {
 2062                                 if (atomic_cmpset_int(&swpctrie_zone_exhausted,
 2063                                     1, 0))
 2064                                         printf("swpctrie zone ok\n");
 2065                                 break;
 2066                         }
 2067                         VM_OBJECT_WUNLOCK(object);
 2068                         if (uma_zone_exhausted(swpctrie_zone)) {
 2069                                 if (atomic_cmpset_int(&swpctrie_zone_exhausted,
 2070                                     0, 1))
 2071                                         printf("swap pctrie zone exhausted, "
 2072                                             "increase kern.maxswzone\n");
 2073                                 vm_pageout_oom(VM_OOM_SWAPZ);
 2074                                 pause("swzonxp", 10);
 2075                         } else
 2076                                 uma_zwait(swpctrie_zone);
 2077                         VM_OBJECT_WLOCK(object);
 2078                         sb1 = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks,
 2079                             rdpi);
 2080                         if (sb1 != NULL) {
 2081                                 uma_zfree(swblk_zone, sb);
 2082                                 sb = sb1;
 2083                                 goto allocated;
 2084                         }
 2085                 }
 2086         }
 2087 allocated:
 2088         MPASS(sb->p == rdpi);
 2089 
 2090         modpi = pindex % SWAP_META_PAGES;
 2091         /* Return prior contents of metadata. */
 2092         prev_swapblk = sb->d[modpi];
 2093         /* Enter block into metadata. */
 2094         sb->d[modpi] = swapblk;
 2095 
 2096         /*
 2097          * Free the swblk if we end up with the empty page run.
 2098          */
 2099         if (swapblk == SWAPBLK_NONE)
 2100                 swp_pager_free_empty_swblk(object, sb);
 2101         return (prev_swapblk);
 2102 }
 2103 
 2104 /*
 2105  * SWP_PAGER_META_TRANSFER() - free a range of blocks in the srcobject's swap
 2106  * metadata, or transfer it into dstobject.
 2107  *
 2108  *      This routine will free swap metadata structures as they are cleaned
 2109  *      out.
 2110  */
 2111 static void
 2112 swp_pager_meta_transfer(vm_object_t srcobject, vm_object_t dstobject,
 2113     vm_pindex_t pindex, vm_pindex_t count)
 2114 {
 2115         struct swblk *sb;
 2116         daddr_t n_free, s_free;
 2117         vm_pindex_t offset, last;
 2118         int i, limit, start;
 2119 
 2120         VM_OBJECT_ASSERT_WLOCKED(srcobject);
 2121         if (srcobject->type != OBJT_SWAP || count == 0)
 2122                 return;
 2123 
 2124         swp_pager_init_freerange(&s_free, &n_free);
 2125         offset = pindex;
 2126         last = pindex + count;
 2127         for (;;) {
 2128                 sb = SWAP_PCTRIE_LOOKUP_GE(&srcobject->un_pager.swp.swp_blks,
 2129                     rounddown(pindex, SWAP_META_PAGES));
 2130                 if (sb == NULL || sb->p >= last)
 2131                         break;
 2132                 start = pindex > sb->p ? pindex - sb->p : 0;
 2133                 limit = last - sb->p < SWAP_META_PAGES ? last - sb->p :
 2134                     SWAP_META_PAGES;
 2135                 for (i = start; i < limit; i++) {
 2136                         if (sb->d[i] == SWAPBLK_NONE)
 2137                                 continue;
 2138                         if (dstobject == NULL ||
 2139                             !swp_pager_xfer_source(srcobject, dstobject, 
 2140                             sb->p + i - offset, sb->d[i])) {
 2141                                 swp_pager_update_freerange(&s_free, &n_free,
 2142                                     sb->d[i]);
 2143                         }
 2144                         sb->d[i] = SWAPBLK_NONE;
 2145                 }
 2146                 pindex = sb->p + SWAP_META_PAGES;
 2147                 if (swp_pager_swblk_empty(sb, 0, start) &&
 2148                     swp_pager_swblk_empty(sb, limit, SWAP_META_PAGES)) {
 2149                         SWAP_PCTRIE_REMOVE(&srcobject->un_pager.swp.swp_blks,
 2150                             sb->p);
 2151                         uma_zfree(swblk_zone, sb);
 2152                 }
 2153         }
 2154         swp_pager_freeswapspace(s_free, n_free);
 2155 }
 2156 
 2157 /*
 2158  * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
 2159  *
 2160  *      The requested range of blocks is freed, with any associated swap
 2161  *      returned to the swap bitmap.
 2162  *
 2163  *      This routine will free swap metadata structures as they are cleaned
 2164  *      out.  This routine does *NOT* operate on swap metadata associated
 2165  *      with resident pages.
 2166  */
 2167 static void
 2168 swp_pager_meta_free(vm_object_t object, vm_pindex_t pindex, vm_pindex_t count)
 2169 {
 2170         swp_pager_meta_transfer(object, NULL, pindex, count);
 2171 }
 2172 
 2173 /*
 2174  * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
 2175  *
 2176  *      This routine locates and destroys all swap metadata associated with
 2177  *      an object.
 2178  */
 2179 static void
 2180 swp_pager_meta_free_all(vm_object_t object)
 2181 {
 2182         struct swblk *sb;
 2183         daddr_t n_free, s_free;
 2184         vm_pindex_t pindex;
 2185         int i;
 2186 
 2187         VM_OBJECT_ASSERT_WLOCKED(object);
 2188         if (object->type != OBJT_SWAP)
 2189                 return;
 2190 
 2191         swp_pager_init_freerange(&s_free, &n_free);
 2192         for (pindex = 0; (sb = SWAP_PCTRIE_LOOKUP_GE(
 2193             &object->un_pager.swp.swp_blks, pindex)) != NULL;) {
 2194                 pindex = sb->p + SWAP_META_PAGES;
 2195                 for (i = 0; i < SWAP_META_PAGES; i++) {
 2196                         if (sb->d[i] == SWAPBLK_NONE)
 2197                                 continue;
 2198                         swp_pager_update_freerange(&s_free, &n_free, sb->d[i]);
 2199                 }
 2200                 SWAP_PCTRIE_REMOVE(&object->un_pager.swp.swp_blks, sb->p);
 2201                 uma_zfree(swblk_zone, sb);
 2202         }
 2203         swp_pager_freeswapspace(s_free, n_free);
 2204 }
 2205 
 2206 /*
 2207  * SWP_PAGER_META_LOOKUP() - look up a swapblk assignment in the swap
 2208  *      metadata.
 2209  *
 2210  *      This routine returns the swapblk assigned to the given page
 2211  *      index, or SWAPBLK_NONE if no block is assigned to that index.
 2212  *
 2213  *      When acting on a busy resident page and paging is in progress, we
 2214  *      have to wait until paging is complete but otherwise can act on the
 2215  *      busy page.
 2216  */
 2217 static daddr_t
 2218 swp_pager_meta_lookup(vm_object_t object, vm_pindex_t pindex)
 2219 {
 2220         struct swblk *sb;
 2221 
 2222         VM_OBJECT_ASSERT_LOCKED(object);
 2223 
 2224         /*
 2225          * The meta data only exists if the object is OBJT_SWAP
 2226          * and even then might not be allocated yet.
 2227          */
 2228         KASSERT(object->type == OBJT_SWAP,
 2229             ("Lookup object not swappable"));
 2230 
 2231         sb = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks,
 2232             rounddown(pindex, SWAP_META_PAGES));
 2233         if (sb == NULL)
 2234                 return (SWAPBLK_NONE);
 2235         return (sb->d[pindex % SWAP_META_PAGES]);
 2236 }
 2237 
 2238 /*
 2239  * Returns the least page index which is greater than or equal to the
 2240  * parameter pindex and for which there is a swap block allocated.
 2241  * Returns object's size if the object's type is not swap or if there
 2242  * are no allocated swap blocks for the object after the requested
 2243  * pindex.
 2244  */
 2245 vm_pindex_t
 2246 swap_pager_find_least(vm_object_t object, vm_pindex_t pindex)
 2247 {
 2248         struct swblk *sb;
 2249         int i;
 2250 
 2251         VM_OBJECT_ASSERT_LOCKED(object);
 2252         if (object->type != OBJT_SWAP)
 2253                 return (object->size);
 2254 
 2255         sb = SWAP_PCTRIE_LOOKUP_GE(&object->un_pager.swp.swp_blks,
 2256             rounddown(pindex, SWAP_META_PAGES));
 2257         if (sb == NULL)
 2258                 return (object->size);
 2259         if (sb->p < pindex) {
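              /*
               * pindex falls inside this swblk; scan from its slot to the
               * end of the swblk, then fall back to the next swblk at or
               * above the rounded-up index.
               */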
 2260                 for (i = pindex % SWAP_META_PAGES; i < SWAP_META_PAGES; i++) {
 2261                         if (sb->d[i] != SWAPBLK_NONE)
 2262                                 return (sb->p + i);
 2263                 }
 2264                 sb = SWAP_PCTRIE_LOOKUP_GE(&object->un_pager.swp.swp_blks,
 2265                     roundup(pindex, SWAP_META_PAGES));
 2266                 if (sb == NULL)
 2267                         return (object->size);
 2268         }
 2269         for (i = 0; i < SWAP_META_PAGES; i++) {
 2270                 if (sb->d[i] != SWAPBLK_NONE)
 2271                         return (sb->p + i);
 2272         }
 2273 
 2274         /*
 2275          * We get here if a swblk is present in the trie but it
 2276          * doesn't map any blocks.
 2277          */
 2278         MPASS(0);
 2279         return (object->size);
 2280 }
 2281 
 2282 /*
 2283  * System call swapon(name) enables swapping on device name,
 2284  * which must be in the swdevsw.  Return EBUSY
 2285  * if already swapping on this device.
 2286  */
 2287 #ifndef _SYS_SYSPROTO_H_
 2288 struct swapon_args {
 2289         char *name;
 2290 };
 2291 #endif
 2292 
 2293 /*
 2294  * MPSAFE
 2295  */
 2296 /* ARGSUSED */
 2297 int
 2298 sys_swapon(struct thread *td, struct swapon_args *uap)
 2299 {
 2300         struct vattr attr;
 2301         struct vnode *vp;
 2302         struct nameidata nd;
 2303         int error;
 2304 
 2305         error = priv_check(td, PRIV_SWAPON);
 2306         if (error)
 2307                 return (error);
 2308 
 2309         sx_xlock(&swdev_syscall_lock);
 2310 
 2311         /*
 2312          * Swap metadata may not fit in the KVM if we have physical
 2313          * memory of >1GB.
 2314          */
 2315         if (swblk_zone == NULL) {
 2316                 error = ENOMEM;
 2317                 goto done;
 2318         }
 2319 
 2320         NDINIT(&nd, LOOKUP, ISOPEN | FOLLOW | AUDITVNODE1, UIO_USERSPACE,
 2321             uap->name, td);
 2322         error = namei(&nd);
 2323         if (error)
 2324                 goto done;
 2325 
 2326         NDFREE(&nd, NDF_ONLY_PNBUF);
 2327         vp = nd.ni_vp;
 2328 
 2329         if (vn_isdisk_error(vp, &error)) {
 2330                 error = swapongeom(vp);
 2331         } else if (vp->v_type == VREG &&
 2332             (vp->v_mount->mnt_vfc->vfc_flags & VFCF_NETWORK) != 0 &&
 2333             (error = VOP_GETATTR(vp, &attr, td->td_ucred)) == 0) {
 2334                 /*
 2335                  * Allow direct swapping to NFS regular files in the same
 2336                  * way that nfs_mountroot() sets up diskless swapping.
 2337                  */
 2338                 error = swaponvp(td, vp, attr.va_size / DEV_BSIZE);
 2339         }
 2340 
 2341         if (error)
 2342                 vrele(vp);
 2343 done:
 2344         sx_xunlock(&swdev_syscall_lock);
 2345         return (error);
 2346 }
 2347 
 2348 /*
 2349  * Check that the total amount of swap currently configured does not
 2350  * exceed half the theoretical maximum.  If it does, print a warning
 2351  * message.
 2352  */
 2353 static void
 2354 swapon_check_swzone(void)
 2355 {
 2356 
 2357         /* recommend using no more than half that amount */
 2358         if (swap_total > swap_maxpages / 2) {
 2359                 printf("warning: total configured swap (%lu pages) "
 2360                     "exceeds maximum recommended amount (%lu pages).\n",
 2361                     swap_total, swap_maxpages / 2);
 2362                 printf("warning: increase kern.maxswzone "
 2363                     "or reduce amount of swap.\n");
 2364         }
 2365 }
 2366 
 2367 static void
 2368 swaponsomething(struct vnode *vp, void *id, u_long nblks,
 2369     sw_strategy_t *strategy, sw_close_t *close, dev_t dev, int flags)
 2370 {
 2371         struct swdevt *sp, *tsp;
 2372         daddr_t dvbase;
 2373 
 2374         /*
 2375          * nblks is in DEV_BSIZE'd chunks, convert to PAGE_SIZE'd chunks.
 2376          * First round nblks down to a whole number of pages, then convert.
 2377          *
 2378          * sw->sw_nblks is in page-sized chunks now too.
 2379          */
 2380         nblks &= ~(ctodb(1) - 1);
 2381         nblks = dbtoc(nblks);
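              /*
               * For example, assuming the common 4096-byte PAGE_SIZE and
               * 512-byte DEV_BSIZE (not guaranteed on every platform),
               * ctodb(1) is 8: the mask discards any trailing partial page
               * and dbtoc() then divides by 8 to yield whole pages.
               */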
 2382 
 2383         sp = malloc(sizeof *sp, M_VMPGDATA, M_WAITOK | M_ZERO);
 2384         sp->sw_blist = blist_create(nblks, M_WAITOK);
 2385         sp->sw_vp = vp;
 2386         sp->sw_id = id;
 2387         sp->sw_dev = dev;
 2388         sp->sw_nblks = nblks;
 2389         sp->sw_used = 0;
 2390         sp->sw_strategy = strategy;
 2391         sp->sw_close = close;
 2392         sp->sw_flags = flags;
 2393 
 2394         /*
 2395          * Do not free the first blocks in order to avoid overwriting
 2396          * any BSD label at the front of the partition.
 2397          */
 2398         blist_free(sp->sw_blist, howmany(BBSIZE, PAGE_SIZE),
 2399             nblks - howmany(BBSIZE, PAGE_SIZE));
 2400 
 2401         dvbase = 0;
 2402         mtx_lock(&sw_dev_mtx);
 2403         TAILQ_FOREACH(tsp, &swtailq, sw_list) {
 2404                 if (tsp->sw_end >= dvbase) {
 2405                         /*
 2406                          * We put one uncovered page between the devices
 2407                          * in order to definitively prevent any cross-device
 2408                          * I/O requests
 2409                          */
 2410                         dvbase = tsp->sw_end + 1;
 2411                 }
 2412         }
 2413         sp->sw_first = dvbase;
 2414         sp->sw_end = dvbase + nblks;
 2415         TAILQ_INSERT_TAIL(&swtailq, sp, sw_list);
 2416         nswapdev++;
 2417         swap_pager_avail += nblks - howmany(BBSIZE, PAGE_SIZE);
 2418         swap_total += nblks;
 2419         swapon_check_swzone();
 2420         swp_sizecheck();
 2421         mtx_unlock(&sw_dev_mtx);
 2422         EVENTHANDLER_INVOKE(swapon, sp);
 2423 }
 2424 
 2425 /*
 2426  * SYSCALL: swapoff(devname)
 2427  *
 2428  * Disable swapping on the given device.
 2429  *
 2430  * XXX: Badly designed system call: it should use a device index
 2431  * rather than filename as specification.  We keep sw_vp around
 2432  * only to make this work.
 2433  */
 2434 #ifndef _SYS_SYSPROTO_H_
 2435 struct swapoff_args {
 2436         char *name;
 2437 };
 2438 #endif
 2439 
 2440 /*
 2441  * MPSAFE
 2442  */
 2443 /* ARGSUSED */
 2444 int
 2445 sys_swapoff(struct thread *td, struct swapoff_args *uap)
 2446 {
 2447         struct vnode *vp;
 2448         struct nameidata nd;
 2449         struct swdevt *sp;
 2450         int error;
 2451 
 2452         error = priv_check(td, PRIV_SWAPOFF);
 2453         if (error)
 2454                 return (error);
 2455 
 2456         sx_xlock(&swdev_syscall_lock);
 2457 
 2458         NDINIT(&nd, LOOKUP, FOLLOW | AUDITVNODE1, UIO_USERSPACE, uap->name,
 2459             td);
 2460         error = namei(&nd);
 2461         if (error)
 2462                 goto done;
 2463         NDFREE(&nd, NDF_ONLY_PNBUF);
 2464         vp = nd.ni_vp;
 2465 
 2466         mtx_lock(&sw_dev_mtx);
 2467         TAILQ_FOREACH(sp, &swtailq, sw_list) {
 2468                 if (sp->sw_vp == vp)
 2469                         break;
 2470         }
 2471         mtx_unlock(&sw_dev_mtx);
 2472         if (sp == NULL) {
 2473                 error = EINVAL;
 2474                 goto done;
 2475         }
 2476         error = swapoff_one(sp, td->td_ucred);
 2477 done:
 2478         sx_xunlock(&swdev_syscall_lock);
 2479         return (error);
 2480 }
 2481 
 2482 static int
 2483 swapoff_one(struct swdevt *sp, struct ucred *cred)
 2484 {
 2485         u_long nblks;
 2486 #ifdef MAC
 2487         int error;
 2488 #endif
 2489 
 2490         sx_assert(&swdev_syscall_lock, SA_XLOCKED);
 2491 #ifdef MAC
 2492         (void) vn_lock(sp->sw_vp, LK_EXCLUSIVE | LK_RETRY);
 2493         error = mac_system_check_swapoff(cred, sp->sw_vp);
 2494         (void) VOP_UNLOCK(sp->sw_vp);
 2495         if (error != 0)
 2496                 return (error);
 2497 #endif
 2498         nblks = sp->sw_nblks;
 2499 
 2500         /*
 2501          * We can turn off this swap device safely only if the
 2502          * available virtual memory in the system will fit the amount
 2503          * of data we will have to page back in, plus an epsilon so
 2504          * the system doesn't become critically low on swap space.
 2505          */
 2506         if (vm_free_count() + swap_pager_avail < nblks + nswap_lowat)
 2507                 return (ENOMEM);
 2508 
 2509         /*
 2510          * Prevent further allocations on this device.
 2511          */
 2512         mtx_lock(&sw_dev_mtx);
 2513         sp->sw_flags |= SW_CLOSING;
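              /*
               * blist_fill() marks the device's whole block range as
               * allocated and returns the number of blocks that were still
               * free, which is deducted from the global availability count.
               */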
 2514         swap_pager_avail -= blist_fill(sp->sw_blist, 0, nblks);
 2515         swap_total -= nblks;
 2516         mtx_unlock(&sw_dev_mtx);
 2517 
 2518         /*
 2519          * Page in the contents of the device and close it.
 2520          */
 2521         swap_pager_swapoff(sp);
 2522 
 2523         sp->sw_close(curthread, sp);
 2524         mtx_lock(&sw_dev_mtx);
 2525         sp->sw_id = NULL;
 2526         TAILQ_REMOVE(&swtailq, sp, sw_list);
 2527         nswapdev--;
 2528         if (nswapdev == 0) {
 2529                 swap_pager_full = 2;
 2530                 swap_pager_almost_full = 1;
 2531         }
 2532         if (swdevhd == sp)
 2533                 swdevhd = NULL;
 2534         mtx_unlock(&sw_dev_mtx);
 2535         blist_destroy(sp->sw_blist);
 2536         free(sp, M_VMPGDATA);
 2537         return (0);
 2538 }
 2539 
 2540 void
 2541 swapoff_all(void)
 2542 {
 2543         struct swdevt *sp, *spt;
 2544         const char *devname;
 2545         int error;
 2546 
 2547         sx_xlock(&swdev_syscall_lock);
 2548 
 2549         mtx_lock(&sw_dev_mtx);
 2550         TAILQ_FOREACH_SAFE(sp, &swtailq, sw_list, spt) {
 2551                 mtx_unlock(&sw_dev_mtx);
 2552                 if (vn_isdisk(sp->sw_vp))
 2553                         devname = devtoname(sp->sw_vp->v_rdev);
 2554                 else
 2555                         devname = "[file]";
 2556                 error = swapoff_one(sp, thread0.td_ucred);
 2557                 if (error != 0) {
 2558                         printf("Cannot remove swap device %s (error=%d), "
 2559                             "skipping.\n", devname, error);
 2560                 } else if (bootverbose) {
 2561                         printf("Swap device %s removed.\n", devname);
 2562                 }
 2563                 mtx_lock(&sw_dev_mtx);
 2564         }
 2565         mtx_unlock(&sw_dev_mtx);
 2566 
 2567         sx_xunlock(&swdev_syscall_lock);
 2568 }
 2569 
 2570 void
 2571 swap_pager_status(int *total, int *used)
 2572 {
 2573 
 2574         *total = swap_total;
 2575         *used = swap_total - swap_pager_avail -
 2576             nswapdev * howmany(BBSIZE, PAGE_SIZE);
 2577 }
 2578 
 2579 int
 2580 swap_dev_info(int name, struct xswdev *xs, char *devname, size_t len)
 2581 {
 2582         struct swdevt *sp;
 2583         const char *tmp_devname;
 2584         int error, n;
 2585 
 2586         n = 0;
 2587         error = ENOENT;
 2588         mtx_lock(&sw_dev_mtx);
 2589         TAILQ_FOREACH(sp, &swtailq, sw_list) {
 2590                 if (n != name) {
 2591                         n++;
 2592                         continue;
 2593                 }
 2594                 xs->xsw_version = XSWDEV_VERSION;
 2595                 xs->xsw_dev = sp->sw_dev;
 2596                 xs->xsw_flags = sp->sw_flags;
 2597                 xs->xsw_nblks = sp->sw_nblks;
 2598                 xs->xsw_used = sp->sw_used;
 2599                 if (devname != NULL) {
 2600                         if (vn_isdisk(sp->sw_vp))
 2601                                 tmp_devname = devtoname(sp->sw_vp->v_rdev);
 2602                         else
 2603                                 tmp_devname = "[file]";
 2604                         strncpy(devname, tmp_devname, len);
 2605                 }
 2606                 error = 0;
 2607                 break;
 2608         }
 2609         mtx_unlock(&sw_dev_mtx);
 2610         return (error);
 2611 }
 2612 
 2613 #if defined(COMPAT_FREEBSD11)
 2614 #define XSWDEV_VERSION_11       1
 2615 struct xswdev11 {
 2616         u_int   xsw_version;
 2617         uint32_t xsw_dev;
 2618         int     xsw_flags;
 2619         int     xsw_nblks;
 2620         int     xsw_used;
 2621 };
 2622 #endif
 2623 
 2624 #if defined(__amd64__) && defined(COMPAT_FREEBSD32)
 2625 struct xswdev32 {
 2626         u_int   xsw_version;
 2627         u_int   xsw_dev1, xsw_dev2;
 2628         int     xsw_flags;
 2629         int     xsw_nblks;
 2630         int     xsw_used;
 2631 };
 2632 #endif
 2633 
 2634 static int
 2635 sysctl_vm_swap_info(SYSCTL_HANDLER_ARGS)
 2636 {
 2637         struct xswdev xs;
 2638 #if defined(__amd64__) && defined(COMPAT_FREEBSD32)
 2639         struct xswdev32 xs32;
 2640 #endif
 2641 #if defined(COMPAT_FREEBSD11)
 2642         struct xswdev11 xs11;
 2643 #endif
 2644         int error;
 2645 
 2646         if (arg2 != 1)                  /* name length */
 2647                 return (EINVAL);
 2648         error = swap_dev_info(*(int *)arg1, &xs, NULL, 0);
 2649         if (error != 0)
 2650                 return (error);
 2651 #if defined(__amd64__) && defined(COMPAT_FREEBSD32)
 2652         if (req->oldlen == sizeof(xs32)) {
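                      /*
                       * A 32-bit xswdev cannot hold the 64-bit xsw_dev in a
                       * single field, so return it as low and high halves.
                       */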
 2653                 xs32.xsw_version = XSWDEV_VERSION;
 2654                 xs32.xsw_dev1 = xs.xsw_dev;
 2655                 xs32.xsw_dev2 = xs.xsw_dev >> 32;
 2656                 xs32.xsw_flags = xs.xsw_flags;
 2657                 xs32.xsw_nblks = xs.xsw_nblks;
 2658                 xs32.xsw_used = xs.xsw_used;
 2659                 error = SYSCTL_OUT(req, &xs32, sizeof(xs32));
 2660                 return (error);
 2661         }
 2662 #endif
 2663 #if defined(COMPAT_FREEBSD11)
 2664         if (req->oldlen == sizeof(xs11)) {
 2665                 xs11.xsw_version = XSWDEV_VERSION_11;
 2666                 xs11.xsw_dev = xs.xsw_dev; /* truncation */
 2667                 xs11.xsw_flags = xs.xsw_flags;
 2668                 xs11.xsw_nblks = xs.xsw_nblks;
 2669                 xs11.xsw_used = xs.xsw_used;
 2670                 error = SYSCTL_OUT(req, &xs11, sizeof(xs11));
 2671                 return (error);
 2672         }
 2673 #endif
 2674         error = SYSCTL_OUT(req, &xs, sizeof(xs));
 2675         return (error);
 2676 }
 2677 
 2678 SYSCTL_INT(_vm, OID_AUTO, nswapdev, CTLFLAG_RD, &nswapdev, 0,
 2679     "Number of swap devices");
 2680 SYSCTL_NODE(_vm, OID_AUTO, swap_info, CTLFLAG_RD | CTLFLAG_MPSAFE,
 2681     sysctl_vm_swap_info,
 2682     "Swap statistics by device");
 2683 
 2684 /*
 2685  * Count the approximate swap usage in pages for a vmspace.  Swap
 2686  * blocks that are shadowed or not yet copied on write are not counted.
 2687  * The map must be locked.
 2688  */
 2689 long
 2690 vmspace_swap_count(struct vmspace *vmspace)
 2691 {
 2692         vm_map_t map;
 2693         vm_map_entry_t cur;
 2694         vm_object_t object;
 2695         struct swblk *sb;
 2696         vm_pindex_t e, pi;
 2697         long count;
 2698         int i;
 2699 
 2700         map = &vmspace->vm_map;
 2701         count = 0;
 2702 
 2703         VM_MAP_ENTRY_FOREACH(cur, map) {
 2704                 if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
 2705                         continue;
 2706                 object = cur->object.vm_object;
 2707                 if (object == NULL || object->type != OBJT_SWAP)
 2708                         continue;
 2709                 VM_OBJECT_RLOCK(object);
 2710                 if (object->type != OBJT_SWAP)
 2711                         goto unlock;
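                      /*
                       * Convert the entry's byte range within the backing
                       * object to a range of page indices.
                       */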
 2712                 pi = OFF_TO_IDX(cur->offset);
 2713                 e = pi + OFF_TO_IDX(cur->end - cur->start);
 2714                 for (;; pi = sb->p + SWAP_META_PAGES) {
 2715                         sb = SWAP_PCTRIE_LOOKUP_GE(
 2716                             &object->un_pager.swp.swp_blks, pi);
 2717                         if (sb == NULL || sb->p >= e)
 2718                                 break;
 2719                         for (i = 0; i < SWAP_META_PAGES; i++) {
 2720                                 if (sb->p + i < e &&
 2721                                     sb->d[i] != SWAPBLK_NONE)
 2722                                         count++;
 2723                         }
 2724                 }
 2725 unlock:
 2726                 VM_OBJECT_RUNLOCK(object);
 2727         }
 2728         return (count);
 2729 }
 2730 
 2731 /*
 2732  * GEOM backend
 2733  *
 2734  * Swapping onto disk devices.
 2735  *
 2736  */
 2737 
 2738 static g_orphan_t swapgeom_orphan;
 2739 
 2740 static struct g_class g_swap_class = {
 2741         .name = "SWAP",
 2742         .version = G_VERSION,
 2743         .orphan = swapgeom_orphan,
 2744 };
 2745 
 2746 DECLARE_GEOM_CLASS(g_swap_class, g_class);
 2747 
 2748 static void
 2749 swapgeom_close_ev(void *arg, int flags)
 2750 {
 2751         struct g_consumer *cp;
 2752 
 2753         cp = arg;
 2754         g_access(cp, -1, -1, 0);
 2755         g_detach(cp);
 2756         g_destroy_consumer(cp);
 2757 }
 2758 
 2759 /*
 2760  * Add a reference to the g_consumer for an inflight transaction.
 2761  */
 2762 static void
 2763 swapgeom_acquire(struct g_consumer *cp)
 2764 {
 2765 
 2766         mtx_assert(&sw_dev_mtx, MA_OWNED);
 2767         cp->index++;
 2768 }
 2769 
 2770 /*
 2771  * Remove a reference from the g_consumer.  Post a close event if all
 2772  * references go away, since the function might be called from the
 2773  * biodone context.
 2774  */
 2775 static void
 2776 swapgeom_release(struct g_consumer *cp, struct swdevt *sp)
 2777 {
 2778 
 2779         mtx_assert(&sw_dev_mtx, MA_OWNED);
 2780         cp->index--;
 2781         if (cp->index == 0) {
 2782                 if (g_post_event(swapgeom_close_ev, cp, M_NOWAIT, NULL) == 0)
 2783                         sp->sw_id = NULL;
 2784         }
 2785 }
 2786 
 2787 static void
 2788 swapgeom_done(struct bio *bp2)
 2789 {
 2790         struct swdevt *sp;
 2791         struct buf *bp;
 2792         struct g_consumer *cp;
 2793 
 2794         bp = bp2->bio_caller2;
 2795         cp = bp2->bio_from;
 2796         bp->b_ioflags = bp2->bio_flags;
 2797         if (bp2->bio_error)
 2798                 bp->b_ioflags |= BIO_ERROR;
 2799         bp->b_resid = bp->b_bcount - bp2->bio_completed;
 2800         bp->b_error = bp2->bio_error;
 2801         bp->b_caller1 = NULL;
 2802         bufdone(bp);
 2803         sp = bp2->bio_caller1;
 2804         mtx_lock(&sw_dev_mtx);
 2805         swapgeom_release(cp, sp);
 2806         mtx_unlock(&sw_dev_mtx);
 2807         g_destroy_bio(bp2);
 2808 }
 2809 
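      /*
       * Issue a swap I/O request to a GEOM-backed swap device.  Take an
       * in-flight reference on the consumer, translate the buf into a
       * GEOM bio (passing the pages through unmapped when the buffer is
       * not mapped), and submit it to the provider.
       */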
 2810 static void
 2811 swapgeom_strategy(struct buf *bp, struct swdevt *sp)
 2812 {
 2813         struct bio *bio;
 2814         struct g_consumer *cp;
 2815 
 2816         mtx_lock(&sw_dev_mtx);
 2817         cp = sp->sw_id;
 2818         if (cp == NULL) {
 2819                 mtx_unlock(&sw_dev_mtx);
 2820                 bp->b_error = ENXIO;
 2821                 bp->b_ioflags |= BIO_ERROR;
 2822                 bufdone(bp);
 2823                 return;
 2824         }
 2825         swapgeom_acquire(cp);
 2826         mtx_unlock(&sw_dev_mtx);
 2827         if (bp->b_iocmd == BIO_WRITE)
 2828                 bio = g_new_bio();
 2829         else
 2830                 bio = g_alloc_bio();
 2831         if (bio == NULL) {
 2832                 mtx_lock(&sw_dev_mtx);
 2833                 swapgeom_release(cp, sp);
 2834                 mtx_unlock(&sw_dev_mtx);
 2835                 bp->b_error = ENOMEM;
 2836                 bp->b_ioflags |= BIO_ERROR;
 2837                 printf("swap_pager: cannot allocate bio\n");
 2838                 bufdone(bp);
 2839                 return;
 2840         }
 2841 
 2842         bp->b_caller1 = bio;
 2843         bio->bio_caller1 = sp;
 2844         bio->bio_caller2 = bp;
 2845         bio->bio_cmd = bp->b_iocmd;
 2846         bio->bio_offset = (bp->b_blkno - sp->sw_first) * PAGE_SIZE;
 2847         bio->bio_length = bp->b_bcount;
 2848         bio->bio_done = swapgeom_done;
 2849         if (!buf_mapped(bp)) {
 2850                 bio->bio_ma = bp->b_pages;
 2851                 bio->bio_data = unmapped_buf;
 2852                 bio->bio_ma_offset = (vm_offset_t)bp->b_offset & PAGE_MASK;
 2853                 bio->bio_ma_n = bp->b_npages;
 2854                 bio->bio_flags |= BIO_UNMAPPED;
 2855         } else {
 2856                 bio->bio_data = bp->b_data;
 2857                 bio->bio_ma = NULL;
 2858         }
 2859         g_io_request(bio, cp);
 2860         return;
 2861 }
 2862 
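      /*
       * Orphan method: the provider backing this consumer is going away.
       * Mark the corresponding swap device as closing and drop the
       * reference taken when it was configured; close the consumer
       * immediately if that was the last reference.
       */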
 2863 static void
 2864 swapgeom_orphan(struct g_consumer *cp)
 2865 {
 2866         struct swdevt *sp;
 2867         int destroy;
 2868 
 2869         mtx_lock(&sw_dev_mtx);
 2870         TAILQ_FOREACH(sp, &swtailq, sw_list) {
 2871                 if (sp->sw_id == cp) {
 2872                         sp->sw_flags |= SW_CLOSING;
 2873                         break;
 2874                 }
 2875         }
 2876         /*
 2877          * Drop the reference we were created with.  Do it directly since
 2878          * we're in a special context where we don't have to queue the
 2879          * call to swapgeom_close_ev().
 2880          */
 2881         cp->index--;
 2882         destroy = ((sp != NULL) && (cp->index == 0));
 2883         if (destroy)
 2884                 sp->sw_id = NULL;
 2885         mtx_unlock(&sw_dev_mtx);
 2886         if (destroy)
 2887                 swapgeom_close_ev(cp, 0);
 2888 }
 2889 
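      /*
       * Close method for GEOM-backed swap devices: detach the consumer
       * from the swap device and tear it down.
       */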
 2890 static void
 2891 swapgeom_close(struct thread *td, struct swdevt *sw)
 2892 {
 2893         struct g_consumer *cp;
 2894 
 2895         mtx_lock(&sw_dev_mtx);
 2896         cp = sw->sw_id;
 2897         sw->sw_id = NULL;
 2898         mtx_unlock(&sw_dev_mtx);
 2899 
 2900         /*
 2901          * swapgeom_close() may be called from the biodone context,
 2902          * where we cannot perform topology changes.  Delegate the
 2903          * work to the events thread.
 2904          */
 2905         if (cp != NULL)
 2906                 g_waitfor_event(swapgeom_close_ev, cp, M_WAITOK, NULL);
 2907 }
 2908 
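      /*
       * Configure swapping onto a GEOM provider.  Called with the GEOM
       * topology lock held.  Reject providers already used for swap,
       * create (or reuse) the swap geom and a new consumer attached to
       * the provider, and register the device with swaponsomething().
       */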
 2909 static int
 2910 swapongeom_locked(struct cdev *dev, struct vnode *vp)
 2911 {
 2912         struct g_provider *pp;
 2913         struct g_consumer *cp;
 2914         static struct g_geom *gp;
 2915         struct swdevt *sp;
 2916         u_long nblks;
 2917         int error;
 2918 
 2919         pp = g_dev_getprovider(dev);
 2920         if (pp == NULL)
 2921                 return (ENODEV);
 2922         mtx_lock(&sw_dev_mtx);
 2923         TAILQ_FOREACH(sp, &swtailq, sw_list) {
 2924                 cp = sp->sw_id;
 2925                 if (cp != NULL && cp->provider == pp) {
 2926                         mtx_unlock(&sw_dev_mtx);
 2927                         return (EBUSY);
 2928                 }
 2929         }
 2930         mtx_unlock(&sw_dev_mtx);
 2931         if (gp == NULL)
 2932                 gp = g_new_geomf(&g_swap_class, "swap");
 2933         cp = g_new_consumer(gp);
 2934         cp->index = 1;  /* Number of active I/Os, plus one for being active. */
 2935         cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
 2936         g_attach(cp, pp);
 2937         /*
 2938          * XXX: Every time you think you can improve the margin for
 2939          * footshooting, somebody depends on the ability to do so:
 2940          * savecore(8) wants to write to our swapdev so we cannot
 2941          * set an exclusive count :-(
 2942          */
 2943         error = g_access(cp, 1, 1, 0);
 2944         if (error != 0) {
 2945                 g_detach(cp);
 2946                 g_destroy_consumer(cp);
 2947                 return (error);
 2948         }
 2949         nblks = pp->mediasize / DEV_BSIZE;
 2950         swaponsomething(vp, cp, nblks, swapgeom_strategy,
 2951             swapgeom_close, dev2udev(dev),
 2952             (pp->flags & G_PF_ACCEPT_UNMAPPED) != 0 ? SW_UNMAPPED : 0);
 2953         return (0);
 2954 }
 2955 
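      /*
       * Attempt to enable swapping onto the device behind the given
       * vnode.  Only live character devices are eligible.
       */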
 2956 static int
 2957 swapongeom(struct vnode *vp)
 2958 {
 2959         int error;
 2960 
 2961         vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 2962         if (vp->v_type != VCHR || VN_IS_DOOMED(vp)) {
 2963                 error = ENOENT;
 2964         } else {
 2965                 g_topology_lock();
 2966                 error = swapongeom_locked(vp->v_rdev, vp);
 2967                 g_topology_unlock();
 2968         }
 2969         VOP_UNLOCK(vp);
 2970         return (error);
 2971 }
 2972 
 2973 /*
 2974  * VNODE backend
 2975  *
 2976  * This is used mainly for network filesystem (read: probably only tested
 2977  * with NFS) swapfiles.
 2978  *
 2979  */
 2980 
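      /*
       * Issue a swap I/O request to a vnode-backed swap device.  Convert
       * the swap block number to a device block address, retarget the
       * buf at the swap vnode's buffer object (moving the write count if
       * needed), and start the I/O with bstrategy().
       */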
 2981 static void
 2982 swapdev_strategy(struct buf *bp, struct swdevt *sp)
 2983 {
 2984         struct vnode *vp2;
 2985 
 2986         bp->b_blkno = ctodb(bp->b_blkno - sp->sw_first);
 2987 
 2988         vp2 = sp->sw_id;
 2989         vhold(vp2);
 2990         if (bp->b_iocmd == BIO_WRITE) {
 2991                 if (bp->b_bufobj)
 2992                         bufobj_wdrop(bp->b_bufobj);
 2993                 bufobj_wref(&vp2->v_bufobj);
 2994         }
 2995         if (bp->b_bufobj != &vp2->v_bufobj)
 2996                 bp->b_bufobj = &vp2->v_bufobj;
 2997         bp->b_vp = vp2;
 2998         bp->b_iooffset = dbtob(bp->b_blkno);
 2999         bstrategy(bp);
 3000         return;
 3001 }
 3002 
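      /*
       * Close a vnode-backed swap device: close the vnode and release
       * the reference held while it was configured for swapping.
       */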
 3003 static void
 3004 swapdev_close(struct thread *td, struct swdevt *sp)
 3005 {
 3006 
 3007         VOP_CLOSE(sp->sw_vp, FREAD | FWRITE, td->td_ucred, td);
 3008         vrele(sp->sw_vp);
 3009 }
 3010 
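      /*
       * Configure swapping onto a vnode (typically a swap file, possibly
       * on NFS).  Reject vnodes already configured for swap, perform the
       * optional MAC check, open the vnode, and register it with
       * swaponsomething().
       */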
 3011 static int
 3012 swaponvp(struct thread *td, struct vnode *vp, u_long nblks)
 3013 {
 3014         struct swdevt *sp;
 3015         int error;
 3016 
 3017         if (nblks == 0)
 3018                 return (ENXIO);
 3019         mtx_lock(&sw_dev_mtx);
 3020         TAILQ_FOREACH(sp, &swtailq, sw_list) {
 3021                 if (sp->sw_id == vp) {
 3022                         mtx_unlock(&sw_dev_mtx);
 3023                         return (EBUSY);
 3024                 }
 3025         }
 3026         mtx_unlock(&sw_dev_mtx);
 3027 
 3028         (void) vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 3029 #ifdef MAC
 3030         error = mac_system_check_swapon(td->td_ucred, vp);
 3031         if (error == 0)
 3032 #endif
 3033                 error = VOP_OPEN(vp, FREAD | FWRITE, td->td_ucred, td, NULL);
 3034         (void) VOP_UNLOCK(vp);
 3035         if (error)
 3036                 return (error);
 3037 
 3038         swaponsomething(vp, vp, nblks, swapdev_strategy, swapdev_close,
 3039             NODEV, 0);
 3040         return (0);
 3041 }
 3042 
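      /*
       * Sysctl handler that adjusts the limit on concurrent asynchronous
       * swap writes.  Reject values outside [1, nswbuf / 2], then apply
       * the change incrementally, sleeping until enough in-flight writes
       * complete to absorb a reduction.
       */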
 3043 static int
 3044 sysctl_swap_async_max(SYSCTL_HANDLER_ARGS)
 3045 {
 3046         int error, new, n;
 3047 
 3048         new = nsw_wcount_async_max;
 3049         error = sysctl_handle_int(oidp, &new, 0, req);
 3050         if (error != 0 || req->newptr == NULL)
 3051                 return (error);
 3052 
 3053         if (new > nswbuf / 2 || new < 1)
 3054                 return (EINVAL);
 3055 
 3056         mtx_lock(&swbuf_mtx);
 3057         while (nsw_wcount_async_max != new) {
 3058                 /*
 3059                  * Adjust difference.  If the current async count is too low,
 3060                  * we will need to squeeze our update slowly in.  Sleep with a
 3061                  * higher priority than getpbuf() to finish faster.
 3062                  */
 3063                 n = new - nsw_wcount_async_max;
 3064                 if (nsw_wcount_async + n >= 0) {
 3065                         nsw_wcount_async += n;
 3066                         nsw_wcount_async_max += n;
 3067                         wakeup(&nsw_wcount_async);
 3068                 } else {
 3069                         nsw_wcount_async_max -= nsw_wcount_async;
 3070                         nsw_wcount_async = 0;
 3071                         msleep(&nsw_wcount_async, &swbuf_mtx, PSWP,
 3072                             "swpsysctl", 0);
 3073                 }
 3074         }
 3075         mtx_unlock(&swbuf_mtx);
 3076 
 3077         return (0);
 3078 }
 3079 
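      /*
       * Account for a newly established writeable mapping of a swap
       * object by growing its writemappings count.  Anonymous
       * (splittable) objects must never carry a write count.
       */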
 3080 static void
 3081 swap_pager_update_writecount(vm_object_t object, vm_offset_t start,
 3082     vm_offset_t end)
 3083 {
 3084 
 3085         VM_OBJECT_WLOCK(object);
 3086         KASSERT((object->flags & OBJ_ANON) == 0,
 3087             ("Splittable object with writecount"));
 3088         object->un_pager.swp.writemappings += (vm_ooffset_t)end - start;
 3089         VM_OBJECT_WUNLOCK(object);
 3090 }
 3091 
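      /*
       * Drop the writemappings accounting added by
       * swap_pager_update_writecount() when a writeable mapping is
       * removed.
       */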
 3092 static void
 3093 swap_pager_release_writecount(vm_object_t object, vm_offset_t start,
 3094     vm_offset_t end)
 3095 {
 3096 
 3097         VM_OBJECT_WLOCK(object);
 3098         KASSERT((object->flags & OBJ_ANON) == 0,
 3099             ("Splittable object with writecount"));
 3100         object->un_pager.swp.writemappings -= (vm_ooffset_t)end - start;
 3101         VM_OBJECT_WUNLOCK(object);
 3102 }
