FreeBSD/Linux Kernel Cross Reference
sys/uvm/uvm_aobj.c


    1 /*      $NetBSD: uvm_aobj.c,v 1.156 2022/05/31 08:43:16 andvar Exp $    */
    2 
    3 /*
    4  * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
    5  *                    Washington University.
    6  * All rights reserved.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   27  *
   28  * from: Id: uvm_aobj.c,v 1.1.2.5 1998/02/06 05:14:38 chs Exp
   29  */
   30 
   31 /*
   32  * uvm_aobj.c: anonymous memory uvm_object pager
   33  *
   34  * author: Chuck Silvers <chuq@chuq.com>
   35  * started: Jan-1998
   36  *
   37  * - design mostly from Chuck Cranor
   38  */
   39 
   40 #include <sys/cdefs.h>
   41 __KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.156 2022/05/31 08:43:16 andvar Exp $");
   42 
   43 #ifdef _KERNEL_OPT
   44 #include "opt_uvmhist.h"
   45 #endif
   46 
   47 #include <sys/param.h>
   48 #include <sys/systm.h>
   49 #include <sys/kernel.h>
   50 #include <sys/kmem.h>
   51 #include <sys/pool.h>
   52 #include <sys/atomic.h>
   53 
   54 #include <uvm/uvm.h>
   55 #include <uvm/uvm_page_array.h>
   56 
   57 /*
    58  * An anonymous UVM object (aobj) manages anonymous memory.  In addition to
   59  * keeping the list of resident pages, it may also keep a list of allocated
   60  * swap blocks.  Depending on the size of the object, this list is either
   61  * stored in an array (small objects) or in a hash table (large objects).
   62  *
   63  * Lock order
   64  *
   65  *      uao_list_lock ->
   66  *              uvm_object::vmobjlock
   67  */
   68 
   69 /*
   70  * Note: for hash tables, we break the address space of the aobj into blocks
   71  * of UAO_SWHASH_CLUSTER_SIZE pages, which shall be a power of two.
   72  */
   73 
   74 #define UAO_SWHASH_CLUSTER_SHIFT        4
   75 #define UAO_SWHASH_CLUSTER_SIZE         (1 << UAO_SWHASH_CLUSTER_SHIFT)
   76 
   77 /* Get the "tag" for this page index. */
   78 #define UAO_SWHASH_ELT_TAG(idx)         ((idx) >> UAO_SWHASH_CLUSTER_SHIFT)
   79 #define UAO_SWHASH_ELT_PAGESLOT_IDX(idx) \
   80     ((idx) & (UAO_SWHASH_CLUSTER_SIZE - 1))
   81 
   82 /* Given an ELT and a page index, find the swap slot. */
   83 #define UAO_SWHASH_ELT_PAGESLOT(elt, idx) \
   84     ((elt)->slots[UAO_SWHASH_ELT_PAGESLOT_IDX(idx)])
   85 
   86 /* Given an ELT, return its pageidx base. */
    87 #define UAO_SWHASH_ELT_PAGEIDX_BASE(elt) \
   88     ((elt)->tag << UAO_SWHASH_CLUSTER_SHIFT)
   89 
   90 /* The hash function. */
   91 #define UAO_SWHASH_HASH(aobj, idx) \
   92     (&(aobj)->u_swhash[(((idx) >> UAO_SWHASH_CLUSTER_SHIFT) \
   93     & (aobj)->u_swhashmask)])
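
       /*
        * Worked example (illustrative, using the constants above): page
        * index 0x123 hashes as follows: tag = 0x123 >> 4 = 0x12, in-cluster
        * slot index = 0x123 & 0xf = 0x3, and the bucket searched is
        * u_swhash[0x12 & u_swhashmask].
        */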
   94 
   95 /*
   96  * The threshold which determines whether we will use an array or a
   97  * hash table to store the list of allocated swap blocks.
   98  */
   99 #define UAO_SWHASH_THRESHOLD            (UAO_SWHASH_CLUSTER_SIZE * 4)
  100 #define UAO_USES_SWHASH(aobj) \
  101     ((aobj)->u_pages > UAO_SWHASH_THRESHOLD)
  102 
  103 /* The number of buckets in a hash, with an upper bound. */
  104 #define UAO_SWHASH_MAXBUCKETS           256
  105 #define UAO_SWHASH_BUCKETS(aobj) \
  106     (MIN((aobj)->u_pages >> UAO_SWHASH_CLUSTER_SHIFT, UAO_SWHASH_MAXBUCKETS))
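
       /*
        * Illustrative sizing (assuming 4 KB pages): UAO_SWHASH_THRESHOLD is
        * 16 * 4 = 64 pages, so an aobj larger than 256 KB switches from the
        * plain u_swslots array to the hash table, whose bucket count is in
        * turn capped at UAO_SWHASH_MAXBUCKETS (256).
        */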
  107 
  108 /*
  109  * uao_swhash_elt: when a hash table is being used, this structure defines
  110  * the format of an entry in the bucket list.
  111  */
  112 
  113 struct uao_swhash_elt {
  114         LIST_ENTRY(uao_swhash_elt) list;        /* the hash list */
  115         voff_t tag;                             /* our 'tag' */
  116         int count;                              /* our number of active slots */
  117         int slots[UAO_SWHASH_CLUSTER_SIZE];     /* the slots */
  118 };
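
       /*
        * Note (illustrative): one elt covers UAO_SWHASH_CLUSTER_SIZE (16)
        * consecutive page indices.  "count" tracks how many of its slots are
        * nonzero so that uao_set_swslot() can free the elt as soon as it
        * records no swap slots at all.
        */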
  119 
  120 /*
  121  * uao_swhash: the swap hash table structure
  122  */
  123 
  124 LIST_HEAD(uao_swhash, uao_swhash_elt);
  125 
  126 /*
  127  * uao_swhash_elt_pool: pool of uao_swhash_elt structures.
  128  * Note: pages for this pool must not come from a pageable kernel map.
  129  */
  130 static struct pool      uao_swhash_elt_pool     __cacheline_aligned;
  131 
  132 /*
  133  * uvm_aobj: the actual anon-backed uvm_object
  134  *
  135  * => the uvm_object is at the top of the structure, this allows
  136  *   (struct uvm_aobj *) == (struct uvm_object *)
  137  * => only one of u_swslots and u_swhash is used in any given aobj
  138  */
  139 
  140 struct uvm_aobj {
  141         struct uvm_object u_obj; /* has: lock, pgops, #pages, #refs */
  142         pgoff_t u_pages;         /* number of pages in entire object */
  143         int u_flags;             /* the flags (see uvm_aobj.h) */
  144         int *u_swslots;          /* array of offset->swapslot mappings */
  145                                  /*
  146                                   * hashtable of offset->swapslot mappings
  147                                   * (u_swhash is an array of bucket heads)
  148                                   */
  149         struct uao_swhash *u_swhash;
  150         u_long u_swhashmask;            /* mask for hashtable */
  151         LIST_ENTRY(uvm_aobj) u_list;    /* global list of aobjs */
  152         int u_freelist;           /* freelist to allocate pages from */
  153 };
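
       /*
        * The first-member layout noted above is what permits the casts used
        * throughout this file, e.g. (sketch):
        *
        *      struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
        *      struct uvm_object *back = &aobj->u_obj;  (same address)
        */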
  154 
  155 static void     uao_free(struct uvm_aobj *);
  156 static int      uao_get(struct uvm_object *, voff_t, struct vm_page **,
  157                     int *, int, vm_prot_t, int, int);
  158 static int      uao_put(struct uvm_object *, voff_t, voff_t, int);
  159 
  160 #if defined(VMSWAP)
  161 static struct uao_swhash_elt *uao_find_swhash_elt
  162     (struct uvm_aobj *, int, bool);
  163 
  164 static bool uao_pagein(struct uvm_aobj *, int, int);
  165 static bool uao_pagein_page(struct uvm_aobj *, int);
  166 #endif /* defined(VMSWAP) */
  167 
  168 static struct vm_page   *uao_pagealloc(struct uvm_object *, voff_t, int);
  169 
  170 /*
  171  * aobj_pager
  172  *
  173  * note that some functions (e.g. put) are handled elsewhere
  174  */
  175 
  176 const struct uvm_pagerops aobj_pager = {
  177         .pgo_reference = uao_reference,
  178         .pgo_detach = uao_detach,
  179         .pgo_get = uao_get,
  180         .pgo_put = uao_put,
  181 };
  182 
  183 /*
  184  * uao_list: global list of active aobjs, locked by uao_list_lock
  185  */
  186 
  187 static LIST_HEAD(aobjlist, uvm_aobj) uao_list   __cacheline_aligned;
  188 static kmutex_t         uao_list_lock           __cacheline_aligned;
  189 
  190 /*
  191  * hash table/array related functions
  192  */
  193 
  194 #if defined(VMSWAP)
  195 
  196 /*
  197  * uao_find_swhash_elt: find (or create) a hash table entry for a page
  198  * offset.
  199  *
  200  * => the object should be locked by the caller
  201  */
  202 
  203 static struct uao_swhash_elt *
  204 uao_find_swhash_elt(struct uvm_aobj *aobj, int pageidx, bool create)
  205 {
  206         struct uao_swhash *swhash;
  207         struct uao_swhash_elt *elt;
  208         voff_t page_tag;
  209 
  210         swhash = UAO_SWHASH_HASH(aobj, pageidx);
  211         page_tag = UAO_SWHASH_ELT_TAG(pageidx);
  212 
  213         /*
  214          * now search the bucket for the requested tag
  215          */
  216 
  217         LIST_FOREACH(elt, swhash, list) {
  218                 if (elt->tag == page_tag) {
  219                         return elt;
  220                 }
  221         }
  222         if (!create) {
  223                 return NULL;
  224         }
  225 
  226         /*
  227          * allocate a new entry for the bucket and init/insert it in
  228          */
  229 
  230         elt = pool_get(&uao_swhash_elt_pool, PR_NOWAIT);
  231         if (elt == NULL) {
  232                 return NULL;
  233         }
  234         LIST_INSERT_HEAD(swhash, elt, list);
  235         elt->tag = page_tag;
  236         elt->count = 0;
  237         memset(elt->slots, 0, sizeof(elt->slots));
  238         return elt;
  239 }
  240 
  241 /*
  242  * uao_find_swslot: find the swap slot number for an aobj/pageidx
  243  *
  244  * => object must be locked by caller
  245  */
  246 
  247 int
  248 uao_find_swslot(struct uvm_object *uobj, int pageidx)
  249 {
  250         struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
  251         struct uao_swhash_elt *elt;
  252 
  253         KASSERT(UVM_OBJ_IS_AOBJ(uobj));
  254 
  255         /*
  256          * if noswap flag is set, then we never return a slot
  257          */
  258 
  259         if (aobj->u_flags & UAO_FLAG_NOSWAP)
  260                 return 0;
  261 
  262         /*
  263          * if hashing, look in hash table.
  264          */
  265 
  266         if (UAO_USES_SWHASH(aobj)) {
  267                 elt = uao_find_swhash_elt(aobj, pageidx, false);
  268                 return elt ? UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) : 0;
  269         }
  270 
  271         /*
  272          * otherwise, look in the array
  273          */
  274 
  275         return aobj->u_swslots[pageidx];
  276 }
  277 
  278 /*
  279  * uao_set_swslot: set the swap slot for a page in an aobj.
  280  *
  281  * => setting a slot to zero frees the slot
  282  * => object must be locked by caller
  283  * => we return the old slot number, or -1 if we failed to allocate
  284  *    memory to record the new slot number
  285  */
  286 
  287 int
  288 uao_set_swslot(struct uvm_object *uobj, int pageidx, int slot)
  289 {
  290         struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
  291         struct uao_swhash_elt *elt;
  292         int oldslot;
  293         UVMHIST_FUNC(__func__);
  294         UVMHIST_CALLARGS(pdhist, "aobj %#jx pageidx %jd slot %jd",
  295             (uintptr_t)aobj, pageidx, slot, 0);
  296 
  297         KASSERT(rw_write_held(uobj->vmobjlock) || uobj->uo_refs == 0);
  298         KASSERT(UVM_OBJ_IS_AOBJ(uobj));
  299 
  300         /*
  301          * if noswap flag is set, then we can't set a non-zero slot.
  302          */
  303 
  304         if (aobj->u_flags & UAO_FLAG_NOSWAP) {
  305                 KASSERTMSG(slot == 0, "uao_set_swslot: no swap object");
  306                 return 0;
  307         }
  308 
  309         /*
  310          * are we using a hash table?  if so, add it in the hash.
  311          */
  312 
  313         if (UAO_USES_SWHASH(aobj)) {
  314 
  315                 /*
  316                  * Avoid allocating an entry just to free it again if
   317                  * the page had no swap slot in the first place, and
  318                  * we are freeing.
  319                  */
  320 
  321                 elt = uao_find_swhash_elt(aobj, pageidx, slot != 0);
  322                 if (elt == NULL) {
  323                         return slot ? -1 : 0;
  324                 }
  325 
  326                 oldslot = UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
  327                 UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) = slot;
  328 
  329                 /*
  330                  * now adjust the elt's reference counter and free it if we've
  331                  * dropped it to zero.
  332                  */
  333 
  334                 if (slot) {
  335                         if (oldslot == 0)
  336                                 elt->count++;
  337                 } else {
  338                         if (oldslot)
  339                                 elt->count--;
  340 
  341                         if (elt->count == 0) {
  342                                 LIST_REMOVE(elt, list);
  343                                 pool_put(&uao_swhash_elt_pool, elt);
  344                         }
  345                 }
  346         } else {
  347                 /* we are using an array */
  348                 oldslot = aobj->u_swslots[pageidx];
  349                 aobj->u_swslots[pageidx] = slot;
  350         }
  351         return oldslot;
  352 }
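
       /*
        * Usage sketch (hypothetical caller): to record that page 5 of an
        * aobj now lives in swap slot 42, and later to release that slot:
        *
        *      int old = uao_set_swslot(uobj, 5, 42);   (returns prior slot)
        *      ...
        *      old = uao_set_swslot(uobj, 5, 0);        (drops the mapping)
        *      if (old > 0)
        *              uvm_swap_free(old, 1);
        *
        * uao_dropswap() below wraps exactly this zero-and-free pattern.
        */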
  353 
  354 #endif /* defined(VMSWAP) */
  355 
  356 /*
  357  * end of hash/array functions
  358  */
  359 
  360 /*
  361  * uao_free: free all resources held by an aobj, and then free the aobj
  362  *
  363  * => the aobj should be dead
  364  */
  365 
  366 static void
  367 uao_free(struct uvm_aobj *aobj)
  368 {
  369         struct uvm_object *uobj = &aobj->u_obj;
  370 
  371         KASSERT(UVM_OBJ_IS_AOBJ(uobj));
  372         KASSERT(rw_write_held(uobj->vmobjlock));
  373         uao_dropswap_range(uobj, 0, 0);
  374         rw_exit(uobj->vmobjlock);
  375 
  376 #if defined(VMSWAP)
  377         if (UAO_USES_SWHASH(aobj)) {
  378 
  379                 /*
  380                  * free the hash table itself.
  381                  */
  382 
  383                 hashdone(aobj->u_swhash, HASH_LIST, aobj->u_swhashmask);
  384         } else {
  385 
  386                 /*
  387                  * free the array itself.
  388                  */
  389 
  390                 kmem_free(aobj->u_swslots, aobj->u_pages * sizeof(int));
  391         }
  392 #endif /* defined(VMSWAP) */
  393 
  394         /*
  395          * finally free the aobj itself
  396          */
  397 
  398         uvm_obj_destroy(uobj, true);
  399         kmem_free(aobj, sizeof(struct uvm_aobj));
  400 }
  401 
  402 /*
  403  * pager functions
  404  */
  405 
  406 /*
  407  * uao_create: create an aobj of the given size and return its uvm_object.
  408  *
  409  * => for normal use, flags are always zero
  410  * => for the kernel object, the flags are:
  411  *      UAO_FLAG_KERNOBJ - allocate the kernel object (can only happen once)
   412  *      UAO_FLAG_KERNSWAP - enable swapping of kernel object (can only happen once)
  413  */
  414 
  415 struct uvm_object *
  416 uao_create(voff_t size, int flags)
  417 {
  418         static struct uvm_aobj kernel_object_store;
  419         static krwlock_t bootstrap_kernel_object_lock;
  420         static int kobj_alloced __diagused = 0;
  421         pgoff_t pages = round_page((uint64_t)size) >> PAGE_SHIFT;
  422         struct uvm_aobj *aobj;
  423         int refs;
  424 
  425         /*
  426          * Allocate a new aobj, unless kernel object is requested.
  427          */
  428 
  429         if (flags & UAO_FLAG_KERNOBJ) {
  430                 KASSERT(!kobj_alloced);
  431                 aobj = &kernel_object_store;
  432                 aobj->u_pages = pages;
  433                 aobj->u_flags = UAO_FLAG_NOSWAP;
  434                 refs = UVM_OBJ_KERN;
  435                 kobj_alloced = UAO_FLAG_KERNOBJ;
  436         } else if (flags & UAO_FLAG_KERNSWAP) {
  437                 KASSERT(kobj_alloced == UAO_FLAG_KERNOBJ);
  438                 aobj = &kernel_object_store;
  439                 kobj_alloced = UAO_FLAG_KERNSWAP;
  440                 refs = 0xdeadbeaf; /* XXX: gcc */
  441         } else {
  442                 aobj = kmem_alloc(sizeof(struct uvm_aobj), KM_SLEEP);
  443                 aobj->u_pages = pages;
  444                 aobj->u_flags = 0;
  445                 refs = 1;
  446         }
  447 
  448         /*
  449          * no freelist by default
  450          */
  451 
  452         aobj->u_freelist = VM_NFREELIST;
  453 
  454         /*
  455          * allocate hash/array if necessary
  456          *
   457          * note: in the KERNSWAP case there is no need to worry about locking,
   458          * since we are still booting and should be the only thread around.
  459          */
  460 
  461         const int kernswap = (flags & UAO_FLAG_KERNSWAP) != 0;
  462         if (flags == 0 || kernswap) {
  463 #if defined(VMSWAP)
  464 
  465                 /* allocate hash table or array depending on object size */
  466                 if (UAO_USES_SWHASH(aobj)) {
  467                         aobj->u_swhash = hashinit(UAO_SWHASH_BUCKETS(aobj),
  468                             HASH_LIST, true, &aobj->u_swhashmask);
  469                 } else {
  470                         aobj->u_swslots = kmem_zalloc(pages * sizeof(int),
  471                             KM_SLEEP);
  472                 }
  473 #endif /* defined(VMSWAP) */
  474 
  475                 /*
  476                  * Replace kernel_object's temporary static lock with
  477                  * a regular rw_obj.  We cannot use uvm_obj_setlock()
  478                  * because that would try to free the old lock.
  479                  */
  480 
  481                 if (kernswap) {
  482                         aobj->u_obj.vmobjlock = rw_obj_alloc();
  483                         rw_destroy(&bootstrap_kernel_object_lock);
  484                 }
  485                 if (flags) {
  486                         aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
  487                         return &aobj->u_obj;
  488                 }
  489         }
  490 
  491         /*
  492          * Initialise UVM object.
  493          */
  494 
  495         const bool kernobj = (flags & UAO_FLAG_KERNOBJ) != 0;
  496         uvm_obj_init(&aobj->u_obj, &aobj_pager, !kernobj, refs);
  497         if (__predict_false(kernobj)) {
  498                 /* Use a temporary static lock for kernel_object. */
  499                 rw_init(&bootstrap_kernel_object_lock);
  500                 uvm_obj_setlock(&aobj->u_obj, &bootstrap_kernel_object_lock);
  501         }
  502 
  503         /*
  504          * now that aobj is ready, add it to the global list
  505          */
  506 
  507         mutex_enter(&uao_list_lock);
  508         LIST_INSERT_HEAD(&uao_list, aobj, u_list);
  509         mutex_exit(&uao_list_lock);
   510         return &aobj->u_obj;
  511 }
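
       /*
        * Typical use (sketch; simplified from real callers such as the
        * System V shared memory code): create an object, use it, then drop
        * the reference that uao_create() returned:
        *
        *      struct uvm_object *uobj = uao_create(size, 0);
        *      ... map the object, fault pages in, etc ...
        *      uao_detach(uobj);
        */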
  512 
  513 /*
  514  * uao_set_pgfl: allocate pages only from the specified freelist.
  515  *
  516  * => must be called before any pages are allocated for the object.
  517  * => reset by setting it to VM_NFREELIST, meaning any freelist.
  518  */
  519 
  520 void
  521 uao_set_pgfl(struct uvm_object *uobj, int freelist)
  522 {
  523         struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
  524 
  525         KASSERTMSG((0 <= freelist), "invalid freelist %d", freelist);
  526         KASSERTMSG((freelist <= VM_NFREELIST), "invalid freelist %d",
  527             freelist);
  528 
  529         aobj->u_freelist = freelist;
  530 }
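
       /*
        * Example (sketch): pin an object's future page allocations to
        * freelist 0, then reset to the default:
        *
        *      uao_set_pgfl(uobj, 0);
        *      ...
        *      uao_set_pgfl(uobj, VM_NFREELIST);
        */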
  531 
  532 /*
  533  * uao_pagealloc: allocate a page for aobj.
  534  */
  535 
  536 static inline struct vm_page *
  537 uao_pagealloc(struct uvm_object *uobj, voff_t offset, int flags)
  538 {
  539         struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
  540 
  541         if (__predict_true(aobj->u_freelist == VM_NFREELIST))
  542                 return uvm_pagealloc(uobj, offset, NULL, flags);
  543         else
  544                 return uvm_pagealloc_strat(uobj, offset, NULL, flags,
  545                     UVM_PGA_STRAT_ONLY, aobj->u_freelist);
  546 }
  547 
  548 /*
  549  * uao_init: set up aobj pager subsystem
  550  *
  551  * => called at boot time from uvm_pager_init()
  552  */
  553 
  554 void
  555 uao_init(void)
  556 {
  557         static int uao_initialized;
  558 
  559         if (uao_initialized)
  560                 return;
  561         uao_initialized = true;
  562         LIST_INIT(&uao_list);
  563         mutex_init(&uao_list_lock, MUTEX_DEFAULT, IPL_NONE);
  564         pool_init(&uao_swhash_elt_pool, sizeof(struct uao_swhash_elt),
  565             0, 0, 0, "uaoeltpl", NULL, IPL_VM);
  566 }
  567 
  568 /*
  569  * uao_reference: hold a reference to an anonymous UVM object.
  570  */
  571 void
  572 uao_reference(struct uvm_object *uobj)
  573 {
  574         /* Kernel object is persistent. */
  575         if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
  576                 return;
  577         }
  578         atomic_inc_uint(&uobj->uo_refs);
  579 }
  580 
  581 /*
  582  * uao_detach: drop a reference to an anonymous UVM object.
  583  */
  584 void
  585 uao_detach(struct uvm_object *uobj)
  586 {
  587         struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
  588         struct uvm_page_array a;
  589         struct vm_page *pg;
  590 
  591         UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
  592 
  593         /*
  594          * Detaching from kernel object is a NOP.
  595          */
  596 
  597         if (UVM_OBJ_IS_KERN_OBJECT(uobj))
  598                 return;
  599 
  600         /*
  601          * Drop the reference.  If it was the last one, destroy the object.
  602          */
  603 
  604         KASSERT(uobj->uo_refs > 0);
  605         UVMHIST_LOG(maphist,"  (uobj=%#jx)  ref=%jd",
  606             (uintptr_t)uobj, uobj->uo_refs, 0, 0);
  607 #ifndef __HAVE_ATOMIC_AS_MEMBAR
  608         membar_release();
  609 #endif
  610         if (atomic_dec_uint_nv(&uobj->uo_refs) > 0) {
  611                 UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
  612                 return;
  613         }
  614 #ifndef __HAVE_ATOMIC_AS_MEMBAR
  615         membar_acquire();
  616 #endif
  617 
  618         /*
  619          * Remove the aobj from the global list.
  620          */
  621 
  622         mutex_enter(&uao_list_lock);
  623         LIST_REMOVE(aobj, u_list);
  624         mutex_exit(&uao_list_lock);
  625 
  626         /*
  627          * Free all the pages left in the aobj.  For each page, when the
  628          * page is no longer busy (and thus after any disk I/O that it is
  629          * involved in is complete), release any swap resources and free
  630          * the page itself.
  631          */
  632         uvm_page_array_init(&a, uobj, 0);
  633         rw_enter(uobj->vmobjlock, RW_WRITER);
  634         while ((pg = uvm_page_array_fill_and_peek(&a, 0, 0)) != NULL) {
  635                 uvm_page_array_advance(&a);
  636                 pmap_page_protect(pg, VM_PROT_NONE);
  637                 if (pg->flags & PG_BUSY) {
  638                         uvm_pagewait(pg, uobj->vmobjlock, "uao_det");
  639                         uvm_page_array_clear(&a);
  640                         rw_enter(uobj->vmobjlock, RW_WRITER);
  641                         continue;
  642                 }
  643                 uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
  644                 uvm_pagefree(pg);
  645         }
  646         uvm_page_array_fini(&a);
  647 
  648         /*
  649          * Finally, free the anonymous UVM object itself.
  650          */
  651 
  652         uao_free(aobj);
  653 }
  654 
  655 /*
  656  * uao_put: flush pages out of a uvm object
  657  *
  658  * => object should be locked by caller.  we may _unlock_ the object
  659  *      if (and only if) we need to clean a page (PGO_CLEANIT).
  660  *      XXXJRT Currently, however, we don't.  In the case of cleaning
  661  *      XXXJRT a page, we simply just deactivate it.  Should probably
  662  *      XXXJRT handle this better, in the future (although "flushing"
  663  *      XXXJRT anonymous memory isn't terribly important).
  664  * => if PGO_CLEANIT is not set, then we will neither unlock the object
   665  *      nor block.
   666  * => if PGO_ALLPAGES is set, then all pages in the object are valid targets
  667  *      for flushing.
  668  * => we return 0 unless we encountered some sort of I/O error
  669  *      XXXJRT currently never happens, as we never directly initiate
  670  *      XXXJRT I/O
  671  */
  672 
  673 static int
  674 uao_put(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
  675 {
  676         struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
  677         struct uvm_page_array a;
  678         struct vm_page *pg;
  679         voff_t curoff;
  680         UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
  681 
  682         KASSERT(UVM_OBJ_IS_AOBJ(uobj));
  683         KASSERT(rw_write_held(uobj->vmobjlock));
  684 
  685         if (flags & PGO_ALLPAGES) {
  686                 start = 0;
  687                 stop = aobj->u_pages << PAGE_SHIFT;
  688         } else {
  689                 start = trunc_page(start);
  690                 if (stop == 0) {
  691                         stop = aobj->u_pages << PAGE_SHIFT;
  692                 } else {
  693                         stop = round_page(stop);
  694                 }
  695                 if (stop > (uint64_t)(aobj->u_pages << PAGE_SHIFT)) {
  696                         printf("uao_put: strange, got an out of range "
  697                             "flush %#jx > %#jx (fixed)\n",
  698                             (uintmax_t)stop,
  699                             (uintmax_t)(aobj->u_pages << PAGE_SHIFT));
  700                         stop = aobj->u_pages << PAGE_SHIFT;
  701                 }
  702         }
  703         UVMHIST_LOG(maphist,
  704             " flush start=%#jx, stop=%#jx, flags=%#jx",
  705             start, stop, flags, 0);
  706 
  707         /*
  708          * Don't need to do any work here if we're not freeing
  709          * or deactivating pages.
  710          */
  711 
  712         if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) {
  713                 rw_exit(uobj->vmobjlock);
  714                 return 0;
  715         }
  716 
  717         /* locked: uobj */
  718         uvm_page_array_init(&a, uobj, 0);
  719         curoff = start;
  720         while ((pg = uvm_page_array_fill_and_peek(&a, curoff, 0)) != NULL) {
  721                 if (pg->offset >= stop) {
  722                         break;
  723                 }
  724 
  725                 /*
  726                  * wait and try again if the page is busy.
  727                  */
  728 
  729                 if (pg->flags & PG_BUSY) {
  730                         uvm_pagewait(pg, uobj->vmobjlock, "uao_put");
  731                         uvm_page_array_clear(&a);
  732                         rw_enter(uobj->vmobjlock, RW_WRITER);
  733                         continue;
  734                 }
  735                 uvm_page_array_advance(&a);
  736                 curoff = pg->offset + PAGE_SIZE;
  737 
  738                 switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {
  739 
  740                 /*
  741                  * XXX In these first 3 cases, we always just
  742                  * XXX deactivate the page.  We may want to
  743                  * XXX handle the different cases more specifically
  744                  * XXX in the future.
  745                  */
  746 
  747                 case PGO_CLEANIT|PGO_FREE:
  748                 case PGO_CLEANIT|PGO_DEACTIVATE:
  749                 case PGO_DEACTIVATE:
  750  deactivate_it:
  751                         uvm_pagelock(pg);
  752                         uvm_pagedeactivate(pg);
  753                         uvm_pageunlock(pg);
  754                         break;
  755 
  756                 case PGO_FREE:
  757                         /*
  758                          * If there are multiple references to
  759                          * the object, just deactivate the page.
  760                          */
  761 
  762                         if (uobj->uo_refs > 1)
  763                                 goto deactivate_it;
  764 
  765                         /*
  766                          * free the swap slot and the page.
  767                          */
  768 
  769                         pmap_page_protect(pg, VM_PROT_NONE);
  770 
  771                         /*
  772                          * freeing swapslot here is not strictly necessary.
  773                          * however, leaving it here doesn't save much
  774                          * because we need to update swap accounting anyway.
  775                          */
  776 
  777                         uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
  778                         uvm_pagefree(pg);
  779                         break;
  780 
  781                 default:
  782                         panic("%s: impossible", __func__);
  783                 }
  784         }
  785         rw_exit(uobj->vmobjlock);
  786         uvm_page_array_fini(&a);
  787         return 0;
  788 }
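
       /*
        * Example invocation (sketch, via the pgo_put pager operation): free
        * every page of an aobj (pages are merely deactivated if the object
        * has multiple references).  The caller write-locks the object and
        * uao_put() itself releases the lock:
        *
        *      rw_enter(uobj->vmobjlock, RW_WRITER);
        *      (void)(*uobj->pgops->pgo_put)(uobj, 0, 0,
        *          PGO_ALLPAGES | PGO_FREE);
        */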
  789 
  790 /*
  791  * uao_get: fetch me a page
  792  *
  793  * we have three cases:
  794  * 1: page is resident     -> just return the page.
  795  * 2: page is zero-fill    -> allocate a new page and zero it.
  796  * 3: page is swapped out  -> fetch the page from swap.
  797  *
  798  * case 1 can be handled with PGO_LOCKED, cases 2 and 3 cannot.
  799  * so, if the "center" page hits case 2/3 then we will need to return EBUSY.
  800  *
  801  * => prefer map unlocked (not required)
  802  * => object must be locked!  we will _unlock_ it before starting any I/O.
  803  * => flags: PGO_LOCKED: fault data structures are locked
  804  * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
  805  * => NOTE: caller must check for released pages!!
  806  */
  807 
  808 static int
  809 uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
  810     int *npagesp, int centeridx, vm_prot_t access_type, int advice, int flags)
  811 {
  812         voff_t current_offset;
  813         struct vm_page *ptmp;
  814         int lcv, gotpages, maxpages, swslot, pageidx;
  815         bool overwrite = ((flags & PGO_OVERWRITE) != 0);
  816         struct uvm_page_array a;
  817 
  818         UVMHIST_FUNC(__func__);
  819         UVMHIST_CALLARGS(pdhist, "aobj=%#jx offset=%jd, flags=%#jx",
  820                     (uintptr_t)uobj, offset, flags,0);
  821 
  822         /*
  823          * the object must be locked.  it can only be a read lock when
  824          * processing a read fault with PGO_LOCKED.
  825          */
  826 
  827         KASSERT(UVM_OBJ_IS_AOBJ(uobj));
  828         KASSERT(rw_lock_held(uobj->vmobjlock));
  829         KASSERT(rw_write_held(uobj->vmobjlock) ||
  830            ((flags & PGO_LOCKED) != 0 && (access_type & VM_PROT_WRITE) == 0));
  831 
  832         /*
  833          * get number of pages
  834          */
  835 
  836         maxpages = *npagesp;
  837 
  838         /*
   839          * step 1: handle the case where the fault data structures are locked.
  840          */
  841 
  842         if (flags & PGO_LOCKED) {
  843 
  844                 /*
  845                  * step 1a: get pages that are already resident.   only do
  846                  * this if the data structures are locked (i.e. the first
  847                  * time through).
  848                  */
  849 
  850                 uvm_page_array_init(&a, uobj, 0);
  851                 gotpages = 0;   /* # of pages we got so far */
  852                 for (lcv = 0; lcv < maxpages; lcv++) {
  853                         ptmp = uvm_page_array_fill_and_peek(&a,
  854                             offset + (lcv << PAGE_SHIFT), maxpages);
  855                         if (ptmp == NULL) {
  856                                 break;
  857                         }
  858                         KASSERT(ptmp->offset >= offset);
  859                         lcv = (ptmp->offset - offset) >> PAGE_SHIFT;
  860                         if (lcv >= maxpages) {
  861                                 break;
  862                         }
  863                         uvm_page_array_advance(&a);
  864 
  865                         /*
   866                  * to be useful we must get a non-busy page
  867                          */
  868 
  869                         if ((ptmp->flags & PG_BUSY) != 0) {
  870                                 continue;
  871                         }
  872 
  873                         /*
   874                  * useful page: plug it into our result array
  875                          */
  876 
  877                         KASSERT(uvm_pagegetdirty(ptmp) !=
  878                             UVM_PAGE_STATUS_CLEAN);
  879                         pps[lcv] = ptmp;
  880                         gotpages++;
  881                 }
  882                 uvm_page_array_fini(&a);
  883 
  884                 /*
   885                  * step 1b: now we've either done everything needed or we need
   886                  * to unlock and do some waiting or I/O.
  887                  */
  888 
  889                 UVMHIST_LOG(pdhist, "<- done (done=%jd)",
  890                     (pps[centeridx] != NULL), 0,0,0);
  891                 *npagesp = gotpages;
  892                 return pps[centeridx] != NULL ? 0 : EBUSY;
  893         }
  894 
  895         /*
  896          * step 2: get non-resident or busy pages.
  897          * object is locked.   data structures are unlocked.
  898          */
  899 
  900         if ((flags & PGO_SYNCIO) == 0) {
  901                 goto done;
  902         }
  903 
  904         uvm_page_array_init(&a, uobj, 0);
  905         for (lcv = 0, current_offset = offset ; lcv < maxpages ;) {
  906 
  907                 /*
  908                  * we have yet to locate the current page (pps[lcv]).   we
  909                  * first look for a page that is already at the current offset.
  910                  * if we find a page, we check to see if it is busy or
  911                  * released.  if that is the case, then we sleep on the page
  912                  * until it is no longer busy or released and repeat the lookup.
  913                  * if the page we found is neither busy nor released, then we
  914                  * busy it (so we own it) and plug it into pps[lcv].   we are
  915                  * ready to move on to the next page.
  916                  */
  917 
  918                 ptmp = uvm_page_array_fill_and_peek(&a, current_offset,
  919                     maxpages - lcv);
  920 
  921                 if (ptmp != NULL && ptmp->offset == current_offset) {
  922                         /* page is there, see if we need to wait on it */
  923                         if ((ptmp->flags & PG_BUSY) != 0) {
  924                                 UVMHIST_LOG(pdhist,
  925                                     "sleeping, ptmp->flags %#jx\n",
  926                                     ptmp->flags,0,0,0);
  927                                 uvm_pagewait(ptmp, uobj->vmobjlock, "uao_get");
  928                                 rw_enter(uobj->vmobjlock, RW_WRITER);
  929                                 uvm_page_array_clear(&a);
  930                                 continue;
  931                         }
  932 
  933                         /*
  934                          * if we get here then the page is resident and
  935                          * unbusy.  we busy it now (so we own it).  if
  936                          * overwriting, mark the page dirty up front as
  937                          * it will be zapped via an unmanaged mapping.
  938                          */
  939 
  940                         KASSERT(uvm_pagegetdirty(ptmp) !=
  941                             UVM_PAGE_STATUS_CLEAN);
  942                         if (overwrite) {
  943                                 uvm_pagemarkdirty(ptmp, UVM_PAGE_STATUS_DIRTY);
  944                         }
  945                         /* we own it, caller must un-busy */
  946                         ptmp->flags |= PG_BUSY;
  947                         UVM_PAGE_OWN(ptmp, "uao_get2");
  948                         pps[lcv++] = ptmp;
  949                         current_offset += PAGE_SIZE;
  950                         uvm_page_array_advance(&a);
  951                         continue;
  952                 } else {
  953                         KASSERT(ptmp == NULL || ptmp->offset > current_offset);
  954                 }
  955 
  956                 /*
  957                  * not resident.  allocate a new busy/fake/clean page in the
  958                  * object.  if it's in swap we need to do I/O to fill in the
  959                  * data, otherwise the page needs to be cleared: if it's not
  960                  * destined to be overwritten, then zero it here and now.
  961                  */
  962 
  963                 pageidx = current_offset >> PAGE_SHIFT;
  964                 swslot = uao_find_swslot(uobj, pageidx);
  965                 ptmp = uao_pagealloc(uobj, current_offset,
  966                     swslot != 0 || overwrite ? 0 : UVM_PGA_ZERO);
  967 
  968                 /* out of RAM? */
  969                 if (ptmp == NULL) {
  970                         rw_exit(uobj->vmobjlock);
  971                         UVMHIST_LOG(pdhist, "sleeping, ptmp == NULL",0,0,0,0);
  972                         uvm_wait("uao_getpage");
  973                         rw_enter(uobj->vmobjlock, RW_WRITER);
  974                         uvm_page_array_clear(&a);
  975                         continue;
  976                 }
  977 
  978                 /*
  979                  * if swslot == 0, page hasn't existed before and is zeroed.
  980                  * otherwise we have a "fake/busy/clean" page that we just
  981                  * allocated.  do the needed "i/o", reading from swap.
  982                  */
  983 
  984                 if (swslot != 0) {
  985 #if defined(VMSWAP)
  986                         int error;
  987 
  988                         UVMHIST_LOG(pdhist, "pagein from swslot %jd",
  989                              swslot, 0,0,0);
  990 
  991                         /*
  992                          * page in the swapped-out page.
  993                          * unlock object for i/o, relock when done.
  994                          */
  995 
  996                         uvm_page_array_clear(&a);
  997                         rw_exit(uobj->vmobjlock);
  998                         error = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
  999                         rw_enter(uobj->vmobjlock, RW_WRITER);
 1000 
 1001                         /*
 1002                          * I/O done.  check for errors.
 1003                          */
 1004 
 1005                         if (error != 0) {
 1006                                 UVMHIST_LOG(pdhist, "<- done (error=%jd)",
 1007                                     error,0,0,0);
 1008 
 1009                                 /*
 1010                                  * remove the swap slot from the aobj
 1011                                  * and mark the aobj as having no real slot.
 1012                                  * don't free the swap slot, thus preventing
 1013                                  * it from being used again.
 1014                                  */
 1015 
 1016                                 swslot = uao_set_swslot(uobj, pageidx,
 1017                                     SWSLOT_BAD);
 1018                                 if (swslot > 0) {
 1019                                         uvm_swap_markbad(swslot, 1);
 1020                                 }
 1021 
 1022                                 uvm_pagefree(ptmp);
 1023                                 rw_exit(uobj->vmobjlock);
 1024                                 UVMHIST_LOG(pdhist, "<- done (error)",
 1025                                     error,lcv,0,0);
 1026                                 if (lcv != 0) {
 1027                                         uvm_page_unbusy(pps, lcv);
 1028                                 }
 1029                                 memset(pps, 0, maxpages * sizeof(pps[0]));
 1030                                 uvm_page_array_fini(&a);
 1031                                 return error;
 1032                         }
 1033 #else /* defined(VMSWAP) */
 1034                         panic("%s: pagein", __func__);
 1035 #endif /* defined(VMSWAP) */
 1036                 }
 1037 
 1038                 /*
  1039                  * note that we will allow the page to be writably mapped
 1040                  * (!PG_RDONLY) regardless of access_type.  if overwrite,
 1041                  * the page can be modified through an unmanaged mapping
 1042                  * so mark it dirty up front.
 1043                  */
 1044                 if (overwrite) {
 1045                         uvm_pagemarkdirty(ptmp, UVM_PAGE_STATUS_DIRTY);
 1046                 } else {
 1047                         uvm_pagemarkdirty(ptmp, UVM_PAGE_STATUS_UNKNOWN);
 1048                 }
 1049 
 1050                 /*
 1051                  * we got the page!   clear the fake flag (indicates valid
 1052                  * data now in page) and plug into our result array.   note
 1053                  * that page is still busy.
 1054                  *
 1055                  * it is the callers job to:
 1056                  * => check if the page is released
 1057                  * => unbusy the page
 1058                  * => activate the page
 1059                  */
 1060                 KASSERT(uvm_pagegetdirty(ptmp) != UVM_PAGE_STATUS_CLEAN);
 1061                 KASSERT((ptmp->flags & PG_FAKE) != 0);
 1062                 KASSERT(ptmp->offset == current_offset);
 1063                 ptmp->flags &= ~PG_FAKE;
 1064                 pps[lcv++] = ptmp;
 1065                 current_offset += PAGE_SIZE;
 1066         }
 1067         uvm_page_array_fini(&a);
 1068 
 1069         /*
 1070          * finally, unlock object and return.
 1071          */
 1072 
 1073 done:
 1074         rw_exit(uobj->vmobjlock);
 1075         UVMHIST_LOG(pdhist, "<- done (OK)",0,0,0,0);
 1076         return 0;
 1077 }
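
       /*
        * Example (sketch): uao_pagein_page() below is a real caller.  A
        * synchronous single-page fetch, entered with the object
        * write-locked (uao_get() unlocks it), looks like:
        *
        *      struct vm_page *pg = NULL;
        *      int npages = 1;
        *      error = uao_get(uobj, off, &pg, &npages, 0,
        *          VM_PROT_READ, 0, PGO_SYNCIO);
        *
        * on success the page is returned busy and the caller must un-busy it.
        */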
 1078 
 1079 #if defined(VMSWAP)
 1080 
 1081 /*
 1082  * uao_dropswap:  release any swap resources from this aobj page.
 1083  *
 1084  * => aobj must be locked or have a reference count of 0.
 1085  */
 1086 
 1087 void
 1088 uao_dropswap(struct uvm_object *uobj, int pageidx)
 1089 {
 1090         int slot;
 1091 
 1092         KASSERT(UVM_OBJ_IS_AOBJ(uobj));
 1093 
 1094         slot = uao_set_swslot(uobj, pageidx, 0);
 1095         if (slot) {
 1096                 uvm_swap_free(slot, 1);
 1097         }
 1098 }
 1099 
 1100 /*
  1101  * uao_swap_off: page in every page in every aobj that is paged out to a range of swslots.
 1102  *
 1103  * => nothing should be locked.
 1104  * => returns true if pagein was aborted due to lack of memory.
 1105  */
 1106 
 1107 bool
 1108 uao_swap_off(int startslot, int endslot)
 1109 {
 1110         struct uvm_aobj *aobj;
 1111 
 1112         /*
 1113          * Walk the list of all anonymous UVM objects.  Grab the first.
 1114          */
 1115         mutex_enter(&uao_list_lock);
 1116         if ((aobj = LIST_FIRST(&uao_list)) == NULL) {
 1117                 mutex_exit(&uao_list_lock);
 1118                 return false;
 1119         }
 1120         uao_reference(&aobj->u_obj);
 1121 
 1122         do {
 1123                 struct uvm_aobj *nextaobj;
 1124                 bool rv;
 1125 
 1126                 /*
 1127                  * Prefetch the next object and immediately hold a reference
 1128                  * on it, so neither the current nor the next entry could
 1129                  * disappear while we are iterating.
 1130                  */
 1131                 if ((nextaobj = LIST_NEXT(aobj, u_list)) != NULL) {
 1132                         uao_reference(&nextaobj->u_obj);
 1133                 }
 1134                 mutex_exit(&uao_list_lock);
 1135 
 1136                 /*
 1137                  * Page in all pages in the swap slot range.
 1138                  */
 1139                 rw_enter(aobj->u_obj.vmobjlock, RW_WRITER);
 1140                 rv = uao_pagein(aobj, startslot, endslot);
 1141                 rw_exit(aobj->u_obj.vmobjlock);
 1142 
 1143                 /* Drop the reference of the current object. */
 1144                 uao_detach(&aobj->u_obj);
 1145                 if (rv) {
 1146                         if (nextaobj) {
 1147                                 uao_detach(&nextaobj->u_obj);
 1148                         }
 1149                         return rv;
 1150                 }
 1151 
 1152                 aobj = nextaobj;
 1153                 mutex_enter(&uao_list_lock);
 1154         } while (aobj);
 1155 
 1156         mutex_exit(&uao_list_lock);
 1157         return false;
 1158 }
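
       /*
        * Context note (illustrative): the swap subsystem uses this when a
        * range of swap slots is going away, e.g. while a swap device is
        * being disabled, so that no aobj page remains only in those slots.
        */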
 1159 
 1160 /*
  1161  * uao_pagein: page in any pages from aobj in the given range.
 1162  *
 1163  * => aobj must be locked and is returned locked.
 1164  * => returns true if pagein was aborted due to lack of memory.
 1165  */
 1166 static bool
 1167 uao_pagein(struct uvm_aobj *aobj, int startslot, int endslot)
 1168 {
 1169         bool rv;
 1170 
 1171         if (UAO_USES_SWHASH(aobj)) {
 1172                 struct uao_swhash_elt *elt;
 1173                 int buck;
 1174 
 1175 restart:
 1176                 for (buck = aobj->u_swhashmask; buck >= 0; buck--) {
 1177                         for (elt = LIST_FIRST(&aobj->u_swhash[buck]);
 1178                              elt != NULL;
 1179                              elt = LIST_NEXT(elt, list)) {
 1180                                 int i;
 1181 
 1182                                 for (i = 0; i < UAO_SWHASH_CLUSTER_SIZE; i++) {
 1183                                         int slot = elt->slots[i];
 1184 
 1185                                         /*
 1186                                          * if the slot isn't in range, skip it.
 1187                                          */
 1188 
 1189                                         if (slot < startslot ||
 1190                                             slot >= endslot) {
 1191                                                 continue;
 1192                                         }
 1193 
 1194                                         /*
 1195                                          * process the page,
  1196                                          * then start over on this object
 1197                                          * since the swhash elt
 1198                                          * may have been freed.
 1199                                          */
 1200 
 1201                                         rv = uao_pagein_page(aobj,
 1202                                           UAO_SWHASH_ELT_PAGEIDX_BASE(elt) + i);
 1203                                         if (rv) {
 1204                                                 return rv;
 1205                                         }
 1206                                         goto restart;
 1207                                 }
 1208                         }
 1209                 }
 1210         } else {
 1211                 int i;
 1212 
 1213                 for (i = 0; i < aobj->u_pages; i++) {
 1214                         int slot = aobj->u_swslots[i];
 1215 
 1216                         /*
 1217                          * if the slot isn't in range, skip it
 1218                          */
 1219 
 1220                         if (slot < startslot || slot >= endslot) {
 1221                                 continue;
 1222                         }
 1223 
 1224                         /*
 1225                          * process the page.
 1226                          */
 1227 
 1228                         rv = uao_pagein_page(aobj, i);
 1229                         if (rv) {
 1230                                 return rv;
 1231                         }
 1232                 }
 1233         }
 1234 
 1235         return false;
 1236 }
 1237 
 1238 /*
 1239  * uao_pagein_page: page in a single page from an anonymous UVM object.
 1240  *
 1241  * => Returns true if pagein was aborted due to lack of memory.
 1242  * => Object must be locked and is returned locked.
 1243  */
 1244 
 1245 static bool
 1246 uao_pagein_page(struct uvm_aobj *aobj, int pageidx)
 1247 {
 1248         struct uvm_object *uobj = &aobj->u_obj;
 1249         struct vm_page *pg;
 1250         int rv, npages;
 1251 
 1252         pg = NULL;
 1253         npages = 1;
 1254 
 1255         KASSERT(rw_write_held(uobj->vmobjlock));
 1256         rv = uao_get(uobj, (voff_t)pageidx << PAGE_SHIFT, &pg, &npages,
 1257             0, VM_PROT_READ | VM_PROT_WRITE, 0, PGO_SYNCIO);
 1258 
 1259         /*
 1260          * relock and finish up.
 1261          */
 1262 
 1263         rw_enter(uobj->vmobjlock, RW_WRITER);
 1264         switch (rv) {
 1265         case 0:
 1266                 break;
 1267 
 1268         case EIO:
 1269         case ERESTART:
 1270 
 1271                 /*
 1272                  * nothing more to do on errors.
 1273                  * ERESTART can only mean that the anon was freed,
 1274                  * so again there's nothing to do.
 1275                  */
 1276 
 1277                 return false;
 1278 
 1279         default:
 1280                 return true;
 1281         }
 1282 
 1283         /*
 1284          * ok, we've got the page now.
 1285          * mark it as dirty, clear its swslot and un-busy it.
 1286          */
 1287         uao_dropswap(&aobj->u_obj, pageidx);
 1288 
 1289         /*
 1290          * make sure it's on a page queue.
 1291          */
 1292         uvm_pagelock(pg);
 1293         uvm_pageenqueue(pg);
 1294         uvm_pagewakeup(pg);
 1295         uvm_pageunlock(pg);
 1296 
 1297         pg->flags &= ~(PG_BUSY|PG_FAKE);
 1298         uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
 1299         UVM_PAGE_OWN(pg, NULL);
 1300 
 1301         return false;
 1302 }
 1303 
 1304 /*
 1305  * uao_dropswap_range: drop swapslots in the range.
 1306  *
 1307  * => aobj must be locked and is returned locked.
 1308  * => start is inclusive.  end is exclusive.
 1309  */
 1310 
 1311 void
 1312 uao_dropswap_range(struct uvm_object *uobj, voff_t start, voff_t end)
 1313 {
 1314         struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
 1315         int swpgonlydelta = 0;
 1316 
 1317         KASSERT(UVM_OBJ_IS_AOBJ(uobj));
 1318         KASSERT(rw_write_held(uobj->vmobjlock));
 1319 
 1320         if (end == 0) {
 1321                 end = INT64_MAX;
 1322         }
 1323 
 1324         if (UAO_USES_SWHASH(aobj)) {
 1325                 int i, hashbuckets = aobj->u_swhashmask + 1;
 1326                 voff_t taghi;
 1327                 voff_t taglo;
 1328 
 1329                 taglo = UAO_SWHASH_ELT_TAG(start);
 1330                 taghi = UAO_SWHASH_ELT_TAG(end);
 1331 
 1332                 for (i = 0; i < hashbuckets; i++) {
 1333                         struct uao_swhash_elt *elt, *next;
 1334 
 1335                         for (elt = LIST_FIRST(&aobj->u_swhash[i]);
 1336                              elt != NULL;
 1337                              elt = next) {
 1338                                 int startidx, endidx;
 1339                                 int j;
 1340 
 1341                                 next = LIST_NEXT(elt, list);
 1342 
 1343                                 if (elt->tag < taglo || taghi < elt->tag) {
 1344                                         continue;
 1345                                 }
 1346 
 1347                                 if (elt->tag == taglo) {
 1348                                         startidx =
 1349                                             UAO_SWHASH_ELT_PAGESLOT_IDX(start);
 1350                                 } else {
 1351                                         startidx = 0;
 1352                                 }
 1353 
 1354                                 if (elt->tag == taghi) {
 1355                                         endidx =
 1356                                             UAO_SWHASH_ELT_PAGESLOT_IDX(end);
 1357                                 } else {
 1358                                         endidx = UAO_SWHASH_CLUSTER_SIZE;
 1359                                 }
 1360 
 1361                                 for (j = startidx; j < endidx; j++) {
 1362                                         int slot = elt->slots[j];
 1363 
 1364                                         KASSERT(uvm_pagelookup(&aobj->u_obj,
 1365                                             (UAO_SWHASH_ELT_PAGEIDX_BASE(elt)
 1366                                             + j) << PAGE_SHIFT) == NULL);
 1367                                         if (slot > 0) {
 1368                                                 uvm_swap_free(slot, 1);
 1369                                                 swpgonlydelta++;
 1370                                                 KASSERT(elt->count > 0);
 1371                                                 elt->slots[j] = 0;
 1372                                                 elt->count--;
 1373                                         }
 1374                                 }
 1375 
 1376                                 if (elt->count == 0) {
 1377                                         LIST_REMOVE(elt, list);
 1378                                         pool_put(&uao_swhash_elt_pool, elt);
 1379                                 }
 1380                         }
 1381                 }
 1382         } else {
 1383                 int i;
 1384 
 1385                 if (aobj->u_pages < end) {
 1386                         end = aobj->u_pages;
 1387                 }
 1388                 for (i = start; i < end; i++) {
 1389                         int slot = aobj->u_swslots[i];
 1390 
 1391                         if (slot > 0) {
 1392                                 uvm_swap_free(slot, 1);
 1393                                 swpgonlydelta++;
 1394                         }
 1395                 }
 1396         }
 1397 
 1398         /*
 1399          * adjust the counter of pages only in swap for all
 1400          * the swap slots we've freed.
 1401          */
 1402 
 1403         if (swpgonlydelta > 0) {
 1404                 KASSERT(uvmexp.swpgonly >= swpgonlydelta);
 1405                 atomic_add_int(&uvmexp.swpgonly, -swpgonlydelta);
 1406         }
 1407 }
 1408 
 1409 #endif /* defined(VMSWAP) */
