
FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_page.c


    1 /*-
    2  * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
    3  *
    4  * Copyright (c) 1991 Regents of the University of California.
    5  * All rights reserved.
    6  * Copyright (c) 1998 Matthew Dillon.  All Rights Reserved.
    7  *
    8  * This code is derived from software contributed to Berkeley by
    9  * The Mach Operating System project at Carnegie-Mellon University.
   10  *
   11  * Redistribution and use in source and binary forms, with or without
   12  * modification, are permitted provided that the following conditions
   13  * are met:
   14  * 1. Redistributions of source code must retain the above copyright
   15  *    notice, this list of conditions and the following disclaimer.
   16  * 2. Redistributions in binary form must reproduce the above copyright
   17  *    notice, this list of conditions and the following disclaimer in the
   18  *    documentation and/or other materials provided with the distribution.
   19  * 3. Neither the name of the University nor the names of its contributors
   20  *    may be used to endorse or promote products derived from this software
   21  *    without specific prior written permission.
   22  *
   23  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   26  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   33  * SUCH DAMAGE.
   34  *
   35  *      from: @(#)vm_page.c     7.4 (Berkeley) 5/7/91
   36  */
   37 
   38 /*-
   39  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
   40  * All rights reserved.
   41  *
   42  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
   43  *
   44  * Permission to use, copy, modify and distribute this software and
   45  * its documentation is hereby granted, provided that both the copyright
   46  * notice and this permission notice appear in all copies of the
   47  * software, derivative works or modified versions, and any portions
   48  * thereof, and that both notices appear in supporting documentation.
   49  *
   50  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
   51  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
   52  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
   53  *
   54  * Carnegie Mellon requests users of this software to return to
   55  *
   56  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
   57  *  School of Computer Science
   58  *  Carnegie Mellon University
   59  *  Pittsburgh PA 15213-3890
   60  *
   61  * any improvements or extensions that they make and grant Carnegie the
   62  * rights to redistribute these changes.
   63  */
   64 
   65 /*
   66  *                      GENERAL RULES ON VM_PAGE MANIPULATION
   67  *
   68  *      - A page queue lock is required when adding or removing a page from a
   69  *        page queue regardless of other locks or the busy state of a page.
   70  *
   71  *              * In general, no thread besides the page daemon can acquire or
   72  *                hold more than one page queue lock at a time.
   73  *
   74  *              * The page daemon can acquire and hold any pair of page queue
   75  *                locks in any order.
   76  *
   77  *      - The object lock is required when inserting or removing
   78  *        pages from an object (vm_page_insert() or vm_page_remove()).
   79  *
   80  */
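/*
 * A minimal sketch of a caller obeying the rules above (hypothetical caller;
 * "object", "m", and "pindex" are the caller's):
 *
 *	VM_OBJECT_WLOCK(object);
 *	if (vm_page_insert(m, object, pindex) != 0) {
 *		... back out: the radix node allocation failed ...
 *	}
 *	VM_OBJECT_WUNLOCK(object);
 *
 *	vm_page_lock(m);
 *	vm_page_activate(m);	-- queue transition; the page queue lock is
 *	vm_page_unlock(m);	   taken internally as needed
 */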
   81 
   82 /*
   83  *      Resident memory management module.
   84  */
   85 
   86 #include <sys/cdefs.h>
   87 __FBSDID("$FreeBSD: releng/12.0/sys/vm/vm_page.c 341251 2018-11-29 17:54:03Z markj $");
   88 
   89 #include "opt_vm.h"
   90 
   91 #include <sys/param.h>
   92 #include <sys/systm.h>
   93 #include <sys/lock.h>
   94 #include <sys/domainset.h>
   95 #include <sys/kernel.h>
   96 #include <sys/limits.h>
   97 #include <sys/linker.h>
   98 #include <sys/malloc.h>
   99 #include <sys/mman.h>
  100 #include <sys/msgbuf.h>
  101 #include <sys/mutex.h>
  102 #include <sys/proc.h>
  103 #include <sys/rwlock.h>
  104 #include <sys/sbuf.h>
  105 #include <sys/sched.h>
  106 #include <sys/smp.h>
  107 #include <sys/sysctl.h>
  108 #include <sys/vmmeter.h>
  109 #include <sys/vnode.h>
  110 
  111 #include <vm/vm.h>
  112 #include <vm/pmap.h>
  113 #include <vm/vm_param.h>
  114 #include <vm/vm_domainset.h>
  115 #include <vm/vm_kern.h>
  116 #include <vm/vm_map.h>
  117 #include <vm/vm_object.h>
  118 #include <vm/vm_page.h>
  119 #include <vm/vm_pageout.h>
  120 #include <vm/vm_phys.h>
  121 #include <vm/vm_pagequeue.h>
  122 #include <vm/vm_pager.h>
  123 #include <vm/vm_radix.h>
  124 #include <vm/vm_reserv.h>
  125 #include <vm/vm_extern.h>
  126 #include <vm/uma.h>
  127 #include <vm/uma_int.h>
  128 
  129 #include <machine/md_var.h>
  130 
  131 extern int      uma_startup_count(int);
  132 extern void     uma_startup(void *, int);
  133 extern int      vmem_startup_count(void);
  134 
  135 struct vm_domain vm_dom[MAXMEMDOM];
  136 
  137 DPCPU_DEFINE_STATIC(struct vm_batchqueue, pqbatch[MAXMEMDOM][PQ_COUNT]);
  138 
  139 struct mtx_padalign __exclusive_cache_line pa_lock[PA_LOCK_COUNT];
  140 
  141 struct mtx_padalign __exclusive_cache_line vm_domainset_lock;
  142 /* The following fields are protected by the domainset lock. */
  143 domainset_t __exclusive_cache_line vm_min_domains;
  144 domainset_t __exclusive_cache_line vm_severe_domains;
  145 static int vm_min_waiters;
  146 static int vm_severe_waiters;
  147 static int vm_pageproc_waiters;
  148 
  149 /*
  150  * bogus page -- for I/O to/from partially complete buffers,
  151  * or for paging into sparsely invalid regions.
  152  */
  153 vm_page_t bogus_page;
  154 
  155 vm_page_t vm_page_array;
  156 long vm_page_array_size;
  157 long first_page;
  158 
  159 static int boot_pages;
  160 SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
  161     &boot_pages, 0,
  162     "number of pages allocated for bootstrapping the VM system");
  163 
  164 static int pa_tryrelock_restart;
  165 SYSCTL_INT(_vm, OID_AUTO, tryrelock_restart, CTLFLAG_RD,
  166     &pa_tryrelock_restart, 0, "Number of tryrelock restarts");
  167 
  168 static TAILQ_HEAD(, vm_page) blacklist_head;
  169 static int sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS);
  170 SYSCTL_PROC(_vm, OID_AUTO, page_blacklist, CTLTYPE_STRING | CTLFLAG_RD |
  171     CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_page_blacklist, "A", "Blacklist pages");
  172 
  173 static uma_zone_t fakepg_zone;
  174 
  175 static void vm_page_alloc_check(vm_page_t m);
  176 static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
  177 static void vm_page_dequeue_complete(vm_page_t m);
  178 static void vm_page_enqueue(vm_page_t m, uint8_t queue);
  179 static void vm_page_init(void *dummy);
  180 static int vm_page_insert_after(vm_page_t m, vm_object_t object,
  181     vm_pindex_t pindex, vm_page_t mpred);
  182 static void vm_page_insert_radixdone(vm_page_t m, vm_object_t object,
  183     vm_page_t mpred);
  184 static int vm_page_reclaim_run(int req_class, int domain, u_long npages,
  185     vm_page_t m_run, vm_paddr_t high);
  186 static int vm_domain_alloc_fail(struct vm_domain *vmd, vm_object_t object,
  187     int req);
  188 static int vm_page_import(void *arg, void **store, int cnt, int domain,
  189     int flags);
  190 static void vm_page_release(void *arg, void **store, int cnt);
  191 
  192 SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init, NULL);
  193 
  194 static void
  195 vm_page_init(void *dummy)
  196 {
  197 
  198         fakepg_zone = uma_zcreate("fakepg", sizeof(struct vm_page), NULL, NULL,
  199             NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE | UMA_ZONE_VM);
  200         bogus_page = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ |
  201             VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
  202 }
  203 
  204 /*
  205  * The cache page zone is initialized later since we need to be able to allocate
  206  * pages before UMA is fully initialized.
  207  */
  208 static void
  209 vm_page_init_cache_zones(void *dummy __unused)
  210 {
  211         struct vm_domain *vmd;
  212         int i;
  213 
  214         for (i = 0; i < vm_ndomains; i++) {
  215                 vmd = VM_DOMAIN(i);
  216                 /*
  217                  * Don't allow the page cache to take up more than .25% of
  218                  * memory.
  219                  */
  220                 if (vmd->vmd_page_count / 400 < 256 * mp_ncpus)
  221                         continue;
  222                 vmd->vmd_pgcache = uma_zcache_create("vm pgcache",
  223                     sizeof(struct vm_page), NULL, NULL, NULL, NULL,
  224                     vm_page_import, vm_page_release, vmd,
  225                     UMA_ZONE_NOBUCKETCACHE | UMA_ZONE_MAXBUCKET | UMA_ZONE_VM);
  226         }
  227 }
  228 SYSINIT(vm_page2, SI_SUB_VM_CONF, SI_ORDER_ANY, vm_page_init_cache_zones, NULL);
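/*
 * A worked example of the threshold above (illustrative only, assuming 4 KB
 * pages and mp_ncpus == 8): the "vm pgcache" zone is created for a domain
 * only when vmd_page_count / 400 is at least 256 * 8 = 2048, i.e. when the
 * domain has at least 400 * 2048 = 819,200 pages, roughly 3 GB of memory.
 */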
  229 
  230 /* Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K. */
  231 #if PAGE_SIZE == 32768
  232 #ifdef CTASSERT
  233 CTASSERT(sizeof(u_long) >= 8);
  234 #endif
  235 #endif
  236 
  237 /*
  238  * Try to acquire a physical address lock while a pmap is locked.  If we
  239  * fail to trylock we unlock and lock the pmap directly and cache the
  240  * locked pa in *locked.  The caller should then restart their loop in case
  241  * the virtual to physical mapping has changed.
  242  */
  243 int
  244 vm_page_pa_tryrelock(pmap_t pmap, vm_paddr_t pa, vm_paddr_t *locked)
  245 {
  246         vm_paddr_t lockpa;
  247 
  248         lockpa = *locked;
  249         *locked = pa;
  250         if (lockpa) {
  251                 PA_LOCK_ASSERT(lockpa, MA_OWNED);
  252                 if (PA_LOCKPTR(pa) == PA_LOCKPTR(lockpa))
  253                         return (0);
  254                 PA_UNLOCK(lockpa);
  255         }
  256         if (PA_TRYLOCK(pa))
  257                 return (0);
  258         PMAP_UNLOCK(pmap);
  259         atomic_add_int(&pa_tryrelock_restart, 1);
  260         PA_LOCK(pa);
  261         PMAP_LOCK(pmap);
  262         return (EAGAIN);
  263 }
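/*
 * A minimal sketch of the caller-side restart pattern described above
 * (hypothetical; the pmap consumers of this interface look similar):
 *
 *	vm_paddr_t locked_pa = 0;
 *
 * retry:
 *	pa = ...;		-- virtual-to-physical translation under the
 *				   pmap lock
 *	if (vm_page_pa_tryrelock(pmap, pa, &locked_pa) != 0)
 *		goto retry;	-- the mapping may have changed while the
 *				   pmap lock was dropped
 *	... operate on the page at "pa" with its PA lock held ...
 *	PA_UNLOCK_COND(locked_pa);
 */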
  264 
  265 /*
  266  *      vm_set_page_size:
  267  *
  268  *      Sets the page size, perhaps based upon the memory
  269  *      size.  Must be called before any use of page-size
  270  *      dependent functions.
  271  */
  272 void
  273 vm_set_page_size(void)
  274 {
  275         if (vm_cnt.v_page_size == 0)
  276                 vm_cnt.v_page_size = PAGE_SIZE;
  277         if (((vm_cnt.v_page_size - 1) & vm_cnt.v_page_size) != 0)
  278                 panic("vm_set_page_size: page size not a power of two");
  279 }
  280 
  281 /*
  282  *      vm_page_blacklist_next:
  283  *
  284  *      Find the next entry in the provided string of blacklist
  285  *      addresses.  Entries are separated by space, comma, or newline.
  286  *      If an invalid integer is encountered then the rest of the
  287  *      string is skipped.  Updates the list pointer to the next
  288  *      character, or NULL if the string is exhausted or invalid.
  289  */
  290 static vm_paddr_t
  291 vm_page_blacklist_next(char **list, char *end)
  292 {
  293         vm_paddr_t bad;
  294         char *cp, *pos;
  295 
  296         if (list == NULL || *list == NULL)
  297                 return (0);
  298         if (**list =='\0') {
  299                 *list = NULL;
  300                 return (0);
  301         }
  302 
  303         /*
  304          * If there's no end pointer then the buffer is coming from
  305          * the kenv and we know it's null-terminated.
  306          */
  307         if (end == NULL)
  308                 end = *list + strlen(*list);
  309 
  310         /* Ensure that strtoq() won't walk off the end */
  311         if (*end != '\0') {
  312                 if (*end == '\n' || *end == ' ' || *end  == ',')
  313                         *end = '\0';
  314                 else {
  315                         printf("Blacklist not terminated, skipping\n");
  316                         *list = NULL;
  317                         return (0);
  318                 }
  319         }
  320 
  321         for (pos = *list; *pos != '\0'; pos = cp) {
  322                 bad = strtoq(pos, &cp, 0);
  323                 if (*cp == '\0' || *cp == ' ' || *cp == ',' || *cp == '\n') {
  324                         if (bad == 0) {
  325                                 if (++cp < end)
  326                                         continue;
  327                                 else
  328                                         break;
  329                         }
  330                 } else
  331                         break;
  332                 if (*cp == '\0' || ++cp >= end)
  333                         *list = NULL;
  334                 else
  335                         *list = cp;
  336                 return (trunc_page(bad));
  337         }
  338         printf("Garbage in RAM blacklist, skipping\n");
  339         *list = NULL;
  340         return (0);
  341 }
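/*
 * Example (illustrative): given the string "0x7e12345,0x1000", successive
 * calls return trunc_page(0x7e12345) and trunc_page(0x1000) -- with 4 KB
 * pages, 0x7e12000 and 0x1000 -- and *list is set to NULL once the string is
 * exhausted.  Zero-valued entries are skipped.
 */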
  342 
  343 bool
  344 vm_page_blacklist_add(vm_paddr_t pa, bool verbose)
  345 {
  346         struct vm_domain *vmd;
  347         vm_page_t m;
  348         int ret;
  349 
  350         m = vm_phys_paddr_to_vm_page(pa);
  351         if (m == NULL)
  352                 return (true); /* page does not exist, no failure */
  353 
  354         vmd = vm_pagequeue_domain(m);
  355         vm_domain_free_lock(vmd);
  356         ret = vm_phys_unfree_page(m);
  357         vm_domain_free_unlock(vmd);
  358         if (ret != 0) {
  359                 vm_domain_freecnt_inc(vmd, -1);
  360                 TAILQ_INSERT_TAIL(&blacklist_head, m, listq);
  361                 if (verbose)
  362                         printf("Skipping page with pa 0x%jx\n", (uintmax_t)pa);
  363         }
  364         return (ret);
  365 }
  366 
  367 /*
  368  *      vm_page_blacklist_check:
  369  *
  370  *      Iterate through the provided string of blacklist addresses, pulling
  371  *      each entry out of the physical allocator free list and putting it
  372  *      onto a list for reporting via the vm.page_blacklist sysctl.
  373  */
  374 static void
  375 vm_page_blacklist_check(char *list, char *end)
  376 {
  377         vm_paddr_t pa;
  378         char *next;
  379 
  380         next = list;
  381         while (next != NULL) {
  382                 if ((pa = vm_page_blacklist_next(&next, end)) == 0)
  383                         continue;
  384                 vm_page_blacklist_add(pa, bootverbose);
  385         }
  386 }
  387 
  388 /*
  389  *      vm_page_blacklist_load:
  390  *
  391  *      Search for a special module named "ram_blacklist".  It'll be a
  392  *      plain text file provided by the user via the loader directive
  393  *      of the same name.
  394  */
  395 static void
  396 vm_page_blacklist_load(char **list, char **end)
  397 {
  398         void *mod;
  399         u_char *ptr;
  400         u_int len;
  401 
  402         mod = NULL;
  403         ptr = NULL;
  404 
  405         mod = preload_search_by_type("ram_blacklist");
  406         if (mod != NULL) {
  407                 ptr = preload_fetch_addr(mod);
  408                 len = preload_fetch_size(mod);
  409         }
  410         *list = ptr;
  411         if (ptr != NULL)
  412                 *end = ptr + len;
  413         else
  414                 *end = NULL;
  415         return;
  416 }
  417 
  418 static int
  419 sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS)
  420 {
  421         vm_page_t m;
  422         struct sbuf sbuf;
  423         int error, first;
  424 
  425         first = 1;
  426         error = sysctl_wire_old_buffer(req, 0);
  427         if (error != 0)
  428                 return (error);
  429         sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
  430         TAILQ_FOREACH(m, &blacklist_head, listq) {
  431                 sbuf_printf(&sbuf, "%s%#jx", first ? "" : ",",
  432                     (uintmax_t)m->phys_addr);
  433                 first = 0;
  434         }
  435         error = sbuf_finish(&sbuf);
  436         sbuf_delete(&sbuf);
  437         return (error);
  438 }
  439 
  440 /*
  441  * Initialize a dummy page for use in scans of the specified paging queue.
  442  * In principle, this function only needs to set the flag PG_MARKER.
  443  * Nonetheless, it write busies and initializes the hold count to one as
  444  * safety precautions.
  445  */
  446 static void
  447 vm_page_init_marker(vm_page_t marker, int queue, uint8_t aflags)
  448 {
  449 
  450         bzero(marker, sizeof(*marker));
  451         marker->flags = PG_MARKER;
  452         marker->aflags = aflags;
  453         marker->busy_lock = VPB_SINGLE_EXCLUSIVER;
  454         marker->queue = queue;
  455         marker->hold_count = 1;
  456 }
  457 
  458 static void
  459 vm_page_domain_init(int domain)
  460 {
  461         struct vm_domain *vmd;
  462         struct vm_pagequeue *pq;
  463         int i;
  464 
  465         vmd = VM_DOMAIN(domain);
  466         bzero(vmd, sizeof(*vmd));
  467         *__DECONST(char **, &vmd->vmd_pagequeues[PQ_INACTIVE].pq_name) =
  468             "vm inactive pagequeue";
  469         *__DECONST(char **, &vmd->vmd_pagequeues[PQ_ACTIVE].pq_name) =
  470             "vm active pagequeue";
  471         *__DECONST(char **, &vmd->vmd_pagequeues[PQ_LAUNDRY].pq_name) =
  472             "vm laundry pagequeue";
  473         *__DECONST(char **, &vmd->vmd_pagequeues[PQ_UNSWAPPABLE].pq_name) =
  474             "vm unswappable pagequeue";
  475         vmd->vmd_domain = domain;
  476         vmd->vmd_page_count = 0;
  477         vmd->vmd_free_count = 0;
  478         vmd->vmd_segs = 0;
  479         vmd->vmd_oom = FALSE;
  480         for (i = 0; i < PQ_COUNT; i++) {
  481                 pq = &vmd->vmd_pagequeues[i];
  482                 TAILQ_INIT(&pq->pq_pl);
  483                 mtx_init(&pq->pq_mutex, pq->pq_name, "vm pagequeue",
  484                     MTX_DEF | MTX_DUPOK);
  485                 pq->pq_pdpages = 0;
  486                 vm_page_init_marker(&vmd->vmd_markers[i], i, 0);
  487         }
  488         mtx_init(&vmd->vmd_free_mtx, "vm page free queue", NULL, MTX_DEF);
  489         mtx_init(&vmd->vmd_pageout_mtx, "vm pageout lock", NULL, MTX_DEF);
  490         snprintf(vmd->vmd_name, sizeof(vmd->vmd_name), "%d", domain);
  491 
  492         /*
  493          * inacthead is used to provide FIFO ordering for LRU-bypassing
  494          * insertions.
  495          */
  496         vm_page_init_marker(&vmd->vmd_inacthead, PQ_INACTIVE, PGA_ENQUEUED);
  497         TAILQ_INSERT_HEAD(&vmd->vmd_pagequeues[PQ_INACTIVE].pq_pl,
  498             &vmd->vmd_inacthead, plinks.q);
  499 
  500         /*
  501          * The clock pages are used to implement active queue scanning without
  502          * requeues.  Scans start at clock[0], which is advanced after the scan
  503          * ends.  When the two clock hands meet, they are reset and scanning
  504          * resumes from the head of the queue.
  505          */
  506         vm_page_init_marker(&vmd->vmd_clock[0], PQ_ACTIVE, PGA_ENQUEUED);
  507         vm_page_init_marker(&vmd->vmd_clock[1], PQ_ACTIVE, PGA_ENQUEUED);
  508         TAILQ_INSERT_HEAD(&vmd->vmd_pagequeues[PQ_ACTIVE].pq_pl,
  509             &vmd->vmd_clock[0], plinks.q);
  510         TAILQ_INSERT_TAIL(&vmd->vmd_pagequeues[PQ_ACTIVE].pq_pl,
  511             &vmd->vmd_clock[1], plinks.q);
  512 }
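/*
 * After initialization the active queue therefore has the shape
 *
 *	head -> clock[0] -> (pages) -> clock[1] -> tail
 *
 * (sketch based on the comment above): scans advance clock[0] through the
 * pages toward clock[1]; when the two markers meet they are reinserted at
 * the ends of the queue and scanning resumes from the head.
 */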
  513 
  514 /*
  515  * Initialize a physical page in preparation for adding it to the free
  516  * lists.
  517  */
  518 static void
  519 vm_page_init_page(vm_page_t m, vm_paddr_t pa, int segind)
  520 {
  521 
  522         m->object = NULL;
  523         m->wire_count = 0;
  524         m->busy_lock = VPB_UNBUSIED;
  525         m->hold_count = 0;
  526         m->flags = m->aflags = 0;
  527         m->phys_addr = pa;
  528         m->queue = PQ_NONE;
  529         m->psind = 0;
  530         m->segind = segind;
  531         m->order = VM_NFREEORDER;
  532         m->pool = VM_FREEPOOL_DEFAULT;
  533         m->valid = m->dirty = 0;
  534         pmap_page_init(m);
  535 }
  536 
  537 /*
  538  *      vm_page_startup:
  539  *
  540  *      Initializes the resident memory module.  Allocates physical memory for
  541  *      bootstrapping UMA and some data structures that are used to manage
  542  *      physical pages.  Initializes these structures, and populates the free
  543  *      page queues.
  544  */
  545 vm_offset_t
  546 vm_page_startup(vm_offset_t vaddr)
  547 {
  548         struct vm_phys_seg *seg;
  549         vm_page_t m;
  550         char *list, *listend;
  551         vm_offset_t mapped;
  552         vm_paddr_t end, high_avail, low_avail, new_end, page_range, size;
  553         vm_paddr_t biggestsize, last_pa, pa;
  554         u_long pagecount;
  555         int biggestone, i, segind;
  556 #if defined(__i386__) && defined(VM_PHYSSEG_DENSE)
  557         long ii;
  558 #endif
  559 
  560         biggestsize = 0;
  561         biggestone = 0;
  562         vaddr = round_page(vaddr);
  563 
  564         for (i = 0; phys_avail[i + 1]; i += 2) {
  565                 phys_avail[i] = round_page(phys_avail[i]);
  566                 phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
  567         }
  568         for (i = 0; phys_avail[i + 1]; i += 2) {
  569                 size = phys_avail[i + 1] - phys_avail[i];
  570                 if (size > biggestsize) {
  571                         biggestone = i;
  572                         biggestsize = size;
  573                 }
  574         }
  575 
  576         end = phys_avail[biggestone+1];
  577 
  578         /*
  579          * Initialize the page and queue locks.
  580          */
  581         mtx_init(&vm_domainset_lock, "vm domainset lock", NULL, MTX_DEF);
  582         for (i = 0; i < PA_LOCK_COUNT; i++)
  583                 mtx_init(&pa_lock[i], "vm page", NULL, MTX_DEF);
  584         for (i = 0; i < vm_ndomains; i++)
  585                 vm_page_domain_init(i);
  586 
  587         /*
  588          * Allocate memory for use when boot strapping the kernel memory
  589          * allocator.  Tell UMA how many zones we are going to create
  590          * before going fully functional.  UMA will add its zones.
  591          *
  592          * VM startup zones: vmem, vmem_btag, VM OBJECT, RADIX NODE, MAP,
  593          * KMAP ENTRY, MAP ENTRY, VMSPACE.
  594          */
  595         boot_pages = uma_startup_count(8);
  596 
  597 #ifndef UMA_MD_SMALL_ALLOC
  598         /* vmem_startup() calls uma_prealloc(). */
  599         boot_pages += vmem_startup_count();
  600         /* vm_map_startup() calls uma_prealloc(). */
  601         boot_pages += howmany(MAX_KMAP,
  602             UMA_SLAB_SPACE / sizeof(struct vm_map));
  603 
  604         /*
  605          * Before going fully functional kmem_init() does allocation
  606          * from "KMAP ENTRY" and vmem_create() does allocation from "vmem".
  607          */
  608         boot_pages += 2;
  609 #endif
  610         /*
   611          * CTLFLAG_RDTUN doesn't work during the early boot process, so we must
  612          * manually fetch the value.
  613          */
  614         TUNABLE_INT_FETCH("vm.boot_pages", &boot_pages);
  615         new_end = end - (boot_pages * UMA_SLAB_SIZE);
  616         new_end = trunc_page(new_end);
  617         mapped = pmap_map(&vaddr, new_end, end,
  618             VM_PROT_READ | VM_PROT_WRITE);
  619         bzero((void *)mapped, end - new_end);
  620         uma_startup((void *)mapped, boot_pages);
  621 
  622 #ifdef WITNESS
  623         end = new_end;
  624         new_end = end - round_page(witness_startup_count());
  625         mapped = pmap_map(&vaddr, new_end, end,
  626             VM_PROT_READ | VM_PROT_WRITE);
  627         bzero((void *)mapped, end - new_end);
  628         witness_startup((void *)mapped);
  629 #endif
  630 
  631 #if defined(__aarch64__) || defined(__amd64__) || defined(__arm__) || \
  632     defined(__i386__) || defined(__mips__)
  633         /*
  634          * Allocate a bitmap to indicate that a random physical page
  635          * needs to be included in a minidump.
  636          *
  637          * The amd64 port needs this to indicate which direct map pages
  638          * need to be dumped, via calls to dump_add_page()/dump_drop_page().
  639          *
  640          * However, i386 still needs this workspace internally within the
  641          * minidump code.  In theory, they are not needed on i386, but are
  642          * included should the sf_buf code decide to use them.
  643          */
  644         last_pa = 0;
  645         for (i = 0; dump_avail[i + 1] != 0; i += 2)
  646                 if (dump_avail[i + 1] > last_pa)
  647                         last_pa = dump_avail[i + 1];
  648         page_range = last_pa / PAGE_SIZE;
  649         vm_page_dump_size = round_page(roundup2(page_range, NBBY) / NBBY);
  650         new_end -= vm_page_dump_size;
  651         vm_page_dump = (void *)(uintptr_t)pmap_map(&vaddr, new_end,
  652             new_end + vm_page_dump_size, VM_PROT_READ | VM_PROT_WRITE);
  653         bzero((void *)vm_page_dump, vm_page_dump_size);
  654 #else
  655         (void)last_pa;
  656 #endif
  657 #if defined(__aarch64__) || defined(__amd64__) || defined(__mips__)
  658         /*
  659          * Include the UMA bootstrap pages and vm_page_dump in a crash dump.
  660          * When pmap_map() uses the direct map, they are not automatically 
  661          * included.
  662          */
  663         for (pa = new_end; pa < end; pa += PAGE_SIZE)
  664                 dump_add_page(pa);
  665 #endif
  666         phys_avail[biggestone + 1] = new_end;
  667 #ifdef __amd64__
  668         /*
  669          * Request that the physical pages underlying the message buffer be
  670          * included in a crash dump.  Since the message buffer is accessed
  671          * through the direct map, they are not automatically included.
  672          */
  673         pa = DMAP_TO_PHYS((vm_offset_t)msgbufp->msg_ptr);
  674         last_pa = pa + round_page(msgbufsize);
  675         while (pa < last_pa) {
  676                 dump_add_page(pa);
  677                 pa += PAGE_SIZE;
  678         }
  679 #endif
  680         /*
  681          * Compute the number of pages of memory that will be available for
  682          * use, taking into account the overhead of a page structure per page.
  683          * In other words, solve
  684          *      "available physical memory" - round_page(page_range *
  685          *          sizeof(struct vm_page)) = page_range * PAGE_SIZE 
  686          * for page_range.  
  687          */
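	/*
	 * Rearranged (ignoring the round_page() term), that equation gives
	 *
	 *	page_range = "available physical memory" /
	 *	    (PAGE_SIZE + sizeof(struct vm_page)),
	 *
	 * i.e. each usable page costs one PAGE_SIZE of memory plus one struct
	 * vm_page of metadata; this is the expression evaluated below when
	 * vm_page_array is carved out of the memory being accounted for.
	 */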
  688         low_avail = phys_avail[0];
  689         high_avail = phys_avail[1];
  690         for (i = 0; i < vm_phys_nsegs; i++) {
  691                 if (vm_phys_segs[i].start < low_avail)
  692                         low_avail = vm_phys_segs[i].start;
  693                 if (vm_phys_segs[i].end > high_avail)
  694                         high_avail = vm_phys_segs[i].end;
  695         }
  696         /* Skip the first chunk.  It is already accounted for. */
  697         for (i = 2; phys_avail[i + 1] != 0; i += 2) {
  698                 if (phys_avail[i] < low_avail)
  699                         low_avail = phys_avail[i];
  700                 if (phys_avail[i + 1] > high_avail)
  701                         high_avail = phys_avail[i + 1];
  702         }
  703         first_page = low_avail / PAGE_SIZE;
  704 #ifdef VM_PHYSSEG_SPARSE
  705         size = 0;
  706         for (i = 0; i < vm_phys_nsegs; i++)
  707                 size += vm_phys_segs[i].end - vm_phys_segs[i].start;
  708         for (i = 0; phys_avail[i + 1] != 0; i += 2)
  709                 size += phys_avail[i + 1] - phys_avail[i];
  710 #elif defined(VM_PHYSSEG_DENSE)
  711         size = high_avail - low_avail;
  712 #else
  713 #error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
  714 #endif
  715 
  716 #ifdef VM_PHYSSEG_DENSE
  717         /*
  718          * In the VM_PHYSSEG_DENSE case, the number of pages can account for
  719          * the overhead of a page structure per page only if vm_page_array is
  720          * allocated from the last physical memory chunk.  Otherwise, we must
  721          * allocate page structures representing the physical memory
  722          * underlying vm_page_array, even though they will not be used.
  723          */
  724         if (new_end != high_avail)
  725                 page_range = size / PAGE_SIZE;
  726         else
  727 #endif
  728         {
  729                 page_range = size / (PAGE_SIZE + sizeof(struct vm_page));
  730 
  731                 /*
  732                  * If the partial bytes remaining are large enough for
  733                  * a page (PAGE_SIZE) without a corresponding
  734                  * 'struct vm_page', then new_end will contain an
  735                  * extra page after subtracting the length of the VM
  736                  * page array.  Compensate by subtracting an extra
  737                  * page from new_end.
  738                  */
  739                 if (size % (PAGE_SIZE + sizeof(struct vm_page)) >= PAGE_SIZE) {
  740                         if (new_end == high_avail)
  741                                 high_avail -= PAGE_SIZE;
  742                         new_end -= PAGE_SIZE;
  743                 }
  744         }
  745         end = new_end;
  746 
  747         /*
  748          * Reserve an unmapped guard page to trap access to vm_page_array[-1].
  749          * However, because this page is allocated from KVM, out-of-bounds
  750          * accesses using the direct map will not be trapped.
  751          */
  752         vaddr += PAGE_SIZE;
  753 
  754         /*
  755          * Allocate physical memory for the page structures, and map it.
  756          */
  757         new_end = trunc_page(end - page_range * sizeof(struct vm_page));
  758         mapped = pmap_map(&vaddr, new_end, end,
  759             VM_PROT_READ | VM_PROT_WRITE);
  760         vm_page_array = (vm_page_t)mapped;
  761         vm_page_array_size = page_range;
  762 
  763 #if VM_NRESERVLEVEL > 0
  764         /*
  765          * Allocate physical memory for the reservation management system's
  766          * data structures, and map it.
  767          */
  768         if (high_avail == end)
  769                 high_avail = new_end;
  770         new_end = vm_reserv_startup(&vaddr, new_end, high_avail);
  771 #endif
  772 #if defined(__aarch64__) || defined(__amd64__) || defined(__mips__)
  773         /*
  774          * Include vm_page_array and vm_reserv_array in a crash dump.
  775          */
  776         for (pa = new_end; pa < end; pa += PAGE_SIZE)
  777                 dump_add_page(pa);
  778 #endif
  779         phys_avail[biggestone + 1] = new_end;
  780 
  781         /*
  782          * Add physical memory segments corresponding to the available
  783          * physical pages.
  784          */
  785         for (i = 0; phys_avail[i + 1] != 0; i += 2)
  786                 vm_phys_add_seg(phys_avail[i], phys_avail[i + 1]);
  787 
  788         /*
  789          * Initialize the physical memory allocator.
  790          */
  791         vm_phys_init();
  792 
  793         /*
  794          * Initialize the page structures and add every available page to the
  795          * physical memory allocator's free lists.
  796          */
  797 #if defined(__i386__) && defined(VM_PHYSSEG_DENSE)
  798         for (ii = 0; ii < vm_page_array_size; ii++) {
  799                 m = &vm_page_array[ii];
  800                 vm_page_init_page(m, (first_page + ii) << PAGE_SHIFT, 0);
  801                 m->flags = PG_FICTITIOUS;
  802         }
  803 #endif
  804         vm_cnt.v_page_count = 0;
  805         for (segind = 0; segind < vm_phys_nsegs; segind++) {
  806                 seg = &vm_phys_segs[segind];
  807                 for (m = seg->first_page, pa = seg->start; pa < seg->end;
  808                     m++, pa += PAGE_SIZE)
  809                         vm_page_init_page(m, pa, segind);
  810 
  811                 /*
  812                  * Add the segment to the free lists only if it is covered by
  813                  * one of the ranges in phys_avail.  Because we've added the
  814                  * ranges to the vm_phys_segs array, we can assume that each
  815                  * segment is either entirely contained in one of the ranges,
  816                  * or doesn't overlap any of them.
  817                  */
  818                 for (i = 0; phys_avail[i + 1] != 0; i += 2) {
  819                         struct vm_domain *vmd;
  820 
  821                         if (seg->start < phys_avail[i] ||
  822                             seg->end > phys_avail[i + 1])
  823                                 continue;
  824 
  825                         m = seg->first_page;
  826                         pagecount = (u_long)atop(seg->end - seg->start);
  827 
  828                         vmd = VM_DOMAIN(seg->domain);
  829                         vm_domain_free_lock(vmd);
  830                         vm_phys_free_contig(m, pagecount);
  831                         vm_domain_free_unlock(vmd);
  832                         vm_domain_freecnt_inc(vmd, pagecount);
  833                         vm_cnt.v_page_count += (u_int)pagecount;
  834 
  835                         vmd = VM_DOMAIN(seg->domain);
  836                         vmd->vmd_page_count += (u_int)pagecount;
  837                         vmd->vmd_segs |= 1UL << m->segind;
  838                         break;
  839                 }
  840         }
  841 
  842         /*
  843          * Remove blacklisted pages from the physical memory allocator.
  844          */
  845         TAILQ_INIT(&blacklist_head);
  846         vm_page_blacklist_load(&list, &listend);
  847         vm_page_blacklist_check(list, listend);
  848 
  849         list = kern_getenv("vm.blacklist");
  850         vm_page_blacklist_check(list, NULL);
  851 
  852         freeenv(list);
  853 #if VM_NRESERVLEVEL > 0
  854         /*
  855          * Initialize the reservation management system.
  856          */
  857         vm_reserv_init();
  858 #endif
  859 
  860         return (vaddr);
  861 }
  862 
  863 void
  864 vm_page_reference(vm_page_t m)
  865 {
  866 
  867         vm_page_aflag_set(m, PGA_REFERENCED);
  868 }
  869 
  870 /*
  871  *      vm_page_busy_downgrade:
  872  *
  873  *      Downgrade an exclusive busy page into a single shared busy page.
  874  */
  875 void
  876 vm_page_busy_downgrade(vm_page_t m)
  877 {
  878         u_int x;
  879         bool locked;
  880 
  881         vm_page_assert_xbusied(m);
  882         locked = mtx_owned(vm_page_lockptr(m));
  883 
  884         for (;;) {
  885                 x = m->busy_lock;
  886                 x &= VPB_BIT_WAITERS;
  887                 if (x != 0 && !locked)
  888                         vm_page_lock(m);
  889                 if (atomic_cmpset_rel_int(&m->busy_lock,
  890                     VPB_SINGLE_EXCLUSIVER | x, VPB_SHARERS_WORD(1)))
  891                         break;
  892                 if (x != 0 && !locked)
  893                         vm_page_unlock(m);
  894         }
  895         if (x != 0) {
  896                 wakeup(m);
  897                 if (!locked)
  898                         vm_page_unlock(m);
  899         }
  900 }
  901 
  902 /*
  903  *      vm_page_sbusied:
  904  *
  905  *      Return a positive value if the page is shared busied, 0 otherwise.
  906  */
  907 int
  908 vm_page_sbusied(vm_page_t m)
  909 {
  910         u_int x;
  911 
  912         x = m->busy_lock;
  913         return ((x & VPB_BIT_SHARED) != 0 && x != VPB_UNBUSIED);
  914 }
  915 
  916 /*
  917  *      vm_page_sunbusy:
  918  *
  919  *      Shared unbusy a page.
  920  */
  921 void
  922 vm_page_sunbusy(vm_page_t m)
  923 {
  924         u_int x;
  925 
  926         vm_page_lock_assert(m, MA_NOTOWNED);
  927         vm_page_assert_sbusied(m);
  928 
  929         for (;;) {
  930                 x = m->busy_lock;
  931                 if (VPB_SHARERS(x) > 1) {
  932                         if (atomic_cmpset_int(&m->busy_lock, x,
  933                             x - VPB_ONE_SHARER))
  934                                 break;
  935                         continue;
  936                 }
  937                 if ((x & VPB_BIT_WAITERS) == 0) {
  938                         KASSERT(x == VPB_SHARERS_WORD(1),
  939                             ("vm_page_sunbusy: invalid lock state"));
  940                         if (atomic_cmpset_int(&m->busy_lock,
  941                             VPB_SHARERS_WORD(1), VPB_UNBUSIED))
  942                                 break;
  943                         continue;
  944                 }
  945                 KASSERT(x == (VPB_SHARERS_WORD(1) | VPB_BIT_WAITERS),
  946                     ("vm_page_sunbusy: invalid lock state for waiters"));
  947 
  948                 vm_page_lock(m);
  949                 if (!atomic_cmpset_int(&m->busy_lock, x, VPB_UNBUSIED)) {
  950                         vm_page_unlock(m);
  951                         continue;
  952                 }
  953                 wakeup(m);
  954                 vm_page_unlock(m);
  955                 break;
  956         }
  957 }
  958 
  959 /*
  960  *      vm_page_busy_sleep:
  961  *
  962  *      Sleep and release the page lock, using the page pointer as wchan.
   963  *      This is used to implement the hard path of the busying mechanism.
  964  *
  965  *      The given page must be locked.
  966  *
  967  *      If nonshared is true, sleep only if the page is xbusy.
  968  */
  969 void
  970 vm_page_busy_sleep(vm_page_t m, const char *wmesg, bool nonshared)
  971 {
  972         u_int x;
  973 
  974         vm_page_assert_locked(m);
  975 
  976         x = m->busy_lock;
  977         if (x == VPB_UNBUSIED || (nonshared && (x & VPB_BIT_SHARED) != 0) ||
  978             ((x & VPB_BIT_WAITERS) == 0 &&
  979             !atomic_cmpset_int(&m->busy_lock, x, x | VPB_BIT_WAITERS))) {
  980                 vm_page_unlock(m);
  981                 return;
  982         }
  983         msleep(m, vm_page_lockptr(m), PVM | PDROP, wmesg, 0);
  984 }
  985 
  986 /*
  987  *      vm_page_trysbusy:
  988  *
  989  *      Try to shared busy a page.
   990  *      Returns 1 if the operation succeeds and 0 otherwise.
  991  *      The operation never sleeps.
  992  */
  993 int
  994 vm_page_trysbusy(vm_page_t m)
  995 {
  996         u_int x;
  997 
  998         for (;;) {
  999                 x = m->busy_lock;
 1000                 if ((x & VPB_BIT_SHARED) == 0)
 1001                         return (0);
 1002                 if (atomic_cmpset_acq_int(&m->busy_lock, x, x + VPB_ONE_SHARER))
 1003                         return (1);
 1004         }
 1005 }
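/*
 * A minimal usage sketch (hypothetical caller; the object lock is assumed to
 * be held so that the page cannot be freed underneath us):
 *
 *	if (vm_page_trysbusy(m)) {
 *		... read the page contents ...
 *		vm_page_sunbusy(m);
 *	} else {
 *		... fall back to vm_page_sleep_if_busy() and retry ...
 *	}
 */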
 1006 
 1007 static void
 1008 vm_page_xunbusy_locked(vm_page_t m)
 1009 {
 1010 
 1011         vm_page_assert_xbusied(m);
 1012         vm_page_assert_locked(m);
 1013 
 1014         atomic_store_rel_int(&m->busy_lock, VPB_UNBUSIED);
 1015         /* There is a waiter, do wakeup() instead of vm_page_flash(). */
 1016         wakeup(m);
 1017 }
 1018 
 1019 void
 1020 vm_page_xunbusy_maybelocked(vm_page_t m)
 1021 {
 1022         bool lockacq;
 1023 
 1024         vm_page_assert_xbusied(m);
 1025 
 1026         /*
 1027          * Fast path for unbusy.  If it succeeds, we know that there
 1028          * are no waiters, so we do not need a wakeup.
 1029          */
 1030         if (atomic_cmpset_rel_int(&m->busy_lock, VPB_SINGLE_EXCLUSIVER,
 1031             VPB_UNBUSIED))
 1032                 return;
 1033 
 1034         lockacq = !mtx_owned(vm_page_lockptr(m));
 1035         if (lockacq)
 1036                 vm_page_lock(m);
 1037         vm_page_xunbusy_locked(m);
 1038         if (lockacq)
 1039                 vm_page_unlock(m);
 1040 }
 1041 
 1042 /*
 1043  *      vm_page_xunbusy_hard:
 1044  *
  1045  *      Called after the first attempt to exclusively unbusy a page has failed.
 1046  *      It is assumed that the waiters bit is on.
 1047  */
 1048 void
 1049 vm_page_xunbusy_hard(vm_page_t m)
 1050 {
 1051 
 1052         vm_page_assert_xbusied(m);
 1053 
 1054         vm_page_lock(m);
 1055         vm_page_xunbusy_locked(m);
 1056         vm_page_unlock(m);
 1057 }
 1058 
 1059 /*
 1060  *      vm_page_flash:
 1061  *
 1062  *      Wakeup anyone waiting for the page.
 1063  *      The ownership bits do not change.
 1064  *
 1065  *      The given page must be locked.
 1066  */
 1067 void
 1068 vm_page_flash(vm_page_t m)
 1069 {
 1070         u_int x;
 1071 
 1072         vm_page_lock_assert(m, MA_OWNED);
 1073 
 1074         for (;;) {
 1075                 x = m->busy_lock;
 1076                 if ((x & VPB_BIT_WAITERS) == 0)
 1077                         return;
 1078                 if (atomic_cmpset_int(&m->busy_lock, x,
 1079                     x & (~VPB_BIT_WAITERS)))
 1080                         break;
 1081         }
 1082         wakeup(m);
 1083 }
 1084 
 1085 /*
 1086  * Avoid releasing and reacquiring the same page lock.
 1087  */
 1088 void
 1089 vm_page_change_lock(vm_page_t m, struct mtx **mtx)
 1090 {
 1091         struct mtx *mtx1;
 1092 
 1093         mtx1 = vm_page_lockptr(m);
 1094         if (*mtx == mtx1)
 1095                 return;
 1096         if (*mtx != NULL)
 1097                 mtx_unlock(*mtx);
 1098         *mtx = mtx1;
 1099         mtx_lock(mtx1);
 1100 }
 1101 
 1102 /*
  1103  * Keep the page from being freed by the page daemon.  This has much
  1104  * the same effect as wiring, except with much lower overhead, and it
  1105  * should be used only for *very* temporary
  1106  * holding ("wiring").
 1107  */
 1108 void
 1109 vm_page_hold(vm_page_t mem)
 1110 {
 1111 
 1112         vm_page_lock_assert(mem, MA_OWNED);
 1113         mem->hold_count++;
 1114 }
 1115 
 1116 void
 1117 vm_page_unhold(vm_page_t mem)
 1118 {
 1119 
 1120         vm_page_lock_assert(mem, MA_OWNED);
 1121         KASSERT(mem->hold_count >= 1, ("vm_page_unhold: hold count < 0!!!"));
 1122         --mem->hold_count;
 1123         if (mem->hold_count == 0 && (mem->flags & PG_UNHOLDFREE) != 0)
 1124                 vm_page_free_toq(mem);
 1125 }
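/*
 * A minimal sketch of the intended hold/unhold pairing (hypothetical caller;
 * hold_count is protected by the page lock):
 *
 *	vm_page_lock(m);
 *	vm_page_hold(m);
 *	vm_page_unlock(m);
 *	... short-lived access that must not race with the page being freed ...
 *	vm_page_lock(m);
 *	vm_page_unhold(m);	-- may free the page if PG_UNHOLDFREE is set
 *	vm_page_unlock(m);
 */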
 1126 
 1127 /*
 1128  *      vm_page_unhold_pages:
 1129  *
 1130  *      Unhold each of the pages that is referenced by the given array.
 1131  */
 1132 void
 1133 vm_page_unhold_pages(vm_page_t *ma, int count)
 1134 {
 1135         struct mtx *mtx;
 1136 
 1137         mtx = NULL;
 1138         for (; count != 0; count--) {
 1139                 vm_page_change_lock(*ma, &mtx);
 1140                 vm_page_unhold(*ma);
 1141                 ma++;
 1142         }
 1143         if (mtx != NULL)
 1144                 mtx_unlock(mtx);
 1145 }
 1146 
 1147 vm_page_t
 1148 PHYS_TO_VM_PAGE(vm_paddr_t pa)
 1149 {
 1150         vm_page_t m;
 1151 
 1152 #ifdef VM_PHYSSEG_SPARSE
 1153         m = vm_phys_paddr_to_vm_page(pa);
 1154         if (m == NULL)
 1155                 m = vm_phys_fictitious_to_vm_page(pa);
 1156         return (m);
 1157 #elif defined(VM_PHYSSEG_DENSE)
 1158         long pi;
 1159 
 1160         pi = atop(pa);
 1161         if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
 1162                 m = &vm_page_array[pi - first_page];
 1163                 return (m);
 1164         }
 1165         return (vm_phys_fictitious_to_vm_page(pa));
 1166 #else
 1167 #error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
 1168 #endif
 1169 }
 1170 
 1171 /*
 1172  *      vm_page_getfake:
 1173  *
 1174  *      Create a fictitious page with the specified physical address and
 1175  *      memory attribute.  The memory attribute is the only the machine-
  1176  *      memory attribute.  The memory attribute is the only machine-
 1177  */
 1178 vm_page_t
 1179 vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr)
 1180 {
 1181         vm_page_t m;
 1182 
 1183         m = uma_zalloc(fakepg_zone, M_WAITOK | M_ZERO);
 1184         vm_page_initfake(m, paddr, memattr);
 1185         return (m);
 1186 }
 1187 
 1188 void
 1189 vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
 1190 {
 1191 
 1192         if ((m->flags & PG_FICTITIOUS) != 0) {
 1193                 /*
 1194                  * The page's memattr might have changed since the
 1195                  * previous initialization.  Update the pmap to the
 1196                  * new memattr.
 1197                  */
 1198                 goto memattr;
 1199         }
 1200         m->phys_addr = paddr;
 1201         m->queue = PQ_NONE;
 1202         /* Fictitious pages don't use "segind". */
 1203         m->flags = PG_FICTITIOUS;
 1204         /* Fictitious pages don't use "order" or "pool". */
 1205         m->oflags = VPO_UNMANAGED;
 1206         m->busy_lock = VPB_SINGLE_EXCLUSIVER;
 1207         m->wire_count = 1;
 1208         pmap_page_init(m);
 1209 memattr:
 1210         pmap_page_set_memattr(m, memattr);
 1211 }
 1212 
 1213 /*
 1214  *      vm_page_putfake:
 1215  *
 1216  *      Release a fictitious page.
 1217  */
 1218 void
 1219 vm_page_putfake(vm_page_t m)
 1220 {
 1221 
 1222         KASSERT((m->oflags & VPO_UNMANAGED) != 0, ("managed %p", m));
 1223         KASSERT((m->flags & PG_FICTITIOUS) != 0,
 1224             ("vm_page_putfake: bad page %p", m));
 1225         uma_zfree(fakepg_zone, m);
 1226 }
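/*
 * A minimal lifecycle sketch for a fictitious page (hypothetical device
 * driver exposing a register window at "paddr"; the memattr value is just an
 * example):
 *
 *	vm_page_t m;
 *
 *	m = vm_page_getfake(paddr, VM_MEMATTR_UNCACHEABLE);
 *	... hand "m" to the pager, e.g. by inserting it into a device object ...
 *	vm_page_putfake(m);
 */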
 1227 
 1228 /*
 1229  *      vm_page_updatefake:
 1230  *
 1231  *      Update the given fictitious page to the specified physical address and
 1232  *      memory attribute.
 1233  */
 1234 void
 1235 vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
 1236 {
 1237 
 1238         KASSERT((m->flags & PG_FICTITIOUS) != 0,
 1239             ("vm_page_updatefake: bad page %p", m));
 1240         m->phys_addr = paddr;
 1241         pmap_page_set_memattr(m, memattr);
 1242 }
 1243 
 1244 /*
 1245  *      vm_page_free:
 1246  *
 1247  *      Free a page.
 1248  */
 1249 void
 1250 vm_page_free(vm_page_t m)
 1251 {
 1252 
 1253         m->flags &= ~PG_ZERO;
 1254         vm_page_free_toq(m);
 1255 }
 1256 
 1257 /*
 1258  *      vm_page_free_zero:
 1259  *
  1260  *      Free a page to the zeroed-pages queue.
 1261  */
 1262 void
 1263 vm_page_free_zero(vm_page_t m)
 1264 {
 1265 
 1266         m->flags |= PG_ZERO;
 1267         vm_page_free_toq(m);
 1268 }
 1269 
 1270 /*
 1271  * Unbusy and handle the page queueing for a page from a getpages request that
 1272  * was optionally read ahead or behind.
 1273  */
 1274 void
 1275 vm_page_readahead_finish(vm_page_t m)
 1276 {
 1277 
 1278         /* We shouldn't put invalid pages on queues. */
 1279         KASSERT(m->valid != 0, ("%s: %p is invalid", __func__, m));
 1280 
 1281         /*
  1282  * Since the page is not the one actually needed, whether it should
 1283          * be activated or deactivated is not obvious.  Empirical results
 1284          * have shown that deactivating the page is usually the best choice,
 1285          * unless the page is wanted by another thread.
 1286          */
 1287         vm_page_lock(m);
 1288         if ((m->busy_lock & VPB_BIT_WAITERS) != 0)
 1289                 vm_page_activate(m);
 1290         else
 1291                 vm_page_deactivate(m);
 1292         vm_page_unlock(m);
 1293         vm_page_xunbusy(m);
 1294 }
 1295 
 1296 /*
 1297  *      vm_page_sleep_if_busy:
 1298  *
  1299  *      Sleep and release the object lock if the page is busied.
 1300  *      Returns TRUE if the thread slept.
 1301  *
  1302  *      The given page must be unlocked and the object containing it
  1303  *      must be locked.
 1304  */
 1305 int
 1306 vm_page_sleep_if_busy(vm_page_t m, const char *msg)
 1307 {
 1308         vm_object_t obj;
 1309 
 1310         vm_page_lock_assert(m, MA_NOTOWNED);
 1311         VM_OBJECT_ASSERT_WLOCKED(m->object);
 1312 
 1313         if (vm_page_busied(m)) {
 1314                 /*
 1315                  * The page-specific object must be cached because page
 1316                  * identity can change during the sleep, causing the
 1317                  * re-lock of a different object.
 1318                  * It is assumed that a reference to the object is already
 1319                  * held by the callers.
 1320                  */
 1321                 obj = m->object;
 1322                 vm_page_lock(m);
 1323                 VM_OBJECT_WUNLOCK(obj);
 1324                 vm_page_busy_sleep(m, msg, false);
 1325                 VM_OBJECT_WLOCK(obj);
 1326                 return (TRUE);
 1327         }
 1328         return (FALSE);
 1329 }
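/*
 * A typical retry pattern built on vm_page_sleep_if_busy() (sketch only;
 * "object" and "pindex" are assumed to come from the caller):
 *
 *	VM_OBJECT_WLOCK(object);
 * retry:
 *	m = vm_page_lookup(object, pindex);
 *	if (m != NULL && vm_page_sleep_if_busy(m, "pgwait"))
 *		goto retry;	-- the page identity may have changed while
 *				   the object lock was dropped
 *	... use "m" ...
 *	VM_OBJECT_WUNLOCK(object);
 */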
 1330 
 1331 /*
 1332  *      vm_page_dirty_KBI:              [ internal use only ]
 1333  *
 1334  *      Set all bits in the page's dirty field.
 1335  *
 1336  *      The object containing the specified page must be locked if the
 1337  *      call is made from the machine-independent layer.
 1338  *
 1339  *      See vm_page_clear_dirty_mask().
 1340  *
 1341  *      This function should only be called by vm_page_dirty().
 1342  */
 1343 void
 1344 vm_page_dirty_KBI(vm_page_t m)
 1345 {
 1346 
 1347         /* Refer to this operation by its public name. */
 1348         KASSERT(m->valid == VM_PAGE_BITS_ALL,
 1349             ("vm_page_dirty: page is invalid!"));
 1350         m->dirty = VM_PAGE_BITS_ALL;
 1351 }
 1352 
 1353 /*
 1354  *      vm_page_insert:         [ internal use only ]
 1355  *
 1356  *      Inserts the given mem entry into the object and object list.
 1357  *
 1358  *      The object must be locked.
 1359  */
 1360 int
 1361 vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
 1362 {
 1363         vm_page_t mpred;
 1364 
 1365         VM_OBJECT_ASSERT_WLOCKED(object);
 1366         mpred = vm_radix_lookup_le(&object->rtree, pindex);
 1367         return (vm_page_insert_after(m, object, pindex, mpred));
 1368 }
 1369 
 1370 /*
 1371  *      vm_page_insert_after:
 1372  *
 1373  *      Inserts the page "m" into the specified object at offset "pindex".
 1374  *
 1375  *      The page "mpred" must immediately precede the offset "pindex" within
 1376  *      the specified object.
 1377  *
 1378  *      The object must be locked.
 1379  */
 1380 static int
 1381 vm_page_insert_after(vm_page_t m, vm_object_t object, vm_pindex_t pindex,
 1382     vm_page_t mpred)
 1383 {
 1384         vm_page_t msucc;
 1385 
 1386         VM_OBJECT_ASSERT_WLOCKED(object);
 1387         KASSERT(m->object == NULL,
 1388             ("vm_page_insert_after: page already inserted"));
 1389         if (mpred != NULL) {
 1390                 KASSERT(mpred->object == object,
 1391                     ("vm_page_insert_after: object doesn't contain mpred"));
 1392                 KASSERT(mpred->pindex < pindex,
 1393                     ("vm_page_insert_after: mpred doesn't precede pindex"));
 1394                 msucc = TAILQ_NEXT(mpred, listq);
 1395         } else
 1396                 msucc = TAILQ_FIRST(&object->memq);
 1397         if (msucc != NULL)
 1398                 KASSERT(msucc->pindex > pindex,
 1399                     ("vm_page_insert_after: msucc doesn't succeed pindex"));
 1400 
 1401         /*
 1402          * Record the object/offset pair in this page
 1403          */
 1404         m->object = object;
 1405         m->pindex = pindex;
 1406 
 1407         /*
 1408          * Now link into the object's ordered list of backed pages.
 1409          */
 1410         if (vm_radix_insert(&object->rtree, m)) {
 1411                 m->object = NULL;
 1412                 m->pindex = 0;
 1413                 return (1);
 1414         }
 1415         vm_page_insert_radixdone(m, object, mpred);
 1416         return (0);
 1417 }
 1418 
 1419 /*
 1420  *      vm_page_insert_radixdone:
 1421  *
 1422  *      Complete page "m" insertion into the specified object after the
 1423  *      radix trie hooking.
 1424  *
 1425  *      The page "mpred" must precede the offset "m->pindex" within the
 1426  *      specified object.
 1427  *
 1428  *      The object must be locked.
 1429  */
 1430 static void
 1431 vm_page_insert_radixdone(vm_page_t m, vm_object_t object, vm_page_t mpred)
 1432 {
 1433 
 1434         VM_OBJECT_ASSERT_WLOCKED(object);
 1435         KASSERT(object != NULL && m->object == object,
 1436             ("vm_page_insert_radixdone: page %p has inconsistent object", m));
 1437         if (mpred != NULL) {
 1438                 KASSERT(mpred->object == object,
 1439                     ("vm_page_insert_after: object doesn't contain mpred"));
 1440                 KASSERT(mpred->pindex < m->pindex,
 1441                     ("vm_page_insert_after: mpred doesn't precede pindex"));
 1442         }
 1443 
 1444         if (mpred != NULL)
 1445                 TAILQ_INSERT_AFTER(&object->memq, mpred, m, listq);
 1446         else
 1447                 TAILQ_INSERT_HEAD(&object->memq, m, listq);
 1448 
 1449         /*
 1450          * Show that the object has one more resident page.
 1451          */
 1452         object->resident_page_count++;
 1453 
 1454         /*
 1455          * Hold the vnode until the last page is released.
 1456          */
 1457         if (object->resident_page_count == 1 && object->type == OBJT_VNODE)
 1458                 vhold(object->handle);
 1459 
 1460         /*
 1461          * Since we are inserting a new and possibly dirty page,
 1462          * update the object's OBJ_MIGHTBEDIRTY flag.
 1463          */
 1464         if (pmap_page_is_write_mapped(m))
 1465                 vm_object_set_writeable_dirty(object);
 1466 }
 1467 
 1468 /*
 1469  *      vm_page_remove:
 1470  *
 1471  *      Removes the specified page from its containing object, but does not
 1472  *      invalidate any backing storage.
 1473  *
 1474  *      The object must be locked.  The page must be locked if it is managed.
 1475  */
 1476 void
 1477 vm_page_remove(vm_page_t m)
 1478 {
 1479         vm_object_t object;
 1480         vm_page_t mrem;
 1481 
 1482         if ((m->oflags & VPO_UNMANAGED) == 0)
 1483                 vm_page_assert_locked(m);
 1484         if ((object = m->object) == NULL)
 1485                 return;
 1486         VM_OBJECT_ASSERT_WLOCKED(object);
 1487         if (vm_page_xbusied(m))
 1488                 vm_page_xunbusy_maybelocked(m);
 1489         mrem = vm_radix_remove(&object->rtree, m->pindex);
 1490         KASSERT(mrem == m, ("removed page %p, expected page %p", mrem, m));
 1491 
 1492         /*
 1493          * Now remove from the object's list of backed pages.
 1494          */
 1495         TAILQ_REMOVE(&object->memq, m, listq);
 1496 
 1497         /*
 1498          * And show that the object has one fewer resident page.
 1499          */
 1500         object->resident_page_count--;
 1501 
 1502         /*
 1503          * The vnode may now be recycled.
 1504          */
 1505         if (object->resident_page_count == 0 && object->type == OBJT_VNODE)
 1506                 vdrop(object->handle);
 1507 
 1508         m->object = NULL;
 1509 }
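
An illustrative sketch (not part of vm_page.c) of the locking a direct caller of vm_page_remove() would need, assuming a hypothetical write-locked object "obj" and a managed resident page "m"; per the comment above, the page lock is required for managed pages:

        /* Detach "m" from "obj" without freeing it or its backing store. */
        VM_OBJECT_WLOCK(obj);
        vm_page_lock(m);
        vm_page_remove(m);
        vm_page_unlock(m);
        VM_OBJECT_WUNLOCK(obj);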
 1510 
 1511 /*
 1512  *      vm_page_lookup:
 1513  *
 1514  *      Returns the page associated with the object/offset
 1515  *      pair specified; if none is found, NULL is returned.
 1516  *
 1517  *      The object must be locked.
 1518  */
 1519 vm_page_t
 1520 vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
 1521 {
 1522 
 1523         VM_OBJECT_ASSERT_LOCKED(object);
 1524         return (vm_radix_lookup(&object->rtree, pindex));
 1525 }
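
As an illustration only (not part of this file), a caller might look up a resident page roughly as follows, with "obj" and "pindex" standing in for the caller's object and offset; a read lock satisfies the locking requirement stated above:

        vm_page_t m;

        VM_OBJECT_RLOCK(obj);
        m = vm_page_lookup(obj, pindex);
        if (m != NULL && m->valid == VM_PAGE_BITS_ALL) {
                /* The page is resident and fully valid. */
        }
        VM_OBJECT_RUNLOCK(obj);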
 1526 
 1527 /*
 1528  *      vm_page_find_least:
 1529  *
 1530  *      Returns the page associated with the object with least pindex
 1531  *      greater than or equal to the parameter pindex, or NULL.
 1532  *
 1533  *      The object must be locked.
 1534  */
 1535 vm_page_t
 1536 vm_page_find_least(vm_object_t object, vm_pindex_t pindex)
 1537 {
 1538         vm_page_t m;
 1539 
 1540         VM_OBJECT_ASSERT_LOCKED(object);
 1541         if ((m = TAILQ_FIRST(&object->memq)) != NULL && m->pindex < pindex)
 1542                 m = vm_radix_lookup_ge(&object->rtree, pindex);
 1543         return (m);
 1544 }
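
Because the object's memq is kept sorted by pindex (see vm_page_insert_radixdone() above), vm_page_find_least() combines naturally with TAILQ_NEXT() to walk the resident pages in a pindex range.  A sketch, assuming a hypothetical locked object "obj" and bounds "start" and "end":

        vm_page_t m;

        VM_OBJECT_WLOCK(obj);
        for (m = vm_page_find_least(obj, start);
            m != NULL && m->pindex < end; m = TAILQ_NEXT(m, listq)) {
                /* Process the resident page "m" in [start, end). */
        }
        VM_OBJECT_WUNLOCK(obj);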
 1545 
 1546 /*
 1547  * Returns the given page's successor (by pindex) within the object if it is
 1548  * resident; if none is found, NULL is returned.
 1549  *
 1550  * The object must be locked.
 1551  */
 1552 vm_page_t
 1553 vm_page_next(vm_page_t m)
 1554 {
 1555         vm_page_t next;
 1556 
 1557         VM_OBJECT_ASSERT_LOCKED(m->object);
 1558         if ((next = TAILQ_NEXT(m, listq)) != NULL) {
 1559                 MPASS(next->object == m->object);
 1560                 if (next->pindex != m->pindex + 1)
 1561                         next = NULL;
 1562         }
 1563         return (next);
 1564 }
 1565 
 1566 /*
 1567  * Returns the given page's predecessor (by pindex) within the object if it is
 1568  * resident; if none is found, NULL is returned.
 1569  *
 1570  * The object must be locked.
 1571  */
 1572 vm_page_t
 1573 vm_page_prev(vm_page_t m)
 1574 {
 1575         vm_page_t prev;
 1576 
 1577         VM_OBJECT_ASSERT_LOCKED(m->object);
 1578         if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL) {
 1579                 MPASS(prev->object == m->object);
 1580                 if (prev->pindex != m->pindex - 1)
 1581                         prev = NULL;
 1582         }
 1583         return (prev);
 1584 }
 1585 
 1586 /*
 1587  * Uses the page mnew as a replacement for an existing page at index
 1588  * pindex which must be already present in the object.
 1589  *
 1590  * The existing page must not be on a paging queue.
 1591  */
 1592 vm_page_t
 1593 vm_page_replace(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex)
 1594 {
 1595         vm_page_t mold;
 1596 
 1597         VM_OBJECT_ASSERT_WLOCKED(object);
 1598         KASSERT(mnew->object == NULL,
 1599             ("vm_page_replace: page %p already in object", mnew));
 1600         KASSERT(mnew->queue == PQ_NONE,
 1601             ("vm_page_replace: new page %p is on a paging queue", mnew));
 1602 
 1603         /*
 1604          * This function mostly follows vm_page_insert() and
 1605          * vm_page_remove() without the radix, object count and vnode
 1606          * dance.  Refer to those functions for additional comments.
 1607          */
 1608 
 1609         mnew->object = object;
 1610         mnew->pindex = pindex;
 1611         mold = vm_radix_replace(&object->rtree, mnew);
 1612         KASSERT(mold->queue == PQ_NONE,
 1613             ("vm_page_replace: old page %p is on a paging queue", mold));
 1614 
 1615         /* Keep the resident page list in sorted order. */
 1616         TAILQ_INSERT_AFTER(&object->memq, mold, mnew, listq);
 1617         TAILQ_REMOVE(&object->memq, mold, listq);
 1618 
 1619         mold->object = NULL;
 1620         vm_page_xunbusy_maybelocked(mold);
 1621 
 1622         /*
 1623          * The object's resident_page_count does not change because we have
 1624          * swapped one page for another, but OBJ_MIGHTBEDIRTY may need to be set.
 1625          */
 1626         if (pmap_page_is_write_mapped(mnew))
 1627                 vm_object_set_writeable_dirty(object);
 1628         return (mold);
 1629 }
 1630 
 1631 /*
 1632  *      vm_page_rename:
 1633  *
 1634  *      Move the given memory entry from its
 1635  *      current object to the specified target object/offset.
 1636  *
 1637  *      Note: swap associated with the page must be invalidated by the move.  We
 1638  *            have to do this for several reasons:  (1) we aren't freeing the
 1639  *            page, (2) we are dirtying the page, (3) the VM system is probably
 1640  *            moving the page from object A to B, and will then later move
 1641  *            the backing store from A to B and we can't have a conflict.
 1642  *
 1643  *      Note: we *always* dirty the page.  It is necessary both for the
 1644  *            fact that we moved it, and because we may be invalidating
 1645  *            swap.
 1646  *
 1647  *      The objects must be locked.
 1648  */
 1649 int
 1650 vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
 1651 {
 1652         vm_page_t mpred;
 1653         vm_pindex_t opidx;
 1654 
 1655         VM_OBJECT_ASSERT_WLOCKED(new_object);
 1656 
 1657         mpred = vm_radix_lookup_le(&new_object->rtree, new_pindex);
 1658         KASSERT(mpred == NULL || mpred->pindex != new_pindex,
 1659             ("vm_page_rename: pindex already renamed"));
 1660 
 1661         /*
 1662          * Create a custom version of vm_page_insert() which does not depend
 1663          * on mpred and can cheat on the implementation aspects of the
 1664          * function.
 1665          */
 1666         opidx = m->pindex;
 1667         m->pindex = new_pindex;
 1668         if (vm_radix_insert(&new_object->rtree, m)) {
 1669                 m->pindex = opidx;
 1670                 return (1);
 1671         }
 1672 
 1673         /*
 1674          * The operation cannot fail anymore.  The removal must happen before
 1675          * the listq iterator is tainted.
 1676          */
 1677         m->pindex = opidx;
 1678         vm_page_lock(m);
 1679         vm_page_remove(m);
 1680 
 1681         /* Return back to the new pindex to complete vm_page_insert(). */
 1682         m->pindex = new_pindex;
 1683         m->object = new_object;
 1684         vm_page_unlock(m);
 1685         vm_page_insert_radixdone(m, new_object, mpred);
 1686         vm_page_dirty(m);
 1687         return (0);
 1688 }
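
vm_page_rename() returns non-zero only when the radix trie insertion fails for lack of memory.  A plausible caller pattern, sketched here (not part of this file) with hypothetical write-locked source and destination objects "src" and "dst", is to drop the locks, wait for radix node memory with vm_radix_wait(), and retry:

retry:
        if (vm_page_rename(m, dst, new_pindex) != 0) {
                VM_OBJECT_WUNLOCK(dst);
                VM_OBJECT_WUNLOCK(src);
                vm_radix_wait();
                VM_OBJECT_WLOCK(src);
                VM_OBJECT_WLOCK(dst);
                goto retry;
        }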
 1689 
 1690 /*
 1691  *      vm_page_alloc:
 1692  *
 1693  *      Allocate and return a page that is associated with the specified
 1694  *      object and offset pair.  By default, this page is exclusive busied.
 1695  *
 1696  *      The caller must always specify an allocation class.
 1697  *
 1698  *      allocation classes:
 1699  *      VM_ALLOC_NORMAL         normal process request
 1700  *      VM_ALLOC_SYSTEM         system *really* needs a page
 1701  *      VM_ALLOC_INTERRUPT      interrupt time request
 1702  *
 1703  *      optional allocation flags:
 1704  *      VM_ALLOC_COUNT(number)  the number of additional pages that the caller
 1705  *                              intends to allocate
 1706  *      VM_ALLOC_NOBUSY         do not exclusive busy the page
 1707  *      VM_ALLOC_NODUMP         do not include the page in a kernel core dump
 1708  *      VM_ALLOC_NOOBJ          page is not associated with an object and
 1709  *                              should not be exclusive busy
 1710  *      VM_ALLOC_SBUSY          shared busy the allocated page
 1711  *      VM_ALLOC_WIRED          wire the allocated page
 1712  *      VM_ALLOC_ZERO           prefer a zeroed page
 1713  */
 1714 vm_page_t
 1715 vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
 1716 {
 1717 
 1718         return (vm_page_alloc_after(object, pindex, req, object != NULL ?
 1719             vm_radix_lookup_le(&object->rtree, pindex) : NULL));
 1720 }
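
A minimal usage sketch for vm_page_alloc(), assuming a hypothetical write-locked object "obj" and offset "pindex", and assuming vm_wait() is the primitive this tree uses to sleep for free pages.  Note that VM_ALLOC_ZERO only expresses a preference, so PG_ZERO must be checked:

        vm_page_t m;

retry:
        m = vm_page_alloc(obj, pindex,
            VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
        if (m == NULL) {
                VM_OBJECT_WUNLOCK(obj);
                vm_wait(obj);           /* assumed wait primitive */
                VM_OBJECT_WLOCK(obj);
                goto retry;
        }
        if ((m->flags & PG_ZERO) == 0)
                pmap_zero_page(m);
        m->valid = VM_PAGE_BITS_ALL;
        vm_page_xunbusy(m);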
 1721 
 1722 vm_page_t
 1723 vm_page_alloc_domain(vm_object_t object, vm_pindex_t pindex, int domain,
 1724     int req)
 1725 {
 1726 
 1727         return (vm_page_alloc_domain_after(object, pindex, domain, req,
 1728             object != NULL ? vm_radix_lookup_le(&object->rtree, pindex) :
 1729             NULL));
 1730 }
 1731 
 1732 /*
 1733  * Allocate a page in the specified object with the given page index.  To
 1734  * optimize insertion of the page into the object, the caller must also specify
 1735  * the resident page in the object with largest index smaller than the given
 1736  * page index, or NULL if no such page exists.
 1737  */
 1738 vm_page_t
 1739 vm_page_alloc_after(vm_object_t object, vm_pindex_t pindex,
 1740     int req, vm_page_t mpred)
 1741 {
 1742         struct vm_domainset_iter di;
 1743         vm_page_t m;
 1744         int domain;
 1745 
 1746         vm_domainset_iter_page_init(&di, object, pindex, &domain, &req);
 1747         do {
 1748                 m = vm_page_alloc_domain_after(object, pindex, domain, req,
 1749                     mpred);
 1750                 if (m != NULL)
 1751                         break;
 1752         } while (vm_domainset_iter_page(&di, object, &domain) == 0);
 1753 
 1754         return (m);
 1755 }
 1756 
 1757 /*
 1758  * Reserve "npages" from the domain's free page count.  Returns true if the
 1759  * free page count exceeds the minimum for the request class, false otherwise.
 1760  */
 1761 int
 1762 vm_domain_allocate(struct vm_domain *vmd, int req, int npages)
 1763 {
 1764         u_int limit, old, new;
 1765 
 1766         req = req & VM_ALLOC_CLASS_MASK;
 1767 
 1768         /*
 1769          * The page daemon is allowed to dig deeper into the free page list.
 1770          */
 1771         if (curproc == pageproc && req != VM_ALLOC_INTERRUPT)
 1772                 req = VM_ALLOC_SYSTEM;
 1773         if (req == VM_ALLOC_INTERRUPT)
 1774                 limit = 0;
 1775         else if (req == VM_ALLOC_SYSTEM)
 1776                 limit = vmd->vmd_interrupt_free_min;
 1777         else
 1778                 limit = vmd->vmd_free_reserved;
 1779 
 1780         /*
 1781          * Attempt to reserve the pages.  Fail if we're below the limit.
 1782          */
 1783         limit += npages;
 1784         old = vmd->vmd_free_count;
 1785         do {
 1786                 if (old < limit)
 1787                         return (0);
 1788                 new = old - npages;
 1789         } while (atomic_fcmpset_int(&vmd->vmd_free_count, &old, new) == 0);
 1790 
 1791         /* Wake the page daemon if we've crossed the threshold. */
 1792         if (vm_paging_needed(vmd, new) && !vm_paging_needed(vmd, old))
 1793                 pagedaemon_wakeup(vmd->vmd_domain);
 1794 
 1795         /* Only update bitsets on transitions. */
 1796         if ((old >= vmd->vmd_free_min && new < vmd->vmd_free_min) ||
 1797             (old >= vmd->vmd_free_severe && new < vmd->vmd_free_severe))
 1798                 vm_domain_set(vmd);
 1799 
 1800         return (1);
 1801 }
 1802 
 1803 vm_page_t
 1804 vm_page_alloc_domain_after(vm_object_t object, vm_pindex_t pindex, int domain,
 1805     int req, vm_page_t mpred)
 1806 {
 1807         struct vm_domain *vmd;
 1808         vm_page_t m;
 1809         int flags;
 1810 
 1811         KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) &&
 1812             (object != NULL || (req & VM_ALLOC_SBUSY) == 0) &&
 1813             ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
 1814             (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
 1815             ("inconsistent object(%p)/req(%x)", object, req));
 1816         KASSERT(object == NULL || (req & VM_ALLOC_WAITOK) == 0,
 1817             ("Can't sleep and retry object insertion."));
 1818         KASSERT(mpred == NULL || mpred->pindex < pindex,
 1819             ("mpred %p doesn't precede pindex 0x%jx", mpred,
 1820             (uintmax_t)pindex));
 1821         if (object != NULL)
 1822                 VM_OBJECT_ASSERT_WLOCKED(object);
 1823 
 1824 again:
 1825         m = NULL;
 1826 #if VM_NRESERVLEVEL > 0
 1827         /*
 1828          * Can we allocate the page from a reservation?
 1829          */
 1830         if (vm_object_reserv(object) &&
 1831             ((m = vm_reserv_extend(req, object, pindex, domain, mpred)) != NULL ||
 1832             (m = vm_reserv_alloc_page(req, object, pindex, domain, mpred)) != NULL)) {
 1833                 domain = vm_phys_domain(m);
 1834                 vmd = VM_DOMAIN(domain);
 1835                 goto found;
 1836         }
 1837 #endif
 1838         vmd = VM_DOMAIN(domain);
 1839         if (object != NULL && vmd->vmd_pgcache != NULL) {
 1840                 m = uma_zalloc(vmd->vmd_pgcache, M_NOWAIT);
 1841                 if (m != NULL)
 1842                         goto found;
 1843         }
 1844         if (vm_domain_allocate(vmd, req, 1)) {
 1845                 /*
 1846                  * Allocate the page from the free page queues.
 1847                  */
 1848                 vm_domain_free_lock(vmd);
 1849                 m = vm_phys_alloc_pages(domain, object != NULL ?
 1850                     VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, 0);
 1851                 vm_domain_free_unlock(vmd);
 1852                 if (m == NULL) {
 1853                         vm_domain_freecnt_inc(vmd, 1);
 1854 #if VM_NRESERVLEVEL > 0
 1855                         if (vm_reserv_reclaim_inactive(domain))
 1856                                 goto again;
 1857 #endif
 1858                 }
 1859         }
 1860         if (m == NULL) {
 1861                 /*
 1862                  * Not allocatable, give up.
 1863                  */
 1864                 if (vm_domain_alloc_fail(vmd, object, req))
 1865                         goto again;
 1866                 return (NULL);
 1867         }
 1868 
 1869         /*
 1870          *  At this point we had better have found a good page.
 1871          */
 1872         KASSERT(m != NULL, ("missing page"));
 1873 
 1874 found:
 1875         vm_page_dequeue(m);
 1876         vm_page_alloc_check(m);
 1877 
 1878         /*
 1879          * Initialize the page.  Only the PG_ZERO flag is inherited.
 1880          */
 1881         flags = 0;
 1882         if ((req & VM_ALLOC_ZERO) != 0)
 1883                 flags = PG_ZERO;
 1884         flags &= m->flags;
 1885         if ((req & VM_ALLOC_NODUMP) != 0)
 1886                 flags |= PG_NODUMP;
 1887         m->flags = flags;
 1888         m->aflags = 0;
 1889         m->oflags = object == NULL || (object->flags & OBJ_UNMANAGED) != 0 ?
 1890             VPO_UNMANAGED : 0;
 1891         m->busy_lock = VPB_UNBUSIED;
 1892         if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ | VM_ALLOC_SBUSY)) == 0)
 1893                 m->busy_lock = VPB_SINGLE_EXCLUSIVER;
 1894         if ((req & VM_ALLOC_SBUSY) != 0)
 1895                 m->busy_lock = VPB_SHARERS_WORD(1);
 1896         if (req & VM_ALLOC_WIRED) {
 1897                 /*
 1898                  * The page lock is not required for wiring a page until that
 1899                  * page is inserted into the object.
 1900                  */
 1901                 vm_wire_add(1);
 1902                 m->wire_count = 1;
 1903         }
 1904         m->act_count = 0;
 1905 
 1906         if (object != NULL) {
 1907                 if (vm_page_insert_after(m, object, pindex, mpred)) {
 1908                         if (req & VM_ALLOC_WIRED) {
 1909                                 vm_wire_sub(1);
 1910                                 m->wire_count = 0;
 1911                         }
 1912                         KASSERT(m->object == NULL, ("page %p has object", m));
 1913                         m->oflags = VPO_UNMANAGED;
 1914                         m->busy_lock = VPB_UNBUSIED;
 1915                         /* Don't change PG_ZERO. */
 1916                         vm_page_free_toq(m);
 1917                         if (req & VM_ALLOC_WAITFAIL) {
 1918                                 VM_OBJECT_WUNLOCK(object);
 1919                                 vm_radix_wait();
 1920                                 VM_OBJECT_WLOCK(object);
 1921                         }
 1922                         return (NULL);
 1923                 }
 1924 
 1925                 /* Ignore device objects; the pager sets "memattr" for them. */
 1926                 if (object->memattr != VM_MEMATTR_DEFAULT &&
 1927                     (object->flags & OBJ_FICTITIOUS) == 0)
 1928                         pmap_page_set_memattr(m, object->memattr);
 1929         } else
 1930                 m->pindex = pindex;
 1931 
 1932         return (m);
 1933 }
 1934 
 1935 /*
 1936  *      vm_page_alloc_contig:
 1937  *
 1938  *      Allocate a contiguous set of physical pages of the given size "npages"
 1939  *      from the free lists.  All of the physical pages must be at or above
 1940  *      the given physical address "low" and below the given physical address
 1941  *      "high".  The given value "alignment" determines the alignment of the
 1942  *      first physical page in the set.  If the given value "boundary" is
 1943  *      non-zero, then the set of physical pages cannot cross any physical
 1944  *      address boundary that is a multiple of that value.  Both "alignment"
 1945  *      and "boundary" must be a power of two.
 1946  *
 1947  *      If the specified memory attribute, "memattr", is VM_MEMATTR_DEFAULT,
 1948  *      then the memory attribute setting for the physical pages is configured
 1949  *      to the object's memory attribute setting.  Otherwise, the memory
 1950  *      attribute setting for the physical pages is configured to "memattr",
 1951  *      overriding the object's memory attribute setting.  However, if the
 1952  *      object's memory attribute setting is not VM_MEMATTR_DEFAULT, then the
 1953  *      memory attribute setting for the physical pages cannot be configured
 1954  *      to VM_MEMATTR_DEFAULT.
 1955  *
 1956  *      The specified object may not contain fictitious pages.
 1957  *
 1958  *      The caller must always specify an allocation class.
 1959  *
 1960  *      allocation classes:
 1961  *      VM_ALLOC_NORMAL         normal process request
 1962  *      VM_ALLOC_SYSTEM         system *really* needs a page
 1963  *      VM_ALLOC_INTERRUPT      interrupt time request
 1964  *
 1965  *      optional allocation flags:
 1966  *      VM_ALLOC_NOBUSY         do not exclusive busy the page
 1967  *      VM_ALLOC_NODUMP         do not include the page in a kernel core dump
 1968  *      VM_ALLOC_NOOBJ          page is not associated with an object and
 1969  *                              should not be exclusive busy
 1970  *      VM_ALLOC_SBUSY          shared busy the allocated page
 1971  *      VM_ALLOC_WIRED          wire the allocated page
 1972  *      VM_ALLOC_ZERO           prefer a zeroed page
 1973  */
 1974 vm_page_t
 1975 vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
 1976     u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
 1977     vm_paddr_t boundary, vm_memattr_t memattr)
 1978 {
 1979         struct vm_domainset_iter di;
 1980         vm_page_t m;
 1981         int domain;
 1982 
 1983         vm_domainset_iter_page_init(&di, object, pindex, &domain, &req);
 1984         do {
 1985                 m = vm_page_alloc_contig_domain(object, pindex, domain, req,
 1986                     npages, low, high, alignment, boundary, memattr);
 1987                 if (m != NULL)
 1988                         break;
 1989         } while (vm_domainset_iter_page(&di, object, &domain) == 0);
 1990 
 1991         return (m);
 1992 }
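
For illustration only (not part of this file), callers that need physically contiguous memory typically loop over vm_page_alloc_contig(), vm_page_reclaim_contig(), and a wait for free pages.  The sketch below assumes an object-less (VM_ALLOC_NOOBJ) allocation with hypothetical constraints npages, low, high, alignment, and boundary, and assumes vm_wait(NULL) is a valid way to wait:

        vm_page_t m;

        for (;;) {
                m = vm_page_alloc_contig(NULL, 0,
                    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED,
                    npages, low, high, alignment, boundary,
                    VM_MEMATTR_DEFAULT);
                if (m != NULL)
                        break;
                /* Relocate in-use pages to build a run, or wait and retry. */
                if (!vm_page_reclaim_contig(VM_ALLOC_NORMAL, npages, low,
                    high, alignment, boundary))
                        vm_wait(NULL);  /* assumed wait primitive */
        }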
 1993 
 1994 vm_page_t
 1995 vm_page_alloc_contig_domain(vm_object_t object, vm_pindex_t pindex, int domain,
 1996     int req, u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
 1997     vm_paddr_t boundary, vm_memattr_t memattr)
 1998 {
 1999         struct vm_domain *vmd;
 2000         vm_page_t m, m_ret, mpred;
 2001         u_int busy_lock, flags, oflags;
 2002 
 2003         mpred = NULL;   /* XXX: pacify gcc */
 2004         KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) &&
 2005             (object != NULL || (req & VM_ALLOC_SBUSY) == 0) &&
 2006             ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
 2007             (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
 2008             ("vm_page_alloc_contig: inconsistent object(%p)/req(%x)", object,
 2009             req));
 2010         KASSERT(object == NULL || (req & VM_ALLOC_WAITOK) == 0,
 2011             ("Can't sleep and retry object insertion."));
 2012         if (object != NULL) {
 2013                 VM_OBJECT_ASSERT_WLOCKED(object);
 2014                 KASSERT((object->flags & OBJ_FICTITIOUS) == 0,
 2015                     ("vm_page_alloc_contig: object %p has fictitious pages",
 2016                     object));
 2017         }
 2018         KASSERT(npages > 0, ("vm_page_alloc_contig: npages is zero"));
 2019 
 2020         if (object != NULL) {
 2021                 mpred = vm_radix_lookup_le(&object->rtree, pindex);
 2022                 KASSERT(mpred == NULL || mpred->pindex != pindex,
 2023                     ("vm_page_alloc_contig: pindex already allocated"));
 2024         }
 2025 
 2026         /*
 2027          * Can we allocate the pages without the number of free pages falling
 2028          * below the lower bound for the allocation class?
 2029          */
 2030 again:
 2031 #if VM_NRESERVLEVEL > 0
 2032         /*
 2033          * Can we allocate the pages from a reservation?
 2034          */
 2035         if (vm_object_reserv(object) &&
 2036             ((m_ret = vm_reserv_extend_contig(req, object, pindex, domain,
 2037             npages, low, high, alignment, boundary, mpred)) != NULL ||
 2038             (m_ret = vm_reserv_alloc_contig(req, object, pindex, domain,
 2039             npages, low, high, alignment, boundary, mpred)) != NULL)) {
 2040                 domain = vm_phys_domain(m_ret);
 2041                 vmd = VM_DOMAIN(domain);
 2042                 goto found;
 2043         }
 2044 #endif
 2045         m_ret = NULL;
 2046         vmd = VM_DOMAIN(domain);
 2047         if (vm_domain_allocate(vmd, req, npages)) {
 2048                 /*
 2049                  * Allocate the pages from the free page queues.
 2050                  */
 2051                 vm_domain_free_lock(vmd);
 2052                 m_ret = vm_phys_alloc_contig(domain, npages, low, high,
 2053                     alignment, boundary);
 2054                 vm_domain_free_unlock(vmd);
 2055                 if (m_ret == NULL) {
 2056                         vm_domain_freecnt_inc(vmd, npages);
 2057 #if VM_NRESERVLEVEL > 0
 2058                         if (vm_reserv_reclaim_contig(domain, npages, low,
 2059                             high, alignment, boundary))
 2060                                 goto again;
 2061 #endif
 2062                 }
 2063         }
 2064         if (m_ret == NULL) {
 2065                 if (vm_domain_alloc_fail(vmd, object, req))
 2066                         goto again;
 2067                 return (NULL);
 2068         }
 2069 #if VM_NRESERVLEVEL > 0
 2070 found:
 2071 #endif
 2072         for (m = m_ret; m < &m_ret[npages]; m++) {
 2073                 vm_page_dequeue(m);
 2074                 vm_page_alloc_check(m);
 2075         }
 2076 
 2077         /*
 2078          * Initialize the pages.  Only the PG_ZERO flag is inherited.
 2079          */
 2080         flags = 0;
 2081         if ((req & VM_ALLOC_ZERO) != 0)
 2082                 flags = PG_ZERO;
 2083         if ((req & VM_ALLOC_NODUMP) != 0)
 2084                 flags |= PG_NODUMP;
 2085         oflags = object == NULL || (object->flags & OBJ_UNMANAGED) != 0 ?
 2086             VPO_UNMANAGED : 0;
 2087         busy_lock = VPB_UNBUSIED;
 2088         if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ | VM_ALLOC_SBUSY)) == 0)
 2089                 busy_lock = VPB_SINGLE_EXCLUSIVER;
 2090         if ((req & VM_ALLOC_SBUSY) != 0)
 2091                 busy_lock = VPB_SHARERS_WORD(1);
 2092         if ((req & VM_ALLOC_WIRED) != 0)
 2093                 vm_wire_add(npages);
 2094         if (object != NULL) {
 2095                 if (object->memattr != VM_MEMATTR_DEFAULT &&
 2096                     memattr == VM_MEMATTR_DEFAULT)
 2097                         memattr = object->memattr;
 2098         }
 2099         for (m = m_ret; m < &m_ret[npages]; m++) {
 2100                 m->aflags = 0;
 2101                 m->flags = (m->flags | PG_NODUMP) & flags;
 2102                 m->busy_lock = busy_lock;
 2103                 if ((req & VM_ALLOC_WIRED) != 0)
 2104                         m->wire_count = 1;
 2105                 m->act_count = 0;
 2106                 m->oflags = oflags;
 2107                 if (object != NULL) {
 2108                         if (vm_page_insert_after(m, object, pindex, mpred)) {
 2109                                 if ((req & VM_ALLOC_WIRED) != 0)
 2110                                         vm_wire_sub(npages);
 2111                                 KASSERT(m->object == NULL,
 2112                                     ("page %p has object", m));
 2113                                 mpred = m;
 2114                                 for (m = m_ret; m < &m_ret[npages]; m++) {
 2115                                         if (m <= mpred &&
 2116                                             (req & VM_ALLOC_WIRED) != 0)
 2117                                                 m->wire_count = 0;
 2118                                         m->oflags = VPO_UNMANAGED;
 2119                                         m->busy_lock = VPB_UNBUSIED;
 2120                                         /* Don't change PG_ZERO. */
 2121                                         vm_page_free_toq(m);
 2122                                 }
 2123                                 if (req & VM_ALLOC_WAITFAIL) {
 2124                                         VM_OBJECT_WUNLOCK(object);
 2125                                         vm_radix_wait();
 2126                                         VM_OBJECT_WLOCK(object);
 2127                                 }
 2128                                 return (NULL);
 2129                         }
 2130                         mpred = m;
 2131                 } else
 2132                         m->pindex = pindex;
 2133                 if (memattr != VM_MEMATTR_DEFAULT)
 2134                         pmap_page_set_memattr(m, memattr);
 2135                 pindex++;
 2136         }
 2137         return (m_ret);
 2138 }
 2139 
 2140 /*
 2141  * Check a page that has been freshly dequeued from a freelist.
 2142  */
 2143 static void
 2144 vm_page_alloc_check(vm_page_t m)
 2145 {
 2146 
 2147         KASSERT(m->object == NULL, ("page %p has object", m));
 2148         KASSERT(m->queue == PQ_NONE && (m->aflags & PGA_QUEUE_STATE_MASK) == 0,
 2149             ("page %p has unexpected queue %d, flags %#x",
 2150             m, m->queue, (m->aflags & PGA_QUEUE_STATE_MASK)));
 2151         KASSERT(!vm_page_held(m), ("page %p is held", m));
 2152         KASSERT(!vm_page_busied(m), ("page %p is busy", m));
 2153         KASSERT(m->dirty == 0, ("page %p is dirty", m));
 2154         KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
 2155             ("page %p has unexpected memattr %d",
 2156             m, pmap_page_get_memattr(m)));
 2157         KASSERT(m->valid == 0, ("free page %p is valid", m));
 2158 }
 2159 
 2160 /*
 2161  *      vm_page_alloc_freelist:
 2162  *
 2163  *      Allocate a physical page from the specified free page list.
 2164  *
 2165  *      The caller must always specify an allocation class.
 2166  *
 2167  *      allocation classes:
 2168  *      VM_ALLOC_NORMAL         normal process request
 2169  *      VM_ALLOC_SYSTEM         system *really* needs a page
 2170  *      VM_ALLOC_INTERRUPT      interrupt time request
 2171  *
 2172  *      optional allocation flags:
 2173  *      VM_ALLOC_COUNT(number)  the number of additional pages that the caller
 2174  *                              intends to allocate
 2175  *      VM_ALLOC_WIRED          wire the allocated page
 2176  *      VM_ALLOC_ZERO           prefer a zeroed page
 2177  */
 2178 vm_page_t
 2179 vm_page_alloc_freelist(int freelist, int req)
 2180 {
 2181         struct vm_domainset_iter di;
 2182         vm_page_t m;
 2183         int domain;
 2184 
 2185         vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req);
 2186         do {
 2187                 m = vm_page_alloc_freelist_domain(domain, freelist, req);
 2188                 if (m != NULL)
 2189                         break;
 2190         } while (vm_domainset_iter_page(&di, NULL, &domain) == 0);
 2191 
 2192         return (m);
 2193 }
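
An illustrative sketch of vm_page_alloc_freelist() usage, assuming VM_FREELIST_DEFAULT names the platform's default freelist (an assumption; the set of freelists is machine-dependent).  The returned page is unmanaged and wired here, so the page lock is never needed:

        vm_page_t m;

        m = vm_page_alloc_freelist(VM_FREELIST_DEFAULT,
            VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
        if (m != NULL && (m->flags & PG_ZERO) == 0)
                pmap_zero_page(m);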
 2194 
 2195 vm_page_t
 2196 vm_page_alloc_freelist_domain(int domain, int freelist, int req)
 2197 {
 2198         struct vm_domain *vmd;
 2199         vm_page_t m;
 2200         u_int flags;
 2201 
 2202         m = NULL;
 2203         vmd = VM_DOMAIN(domain);
 2204 again:
 2205         if (vm_domain_allocate(vmd, req, 1)) {
 2206                 vm_domain_free_lock(vmd);
 2207                 m = vm_phys_alloc_freelist_pages(domain, freelist,
 2208                     VM_FREEPOOL_DIRECT, 0);
 2209                 vm_domain_free_unlock(vmd);
 2210                 if (m == NULL)
 2211                         vm_domain_freecnt_inc(vmd, 1);
 2212         }
 2213         if (m == NULL) {
 2214                 if (vm_domain_alloc_fail(vmd, NULL, req))
 2215                         goto again;
 2216                 return (NULL);
 2217         }
 2218         vm_page_dequeue(m);
 2219         vm_page_alloc_check(m);
 2220 
 2221         /*
 2222          * Initialize the page.  Only the PG_ZERO flag is inherited.
 2223          */
 2224         m->aflags = 0;
 2225         flags = 0;
 2226         if ((req & VM_ALLOC_ZERO) != 0)
 2227                 flags = PG_ZERO;
 2228         m->flags &= flags;
 2229         if ((req & VM_ALLOC_WIRED) != 0) {
 2230                 /*
 2231                  * The page lock is not required for wiring a page that does
 2232                  * not belong to an object.
 2233                  */
 2234                 vm_wire_add(1);
 2235                 m->wire_count = 1;
 2236         }
 2237         /* Unmanaged pages don't use "act_count". */
 2238         m->oflags = VPO_UNMANAGED;
 2239         return (m);
 2240 }
 2241 
 2242 static int
 2243 vm_page_import(void *arg, void **store, int cnt, int domain, int flags)
 2244 {
 2245         struct vm_domain *vmd;
 2246         int i;
 2247 
 2248         vmd = arg;
 2249         /* Only import if we can bring in a full bucket. */
 2250         if (cnt == 1 || !vm_domain_allocate(vmd, VM_ALLOC_NORMAL, cnt))
 2251                 return (0);
 2252         domain = vmd->vmd_domain;
 2253         vm_domain_free_lock(vmd);
 2254         i = vm_phys_alloc_npages(domain, VM_FREEPOOL_DEFAULT, cnt,
 2255             (vm_page_t *)store);
 2256         vm_domain_free_unlock(vmd);
 2257         if (cnt != i)
 2258                 vm_domain_freecnt_inc(vmd, cnt - i);
 2259 
 2260         return (i);
 2261 }
 2262 
 2263 static void
 2264 vm_page_release(void *arg, void **store, int cnt)
 2265 {
 2266         struct vm_domain *vmd;
 2267         vm_page_t m;
 2268         int i;
 2269 
 2270         vmd = arg;
 2271         vm_domain_free_lock(vmd);
 2272         for (i = 0; i < cnt; i++) {
 2273                 m = (vm_page_t)store[i];
 2274                 vm_phys_free_pages(m, 0);
 2275         }
 2276         vm_domain_free_unlock(vmd);
 2277         vm_domain_freecnt_inc(vmd, cnt);
 2278 }
 2279 
 2280 #define VPSC_ANY        0       /* No restrictions. */
 2281 #define VPSC_NORESERV   1       /* Skip reservations; implies VPSC_NOSUPER. */
 2282 #define VPSC_NOSUPER    2       /* Skip superpages. */
 2283 
 2284 /*
 2285  *      vm_page_scan_contig:
 2286  *
 2287  *      Scan vm_page_array[] between the specified entries "m_start" and
 2288  *      "m_end" for a run of contiguous physical pages that satisfy the
 2289  *      specified conditions, and return the lowest page in the run.  The
 2290  *      specified "alignment" determines the alignment of the lowest physical
 2291  *      page in the run.  If the specified "boundary" is non-zero, then the
 2292  *      run of physical pages cannot span a physical address that is a
 2293  *      multiple of "boundary".
 2294  *
 2295  *      "m_end" is never dereferenced, so it need not point to a vm_page
 2296  *      structure within vm_page_array[].
 2297  *
 2298  *      "npages" must be greater than zero.  "m_start" and "m_end" must not
 2299  *      span a hole (or discontiguity) in the physical address space.  Both
 2300  *      "alignment" and "boundary" must be a power of two.
 2301  */
 2302 vm_page_t
 2303 vm_page_scan_contig(u_long npages, vm_page_t m_start, vm_page_t m_end,
 2304     u_long alignment, vm_paddr_t boundary, int options)
 2305 {
 2306         struct mtx *m_mtx;
 2307         vm_object_t object;
 2308         vm_paddr_t pa;
 2309         vm_page_t m, m_run;
 2310 #if VM_NRESERVLEVEL > 0
 2311         int level;
 2312 #endif
 2313         int m_inc, order, run_ext, run_len;
 2314 
 2315         KASSERT(npages > 0, ("npages is 0"));
 2316         KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
 2317         KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
 2318         m_run = NULL;
 2319         run_len = 0;
 2320         m_mtx = NULL;
 2321         for (m = m_start; m < m_end && run_len < npages; m += m_inc) {
 2322                 KASSERT((m->flags & PG_MARKER) == 0,
 2323                     ("page %p is PG_MARKER", m));
 2324                 KASSERT((m->flags & PG_FICTITIOUS) == 0 || m->wire_count == 1,
 2325                     ("fictitious page %p has invalid wire count", m));
 2326 
 2327                 /*
 2328                  * If the current page would be the start of a run, check its
 2329                  * physical address against the end, alignment, and boundary
 2330                  * conditions.  If it doesn't satisfy these conditions, either
 2331                  * terminate the scan or advance to the next page that
 2332                  * satisfies the failed condition.
 2333                  */
 2334                 if (run_len == 0) {
 2335                         KASSERT(m_run == NULL, ("m_run != NULL"));
 2336                         if (m + npages > m_end)
 2337                                 break;
 2338                         pa = VM_PAGE_TO_PHYS(m);
 2339                         if ((pa & (alignment - 1)) != 0) {
 2340                                 m_inc = atop(roundup2(pa, alignment) - pa);
 2341                                 continue;
 2342                         }
 2343                         if (rounddown2(pa ^ (pa + ptoa(npages) - 1),
 2344                             boundary) != 0) {
 2345                                 m_inc = atop(roundup2(pa, boundary) - pa);
 2346                                 continue;
 2347                         }
 2348                 } else
 2349                         KASSERT(m_run != NULL, ("m_run == NULL"));
 2350 
 2351                 vm_page_change_lock(m, &m_mtx);
 2352                 m_inc = 1;
 2353 retry:
 2354                 if (vm_page_held(m))
 2355                         run_ext = 0;
 2356 #if VM_NRESERVLEVEL > 0
 2357                 else if ((level = vm_reserv_level(m)) >= 0 &&
 2358                     (options & VPSC_NORESERV) != 0) {
 2359                         run_ext = 0;
 2360                         /* Advance to the end of the reservation. */
 2361                         pa = VM_PAGE_TO_PHYS(m);
 2362                         m_inc = atop(roundup2(pa + 1, vm_reserv_size(level)) -
 2363                             pa);
 2364                 }
 2365 #endif
 2366                 else if ((object = m->object) != NULL) {
 2367                         /*
 2368                          * The page is considered eligible for relocation if
 2369                          * and only if it could be laundered or reclaimed by
 2370                          * the page daemon.
 2371                          */
 2372                         if (!VM_OBJECT_TRYRLOCK(object)) {
 2373                                 mtx_unlock(m_mtx);
 2374                                 VM_OBJECT_RLOCK(object);
 2375                                 mtx_lock(m_mtx);
 2376                                 if (m->object != object) {
 2377                                         /*
 2378                                          * The page may have been freed.
 2379                                          */
 2380                                         VM_OBJECT_RUNLOCK(object);
 2381                                         goto retry;
 2382                                 } else if (vm_page_held(m)) {
 2383                                         run_ext = 0;
 2384                                         goto unlock;
 2385                                 }
 2386                         }
 2387                         KASSERT((m->flags & PG_UNHOLDFREE) == 0,
 2388                             ("page %p is PG_UNHOLDFREE", m));
 2389                         /* Don't care: PG_NODUMP, PG_ZERO. */
 2390                         if (object->type != OBJT_DEFAULT &&
 2391                             object->type != OBJT_SWAP &&
 2392                             object->type != OBJT_VNODE) {
 2393                                 run_ext = 0;
 2394 #if VM_NRESERVLEVEL > 0
 2395                         } else if ((options & VPSC_NOSUPER) != 0 &&
 2396                             (level = vm_reserv_level_iffullpop(m)) >= 0) {
 2397                                 run_ext = 0;
 2398                                 /* Advance to the end of the superpage. */
 2399                                 pa = VM_PAGE_TO_PHYS(m);
 2400                                 m_inc = atop(roundup2(pa + 1,
 2401                                     vm_reserv_size(level)) - pa);
 2402 #endif
 2403                         } else if (object->memattr == VM_MEMATTR_DEFAULT &&
 2404                             vm_page_queue(m) != PQ_NONE && !vm_page_busied(m)) {
 2405                                 /*
 2406                                  * The page is allocated but eligible for
 2407                                  * relocation.  Extend the current run by one
 2408                                  * page.
 2409                                  */
 2410                                 KASSERT(pmap_page_get_memattr(m) ==
 2411                                     VM_MEMATTR_DEFAULT,
 2412                                     ("page %p has an unexpected memattr", m));
 2413                                 KASSERT((m->oflags & (VPO_SWAPINPROG |
 2414                                     VPO_SWAPSLEEP | VPO_UNMANAGED)) == 0,
 2415                                     ("page %p has unexpected oflags", m));
 2416                                 /* Don't care: VPO_NOSYNC. */
 2417                                 run_ext = 1;
 2418                         } else
 2419                                 run_ext = 0;
 2420 unlock:
 2421                         VM_OBJECT_RUNLOCK(object);
 2422 #if VM_NRESERVLEVEL > 0
 2423                 } else if (level >= 0) {
 2424                         /*
 2425                          * The page is reserved but not yet allocated.  In
 2426                          * other words, it is still free.  Extend the current
 2427                          * run by one page.
 2428                          */
 2429                         run_ext = 1;
 2430 #endif
 2431                 } else if ((order = m->order) < VM_NFREEORDER) {
 2432                         /*
 2433                          * The page is enqueued in the physical memory
 2434                          * allocator's free page queues.  Moreover, it is the
 2435                          * first page in a power-of-two-sized run of
 2436                          * contiguous free pages.  Add these pages to the end
 2437                          * of the current run, and jump ahead.
 2438                          */
 2439                         run_ext = 1 << order;
 2440                         m_inc = 1 << order;
 2441                 } else {
 2442                         /*
 2443                          * Skip the page for one of the following reasons: (1)
 2444                          * It is enqueued in the physical memory allocator's
 2445                          * free page queues.  However, it is not the first
 2446                          * page in a run of contiguous free pages.  (This case
 2447                          * rarely occurs because the scan is performed in
 2448                          * ascending order.) (2) It is not reserved, and it is
 2449                          * transitioning from free to allocated.  (Conversely,
 2450                          * the transition from allocated to free for managed
 2451                          * pages is blocked by the page lock.) (3) It is
 2452                          * allocated but not contained by an object and not
 2453                          * wired, e.g., allocated by Xen's balloon driver.
 2454                          */
 2455                         run_ext = 0;
 2456                 }
 2457 
 2458                 /*
 2459                  * Extend or reset the current run of pages.
 2460                  */
 2461                 if (run_ext > 0) {
 2462                         if (run_len == 0)
 2463                                 m_run = m;
 2464                         run_len += run_ext;
 2465                 } else {
 2466                         if (run_len > 0) {
 2467                                 m_run = NULL;
 2468                                 run_len = 0;
 2469                         }
 2470                 }
 2471         }
 2472         if (m_mtx != NULL)
 2473                 mtx_unlock(m_mtx);
 2474         if (run_len >= npages)
 2475                 return (m_run);
 2476         return (NULL);
 2477 }
 2478 
 2479 /*
 2480  *      vm_page_reclaim_run:
 2481  *
 2482  *      Try to relocate each of the allocated virtual pages within the
 2483  *      specified run of physical pages to a new physical address.  Free the
 2484  *      physical pages underlying the relocated virtual pages.  A virtual page
 2485  *      is relocatable if and only if it could be laundered or reclaimed by
 2486  *      the page daemon.  Whenever possible, a virtual page is relocated to a
 2487  *      physical address above "high".
 2488  *
 2489  *      Returns 0 if every physical page within the run was already free or
 2490  *      just freed by a successful relocation.  Otherwise, returns a non-zero
 2491  *      value indicating why the last attempt to relocate a virtual page was
 2492  *      unsuccessful.
 2493  *
 2494  *      "req_class" must be an allocation class.
 2495  */
 2496 static int
 2497 vm_page_reclaim_run(int req_class, int domain, u_long npages, vm_page_t m_run,
 2498     vm_paddr_t high)
 2499 {
 2500         struct vm_domain *vmd;
 2501         struct mtx *m_mtx;
 2502         struct spglist free;
 2503         vm_object_t object;
 2504         vm_paddr_t pa;
 2505         vm_page_t m, m_end, m_new;
 2506         int error, order, req;
 2507 
 2508         KASSERT((req_class & VM_ALLOC_CLASS_MASK) == req_class,
 2509             ("req_class is not an allocation class"));
 2510         SLIST_INIT(&free);
 2511         error = 0;
 2512         m = m_run;
 2513         m_end = m_run + npages;
 2514         m_mtx = NULL;
 2515         for (; error == 0 && m < m_end; m++) {
 2516                 KASSERT((m->flags & (PG_FICTITIOUS | PG_MARKER)) == 0,
 2517                     ("page %p is PG_FICTITIOUS or PG_MARKER", m));
 2518 
 2519                 /*
 2520                  * Avoid releasing and reacquiring the same page lock.
 2521                  */
 2522                 vm_page_change_lock(m, &m_mtx);
 2523 retry:
 2524                 if (vm_page_held(m))
 2525                         error = EBUSY;
 2526                 else if ((object = m->object) != NULL) {
 2527                         /*
 2528                          * The page is relocated if and only if it could be
 2529                          * laundered or reclaimed by the page daemon.
 2530                          */
 2531                         if (!VM_OBJECT_TRYWLOCK(object)) {
 2532                                 mtx_unlock(m_mtx);
 2533                                 VM_OBJECT_WLOCK(object);
 2534                                 mtx_lock(m_mtx);
 2535                                 if (m->object != object) {
 2536                                         /*
 2537                                          * The page may have been freed.
 2538                                          */
 2539                                         VM_OBJECT_WUNLOCK(object);
 2540                                         goto retry;
 2541                                 } else if (vm_page_held(m)) {
 2542                                         error = EBUSY;
 2543                                         goto unlock;
 2544                                 }
 2545                         }
 2546                         KASSERT((m->flags & PG_UNHOLDFREE) == 0,
 2547                             ("page %p is PG_UNHOLDFREE", m));
 2548                         /* Don't care: PG_NODUMP, PG_ZERO. */
 2549                         if (object->type != OBJT_DEFAULT &&
 2550                             object->type != OBJT_SWAP &&
 2551                             object->type != OBJT_VNODE)
 2552                                 error = EINVAL;
 2553                         else if (object->memattr != VM_MEMATTR_DEFAULT)
 2554                                 error = EINVAL;
 2555                         else if (vm_page_queue(m) != PQ_NONE &&
 2556                             !vm_page_busied(m)) {
 2557                                 KASSERT(pmap_page_get_memattr(m) ==
 2558                                     VM_MEMATTR_DEFAULT,
 2559                                     ("page %p has an unexpected memattr", m));
 2560                                 KASSERT((m->oflags & (VPO_SWAPINPROG |
 2561                                     VPO_SWAPSLEEP | VPO_UNMANAGED)) == 0,
 2562                                     ("page %p has unexpected oflags", m));
 2563                                 /* Don't care: VPO_NOSYNC. */
 2564                                 if (m->valid != 0) {
 2565                                         /*
 2566                                          * First, try to allocate a new page
 2567                                          * that is above "high".  Failing
 2568                                          * that, try to allocate a new page
 2569                                          * that is below "m_run".  Allocate
 2570                                          * the new page between the end of
 2571                                          * "m_run" and "high" only as a last
 2572                                          * resort.
 2573                                          */
 2574                                         req = req_class | VM_ALLOC_NOOBJ;
 2575                                         if ((m->flags & PG_NODUMP) != 0)
 2576                                                 req |= VM_ALLOC_NODUMP;
 2577                                         if (trunc_page(high) !=
 2578                                             ~(vm_paddr_t)PAGE_MASK) {
 2579                                                 m_new = vm_page_alloc_contig(
 2580                                                     NULL, 0, req, 1,
 2581                                                     round_page(high),
 2582                                                     ~(vm_paddr_t)0,
 2583                                                     PAGE_SIZE, 0,
 2584                                                     VM_MEMATTR_DEFAULT);
 2585                                         } else
 2586                                                 m_new = NULL;
 2587                                         if (m_new == NULL) {
 2588                                                 pa = VM_PAGE_TO_PHYS(m_run);
 2589                                                 m_new = vm_page_alloc_contig(
 2590                                                     NULL, 0, req, 1,
 2591                                                     0, pa - 1, PAGE_SIZE, 0,
 2592                                                     VM_MEMATTR_DEFAULT);
 2593                                         }
 2594                                         if (m_new == NULL) {
 2595                                                 pa += ptoa(npages);
 2596                                                 m_new = vm_page_alloc_contig(
 2597                                                     NULL, 0, req, 1,
 2598                                                     pa, high, PAGE_SIZE, 0,
 2599                                                     VM_MEMATTR_DEFAULT);
 2600                                         }
 2601                                         if (m_new == NULL) {
 2602                                                 error = ENOMEM;
 2603                                                 goto unlock;
 2604                                         }
 2605                                         KASSERT(m_new->wire_count == 0,
 2606                                             ("page %p is wired", m_new));
 2607 
 2608                                         /*
 2609                                          * Replace "m" with the new page.  For
 2610                                          * vm_page_replace(), "m" must be busy
 2611                                          * and dequeued.  Finally, change "m"
 2612                                          * as if vm_page_free() was called.
 2613                                          */
 2614                                         if (object->ref_count != 0)
 2615                                                 pmap_remove_all(m);
 2616                                         m_new->aflags = m->aflags &
 2617                                             ~PGA_QUEUE_STATE_MASK;
 2618                                         KASSERT(m_new->oflags == VPO_UNMANAGED,
 2619                                             ("page %p is managed", m_new));
 2620                                         m_new->oflags = m->oflags & VPO_NOSYNC;
 2621                                         pmap_copy_page(m, m_new);
 2622                                         m_new->valid = m->valid;
 2623                                         m_new->dirty = m->dirty;
 2624                                         m->flags &= ~PG_ZERO;
 2625                                         vm_page_xbusy(m);
 2626                                         vm_page_dequeue(m);
 2627                                         vm_page_replace_checked(m_new, object,
 2628                                             m->pindex, m);
 2629                                         if (vm_page_free_prep(m))
 2630                                                 SLIST_INSERT_HEAD(&free, m,
 2631                                                     plinks.s.ss);
 2632 
 2633                                         /*
 2634                                          * The new page must be deactivated
 2635                                          * before the object is unlocked.
 2636                                          */
 2637                                         vm_page_change_lock(m_new, &m_mtx);
 2638                                         vm_page_deactivate(m_new);
 2639                                 } else {
 2640                                         m->flags &= ~PG_ZERO;
 2641                                         vm_page_dequeue(m);
 2642                                         vm_page_remove(m);
 2643                                         if (vm_page_free_prep(m))
 2644                                                 SLIST_INSERT_HEAD(&free, m,
 2645                                                     plinks.s.ss);
 2646                                         KASSERT(m->dirty == 0,
 2647                                             ("page %p is dirty", m));
 2648                                 }
 2649                         } else
 2650                                 error = EBUSY;
 2651 unlock:
 2652                         VM_OBJECT_WUNLOCK(object);
 2653                 } else {
 2654                         MPASS(vm_phys_domain(m) == domain);
 2655                         vmd = VM_DOMAIN(domain);
 2656                         vm_domain_free_lock(vmd);
 2657                         order = m->order;
 2658                         if (order < VM_NFREEORDER) {
 2659                                 /*
 2660                                  * The page is enqueued in the physical memory
 2661                                  * allocator's free page queues.  Moreover, it
 2662                                  * is the first page in a power-of-two-sized
 2663                                  * run of contiguous free pages.  Jump ahead
 2664                                  * to the last page within that run, and
 2665                                  * continue from there.
 2666                                  */
 2667                                 m += (1 << order) - 1;
 2668                         }
 2669 #if VM_NRESERVLEVEL > 0
 2670                         else if (vm_reserv_is_page_free(m))
 2671                                 order = 0;
 2672 #endif
 2673                         vm_domain_free_unlock(vmd);
 2674                         if (order == VM_NFREEORDER)
 2675                                 error = EINVAL;
 2676                 }
 2677         }
 2678         if (m_mtx != NULL)
 2679                 mtx_unlock(m_mtx);
 2680         if ((m = SLIST_FIRST(&free)) != NULL) {
 2681                 int cnt;
 2682 
 2683                 vmd = VM_DOMAIN(domain);
 2684                 cnt = 0;
 2685                 vm_domain_free_lock(vmd);
 2686                 do {
 2687                         MPASS(vm_phys_domain(m) == domain);
 2688                         SLIST_REMOVE_HEAD(&free, plinks.s.ss);
 2689                         vm_phys_free_pages(m, 0);
 2690                         cnt++;
 2691                 } while ((m = SLIST_FIRST(&free)) != NULL);
 2692                 vm_domain_free_unlock(vmd);
 2693                 vm_domain_freecnt_inc(vmd, cnt);
 2694         }
 2695         return (error);
 2696 }
 2697 
 2698 #define NRUNS   16
 2699 
 2700 CTASSERT(powerof2(NRUNS));
 2701 
 2702 #define RUN_INDEX(count)        ((count) & (NRUNS - 1))
 2703 
 2704 #define MIN_RECLAIM     8
 2705 
 2706 /*
 2707  *      vm_page_reclaim_contig:
 2708  *
 2709  *      Reclaim allocated, contiguous physical memory satisfying the specified
 2710  *      conditions by relocating the virtual pages using that physical memory.
 2711  *      Returns true if reclamation is successful and false otherwise.  Since
 2712  *      relocation requires the allocation of physical pages, reclamation may
 2713  *      fail due to a shortage of free pages.  When reclamation fails, callers
 2714  *      are expected to perform vm_wait() before retrying a failed allocation
 2715  *      operation, e.g., vm_page_alloc_contig().
 2716  *
 2717  *      The caller must always specify an allocation class through "req".
 2718  *
 2719  *      allocation classes:
 2720  *      VM_ALLOC_NORMAL         normal process request
 2721  *      VM_ALLOC_SYSTEM         system *really* needs a page
 2722  *      VM_ALLOC_INTERRUPT      interrupt time request
 2723  *
 2724  *      The optional allocation flags are ignored.
 2725  *
 2726  *      "npages" must be greater than zero.  Both "alignment" and "boundary"
 2727  *      must be a power of two.
 2728  */
 2729 bool
 2730 vm_page_reclaim_contig_domain(int domain, int req, u_long npages,
 2731     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
 2732 {
 2733         struct vm_domain *vmd;
 2734         vm_paddr_t curr_low;
 2735         vm_page_t m_run, m_runs[NRUNS];
 2736         u_long count, reclaimed;
 2737         int error, i, options, req_class;
 2738 
 2739         KASSERT(npages > 0, ("npages is 0"));
 2740         KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
 2741         KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
 2742         req_class = req & VM_ALLOC_CLASS_MASK;
 2743 
 2744         /*
 2745          * The page daemon is allowed to dig deeper into the free page list.
 2746          */
 2747         if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
 2748                 req_class = VM_ALLOC_SYSTEM;
 2749 
 2750         /*
 2751          * Return if the number of free pages cannot satisfy the requested
 2752          * allocation.
 2753          */
 2754         vmd = VM_DOMAIN(domain);
 2755         count = vmd->vmd_free_count;
 2756         if (count < npages + vmd->vmd_free_reserved || (count < npages +
 2757             vmd->vmd_interrupt_free_min && req_class == VM_ALLOC_SYSTEM) ||
 2758             (count < npages && req_class == VM_ALLOC_INTERRUPT))
 2759                 return (false);
 2760 
 2761         /*
 2762          * Scan up to three times, relaxing the restrictions ("options") on
 2763          * the reclamation of reservations and superpages each time.
 2764          */
 2765         for (options = VPSC_NORESERV;;) {
 2766                 /*
 2767                  * Find the highest runs that satisfy the given constraints
 2768                  * and restrictions, and record them in "m_runs".
 2769                  */
 2770                 curr_low = low;
 2771                 count = 0;
 2772                 for (;;) {
 2773                         m_run = vm_phys_scan_contig(domain, npages, curr_low,
 2774                             high, alignment, boundary, options);
 2775                         if (m_run == NULL)
 2776                                 break;
 2777                         curr_low = VM_PAGE_TO_PHYS(m_run) + ptoa(npages);
 2778                         m_runs[RUN_INDEX(count)] = m_run;
 2779                         count++;
 2780                 }
 2781 
 2782                 /*
 2783                  * Reclaim the highest runs in LIFO (descending) order until
 2784                  * the number of reclaimed pages, "reclaimed", is at least
 2785                  * MIN_RECLAIM.  Reset "reclaimed" each time because each
 2786                  * reclamation is idempotent, and runs will (likely) recur
 2787                  * from one scan to the next as restrictions are relaxed.
 2788                  */
 2789                 reclaimed = 0;
 2790                 for (i = 0; count > 0 && i < NRUNS; i++) {
 2791                         count--;
 2792                         m_run = m_runs[RUN_INDEX(count)];
 2793                         error = vm_page_reclaim_run(req_class, domain, npages,
 2794                             m_run, high);
 2795                         if (error == 0) {
 2796                                 reclaimed += npages;
 2797                                 if (reclaimed >= MIN_RECLAIM)
 2798                                         return (true);
 2799                         }
 2800                 }
 2801 
 2802                 /*
 2803                  * Either relax the restrictions on the next scan or return if
 2804                  * the last scan had no restrictions.
 2805                  */
 2806                 if (options == VPSC_NORESERV)
 2807                         options = VPSC_NOSUPER;
 2808                 else if (options == VPSC_NOSUPER)
 2809                         options = VPSC_ANY;
 2810                 else if (options == VPSC_ANY)
 2811                         return (reclaimed != 0);
 2812         }
 2813 }
 2814 
 2815 bool
 2816 vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low, vm_paddr_t high,
 2817     u_long alignment, vm_paddr_t boundary)
 2818 {
 2819         struct vm_domainset_iter di;
 2820         int domain;
 2821         bool ret;
 2822 
 2823         vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req);
 2824         do {
 2825                 ret = vm_page_reclaim_contig_domain(domain, req, npages, low,
 2826                     high, alignment, boundary);
 2827                 if (ret)
 2828                         break;
 2829         } while (vm_domainset_iter_page(&di, NULL, &domain) == 0);
 2830 
 2831         return (ret);
 2832 }
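
/*
 * A minimal sketch of the retry pattern described in the comment above
 * vm_page_reclaim_contig_domain(), assuming a caller that is permitted to
 * sleep; "obj", "pindex", "memattr", and the other variables are
 * illustrative only:
 *
 *	while ((m = vm_page_alloc_contig(obj, pindex, req, npages, low,
 *	    high, alignment, boundary, memattr)) == NULL) {
 *		if (!vm_page_reclaim_contig(req, npages, low, high,
 *		    alignment, boundary))
 *			vm_wait(obj);
 *	}
 */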
 2833 
 2834 /*
 2835  * Set the domain in the appropriate page level domainset.
 2836  */
 2837 void
 2838 vm_domain_set(struct vm_domain *vmd)
 2839 {
 2840 
 2841         mtx_lock(&vm_domainset_lock);
 2842         if (!vmd->vmd_minset && vm_paging_min(vmd)) {
 2843                 vmd->vmd_minset = 1;
 2844                 DOMAINSET_SET(vmd->vmd_domain, &vm_min_domains);
 2845         }
 2846         if (!vmd->vmd_severeset && vm_paging_severe(vmd)) {
 2847                 vmd->vmd_severeset = 1;
 2848                 DOMAINSET_SET(vmd->vmd_domain, &vm_severe_domains);
 2849         }
 2850         mtx_unlock(&vm_domainset_lock);
 2851 }
 2852 
 2853 /*
 2854  * Clear the domain from the appropriate page level domainset.
 2855  */
 2856 void
 2857 vm_domain_clear(struct vm_domain *vmd)
 2858 {
 2859 
 2860         mtx_lock(&vm_domainset_lock);
 2861         if (vmd->vmd_minset && !vm_paging_min(vmd)) {
 2862                 vmd->vmd_minset = 0;
 2863                 DOMAINSET_CLR(vmd->vmd_domain, &vm_min_domains);
 2864                 if (vm_min_waiters != 0) {
 2865                         vm_min_waiters = 0;
 2866                         wakeup(&vm_min_domains);
 2867                 }
 2868         }
 2869         if (vmd->vmd_severeset && !vm_paging_severe(vmd)) {
 2870                 vmd->vmd_severeset = 0;
 2871                 DOMAINSET_CLR(vmd->vmd_domain, &vm_severe_domains);
 2872                 if (vm_severe_waiters != 0) {
 2873                         vm_severe_waiters = 0;
 2874                         wakeup(&vm_severe_domains);
 2875                 }
 2876         }
 2877 
 2878         /*
 2879          * If the pageout daemon needs pages, then tell it that there are
 2880          * some free.
 2881          */
 2882         if (vmd->vmd_pageout_pages_needed &&
 2883             vmd->vmd_free_count >= vmd->vmd_pageout_free_min) {
 2884                 wakeup(&vmd->vmd_pageout_pages_needed);
 2885                 vmd->vmd_pageout_pages_needed = 0;
 2886         }
 2887 
 2888         /* See comments in vm_wait_doms(). */
 2889         if (vm_pageproc_waiters) {
 2890                 vm_pageproc_waiters = 0;
 2891                 wakeup(&vm_pageproc_waiters);
 2892         }
 2893         mtx_unlock(&vm_domainset_lock);
 2894 }
 2895 
 2896 /*
 2897  * Wait for free pages to exceed the min threshold globally.
 2898  */
 2899 void
 2900 vm_wait_min(void)
 2901 {
 2902 
 2903         mtx_lock(&vm_domainset_lock);
 2904         while (vm_page_count_min()) {
 2905                 vm_min_waiters++;
 2906                 msleep(&vm_min_domains, &vm_domainset_lock, PVM, "vmwait", 0);
 2907         }
 2908         mtx_unlock(&vm_domainset_lock);
 2909 }
 2910 
 2911 /*
 2912  * Wait for free pages to exceed the severe threshold globally.
 2913  */
 2914 void
 2915 vm_wait_severe(void)
 2916 {
 2917 
 2918         mtx_lock(&vm_domainset_lock);
 2919         while (vm_page_count_severe()) {
 2920                 vm_severe_waiters++;
 2921                 msleep(&vm_severe_domains, &vm_domainset_lock, PVM,
 2922                     "vmwait", 0);
 2923         }
 2924         mtx_unlock(&vm_domainset_lock);
 2925 }
 2926 
 2927 u_int
 2928 vm_wait_count(void)
 2929 {
 2930 
 2931         return (vm_severe_waiters + vm_min_waiters + vm_pageproc_waiters);
 2932 }
 2933 
 2934 void
 2935 vm_wait_doms(const domainset_t *wdoms)
 2936 {
 2937 
 2938         /*
 2939          * We use racy wakeup synchronization to avoid expensive global
 2940          * locking for the pageproc when sleeping with a non-specific vm_wait.
 2941          * To handle this, we only sleep for one tick in this instance.  It
 2942          * is expected that most allocations for the pageproc will come from
 2943          * kmem or vm_page_grab* which will use the more specific and
 2944          * race-free vm_wait_domain().
 2945          */
 2946         if (curproc == pageproc) {
 2947                 mtx_lock(&vm_domainset_lock);
 2948                 vm_pageproc_waiters++;
 2949                 msleep(&vm_pageproc_waiters, &vm_domainset_lock, PVM | PDROP,
 2950                     "pageprocwait", 1);
 2951         } else {
 2952                 /*
 2953                  * XXX Ideally we would wait only until the allocation could
 2954                  * be satisfied.  This condition can cause new allocators to
 2955                  * consume all freed pages while old allocators wait.
 2956                  */
 2957                 mtx_lock(&vm_domainset_lock);
 2958                 if (vm_page_count_min_set(wdoms)) {
 2959                         vm_min_waiters++;
 2960                         msleep(&vm_min_domains, &vm_domainset_lock,
 2961                             PVM | PDROP, "vmwait", 0);
 2962                 } else
 2963                         mtx_unlock(&vm_domainset_lock);
 2964         }
 2965 }
 2966 
 2967 /*
 2968  *      vm_wait_domain:
 2969  *
 2970  *      Sleep until free pages are available for allocation.
 2971  *      - Called in various places after failed memory allocations.
 2972  */
 2973 void
 2974 vm_wait_domain(int domain)
 2975 {
 2976         struct vm_domain *vmd;
 2977         domainset_t wdom;
 2978 
 2979         vmd = VM_DOMAIN(domain);
 2980         vm_domain_free_assert_unlocked(vmd);
 2981 
 2982         if (curproc == pageproc) {
 2983                 mtx_lock(&vm_domainset_lock);
 2984                 if (vmd->vmd_free_count < vmd->vmd_pageout_free_min) {
 2985                         vmd->vmd_pageout_pages_needed = 1;
 2986                         msleep(&vmd->vmd_pageout_pages_needed,
 2987                             &vm_domainset_lock, PDROP | PSWP, "VMWait", 0);
 2988                 } else
 2989                         mtx_unlock(&vm_domainset_lock);
 2990         } else {
 2991                 if (pageproc == NULL)
 2992                         panic("vm_wait in early boot");
 2993                 DOMAINSET_ZERO(&wdom);
 2994                 DOMAINSET_SET(vmd->vmd_domain, &wdom);
 2995                 vm_wait_doms(&wdom);
 2996         }
 2997 }
 2998 
 2999 /*
 3000  *      vm_wait:
 3001  *
 3002  *      Sleep until free pages are available for allocation in the
 3003  *      affinity domains of the obj.  If obj is NULL, the domain set
 3004  *      for the calling thread is used.
 3005  *      Called in various places after failed memory allocations.
 3006  */
 3007 void
 3008 vm_wait(vm_object_t obj)
 3009 {
 3010         struct domainset *d;
 3011 
 3012         d = NULL;
 3013 
 3014         /*
 3015          * Carefully fetch pointers only once: the struct domainset
 3016          * itself is immutable but the pointer might change.
 3017          */
 3018         if (obj != NULL)
 3019                 d = obj->domain.dr_policy;
 3020         if (d == NULL)
 3021                 d = curthread->td_domain.dr_policy;
 3022 
 3023         vm_wait_doms(&d->ds_mask);
 3024 }
 3025 
 3026 /*
 3027  *      vm_domain_alloc_fail:
 3028  *
 3029  *      Called when a page allocation function fails.  Informs the
 3030  *      pagedaemon and performs the requested wait.  The object lock, if
 3031  *      any, must be held on entry, and the per-domain free lock must not
 3032  *      be held.  Returns with the object lock still held.  Returns an
 3033  *      error when retry is necessary.
 3034  *
 3035  */
 3036 static int
 3037 vm_domain_alloc_fail(struct vm_domain *vmd, vm_object_t object, int req)
 3038 {
 3039 
 3040         vm_domain_free_assert_unlocked(vmd);
 3041 
 3042         atomic_add_int(&vmd->vmd_pageout_deficit,
 3043             max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1));
 3044         if (req & (VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL)) {
 3045                 if (object != NULL) 
 3046                         VM_OBJECT_WUNLOCK(object);
 3047                 vm_wait_domain(vmd->vmd_domain);
 3048                 if (object != NULL) 
 3049                         VM_OBJECT_WLOCK(object);
 3050                 if (req & VM_ALLOC_WAITOK)
 3051                         return (EAGAIN);
 3052         }
 3053 
 3054         return (0);
 3055 }
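
/*
 * A sketch of how a page allocator is expected to use
 * vm_domain_alloc_fail(), assuming "object" is locked (or NULL) and the
 * per-domain free lock is not held; the label and surrounding code are
 * illustrative only:
 *
 *	again:
 *		...
 *		if (m == NULL) {
 *			if (vm_domain_alloc_fail(vmd, object, req) != 0)
 *				goto again;
 *			return (NULL);
 *		}
 */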
 3056 
 3057 /*
 3058  *      vm_waitpfault:
 3059  *
 3060  *      Sleep until free pages are available for allocation.
 3061  *      - Called only in vm_fault so that processes page faulting
 3062  *        can be easily tracked.
 3063  *      - Sleeps at a lower priority than vm_wait() so that vm_wait()ing
 3064  *        processes will be able to grab memory first.  Do not change
 3065  *        this balance without careful testing first.
 3066  */
 3067 void
 3068 vm_waitpfault(struct domainset *dset)
 3069 {
 3070 
 3071         /*
 3072          * XXX Ideally we would wait only until the allocation could
 3073          * be satisfied.  This condition can cause new allocators to
 3074          * consume all freed pages while old allocators wait.
 3075          */
 3076         mtx_lock(&vm_domainset_lock);
 3077         if (vm_page_count_min_set(&dset->ds_mask)) {
 3078                 vm_min_waiters++;
 3079                 msleep(&vm_min_domains, &vm_domainset_lock, PUSER | PDROP,
 3080                     "pfault", 0);
 3081         } else
 3082                 mtx_unlock(&vm_domainset_lock);
 3083 }
 3084 
 3085 struct vm_pagequeue *
 3086 vm_page_pagequeue(vm_page_t m)
 3087 {
 3088 
 3089         return (&vm_pagequeue_domain(m)->vmd_pagequeues[m->queue]);
 3090 }
 3091 
 3092 static struct mtx *
 3093 vm_page_pagequeue_lockptr(vm_page_t m)
 3094 {
 3095         uint8_t queue;
 3096 
 3097         if ((queue = atomic_load_8(&m->queue)) == PQ_NONE)
 3098                 return (NULL);
 3099         return (&vm_pagequeue_domain(m)->vmd_pagequeues[queue].pq_mutex);
 3100 }
 3101 
 3102 static inline void
 3103 vm_pqbatch_process_page(struct vm_pagequeue *pq, vm_page_t m)
 3104 {
 3105         struct vm_domain *vmd;
 3106         uint8_t qflags;
 3107 
 3108         CRITICAL_ASSERT(curthread);
 3109         vm_pagequeue_assert_locked(pq);
 3110 
 3111         /*
 3112          * The page daemon is allowed to set m->queue = PQ_NONE without
 3113          * the page queue lock held.  In this case it is about to free the page,
 3114          * which must not have any queue state.
 3115          */
 3116         qflags = atomic_load_8(&m->aflags) & PGA_QUEUE_STATE_MASK;
 3117         KASSERT(pq == vm_page_pagequeue(m) || qflags == 0,
 3118             ("page %p doesn't belong to queue %p but has queue state %#x",
 3119             m, pq, qflags));
 3120 
 3121         if ((qflags & PGA_DEQUEUE) != 0) {
 3122                 if (__predict_true((qflags & PGA_ENQUEUED) != 0)) {
 3123                         TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
 3124                         vm_pagequeue_cnt_dec(pq);
 3125                 }
 3126                 vm_page_dequeue_complete(m);
 3127         } else if ((qflags & (PGA_REQUEUE | PGA_REQUEUE_HEAD)) != 0) {
 3128                 if ((qflags & PGA_ENQUEUED) != 0)
 3129                         TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
 3130                 else {
 3131                         vm_pagequeue_cnt_inc(pq);
 3132                         vm_page_aflag_set(m, PGA_ENQUEUED);
 3133                 }
 3134                 if ((qflags & PGA_REQUEUE_HEAD) != 0) {
 3135                         KASSERT(m->queue == PQ_INACTIVE,
 3136                             ("head enqueue not supported for page %p", m));
 3137                         vmd = vm_pagequeue_domain(m);
 3138                         TAILQ_INSERT_BEFORE(&vmd->vmd_inacthead, m, plinks.q);
 3139                 } else
 3140                         TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
 3141 
 3142                 /*
 3143                  * PGA_REQUEUE and PGA_REQUEUE_HEAD must be cleared after
 3144                  * setting PGA_ENQUEUED in order to synchronize with the
 3145                  * page daemon.
 3146                  */
 3147                 vm_page_aflag_clear(m, PGA_REQUEUE | PGA_REQUEUE_HEAD);
 3148         }
 3149 }
 3150 
 3151 static void
 3152 vm_pqbatch_process(struct vm_pagequeue *pq, struct vm_batchqueue *bq,
 3153     uint8_t queue)
 3154 {
 3155         vm_page_t m;
 3156         int i;
 3157 
 3158         for (i = 0; i < bq->bq_cnt; i++) {
 3159                 m = bq->bq_pa[i];
 3160                 if (__predict_false(m->queue != queue))
 3161                         continue;
 3162                 vm_pqbatch_process_page(pq, m);
 3163         }
 3164         vm_batchqueue_init(bq);
 3165 }
 3166 
 3167 static void
 3168 vm_pqbatch_submit_page(vm_page_t m, uint8_t queue)
 3169 {
 3170         struct vm_batchqueue *bq;
 3171         struct vm_pagequeue *pq;
 3172         int domain;
 3173 
 3174         vm_page_assert_locked(m);
 3175         KASSERT(queue < PQ_COUNT, ("invalid queue %d", queue));
 3176 
 3177         domain = vm_phys_domain(m);
 3178         pq = &vm_pagequeue_domain(m)->vmd_pagequeues[queue];
 3179 
 3180         critical_enter();
 3181         bq = DPCPU_PTR(pqbatch[domain][queue]);
 3182         if (vm_batchqueue_insert(bq, m)) {
 3183                 critical_exit();
 3184                 return;
 3185         }
 3186         if (!vm_pagequeue_trylock(pq)) {
 3187                 critical_exit();
 3188                 vm_pagequeue_lock(pq);
 3189                 critical_enter();
 3190                 bq = DPCPU_PTR(pqbatch[domain][queue]);
 3191         }
 3192         vm_pqbatch_process(pq, bq, queue);
 3193 
 3194         /*
 3195          * The page may have been logically dequeued before we acquired the
 3196          * page queue lock.  In this case, the page lock prevents the page
 3197          * from being logically enqueued elsewhere.
 3198          */
 3199         if (__predict_true(m->queue == queue))
 3200                 vm_pqbatch_process_page(pq, m);
 3201         else {
 3202                 KASSERT(m->queue == PQ_NONE,
 3203                     ("invalid queue transition for page %p", m));
 3204                 KASSERT((m->aflags & PGA_ENQUEUED) == 0,
 3205                     ("page %p is enqueued with invalid queue index", m));
 3206                 vm_page_aflag_clear(m, PGA_QUEUE_STATE_MASK);
 3207         }
 3208         vm_pagequeue_unlock(pq);
 3209         critical_exit();
 3210 }
 3211 
 3212 /*
 3213  *      vm_page_drain_pqbatch:          [ internal use only ]
 3214  *
 3215  *      Force all per-CPU page queue batch queues to be drained.  This is
 3216  *      intended for use in severe memory shortages, to ensure that pages
 3217  *      do not remain stuck in the batch queues.
 3218  */
 3219 void
 3220 vm_page_drain_pqbatch(void)
 3221 {
 3222         struct thread *td;
 3223         struct vm_domain *vmd;
 3224         struct vm_pagequeue *pq;
 3225         int cpu, domain, queue;
 3226 
 3227         td = curthread;
 3228         CPU_FOREACH(cpu) {
 3229                 thread_lock(td);
 3230                 sched_bind(td, cpu);
 3231                 thread_unlock(td);
 3232 
 3233                 for (domain = 0; domain < vm_ndomains; domain++) {
 3234                         vmd = VM_DOMAIN(domain);
 3235                         for (queue = 0; queue < PQ_COUNT; queue++) {
 3236                                 pq = &vmd->vmd_pagequeues[queue];
 3237                                 vm_pagequeue_lock(pq);
 3238                                 critical_enter();
 3239                                 vm_pqbatch_process(pq,
 3240                                     DPCPU_PTR(pqbatch[domain][queue]), queue);
 3241                                 critical_exit();
 3242                                 vm_pagequeue_unlock(pq);
 3243                         }
 3244                 }
 3245         }
 3246         thread_lock(td);
 3247         sched_unbind(td);
 3248         thread_unlock(td);
 3249 }
 3250 
 3251 /*
 3252  * Complete the logical removal of a page from a page queue.  We must be
 3253  * careful to synchronize with the page daemon, which may be concurrently
 3254  * examining the page with only the page lock held.  The page must not be
 3255  * in a state where it appears to be logically enqueued.
 3256  */
 3257 static void
 3258 vm_page_dequeue_complete(vm_page_t m)
 3259 {
 3260 
 3261         m->queue = PQ_NONE;
 3262         atomic_thread_fence_rel();
 3263         vm_page_aflag_clear(m, PGA_QUEUE_STATE_MASK);
 3264 }
 3265 
 3266 /*
 3267  *      vm_page_dequeue_deferred:       [ internal use only ]
 3268  *
 3269  *      Request removal of the given page from its current page
 3270  *      queue.  Physical removal from the queue may be deferred
 3271  *      indefinitely.
 3272  *
 3273  *      The page must be locked.
 3274  */
 3275 void
 3276 vm_page_dequeue_deferred(vm_page_t m)
 3277 {
 3278         int queue;
 3279 
 3280         vm_page_assert_locked(m);
 3281 
 3282         queue = atomic_load_8(&m->queue);
 3283         if (queue == PQ_NONE) {
 3284                 KASSERT((m->aflags & PGA_QUEUE_STATE_MASK) == 0,
 3285                     ("page %p has queue state", m));
 3286                 return;
 3287         }
 3288         if ((m->aflags & PGA_DEQUEUE) == 0)
 3289                 vm_page_aflag_set(m, PGA_DEQUEUE);
 3290         vm_pqbatch_submit_page(m, queue);
 3291 }
 3292 
 3293 /*
 3294  *      vm_page_dequeue:
 3295  *
 3296  *      Remove the page from whichever page queue it's in, if any.
 3297  *      The page must either be locked or unallocated.  This constraint
 3298  *      ensures that the queue state of the page will remain consistent
 3299  *      after this function returns.
 3300  */
 3301 void
 3302 vm_page_dequeue(vm_page_t m)
 3303 {
 3304         struct mtx *lock, *lock1;
 3305         struct vm_pagequeue *pq;
 3306         uint8_t aflags;
 3307 
 3308         KASSERT(mtx_owned(vm_page_lockptr(m)) || m->order == VM_NFREEORDER,
 3309             ("page %p is allocated and unlocked", m));
 3310 
 3311         for (;;) {
 3312                 lock = vm_page_pagequeue_lockptr(m);
 3313                 if (lock == NULL) {
 3314                         /*
 3315                          * A thread may be concurrently executing
 3316                          * vm_page_dequeue_complete().  Ensure that all queue
 3317                          * state is cleared before we return.
 3318                          */
 3319                         aflags = atomic_load_8(&m->aflags);
 3320                         if ((aflags & PGA_QUEUE_STATE_MASK) == 0)
 3321                                 return;
 3322                         KASSERT((aflags & PGA_DEQUEUE) != 0,
 3323                             ("page %p has unexpected queue state flags %#x",
 3324                             m, aflags));
 3325 
 3326                         /*
 3327                          * Busy wait until the thread updating queue state is
 3328                          * finished.  Such a thread must be executing in a
 3329                          * critical section.
 3330                          */
 3331                         cpu_spinwait();
 3332                         continue;
 3333                 }
 3334                 mtx_lock(lock);
 3335                 if ((lock1 = vm_page_pagequeue_lockptr(m)) == lock)
 3336                         break;
 3337                 mtx_unlock(lock);
 3338                 lock = lock1;
 3339         }
 3340         KASSERT(lock == vm_page_pagequeue_lockptr(m),
 3341             ("%s: page %p migrated directly between queues", __func__, m));
 3342         KASSERT((m->aflags & PGA_DEQUEUE) != 0 ||
 3343             mtx_owned(vm_page_lockptr(m)),
 3344             ("%s: queued unlocked page %p", __func__, m));
 3345 
 3346         if ((m->aflags & PGA_ENQUEUED) != 0) {
 3347                 pq = vm_page_pagequeue(m);
 3348                 TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
 3349                 vm_pagequeue_cnt_dec(pq);
 3350         }
 3351         vm_page_dequeue_complete(m);
 3352         mtx_unlock(lock);
 3353 }
 3354 
 3355 /*
 3356  * Schedule the given page for insertion into the specified page queue.
 3357  * Physical insertion of the page may be deferred indefinitely.
 3358  */
 3359 static void
 3360 vm_page_enqueue(vm_page_t m, uint8_t queue)
 3361 {
 3362 
 3363         vm_page_assert_locked(m);
 3364         KASSERT(m->queue == PQ_NONE && (m->aflags & PGA_QUEUE_STATE_MASK) == 0,
 3365             ("%s: page %p is already enqueued", __func__, m));
 3366 
 3367         m->queue = queue;
 3368         if ((m->aflags & PGA_REQUEUE) == 0)
 3369                 vm_page_aflag_set(m, PGA_REQUEUE);
 3370         vm_pqbatch_submit_page(m, queue);
 3371 }
 3372 
 3373 /*
 3374  *      vm_page_requeue:                [ internal use only ]
 3375  *
 3376  *      Schedule a requeue of the given page.
 3377  *
 3378  *      The page must be locked.
 3379  */
 3380 void
 3381 vm_page_requeue(vm_page_t m)
 3382 {
 3383 
 3384         vm_page_assert_locked(m);
 3385         KASSERT(m->queue != PQ_NONE,
 3386             ("%s: page %p is not logically enqueued", __func__, m));
 3387 
 3388         if ((m->aflags & PGA_REQUEUE) == 0)
 3389                 vm_page_aflag_set(m, PGA_REQUEUE);
 3390         vm_pqbatch_submit_page(m, atomic_load_8(&m->queue));
 3391 }
 3392 
 3393 /*
 3394  *      vm_page_activate:
 3395  *
 3396  *      Put the specified page on the active list (if appropriate).
 3397  *      Ensure that act_count is at least ACT_INIT but do not otherwise
 3398  *      mess with it.
 3399  *
 3400  *      The page must be locked.
 3401  */
 3402 void
 3403 vm_page_activate(vm_page_t m)
 3404 {
 3405 
 3406         vm_page_assert_locked(m);
 3407 
 3408         if (m->wire_count > 0 || (m->oflags & VPO_UNMANAGED) != 0)
 3409                 return;
 3410         if (vm_page_queue(m) == PQ_ACTIVE) {
 3411                 if (m->act_count < ACT_INIT)
 3412                         m->act_count = ACT_INIT;
 3413                 return;
 3414         }
 3415 
 3416         vm_page_dequeue(m);
 3417         if (m->act_count < ACT_INIT)
 3418                 m->act_count = ACT_INIT;
 3419         vm_page_enqueue(m, PQ_ACTIVE);
 3420 }
 3421 
 3422 /*
 3423  *      vm_page_free_prep:
 3424  *
 3425  *      Prepares the given page to be put on the free list,
 3426  *      disassociating it from any VM object. The caller may return
 3427  *      the page to the free list only if this function returns true.
 3428  *
 3429  *      The object must be locked.  The page must be locked if it is
 3430  *      managed.
 3431  */
 3432 bool
 3433 vm_page_free_prep(vm_page_t m)
 3434 {
 3435 
 3436 #if defined(DIAGNOSTIC) && defined(PHYS_TO_DMAP)
 3437         if (PMAP_HAS_DMAP && (m->flags & PG_ZERO) != 0) {
 3438                 uint64_t *p;
 3439                 int i;
 3440                 p = (uint64_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
 3441                 for (i = 0; i < PAGE_SIZE / sizeof(uint64_t); i++, p++)
 3442                         KASSERT(*p == 0, ("vm_page_free_prep %p PG_ZERO %d %jx",
 3443                             m, i, (uintmax_t)*p));
 3444         }
 3445 #endif
 3446         if ((m->oflags & VPO_UNMANAGED) == 0) {
 3447                 vm_page_lock_assert(m, MA_OWNED);
 3448                 KASSERT(!pmap_page_is_mapped(m),
 3449                     ("vm_page_free_prep: freeing mapped page %p", m));
 3450         } else
 3451                 KASSERT(m->queue == PQ_NONE,
 3452                     ("vm_page_free_prep: unmanaged page %p is queued", m));
 3453         VM_CNT_INC(v_tfree);
 3454 
 3455         if (vm_page_sbusied(m))
 3456                 panic("vm_page_free_prep: freeing busy page %p", m);
 3457 
 3458         vm_page_remove(m);
 3459 
 3460         /*
 3461          * If the page is fictitious, remove the object association and
 3462          * return.
 3463          */
 3464         if ((m->flags & PG_FICTITIOUS) != 0) {
 3465                 KASSERT(m->wire_count == 1,
 3466                     ("fictitious page %p is not wired", m));
 3467                 KASSERT(m->queue == PQ_NONE,
 3468                     ("fictitious page %p is queued", m));
 3469                 return (false);
 3470         }
 3471 
 3472         /*
 3473          * Pages need not be dequeued before they are returned to the physical
 3474          * memory allocator, but they must at least be marked for a deferred
 3475          * dequeue.
 3476          */
 3477         if ((m->oflags & VPO_UNMANAGED) == 0)
 3478                 vm_page_dequeue_deferred(m);
 3479 
 3480         m->valid = 0;
 3481         vm_page_undirty(m);
 3482 
 3483         if (m->wire_count != 0)
 3484                 panic("vm_page_free_prep: freeing wired page %p", m);
 3485         if (m->hold_count != 0) {
 3486                 m->flags &= ~PG_ZERO;
 3487                 KASSERT((m->flags & PG_UNHOLDFREE) == 0,
 3488                     ("vm_page_free_prep: freeing PG_UNHOLDFREE page %p", m));
 3489                 m->flags |= PG_UNHOLDFREE;
 3490                 return (false);
 3491         }
 3492 
 3493         /*
 3494          * Restore the default memory attribute to the page.
 3495          */
 3496         if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
 3497                 pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);
 3498 
 3499 #if VM_NRESERVLEVEL > 0
 3500         if (vm_reserv_free_page(m))
 3501                 return (false);
 3502 #endif
 3503 
 3504         return (true);
 3505 }
 3506 
 3507 /*
 3508  *      vm_page_free_toq:
 3509  *
 3510  *      Returns the given page to the free list, disassociating it
 3511  *      from any VM object.
 3512  *
 3513  *      The object must be locked.  The page must be locked if it is
 3514  *      managed.
 3515  */
 3516 void
 3517 vm_page_free_toq(vm_page_t m)
 3518 {
 3519         struct vm_domain *vmd;
 3520 
 3521         if (!vm_page_free_prep(m))
 3522                 return;
 3523 
 3524         vmd = vm_pagequeue_domain(m);
 3525         if (m->pool == VM_FREEPOOL_DEFAULT && vmd->vmd_pgcache != NULL) {
 3526                 uma_zfree(vmd->vmd_pgcache, m);
 3527                 return;
 3528         }
 3529         vm_domain_free_lock(vmd);
 3530         vm_phys_free_pages(m, 0);
 3531         vm_domain_free_unlock(vmd);
 3532         vm_domain_freecnt_inc(vmd, 1);
 3533 }
 3534 
 3535 /*
 3536  *      vm_page_free_pages_toq:
 3537  *
 3538  *      Returns a list of pages to the free list, disassociating them
 3539  *      from any VM object.  In other words, this is equivalent to
 3540  *      calling vm_page_free_toq() for each page on the list.
 3541  *
 3542  *      The objects must be locked.  The pages must be locked if they are
 3543  *      managed.
 3544  */
 3545 void
 3546 vm_page_free_pages_toq(struct spglist *free, bool update_wire_count)
 3547 {
 3548         vm_page_t m;
 3549         int count;
 3550 
 3551         if (SLIST_EMPTY(free))
 3552                 return;
 3553 
 3554         count = 0;
 3555         while ((m = SLIST_FIRST(free)) != NULL) {
 3556                 count++;
 3557                 SLIST_REMOVE_HEAD(free, plinks.s.ss);
 3558                 vm_page_free_toq(m);
 3559         }
 3560 
 3561         if (update_wire_count)
 3562                 vm_wire_sub(count);
 3563 }
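
/*
 * A minimal sketch of the usual calling pattern, in which pages are
 * collected on a caller-local spglist and freed in one call; the
 * collection step is elided here.  Passing true asks the function to
 * subtract the freed pages from the global wired-page count:
 *
 *	struct spglist free;
 *
 *	SLIST_INIT(&free);
 *	...
 *	SLIST_INSERT_HEAD(&free, m, plinks.s.ss);
 *	...
 *	vm_page_free_pages_toq(&free, true);
 */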
 3564 
 3565 /*
 3566  *      vm_page_wire:
 3567  *
 3568  * Mark this page as wired down.  If the page is fictitious, then
 3569  * its wire count must remain one.
 3570  *
 3571  * The page must be locked.
 3572  */
 3573 void
 3574 vm_page_wire(vm_page_t m)
 3575 {
 3576 
 3577         vm_page_assert_locked(m);
 3578         if ((m->flags & PG_FICTITIOUS) != 0) {
 3579                 KASSERT(m->wire_count == 1,
 3580                     ("vm_page_wire: fictitious page %p's wire count isn't one",
 3581                     m));
 3582                 return;
 3583         }
 3584         if (m->wire_count == 0) {
 3585                 KASSERT((m->oflags & VPO_UNMANAGED) == 0 ||
 3586                     m->queue == PQ_NONE,
 3587                     ("vm_page_wire: unmanaged page %p is queued", m));
 3588                 vm_wire_add(1);
 3589         }
 3590         m->wire_count++;
 3591         KASSERT(m->wire_count != 0, ("vm_page_wire: wire_count overflow m=%p", m));
 3592 }
 3593 
 3594 /*
 3595  * vm_page_unwire:
 3596  *
 3597  * Release one wiring of the specified page, potentially allowing it to be
 3598  * paged out.  Returns TRUE if the number of wirings transitions to zero and
 3599  * FALSE otherwise.
 3600  *
 3601  * Only managed pages belonging to an object can be paged out.  If the number
 3602  * of wirings transitions to zero and the page is eligible for page out, then
 3603  * the page is added to the specified paging queue (unless PQ_NONE is
 3604  * specified, in which case the page is dequeued if it belongs to a paging
 3605  * queue).
 3606  *
 3607  * If a page is fictitious, then its wire count must always be one.
 3608  *
 3609  * A managed page must be locked.
 3610  */
 3611 bool
 3612 vm_page_unwire(vm_page_t m, uint8_t queue)
 3613 {
 3614         bool unwired;
 3615 
 3616         KASSERT(queue < PQ_COUNT || queue == PQ_NONE,
 3617             ("vm_page_unwire: invalid queue %u request for page %p",
 3618             queue, m));
 3619         if ((m->oflags & VPO_UNMANAGED) == 0)
 3620                 vm_page_assert_locked(m);
 3621 
 3622         unwired = vm_page_unwire_noq(m);
 3623         if (!unwired || (m->oflags & VPO_UNMANAGED) != 0 || m->object == NULL)
 3624                 return (unwired);
 3625 
 3626         if (vm_page_queue(m) == queue) {
 3627                 if (queue == PQ_ACTIVE)
 3628                         vm_page_reference(m);
 3629                 else if (queue != PQ_NONE)
 3630                         vm_page_requeue(m);
 3631         } else {
 3632                 vm_page_dequeue(m);
 3633                 if (queue != PQ_NONE) {
 3634                         vm_page_enqueue(m, queue);
 3635                         if (queue == PQ_ACTIVE)
 3636                                 /* Initialize act_count. */
 3637                                 vm_page_activate(m);
 3638                 }
 3639         }
 3640         return (unwired);
 3641 }
 3642 
 3643 /*
 3644  *
 3645  * vm_page_unwire_noq:
 3646  *
 3647  * Unwire a page without (re-)inserting it into a page queue.  It is up
 3648  * to the caller to enqueue, requeue, or free the page as appropriate.
 3649  * In most cases, vm_page_unwire() should be used instead.
 3650  */
 3651 bool
 3652 vm_page_unwire_noq(vm_page_t m)
 3653 {
 3654 
 3655         if ((m->oflags & VPO_UNMANAGED) == 0)
 3656                 vm_page_assert_locked(m);
 3657         if ((m->flags & PG_FICTITIOUS) != 0) {
 3658                 KASSERT(m->wire_count == 1,
 3659             ("vm_page_unwire: fictitious page %p's wire count isn't one", m));
 3660                 return (false);
 3661         }
 3662         if (m->wire_count == 0)
 3663                 panic("vm_page_unwire: page %p's wire count is zero", m);
 3664         m->wire_count--;
 3665         if (m->wire_count == 0) {
 3666                 vm_wire_sub(1);
 3667                 return (true);
 3668         } else
 3669                 return (false);
 3670 }
 3671 
 3672 /*
 3673  * Move the specified page to the tail of the inactive queue, or requeue
 3674  * the page if it is already in the inactive queue.
 3675  *
 3676  * The page must be locked.
 3677  */
 3678 void
 3679 vm_page_deactivate(vm_page_t m)
 3680 {
 3681 
 3682         vm_page_assert_locked(m);
 3683 
 3684         if (m->wire_count > 0 || (m->oflags & VPO_UNMANAGED) != 0)
 3685                 return;
 3686 
 3687         if (!vm_page_inactive(m)) {
 3688                 vm_page_dequeue(m);
 3689                 vm_page_enqueue(m, PQ_INACTIVE);
 3690         } else
 3691                 vm_page_requeue(m);
 3692 }
 3693 
 3694 /*
 3695  * Move the specified page close to the head of the inactive queue,
 3696  * bypassing LRU.  A marker page is used to maintain FIFO ordering.
 3697  * As with regular enqueues, we use a per-CPU batch queue to reduce
 3698  * contention on the page queue lock.
 3699  *
 3700  * The page must be locked.
 3701  */
 3702 void
 3703 vm_page_deactivate_noreuse(vm_page_t m)
 3704 {
 3705 
 3706         vm_page_assert_locked(m);
 3707 
 3708         if (m->wire_count > 0 || (m->oflags & VPO_UNMANAGED) != 0)
 3709                 return;
 3710 
 3711         if (!vm_page_inactive(m)) {
 3712                 vm_page_dequeue(m);
 3713                 m->queue = PQ_INACTIVE;
 3714         }
 3715         if ((m->aflags & PGA_REQUEUE_HEAD) == 0)
 3716                 vm_page_aflag_set(m, PGA_REQUEUE_HEAD);
 3717         vm_pqbatch_submit_page(m, PQ_INACTIVE);
 3718 }
 3719 
 3720 /*
 3721  * vm_page_launder
 3722  *
 3723  *      Put a page in the laundry, or requeue it if it is already there.
 3724  */
 3725 void
 3726 vm_page_launder(vm_page_t m)
 3727 {
 3728 
 3729         vm_page_assert_locked(m);
 3730         if (m->wire_count > 0 || (m->oflags & VPO_UNMANAGED) != 0)
 3731                 return;
 3732 
 3733         if (vm_page_in_laundry(m))
 3734                 vm_page_requeue(m);
 3735         else {
 3736                 vm_page_dequeue(m);
 3737                 vm_page_enqueue(m, PQ_LAUNDRY);
 3738         }
 3739 }
 3740 
 3741 /*
 3742  * vm_page_unswappable
 3743  *
 3744  *      Put a page in the PQ_UNSWAPPABLE holding queue.
 3745  */
 3746 void
 3747 vm_page_unswappable(vm_page_t m)
 3748 {
 3749 
 3750         vm_page_assert_locked(m);
 3751         KASSERT(m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0,
 3752             ("page %p already unswappable", m));
 3753 
 3754         vm_page_dequeue(m);
 3755         vm_page_enqueue(m, PQ_UNSWAPPABLE);
 3756 }
 3757 
 3758 /*
 3759  * Attempt to free the page.  If it cannot be freed, do nothing.  Returns true
 3760  * if the page is freed and false otherwise.
 3761  *
 3762  * The page must be managed.  The page and its containing object must be
 3763  * locked.
 3764  */
 3765 bool
 3766 vm_page_try_to_free(vm_page_t m)
 3767 {
 3768 
 3769         vm_page_assert_locked(m);
 3770         VM_OBJECT_ASSERT_WLOCKED(m->object);
 3771         KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("page %p is unmanaged", m));
 3772         if (m->dirty != 0 || vm_page_held(m) || vm_page_busied(m))
 3773                 return (false);
 3774         if (m->object->ref_count != 0) {
 3775                 pmap_remove_all(m);
 3776                 if (m->dirty != 0)
 3777                         return (false);
 3778         }
 3779         vm_page_free(m);
 3780         return (true);
 3781 }
 3782 
 3783 /*
 3784  * vm_page_advise
 3785  *
 3786  *      Apply the specified advice to the given page.
 3787  *
 3788  *      The object and page must be locked.
 3789  */
 3790 void
 3791 vm_page_advise(vm_page_t m, int advice)
 3792 {
 3793 
 3794         vm_page_assert_locked(m);
 3795         VM_OBJECT_ASSERT_WLOCKED(m->object);
 3796         if (advice == MADV_FREE)
 3797                 /*
 3798                  * Mark the page clean.  This will allow the page to be freed
 3799                  * without first paging it out.  MADV_FREE pages are often
 3800                  * quickly reused by malloc(3), so we do not do anything that
 3801                  * would result in a page fault on a later access.
 3802                  */
 3803                 vm_page_undirty(m);
 3804         else if (advice != MADV_DONTNEED) {
 3805                 if (advice == MADV_WILLNEED)
 3806                         vm_page_activate(m);
 3807                 return;
 3808         }
 3809 
 3810         /*
 3811          * Clear any references to the page.  Otherwise, the page daemon will
 3812          * immediately reactivate the page.
 3813          */
 3814         vm_page_aflag_clear(m, PGA_REFERENCED);
 3815 
 3816         if (advice != MADV_FREE && m->dirty == 0 && pmap_is_modified(m))
 3817                 vm_page_dirty(m);
 3818 
 3819         /*
 3820          * Place clean pages near the head of the inactive queue rather than
 3821          * the tail, thus defeating the queue's LRU operation and ensuring that
 3822          * the page will be reused quickly.  Dirty pages not already in the
 3823          * laundry are moved there.
 3824          */
 3825         if (m->dirty == 0)
 3826                 vm_page_deactivate_noreuse(m);
 3827         else if (!vm_page_in_laundry(m))
 3828                 vm_page_launder(m);
 3829 }
 3830 
 3831 /*
 3832  * Grab a page, waiting until we are woken up due to the page
 3833  * changing state.  We keep on waiting as long as the page remains
 3834  * busy in the object.  If the page doesn't exist, first allocate it
 3835  * and then conditionally zero it.
 3836  *
 3837  * This routine may sleep.
 3838  *
 3839  * The object must be locked on entry.  The lock will, however, be released
 3840  * and reacquired if the routine sleeps.
 3841  */
 3842 vm_page_t
 3843 vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
 3844 {
 3845         vm_page_t m;
 3846         int sleep;
 3847         int pflags;
 3848 
 3849         VM_OBJECT_ASSERT_WLOCKED(object);
 3850         KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
 3851             (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
 3852             ("vm_page_grab: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY mismatch"));
 3853         pflags = allocflags &
 3854             ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
 3855         if ((allocflags & VM_ALLOC_NOWAIT) == 0)
 3856                 pflags |= VM_ALLOC_WAITFAIL;
 3857 retrylookup:
 3858         if ((m = vm_page_lookup(object, pindex)) != NULL) {
 3859                 sleep = (allocflags & VM_ALLOC_IGN_SBUSY) != 0 ?
 3860                     vm_page_xbusied(m) : vm_page_busied(m);
 3861                 if (sleep) {
 3862                         if ((allocflags & VM_ALLOC_NOWAIT) != 0)
 3863                                 return (NULL);
 3864                         /*
 3865                          * Reference the page before unlocking and
 3866                          * sleeping so that the page daemon is less
 3867                          * likely to reclaim it.
 3868                          */
 3869                         vm_page_aflag_set(m, PGA_REFERENCED);
 3870                         vm_page_lock(m);
 3871                         VM_OBJECT_WUNLOCK(object);
 3872                         vm_page_busy_sleep(m, "pgrbwt", (allocflags &
 3873                             VM_ALLOC_IGN_SBUSY) != 0);
 3874                         VM_OBJECT_WLOCK(object);
 3875                         goto retrylookup;
 3876                 } else {
 3877                         if ((allocflags & VM_ALLOC_WIRED) != 0) {
 3878                                 vm_page_lock(m);
 3879                                 vm_page_wire(m);
 3880                                 vm_page_unlock(m);
 3881                         }
 3882                         if ((allocflags &
 3883                             (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) == 0)
 3884                                 vm_page_xbusy(m);
 3885                         if ((allocflags & VM_ALLOC_SBUSY) != 0)
 3886                                 vm_page_sbusy(m);
 3887                         return (m);
 3888                 }
 3889         }
 3890         m = vm_page_alloc(object, pindex, pflags);
 3891         if (m == NULL) {
 3892                 if ((allocflags & VM_ALLOC_NOWAIT) != 0)
 3893                         return (NULL);
 3894                 goto retrylookup;
 3895         }
 3896         if (allocflags & VM_ALLOC_ZERO && (m->flags & PG_ZERO) == 0)
 3897                 pmap_zero_page(m);
 3898         return (m);
 3899 }
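
/*
 * A minimal usage sketch, assuming the caller holds the object's write
 * lock; "obj" and "pindex" are illustrative.  With these flags the
 * returned page is wired and exclusive busied, and the routine may sleep
 * (temporarily dropping the object lock) until the page can be returned:
 *
 *	VM_OBJECT_WLOCK(obj);
 *	m = vm_page_grab(obj, pindex, VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
 *	VM_OBJECT_WUNLOCK(obj);
 */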
 3900 
 3901 /*
 3902  * Return the specified range of pages from the given object.  For each
 3903  * page offset within the range, if a page already exists within the object
 3904  * at that offset and it is busy, then wait for it to change state.  If,
 3905  * instead, the page doesn't exist, then allocate it.
 3906  *
 3907  * The caller must always specify an allocation class.
 3908  *
 3909  * allocation classes:
 3910  *      VM_ALLOC_NORMAL         normal process request
 3911  *      VM_ALLOC_SYSTEM         system *really* needs the pages
 3912  *
 3913  * The caller must always specify that the pages are to be busied and/or
 3914  * wired.
 3915  *
 3916  * optional allocation flags:
 3917  *      VM_ALLOC_IGN_SBUSY      do not sleep on soft busy pages
 3918  *      VM_ALLOC_NOBUSY         do not exclusive busy the page
 3919  *      VM_ALLOC_NOWAIT         do not sleep
 3920  *      VM_ALLOC_SBUSY          set page to sbusy state
 3921  *      VM_ALLOC_WIRED          wire the pages
 3922  *      VM_ALLOC_ZERO           zero and validate any invalid pages
 3923  *
 3924  * If VM_ALLOC_NOWAIT is not specified, this routine may sleep.  Otherwise, it
 3925  * may return a partial prefix of the requested range.
 3926  */
 3927 int
 3928 vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
 3929     vm_page_t *ma, int count)
 3930 {
 3931         vm_page_t m, mpred;
 3932         int pflags;
 3933         int i;
 3934         bool sleep;
 3935 
 3936         VM_OBJECT_ASSERT_WLOCKED(object);
 3937         KASSERT(((u_int)allocflags >> VM_ALLOC_COUNT_SHIFT) == 0,
 3938             ("vm_page_grab_pages: VM_ALLOC_COUNT() is not allowed"));
 3939         KASSERT((allocflags & VM_ALLOC_NOBUSY) == 0 ||
 3940             (allocflags & VM_ALLOC_WIRED) != 0,
 3941             ("vm_page_grab_pages: the pages must be busied or wired"));
 3942         KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
 3943             (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
 3944             ("vm_page_grab_pages: VM_ALLOC_SBUSY/IGN_SBUSY mismatch"));
 3945         if (count == 0)
 3946                 return (0);
 3947         pflags = allocflags & ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK |
 3948             VM_ALLOC_WAITFAIL | VM_ALLOC_IGN_SBUSY);
 3949         if ((allocflags & VM_ALLOC_NOWAIT) == 0)
 3950                 pflags |= VM_ALLOC_WAITFAIL;
 3951         i = 0;
 3952 retrylookup:
 3953         m = vm_radix_lookup_le(&object->rtree, pindex + i);
 3954         if (m == NULL || m->pindex != pindex + i) {
 3955                 mpred = m;
 3956                 m = NULL;
 3957         } else
 3958                 mpred = TAILQ_PREV(m, pglist, listq);
 3959         for (; i < count; i++) {
 3960                 if (m != NULL) {
 3961                         sleep = (allocflags & VM_ALLOC_IGN_SBUSY) != 0 ?
 3962                             vm_page_xbusied(m) : vm_page_busied(m);
 3963                         if (sleep) {
 3964                                 if ((allocflags & VM_ALLOC_NOWAIT) != 0)
 3965                                         break;
 3966                                 /*
 3967                                  * Reference the page before unlocking and
 3968                                  * sleeping so that the page daemon is less
 3969                                  * likely to reclaim it.
 3970                                  */
 3971                                 vm_page_aflag_set(m, PGA_REFERENCED);
 3972                                 vm_page_lock(m);
 3973                                 VM_OBJECT_WUNLOCK(object);
 3974                                 vm_page_busy_sleep(m, "grbmaw", (allocflags &
 3975                                     VM_ALLOC_IGN_SBUSY) != 0);
 3976                                 VM_OBJECT_WLOCK(object);
 3977                                 goto retrylookup;
 3978                         }
 3979                         if ((allocflags & VM_ALLOC_WIRED) != 0) {
 3980                                 vm_page_lock(m);
 3981                                 vm_page_wire(m);
 3982                                 vm_page_unlock(m);
 3983                         }
 3984                         if ((allocflags & (VM_ALLOC_NOBUSY |
 3985                             VM_ALLOC_SBUSY)) == 0)
 3986                                 vm_page_xbusy(m);
 3987                         if ((allocflags & VM_ALLOC_SBUSY) != 0)
 3988                                 vm_page_sbusy(m);
 3989                 } else {
 3990                         m = vm_page_alloc_after(object, pindex + i,
 3991                             pflags | VM_ALLOC_COUNT(count - i), mpred);
 3992                         if (m == NULL) {
 3993                                 if ((allocflags & VM_ALLOC_NOWAIT) != 0)
 3994                                         break;
 3995                                 goto retrylookup;
 3996                         }
 3997                 }
 3998                 if (m->valid == 0 && (allocflags & VM_ALLOC_ZERO) != 0) {
 3999                         if ((m->flags & PG_ZERO) == 0)
 4000                                 pmap_zero_page(m);
 4001                         m->valid = VM_PAGE_BITS_ALL;
 4002                 }
 4003                 ma[i] = mpred = m;
 4004                 m = vm_page_next(m);
 4005         }
 4006         return (i);
 4007 }
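
/*
 * A minimal sketch of grabbing a run of pages, assuming "obj" is write
 * locked and "ma" has room for "npages" entries; because VM_ALLOC_NOWAIT
 * is not specified, the routine may sleep and the full count is returned:
 *
 *	count = vm_page_grab_pages(obj, pindex,
 *	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO, ma, npages);
 */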
 4008 
 4009 /*
 4010  * Mapping function for valid or dirty bits in a page.
 4011  *
 4012  * Inputs are required to range within a page.
 4013  */
 4014 vm_page_bits_t
 4015 vm_page_bits(int base, int size)
 4016 {
 4017         int first_bit;
 4018         int last_bit;
 4019 
 4020         KASSERT(
 4021             base + size <= PAGE_SIZE,
 4022             ("vm_page_bits: illegal base/size %d/%d", base, size)
 4023         );
 4024 
 4025         if (size == 0)          /* handle degenerate case */
 4026                 return (0);
 4027 
 4028         first_bit = base >> DEV_BSHIFT;
 4029         last_bit = (base + size - 1) >> DEV_BSHIFT;
 4030 
 4031         return (((vm_page_bits_t)2 << last_bit) -
 4032             ((vm_page_bits_t)1 << first_bit));
 4033 }
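
/*
 * A worked example, assuming the usual DEV_BSIZE of 512 (DEV_BSHIFT == 9):
 * vm_page_bits(512, 1024) covers the second and third 512-byte blocks of
 * the page, so first_bit == 1, last_bit == 2, and the result is
 * (2 << 2) - (1 << 1) == 0x06, i.e., bits 1 and 2 set.
 */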
 4034 
 4035 /*
 4036  *      vm_page_set_valid_range:
 4037  *
 4038  *      Sets portions of a page valid.  The arguments are expected
 4039  *      to be DEV_BSIZE aligned, but if they aren't, the bitmap is inclusive
 4040  *      of any partial chunks touched by the range.  The invalid portion of
 4041  *      such chunks will be zeroed.
 4042  *
 4043  *      (base + size) must be less than or equal to PAGE_SIZE.
 4044  */
 4045 void
 4046 vm_page_set_valid_range(vm_page_t m, int base, int size)
 4047 {
 4048         int endoff, frag;
 4049 
 4050         VM_OBJECT_ASSERT_WLOCKED(m->object);
 4051         if (size == 0)  /* handle degenerate case */
 4052                 return;
 4053 
 4054         /*
 4055          * If the base is not DEV_BSIZE aligned and the valid
 4056          * bit is clear, we have to zero out a portion of the
 4057          * first block.
 4058          */
 4059         if ((frag = rounddown2(base, DEV_BSIZE)) != base &&
 4060             (m->valid & (1 << (base >> DEV_BSHIFT))) == 0)
 4061                 pmap_zero_page_area(m, frag, base - frag);
 4062 
 4063         /*
 4064          * If the ending offset is not DEV_BSIZE aligned and the
 4065          * valid bit is clear, we have to zero out a portion of
 4066          * the last block.
 4067          */
 4068         endoff = base + size;
 4069         if ((frag = rounddown2(endoff, DEV_BSIZE)) != endoff &&
 4070             (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0)
 4071                 pmap_zero_page_area(m, endoff,
 4072                     DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
 4073 
 4074         /*
 4075          * Assert that no previously invalid block that is now being validated
 4076          * is already dirty.
 4077          */
 4078         KASSERT((~m->valid & vm_page_bits(base, size) & m->dirty) == 0,
 4079             ("vm_page_set_valid_range: page %p is dirty", m));
 4080 
 4081         /*
 4082          * Set valid bits inclusive of any overlap.
 4083          */
 4084         m->valid |= vm_page_bits(base, size);
 4085 }
 4086 
 4087 /*
 4088  * Clear the given bits from the specified page's dirty field.
 4089  */
 4090 static __inline void
 4091 vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits)
 4092 {
 4093         uintptr_t addr;
 4094 #if PAGE_SIZE < 16384
 4095         int shift;
 4096 #endif
 4097 
 4098         /*
 4099          * If the object is locked and the page is neither exclusive busy nor
 4100          * write mapped, then the page's dirty field cannot possibly be
 4101          * set by a concurrent pmap operation.
 4102          */
 4103         VM_OBJECT_ASSERT_WLOCKED(m->object);
 4104         if (!vm_page_xbusied(m) && !pmap_page_is_write_mapped(m))
 4105                 m->dirty &= ~pagebits;
 4106         else {
 4107                 /*
 4108                  * The pmap layer can call vm_page_dirty() without
 4109                  * holding a distinguished lock.  The combination of
 4110                  * the object's lock and an atomic operation suffice
 4111                  * to guarantee consistency of the page dirty field.
 4112                  *
 4113                  * For PAGE_SIZE == 32768 case, compiler already
 4114                  * properly aligns the dirty field, so no forcible
 4115                  * alignment is needed. Only require existence of
 4116                  * atomic_clear_64 when page size is 32768.
 4117                  */
 4118                 addr = (uintptr_t)&m->dirty;
 4119 #if PAGE_SIZE == 32768
 4120                 atomic_clear_64((uint64_t *)addr, pagebits);
 4121 #elif PAGE_SIZE == 16384
 4122                 atomic_clear_32((uint32_t *)addr, pagebits);
 4123 #else           /* PAGE_SIZE <= 8192 */
 4124                 /*
 4125                  * Use a trick to perform a 32-bit atomic on the
 4126                  * containing aligned word, to not depend on the existence
 4127                  * of atomic_clear_{8, 16}.
 4128                  */
 4129                 shift = addr & (sizeof(uint32_t) - 1);
 4130 #if BYTE_ORDER == BIG_ENDIAN
 4131                 shift = (sizeof(uint32_t) - sizeof(m->dirty) - shift) * NBBY;
 4132 #else
 4133                 shift *= NBBY;
 4134 #endif
 4135                 addr &= ~(sizeof(uint32_t) - 1);
 4136                 atomic_clear_32((uint32_t *)addr, pagebits << shift);
 4137 #endif          /* PAGE_SIZE */
 4138         }
 4139 }
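
/*
 * A worked example of the aligned-word trick above, assuming PAGE_SIZE is
 * 4096 (so the dirty field is 8 bits wide), a little-endian machine, and a
 * dirty field located at byte offset 2 within its aligned 32-bit word:
 * shift == 2 * NBBY == 16, so clearing pagebits 0x03 becomes
 * atomic_clear_32(addr & ~3, 0x03 << 16), which clears only those two
 * dirty bits of this page and leaves the neighboring bytes untouched.
 */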
 4140 
 4141 /*
 4142  *      vm_page_set_validclean:
 4143  *
 4144  *      Sets portions of a page valid and clean.  The arguments are expected
 4145  *      to be DEV_BSIZE aligned, but if they aren't, the bitmap is inclusive
 4146  *      of any partial chunks touched by the range.  The invalid portion of
 4147  *      such chunks will be zeroed.
 4148  *
 4149  *      (base + size) must be less than or equal to PAGE_SIZE.
 4150  */
 4151 void
 4152 vm_page_set_validclean(vm_page_t m, int base, int size)
 4153 {
 4154         vm_page_bits_t oldvalid, pagebits;
 4155         int endoff, frag;
 4156 
 4157         VM_OBJECT_ASSERT_WLOCKED(m->object);
 4158         if (size == 0)  /* handle degenerate case */
 4159                 return;
 4160 
 4161         /*
 4162          * If the base is not DEV_BSIZE aligned and the valid
 4163          * bit is clear, we have to zero out a portion of the
 4164          * first block.
 4165          */
 4166         if ((frag = rounddown2(base, DEV_BSIZE)) != base &&
 4167             (m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0)
 4168                 pmap_zero_page_area(m, frag, base - frag);
 4169 
 4170         /*
 4171          * If the ending offset is not DEV_BSIZE aligned and the
 4172          * valid bit is clear, we have to zero out a portion of
 4173          * the last block.
 4174          */
 4175         endoff = base + size;
 4176         if ((frag = rounddown2(endoff, DEV_BSIZE)) != endoff &&
 4177             (m->valid & ((vm_page_bits_t)1 << (endoff >> DEV_BSHIFT))) == 0)
 4178                 pmap_zero_page_area(m, endoff,
 4179                     DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
 4180 
 4181         /*
 4182          * Set valid, clear dirty bits.  If validating the entire
 4183          * page we can safely clear the pmap modify bit.  We also
 4184          * use this opportunity to clear the VPO_NOSYNC flag.  If a process
 4185          * takes a write fault on a MAP_NOSYNC memory area the flag will
 4186          * be set again.
 4187          *
 4188          * We set valid bits inclusive of any overlap, but we can only
 4189          * clear dirty bits for DEV_BSIZE chunks that are fully within
 4190          * the range.
 4191          */
 4192         oldvalid = m->valid;
 4193         pagebits = vm_page_bits(base, size);
 4194         m->valid |= pagebits;
 4195 #if 0   /* NOT YET */
 4196         if ((frag = base & (DEV_BSIZE - 1)) != 0) {
 4197                 frag = DEV_BSIZE - frag;
 4198                 base += frag;
 4199                 size -= frag;
 4200                 if (size < 0)
 4201                         size = 0;
 4202         }
 4203         pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1));
 4204 #endif
 4205         if (base == 0 && size == PAGE_SIZE) {
 4206                 /*
 4207                  * The page can only be modified within the pmap if it is
 4208                  * mapped, and it can only be mapped if it was previously
 4209                  * fully valid.
 4210                  */
 4211                 if (oldvalid == VM_PAGE_BITS_ALL)
 4212                         /*
 4213                          * Perform the pmap_clear_modify() first.  Otherwise,
 4214                          * a concurrent pmap operation, such as
 4215                          * pmap_protect(), could clear a modification in the
 4216                          * pmap and set the dirty field on the page before
 4217                          * pmap_clear_modify() had begun and after the dirty
 4218                          * field was cleared here.
 4219                          */
 4220                         pmap_clear_modify(m);
 4221                 m->dirty = 0;
 4222                 m->oflags &= ~VPO_NOSYNC;
 4223         } else if (oldvalid != VM_PAGE_BITS_ALL)
 4224                 m->dirty &= ~pagebits;
 4225         else
 4226                 vm_page_clear_dirty_mask(m, pagebits);
 4227 }
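
/*
 * Annotation (not part of the original file): a worked example of the
 * interface above, assuming DEV_BSIZE == 512 and PAGE_SIZE == 4096.  A
 * call such as
 *
 *      vm_page_set_validclean(m, 600, 1000);
 *
 * covers byte offsets 600..1599.  The leading fragment 512..599 is zeroed
 * if chunk 1 was not already valid, and the trailing fragment 1600..2047
 * is zeroed if chunk 3 was not already valid; vm_page_bits(600, 1000) then
 * yields a mask for chunks 1-3 (bytes 512..2047), which are marked valid
 * and have their dirty bits cleared.
 */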
 4228 
 4229 void
 4230 vm_page_clear_dirty(vm_page_t m, int base, int size)
 4231 {
 4232 
 4233         vm_page_clear_dirty_mask(m, vm_page_bits(base, size));
 4234 }
 4235 
 4236 /*
 4237  *      vm_page_set_invalid:
 4238  *
 4239  *      Invalidates DEV_BSIZE'd chunks within a page.  Both the
 4240  *      valid and dirty bits for the affected areas are cleared.
 4241  */
 4242 void
 4243 vm_page_set_invalid(vm_page_t m, int base, int size)
 4244 {
 4245         vm_page_bits_t bits;
 4246         vm_object_t object;
 4247 
 4248         object = m->object;
 4249         VM_OBJECT_ASSERT_WLOCKED(object);
 4250         if (object->type == OBJT_VNODE && base == 0 && IDX_TO_OFF(m->pindex) +
 4251             size >= object->un_pager.vnp.vnp_size)
 4252                 bits = VM_PAGE_BITS_ALL;
 4253         else
 4254                 bits = vm_page_bits(base, size);
 4255         if (object->ref_count != 0 && m->valid == VM_PAGE_BITS_ALL &&
 4256             bits != 0)
 4257                 pmap_remove_all(m);
 4258         KASSERT((bits == 0 && m->valid == VM_PAGE_BITS_ALL) ||
 4259             !pmap_page_is_mapped(m),
 4260             ("vm_page_set_invalid: page %p is mapped", m));
 4261         m->valid &= ~bits;
 4262         m->dirty &= ~bits;
 4263 }
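
/*
 * Annotation (not part of the original file): a worked example of the
 * vnode special case above, assuming PAGE_SIZE == 4096.  For a vnode
 * object with vnp_size == 10000, the page at pindex 2 maps file offsets
 * 8192..12287, so a call such as
 *
 *      vm_page_set_invalid(m, 0, 1808);
 *
 * reaches the end of the file (8192 + 1808 == 10000) and therefore
 * invalidates the entire page rather than only the chunks covered by the
 * range, since everything past offset 10000 lies beyond end of file.
 */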
 4264 
 4265 /*
 4266  * vm_page_zero_invalid()
 4267  *
 4268  *      The kernel assumes that the invalid portions of a page contain
 4269  *      garbage, but such pages can be mapped into memory by user code.
 4270  *      When this occurs, we must zero out the non-valid portions of the
 4271  *      page so user code sees what it expects.
 4272  *
 4273  *      Pages are most often semi-valid when the end of a file is mapped
 4274  *      into memory and the file's size is not page aligned.
 4275  */
 4276 void
 4277 vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
 4278 {
 4279         int b;
 4280         int i;
 4281 
 4282         VM_OBJECT_ASSERT_WLOCKED(m->object);
 4283         /*
 4284          * Scan the valid bits looking for invalid sections that
 4285          * must be zeroed.  Invalid sub-DEV_BSIZE'd areas (where the
 4286          * valid bit may be set) have already been zeroed by
 4287          * vm_page_set_validclean().
 4288          */
 4289         for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
 4290                 if (i == (PAGE_SIZE / DEV_BSIZE) ||
 4291                     (m->valid & ((vm_page_bits_t)1 << i))) {
 4292                         if (i > b) {
 4293                                 pmap_zero_page_area(m,
 4294                                     b << DEV_BSHIFT, (i - b) << DEV_BSHIFT);
 4295                         }
 4296                         b = i + 1;
 4297                 }
 4298         }
 4299 
 4300         /*
 4301          * setvalid is TRUE when we can safely set the zero'd areas
 4302          * as being valid.  We can do this if there are no cache consistency
 4303          * issues, e.g., it is ok to do with UFS, but not ok to do with NFS.
 4304          */
 4305         if (setvalid)
 4306                 m->valid = VM_PAGE_BITS_ALL;
 4307 }
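
/*
 * Annotation (not part of the original file): a worked example of the
 * scan above, assuming PAGE_SIZE == 4096 and DEV_BSIZE == 512 (eight
 * chunks per page).  With m->valid == 0x0f, chunks 0-3 are valid and
 * chunks 4-7 are not, so the loop issues a single
 * pmap_zero_page_area(m, 2048, 2048) call for the invalid run; with
 * setvalid == TRUE the page is then marked fully valid.
 */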
 4308 
 4309 /*
 4310  *      vm_page_is_valid:
 4311  *
 4312  *      Is the (partial) page valid?  Note that in the degenerate case
 4313  *      where size == 0, the result is FALSE if the page is entirely
 4314  *      invalid and TRUE otherwise.
 4315  */
 4316 int
 4317 vm_page_is_valid(vm_page_t m, int base, int size)
 4318 {
 4319         vm_page_bits_t bits;
 4320 
 4321         VM_OBJECT_ASSERT_LOCKED(m->object);
 4322         bits = vm_page_bits(base, size);
 4323         return (m->valid != 0 && (m->valid & bits) == bits);
 4324 }
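
/*
 * Annotation (not part of the original file): usage sketch, assuming
 * DEV_BSIZE == 512.  vm_page_is_valid(m, 512, 1024) asks whether chunks
 * 1 and 2 are both valid, while, as the comment above notes, a size of
 * zero degenerates into a test of whether any part of the page is valid.
 */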
 4325 
 4326 /*
 4327  * Returns true if all of the specified predicates are true for the entire
 4328  * (super)page and false otherwise.
 4329  */
 4330 bool
 4331 vm_page_ps_test(vm_page_t m, int flags, vm_page_t skip_m)
 4332 {
 4333         vm_object_t object;
 4334         int i, npages;
 4335 
 4336         object = m->object;
 4337         if (skip_m != NULL && skip_m->object != object)
 4338                 return (false);
 4339         VM_OBJECT_ASSERT_LOCKED(object);
 4340         npages = atop(pagesizes[m->psind]);
 4341 
 4342         /*
 4343          * The physically contiguous pages that make up a superpage, i.e., a
 4344          * page with a page size index ("psind") greater than zero, will
 4345          * occupy adjacent entries in vm_page_array[].
 4346          */
 4347         for (i = 0; i < npages; i++) {
 4348                 /* Always test object consistency, including "skip_m". */
 4349                 if (m[i].object != object)
 4350                         return (false);
 4351                 if (&m[i] == skip_m)
 4352                         continue;
 4353                 if ((flags & PS_NONE_BUSY) != 0 && vm_page_busied(&m[i]))
 4354                         return (false);
 4355                 if ((flags & PS_ALL_DIRTY) != 0) {
 4356                         /*
 4357                          * Calling vm_page_test_dirty() or pmap_is_modified()
 4358                          * might stop this case from spuriously returning
 4359                          * "false".  However, that would require a write lock
 4360                          * on the object containing "m[i]".
 4361                          */
 4362                         if (m[i].dirty != VM_PAGE_BITS_ALL)
 4363                                 return (false);
 4364                 }
 4365                 if ((flags & PS_ALL_VALID) != 0 &&
 4366                     m[i].valid != VM_PAGE_BITS_ALL)
 4367                         return (false);
 4368         }
 4369         return (true);
 4370 }
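
/*
 * Annotation (not part of the original file): a worked example for the
 * function above.  On amd64, a 2MB superpage (psind == 1) built from 4KB
 * base pages gives npages == atop(2MB) == 512, so the loop walks m[0]
 * through m[511], which occupy consecutive vm_page_array[] entries.  A
 * call such as
 *
 *      vm_page_ps_test(m, PS_ALL_VALID | PS_NONE_BUSY, NULL);
 *
 * then returns true only if every constituent page belongs to the same
 * object, is fully valid, and is not busied.
 */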
 4371 
 4372 /*
 4373  * Set the page's dirty bits if the page is modified.
 4374  */
 4375 void
 4376 vm_page_test_dirty(vm_page_t m)
 4377 {
 4378 
 4379         VM_OBJECT_ASSERT_WLOCKED(m->object);
 4380         if (m->dirty != VM_PAGE_BITS_ALL && pmap_is_modified(m))
 4381                 vm_page_dirty(m);
 4382 }
 4383 
 4384 void
 4385 vm_page_lock_KBI(vm_page_t m, const char *file, int line)
 4386 {
 4387 
 4388         mtx_lock_flags_(vm_page_lockptr(m), 0, file, line);
 4389 }
 4390 
 4391 void
 4392 vm_page_unlock_KBI(vm_page_t m, const char *file, int line)
 4393 {
 4394 
 4395         mtx_unlock_flags_(vm_page_lockptr(m), 0, file, line);
 4396 }
 4397 
 4398 int
 4399 vm_page_trylock_KBI(vm_page_t m, const char *file, int line)
 4400 {
 4401 
 4402         return (mtx_trylock_flags_(vm_page_lockptr(m), 0, file, line));
 4403 }
 4404 
 4405 #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
 4406 void
 4407 vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line)
 4408 {
 4409 
 4410         vm_page_lock_assert_KBI(m, MA_OWNED, file, line);
 4411 }
 4412 
 4413 void
 4414 vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line)
 4415 {
 4416 
 4417         mtx_assert_(vm_page_lockptr(m), a, file, line);
 4418 }
 4419 #endif
 4420 
 4421 #ifdef INVARIANTS
 4422 void
 4423 vm_page_object_lock_assert(vm_page_t m)
 4424 {
 4425 
 4426         /*
 4427          * Certain of the page's fields may only be modified by the
 4428          * holder of the containing object's lock or the exclusive busy
 4429          * holder.  Unfortunately, the holder of the write busy is
 4430          * not recorded, and thus cannot be checked here.
 4431          */
 4432         if (m->object != NULL && !vm_page_xbusied(m))
 4433                 VM_OBJECT_ASSERT_WLOCKED(m->object);
 4434 }
 4435 
 4436 void
 4437 vm_page_assert_pga_writeable(vm_page_t m, uint8_t bits)
 4438 {
 4439 
 4440         if ((bits & PGA_WRITEABLE) == 0)
 4441                 return;
 4442 
 4443         /*
 4444          * The PGA_WRITEABLE flag can only be set on a managed page, and
 4445          * only while the page is exclusively busied or its object is locked.
 4446          * Currently, this flag is only set by pmap_enter().
 4447          */
 4448         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 4449             ("PGA_WRITEABLE on unmanaged page"));
 4450         if (!vm_page_xbusied(m))
 4451                 VM_OBJECT_ASSERT_LOCKED(m->object);
 4452 }
 4453 #endif
 4454 
 4455 #include "opt_ddb.h"
 4456 #ifdef DDB
 4457 #include <sys/kernel.h>
 4458 
 4459 #include <ddb/ddb.h>
 4460 
 4461 DB_SHOW_COMMAND(page, vm_page_print_page_info)
 4462 {
 4463 
 4464         db_printf("vm_cnt.v_free_count: %d\n", vm_free_count());
 4465         db_printf("vm_cnt.v_inactive_count: %d\n", vm_inactive_count());
 4466         db_printf("vm_cnt.v_active_count: %d\n", vm_active_count());
 4467         db_printf("vm_cnt.v_laundry_count: %d\n", vm_laundry_count());
 4468         db_printf("vm_cnt.v_wire_count: %d\n", vm_wire_count());
 4469         db_printf("vm_cnt.v_free_reserved: %d\n", vm_cnt.v_free_reserved);
 4470         db_printf("vm_cnt.v_free_min: %d\n", vm_cnt.v_free_min);
 4471         db_printf("vm_cnt.v_free_target: %d\n", vm_cnt.v_free_target);
 4472         db_printf("vm_cnt.v_inactive_target: %d\n", vm_cnt.v_inactive_target);
 4473 }
 4474 
 4475 DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
 4476 {
 4477         int dom;
 4478 
 4479         db_printf("pq_free %d\n", vm_free_count());
 4480         for (dom = 0; dom < vm_ndomains; dom++) {
 4481                 db_printf(
 4482     "dom %d page_cnt %d free %d pq_act %d pq_inact %d pq_laund %d pq_unsw %d\n",
 4483                     dom,
 4484                     vm_dom[dom].vmd_page_count,
 4485                     vm_dom[dom].vmd_free_count,
 4486                     vm_dom[dom].vmd_pagequeues[PQ_ACTIVE].pq_cnt,
 4487                     vm_dom[dom].vmd_pagequeues[PQ_INACTIVE].pq_cnt,
 4488                     vm_dom[dom].vmd_pagequeues[PQ_LAUNDRY].pq_cnt,
 4489                     vm_dom[dom].vmd_pagequeues[PQ_UNSWAPPABLE].pq_cnt);
 4490         }
 4491 }
 4492 
 4493 DB_SHOW_COMMAND(pginfo, vm_page_print_pginfo)
 4494 {
 4495         vm_page_t m;
 4496         boolean_t phys;
 4497 
 4498         if (!have_addr) {
 4499                 db_printf("show pginfo addr\n");
 4500                 return;
 4501         }
 4502 
 4503         phys = strchr(modif, 'p') != NULL;
 4504         if (phys)
 4505                 m = PHYS_TO_VM_PAGE(addr);
 4506         else
 4507                 m = (vm_page_t)addr;
 4508         db_printf(
 4509     "page %p obj %p pidx 0x%jx phys 0x%jx q %d hold %d wire %d\n"
 4510     "  af 0x%x of 0x%x f 0x%x act %d busy %x valid 0x%x dirty 0x%x\n",
 4511             m, m->object, (uintmax_t)m->pindex, (uintmax_t)m->phys_addr,
 4512             m->queue, m->hold_count, m->wire_count, m->aflags, m->oflags,
 4513             m->flags, m->act_count, m->busy_lock, m->valid, m->dirty);
 4514 }
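
/*
 * Annotation (not part of the original file): the DB_SHOW_COMMAND()
 * definitions above register "show page", "show pageq" and "show pginfo"
 * with the in-kernel debugger; "show pginfo" takes a vm_page address as
 * its argument, and the "p" modifier makes the argument be interpreted
 * as a physical address instead.
 */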
 4515 #endif /* DDB */
