
FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_page.c


/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)vm_page.c     7.4 (Berkeley) 5/7/91
 */

/*-
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *                      GENERAL RULES ON VM_PAGE MANIPULATION
 *
 *      - a pageq mutex is required when adding or removing a page from a
 *        page queue (vm_page_queue[]), regardless of other mutexes or the
 *        busy state of a page.
 *
 *      - a hash chain mutex is required when associating or disassociating
 *        a page from the VM PAGE CACHE hash table (vm_page_buckets),
 *        regardless of other mutexes or the busy state of a page.
 *
 *      - either a hash chain mutex OR a busied page is required in order
 *        to modify the page flags.  A hash chain mutex must be obtained in
 *        order to busy a page.  A page's flags cannot be modified by a
 *        hash chain mutex if the page is marked busy.
 *
 *      - The object memq mutex is held when inserting or removing
 *        pages from an object (vm_page_insert() or vm_page_remove()).  This
 *        is different from the object's main mutex.
 *
 *      Generally speaking, you have to be aware of side effects when running
 *      vm_page ops.  A vm_page_lookup() will return with the hash chain
 *      locked, whether it was able to lookup the page or not.  vm_page_free(),
 *      vm_page_cache(), vm_page_activate(), and a number of other routines
 *      will release the hash chain mutex for you.  Intermediate manipulation
 *      routines such as vm_page_flag_set() expect the hash chain to be held
 *      on entry and the hash chain will remain held on return.
 *
 *      pageq scanning can only occur with the pageq in question locked.
 *      We have a known bottleneck with the active queue, but the cache
 *      and free queues are actually arrays already.
 */
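
/*
 * Illustrative sketch (hypothetical, not part of the original file): the
 * locking rules above as they look in a typical caller.  The lock macros
 * follow the usage elsewhere in this file.
 */
#if 0
static void
example_retire_page(vm_object_t object, vm_pindex_t pindex)
{
        vm_page_t m;

        VM_OBJECT_LOCK(object);         /* protects object membership */
        vm_page_lock_queues();          /* protects queue state and flags */
        m = vm_page_lookup(object, pindex);
        if (m != NULL && m->busy == 0 && (m->flags & PG_BUSY) == 0)
                vm_page_free(m);        /* handles its own queue removal */
        vm_page_unlock_queues();
        VM_OBJECT_UNLOCK(object);
}
#endif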

/*
 *      Resident memory management module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <vm/uma_int.h>

#include <machine/md_var.h>

/*
 *      Associated with each page of user-allocatable memory is a
 *      page structure.
 */

struct mtx vm_page_queue_mtx;
struct mtx vm_page_queue_free_mtx;

vm_page_t vm_page_array = 0;
int vm_page_array_size = 0;
long first_page = 0;
int vm_page_zero_count = 0;

static int boot_pages = UMA_BOOT_PAGES;
TUNABLE_INT("vm.boot_pages", &boot_pages);
SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RD, &boot_pages, 0,
        "number of pages allocated for bootstrapping the VM system");

/*
 *      vm_set_page_size:
 *
 *      Sets the page size, perhaps based upon the memory
 *      size.  Must be called before any use of page-size
 *      dependent functions.
 */
void
vm_set_page_size(void)
{
        if (cnt.v_page_size == 0)
                cnt.v_page_size = PAGE_SIZE;
        if (((cnt.v_page_size - 1) & cnt.v_page_size) != 0)
                panic("vm_set_page_size: page size not a power of two");
}
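
/*
 * Illustrative note (not in the original file): the check above uses the
 * standard bit trick that a power of two has exactly one set bit, so
 * x & (x - 1) clears that bit and yields zero.  For example, with a
 * 4096-byte page:
 *
 *      0x1000 & 0x0fff == 0            (power of two, accepted)
 *      0x1800 & 0x17ff == 0x1000       (not a power of two, panics)
 */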

/*
 *      vm_page_blacklist_lookup:
 *
 *      See if a physical address in this page has been listed
 *      in the blacklist tunable.  Entries in the tunable are
 *      separated by spaces or commas.  If an invalid integer is
 *      encountered then the rest of the string is skipped.
 */
static int
vm_page_blacklist_lookup(char *list, vm_paddr_t pa)
{
        vm_paddr_t bad;
        char *cp, *pos;

        for (pos = list; *pos != '\0'; pos = cp) {
                bad = strtoq(pos, &cp, 0);
                if (*cp != '\0') {
                        if (*cp == ' ' || *cp == ',') {
                                cp++;
                                if (cp == pos)
                                        continue;
                        } else
                                break;
                }
                if (pa == trunc_page(bad))
                        return (1);
        }
        return (0);
}
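
/*
 * Illustrative note (not in the original file): the tunable parsed above
 * is a space- or comma-separated list of physical addresses, so a
 * hypothetical loader.conf entry excluding two bad pages might read:
 *
 *      vm.blacklist="0x1a2000,0x7fe3000"
 *
 * Each entry is truncated to its page boundary before comparison.
 */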

/*
 *      vm_page_startup:
 *
 *      Initializes the resident memory module.
 *
 *      Allocates memory for the page cells, and
 *      for the object/offset-to-page hash table headers.
 *      Each page cell is initialized and placed on the free list.
 */
vm_offset_t
vm_page_startup(vm_offset_t vaddr)
{
        vm_offset_t mapped;
        vm_size_t npages;
        vm_paddr_t page_range;
        vm_paddr_t new_end;
        int i;
        vm_paddr_t pa;
        int nblocks;
        vm_paddr_t last_pa;
        char *list;

        /* the biggest memory array is the second group of pages */
        vm_paddr_t end;
        vm_paddr_t biggestsize;
        int biggestone;

        vm_paddr_t total;

        total = 0;
        biggestsize = 0;
        biggestone = 0;
        nblocks = 0;
        vaddr = round_page(vaddr);

        for (i = 0; phys_avail[i + 1]; i += 2) {
                phys_avail[i] = round_page(phys_avail[i]);
                phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
        }

        for (i = 0; phys_avail[i + 1]; i += 2) {
                vm_paddr_t size = phys_avail[i + 1] - phys_avail[i];

                if (size > biggestsize) {
                        biggestone = i;
                        biggestsize = size;
                }
                ++nblocks;
                total += size;
        }
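        /*
         * Illustrative note (not in the original file): the loops above
         * treat phys_avail[] as start/end pairs terminated by a zero
         * entry; a hypothetical two-segment machine might present:
         *
         *      phys_avail[] = { 0x00001000, 0x0009f000,   segment 0
         *                       0x00100000, 0x3fff0000,   segment 1
         *                       0, 0 };
         *
         * so "biggestone" indexes the start of the largest usable segment.
         */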

        end = phys_avail[biggestone+1];

        /*
         * Initialize the locks.
         */
        mtx_init(&vm_page_queue_mtx, "vm page queue mutex", NULL, MTX_DEF |
            MTX_RECURSE);
        mtx_init(&vm_page_queue_free_mtx, "vm page queue free mutex", NULL,
            MTX_SPIN);

        /*
         * Initialize the queue headers for the free queue, the active queue
         * and the inactive queue.
         */
        vm_pageq_init();

        /*
         * Allocate memory for use when boot strapping the kernel memory
         * allocator.
         */
        new_end = end - (boot_pages * UMA_SLAB_SIZE);
        new_end = trunc_page(new_end);
        mapped = pmap_map(&vaddr, new_end, end,
            VM_PROT_READ | VM_PROT_WRITE);
        bzero((void *)mapped, end - new_end);
        uma_startup((void *)mapped, boot_pages);

#if defined(__amd64__) || defined(__i386__)
        /*
         * Allocate a bitmap to indicate that a random physical page
         * needs to be included in a minidump.
         *
         * The amd64 port needs this to indicate which direct map pages
         * need to be dumped, via calls to dump_add_page()/dump_drop_page().
         *
         * However, i386 still needs this workspace internally within the
         * minidump code.  In theory, they are not needed on i386, but are
         * included should the sf_buf code decide to use them.
         */
        page_range = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE;
        vm_page_dump_size = round_page(roundup2(page_range, NBBY) / NBBY);
        new_end -= vm_page_dump_size;
        vm_page_dump = (void *)(uintptr_t)pmap_map(&vaddr, new_end,
            new_end + vm_page_dump_size, VM_PROT_READ | VM_PROT_WRITE);
        bzero((void *)vm_page_dump, vm_page_dump_size);
#endif
        /*
         * Compute the number of pages of memory that will be available for
         * use (taking into account the overhead of a page structure per
         * page).
         */
        first_page = phys_avail[0] / PAGE_SIZE;
        page_range = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE - first_page;
        npages = (total - (page_range * sizeof(struct vm_page)) -
            (end - new_end)) / PAGE_SIZE;
        end = new_end;

        /*
         * Reserve an unmapped guard page to trap access to vm_page_array[-1].
         */
        vaddr += PAGE_SIZE;

        /*
         * Initialize the mem entry structures now, and put them in the free
         * queue.
         */
        new_end = trunc_page(end - page_range * sizeof(struct vm_page));
        mapped = pmap_map(&vaddr, new_end, end,
            VM_PROT_READ | VM_PROT_WRITE);
        vm_page_array = (vm_page_t) mapped;
#ifdef __amd64__
        /*
         * pmap_map on amd64 comes out of the direct-map, not kvm like i386,
         * so the pages must be tracked for a crashdump to include this data.
         * This includes the vm_page_array and the early UMA bootstrap pages.
         */
        for (pa = new_end; pa < phys_avail[biggestone + 1]; pa += PAGE_SIZE)
                dump_add_page(pa);
#endif
        phys_avail[biggestone + 1] = new_end;

        /*
         * Clear all of the page structures
         */
        bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
        vm_page_array_size = page_range;

        /*
         * Construct the free queue(s) in descending order (by physical
         * address) so that the first 16MB of physical memory is allocated
         * last rather than first.  On large-memory machines, this avoids
         * the exhaustion of low physical memory before isa_dma_init has run.
         */
        cnt.v_page_count = 0;
        cnt.v_free_count = 0;
        list = getenv("vm.blacklist");
        for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
                pa = phys_avail[i];
                last_pa = phys_avail[i + 1];
                while (pa < last_pa && npages-- > 0) {
                        if (list != NULL &&
                            vm_page_blacklist_lookup(list, pa))
                                printf("Skipping page with pa 0x%jx\n",
                                    (uintmax_t)pa);
                        else
                                vm_pageq_add_new_page(pa);
                        pa += PAGE_SIZE;
                }
        }
        freeenv(list);
        return (vaddr);
}

void
vm_page_flag_set(vm_page_t m, unsigned short bits)
{

        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        m->flags |= bits;
}

void
vm_page_flag_clear(vm_page_t m, unsigned short bits)
{

        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        m->flags &= ~bits;
}

void
vm_page_busy(vm_page_t m)
{

        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
        KASSERT((m->flags & PG_BUSY) == 0,
            ("vm_page_busy: page already busy!!!"));
        vm_page_flag_set(m, PG_BUSY);
}

/*
 *      vm_page_flash:
 *
 *      wakeup anyone waiting for the page.
 */
void
vm_page_flash(vm_page_t m)
{

        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
        if (m->flags & PG_WANTED) {
                vm_page_flag_clear(m, PG_WANTED);
                wakeup(m);
        }
}

/*
 *      vm_page_wakeup:
 *
 *      clear the PG_BUSY flag and wakeup anyone waiting for the
 *      page.
 *
 */
void
vm_page_wakeup(vm_page_t m)
{

        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
        KASSERT(m->flags & PG_BUSY, ("vm_page_wakeup: page not busy!!!"));
        vm_page_flag_clear(m, PG_BUSY);
        vm_page_flash(m);
}

void
vm_page_io_start(vm_page_t m)
{

        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
        m->busy++;
}

void
vm_page_io_finish(vm_page_t m)
{

        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        m->busy--;
        if (m->busy == 0)
                vm_page_flash(m);
}

/*
 * Keep the page from being freed by the page daemon.  This has much of
 * the same effect as wiring, except at much lower overhead, and should
 * be used only for *very* temporary holding ("wiring").
 */
void
vm_page_hold(vm_page_t mem)
{

        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        mem->hold_count++;
}

void
vm_page_unhold(vm_page_t mem)
{

        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        --mem->hold_count;
        KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
        if (mem->hold_count == 0 && mem->queue == PQ_HOLD)
                vm_page_free_toq(mem);
}

/*
 *      vm_page_free:
 *
 *      Free a page
 *
 *      The clearing of PG_ZERO is a temporary safety until the code can be
 *      reviewed to determine that PG_ZERO is being properly cleared on
 *      write faults or maps.  PG_ZERO was previously cleared in
 *      vm_page_alloc().
 */
void
vm_page_free(vm_page_t m)
{
        vm_page_flag_clear(m, PG_ZERO);
        vm_page_free_toq(m);
        vm_page_zero_idle_wakeup();
}

/*
 *      vm_page_free_zero:
 *
 *      Free a page to the zeroed-pages queue
 */
void
vm_page_free_zero(vm_page_t m)
{
        vm_page_flag_set(m, PG_ZERO);
        vm_page_free_toq(m);
}

/*
 *      vm_page_sleep_if_busy:
 *
 *      Sleep and release the page queues lock if PG_BUSY is set or,
 *      if also_m_busy is TRUE, busy is non-zero.  Returns TRUE if the
 *      thread slept and the page queues lock was released.
 *      Otherwise, retains the page queues lock and returns FALSE.
 */
int
vm_page_sleep_if_busy(vm_page_t m, int also_m_busy, const char *msg)
{
        vm_object_t object;

        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
        if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
                vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
                /*
                 * It's possible that while we sleep, the page will get
                 * unbusied and freed.  If we are holding the object
                 * lock, we will assume we hold a reference to the object
                 * such that even if m->object changes, we can re-lock
                 * it.
                 */
                object = m->object;
                VM_OBJECT_UNLOCK(object);
                msleep(m, &vm_page_queue_mtx, PDROP | PVM, msg, 0);
                VM_OBJECT_LOCK(object);
                return (TRUE);
        }
        return (FALSE);
}
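
/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * file): the conventional retry loop around the routine above.  Because
 * msleep() is entered with PDROP, the queues lock is gone after a sleep
 * and the lookup must be redone from scratch.
 */
#if 0
        VM_OBJECT_LOCK(object);
        vm_page_lock_queues();
        while (vm_page_sleep_if_busy(m, TRUE, "pgwait")) {
                /* We slept and the queues lock was dropped; start over. */
                vm_page_lock_queues();
                m = vm_page_lookup(object, pindex);
                if (m == NULL)
                        break;          /* page was freed while we slept */
        }
#endif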

/*
 *      vm_page_dirty:
 *
 *      make page all dirty
 */
void
vm_page_dirty(vm_page_t m)
{
        KASSERT(m->queue - m->pc != PQ_CACHE,
            ("vm_page_dirty: page in cache!"));
        KASSERT(m->queue - m->pc != PQ_FREE,
            ("vm_page_dirty: page is free!"));
        m->dirty = VM_PAGE_BITS_ALL;
}

/*
 *      vm_page_splay:
 *
 *      Implements Sleator and Tarjan's top-down splay algorithm.  Returns
 *      the vm_page containing the given pindex.  If, however, that
 *      pindex is not found in the vm_object, returns a vm_page that is
 *      adjacent to the pindex, coming before or after it.
 */
vm_page_t
vm_page_splay(vm_pindex_t pindex, vm_page_t root)
{
        struct vm_page dummy;
        vm_page_t lefttreemax, righttreemin, y;

        if (root == NULL)
                return (root);
        lefttreemax = righttreemin = &dummy;
        for (;; root = y) {
                if (pindex < root->pindex) {
                        if ((y = root->left) == NULL)
                                break;
                        if (pindex < y->pindex) {
                                /* Rotate right. */
                                root->left = y->right;
                                y->right = root;
                                root = y;
                                if ((y = root->left) == NULL)
                                        break;
                        }
                        /* Link into the new root's right tree. */
                        righttreemin->left = root;
                        righttreemin = root;
                } else if (pindex > root->pindex) {
                        if ((y = root->right) == NULL)
                                break;
                        if (pindex > y->pindex) {
                                /* Rotate left. */
                                root->right = y->left;
                                y->left = root;
                                root = y;
                                if ((y = root->right) == NULL)
                                        break;
                        }
                        /* Link into the new root's left tree. */
                        lefttreemax->right = root;
                        lefttreemax = root;
                } else
                        break;
        }
        /* Assemble the new root. */
        lefttreemax->right = root->left;
        righttreemin->left = root->right;
        root->left = dummy.right;
        root->right = dummy.left;
        return (root);
}
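
/*
 * Illustrative sketch (not part of the original file): vm_page_splay()
 * returns the new root and leaves the tree re-linked around it, so every
 * caller must store the result back, as vm_page_lookup() does below:
 */
#if 0
        object->root = vm_page_splay(pindex, object->root);
        if (object->root != NULL && object->root->pindex == pindex)
                m = object->root;       /* hit: page splayed to the root */
        else
                m = NULL;               /* miss: root is now a neighbor */
#endif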

/*
 *      vm_page_insert:         [ internal use only ]
 *
 *      Inserts the given mem entry into the object and object list.
 *
 *      The pagetables are not updated but will presumably fault the page
 *      in if necessary, or if a kernel page the caller will at some point
 *      enter the page into the kernel's pmap.  We are not allowed to block
 *      here so we *can't* do this anyway.
 *
 *      The object and page must be locked.
 *      This routine may not block.
 */
void
vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
{
        vm_page_t root;

        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
        if (m->object != NULL)
                panic("vm_page_insert: page already inserted");

        /*
         * Record the object/offset pair in this page
         */
        m->object = object;
        m->pindex = pindex;

        /*
         * Now link into the object's ordered list of backed pages.
         */
        root = object->root;
        if (root == NULL) {
                m->left = NULL;
                m->right = NULL;
                TAILQ_INSERT_TAIL(&object->memq, m, listq);
        } else {
                root = vm_page_splay(pindex, root);
                if (pindex < root->pindex) {
                        m->left = root->left;
                        m->right = root;
                        root->left = NULL;
                        TAILQ_INSERT_BEFORE(root, m, listq);
                } else if (pindex == root->pindex)
                        panic("vm_page_insert: offset already allocated");
                else {
                        m->right = root->right;
                        m->left = root;
                        root->right = NULL;
                        TAILQ_INSERT_AFTER(&object->memq, root, m, listq);
                }
        }
        object->root = m;
        object->generation++;

        /*
         * show that the object has one more resident page.
         */
        object->resident_page_count++;
        /*
         * Hold the vnode until the last page is released.
         */
        if (object->resident_page_count == 1 && object->type == OBJT_VNODE)
                vhold((struct vnode *)object->handle);

        /*
         * Since we are inserting a new and possibly dirty page,
         * update the object's OBJ_WRITEABLE and OBJ_MIGHTBEDIRTY flags.
         */
        if (m->flags & PG_WRITEABLE)
                vm_object_set_writeable_dirty(object);
}
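
/*
 * Illustrative note (not in the original file): because the splay in
 * vm_page_insert() brings the nearest existing neighbor of pindex to the
 * root, each branch above simply hangs the old root and one of its
 * subtrees off the new page, making the new page the root in O(1) once
 * the splay has finished.
 */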

/*
 *      vm_page_remove:
 *                              NOTE: used by device pager as well -wfj
 *
 *      Removes the given mem entry from the object/offset-page
 *      table and the object page list, but does not invalidate/terminate
 *      the backing store.
 *
 *      The object and page must be locked.
 *      The underlying pmap entry (if any) is NOT removed here.
 *      This routine may not block.
 */
void
vm_page_remove(vm_page_t m)
{
        vm_object_t object;
        vm_page_t root;

        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        if ((object = m->object) == NULL)
                return;
        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
        if (m->flags & PG_BUSY) {
                vm_page_flag_clear(m, PG_BUSY);
                vm_page_flash(m);
        }

        /*
         * Now remove from the object's list of backed pages.
         */
        if (m != object->root)
                vm_page_splay(m->pindex, object->root);
        if (m->left == NULL)
                root = m->right;
        else {
                root = vm_page_splay(m->pindex, m->left);
                root->right = m->right;
        }
        object->root = root;
        TAILQ_REMOVE(&object->memq, m, listq);

        /*
         * And show that the object has one fewer resident page.
         */
        object->resident_page_count--;
        object->generation++;
        /*
         * The vnode may now be recycled.
         */
        if (object->resident_page_count == 0 && object->type == OBJT_VNODE)
                vdrop((struct vnode *)object->handle);

        m->object = NULL;
}

/*
 *      vm_page_lookup:
 *
 *      Returns the page associated with the object/offset
 *      pair specified; if none is found, NULL is returned.
 *
 *      The object must be locked.
 *      This routine may not block.
 *      This is a critical path routine
 */
vm_page_t
vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
{
        vm_page_t m;

        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
        if ((m = object->root) != NULL && m->pindex != pindex) {
                m = vm_page_splay(pindex, m);
                if ((object->root = m)->pindex != pindex)
                        m = NULL;
        }
        return (m);
}
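
/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * file): a minimal use of vm_page_lookup() under the required object
 * lock, claiming the page before any lock is dropped.
 */
#if 0
        VM_OBJECT_LOCK(object);
        m = vm_page_lookup(object, pindex);
        if (m != NULL) {
                vm_page_lock_queues();
                vm_page_busy(m);        /* claim it before dropping locks */
                vm_page_unlock_queues();
        }
        VM_OBJECT_UNLOCK(object);
#endif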

/*
 *      vm_page_rename:
 *
 *      Move the given memory entry from its
 *      current object to the specified target object/offset.
 *
 *      The object must be locked.
 *      This routine may not block.
 *
 *      Note: swap associated with the page must be invalidated by the move.  We
 *            have to do this for several reasons:  (1) we aren't freeing the
 *            page, (2) we are dirtying the page, (3) the VM system is probably
 *            moving the page from object A to B, and will then later move
 *            the backing store from A to B and we can't have a conflict.
 *
 *      Note: we *always* dirty the page.  It is necessary both for the
 *            fact that we moved it, and because we may be invalidating
 *            swap.  If the page is on the cache, we have to deactivate it
 *            or vm_page_dirty() will panic.  Dirty pages are not allowed
 *            on the cache.
 */
void
vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
{

        vm_page_remove(m);
        vm_page_insert(m, new_object, new_pindex);
        if (m->queue - m->pc == PQ_CACHE)
                vm_page_deactivate(m);
        vm_page_dirty(m);
}

/*
 *      vm_page_select_cache:
 *
 *      Move a page of the given color from the cache queue to the free
 *      queue.  As pages might be found, but are not applicable, they are
 *      deactivated.
 *
 *      This routine may not block.
 */
vm_page_t
vm_page_select_cache(int color)
{
        vm_object_t object;
        vm_page_t m;
        boolean_t was_trylocked;

        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        while ((m = vm_pageq_find(PQ_CACHE, color, FALSE)) != NULL) {
                KASSERT(m->dirty == 0, ("Found dirty cache page %p", m));
                KASSERT(!pmap_page_is_mapped(m),
                    ("Found mapped cache page %p", m));
                KASSERT((m->flags & PG_UNMANAGED) == 0,
                    ("Found unmanaged cache page %p", m));
                KASSERT(m->wire_count == 0, ("Found wired cache page %p", m));
                if (m->hold_count == 0 && (object = m->object,
                    (was_trylocked = VM_OBJECT_TRYLOCK(object)) ||
                    VM_OBJECT_LOCKED(object))) {
                        KASSERT((m->flags & PG_BUSY) == 0 && m->busy == 0,
                            ("Found busy cache page %p", m));
                        vm_page_free(m);
                        if (was_trylocked)
                                VM_OBJECT_UNLOCK(object);
                        break;
                }
                vm_page_deactivate(m);
        }
        return (m);
}

/*
 *      vm_page_alloc:
 *
 *      Allocate and return a memory cell associated
 *      with this VM object/offset pair.
 *
 *      page_req classes:
 *      VM_ALLOC_NORMAL         normal process request
 *      VM_ALLOC_SYSTEM         system *really* needs a page
 *      VM_ALLOC_INTERRUPT      interrupt time request
 *      VM_ALLOC_ZERO           zero page
 *
 *      This routine may not block.
 *
 *      Additional special handling is required when called from an
 *      interrupt (VM_ALLOC_INTERRUPT).  We are not allowed to mess with
 *      the page cache in this case.
 */
vm_page_t
vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
{
        vm_page_t m = NULL;
        int color, flags, page_req;

        page_req = req & VM_ALLOC_CLASS_MASK;
        KASSERT(curthread->td_intr_nesting_level == 0 ||
            page_req == VM_ALLOC_INTERRUPT,
            ("vm_page_alloc(NORMAL|SYSTEM) in interrupt context"));

        if ((req & VM_ALLOC_NOOBJ) == 0) {
                KASSERT(object != NULL,
                    ("vm_page_alloc: NULL object."));
                VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
                color = (pindex + object->pg_color) & PQ_L2_MASK;
        } else
                color = pindex & PQ_L2_MASK;

        /*
         * The pager is allowed to eat deeper into the free page list.
         */
        if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT)) {
                page_req = VM_ALLOC_SYSTEM;
        }

loop:
        mtx_lock_spin(&vm_page_queue_free_mtx);
        if (cnt.v_free_count > cnt.v_free_reserved ||
            (page_req == VM_ALLOC_SYSTEM &&
             cnt.v_cache_count == 0 &&
             cnt.v_free_count > cnt.v_interrupt_free_min) ||
            (page_req == VM_ALLOC_INTERRUPT && cnt.v_free_count > 0)) {
                /*
                 * Allocate from the free queue if the number of free pages
                 * exceeds the minimum for the request class.
                 */
                m = vm_pageq_find(PQ_FREE, color, (req & VM_ALLOC_ZERO) != 0);
        } else if (page_req != VM_ALLOC_INTERRUPT) {
                mtx_unlock_spin(&vm_page_queue_free_mtx);
                /*
                 * Allocatable from cache (non-interrupt only).  On success,
                 * we must free the page and try again, thus ensuring that
                 * cnt.v_*_free_min counters are replenished.
                 */
                vm_page_lock_queues();
                if ((m = vm_page_select_cache(color)) == NULL) {
#if defined(DIAGNOSTIC)
                        if (cnt.v_cache_count > 0)
                                printf("vm_page_alloc(NORMAL): missing pages on cache queue: %d\n", cnt.v_cache_count);
#endif
                        vm_page_unlock_queues();
                        atomic_add_int(&vm_pageout_deficit, 1);
                        pagedaemon_wakeup();

                        if (page_req != VM_ALLOC_SYSTEM)
                                return (NULL);

                        mtx_lock_spin(&vm_page_queue_free_mtx);
                        if (cnt.v_free_count <= cnt.v_interrupt_free_min) {
                                mtx_unlock_spin(&vm_page_queue_free_mtx);
                                return (NULL);
                        }
                        m = vm_pageq_find(PQ_FREE, color, (req & VM_ALLOC_ZERO) != 0);
                } else {
                        vm_page_unlock_queues();
                        goto loop;
                }
        } else {
                /*
                 * Not allocatable from cache from interrupt, give up.
                 */
                mtx_unlock_spin(&vm_page_queue_free_mtx);
                atomic_add_int(&vm_pageout_deficit, 1);
                pagedaemon_wakeup();
                return (NULL);
        }

        /*
         *  At this point we had better have found a good page.
         */

        KASSERT(
            m != NULL,
            ("vm_page_alloc(): missing page on free queue")
        );

        /*
         * Remove from free queue
         */
        vm_pageq_remove_nowakeup(m);

        /*
         * Initialize structure.  Only the PG_ZERO flag is inherited.
         */
        flags = PG_BUSY;
        if (m->flags & PG_ZERO) {
                vm_page_zero_count--;
                if (req & VM_ALLOC_ZERO)
                        flags = PG_ZERO | PG_BUSY;
        }
        if (req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ))
                flags &= ~PG_BUSY;
        m->flags = flags;
        if (req & VM_ALLOC_WIRED) {
                atomic_add_int(&cnt.v_wire_count, 1);
                m->wire_count = 1;
        } else
                m->wire_count = 0;
        m->hold_count = 0;
        m->act_count = 0;
        m->busy = 0;
        m->valid = 0;
        KASSERT(m->dirty == 0, ("vm_page_alloc: free/cache page %p was dirty", m));
        mtx_unlock_spin(&vm_page_queue_free_mtx);

        if ((req & VM_ALLOC_NOOBJ) == 0)
                vm_page_insert(m, object, pindex);
        else
                m->pindex = pindex;

        /*
         * Don't wakeup too often - wakeup the pageout daemon when
         * we would be nearly out of memory.
         */
        if (vm_paging_needed())
                pagedaemon_wakeup();

        return (m);
}
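
/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * file): a request class combined with modifier flags, with a transient
 * shortage handled via VM_WAIT, in the same style as vm_page_grab()
 * below.
 */
#if 0
        VM_OBJECT_LOCK(object);
        while ((m = vm_page_alloc(object, pindex,
            VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
                VM_OBJECT_UNLOCK(object);
                VM_WAIT;                /* sleep until pages are freed */
                VM_OBJECT_LOCK(object);
        }
        VM_OBJECT_UNLOCK(object);
#endif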

/*
 *      vm_wait:        (also see VM_WAIT macro)
 *
 *      Block until free pages are available for allocation
 *      - Called in various places before memory allocations.
 */
void
vm_wait(void)
{

        vm_page_lock_queues();
        if (curproc == pageproc) {
                vm_pageout_pages_needed = 1;
                msleep(&vm_pageout_pages_needed, &vm_page_queue_mtx,
                    PDROP | PSWP, "VMWait", 0);
        } else {
                if (!vm_pages_needed) {
                        vm_pages_needed = 1;
                        wakeup(&vm_pages_needed);
                }
                msleep(&cnt.v_free_count, &vm_page_queue_mtx, PDROP | PVM,
                    "vmwait", 0);
        }
}

/*
 *      vm_waitpfault:  (also see VM_WAITPFAULT macro)
 *
 *      Block until free pages are available for allocation
 *      - Called only in vm_fault so that processes page faulting
 *        can be easily tracked.
 *      - Sleeps at a lower priority than vm_wait() so that vm_wait()ing
 *        processes will be able to grab memory first.  Do not change
 *        this balance without careful testing first.
 */
void
vm_waitpfault(void)
{

        vm_page_lock_queues();
        if (!vm_pages_needed) {
                vm_pages_needed = 1;
                wakeup(&vm_pages_needed);
        }
        msleep(&cnt.v_free_count, &vm_page_queue_mtx, PDROP | PUSER,
            "pfault", 0);
}

/*
 *      vm_page_activate:
 *
 *      Put the specified page on the active list (if appropriate).
 *      Ensure that act_count is at least ACT_INIT but do not otherwise
 *      mess with it.
 *
 *      The page queues must be locked.
 *      This routine may not block.
 */
void
vm_page_activate(vm_page_t m)
{

        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        if (m->queue != PQ_ACTIVE) {
                if ((m->queue - m->pc) == PQ_CACHE)
                        cnt.v_reactivated++;
                vm_pageq_remove(m);
                if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
                        if (m->act_count < ACT_INIT)
                                m->act_count = ACT_INIT;
                        vm_pageq_enqueue(PQ_ACTIVE, m);
                }
        } else {
                if (m->act_count < ACT_INIT)
                        m->act_count = ACT_INIT;
        }
}

/*
 *      vm_page_free_wakeup:
 *
 *      Helper routine for vm_page_free_toq() and vm_page_cache().  This
 *      routine is called when a page has been added to the cache or free
 *      queues.
 *
 *      The page queues must be locked.
 *      This routine may not block.
 */
static __inline void
vm_page_free_wakeup(void)
{

        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        /*
         * if pageout daemon needs pages, then tell it that there are
         * some free.
         */
        if (vm_pageout_pages_needed &&
            cnt.v_cache_count + cnt.v_free_count >= cnt.v_pageout_free_min) {
                wakeup(&vm_pageout_pages_needed);
                vm_pageout_pages_needed = 0;
        }
        /*
         * wakeup processes that are waiting on memory if we hit a
         * high water mark. And wakeup scheduler process if we have
         * lots of memory. this process will swapin processes.
         */
        if (vm_pages_needed && !vm_page_count_min()) {
                vm_pages_needed = 0;
                wakeup(&cnt.v_free_count);
        }
}

/*
 *      vm_page_free_toq:
 *
 *      Returns the given page to the PQ_FREE list,
 *      disassociating it from any VM object.
 *
 *      Object and page must be locked prior to entry.
 *      This routine may not block.
 */

void
vm_page_free_toq(vm_page_t m)
{
        struct vpgqueues *pq;

        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        cnt.v_tfree++;

        if (m->busy || ((m->queue - m->pc) == PQ_FREE)) {
                printf(
                "vm_page_free: pindex(%lu), busy(%d), PG_BUSY(%d), hold(%d)\n",
                    (u_long)m->pindex, m->busy, (m->flags & PG_BUSY) ? 1 : 0,
                    m->hold_count);
                if ((m->queue - m->pc) == PQ_FREE)
                        panic("vm_page_free: freeing free page");
                else
                        panic("vm_page_free: freeing busy page");
        }

        /*
         * unqueue, then remove page.  Note that we cannot destroy
         * the page here because we do not want to call the pager's
         * callback routine until after we've put the page on the
         * appropriate free queue.
         */
        vm_pageq_remove_nowakeup(m);
        vm_page_remove(m);

        /*
         * If fictitious remove object association and
         * return, otherwise delay object association removal.
         */
        if ((m->flags & PG_FICTITIOUS) != 0) {
                return;
        }

        m->valid = 0;
        vm_page_undirty(m);

        if (m->wire_count != 0) {
                if (m->wire_count > 1) {
                        panic("vm_page_free: invalid wire count (%d), pindex: 0x%lx",
                                m->wire_count, (long)m->pindex);
                }
                panic("vm_page_free: freeing wired page");
        }

        /*
         * Clear the UNMANAGED flag when freeing an unmanaged page.
         */
        if (m->flags & PG_UNMANAGED) {
                m->flags &= ~PG_UNMANAGED;
        }

        if (m->hold_count != 0) {
                m->flags &= ~PG_ZERO;
                m->queue = PQ_HOLD;
        } else
                m->queue = PQ_FREE + m->pc;
        pq = &vm_page_queues[m->queue];
        mtx_lock_spin(&vm_page_queue_free_mtx);
        pq->lcnt++;
        ++(*pq->cnt);

        /*
         * Put zero'd pages on the end ( where we look for zero'd pages
         * first ) and non-zeroed pages at the head.
         */
        if (m->flags & PG_ZERO) {
                TAILQ_INSERT_TAIL(&pq->pl, m, pageq);
                ++vm_page_zero_count;
        } else {
                TAILQ_INSERT_HEAD(&pq->pl, m, pageq);
        }
        mtx_unlock_spin(&vm_page_queue_free_mtx);
        vm_page_free_wakeup();
}

/*
 *      vm_page_unmanage:
 *
 *      Prevent PV management from being done on the page.  The page is
 *      removed from the paging queues as if it were wired, and as a
 *      consequence of no longer being managed the pageout daemon will not
 *      touch it (since there is no way to locate the pte mappings for the
 *      page).  madvise() calls that mess with the pmap will also no longer
 *      operate on the page.
 *
 *      Beyond that the page is still reasonably 'normal'.  Freeing the page
 *      will clear the flag.
 *
 *      This routine is used by OBJT_PHYS objects - objects using unswappable
 *      physical memory as backing store rather than swap-backed memory and
 *      will eventually be extended to support 4MB unmanaged physical
 *      mappings.
 */
void
vm_page_unmanage(vm_page_t m)
{

        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        if ((m->flags & PG_UNMANAGED) == 0) {
                if (m->wire_count == 0)
                        vm_pageq_remove(m);
        }
        vm_page_flag_set(m, PG_UNMANAGED);
}

/*
 *      vm_page_wire:
 *
 *      Mark this page as wired down by yet
 *      another map, removing it from paging queues
 *      as necessary.
 *
 *      The page queues must be locked.
 *      This routine may not block.
 */
void
vm_page_wire(vm_page_t m)
{

        /*
         * Only bump the wire statistics if the page is not already wired,
         * and only unqueue the page if it is on some queue (if it is unmanaged
         * it is already off the queues).
         */
        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        if (m->flags & PG_FICTITIOUS)
                return;
        if (m->wire_count == 0) {
                if ((m->flags & PG_UNMANAGED) == 0)
                        vm_pageq_remove(m);
                atomic_add_int(&cnt.v_wire_count, 1);
        }
        m->wire_count++;
        KASSERT(m->wire_count != 0, ("vm_page_wire: wire_count overflow m=%p", m));
}

/*
 *      vm_page_unwire:
 *
 *      Release one wiring of this page, potentially
 *      enabling it to be paged again.
 *
 *      Many pages placed on the inactive queue should actually go
 *      into the cache, but it is difficult to figure out which.  What
 *      we do instead, if the inactive target is well met, is to put
 *      clean pages at the head of the inactive queue instead of the tail.
 *      This will cause them to be moved to the cache more quickly and
 *      if not actively re-referenced, freed more quickly.  If we just
 *      stick these pages at the end of the inactive queue, heavy filesystem
 *      meta-data accesses can cause an unnecessary paging load on memory bound
 *      processes.  This optimization causes one-time-use metadata to be
 *      reused more quickly.
 *
 *      BUT, if we are in a low-memory situation we have no choice but to
 *      put clean pages on the cache queue.
 *
 *      A number of routines use vm_page_unwire() to guarantee that the page
 *      will go into either the inactive or active queues, and will NEVER
 *      be placed in the cache - for example, just after dirtying a page.
 *      dirty pages in the cache are not allowed.
 *
 *      The page queues must be locked.
 *      This routine may not block.
 */
void
vm_page_unwire(vm_page_t m, int activate)
{

        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        if (m->flags & PG_FICTITIOUS)
                return;
        if (m->wire_count > 0) {
                m->wire_count--;
                if (m->wire_count == 0) {
                        atomic_subtract_int(&cnt.v_wire_count, 1);
                        if (m->flags & PG_UNMANAGED) {
                                ;
                        } else if (activate)
                                vm_pageq_enqueue(PQ_ACTIVE, m);
                        else {
                                vm_page_flag_clear(m, PG_WINATCFLS);
                                vm_pageq_enqueue(PQ_INACTIVE, m);
                        }
                }
        } else {
                panic("vm_page_unwire: invalid wire count: %d", m->wire_count);
        }
}


/*
 * Move the specified page to the inactive queue.  If the page has
 * any associated swap, the swap is deallocated.
 *
 * Normally athead is 0 resulting in LRU operation.  athead is set
 * to 1 if we want this page to be 'as if it were placed in the cache',
 * except without unmapping it from the process address space.
 *
 * This routine may not block.
 */
static __inline void
_vm_page_deactivate(vm_page_t m, int athead)
{

        mtx_assert(&vm_page_queue_mtx, MA_OWNED);

        /*
         * Ignore if already inactive.
         */
        if (m->queue == PQ_INACTIVE)
                return;
        if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
                if ((m->queue - m->pc) == PQ_CACHE)
                        cnt.v_reactivated++;
                vm_page_flag_clear(m, PG_WINATCFLS);
                vm_pageq_remove(m);
                if (athead)
                        TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
                else
                        TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
                m->queue = PQ_INACTIVE;
                vm_page_queues[PQ_INACTIVE].lcnt++;
                cnt.v_inactive_count++;
        }
}

void
vm_page_deactivate(vm_page_t m)
{
        _vm_page_deactivate(m, 0);
}

/*
 * vm_page_try_to_cache:
 *
 * Returns 0 on failure, 1 on success
 */
int
vm_page_try_to_cache(vm_page_t m)
{

        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
        if (m->dirty || m->hold_count || m->busy || m->wire_count ||
            (m->flags & (PG_BUSY|PG_UNMANAGED))) {
                return (0);
        }
        pmap_remove_all(m);
        if (m->dirty)
                return (0);
        vm_page_cache(m);
        return (1);
}

/*
 * vm_page_try_to_free()
 *
 *      Attempt to free the page.  If we cannot free it, we do nothing.
 *      1 is returned on success, 0 on failure.
 */
int
vm_page_try_to_free(vm_page_t m)
{

        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        if (m->object != NULL)
                VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
        if (m->dirty || m->hold_count || m->busy || m->wire_count ||
            (m->flags & (PG_BUSY|PG_UNMANAGED))) {
                return (0);
        }
        pmap_remove_all(m);
        if (m->dirty)
                return (0);
        vm_page_free(m);
        return (1);
}

/*
 * vm_page_cache
 *
 * Put the specified page onto the page cache queue (if appropriate).
 *
 * This routine may not block.
 */
void
vm_page_cache(vm_page_t m)
{

        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
        if ((m->flags & (PG_BUSY|PG_UNMANAGED)) || m->busy ||
            m->hold_count || m->wire_count) {
                printf("vm_page_cache: attempting to cache busy page\n");
                return;
        }
        if ((m->queue - m->pc) == PQ_CACHE)
                return;

        /*
         * Remove all pmaps and indicate that the page is not
         * writeable or mapped.
         */
        pmap_remove_all(m);
        if (m->dirty != 0) {
                panic("vm_page_cache: caching a dirty page, pindex: %ld",
                        (long)m->pindex);
        }
        vm_pageq_remove_nowakeup(m);
        vm_pageq_enqueue(PQ_CACHE + m->pc, m);
        vm_page_free_wakeup();
}

/*
 * vm_page_dontneed
 *
 *      Cache, deactivate, or do nothing as appropriate.  This routine
 *      is typically used by madvise() MADV_DONTNEED.
 *
 *      Generally speaking we want to move the page into the cache so
 *      it gets reused quickly.  However, this can result in pages
 *      recycling too quickly: small objects will never become fully
 *      cached.  On the other hand, if we move the page to the inactive
 *      queue, very large objects will unnecessarily blow away our
 *      inactive and cache queues.
 *
 *      The solution is to move the pages based on a fixed weighting.  We
 *      either leave them alone, deactivate them, or move them to the cache,
 *      where moving them to the cache has the highest weighting.
 *      By forcing some pages into other queues we eventually force the
 *      system to balance the queues, potentially recovering other unrelated
 *      space from active.  The idea is to not force this to happen too
 *      often.
 */
void
vm_page_dontneed(vm_page_t m)
{
        static int dnweight;
        int dnw;
        int head;

        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        dnw = ++dnweight;

        /*
         * Occasionally leave the page alone.
         */
 1437         if ((dnw & 0x01F0) == 0 ||
 1438             m->queue == PQ_INACTIVE || 
 1439             m->queue - m->pc == PQ_CACHE
 1440         ) {
 1441                 if (m->act_count >= ACT_INIT)
 1442                         --m->act_count;
 1443                 return;
 1444         }
 1445 
 1446         if (m->dirty == 0 && pmap_is_modified(m))
 1447                 vm_page_dirty(m);
 1448 
 1449         if (m->dirty || (dnw & 0x0070) == 0) {
 1450                 /*
 1451                  * Deactivate the page 3 times out of 32.
 1452                  */
 1453                 head = 0;
 1454         } else {
 1455                 /*
 1456                  * Pseudo-cache the page 28 times out of every 32: the
 1457                  * page is actually deactivated, but placed at the head
 1458                  * of the inactive queue rather than the tail.
 1459                  */
 1460                 head = 1;
 1461         }
 1462         _vm_page_deactivate(m, head);
 1463 }
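
/*
 * A standalone sketch (userland C, not kernel code) that tallies how
 * the two masks above split 512 consecutive calls, ignoring the
 * dirty-page override: 16/512 (1/32) of pages are left alone, 48/512
 * (3/32) are deactivated at the tail, and the remaining 448/512
 * (28/32) are pseudo-cached, matching the weights quoted above.
 */
#if 0
#include <stdio.h>

int
main(void)
{
        int dnw, alone = 0, deact = 0, cache = 0;

        for (dnw = 1; dnw <= 512; dnw++) {
                if ((dnw & 0x01F0) == 0)
                        alone++;        /* left alone */
                else if ((dnw & 0x0070) == 0)
                        deact++;        /* deactivated at the tail */
                else
                        cache++;        /* deactivated at the head */
        }
        printf("alone %d, deactivate %d, cache %d (of 512)\n",
            alone, deact, cache);
        return (0);
}
#endif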
 1464 
 1465 /*
 1466  * Grab a page, waiting until we are woken up because the page has
 1467  * changed state.  We keep waiting as long as the page remains busy
 1468  * in the object.  If the page doesn't exist, first allocate it
 1469  * and then conditionally zero it.
 1470  *
 1471  * This routine may block.
 1472  */
 1473 vm_page_t
 1474 vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
 1475 {
 1476         vm_page_t m;
 1477 
 1478         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
 1479 retrylookup:
 1480         if ((m = vm_page_lookup(object, pindex)) != NULL) {
 1481                 vm_page_lock_queues();
 1482                 if (m->busy || (m->flags & PG_BUSY)) {
 1483                         vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
 1484                         VM_OBJECT_UNLOCK(object);
 1485                         msleep(m, &vm_page_queue_mtx, PDROP | PVM, "pgrbwt", 0);
 1486                         VM_OBJECT_LOCK(object);
 1487                         if ((allocflags & VM_ALLOC_RETRY) == 0)
 1488                                 return (NULL);
 1489                         goto retrylookup;
 1490                 } else {
 1491                         if (allocflags & VM_ALLOC_WIRED)
 1492                                 vm_page_wire(m);
 1493                         if ((allocflags & VM_ALLOC_NOBUSY) == 0)
 1494                                 vm_page_busy(m);
 1495                         vm_page_unlock_queues();
 1496                         return (m);
 1497                 }
 1498         }
 1499         m = vm_page_alloc(object, pindex, allocflags & ~VM_ALLOC_RETRY);
 1500         if (m == NULL) {
 1501                 VM_OBJECT_UNLOCK(object);
 1502                 VM_WAIT;
 1503                 VM_OBJECT_LOCK(object);
 1504                 if ((allocflags & VM_ALLOC_RETRY) == 0)
 1505                         return (NULL);
 1506                 goto retrylookup;
 1507         }
 1508         if ((allocflags & VM_ALLOC_ZERO) && (m->flags & PG_ZERO) == 0)
 1509                 pmap_zero_page(m);
 1510         return (m);
 1511 }
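
/*
 * An illustrative call (not part of this file): grab the page at
 * `pindex', zero-filling it if it had to be allocated.  Because
 * VM_ALLOC_RETRY is set, the return value is never NULL.  The caller
 * is assumed to hold the object lock and to wake up the page when
 * done with it.
 */
#if 0
        m = vm_page_grab(object, pindex,
            VM_ALLOC_NORMAL | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
        /* ... use the busied page ... */
        vm_page_lock_queues();
        vm_page_wakeup(m);              /* clear PG_BUSY and wake waiters */
        vm_page_unlock_queues();
#endif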
 1512 
 1513 /*
 1514  * Mapping function for the valid or dirty bits in a page.
 1515  * May not block.
 1516  *
 1517  * The input range (base, size) is required to lie within a single page.
 1518  */
 1519 __inline int
 1520 vm_page_bits(int base, int size)
 1521 {
 1522         int first_bit;
 1523         int last_bit;
 1524 
 1525         KASSERT(
 1526             base + size <= PAGE_SIZE,
 1527             ("vm_page_bits: illegal base/size %d/%d", base, size)
 1528         );
 1529 
 1530         if (size == 0)          /* handle degenerate case */
 1531                 return (0);
 1532 
 1533         first_bit = base >> DEV_BSHIFT;
 1534         last_bit = (base + size - 1) >> DEV_BSHIFT;
 1535 
 1536         return ((2 << last_bit) - (1 << first_bit));
 1537 }
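
/*
 * A worked standalone example (assuming DEV_BSHIFT = 9, i.e. DEV_BSIZE
 * = 512): the byte range [512, 1536) touches chunks 1 and 2, so the
 * formula (2 << last_bit) - (1 << first_bit) gives (2 << 2) - (1 << 1)
 * = 8 - 2 = 0x06, i.e. bits 1 and 2 set.
 */
#if 0
#include <stdio.h>

#define DEV_BSHIFT      9       /* assumed: log2(512) */

static int
page_bits(int base, int size)
{
        int first_bit, last_bit;

        if (size == 0)
                return (0);
        first_bit = base >> DEV_BSHIFT;
        last_bit = (base + size - 1) >> DEV_BSHIFT;
        return ((2 << last_bit) - (1 << first_bit));
}

int
main(void)
{
        printf("0x%02x\n", page_bits(512, 1024));       /* prints 0x06 */
        return (0);
}
#endif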
 1538 
 1539 /*
 1540  *      vm_page_set_validclean:
 1541  *
 1542  *      Sets portions of a page valid and clean.  The arguments are expected
 1543  *      to be DEV_BSIZE aligned, but if they aren't, the bitmap is inclusive
 1544  *      of any partial chunks touched by the range.  The invalid portions of
 1545  *      such chunks will be zeroed.
 1546  *
 1547  *      This routine may not block.
 1548  *
 1549  *      (base + size) must be less than or equal to PAGE_SIZE.
 1550  */
 1551 void
 1552 vm_page_set_validclean(vm_page_t m, int base, int size)
 1553 {
 1554         int pagebits;
 1555         int frag;
 1556         int endoff;
 1557 
 1558         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1559         VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 1560         if (size == 0)  /* handle degenerate case */
 1561                 return;
 1562 
 1563         /*
 1564          * If the base is not DEV_BSIZE aligned and the valid
 1565          * bit is clear, we have to zero out a portion of the
 1566          * first block.
 1567          */
 1568         if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
 1569             (m->valid & (1 << (base >> DEV_BSHIFT))) == 0)
 1570                 pmap_zero_page_area(m, frag, base - frag);
 1571 
 1572         /*
 1573          * If the ending offset is not DEV_BSIZE aligned and the 
 1574          * valid bit is clear, we have to zero out a portion of
 1575          * the last block.
 1576          */
 1577         endoff = base + size;
 1578         if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
 1579             (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0)
 1580                 pmap_zero_page_area(m, endoff,
 1581                     DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
 1582 
 1583         /*
 1584          * Set valid, clear dirty bits.  If validating the entire
 1585          * page we can safely clear the pmap modify bit.  We also
 1586          * use this opportunity to clear the PG_NOSYNC flag.  If a process
 1587          * takes a write fault on a MAP_NOSYNC memory area the flag will
 1588          * be set again.
 1589          *
 1590          * We set valid bits inclusive of any overlap, but we can only
 1591          * clear dirty bits for DEV_BSIZE chunks that are fully within
 1592          * the range.
 1593          */
 1594         pagebits = vm_page_bits(base, size);
 1595         m->valid |= pagebits;
 1596 #if 0   /* NOT YET */
 1597         if ((frag = base & (DEV_BSIZE - 1)) != 0) {
 1598                 frag = DEV_BSIZE - frag;
 1599                 base += frag;
 1600                 size -= frag;
 1601                 if (size < 0)
 1602                         size = 0;
 1603         }
 1604         pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1));
 1605 #endif
 1606         m->dirty &= ~pagebits;
 1607         if (base == 0 && size == PAGE_SIZE) {
 1608                 pmap_clear_modify(m);
 1609                 vm_page_flag_clear(m, PG_NOSYNC);
 1610         }
 1611 }
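
/*
 * A worked example of the alignment handling above (assuming DEV_BSIZE
 * = 512): for base = 100 and size = 900 the range covers bytes
 * [100, 1000).  If chunk 0 is not yet valid, bytes [0, 100) are zeroed;
 * if chunk 1 is not yet valid, bytes [1000, 1024) are zeroed.  The
 * valid mask then gains vm_page_bits(100, 900) = 0x03 (chunks 0 and 1),
 * and the same bits are cleared from the dirty mask.
 */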
 1612 
 1613 void
 1614 vm_page_clear_dirty(vm_page_t m, int base, int size)
 1615 {
 1616 
 1617         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1618         m->dirty &= ~vm_page_bits(base, size);
 1619 }
 1620 
 1621 /*
 1622  *      vm_page_set_invalid:
 1623  *
 1624  *      Invalidates DEV_BSIZE'd chunks within a page.  Both the
 1625  *      valid and dirty bits for the affected areas are cleared.
 1626  *
 1627  *      May not block.
 1628  */
 1629 void
 1630 vm_page_set_invalid(vm_page_t m, int base, int size)
 1631 {
 1632         int bits;
 1633 
 1634         VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 1635         bits = vm_page_bits(base, size);
 1636         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1637         m->valid &= ~bits;
 1638         m->dirty &= ~bits;
 1639         m->object->generation++;
 1640 }
 1641 
 1642 /*
 1643  * vm_page_zero_invalid()
 1644  *
 1645  *      The kernel assumes that the invalid portions of a page contain 
 1646  *      garbage, but such pages can be mapped into memory by user code.
 1647  *      When this occurs, we must zero out the non-valid portions of the
 1648  *      page so user code sees what it expects.
 1649  *
 1650  *      Pages are most often semi-valid when the end of a file is mapped 
 1651  *      into memory and the file's size is not page aligned.
 1652  */
 1653 void
 1654 vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
 1655 {
 1656         int b;
 1657         int i;
 1658 
 1659         VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 1660         /*
 1661          * Scan the valid bits looking for invalid sections that
 1662          * must be zeroed.  Invalid sub-DEV_BSIZE'd areas (where the
 1663          * valid bit may be set) have already been zeroed by
 1664          * vm_page_set_validclean().
 1665          */
 1666         for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
 1667                 if (i == (PAGE_SIZE / DEV_BSIZE) || 
 1668                     (m->valid & (1 << i))
 1669                 ) {
 1670                         if (i > b) {
 1671                                 pmap_zero_page_area(m, 
 1672                                     b << DEV_BSHIFT, (i - b) << DEV_BSHIFT);
 1673                         }
 1674                         b = i + 1;
 1675                 }
 1676         }
 1677 
 1678         /*
 1679          * setvalid is TRUE when we can safely set the zeroed areas
 1680          * as being valid.  We can do this if there are no cache consistency
 1681          * issues, e.g., it is OK to do so with UFS but not with NFS.
 1682          */
 1683         if (setvalid)
 1684                 m->valid = VM_PAGE_BITS_ALL;
 1685 }
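
/*
 * A worked example of the scan above (assuming PAGE_SIZE = 4096 and
 * DEV_BSIZE = 512, i.e. eight chunks): with m->valid = 0x01 only chunk
 * 0 is valid, so the loop issues a single pmap_zero_page_area(m, 512,
 * 3584) covering chunks 1 through 7; with m->valid = 0x0f, bytes
 * [2048, 4096) are zeroed instead.
 */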
 1686 
 1687 /*
 1688  *      vm_page_is_valid:
 1689  *
 1690  *      Is the (partial) page valid?  In the degenerate case where
 1691  *      size == 0, this returns FALSE if the page is entirely invalid
 1692  *      and TRUE otherwise.
 1693  *
 1694  *      May not block.
 1695  */
 1696 int
 1697 vm_page_is_valid(vm_page_t m, int base, int size)
 1698 {
 1699         int bits = vm_page_bits(base, size);
 1700 
 1701         VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 1702         if (m->valid && ((m->valid & bits) == bits))
 1703                 return (1);
 1704         else
 1705                 return (0);
 1706 }
 1707 
 1708 /*
 1709  * update dirty bits from pmap/mmu.  May not block.
 1710  */
 1711 void
 1712 vm_page_test_dirty(vm_page_t m)
 1713 {
 1714         if ((m->dirty != VM_PAGE_BITS_ALL) && pmap_is_modified(m)) {
 1715                 vm_page_dirty(m);
 1716         }
 1717 }
 1718 
 1719 int so_zerocp_fullpage = 0;
 1720 
 1721 void
 1722 vm_page_cowfault(vm_page_t m)
 1723 {
 1724         vm_page_t mnew;
 1725         vm_object_t object;
 1726         vm_pindex_t pindex;
 1727 
 1728         object = m->object;
 1729         pindex = m->pindex;
 1730 
 1731  retry_alloc:
 1732         pmap_remove_all(m);
 1733         vm_page_remove(m);
 1734         mnew = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL);
 1735         if (mnew == NULL) {
 1736                 vm_page_insert(m, object, pindex);
 1737                 vm_page_unlock_queues();
 1738                 VM_OBJECT_UNLOCK(object);
 1739                 VM_WAIT;
 1740                 VM_OBJECT_LOCK(object);
 1741                 vm_page_lock_queues();
 1742                 goto retry_alloc;
 1743         }
 1744 
 1745         if (m->cow == 0) {
 1746                 /*
 1747                  * Check to see if we raced with an xmit completion
 1748                  * while waiting to allocate a page.  If so, put things
 1749                  * back the way they were.
 1750                  */
 1751                 vm_page_free(mnew);
 1752                 vm_page_insert(m, object, pindex);
 1753         } else { /* clear COW & copy page */
 1754                 if (!so_zerocp_fullpage)
 1755                         pmap_copy_page(m, mnew);
 1756                 mnew->valid = VM_PAGE_BITS_ALL;
 1757                 vm_page_dirty(mnew);
 1758                 vm_page_flag_clear(mnew, PG_BUSY);
 1759                 mnew->wire_count = m->wire_count - m->cow;
 1760                 m->wire_count = m->cow;
 1761         }
 1762 }
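
/*
 * A worked example of the wire-count split above: if the original page
 * has wire_count = 5 with cow = 2 outstanding zero-copy references,
 * the replacement page inherits mnew->wire_count = 5 - 2 = 3, while
 * the original keeps m->wire_count = 2 until those references are
 * dropped.
 */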
 1763 
 1764 void 
 1765 vm_page_cowclear(vm_page_t m)
 1766 {
 1767 
 1768         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1769         if (m->cow) {
 1770                 m->cow--;
 1771                 /*
 1772                  * Let vm_fault add back write permission lazily.
 1773                  */
 1774         }
 1775         /*
 1776          * sf_buf_free() will free the page, so we needn't do it here.
 1777          */
 1778 }
 1779 
 1780 void
 1781 vm_page_cowsetup(vm_page_t m)
 1782 {
 1783 
 1784         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1785         m->cow++;
 1786         pmap_page_protect(m, VM_PROT_READ);
 1787 }
 1788 
 1789 #include "opt_ddb.h"
 1790 #ifdef DDB
 1791 #include <sys/kernel.h>
 1792 
 1793 #include <ddb/ddb.h>
 1794 
 1795 DB_SHOW_COMMAND(page, vm_page_print_page_info)
 1796 {
 1797         db_printf("cnt.v_free_count: %d\n", cnt.v_free_count);
 1798         db_printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
 1799         db_printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
 1800         db_printf("cnt.v_active_count: %d\n", cnt.v_active_count);
 1801         db_printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
 1802         db_printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
 1803         db_printf("cnt.v_free_min: %d\n", cnt.v_free_min);
 1804         db_printf("cnt.v_free_target: %d\n", cnt.v_free_target);
 1805         db_printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
 1806         db_printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
 1807 }
 1808 
 1809 DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
 1810 {
 1811         int i;
 1812         db_printf("PQ_FREE:");
 1813         for (i = 0; i < PQ_L2_SIZE; i++) {
 1814                 db_printf(" %d", vm_page_queues[PQ_FREE + i].lcnt);
 1815         }
 1816         db_printf("\n");
 1817                 
 1818         db_printf("PQ_CACHE:");
 1819         for (i = 0; i < PQ_L2_SIZE; i++) {
 1820                 db_printf(" %d", vm_page_queues[PQ_CACHE + i].lcnt);
 1821         }
 1822         db_printf("\n");
 1823 
 1824         db_printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n",
 1825                 vm_page_queues[PQ_ACTIVE].lcnt,
 1826                 vm_page_queues[PQ_INACTIVE].lcnt);
 1827 }
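
/*
 * From the in-kernel debugger these commands are invoked by name, e.g.:
 *
 *      db> show page
 *      db> show pageq
 */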
 1828 #endif /* DDB */
