FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_page.c


    1 /*
    2  * Copyright (c) 1991 Regents of the University of California.
    3  * All rights reserved.
    4  *
    5  * This code is derived from software contributed to Berkeley by
    6  * The Mach Operating System project at Carnegie-Mellon University.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  * 3. All advertising materials mentioning features or use of this software
   17  *    must display the following acknowledgement:
   18  *      This product includes software developed by the University of
   19  *      California, Berkeley and its contributors.
   20  * 4. Neither the name of the University nor the names of its contributors
   21  *    may be used to endorse or promote products derived from this software
   22  *    without specific prior written permission.
   23  *
   24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   34  * SUCH DAMAGE.
   35  *
   36  *      from: @(#)vm_page.c     7.4 (Berkeley) 5/7/91
   37  * $FreeBSD: releng/5.1/sys/vm/vm_page.c 114003 2003-04-25 06:35:05Z alc $
   38  */
   39 
   40 /*
   41  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
   42  * All rights reserved.
   43  *
   44  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
   45  *
   46  * Permission to use, copy, modify and distribute this software and
   47  * its documentation is hereby granted, provided that both the copyright
   48  * notice and this permission notice appear in all copies of the
   49  * software, derivative works or modified versions, and any portions
   50  * thereof, and that both notices appear in supporting documentation.
   51  *
   52  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
   53  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
   54  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
   55  *
   56  * Carnegie Mellon requests users of this software to return to
   57  *
   58  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
   59  *  School of Computer Science
   60  *  Carnegie Mellon University
   61  *  Pittsburgh PA 15213-3890
   62  *
   63  * any improvements or extensions that they make and grant Carnegie the
   64  * rights to redistribute these changes.
   65  */
   66 
   67 /*
   68  *                      GENERAL RULES ON VM_PAGE MANIPULATION
   69  *
   70  *      - a pageq mutex is required when adding or removing a page from a
   71  *        page queue (vm_page_queue[]), regardless of other mutexes or the
   72  *        busy state of a page.
   73  *
   74  *      - a hash chain mutex is required when associating or disassociating
   75  *        a page from the VM PAGE CACHE hash table (vm_page_buckets),
   76  *        regardless of other mutexes or the busy state of a page.
   77  *
   78  *      - either a hash chain mutex OR a busied page is required in order
   79  *        to modify the page flags.  A hash chain mutex must be obtained in
   80  *        order to busy a page.  A page's flags cannot be modified by a
   81  *        hash chain mutex if the page is marked busy.
   82  *
   83  *      - The object memq mutex is held when inserting or removing
   84  *        pages from an object (vm_page_insert() or vm_page_remove()).  This
   85  *        is different from the object's main mutex.
   86  *
   87  *      Generally speaking, you have to be aware of side effects when running
   88  *      vm_page ops.  A vm_page_lookup() will return with the hash chain
   89  *      locked, whether it was able to lookup the page or not.  vm_page_free(),
   90  *      vm_page_cache(), vm_page_activate(), and a number of other routines
   91  *      will release the hash chain mutex for you.  Intermediate manipulation
   92  *      routines such as vm_page_flag_set() expect the hash chain to be held
   93  *      on entry and the hash chain will remain held on return.
   94  *
   95  *      pageq scanning can only occur with the pageq in question locked.
   96  *      We have a known bottleneck with the active queue, but the cache
   97  *      and free queues are actually arrays already. 
   98  */
   99 
  100 /*
  101  *      Resident memory management module.
  102  */
  103 
  104 #include <sys/param.h>
  105 #include <sys/systm.h>
  106 #include <sys/lock.h>
  107 #include <sys/malloc.h>
  108 #include <sys/mutex.h>
  109 #include <sys/proc.h>
  110 #include <sys/vmmeter.h>
  111 #include <sys/vnode.h>
  112 
  113 #include <vm/vm.h>
  114 #include <vm/vm_param.h>
  115 #include <vm/vm_kern.h>
  116 #include <vm/vm_object.h>
  117 #include <vm/vm_page.h>
  118 #include <vm/vm_pageout.h>
  119 #include <vm/vm_pager.h>
  120 #include <vm/vm_extern.h>
  121 #include <vm/uma.h>
  122 #include <vm/uma_int.h>
  123 
  124 /*
  125  *      Associated with each page of user-allocatable memory is a
  126  *      page structure.
  127  */
  128 
  129 struct mtx vm_page_queue_mtx;
  130 struct mtx vm_page_queue_free_mtx;
  131 
  132 vm_page_t vm_page_array = 0;
  133 int vm_page_array_size = 0;
  134 long first_page = 0;
  135 int vm_page_zero_count = 0;
  136 
  137 /*
  138  *      vm_set_page_size:
  139  *
  140  *      Sets the page size, perhaps based upon the memory
  141  *      size.  Must be called before any use of page-size
  142  *      dependent functions.
  143  */
  144 void
  145 vm_set_page_size(void)
  146 {
  147         if (cnt.v_page_size == 0)
  148                 cnt.v_page_size = PAGE_SIZE;
  149         if (((cnt.v_page_size - 1) & cnt.v_page_size) != 0)
  150                 panic("vm_set_page_size: page size not a power of two");
  151 }
  152 
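The power-of-two test above relies on the usual (x - 1) & x identity: for the common 4096-byte page, (4096 - 1) & 4096 == 0x0fff & 0x1000 == 0 and the panic is not taken, whereas a bogus size such as 4097 yields 0x1000 & 0x1001 == 0x1000 and panics.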
  153 /*
  154  *      vm_page_startup:
  155  *
  156  *      Initializes the resident memory module.
  157  *
  158  *      Allocates memory for the page cells, and
  159  *      for the object/offset-to-page hash table headers.
  160  *      Each page cell is initialized and placed on the free list.
  161  */
  162 vm_offset_t
  163 vm_page_startup(vm_offset_t starta, vm_offset_t enda, vm_offset_t vaddr)
  164 {
  165         vm_offset_t mapped;
  166         vm_size_t npages;
  167         vm_paddr_t page_range;
  168         vm_paddr_t new_end;
  169         int i;
  170         vm_paddr_t pa;
  171         int nblocks;
  172         vm_paddr_t last_pa;
  173 
  174         /* the biggest memory array is the second group of pages */
  175         vm_paddr_t end;
  176         vm_paddr_t biggestsize;
  177         int biggestone;
  178 
  179         vm_paddr_t total;
  180         vm_size_t bootpages;
  181 
  182         total = 0;
  183         biggestsize = 0;
  184         biggestone = 0;
  185         nblocks = 0;
  186         vaddr = round_page(vaddr);
  187 
  188         for (i = 0; phys_avail[i + 1]; i += 2) {
  189                 phys_avail[i] = round_page(phys_avail[i]);
  190                 phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
  191         }
  192 
  193         for (i = 0; phys_avail[i + 1]; i += 2) {
  194                 vm_paddr_t size = phys_avail[i + 1] - phys_avail[i];
  195 
  196                 if (size > biggestsize) {
  197                         biggestone = i;
  198                         biggestsize = size;
  199                 }
  200                 ++nblocks;
  201                 total += size;
  202         }
  203 
  204         end = phys_avail[biggestone+1];
  205 
  206         /*
  207          * Initialize the locks.
  208          */
  209         mtx_init(&vm_page_queue_mtx, "vm page queue mutex", NULL, MTX_DEF);
  210         mtx_init(&vm_page_queue_free_mtx, "vm page queue free mutex", NULL,
  211            MTX_SPIN);
  212 
  213         /*
  214          * Initialize the queue headers for the free queue, the active queue
  215          * and the inactive queue.
  216          */
  217         vm_pageq_init();
  218 
  219         /*
  220          * Allocate memory for use when boot strapping the kernel memory
  221          * allocator.
  222          */
  223         bootpages = UMA_BOOT_PAGES * UMA_SLAB_SIZE;
  224         new_end = end - bootpages;
  225         new_end = trunc_page(new_end);
  226         mapped = pmap_map(&vaddr, new_end, end,
  227             VM_PROT_READ | VM_PROT_WRITE);
  228         bzero((caddr_t) mapped, end - new_end);
  229         uma_startup((caddr_t)mapped);
  230 
  231         /*
  232          * Compute the number of pages of memory that will be available for
  233          * use (taking into account the overhead of a page structure per
  234          * page).
  235          */
  236         first_page = phys_avail[0] / PAGE_SIZE;
  237         page_range = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE - first_page;
  238         npages = (total - (page_range * sizeof(struct vm_page)) -
  239             (end - new_end)) / PAGE_SIZE;
  240         end = new_end;
  241 
  242         /*
  243          * Initialize the mem entry structures now, and put them in the free
  244          * queue.
  245          */
  246         new_end = trunc_page(end - page_range * sizeof(struct vm_page));
  247         mapped = pmap_map(&vaddr, new_end, end,
  248             VM_PROT_READ | VM_PROT_WRITE);
  249         vm_page_array = (vm_page_t) mapped;
  250         phys_avail[biggestone + 1] = new_end;
  251 
  252         /*
  253          * Clear all of the page structures
  254          */
  255         bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
  256         vm_page_array_size = page_range;
  257 
  258         /*
  259          * Construct the free queue(s) in descending order (by physical
  260          * address) so that the first 16MB of physical memory is allocated
  261          * last rather than first.  On large-memory machines, this avoids
  262          * the exhaustion of low physical memory before isa_dmainit has run.
  263          */
  264         cnt.v_page_count = 0;
  265         cnt.v_free_count = 0;
  266         for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
  267                 pa = phys_avail[i];
  268                 last_pa = phys_avail[i + 1];
  269                 while (pa < last_pa && npages-- > 0) {
  270                         vm_pageq_add_new_page(pa);
  271                         pa += PAGE_SIZE;
  272                 }
  273         }
  274         return (vaddr);
  275 }
  276 
  277 void
  278 vm_page_flag_set(vm_page_t m, unsigned short bits)
  279 {
  280 
  281         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
  282         m->flags |= bits;
  283 } 
  284 
  285 void
  286 vm_page_flag_clear(vm_page_t m, unsigned short bits)
  287 {
  288 
  289         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
  290         m->flags &= ~bits;
  291 }
  292 
  293 void
  294 vm_page_busy(vm_page_t m)
  295 {
  296         KASSERT((m->flags & PG_BUSY) == 0,
  297             ("vm_page_busy: page already busy!!!"));
  298         vm_page_flag_set(m, PG_BUSY);
  299 }
  300 
  301 /*
  302  *      vm_page_flash:
  303  *
  304  *      wakeup anyone waiting for the page.
  305  */
  306 void
  307 vm_page_flash(vm_page_t m)
  308 {
  309         if (m->flags & PG_WANTED) {
  310                 vm_page_flag_clear(m, PG_WANTED);
  311                 wakeup(m);
  312         }
  313 }
  314 
  315 /*
  316  *      vm_page_wakeup:
  317  *
  318  *      clear the PG_BUSY flag and wakeup anyone waiting for the
  319  *      page.
  320  *
  321  */
  322 void
  323 vm_page_wakeup(vm_page_t m)
  324 {
  325         KASSERT(m->flags & PG_BUSY, ("vm_page_wakeup: page not busy!!!"));
  326         vm_page_flag_clear(m, PG_BUSY);
  327         vm_page_flash(m);
  328 }
  329 
  330 void
  331 vm_page_io_start(vm_page_t m)
  332 {
  333 
  334         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
  335         m->busy++;
  336 }
  337 
  338 void
  339 vm_page_io_finish(vm_page_t m)
  340 {
  341 
  342         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
  343         m->busy--;
  344         if (m->busy == 0)
  345                 vm_page_flash(m);
  346 }
  347 
  348 /*
  349  * Keep the page from being freed by the page daemon.  This has
  350  * much the same effect as wiring, but with much lower overhead,
  351  * and should be used only for *very* temporary holding
  352  * ("wiring").
  353  */
  354 void
  355 vm_page_hold(vm_page_t mem)
  356 {
  357 
  358         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
  359         mem->hold_count++;
  360 }
  361 
  362 void
  363 vm_page_unhold(vm_page_t mem)
  364 {
  365 
  366         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
  367         --mem->hold_count;
  368         KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
  369         if (mem->hold_count == 0 && mem->queue == PQ_HOLD)
  370                 vm_page_free_toq(mem);
  371 }
  372 
  373 /*
  374  *      vm_page_copy:
  375  *
  376  *      Copy one page to another
  377  */
  378 void
  379 vm_page_copy(vm_page_t src_m, vm_page_t dest_m)
  380 {
  381         pmap_copy_page(src_m, dest_m);
  382         dest_m->valid = VM_PAGE_BITS_ALL;
  383 }
  384 
  385 /*
  386  *      vm_page_free:
  387  *
  388  *      Free a page
  389  *
  390  *      The clearing of PG_ZERO is a temporary safety until the code can be
  391  *      reviewed to determine that PG_ZERO is being properly cleared on
  392  *      write faults or maps.  PG_ZERO was previously cleared in
  393  *      vm_page_alloc().
  394  */
  395 void
  396 vm_page_free(vm_page_t m)
  397 {
  398         vm_page_flag_clear(m, PG_ZERO);
  399         vm_page_free_toq(m);
  400         vm_page_zero_idle_wakeup();
  401 }
  402 
  403 /*
  404  *      vm_page_free_zero:
  405  *
  406  *      Free a page to the zeroed-pages queue
  407  */
  408 void
  409 vm_page_free_zero(vm_page_t m)
  410 {
  411         vm_page_flag_set(m, PG_ZERO);
  412         vm_page_free_toq(m);
  413 }
  414 
  415 /*
  416  *      vm_page_sleep_if_busy:
  417  *
  418  *      Sleep and release the page queues lock if PG_BUSY is set or,
  419  *      if also_m_busy is TRUE, busy is non-zero.  Returns TRUE if the
  420  *      thread slept and the page queues lock was released.
  421  *      Otherwise, retains the page queues lock and returns FALSE.
  422  */
  423 int
  424 vm_page_sleep_if_busy(vm_page_t m, int also_m_busy, const char *msg)
  425 {
  426         int is_object_locked;
  427 
  428         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
  429         if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
  430                 vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
  431                 /*
  432                  * Remove mtx_owned() after vm_object locking is finished.
  433                  */
  434                 if ((is_object_locked = m->object != NULL &&
  435                      mtx_owned(&m->object->mtx)))
  436                         mtx_unlock(&m->object->mtx);
  437                 msleep(m, &vm_page_queue_mtx, PDROP | PVM, msg, 0);
  438                 if (is_object_locked)
  439                         mtx_lock(&m->object->mtx);
  440                 return (TRUE);
  441         }
  442         return (FALSE);
  443 }
  444 
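Because msleep() is called with PDROP, the page queues lock is always released when the thread actually sleeps, so callers normally re-take the lock and redo their lookup after a TRUE return.  A minimal sketch of that pattern (hypothetical caller; the sleep message "pgwait" is illustrative only):

        vm_page_lock_queues();
retry:
        m = vm_page_lookup(object, pindex);
        if (m != NULL && vm_page_sleep_if_busy(m, TRUE, "pgwait")) {
                /* We slept and the queues lock was dropped; reacquire and retry. */
                vm_page_lock_queues();
                goto retry;
        }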
  445 /*
  446  *      vm_page_dirty:
  447  *
  448  *      make page all dirty
  449  */
  450 void
  451 vm_page_dirty(vm_page_t m)
  452 {
  453         KASSERT(m->queue - m->pc != PQ_CACHE,
  454             ("vm_page_dirty: page in cache!"));
  455         KASSERT(m->queue - m->pc != PQ_FREE,
  456             ("vm_page_dirty: page is free!"));
  457         m->dirty = VM_PAGE_BITS_ALL;
  458 }
  459 
  460 /*
  461  *      vm_page_splay:
  462  *
  463  *      Implements Sleator and Tarjan's top-down splay algorithm.  Returns
  464  *      the vm_page containing the given pindex.  If, however, that
  465  *      pindex is not found in the vm_object, returns a vm_page that is
  466  *      adjacent to the pindex, coming before or after it.
  467  */
  468 vm_page_t
  469 vm_page_splay(vm_pindex_t pindex, vm_page_t root)
  470 {
  471         struct vm_page dummy;
  472         vm_page_t lefttreemax, righttreemin, y;
  473 
  474         if (root == NULL)
  475                 return (root);
  476         lefttreemax = righttreemin = &dummy;
  477         for (;; root = y) {
  478                 if (pindex < root->pindex) {
  479                         if ((y = root->left) == NULL)
  480                                 break;
  481                         if (pindex < y->pindex) {
  482                                 /* Rotate right. */
  483                                 root->left = y->right;
  484                                 y->right = root;
  485                                 root = y;
  486                                 if ((y = root->left) == NULL)
  487                                         break;
  488                         }
  489                         /* Link into the new root's right tree. */
  490                         righttreemin->left = root;
  491                         righttreemin = root;
  492                 } else if (pindex > root->pindex) {
  493                         if ((y = root->right) == NULL)
  494                                 break;
  495                         if (pindex > y->pindex) {
  496                                 /* Rotate left. */
  497                                 root->right = y->left;
  498                                 y->left = root;
  499                                 root = y;
  500                                 if ((y = root->right) == NULL)
  501                                         break;
  502                         }
  503                         /* Link into the new root's left tree. */
  504                         lefttreemax->right = root;
  505                         lefttreemax = root;
  506                 } else
  507                         break;
  508         }
  509         /* Assemble the new root. */
  510         lefttreemax->right = root->left;
  511         righttreemin->left = root->right;
  512         root->left = dummy.right;
  513         root->right = dummy.left;
  514         return (root);
  515 }
  516 
  517 /*
  518  *      vm_page_insert:         [ internal use only ]
  519  *
  520  *      Inserts the given mem entry into the object and object list.
  521  *
  522  *      The pagetables are not updated but will presumably fault the page
  523  *      in if necessary, or, if it is a kernel page, the caller will at
  524  *      some point enter the page into the kernel's pmap.  We are not
  525  *      allowed to block here so we *can't* do this anyway.
  526  *
  527  *      The object and page must be locked, and must be splhigh.
  528  *      This routine may not block.
  529  */
  530 void
  531 vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
  532 {
  533         vm_page_t root;
  534 
  535         if (!VM_OBJECT_LOCKED(object))
  536                 GIANT_REQUIRED;
  537         if (m->object != NULL)
  538                 panic("vm_page_insert: already inserted");
  539 
  540         /*
  541          * Record the object/offset pair in this page
  542          */
  543         m->object = object;
  544         m->pindex = pindex;
  545 
  546         /*
  547          * Now link into the object's ordered list of backed pages.
  548          */
  549         root = object->root;
  550         if (root == NULL) {
  551                 m->left = NULL;
  552                 m->right = NULL;
  553                 TAILQ_INSERT_TAIL(&object->memq, m, listq);
  554         } else {
  555                 root = vm_page_splay(pindex, root);
  556                 if (pindex < root->pindex) {
  557                         m->left = root->left;
  558                         m->right = root;
  559                         root->left = NULL;
  560                         TAILQ_INSERT_BEFORE(root, m, listq);
  561                 } else {
  562                         m->right = root->right;
  563                         m->left = root;
  564                         root->right = NULL;
  565                         TAILQ_INSERT_AFTER(&object->memq, root, m, listq);
  566                 }
  567         }
  568         object->root = m;
  569         object->generation++;
  570 
  571         /*
  572          * show that the object has one more resident page.
  573          */
  574         object->resident_page_count++;
  575 
  576         /*
  577          * Since we are inserting a new and possibly dirty page,
  578          * update the object's OBJ_WRITEABLE and OBJ_MIGHTBEDIRTY flags.
  579          */
  580         if (m->flags & PG_WRITEABLE)
  581                 vm_object_set_writeable_dirty(object);
  582 }
  583 
  584 /*
  585  *      vm_page_remove:
  586  *                              NOTE: used by device pager as well -wfj
  587  *
  588  *      Removes the given mem entry from the object/offset-page
  589  *      table and the object page list, but does not invalidate/terminate
  590  *      the backing store.
  591  *
  592  *      The object and page must be locked, and at splhigh.
  593  *      The underlying pmap entry (if any) is NOT removed here.
  594  *      This routine may not block.
  595  */
  596 void
  597 vm_page_remove(vm_page_t m)
  598 {
  599         vm_object_t object;
  600         vm_page_t root;
  601 
  602         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
  603         if (m->object == NULL)
  604                 return;
  605         if (!VM_OBJECT_LOCKED(m->object))
  606                 GIANT_REQUIRED;
  607         if ((m->flags & PG_BUSY) == 0) {
  608                 panic("vm_page_remove: page not busy");
  609         }
  610 
  611         /*
  612          * Basically destroy the page.
  613          */
  614         vm_page_wakeup(m);
  615 
  616         object = m->object;
  617 
  618         /*
  619          * Now remove from the object's list of backed pages.
  620          */
  621         if (m != object->root)
  622                 vm_page_splay(m->pindex, object->root);
  623         if (m->left == NULL)
  624                 root = m->right;
  625         else {
  626                 root = vm_page_splay(m->pindex, m->left);
  627                 root->right = m->right;
  628         }
  629         object->root = root;
  630         TAILQ_REMOVE(&object->memq, m, listq);
  631 
  632         /*
  633          * And show that the object has one fewer resident page.
  634          */
  635         object->resident_page_count--;
  636         object->generation++;
  637 
  638         m->object = NULL;
  639 }
  640 
  641 /*
  642  *      vm_page_lookup:
  643  *
  644  *      Returns the page associated with the object/offset
  645  *      pair specified; if none is found, NULL is returned.
  646  *
  647  *      The object must be locked.
  648  *      This routine may not block.
  649  *      This is a critical path routine
  650  */
  651 vm_page_t
  652 vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
  653 {
  654         vm_page_t m;
  655 
  656         if (!VM_OBJECT_LOCKED(object))
  657                 GIANT_REQUIRED;
  658         m = vm_page_splay(pindex, object->root);
  659         if ((object->root = m) != NULL && m->pindex != pindex)
  660                 m = NULL;
  661         return (m);
  662 }
  663 
  664 /*
  665  *      vm_page_rename:
  666  *
  667  *      Move the given memory entry from its
  668  *      current object to the specified target object/offset.
  669  *
  670  *      The object must be locked.
  671  *      This routine may not block.
  672  *
  673  *      Note: this routine will raise itself to splvm(); the caller need not.
  674  *
  675  *      Note: swap associated with the page must be invalidated by the move.  We
  676  *            have to do this for several reasons:  (1) we aren't freeing the
  677  *            page, (2) we are dirtying the page, (3) the VM system is probably
  678  *            moving the page from object A to B, and will then later move
  679  *            the backing store from A to B and we can't have a conflict.
  680  *
  681  *      Note: we *always* dirty the page.  It is necessary both for the
  682  *            fact that we moved it, and because we may be invalidating
  683  *            swap.  If the page is on the cache, we have to deactivate it
  684  *            or vm_page_dirty() will panic.  Dirty pages are not allowed
  685  *            on the cache.
  686  */
  687 void
  688 vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
  689 {
  690         int s;
  691 
  692         s = splvm();
  693         vm_page_remove(m);
  694         vm_page_insert(m, new_object, new_pindex);
  695         if (m->queue - m->pc == PQ_CACHE)
  696                 vm_page_deactivate(m);
  697         vm_page_dirty(m);
  698         splx(s);
  699 }
  700 
  701 /*
  702  *      vm_page_select_cache:
  703  *
  704  *      Find a page on the cache queue with color optimization.  Pages
  705  *      that are found but not usable are deactivated.  This
  706  *      keeps us from using potentially busy cached pages.
  707  *
  708  *      This routine must be called at splvm().
  709  *      This routine may not block.
  710  */
  711 static vm_page_t
  712 vm_page_select_cache(vm_pindex_t color)
  713 {
  714         vm_page_t m;
  715 
  716         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
  717         while (TRUE) {
  718                 m = vm_pageq_find(PQ_CACHE, color & PQ_L2_MASK, FALSE);
  719                 if (m && ((m->flags & (PG_BUSY|PG_UNMANAGED)) || m->busy ||
  720                                m->hold_count || m->wire_count)) {
  721                         vm_page_deactivate(m);
  722                         continue;
  723                 }
  724                 return m;
  725         }
  726 }
  727 
  728 /*
  729  *      vm_page_select_free:
  730  *
  731  *      Find a free or zero page, with specified preference. 
  732  *
  733  *      This routine must be called at splvm().
  734  *      This routine may not block.
  735  */
  736 static __inline vm_page_t
  737 vm_page_select_free(vm_pindex_t color, boolean_t prefer_zero)
  738 {
  739         vm_page_t m;
  740 
  741         m = vm_pageq_find(PQ_FREE, color & PQ_L2_MASK, prefer_zero);
  742         return (m);
  743 }
  744 
  745 /*
  746  *      vm_page_alloc:
  747  *
  748  *      Allocate and return a memory cell associated
  749  *      with this VM object/offset pair.
  750  *
  751  *      page_req classes:
  752  *      VM_ALLOC_NORMAL         normal process request
  753  *      VM_ALLOC_SYSTEM         system *really* needs a page
  754  *      VM_ALLOC_INTERRUPT      interrupt time request
  755  *      VM_ALLOC_ZERO           zero page
  756  *
  757  *      This routine may not block.
  758  *
  759  *      Additional special handling is required when called from an
  760  *      interrupt (VM_ALLOC_INTERRUPT).  We are not allowed to mess with
  761  *      the page cache in this case.
  762  */
  763 vm_page_t
  764 vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
  765 {
  766         vm_page_t m = NULL;
  767         vm_pindex_t color;
  768         int flags, page_req, s;
  769 
  770         page_req = req & VM_ALLOC_CLASS_MASK;
  771 
  772         if ((req & VM_ALLOC_NOOBJ) == 0) {
  773                 KASSERT(object != NULL,
  774                     ("vm_page_alloc: NULL object."));
  775                 KASSERT(!vm_page_lookup(object, pindex),
  776                     ("vm_page_alloc: page already allocated"));
  777                 color = pindex + object->pg_color;
  778         } else
  779                 color = pindex;
  780 
  781         /*
  782          * The pager is allowed to eat deeper into the free page list.
  783          */
  784         if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT)) {
  785                 page_req = VM_ALLOC_SYSTEM;
  786         }
  787 
  788         s = splvm();
  789 loop:
  790         mtx_lock_spin(&vm_page_queue_free_mtx);
  791         if (cnt.v_free_count > cnt.v_free_reserved ||
  792             (page_req == VM_ALLOC_SYSTEM && 
  793              cnt.v_cache_count == 0 && 
  794              cnt.v_free_count > cnt.v_interrupt_free_min) ||
  795             (page_req == VM_ALLOC_INTERRUPT && cnt.v_free_count > 0)) {
  796                 /*
  797                  * Allocate from the free queue if the number of free pages
  798                  * exceeds the minimum for the request class.
  799                  */
  800                 m = vm_page_select_free(color, (req & VM_ALLOC_ZERO) != 0);
  801         } else if (page_req != VM_ALLOC_INTERRUPT) {
  802                 mtx_unlock_spin(&vm_page_queue_free_mtx);
  803                 /*
  804                  * Allocatable from cache (non-interrupt only).  On success,
  805                  * we must free the page and try again, thus ensuring that
  806                  * cnt.v_*_free_min counters are replenished.
  807                  */
  808                 vm_page_lock_queues();
  809                 if ((m = vm_page_select_cache(color)) == NULL) {
  810                         vm_page_unlock_queues();
  811                         splx(s);
  812 #if defined(DIAGNOSTIC)
  813                         if (cnt.v_cache_count > 0)
  814                                 printf("vm_page_alloc(NORMAL): missing pages on cache queue: %d\n", cnt.v_cache_count);
  815 #endif
  816                         atomic_add_int(&vm_pageout_deficit, 1);
  817                         pagedaemon_wakeup();
  818                         return (NULL);
  819                 }
  820                 KASSERT(m->dirty == 0, ("Found dirty cache page %p", m));
  821                 vm_page_busy(m);
  822                 pmap_remove_all(m);
  823                 vm_page_free(m);
  824                 vm_page_unlock_queues();
  825                 goto loop;
  826         } else {
  827                 /*
  828                  * Not allocatable from cache from interrupt, give up.
  829                  */
  830                 mtx_unlock_spin(&vm_page_queue_free_mtx);
  831                 splx(s);
  832                 atomic_add_int(&vm_pageout_deficit, 1);
  833                 pagedaemon_wakeup();
  834                 return (NULL);
  835         }
  836 
  837         /*
  838          *  At this point we had better have found a good page.
  839          */
  840 
  841         KASSERT(
  842             m != NULL,
  843             ("vm_page_alloc(): missing page on free queue\n")
  844         );
  845 
  846         /*
  847          * Remove from free queue
  848          */
  849 
  850         vm_pageq_remove_nowakeup(m);
  851 
  852         /*
  853          * Initialize structure.  Only the PG_ZERO flag is inherited.
  854          */
  855         flags = PG_BUSY;
  856         if (m->flags & PG_ZERO) {
  857                 vm_page_zero_count--;
  858                 if (req & VM_ALLOC_ZERO)
  859                         flags = PG_ZERO | PG_BUSY;
  860         }
  861         m->flags = flags;
  862         if (req & VM_ALLOC_WIRED) {
  863                 atomic_add_int(&cnt.v_wire_count, 1);
  864                 m->wire_count = 1;
  865         } else
  866                 m->wire_count = 0;
  867         m->hold_count = 0;
  868         m->act_count = 0;
  869         m->busy = 0;
  870         m->valid = 0;
  871         KASSERT(m->dirty == 0, ("vm_page_alloc: free/cache page %p was dirty", m));
  872         mtx_unlock_spin(&vm_page_queue_free_mtx);
  873 
  874         /*
  875          * vm_page_insert() is safe prior to the splx().  Note also that
  876          * inserting a page here does not insert it into the pmap (which
  877          * could cause us to block allocating memory).  We cannot block 
  878          * anywhere.
  879          */
  880         if ((req & VM_ALLOC_NOOBJ) == 0)
  881                 vm_page_insert(m, object, pindex);
  882 
  883         /*
  884          * Don't wakeup too often - wakeup the pageout daemon when
  885          * we would be nearly out of memory.
  886          */
  887         if (vm_paging_needed())
  888                 pagedaemon_wakeup();
  889 
  890         splx(s);
  891         return (m);
  892 }
  893 
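For illustration, a caller that needs a wired page and would prefer one that is already zeroed might use the flags like this (hypothetical caller; the retry-on-shortage loop is the conventional idiom, not part of this file):

        while ((m = vm_page_alloc(object, pindex,
            VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL)
                VM_WAIT;                /* block until the pagedaemon frees memory */
        if ((m->flags & PG_ZERO) == 0)
                pmap_zero_page(m);      /* VM_ALLOC_ZERO is only a preference */

Note that VM_ALLOC_ZERO only asks for a page from the pre-zeroed pool; as the flag handling above shows, the caller must still check PG_ZERO and zero the page itself when necessary.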
  894 /*
  895  *      vm_wait:        (also see VM_WAIT macro)
  896  *
  897  *      Block until free pages are available for allocation
  898  *      - Called in various places before memory allocations.
  899  */
  900 void
  901 vm_wait(void)
  902 {
  903         int s;
  904 
  905         s = splvm();
  906         vm_page_lock_queues();
  907         if (curproc == pageproc) {
  908                 vm_pageout_pages_needed = 1;
  909                 msleep(&vm_pageout_pages_needed, &vm_page_queue_mtx,
  910                     PDROP | PSWP, "VMWait", 0);
  911         } else {
  912                 if (!vm_pages_needed) {
  913                         vm_pages_needed = 1;
  914                         wakeup(&vm_pages_needed);
  915                 }
  916                 msleep(&cnt.v_free_count, &vm_page_queue_mtx, PDROP | PVM,
  917                     "vmwait", 0);
  918         }
  919         splx(s);
  920 }
  921 
  922 /*
  923  *      vm_waitpfault:  (also see VM_WAITPFAULT macro)
  924  *
  925  *      Block until free pages are available for allocation
  926  *      - Called only in vm_fault so that processes page faulting
  927  *        can be easily tracked.
  928  *      - Sleeps at a lower priority than vm_wait() so that vm_wait()ing
  929  *        processes will be able to grab memory first.  Do not change
  930  *        this balance without careful testing first.
  931  */
  932 void
  933 vm_waitpfault(void)
  934 {
  935         int s;
  936 
  937         s = splvm();
  938         vm_page_lock_queues();
  939         if (!vm_pages_needed) {
  940                 vm_pages_needed = 1;
  941                 wakeup(&vm_pages_needed);
  942         }
  943         msleep(&cnt.v_free_count, &vm_page_queue_mtx, PDROP | PUSER,
  944             "pfault", 0);
  945         splx(s);
  946 }
  947 
  948 /*
  949  *      vm_page_activate:
  950  *
  951  *      Put the specified page on the active list (if appropriate).
  952  *      Ensure that act_count is at least ACT_INIT but do not otherwise
  953  *      mess with it.
  954  *
  955  *      The page queues must be locked.
  956  *      This routine may not block.
  957  */
  958 void
  959 vm_page_activate(vm_page_t m)
  960 {
  961         int s;
  962 
  963         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
  964         s = splvm();
  965         if (m->queue != PQ_ACTIVE) {
  966                 if ((m->queue - m->pc) == PQ_CACHE)
  967                         cnt.v_reactivated++;
  968                 vm_pageq_remove(m);
  969                 if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
  970                         if (m->act_count < ACT_INIT)
  971                                 m->act_count = ACT_INIT;
  972                         vm_pageq_enqueue(PQ_ACTIVE, m);
  973                 }
  974         } else {
  975                 if (m->act_count < ACT_INIT)
  976                         m->act_count = ACT_INIT;
  977         }
  978         splx(s);
  979 }
  980 
  981 /*
  982  *      vm_page_free_wakeup:
  983  *
  984  *      Helper routine for vm_page_free_toq() and vm_page_cache().  This
  985  *      routine is called when a page has been added to the cache or free
  986  *      queues.
  987  *
  988  *      This routine may not block.
  989  *      This routine must be called at splvm()
  990  */
  991 static __inline void
  992 vm_page_free_wakeup(void)
  993 {
  994 
  995         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
  996         /*
  997          * if pageout daemon needs pages, then tell it that there are
  998          * some free.
  999          */
 1000         if (vm_pageout_pages_needed &&
 1001             cnt.v_cache_count + cnt.v_free_count >= cnt.v_pageout_free_min) {
 1002                 wakeup(&vm_pageout_pages_needed);
 1003                 vm_pageout_pages_needed = 0;
 1004         }
 1005         /*
 1006          * Wake up processes that are waiting on memory if we hit a
 1007          * high water mark, and wake up the scheduler process if we have
 1008          * lots of memory; this process will swap in processes.
 1009          */
 1010         if (vm_pages_needed && !vm_page_count_min()) {
 1011                 vm_pages_needed = 0;
 1012                 wakeup(&cnt.v_free_count);
 1013         }
 1014 }
 1015 
 1016 /*
 1017  *      vm_page_free_toq:
 1018  *
 1019  *      Returns the given page to the PQ_FREE list,
 1020  *      disassociating it from any VM object.
 1021  *
 1022  *      Object and page must be locked prior to entry.
 1023  *      This routine may not block.
 1024  */
 1025 
 1026 void
 1027 vm_page_free_toq(vm_page_t m)
 1028 {
 1029         int s;
 1030         struct vpgqueues *pq;
 1031         vm_object_t object = m->object;
 1032 
 1033         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1034         s = splvm();
 1035         cnt.v_tfree++;
 1036 
 1037         if (m->busy || ((m->queue - m->pc) == PQ_FREE)) {
 1038                 printf(
 1039                 "vm_page_free: pindex(%lu), busy(%d), PG_BUSY(%d), hold(%d)\n",
 1040                     (u_long)m->pindex, m->busy, (m->flags & PG_BUSY) ? 1 : 0,
 1041                     m->hold_count);
 1042                 if ((m->queue - m->pc) == PQ_FREE)
 1043                         panic("vm_page_free: freeing free page");
 1044                 else
 1045                         panic("vm_page_free: freeing busy page");
 1046         }
 1047 
 1048         /*
 1049          * unqueue, then remove page.  Note that we cannot destroy
 1050          * the page here because we do not want to call the pager's
 1051          * callback routine until after we've put the page on the
 1052          * appropriate free queue.
 1053          */
 1054         vm_pageq_remove_nowakeup(m);
 1055         vm_page_remove(m);
 1056 
 1057         /*
 1058          * If fictitious, remove the object association and
 1059          * return; otherwise delay object association removal.
 1060          */
 1061         if ((m->flags & PG_FICTITIOUS) != 0) {
 1062                 splx(s);
 1063                 return;
 1064         }
 1065 
 1066         m->valid = 0;
 1067         vm_page_undirty(m);
 1068 
 1069         if (m->wire_count != 0) {
 1070                 if (m->wire_count > 1) {
 1071                         panic("vm_page_free: invalid wire count (%d), pindex: 0x%lx",
 1072                                 m->wire_count, (long)m->pindex);
 1073                 }
 1074                 panic("vm_page_free: freeing wired page\n");
 1075         }
 1076 
 1077         /*
 1078          * If we've exhausted the object's resident pages we want to free
 1079          * it up.
 1080          */
 1081         if (object && 
 1082             (object->type == OBJT_VNODE) &&
 1083             ((object->flags & OBJ_DEAD) == 0)
 1084         ) {
 1085                 struct vnode *vp = (struct vnode *)object->handle;
 1086 
 1087                 if (vp) {
 1088                         VI_LOCK(vp);
 1089                         if (VSHOULDFREE(vp))
 1090                                 vfree(vp);
 1091                         VI_UNLOCK(vp);
 1092                 }
 1093         }
 1094 
 1095         /*
 1096          * Clear the UNMANAGED flag when freeing an unmanaged page.
 1097          */
 1098         if (m->flags & PG_UNMANAGED) {
 1099                 m->flags &= ~PG_UNMANAGED;
 1100         }
 1101 
 1102         if (m->hold_count != 0) {
 1103                 m->flags &= ~PG_ZERO;
 1104                 m->queue = PQ_HOLD;
 1105         } else
 1106                 m->queue = PQ_FREE + m->pc;
 1107         pq = &vm_page_queues[m->queue];
 1108         mtx_lock_spin(&vm_page_queue_free_mtx);
 1109         pq->lcnt++;
 1110         ++(*pq->cnt);
 1111 
 1112         /*
 1113          * Put zero'd pages on the end ( where we look for zero'd pages
 1114          * first ) and non-zero'd pages at the head.
 1115          */
 1116         if (m->flags & PG_ZERO) {
 1117                 TAILQ_INSERT_TAIL(&pq->pl, m, pageq);
 1118                 ++vm_page_zero_count;
 1119         } else {
 1120                 TAILQ_INSERT_HEAD(&pq->pl, m, pageq);
 1121         }
 1122         mtx_unlock_spin(&vm_page_queue_free_mtx);
 1123         vm_page_free_wakeup();
 1124         splx(s);
 1125 }
 1126 
 1127 /*
 1128  *      vm_page_unmanage:
 1129  *
 1130  *      Prevent PV management from being done on the page.  The page is
 1131  *      removed from the paging queues as if it were wired, and as a 
 1132  *      consequence of no longer being managed the pageout daemon will not
 1133  *      touch it (since there is no way to locate the pte mappings for the
 1134  *      page).  madvise() calls that mess with the pmap will also no longer
 1135  *      operate on the page.
 1136  *
 1137  *      Beyond that the page is still reasonably 'normal'.  Freeing the page
 1138  *      will clear the flag.
 1139  *
 1140  *      This routine is used by OBJT_PHYS objects - objects using unswappable
 1141  *      physical memory as backing store rather than swap-backed memory and
 1142  *      will eventually be extended to support 4MB unmanaged physical 
 1143  *      mappings.
 1144  */
 1145 void
 1146 vm_page_unmanage(vm_page_t m)
 1147 {
 1148         int s;
 1149 
 1150         s = splvm();
 1151         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1152         if ((m->flags & PG_UNMANAGED) == 0) {
 1153                 if (m->wire_count == 0)
 1154                         vm_pageq_remove(m);
 1155         }
 1156         vm_page_flag_set(m, PG_UNMANAGED);
 1157         splx(s);
 1158 }
 1159 
 1160 /*
 1161  *      vm_page_wire:
 1162  *
 1163  *      Mark this page as wired down by yet
 1164  *      another map, removing it from paging queues
 1165  *      as necessary.
 1166  *
 1167  *      The page queues must be locked.
 1168  *      This routine may not block.
 1169  */
 1170 void
 1171 vm_page_wire(vm_page_t m)
 1172 {
 1173         int s;
 1174 
 1175         /*
 1176          * Only bump the wire statistics if the page is not already wired,
 1177          * and only unqueue the page if it is on some queue (if it is unmanaged
 1178          * it is already off the queues).
 1179          */
 1180         s = splvm();
 1181         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1182         if (m->wire_count == 0) {
 1183                 if ((m->flags & PG_UNMANAGED) == 0)
 1184                         vm_pageq_remove(m);
 1185                 atomic_add_int(&cnt.v_wire_count, 1);
 1186         }
 1187         m->wire_count++;
 1188         KASSERT(m->wire_count != 0, ("vm_page_wire: wire_count overflow m=%p", m));
 1189         splx(s);
 1190 }
 1191 
 1192 /*
 1193  *      vm_page_unwire:
 1194  *
 1195  *      Release one wiring of this page, potentially
 1196  *      enabling it to be paged again.
 1197  *
 1198  *      Many pages placed on the inactive queue should actually go
 1199  *      into the cache, but it is difficult to figure out which.  What
 1200  *      we do instead, if the inactive target is well met, is to put
 1201  *      clean pages at the head of the inactive queue instead of the tail.
 1202  *      This will cause them to be moved to the cache more quickly and
 1203  *      if not actively re-referenced, freed more quickly.  If we just
 1204  *      stick these pages at the end of the inactive queue, heavy filesystem
 1205  *      meta-data accesses can cause an unnecessary paging load on memory bound 
 1206  *      processes.  This optimization causes one-time-use metadata to be
 1207  *      reused more quickly.
 1208  *
 1209  *      BUT, if we are in a low-memory situation we have no choice but to
 1210  *      put clean pages on the cache queue.
 1211  *
 1212  *      A number of routines use vm_page_unwire() to guarantee that the page
 1213  *      will go into either the inactive or active queues, and will NEVER
 1214  *      be placed in the cache - for example, just after dirtying a page.
 1215  *      dirty pages in the cache are not allowed.
 1216  *
 1217  *      The page queues must be locked.
 1218  *      This routine may not block.
 1219  */
 1220 void
 1221 vm_page_unwire(vm_page_t m, int activate)
 1222 {
 1223         int s;
 1224 
 1225         s = splvm();
 1226         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1227         if (m->wire_count > 0) {
 1228                 m->wire_count--;
 1229                 if (m->wire_count == 0) {
 1230                         atomic_subtract_int(&cnt.v_wire_count, 1);
 1231                         if (m->flags & PG_UNMANAGED) {
 1232                                 ;
 1233                         } else if (activate)
 1234                                 vm_pageq_enqueue(PQ_ACTIVE, m);
 1235                         else {
 1236                                 vm_page_flag_clear(m, PG_WINATCFLS);
 1237                                 vm_pageq_enqueue(PQ_INACTIVE, m);
 1238                         }
 1239                 }
 1240         } else {
 1241                 panic("vm_page_unwire: invalid wire count: %d\n", m->wire_count);
 1242         }
 1243         splx(s);
 1244 }
 1245 
 1246 
 1247 /*
 1248  * Move the specified page to the inactive queue.  If the page has
 1249  * any associated swap, the swap is deallocated.
 1250  *
 1251  * Normally athead is 0 resulting in LRU operation.  athead is set
 1252  * to 1 if we want this page to be 'as if it were placed in the cache',
 1253  * except without unmapping it from the process address space.
 1254  *
 1255  * This routine may not block.
 1256  */
 1257 static __inline void
 1258 _vm_page_deactivate(vm_page_t m, int athead)
 1259 {
 1260         int s;
 1261 
 1262         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1263         /*
 1264          * Ignore if already inactive.
 1265          */
 1266         if (m->queue == PQ_INACTIVE)
 1267                 return;
 1268 
 1269         s = splvm();
 1270         if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
 1271                 if ((m->queue - m->pc) == PQ_CACHE)
 1272                         cnt.v_reactivated++;
 1273                 vm_page_flag_clear(m, PG_WINATCFLS);
 1274                 vm_pageq_remove(m);
 1275                 if (athead)
 1276                         TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
 1277                 else
 1278                         TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
 1279                 m->queue = PQ_INACTIVE;
 1280                 vm_page_queues[PQ_INACTIVE].lcnt++;
 1281                 cnt.v_inactive_count++;
 1282         }
 1283         splx(s);
 1284 }
 1285 
 1286 void
 1287 vm_page_deactivate(vm_page_t m)
 1288 {
 1289     _vm_page_deactivate(m, 0);
 1290 }
 1291 
 1292 /*
 1293  * vm_page_try_to_cache:
 1294  *
 1295  * Returns 0 on failure, 1 on success
 1296  */
 1297 int
 1298 vm_page_try_to_cache(vm_page_t m)
 1299 {
 1300 
 1301         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1302         if (m->dirty || m->hold_count || m->busy || m->wire_count ||
 1303             (m->flags & (PG_BUSY|PG_UNMANAGED))) {
 1304                 return (0);
 1305         }
 1306         vm_page_test_dirty(m);
 1307         if (m->dirty)
 1308                 return (0);
 1309         vm_page_cache(m);
 1310         return (1);
 1311 }
 1312 
 1313 /*
 1314  * vm_page_try_to_free()
 1315  *
 1316  *      Attempt to free the page.  If we cannot free it, we do nothing.
 1317  *      1 is returned on success, 0 on failure.
 1318  */
 1319 int
 1320 vm_page_try_to_free(vm_page_t m)
 1321 {
 1322 
 1323         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1324         if (m->dirty || m->hold_count || m->busy || m->wire_count ||
 1325             (m->flags & (PG_BUSY|PG_UNMANAGED))) {
 1326                 return (0);
 1327         }
 1328         vm_page_test_dirty(m);
 1329         if (m->dirty)
 1330                 return (0);
 1331         vm_page_busy(m);
 1332         pmap_remove_all(m);
 1333         vm_page_free(m);
 1334         return (1);
 1335 }
 1336 
 1337 /*
 1338  * vm_page_cache
 1339  *
 1340  * Put the specified page onto the page cache queue (if appropriate).
 1341  *
 1342  * This routine may not block.
 1343  */
 1344 void
 1345 vm_page_cache(vm_page_t m)
 1346 {
 1347         int s;
 1348 
 1349         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1350         if ((m->flags & (PG_BUSY|PG_UNMANAGED)) || m->busy || m->wire_count) {
 1351                 printf("vm_page_cache: attempting to cache busy page\n");
 1352                 return;
 1353         }
 1354         if ((m->queue - m->pc) == PQ_CACHE)
 1355                 return;
 1356 
 1357         /*
 1358          * Remove all pmaps and indicate that the page is not
 1359          * writeable or mapped.
 1360          */
 1361         pmap_remove_all(m);
 1362         if (m->dirty != 0) {
 1363                 panic("vm_page_cache: caching a dirty page, pindex: %ld",
 1364                         (long)m->pindex);
 1365         }
 1366         s = splvm();
 1367         vm_pageq_remove_nowakeup(m);
 1368         vm_pageq_enqueue(PQ_CACHE + m->pc, m);
 1369         vm_page_free_wakeup();
 1370         splx(s);
 1371 }
 1372 
 1373 /*
 1374  * vm_page_dontneed
 1375  *
 1376  *      Cache, deactivate, or do nothing as appropriate.  This routine
 1377  *      is typically used by madvise() MADV_DONTNEED.
 1378  *
 1379  *      Generally speaking we want to move the page into the cache so
 1380  *      it gets reused quickly.  However, this can result in a silly syndrome
 1381  *      due to the page recycling too quickly.  Small objects will not be
 1382  *      fully cached.  On the other hand, if we move the page to the inactive
 1383  *      queue we wind up with a problem whereby very large objects 
 1384  *      unnecessarily blow away our inactive and cache queues.
 1385  *
 1386  *      The solution is to move the pages based on a fixed weighting.  We
 1387  *      either leave them alone, deactivate them, or move them to the cache,
 1388  *      where moving them to the cache has the highest weighting.
 1389  *      By forcing some pages into other queues we eventually force the
 1390  *      system to balance the queues, potentially recovering other unrelated
 1391  *      space from active.  The idea is to not force this to happen too
 1392  *      often.
 1393  */
 1394 void
 1395 vm_page_dontneed(vm_page_t m)
 1396 {
 1397         static int dnweight;
 1398         int dnw;
 1399         int head;
 1400 
 1401         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1402         dnw = ++dnweight;
 1403 
 1404         /*
 1405          * occasionally leave the page alone
 1406          */
 1407         if ((dnw & 0x01F0) == 0 ||
 1408             m->queue == PQ_INACTIVE || 
 1409             m->queue - m->pc == PQ_CACHE
 1410         ) {
 1411                 if (m->act_count >= ACT_INIT)
 1412                         --m->act_count;
 1413                 return;
 1414         }
 1415 
 1416         if (m->dirty == 0)
 1417                 vm_page_test_dirty(m);
 1418 
 1419         if (m->dirty || (dnw & 0x0070) == 0) {
 1420                 /*
 1421                  * Deactivate the page 3 times out of 32.
 1422                  */
 1423                 head = 0;
 1424         } else {
 1425                 /*
 1426                  * Cache the page 28 times out of every 32.  Note that
 1427                  * the page is deactivated instead of cached, but placed
 1428                  * at the head of the queue instead of the tail.
 1429                  */
 1430                 head = 1;
 1431         }
 1432         _vm_page_deactivate(m, head);
 1433 }
 1434 
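The weights quoted in the comments follow directly from the two masks: (dnw & 0x01F0) == 0 holds for 16 of every 512 consecutive counter values, i.e. 1 call in 32 leaves the page alone; (dnw & 0x0070) == 0 holds for 4 calls in 32, one of which was already consumed by the leave-alone case, giving the stated 3-in-32 deactivation rate; the remaining 28 in 32 are placed at the head of the inactive queue (the "cache it" case).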
 1435 /*
 1436  * Grab a page, waiting until we are woken up due to the page
 1437  * changing state.  We keep on waiting if the page continues
 1438  * to be in the object.  If the page doesn't exist, allocate it.
 1439  *
 1440  * This routine may block.
 1441  */
 1442 vm_page_t
 1443 vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
 1444 {
 1445         vm_page_t m;
 1446         int s, generation;
 1447 
 1448         GIANT_REQUIRED;
 1449 retrylookup:
 1450         if ((m = vm_page_lookup(object, pindex)) != NULL) {
 1451                 vm_page_lock_queues();
 1452                 if (m->busy || (m->flags & PG_BUSY)) {
 1453                         generation = object->generation;
 1454 
 1455                         s = splvm();
 1456                         while ((object->generation == generation) &&
 1457                                         (m->busy || (m->flags & PG_BUSY))) {
 1458                                 vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
 1459                                 msleep(m, &vm_page_queue_mtx, PVM, "pgrbwt", 0);
 1460                                 if ((allocflags & VM_ALLOC_RETRY) == 0) {
 1461                                         vm_page_unlock_queues();
 1462                                         splx(s);
 1463                                         return NULL;
 1464                                 }
 1465                         }
 1466                         vm_page_unlock_queues();
 1467                         splx(s);
 1468                         goto retrylookup;
 1469                 } else {
 1470                         if (allocflags & VM_ALLOC_WIRED)
 1471                                 vm_page_wire(m);
 1472                         vm_page_busy(m);
 1473                         vm_page_unlock_queues();
 1474                         return m;
 1475                 }
 1476         }
 1477 
 1478         m = vm_page_alloc(object, pindex, allocflags & ~VM_ALLOC_RETRY);
 1479         if (m == NULL) {
 1480                 VM_WAIT;
 1481                 if ((allocflags & VM_ALLOC_RETRY) == 0)
 1482                         return NULL;
 1483                 goto retrylookup;
 1484         }
 1485 
 1486         return m;
 1487 }
 1488 
 1489 /*
 1490  * Mapping function for valid bits or for dirty bits in
 1491  * a page.  May not block.
 1492  *
 1493  * Inputs are required to range within a page.
 1494  */
 1495 __inline int
 1496 vm_page_bits(int base, int size)
 1497 {
 1498         int first_bit;
 1499         int last_bit;
 1500 
 1501         KASSERT(
 1502             base + size <= PAGE_SIZE,
 1503             ("vm_page_bits: illegal base/size %d/%d", base, size)
 1504         );
 1505 
 1506         if (size == 0)          /* handle degenerate case */
 1507                 return (0);
 1508 
 1509         first_bit = base >> DEV_BSHIFT;
 1510         last_bit = (base + size - 1) >> DEV_BSHIFT;
 1511 
 1512         return ((2 << last_bit) - (1 << first_bit));
 1513 }
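
      /*
       * [Editor's worked example; not part of the original source.]
       * Assuming DEV_BSIZE == 512 (DEV_BSHIFT == 9) and a 4096-byte page,
       * vm_page_bits(512, 1024) computes:
       *
       *      first_bit = 512 >> 9              = 1
       *      last_bit  = (512 + 1024 - 1) >> 9 = 2
       *      result    = (2 << 2) - (1 << 1)   = 8 - 2 = 0x06
       *
       * i.e. bits 1 and 2 are set, one bit per DEV_BSIZE chunk covered by
       * the [base, base + size) range.
       */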
 1514 
 1515 /*
 1516  *      vm_page_set_validclean:
 1517  *
 1518  *      Sets portions of a page valid and clean.  The arguments are expected
 1519  *      to be DEV_BSIZE aligned, but if they aren't, the bitmap is inclusive
 1520  *      of any partial chunks touched by the range.  The invalid portion of
 1521  *      such chunks will be zero'd.
 1522  *
 1523  *      This routine may not block.
 1524  *
 1525  *      (base + size) must be less than or equal to PAGE_SIZE.
 1526  */
 1527 void
 1528 vm_page_set_validclean(vm_page_t m, int base, int size)
 1529 {
 1530         int pagebits;
 1531         int frag;
 1532         int endoff;
 1533 
 1534         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1535         if (size == 0)  /* handle degenerate case */
 1536                 return;
 1537 
 1538         /*
 1539          * If the base is not DEV_BSIZE aligned and the valid
 1540          * bit is clear, we have to zero out a portion of the
 1541          * first block.
 1542          */
 1543         if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
 1544             (m->valid & (1 << (base >> DEV_BSHIFT))) == 0)
 1545                 pmap_zero_page_area(m, frag, base - frag);
 1546 
 1547         /*
 1548          * If the ending offset is not DEV_BSIZE aligned and the 
 1549          * valid bit is clear, we have to zero out a portion of
 1550          * the last block.
 1551          */
 1552         endoff = base + size;
 1553         if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
 1554             (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0)
 1555                 pmap_zero_page_area(m, endoff,
 1556                     DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
 1557 
 1558         /*
 1559          * Set valid, clear dirty bits.  If validating the entire
 1560          * page we can safely clear the pmap modify bit.  We also
 1561          * use this opportunity to clear the PG_NOSYNC flag.  If a process
 1562          * takes a write fault on a MAP_NOSYNC memory area the flag will
 1563          * be set again.
 1564          *
 1565          * We set valid bits inclusive of any overlap, but we can only
 1566          * clear dirty bits for DEV_BSIZE chunks that are fully within
 1567          * the range.
 1568          */
 1569         pagebits = vm_page_bits(base, size);
 1570         m->valid |= pagebits;
 1571 #if 0   /* NOT YET */
 1572         if ((frag = base & (DEV_BSIZE - 1)) != 0) {
 1573                 frag = DEV_BSIZE - frag;
 1574                 base += frag;
 1575                 size -= frag;
 1576                 if (size < 0)
 1577                         size = 0;
 1578         }
 1579         pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1));
 1580 #endif
 1581         m->dirty &= ~pagebits;
 1582         if (base == 0 && size == PAGE_SIZE) {
 1583                 pmap_clear_modify(m);
 1584                 vm_page_flag_clear(m, PG_NOSYNC);
 1585         }
 1586 }
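
      /*
       * [Editor's worked example; not part of the original source.]
       * Assuming DEV_BSIZE == 512, a call such as
       * vm_page_set_validclean(m, 100, 1000) behaves as follows when the
       * touched blocks start out invalid:
       *
       *      base is unaligned:  frag = 100 & ~511 = 0, so bytes 0..99
       *          of the first block are zeroed.
       *      endoff = 1100 is unaligned:  frag = 1100 & ~511 = 1024, so
       *          bytes 1100..1535 (the rest of the third block) are
       *          zeroed.
       *      vm_page_bits(100, 1000) = 0x07, so valid bits 0-2 are set
       *          and the matching dirty bits are cleared.
       */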
 1587 
 1588 #if 0
 1589 
 1590 void
 1591 vm_page_set_dirty(vm_page_t m, int base, int size)
 1592 {
 1593         m->dirty |= vm_page_bits(base, size);
 1594 }
 1595 
 1596 #endif
 1597 
 1598 void
 1599 vm_page_clear_dirty(vm_page_t m, int base, int size)
 1600 {
 1601         GIANT_REQUIRED;
 1602         m->dirty &= ~vm_page_bits(base, size);
 1603 }
 1604 
 1605 /*
 1606  *      vm_page_set_invalid:
 1607  *
 1608  *      Invalidates DEV_BSIZE'd chunks within a page.  Both the
 1609  *      valid and dirty bits for the affected areas are cleared.
 1610  *
 1611  *      May not block.
 1612  */
 1613 void
 1614 vm_page_set_invalid(vm_page_t m, int base, int size)
 1615 {
 1616         int bits;
 1617 
 1618         GIANT_REQUIRED;
 1619         bits = vm_page_bits(base, size);
 1620         m->valid &= ~bits;
 1621         m->dirty &= ~bits;
 1622         m->object->generation++;
 1623 }
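
      /*
       * [Editor's note; not part of the original source.]  The
       * object->generation bump gives sleepers a way to notice that the
       * object changed underneath them; for example, the wait loop in
       * vm_page_grab() above re-checks the generation and redoes its
       * lookup when it no longer matches.
       */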
 1624 
 1625 /*
 1626  * vm_page_zero_invalid()
 1627  *
 1628  *      The kernel assumes that the invalid portions of a page contain 
 1629  *      garbage, but such pages can be mapped into memory by user code.
 1630  *      When this occurs, we must zero out the non-valid portions of the
 1631  *      page so user code sees what it expects.
 1632  *
 1633  *      Pages are most often semi-valid when the end of a file is mapped 
 1634  *      into memory and the file's size is not page aligned.
 1635  */
 1636 void
 1637 vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
 1638 {
 1639         int b;
 1640         int i;
 1641 
 1642         /*
 1643          * Scan the valid bits looking for invalid sections that
 1644          * must be zeroed.  Invalid sub-DEV_BSIZE'd areas (where the
 1645          * valid bit may be set) have already been zeroed by
 1646          * vm_page_set_validclean().
 1647          */
 1648         for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
 1649                 if (i == (PAGE_SIZE / DEV_BSIZE) || 
 1650                     (m->valid & (1 << i))
 1651                 ) {
 1652                         if (i > b) {
 1653                                 pmap_zero_page_area(m, 
 1654                                     b << DEV_BSHIFT, (i - b) << DEV_BSHIFT);
 1655                         }
 1656                         b = i + 1;
 1657                 }
 1658         }
 1659 
 1660         /*
 1661          * setvalid is TRUE when we can safely set the zero'd areas
 1662          * as being valid.  We can do this if there are no cache consistency
 1663          * issues, e.g. it is OK to do with UFS, but not OK to do with NFS.
 1664          */
 1665         if (setvalid)
 1666                 m->valid = VM_PAGE_BITS_ALL;
 1667 }
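
      /*
       * [Editor's worked example; not part of the original source.]
       * Assuming DEV_BSIZE == 512 and a 4096-byte page (8 blocks), a page
       * with m->valid == 0x0f (only the first four blocks valid) is
       * handled as follows: indices 0-3 each find their valid bit set and
       * advance b, indices 4-7 match nothing, and the final i == 8 pass
       * sees i > b (8 > 4) and zeroes bytes 2048..4095 with a single
       * pmap_zero_page_area() call.
       */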
 1668 
 1669 /*
 1670  *      vm_page_is_valid:
 1671  *
 1672  *      Is (partial) page valid?  Note that in the degenerate case where
 1673  *      size == 0, this returns FALSE if the page is entirely invalid
 1674  *      and TRUE otherwise.
 1675  *
 1676  *      May not block.
 1677  */
 1678 int
 1679 vm_page_is_valid(vm_page_t m, int base, int size)
 1680 {
 1681         int bits = vm_page_bits(base, size);
 1682 
 1683         if (m->valid && ((m->valid & bits) == bits))
 1684                 return 1;
 1685         else
 1686                 return 0;
 1687 }
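
      /*
       * [Editor's note; not part of the original source.]  For size == 0,
       * vm_page_bits() returns 0, so (m->valid & bits) == bits holds
       * trivially and the result reduces to whether m->valid is nonzero,
       * which is the degenerate-case behaviour described above.
       */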
 1688 
 1689 /*
 1690  * update dirty bits from pmap/mmu.  May not block.
 1691  */
 1692 void
 1693 vm_page_test_dirty(vm_page_t m)
 1694 {
 1695         if ((m->dirty != VM_PAGE_BITS_ALL) && pmap_is_modified(m)) {
 1696                 vm_page_dirty(m);
 1697         }
 1698 }
 1699 
 1700 int so_zerocp_fullpage = 0;
 1701 
 1702 void
 1703 vm_page_cowfault(vm_page_t m)
 1704 {
 1705         vm_page_t mnew;
 1706         vm_object_t object;
 1707         vm_pindex_t pindex;
 1708 
 1709         object = m->object;
 1710         pindex = m->pindex;
 1711         vm_page_busy(m);
 1712 
 1713  retry_alloc:
 1714         vm_page_remove(m);
 1715         /*
 1716          * An interrupt allocation is requested because the page
 1717          * queues lock is held. 
 1718          */
 1719         mnew = vm_page_alloc(object, pindex, VM_ALLOC_INTERRUPT);
 1720         if (mnew == NULL) {
 1721                 vm_page_insert(m, object, pindex);
 1722                 vm_page_unlock_queues();
 1723                 VM_WAIT;
 1724                 vm_page_lock_queues();
 1725                 goto retry_alloc;
 1726         }
 1727 
 1728         if (m->cow == 0) {
 1729                 /* 
 1730                  * check to see if we raced with an xmit complete when 
 1731                  * waiting to allocate a page.  If so, put things back 
 1732                  * the way they were 
 1733                  */
 1734                 vm_page_busy(mnew);
 1735                 vm_page_free(mnew);
 1736                 vm_page_insert(m, object, pindex);
 1737         } else { /* clear COW & copy page */
 1738                 if (so_zerocp_fullpage) {
 1739                         mnew->valid = VM_PAGE_BITS_ALL;
 1740                 } else {
 1741                         vm_page_copy(m, mnew);
 1742                 }
 1743                 vm_page_dirty(mnew);
 1744                 vm_page_flag_clear(mnew, PG_BUSY);
 1745         }
 1746 }
 1747 
 1748 void 
 1749 vm_page_cowclear(vm_page_t m)
 1750 {
 1751 
 1752         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1753         if (m->cow) {
 1754                 m->cow--;
 1755                 /* 
 1756                  * let vm_fault add back write permission lazily
 1757                  */
 1758         } 
 1759         /*
 1760          *  sf_buf_free() will free the page, so we needn't do it here
 1761          */ 
 1762 }
 1763 
 1764 void
 1765 vm_page_cowsetup(vm_page_t m)
 1766 {
 1767 
 1768         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1769         m->cow++;
 1770         pmap_page_protect(m, VM_PROT_READ);
 1771 }
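
      /*
       * [Editor's note; not part of the original source.]  Taken together,
       * the three routines above implement copy-on-write for pages loaned
       * out by the zero-copy socket send path: vm_page_cowsetup() bumps
       * m->cow and write-protects the page, vm_page_cowfault() replaces
       * it with a freshly allocated copy when a write fault arrives while
       * m->cow is still nonzero, and vm_page_cowclear() drops the
       * reference once the transmit completes, letting vm_fault restore
       * write permission lazily.
       */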
 1772 
 1773 #include "opt_ddb.h"
 1774 #ifdef DDB
 1775 #include <sys/kernel.h>
 1776 
 1777 #include <ddb/ddb.h>
 1778 
 1779 DB_SHOW_COMMAND(page, vm_page_print_page_info)
 1780 {
 1781         db_printf("cnt.v_free_count: %d\n", cnt.v_free_count);
 1782         db_printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
 1783         db_printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
 1784         db_printf("cnt.v_active_count: %d\n", cnt.v_active_count);
 1785         db_printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
 1786         db_printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
 1787         db_printf("cnt.v_free_min: %d\n", cnt.v_free_min);
 1788         db_printf("cnt.v_free_target: %d\n", cnt.v_free_target);
 1789         db_printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
 1790         db_printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
 1791 }
 1792 
 1793 DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
 1794 {
 1795         int i;
 1796         db_printf("PQ_FREE:");
 1797         for (i = 0; i < PQ_L2_SIZE; i++) {
 1798                 db_printf(" %d", vm_page_queues[PQ_FREE + i].lcnt);
 1799         }
 1800         db_printf("\n");
 1801                 
 1802         db_printf("PQ_CACHE:");
 1803         for (i = 0; i < PQ_L2_SIZE; i++) {
 1804                 db_printf(" %d", vm_page_queues[PQ_CACHE + i].lcnt);
 1805         }
 1806         db_printf("\n");
 1807 
 1808         db_printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n",
 1809                 vm_page_queues[PQ_ACTIVE].lcnt,
 1810                 vm_page_queues[PQ_INACTIVE].lcnt);
 1811 }
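
      /*
       * [Editor's note; not part of the original source.]  With DDB
       * compiled in, these handlers are reached from the kernel debugger
       * prompt as "show page" and "show pageq"; the first prints the
       * global vmmeter counters, the second the per-queue page counts.
       */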
 1812 #endif /* DDB */
