FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_page.c


    1 /*
    2  * Copyright (c) 1991 Regents of the University of California.
    3  * All rights reserved.
    4  *
    5  * This code is derived from software contributed to Berkeley by
    6  * The Mach Operating System project at Carnegie-Mellon University.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  * 3. All advertising materials mentioning features or use of this software
   17  *    must display the following acknowledgement:
   18  *      This product includes software developed by the University of
   19  *      California, Berkeley and its contributors.
   20  * 4. Neither the name of the University nor the names of its contributors
   21  *    may be used to endorse or promote products derived from this software
   22  *    without specific prior written permission.
   23  *
   24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   34  * SUCH DAMAGE.
   35  *
   36  *      from: @(#)vm_page.c     7.4 (Berkeley) 5/7/91
   37  */
   38 
   39 /*
   40  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
   41  * All rights reserved.
   42  *
   43  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
   44  *
   45  * Permission to use, copy, modify and distribute this software and
   46  * its documentation is hereby granted, provided that both the copyright
   47  * notice and this permission notice appear in all copies of the
   48  * software, derivative works or modified versions, and any portions
   49  * thereof, and that both notices appear in supporting documentation.
   50  *
   51  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
   52  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
   53  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
   54  *
   55  * Carnegie Mellon requests users of this software to return to
   56  *
   57  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
   58  *  School of Computer Science
   59  *  Carnegie Mellon University
   60  *  Pittsburgh PA 15213-3890
   61  *
   62  * any improvements or extensions that they make and grant Carnegie the
   63  * rights to redistribute these changes.
   64  */
   65 
   66 /*
   67  *                      GENERAL RULES ON VM_PAGE MANIPULATION
   68  *
   69  *      - a pageq mutex is required when adding or removing a page from a
   70  *        page queue (vm_page_queue[]), regardless of other mutexes or the
   71  *        busy state of a page.
   72  *
   73  *      - a hash chain mutex is required when associating or disassociating
   74  *        a page from the VM PAGE CACHE hash table (vm_page_buckets),
   75  *        regardless of other mutexes or the busy state of a page.
   76  *
   77  *      - either a hash chain mutex OR a busied page is required in order
   78  *        to modify the page flags.  A hash chain mutex must be obtained in
   79  *        order to busy a page.  A page's flags cannot be modified by a
   80  *        hash chain mutex if the page is marked busy.
   81  *
   82  *      - The object memq mutex is held when inserting or removing
   83  *        pages from an object (vm_page_insert() or vm_page_remove()).  This
   84  *        is different from the object's main mutex.
   85  *
   86  *      Generally speaking, you have to be aware of side effects when running
   87  *      vm_page ops.  A vm_page_lookup() will return with the hash chain
   88  *      locked, whether it was able to lookup the page or not.  vm_page_free(),
   89  *      vm_page_cache(), vm_page_activate(), and a number of other routines
   90  *      will release the hash chain mutex for you.  Intermediate manipulation
   91  *      routines such as vm_page_flag_set() expect the hash chain to be held
   92  *      on entry and the hash chain will remain held on return.
   93  *
   94  *      pageq scanning can only occur with the pageq in question locked.
   95  *      We have a known bottleneck with the active queue, but the cache
   96  *      and free queues are actually arrays already. 
   97  */
   98 
   99 /*
  100  *      Resident memory management module.
  101  */
  102 
  103 #include <sys/cdefs.h>
  104 __FBSDID("$FreeBSD: releng/5.2/sys/vm/vm_page.c 121844 2003-11-01 04:54:23Z alc $");
  105 
  106 #include <sys/param.h>
  107 #include <sys/systm.h>
  108 #include <sys/lock.h>
  109 #include <sys/malloc.h>
  110 #include <sys/mutex.h>
  111 #include <sys/proc.h>
  112 #include <sys/vmmeter.h>
  113 #include <sys/vnode.h>
  114 
  115 #include <vm/vm.h>
  116 #include <vm/vm_param.h>
  117 #include <vm/vm_kern.h>
  118 #include <vm/vm_object.h>
  119 #include <vm/vm_page.h>
  120 #include <vm/vm_pageout.h>
  121 #include <vm/vm_pager.h>
  122 #include <vm/vm_extern.h>
  123 #include <vm/uma.h>
  124 #include <vm/uma_int.h>
  125 
  126 /*
   127  *      Associated with each page of user-allocatable memory is a
  128  *      page structure.
  129  */
  130 
  131 struct mtx vm_page_queue_mtx;
  132 struct mtx vm_page_queue_free_mtx;
  133 
  134 vm_page_t vm_page_array = 0;
  135 int vm_page_array_size = 0;
  136 long first_page = 0;
  137 int vm_page_zero_count = 0;
  138 
  139 /*
  140  *      vm_set_page_size:
  141  *
  142  *      Sets the page size, perhaps based upon the memory
  143  *      size.  Must be called before any use of page-size
  144  *      dependent functions.
  145  */
  146 void
  147 vm_set_page_size(void)
  148 {
  149         if (cnt.v_page_size == 0)
  150                 cnt.v_page_size = PAGE_SIZE;
  151         if (((cnt.v_page_size - 1) & cnt.v_page_size) != 0)
  152                 panic("vm_set_page_size: page size not a power of two");
  153 }
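
       /*
        * The power-of-two test above exploits the fact that a power of two
        * has exactly one bit set, so subtracting one sets every lower bit
        * and the AND comes out zero.  Two worked examples (illustrative
        * values, not from the original source):
        *
        *      4096 = 0x1000, 4095 = 0x0fff, 0x1000 & 0x0fff == 0       (ok)
        *      6144 = 0x1800, 6143 = 0x17ff, 0x1800 & 0x17ff == 0x1000  (panic)
        */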
  154 
  155 /*
  156  *      vm_page_startup:
  157  *
  158  *      Initializes the resident memory module.
  159  *
  160  *      Allocates memory for the page cells, and
  161  *      for the object/offset-to-page hash table headers.
  162  *      Each page cell is initialized and placed on the free list.
  163  */
  164 vm_offset_t
  165 vm_page_startup(vm_offset_t starta, vm_offset_t enda, vm_offset_t vaddr)
  166 {
  167         vm_offset_t mapped;
  168         vm_size_t npages;
  169         vm_paddr_t page_range;
  170         vm_paddr_t new_end;
  171         int i;
  172         vm_paddr_t pa;
  173         int nblocks;
  174         vm_paddr_t last_pa;
  175 
  176         /* the biggest memory array is the second group of pages */
  177         vm_paddr_t end;
  178         vm_paddr_t biggestsize;
  179         int biggestone;
  180 
  181         vm_paddr_t total;
  182         vm_size_t bootpages;
  183 
  184         total = 0;
  185         biggestsize = 0;
  186         biggestone = 0;
  187         nblocks = 0;
  188         vaddr = round_page(vaddr);
  189 
  190         for (i = 0; phys_avail[i + 1]; i += 2) {
  191                 phys_avail[i] = round_page(phys_avail[i]);
  192                 phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
  193         }
  194 
  195         for (i = 0; phys_avail[i + 1]; i += 2) {
  196                 vm_paddr_t size = phys_avail[i + 1] - phys_avail[i];
  197 
  198                 if (size > biggestsize) {
  199                         biggestone = i;
  200                         biggestsize = size;
  201                 }
  202                 ++nblocks;
  203                 total += size;
  204         }
  205 
  206         end = phys_avail[biggestone+1];
  207 
  208         /*
  209          * Initialize the locks.
  210          */
  211         mtx_init(&vm_page_queue_mtx, "vm page queue mutex", NULL, MTX_DEF);
  212         mtx_init(&vm_page_queue_free_mtx, "vm page queue free mutex", NULL,
  213            MTX_SPIN);
  214 
  215         /*
  216          * Initialize the queue headers for the free queue, the active queue
  217          * and the inactive queue.
  218          */
  219         vm_pageq_init();
  220 
  221         /*
  222          * Allocate memory for use when boot strapping the kernel memory
  223          * allocator.
  224          */
  225         bootpages = UMA_BOOT_PAGES * UMA_SLAB_SIZE;
  226         new_end = end - bootpages;
  227         new_end = trunc_page(new_end);
  228         mapped = pmap_map(&vaddr, new_end, end,
  229             VM_PROT_READ | VM_PROT_WRITE);
  230         bzero((caddr_t) mapped, end - new_end);
  231         uma_startup((caddr_t)mapped);
  232 
  233         /*
  234          * Compute the number of pages of memory that will be available for
  235          * use (taking into account the overhead of a page structure per
  236          * page).
  237          */
  238         first_page = phys_avail[0] / PAGE_SIZE;
  239         page_range = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE - first_page;
  240         npages = (total - (page_range * sizeof(struct vm_page)) -
  241             (end - new_end)) / PAGE_SIZE;
  242         end = new_end;
  243 
  244         /*
  245          * Initialize the mem entry structures now, and put them in the free
  246          * queue.
  247          */
  248         new_end = trunc_page(end - page_range * sizeof(struct vm_page));
  249         mapped = pmap_map(&vaddr, new_end, end,
  250             VM_PROT_READ | VM_PROT_WRITE);
  251         vm_page_array = (vm_page_t) mapped;
  252         phys_avail[biggestone + 1] = new_end;
  253 
  254         /*
  255          * Clear all of the page structures
  256          */
  257         bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
  258         vm_page_array_size = page_range;
  259 
  260         /*
  261          * Construct the free queue(s) in descending order (by physical
  262          * address) so that the first 16MB of physical memory is allocated
  263          * last rather than first.  On large-memory machines, this avoids
  264          * the exhaustion of low physical memory before isa_dmainit has run.
  265          */
  266         cnt.v_page_count = 0;
  267         cnt.v_free_count = 0;
  268         for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
  269                 pa = phys_avail[i];
  270                 last_pa = phys_avail[i + 1];
  271                 while (pa < last_pa && npages-- > 0) {
  272                         vm_pageq_add_new_page(pa);
  273                         pa += PAGE_SIZE;
  274                 }
  275         }
  276         return (vaddr);
  277 }
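
       /*
        * For reference, phys_avail[] is a zero-terminated array of
        * (start, end) physical address pairs describing usable RAM.  A
        * hypothetical two-region machine, as the loops above might see it:
        *
        *      phys_avail[] = { 0x00001000, 0x0009f000,    <- region 0
        *                       0x00100000, 0x3fff0000,    <- region 1 (biggest)
        *                       0, 0 };                    <- terminator
        *
        * Bootstrap memory and the vm_page array are both carved off the
        * high end of the biggest region (region 1 here).
        */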
  278 
  279 void
  280 vm_page_flag_set(vm_page_t m, unsigned short bits)
  281 {
  282 
  283         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
  284         m->flags |= bits;
  285 } 
  286 
  287 void
  288 vm_page_flag_clear(vm_page_t m, unsigned short bits)
  289 {
  290 
  291         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
  292         m->flags &= ~bits;
  293 }
  294 
  295 void
  296 vm_page_busy(vm_page_t m)
  297 {
  298         KASSERT((m->flags & PG_BUSY) == 0,
  299             ("vm_page_busy: page already busy!!!"));
  300         vm_page_flag_set(m, PG_BUSY);
  301 }
  302 
  303 /*
  304  *      vm_page_flash:
  305  *
  306  *      wakeup anyone waiting for the page.
  307  */
  308 void
  309 vm_page_flash(vm_page_t m)
  310 {
  311         if (m->flags & PG_WANTED) {
  312                 vm_page_flag_clear(m, PG_WANTED);
  313                 wakeup(m);
  314         }
  315 }
  316 
  317 /*
  318  *      vm_page_wakeup:
  319  *
  320  *      clear the PG_BUSY flag and wakeup anyone waiting for the
  321  *      page.
  322  *
  323  */
  324 void
  325 vm_page_wakeup(vm_page_t m)
  326 {
  327         KASSERT(m->flags & PG_BUSY, ("vm_page_wakeup: page not busy!!!"));
  328         vm_page_flag_clear(m, PG_BUSY);
  329         vm_page_flash(m);
  330 }
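
       /*
        * Taken together, vm_page_busy(), vm_page_flash(), and
        * vm_page_wakeup() implement the PG_BUSY hand-off.  A hypothetical
        * caller (a sketch only, not a verbatim consumer of this API) might
        * bracket its manipulation like this:
        *
        *      vm_page_lock_queues();
        *      vm_page_busy(m);                (claim exclusive access)
        *      vm_page_unlock_queues();
        *      ... manipulate the page ...
        *      vm_page_lock_queues();
        *      vm_page_wakeup(m);              (clear PG_BUSY, wake waiters)
        *      vm_page_unlock_queues();
        */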
  331 
  332 void
  333 vm_page_io_start(vm_page_t m)
  334 {
  335 
  336         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
  337         m->busy++;
  338 }
  339 
  340 void
  341 vm_page_io_finish(vm_page_t m)
  342 {
  343 
  344         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
  345         m->busy--;
  346         if (m->busy == 0)
  347                 vm_page_flash(m);
  348 }
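
       /*
        * In contrast to the PG_BUSY flag, the busy count tracks pending
        * I/O.  A hypothetical pager (sketch only) would bracket each
        * transfer, with the page queues lock held as the assertions
        * require:
        *
        *      vm_page_io_start(m);    (before issuing I/O on the page)
        *      ... I/O completes ...
        *      vm_page_io_finish(m);   (wakes waiters when the count hits 0)
        */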
  349 
  350 /*
   351  * Keep the page from being freed by the page daemon.  This has
   352  * much the same effect as wiring, except at much lower overhead,
   353  * and should be used only for *very* temporary holding
   354  * ("wiring").
  355  */
  356 void
  357 vm_page_hold(vm_page_t mem)
  358 {
  359 
  360         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
  361         mem->hold_count++;
  362 }
  363 
  364 void
  365 vm_page_unhold(vm_page_t mem)
  366 {
  367 
  368         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
  369         --mem->hold_count;
  370         KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
  371         if (mem->hold_count == 0 && mem->queue == PQ_HOLD)
  372                 vm_page_free_toq(mem);
  373 }
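
       /*
        * A sketch of the intended pairing, assuming the caller holds the
        * page queues lock around each call:
        *
        *      vm_page_hold(m);        (pin across a short window)
        *      ...
        *      vm_page_unhold(m);      (may free the page if it sits on
        *                               PQ_HOLD and the count reaches 0)
        */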
  374 
  375 /*
  376  *      vm_page_free:
  377  *
  378  *      Free a page
  379  *
  380  *      The clearing of PG_ZERO is a temporary safety until the code can be
  381  *      reviewed to determine that PG_ZERO is being properly cleared on
  382  *      write faults or maps.  PG_ZERO was previously cleared in
  383  *      vm_page_alloc().
  384  */
  385 void
  386 vm_page_free(vm_page_t m)
  387 {
  388         vm_page_flag_clear(m, PG_ZERO);
  389         vm_page_free_toq(m);
  390         vm_page_zero_idle_wakeup();
  391 }
  392 
  393 /*
  394  *      vm_page_free_zero:
  395  *
   396  *      Free a page to the zeroed-pages queue
  397  */
  398 void
  399 vm_page_free_zero(vm_page_t m)
  400 {
  401         vm_page_flag_set(m, PG_ZERO);
  402         vm_page_free_toq(m);
  403 }
  404 
  405 /*
  406  *      vm_page_sleep_if_busy:
  407  *
  408  *      Sleep and release the page queues lock if PG_BUSY is set or,
  409  *      if also_m_busy is TRUE, busy is non-zero.  Returns TRUE if the
  410  *      thread slept and the page queues lock was released.
  411  *      Otherwise, retains the page queues lock and returns FALSE.
  412  */
  413 int
  414 vm_page_sleep_if_busy(vm_page_t m, int also_m_busy, const char *msg)
  415 {
  416         int is_object_locked;
  417 
  418         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
  419         if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
  420                 vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
  421                 /*
  422                  * Remove mtx_owned() after vm_object locking is finished.
  423                  */
  424                 if ((is_object_locked = m->object != NULL &&
  425                      mtx_owned(&m->object->mtx)))
  426                         mtx_unlock(&m->object->mtx);
  427                 msleep(m, &vm_page_queue_mtx, PDROP | PVM, msg, 0);
  428                 if (is_object_locked)
  429                         mtx_lock(&m->object->mtx);
  430                 return (TRUE);
  431         }
  432         return (FALSE);
  433 }
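
       /*
        * A typical caller retries after sleeping, since the page identity
        * can change while the queues lock is dropped.  A hypothetical
        * sketch (a real caller would also re-validate the page against
        * its object after waking):
        *
        *      retry:
        *              vm_page_lock_queues();
        *              if (vm_page_sleep_if_busy(m, TRUE, "xwait"))
        *                      goto retry;     (the lock was released)
        *              ... page is not busy; queues lock is still held ...
        */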
  434 
  435 /*
  436  *      vm_page_dirty:
  437  *
  438  *      make page all dirty
  439  */
  440 void
  441 vm_page_dirty(vm_page_t m)
  442 {
  443         KASSERT(m->queue - m->pc != PQ_CACHE,
  444             ("vm_page_dirty: page in cache!"));
  445         KASSERT(m->queue - m->pc != PQ_FREE,
  446             ("vm_page_dirty: page is free!"));
  447         m->dirty = VM_PAGE_BITS_ALL;
  448 }
  449 
  450 /*
  451  *      vm_page_splay:
  452  *
  453  *      Implements Sleator and Tarjan's top-down splay algorithm.  Returns
  454  *      the vm_page containing the given pindex.  If, however, that
  455  *      pindex is not found in the vm_object, returns a vm_page that is
  456  *      adjacent to the pindex, coming before or after it.
  457  */
  458 vm_page_t
  459 vm_page_splay(vm_pindex_t pindex, vm_page_t root)
  460 {
  461         struct vm_page dummy;
  462         vm_page_t lefttreemax, righttreemin, y;
  463 
  464         if (root == NULL)
  465                 return (root);
  466         lefttreemax = righttreemin = &dummy;
  467         for (;; root = y) {
  468                 if (pindex < root->pindex) {
  469                         if ((y = root->left) == NULL)
  470                                 break;
  471                         if (pindex < y->pindex) {
  472                                 /* Rotate right. */
  473                                 root->left = y->right;
  474                                 y->right = root;
  475                                 root = y;
  476                                 if ((y = root->left) == NULL)
  477                                         break;
  478                         }
  479                         /* Link into the new root's right tree. */
  480                         righttreemin->left = root;
  481                         righttreemin = root;
  482                 } else if (pindex > root->pindex) {
  483                         if ((y = root->right) == NULL)
  484                                 break;
  485                         if (pindex > y->pindex) {
  486                                 /* Rotate left. */
  487                                 root->right = y->left;
  488                                 y->left = root;
  489                                 root = y;
  490                                 if ((y = root->right) == NULL)
  491                                         break;
  492                         }
  493                         /* Link into the new root's left tree. */
  494                         lefttreemax->right = root;
  495                         lefttreemax = root;
  496                 } else
  497                         break;
  498         }
  499         /* Assemble the new root. */
  500         lefttreemax->right = root->left;
  501         righttreemin->left = root->right;
  502         root->left = dummy.right;
  503         root->right = dummy.left;
  504         return (root);
  505 }
  506 
  507 /*
  508  *      vm_page_insert:         [ internal use only ]
  509  *
  510  *      Inserts the given mem entry into the object and object list.
  511  *
  512  *      The pagetables are not updated but will presumably fault the page
  513  *      in if necessary, or if a kernel page the caller will at some point
  514  *      enter the page into the kernel's pmap.  We are not allowed to block
  515  *      here so we *can't* do this anyway.
  516  *
   517  *      The object and page must be locked and the caller at splhigh.
  518  *      This routine may not block.
  519  */
  520 void
  521 vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
  522 {
  523         vm_page_t root;
  524 
  525         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
  526         if (m->object != NULL)
  527                 panic("vm_page_insert: already inserted");
  528 
  529         /*
  530          * Record the object/offset pair in this page
  531          */
  532         m->object = object;
  533         m->pindex = pindex;
  534 
  535         /*
  536          * Now link into the object's ordered list of backed pages.
  537          */
  538         root = object->root;
  539         if (root == NULL) {
  540                 m->left = NULL;
  541                 m->right = NULL;
  542                 TAILQ_INSERT_TAIL(&object->memq, m, listq);
  543         } else {
  544                 root = vm_page_splay(pindex, root);
  545                 if (pindex < root->pindex) {
  546                         m->left = root->left;
  547                         m->right = root;
  548                         root->left = NULL;
  549                         TAILQ_INSERT_BEFORE(root, m, listq);
  550                 } else {
  551                         m->right = root->right;
  552                         m->left = root;
  553                         root->right = NULL;
  554                         TAILQ_INSERT_AFTER(&object->memq, root, m, listq);
  555                 }
  556         }
  557         object->root = m;
  558         object->generation++;
  559 
  560         /*
  561          * show that the object has one more resident page.
  562          */
  563         object->resident_page_count++;
  564 
  565         /*
  566          * Since we are inserting a new and possibly dirty page,
  567          * update the object's OBJ_WRITEABLE and OBJ_MIGHTBEDIRTY flags.
  568          */
  569         if (m->flags & PG_WRITEABLE)
  570                 vm_object_set_writeable_dirty(object);
  571 }
  572 
  573 /*
  574  *      vm_page_remove:
  575  *                              NOTE: used by device pager as well -wfj
  576  *
  577  *      Removes the given mem entry from the object/offset-page
   578  *      table and the object page list, but does not invalidate/terminate
  579  *      the backing store.
  580  *
  581  *      The object and page must be locked, and at splhigh.
  582  *      The underlying pmap entry (if any) is NOT removed here.
  583  *      This routine may not block.
  584  */
  585 void
  586 vm_page_remove(vm_page_t m)
  587 {
  588         vm_object_t object;
  589         vm_page_t root;
  590 
  591         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
  592         if (m->object == NULL)
  593                 return;
  594 #ifndef __alpha__
  595         VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
  596 #endif
  597         if ((m->flags & PG_BUSY) == 0) {
  598                 panic("vm_page_remove: page not busy");
  599         }
  600 
  601         /*
  602          * Basically destroy the page.
  603          */
  604         vm_page_wakeup(m);
  605 
  606         object = m->object;
  607 
  608         /*
  609          * Now remove from the object's list of backed pages.
  610          */
  611         if (m != object->root)
  612                 vm_page_splay(m->pindex, object->root);
  613         if (m->left == NULL)
  614                 root = m->right;
  615         else {
  616                 root = vm_page_splay(m->pindex, m->left);
  617                 root->right = m->right;
  618         }
  619         object->root = root;
  620         TAILQ_REMOVE(&object->memq, m, listq);
  621 
  622         /*
  623          * And show that the object has one fewer resident page.
  624          */
  625         object->resident_page_count--;
  626         object->generation++;
  627 
  628         m->object = NULL;
  629 }
  630 
  631 /*
  632  *      vm_page_lookup:
  633  *
  634  *      Returns the page associated with the object/offset
  635  *      pair specified; if none is found, NULL is returned.
  636  *
  637  *      The object must be locked.
  638  *      This routine may not block.
  639  *      This is a critical path routine
  640  */
  641 vm_page_t
  642 vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
  643 {
  644         vm_page_t m;
  645 
  646         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
  647         m = vm_page_splay(pindex, object->root);
  648         if ((object->root = m) != NULL && m->pindex != pindex)
  649                 m = NULL;
  650         return (m);
  651 }
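
       /*
        * Because every lookup splays the target to the root, repeated
        * lookups of nearby indices stay cheap.  A hypothetical use,
        * assuming the caller already locked the object:
        *
        *      VM_OBJECT_LOCK(object);
        *      m = vm_page_lookup(object, pindex);
        *      if (m != NULL) {
        *              ... m is resident; object->root now points at m ...
        *      }
        *      VM_OBJECT_UNLOCK(object);
        */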
  652 
  653 /*
  654  *      vm_page_rename:
  655  *
  656  *      Move the given memory entry from its
  657  *      current object to the specified target object/offset.
  658  *
  659  *      The object must be locked.
  660  *      This routine may not block.
  661  *
   662  *      Note: this routine will raise itself to splvm(); the caller need not.
  663  *
  664  *      Note: swap associated with the page must be invalidated by the move.  We
  665  *            have to do this for several reasons:  (1) we aren't freeing the
  666  *            page, (2) we are dirtying the page, (3) the VM system is probably
  667  *            moving the page from object A to B, and will then later move
  668  *            the backing store from A to B and we can't have a conflict.
  669  *
  670  *      Note: we *always* dirty the page.  It is necessary both for the
  671  *            fact that we moved it, and because we may be invalidating
  672  *            swap.  If the page is on the cache, we have to deactivate it
  673  *            or vm_page_dirty() will panic.  Dirty pages are not allowed
  674  *            on the cache.
  675  */
  676 void
  677 vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
  678 {
  679         int s;
  680 
  681         s = splvm();
  682         vm_page_remove(m);
  683         vm_page_insert(m, new_object, new_pindex);
  684         if (m->queue - m->pc == PQ_CACHE)
  685                 vm_page_deactivate(m);
  686         vm_page_dirty(m);
  687         splx(s);
  688 }
  689 
  690 /*
  691  *      vm_page_select_cache:
  692  *
   693  *      Find a page on the cache queue with color optimization.  Pages
   694  *      that are found but are not usable are deactivated.  This
  695  *      keeps us from using potentially busy cached pages.
  696  *
  697  *      This routine must be called at splvm().
  698  *      This routine may not block.
  699  */
  700 vm_page_t
  701 vm_page_select_cache(int color)
  702 {
  703         vm_page_t m;
  704 
  705         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
  706         while (TRUE) {
  707                 m = vm_pageq_find(PQ_CACHE, color, FALSE);
  708                 if (m && ((m->flags & (PG_BUSY|PG_UNMANAGED)) || m->busy ||
  709                                m->hold_count || m->wire_count ||
  710                           (!VM_OBJECT_TRYLOCK(m->object) &&
  711                            !VM_OBJECT_LOCKED(m->object)))) {
  712                         vm_page_deactivate(m);
  713                         continue;
  714                 }
  715                 return m;
  716         }
  717 }
  718 
  719 /*
  720  *      vm_page_alloc:
  721  *
  722  *      Allocate and return a memory cell associated
  723  *      with this VM object/offset pair.
  724  *
  725  *      page_req classes:
  726  *      VM_ALLOC_NORMAL         normal process request
  727  *      VM_ALLOC_SYSTEM         system *really* needs a page
  728  *      VM_ALLOC_INTERRUPT      interrupt time request
  729  *      VM_ALLOC_ZERO           zero page
  730  *
  731  *      This routine may not block.
  732  *
  733  *      Additional special handling is required when called from an
  734  *      interrupt (VM_ALLOC_INTERRUPT).  We are not allowed to mess with
  735  *      the page cache in this case.
  736  */
  737 vm_page_t
  738 vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
  739 {
  740         vm_object_t m_object;
  741         vm_page_t m = NULL;
  742         int color, flags, page_req, s;
  743 
  744         page_req = req & VM_ALLOC_CLASS_MASK;
  745 
  746         if ((req & VM_ALLOC_NOOBJ) == 0) {
  747                 KASSERT(object != NULL,
  748                     ("vm_page_alloc: NULL object."));
  749                 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
  750                 KASSERT(!vm_page_lookup(object, pindex),
  751                     ("vm_page_alloc: page already allocated"));
  752                 color = (pindex + object->pg_color) & PQ_L2_MASK;
  753         } else
  754                 color = pindex & PQ_L2_MASK;
  755 
  756         /*
  757          * The pager is allowed to eat deeper into the free page list.
  758          */
  759         if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT)) {
  760                 page_req = VM_ALLOC_SYSTEM;
   761         }
  762 
  763         s = splvm();
  764 loop:
  765         mtx_lock_spin(&vm_page_queue_free_mtx);
  766         if (cnt.v_free_count > cnt.v_free_reserved ||
  767             (page_req == VM_ALLOC_SYSTEM && 
  768              cnt.v_cache_count == 0 && 
  769              cnt.v_free_count > cnt.v_interrupt_free_min) ||
  770             (page_req == VM_ALLOC_INTERRUPT && cnt.v_free_count > 0)) {
  771                 /*
  772                  * Allocate from the free queue if the number of free pages
  773                  * exceeds the minimum for the request class.
  774                  */
  775                 m = vm_pageq_find(PQ_FREE, color, (req & VM_ALLOC_ZERO) != 0);
  776         } else if (page_req != VM_ALLOC_INTERRUPT) {
  777                 mtx_unlock_spin(&vm_page_queue_free_mtx);
  778                 /*
  779                  * Allocatable from cache (non-interrupt only).  On success,
  780                  * we must free the page and try again, thus ensuring that
  781                  * cnt.v_*_free_min counters are replenished.
  782                  */
  783                 vm_page_lock_queues();
  784                 if ((m = vm_page_select_cache(color)) == NULL) {
  785                         vm_page_unlock_queues();
  786                         splx(s);
  787 #if defined(DIAGNOSTIC)
  788                         if (cnt.v_cache_count > 0)
  789                                 printf("vm_page_alloc(NORMAL): missing pages on cache queue: %d\n", cnt.v_cache_count);
  790 #endif
  791                         atomic_add_int(&vm_pageout_deficit, 1);
  792                         pagedaemon_wakeup();
  793                         return (NULL);
  794                 }
  795                 KASSERT(m->dirty == 0, ("Found dirty cache page %p", m));
  796                 m_object = m->object;
  797                 VM_OBJECT_LOCK_ASSERT(m_object, MA_OWNED);
  798                 vm_page_busy(m);
  799                 pmap_remove_all(m);
  800                 vm_page_free(m);
  801                 vm_page_unlock_queues();
  802                 if (m_object != object)
  803                         VM_OBJECT_UNLOCK(m_object);
  804                 goto loop;
  805         } else {
  806                 /*
  807                  * Not allocatable from cache from interrupt, give up.
  808                  */
  809                 mtx_unlock_spin(&vm_page_queue_free_mtx);
  810                 splx(s);
  811                 atomic_add_int(&vm_pageout_deficit, 1);
  812                 pagedaemon_wakeup();
  813                 return (NULL);
  814         }
  815 
  816         /*
  817          *  At this point we had better have found a good page.
  818          */
  819 
  820         KASSERT(
  821             m != NULL,
  822             ("vm_page_alloc(): missing page on free queue\n")
  823         );
  824 
  825         /*
  826          * Remove from free queue
  827          */
  828 
  829         vm_pageq_remove_nowakeup(m);
  830 
  831         /*
  832          * Initialize structure.  Only the PG_ZERO flag is inherited.
  833          */
  834         flags = PG_BUSY;
  835         if (m->flags & PG_ZERO) {
  836                 vm_page_zero_count--;
  837                 if (req & VM_ALLOC_ZERO)
  838                         flags = PG_ZERO | PG_BUSY;
  839         }
  840         m->flags = flags;
  841         if (req & VM_ALLOC_WIRED) {
  842                 atomic_add_int(&cnt.v_wire_count, 1);
  843                 m->wire_count = 1;
  844         } else
  845                 m->wire_count = 0;
  846         m->hold_count = 0;
  847         m->act_count = 0;
  848         m->busy = 0;
  849         m->valid = 0;
  850         KASSERT(m->dirty == 0, ("vm_page_alloc: free/cache page %p was dirty", m));
  851         mtx_unlock_spin(&vm_page_queue_free_mtx);
  852 
  853         /*
  854          * vm_page_insert() is safe prior to the splx().  Note also that
  855          * inserting a page here does not insert it into the pmap (which
  856          * could cause us to block allocating memory).  We cannot block 
  857          * anywhere.
  858          */
  859         if ((req & VM_ALLOC_NOOBJ) == 0)
  860                 vm_page_insert(m, object, pindex);
  861         else
  862                 m->pindex = pindex;
  863 
  864         /*
  865          * Don't wakeup too often - wakeup the pageout daemon when
  866          * we would be nearly out of memory.
  867          */
  868         if (vm_paging_needed())
  869                 pagedaemon_wakeup();
  870 
  871         splx(s);
  872         return (m);
  873 }
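
       /*
        * A hypothetical allocate-or-wait loop in the style the VM_WAIT
        * machinery below expects (a sketch; a real caller would re-check
        * the object for a racing insertion after reacquiring its lock):
        *
        *      while ((m = vm_page_alloc(object, pindex,
        *          VM_ALLOC_NORMAL | VM_ALLOC_ZERO)) == NULL) {
        *              VM_OBJECT_UNLOCK(object);
        *              VM_WAIT;                (sleep until pages are freed)
        *              VM_OBJECT_LOCK(object);
        *      }
        */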
  874 
  875 /*
  876  *      vm_wait:        (also see VM_WAIT macro)
  877  *
  878  *      Block until free pages are available for allocation
  879  *      - Called in various places before memory allocations.
  880  */
  881 void
  882 vm_wait(void)
  883 {
  884         int s;
  885 
  886         s = splvm();
  887         vm_page_lock_queues();
  888         if (curproc == pageproc) {
  889                 vm_pageout_pages_needed = 1;
  890                 msleep(&vm_pageout_pages_needed, &vm_page_queue_mtx,
  891                     PDROP | PSWP, "VMWait", 0);
  892         } else {
  893                 if (!vm_pages_needed) {
  894                         vm_pages_needed = 1;
  895                         wakeup(&vm_pages_needed);
  896                 }
  897                 msleep(&cnt.v_free_count, &vm_page_queue_mtx, PDROP | PVM,
  898                     "vmwait", 0);
  899         }
  900         splx(s);
  901 }
  902 
  903 /*
  904  *      vm_waitpfault:  (also see VM_WAITPFAULT macro)
  905  *
  906  *      Block until free pages are available for allocation
  907  *      - Called only in vm_fault so that processes page faulting
  908  *        can be easily tracked.
  909  *      - Sleeps at a lower priority than vm_wait() so that vm_wait()ing
  910  *        processes will be able to grab memory first.  Do not change
  911  *        this balance without careful testing first.
  912  */
  913 void
  914 vm_waitpfault(void)
  915 {
  916         int s;
  917 
  918         s = splvm();
  919         vm_page_lock_queues();
  920         if (!vm_pages_needed) {
  921                 vm_pages_needed = 1;
  922                 wakeup(&vm_pages_needed);
  923         }
  924         msleep(&cnt.v_free_count, &vm_page_queue_mtx, PDROP | PUSER,
  925             "pfault", 0);
  926         splx(s);
  927 }
  928 
  929 /*
  930  *      vm_page_activate:
  931  *
  932  *      Put the specified page on the active list (if appropriate).
  933  *      Ensure that act_count is at least ACT_INIT but do not otherwise
  934  *      mess with it.
  935  *
  936  *      The page queues must be locked.
  937  *      This routine may not block.
  938  */
  939 void
  940 vm_page_activate(vm_page_t m)
  941 {
  942         int s;
  943 
  944         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
  945         s = splvm();
  946         if (m->queue != PQ_ACTIVE) {
  947                 if ((m->queue - m->pc) == PQ_CACHE)
  948                         cnt.v_reactivated++;
  949                 vm_pageq_remove(m);
  950                 if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
  951                         if (m->act_count < ACT_INIT)
  952                                 m->act_count = ACT_INIT;
  953                         vm_pageq_enqueue(PQ_ACTIVE, m);
  954                 }
  955         } else {
  956                 if (m->act_count < ACT_INIT)
  957                         m->act_count = ACT_INIT;
  958         }
  959         splx(s);
  960 }
  961 
  962 /*
  963  *      vm_page_free_wakeup:
  964  *
  965  *      Helper routine for vm_page_free_toq() and vm_page_cache().  This
  966  *      routine is called when a page has been added to the cache or free
  967  *      queues.
  968  *
  969  *      This routine may not block.
  970  *      This routine must be called at splvm()
  971  */
  972 static __inline void
  973 vm_page_free_wakeup(void)
  974 {
  975 
  976         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
  977         /*
  978          * if pageout daemon needs pages, then tell it that there are
  979          * some free.
  980          */
  981         if (vm_pageout_pages_needed &&
  982             cnt.v_cache_count + cnt.v_free_count >= cnt.v_pageout_free_min) {
  983                 wakeup(&vm_pageout_pages_needed);
  984                 vm_pageout_pages_needed = 0;
  985         }
  986         /*
   987          * Wake up processes that are waiting on memory if we hit a
   988          * high-water mark, and wake up the scheduler process if we
   989          * have lots of memory; that process will swap processes back in.
  990          */
  991         if (vm_pages_needed && !vm_page_count_min()) {
  992                 vm_pages_needed = 0;
  993                 wakeup(&cnt.v_free_count);
  994         }
  995 }
  996 
  997 /*
  998  *      vm_page_free_toq:
  999  *
 1000  *      Returns the given page to the PQ_FREE list,
  1001  *      disassociating it from any VM object.
 1002  *
 1003  *      Object and page must be locked prior to entry.
 1004  *      This routine may not block.
 1005  */
 1006 
 1007 void
 1008 vm_page_free_toq(vm_page_t m)
 1009 {
 1010         int s;
 1011         struct vpgqueues *pq;
 1012         vm_object_t object = m->object;
 1013 
 1014         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1015         s = splvm();
 1016         cnt.v_tfree++;
 1017 
 1018         if (m->busy || ((m->queue - m->pc) == PQ_FREE)) {
 1019                 printf(
 1020                 "vm_page_free: pindex(%lu), busy(%d), PG_BUSY(%d), hold(%d)\n",
 1021                     (u_long)m->pindex, m->busy, (m->flags & PG_BUSY) ? 1 : 0,
 1022                     m->hold_count);
 1023                 if ((m->queue - m->pc) == PQ_FREE)
 1024                         panic("vm_page_free: freeing free page");
 1025                 else
 1026                         panic("vm_page_free: freeing busy page");
 1027         }
 1028 
 1029         /*
 1030          * unqueue, then remove page.  Note that we cannot destroy
 1031          * the page here because we do not want to call the pager's
 1032          * callback routine until after we've put the page on the
 1033          * appropriate free queue.
 1034          */
 1035         vm_pageq_remove_nowakeup(m);
 1036         vm_page_remove(m);
 1037 
 1038         /*
 1039          * If fictitious remove object association and
 1040          * return, otherwise delay object association removal.
 1041          */
 1042         if ((m->flags & PG_FICTITIOUS) != 0) {
 1043                 splx(s);
 1044                 return;
 1045         }
 1046 
 1047         m->valid = 0;
 1048         vm_page_undirty(m);
 1049 
 1050         if (m->wire_count != 0) {
 1051                 if (m->wire_count > 1) {
 1052                         panic("vm_page_free: invalid wire count (%d), pindex: 0x%lx",
 1053                                 m->wire_count, (long)m->pindex);
 1054                 }
  1055                 panic("vm_page_free: freeing wired page");
 1056         }
 1057 
 1058         /*
 1059          * If we've exhausted the object's resident pages we want to free
 1060          * it up.
 1061          */
 1062         if (object && 
 1063             (object->type == OBJT_VNODE) &&
 1064             ((object->flags & OBJ_DEAD) == 0)
 1065         ) {
 1066                 struct vnode *vp = (struct vnode *)object->handle;
 1067 
 1068                 if (vp) {
 1069                         VI_LOCK(vp);
 1070                         if (VSHOULDFREE(vp))
 1071                                 vfree(vp);
 1072                         VI_UNLOCK(vp);
 1073                 }
 1074         }
 1075 
 1076         /*
 1077          * Clear the UNMANAGED flag when freeing an unmanaged page.
 1078          */
 1079         if (m->flags & PG_UNMANAGED) {
 1080                 m->flags &= ~PG_UNMANAGED;
 1081         }
 1082 
 1083         if (m->hold_count != 0) {
 1084                 m->flags &= ~PG_ZERO;
 1085                 m->queue = PQ_HOLD;
 1086         } else
 1087                 m->queue = PQ_FREE + m->pc;
 1088         pq = &vm_page_queues[m->queue];
 1089         mtx_lock_spin(&vm_page_queue_free_mtx);
 1090         pq->lcnt++;
 1091         ++(*pq->cnt);
 1092 
 1093         /*
  1094          * Put zeroed pages at the end (where we look for zeroed pages
  1095          * first) and non-zeroed pages at the head.
 1096          */
 1097         if (m->flags & PG_ZERO) {
 1098                 TAILQ_INSERT_TAIL(&pq->pl, m, pageq);
 1099                 ++vm_page_zero_count;
 1100         } else {
 1101                 TAILQ_INSERT_HEAD(&pq->pl, m, pageq);
 1102         }
 1103         mtx_unlock_spin(&vm_page_queue_free_mtx);
 1104         vm_page_free_wakeup();
 1105         splx(s);
 1106 }
 1107 
 1108 /*
 1109  *      vm_page_unmanage:
 1110  *
 1111  *      Prevent PV management from being done on the page.  The page is
 1112  *      removed from the paging queues as if it were wired, and as a 
 1113  *      consequence of no longer being managed the pageout daemon will not
 1114  *      touch it (since there is no way to locate the pte mappings for the
 1115  *      page).  madvise() calls that mess with the pmap will also no longer
 1116  *      operate on the page.
 1117  *
 1118  *      Beyond that the page is still reasonably 'normal'.  Freeing the page
 1119  *      will clear the flag.
 1120  *
 1121  *      This routine is used by OBJT_PHYS objects - objects using unswappable
  1122  *      physical memory as backing store rather than swap-backed memory and
 1123  *      will eventually be extended to support 4MB unmanaged physical 
 1124  *      mappings.
 1125  */
 1126 void
 1127 vm_page_unmanage(vm_page_t m)
 1128 {
 1129         int s;
 1130 
 1131         s = splvm();
 1132         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1133         if ((m->flags & PG_UNMANAGED) == 0) {
 1134                 if (m->wire_count == 0)
 1135                         vm_pageq_remove(m);
 1136         }
 1137         vm_page_flag_set(m, PG_UNMANAGED);
 1138         splx(s);
 1139 }
 1140 
 1141 /*
 1142  *      vm_page_wire:
 1143  *
 1144  *      Mark this page as wired down by yet
 1145  *      another map, removing it from paging queues
 1146  *      as necessary.
 1147  *
 1148  *      The page queues must be locked.
 1149  *      This routine may not block.
 1150  */
 1151 void
 1152 vm_page_wire(vm_page_t m)
 1153 {
 1154         int s;
 1155 
 1156         /*
 1157          * Only bump the wire statistics if the page is not already wired,
 1158          * and only unqueue the page if it is on some queue (if it is unmanaged
 1159          * it is already off the queues).
 1160          */
 1161         s = splvm();
 1162         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1163         if (m->wire_count == 0) {
 1164                 if ((m->flags & PG_UNMANAGED) == 0)
 1165                         vm_pageq_remove(m);
 1166                 atomic_add_int(&cnt.v_wire_count, 1);
 1167         }
 1168         m->wire_count++;
 1169         KASSERT(m->wire_count != 0, ("vm_page_wire: wire_count overflow m=%p", m));
 1170         splx(s);
 1171 }
 1172 
 1173 /*
 1174  *      vm_page_unwire:
 1175  *
 1176  *      Release one wiring of this page, potentially
 1177  *      enabling it to be paged again.
 1178  *
 1179  *      Many pages placed on the inactive queue should actually go
 1180  *      into the cache, but it is difficult to figure out which.  What
 1181  *      we do instead, if the inactive target is well met, is to put
 1182  *      clean pages at the head of the inactive queue instead of the tail.
 1183  *      This will cause them to be moved to the cache more quickly and
 1184  *      if not actively re-referenced, freed more quickly.  If we just
 1185  *      stick these pages at the end of the inactive queue, heavy filesystem
 1186  *      meta-data accesses can cause an unnecessary paging load on memory bound 
 1187  *      processes.  This optimization causes one-time-use metadata to be
 1188  *      reused more quickly.
 1189  *
 1190  *      BUT, if we are in a low-memory situation we have no choice but to
 1191  *      put clean pages on the cache queue.
 1192  *
 1193  *      A number of routines use vm_page_unwire() to guarantee that the page
 1194  *      will go into either the inactive or active queues, and will NEVER
 1195  *      be placed in the cache - for example, just after dirtying a page.
 1196  *      dirty pages in the cache are not allowed.
 1197  *
 1198  *      The page queues must be locked.
 1199  *      This routine may not block.
 1200  */
 1201 void
 1202 vm_page_unwire(vm_page_t m, int activate)
 1203 {
 1204         int s;
 1205 
 1206         s = splvm();
 1207         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1208         if (m->wire_count > 0) {
 1209                 m->wire_count--;
 1210                 if (m->wire_count == 0) {
 1211                         atomic_subtract_int(&cnt.v_wire_count, 1);
 1212                         if (m->flags & PG_UNMANAGED) {
 1213                                 ;
 1214                         } else if (activate)
 1215                                 vm_pageq_enqueue(PQ_ACTIVE, m);
 1216                         else {
 1217                                 vm_page_flag_clear(m, PG_WINATCFLS);
 1218                                 vm_pageq_enqueue(PQ_INACTIVE, m);
 1219                         }
 1220                 }
 1221         } else {
  1222                 panic("vm_page_unwire: invalid wire count: %d", m->wire_count);
 1223         }
 1224         splx(s);
 1225 }
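
       /*
        * Wirings nest: every vm_page_wire() must be balanced by a
        * vm_page_unwire().  A hypothetical sketch under the queues lock:
        *
        *      vm_page_lock_queues();
        *      vm_page_wire(m);        (removes m from its paging queue)
        *      vm_page_unlock_queues();
        *      ... the page cannot be paged out here ...
        *      vm_page_lock_queues();
        *      vm_page_unwire(m, 0);   (0: requeue as inactive, not active)
        *      vm_page_unlock_queues();
        */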
 1226 
 1227 
 1228 /*
 1229  * Move the specified page to the inactive queue.  If the page has
 1230  * any associated swap, the swap is deallocated.
 1231  *
 1232  * Normally athead is 0 resulting in LRU operation.  athead is set
 1233  * to 1 if we want this page to be 'as if it were placed in the cache',
 1234  * except without unmapping it from the process address space.
 1235  *
 1236  * This routine may not block.
 1237  */
 1238 static __inline void
 1239 _vm_page_deactivate(vm_page_t m, int athead)
 1240 {
 1241         int s;
 1242 
 1243         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1244         /*
 1245          * Ignore if already inactive.
 1246          */
 1247         if (m->queue == PQ_INACTIVE)
 1248                 return;
 1249 
 1250         s = splvm();
 1251         if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
 1252                 if ((m->queue - m->pc) == PQ_CACHE)
 1253                         cnt.v_reactivated++;
 1254                 vm_page_flag_clear(m, PG_WINATCFLS);
 1255                 vm_pageq_remove(m);
 1256                 if (athead)
 1257                         TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
 1258                 else
 1259                         TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
 1260                 m->queue = PQ_INACTIVE;
 1261                 vm_page_queues[PQ_INACTIVE].lcnt++;
 1262                 cnt.v_inactive_count++;
 1263         }
 1264         splx(s);
 1265 }
 1266 
 1267 void
 1268 vm_page_deactivate(vm_page_t m)
 1269 {
 1270     _vm_page_deactivate(m, 0);
 1271 }
 1272 
 1273 /*
 1274  * vm_page_try_to_cache:
 1275  *
 1276  * Returns 0 on failure, 1 on success
 1277  */
 1278 int
 1279 vm_page_try_to_cache(vm_page_t m)
 1280 {
 1281 
 1282         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1283         if (m->dirty || m->hold_count || m->busy || m->wire_count ||
 1284             (m->flags & (PG_BUSY|PG_UNMANAGED))) {
 1285                 return (0);
 1286         }
 1287         vm_page_test_dirty(m);
 1288         if (m->dirty)
 1289                 return (0);
 1290         vm_page_cache(m);
 1291         return (1);
 1292 }
 1293 
 1294 /*
 1295  * vm_page_try_to_free()
 1296  *
 1297  *      Attempt to free the page.  If we cannot free it, we do nothing.
 1298  *      1 is returned on success, 0 on failure.
 1299  */
 1300 int
 1301 vm_page_try_to_free(vm_page_t m)
 1302 {
 1303 
 1304         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1305         if (m->object != NULL)
 1306                 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 1307         if (m->dirty || m->hold_count || m->busy || m->wire_count ||
 1308             (m->flags & (PG_BUSY|PG_UNMANAGED))) {
 1309                 return (0);
 1310         }
 1311         vm_page_test_dirty(m);
 1312         if (m->dirty)
 1313                 return (0);
 1314         vm_page_busy(m);
 1315         pmap_remove_all(m);
 1316         vm_page_free(m);
 1317         return (1);
 1318 }
 1319 
 1320 /*
 1321  * vm_page_cache
 1322  *
 1323  * Put the specified page onto the page cache queue (if appropriate).
 1324  *
 1325  * This routine may not block.
 1326  */
 1327 void
 1328 vm_page_cache(vm_page_t m)
 1329 {
 1330         int s;
 1331 
 1332         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1333         if ((m->flags & (PG_BUSY|PG_UNMANAGED)) || m->busy ||
 1334             m->hold_count || m->wire_count) {
 1335                 printf("vm_page_cache: attempting to cache busy page\n");
 1336                 return;
 1337         }
 1338         if ((m->queue - m->pc) == PQ_CACHE)
 1339                 return;
 1340 
 1341         /*
 1342          * Remove all pmaps and indicate that the page is not
 1343          * writeable or mapped.
 1344          */
 1345         pmap_remove_all(m);
 1346         if (m->dirty != 0) {
 1347                 panic("vm_page_cache: caching a dirty page, pindex: %ld",
 1348                         (long)m->pindex);
 1349         }
 1350         s = splvm();
 1351         vm_pageq_remove_nowakeup(m);
 1352         vm_pageq_enqueue(PQ_CACHE + m->pc, m);
 1353         vm_page_free_wakeup();
 1354         splx(s);
 1355 }
 1356 
 1357 /*
 1358  * vm_page_dontneed
 1359  *
 1360  *      Cache, deactivate, or do nothing as appropriate.  This routine
 1361  *      is typically used by madvise() MADV_DONTNEED.
 1362  *
 1363  *      Generally speaking we want to move the page into the cache so
 1364  *      it gets reused quickly.  However, this can result in a silly syndrome
 1365  *      due to the page recycling too quickly.  Small objects will not be
  1366  *      fully cached.  On the other hand, if we move the page to the inactive
 1367  *      queue we wind up with a problem whereby very large objects 
 1368  *      unnecessarily blow away our inactive and cache queues.
 1369  *
 1370  *      The solution is to move the pages based on a fixed weighting.  We
 1371  *      either leave them alone, deactivate them, or move them to the cache,
 1372  *      where moving them to the cache has the highest weighting.
 1373  *      By forcing some pages into other queues we eventually force the
 1374  *      system to balance the queues, potentially recovering other unrelated
 1375  *      space from active.  The idea is to not force this to happen too
 1376  *      often.
 1377  */
 1378 void
 1379 vm_page_dontneed(vm_page_t m)
 1380 {
 1381         static int dnweight;
 1382         int dnw;
 1383         int head;
 1384 
 1385         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1386         dnw = ++dnweight;
 1387 
 1388         /*
  1389          * Occasionally leave the page alone.
 1390          */
 1391         if ((dnw & 0x01F0) == 0 ||
 1392             m->queue == PQ_INACTIVE || 
 1393             m->queue - m->pc == PQ_CACHE
 1394         ) {
 1395                 if (m->act_count >= ACT_INIT)
 1396                         --m->act_count;
 1397                 return;
 1398         }
 1399 
 1400         if (m->dirty == 0)
 1401                 vm_page_test_dirty(m);
 1402 
 1403         if (m->dirty || (dnw & 0x0070) == 0) {
 1404                 /*
 1405                  * Deactivate the page 3 times out of 32.
 1406                  */
 1407                 head = 0;
 1408         } else {
 1409                 /*
 1410                  * Cache the page 28 times out of every 32.  Note that
 1411                  * the page is deactivated instead of cached, but placed
 1412                  * at the head of the queue instead of the tail.
 1413                  */
 1414                 head = 1;
 1415         }
 1416         _vm_page_deactivate(m, head);
 1417 }
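
       /*
        * Checking the arithmetic on the weighting above: dnw increments
        * once per call.  Bits 4-8 (0x01F0) are all zero for 16 of every
        * 512 values, so the page is left alone about 1 call in 32.  Bits
        * 4-6 (0x0070) are all zero for 16 of every 128 values, i.e. 4
        * calls in 32; one of those coincides with the leave-alone case,
        * so a clean page is deactivated on 3 of 32 calls and pushed to
        * the head of the inactive queue on the remaining 28.
        */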
 1418 
 1419 /*
  1420  * Grab a page, waiting until we are woken up due to the page
  1421  * changing state.  We keep waiting as long as the page remains
  1422  * busy in the object.  If the page doesn't exist, allocate it.
 1423  *
 1424  * This routine may block.
 1425  */
 1426 vm_page_t
 1427 vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
 1428 {
 1429         vm_page_t m;
 1430         int s, generation;
 1431 
 1432         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
 1433 retrylookup:
 1434         if ((m = vm_page_lookup(object, pindex)) != NULL) {
 1435                 vm_page_lock_queues();
 1436                 if (m->busy || (m->flags & PG_BUSY)) {
 1437                         generation = object->generation;
 1438 
 1439                         s = splvm();
 1440                         while ((object->generation == generation) &&
 1441                                         (m->busy || (m->flags & PG_BUSY))) {
 1442                                 vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
 1443                                 VM_OBJECT_UNLOCK(object);
 1444                                 msleep(m, &vm_page_queue_mtx, PDROP | PVM, "pgrbwt", 0);
 1445                                 VM_OBJECT_LOCK(object);
 1446                                 if ((allocflags & VM_ALLOC_RETRY) == 0) {
 1447                                         splx(s);
 1448                                         return NULL;
 1449                                 }
 1450                                 vm_page_lock_queues();
 1451                         }
 1452                         vm_page_unlock_queues();
 1453                         splx(s);
 1454                         goto retrylookup;
 1455                 } else {
 1456                         if (allocflags & VM_ALLOC_WIRED)
 1457                                 vm_page_wire(m);
 1458                         vm_page_busy(m);
 1459                         vm_page_unlock_queues();
 1460                         return m;
 1461                 }
 1462         }
 1463 
 1464         m = vm_page_alloc(object, pindex, allocflags & ~VM_ALLOC_RETRY);
 1465         if (m == NULL) {
 1466                 VM_OBJECT_UNLOCK(object);
 1467                 VM_WAIT;
 1468                 VM_OBJECT_LOCK(object);
 1469                 if ((allocflags & VM_ALLOC_RETRY) == 0)
 1470                         return NULL;
 1471                 goto retrylookup;
 1472         }
 1473 
 1474         return m;
 1475 }
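
       /*
        * A hypothetical grab of a wired page (sketch only; the object
        * lock must be held on entry, and with VM_ALLOC_RETRY the call
        * loops until it can return a page):
        *
        *      m = vm_page_grab(object, pindex,
        *          VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_RETRY);
        */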
 1476 
 1477 /*
 1478  * Mapping function for valid bits or for dirty bits in
 1479  * a page.  May not block.
 1480  *
 1481  * Inputs are required to range within a page.
 1482  */
 1483 __inline int
 1484 vm_page_bits(int base, int size)
 1485 {
 1486         int first_bit;
 1487         int last_bit;
 1488 
 1489         KASSERT(
 1490             base + size <= PAGE_SIZE,
 1491             ("vm_page_bits: illegal base/size %d/%d", base, size)
 1492         );
 1493 
 1494         if (size == 0)          /* handle degenerate case */
 1495                 return (0);
 1496 
 1497         first_bit = base >> DEV_BSHIFT;
 1498         last_bit = (base + size - 1) >> DEV_BSHIFT;
 1499 
 1500         return ((2 << last_bit) - (1 << first_bit));
 1501 }
 1502 
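      /*
       * Worked example (editor's addition): with DEV_BSIZE = 512
       * (DEV_BSHIFT = 9) and PAGE_SIZE = 4096, a page holds eight
       * DEV_BSIZE chunks, one bit each.  For base = 512, size = 1024:
       *
       *      first_bit = 512 >> 9              = 1
       *      last_bit  = (512 + 1024 - 1) >> 9 = 2
       *      result    = (2 << 2) - (1 << 1)   = 8 - 2 = 0x06
       *
       * i.e. bits 1 and 2 are set, one per DEV_BSIZE chunk touched
       * by the range.
       */
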
 1503 /*
 1504  *      vm_page_set_validclean:
 1505  *
 1506  *      Sets portions of a page valid and clean.  The arguments are expected
 1507  *      to be DEV_BSIZE aligned, but if they aren't, the bitmap is inclusive
 1508  *      of any partial chunks touched by the range.  The invalid portion of
 1509  *      such chunks will be zeroed.
 1510  *
 1511  *      This routine may not block.
 1512  *
 1513  *      (base + size) must be less than or equal to PAGE_SIZE.
 1514  */
 1515 void
 1516 vm_page_set_validclean(vm_page_t m, int base, int size)
 1517 {
 1518         int pagebits;
 1519         int frag;
 1520         int endoff;
 1521 
 1522         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1523         VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 1524         if (size == 0)  /* handle degenerate case */
 1525                 return;
 1526 
 1527         /*
 1528          * If the base is not DEV_BSIZE aligned and the valid
 1529          * bit is clear, we have to zero out a portion of the
 1530          * first block.
 1531          */
 1532         if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
 1533             (m->valid & (1 << (base >> DEV_BSHIFT))) == 0)
 1534                 pmap_zero_page_area(m, frag, base - frag);
 1535 
 1536         /*
 1537          * If the ending offset is not DEV_BSIZE aligned and the 
 1538          * valid bit is clear, we have to zero out a portion of
 1539          * the last block.
 1540          */
 1541         endoff = base + size;
 1542         if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
 1543             (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0)
 1544                 pmap_zero_page_area(m, endoff,
 1545                     DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
 1546 
 1547         /*
 1548          * Set valid, clear dirty bits.  If validating the entire
 1549          * page we can safely clear the pmap modify bit.  We also
 1550          * use this opportunity to clear the PG_NOSYNC flag.  If a process
 1551          * takes a write fault on a MAP_NOSYNC memory area the flag will
 1552          * be set again.
 1553          *
 1554          * We set valid bits inclusive of any overlap, but we can only
 1555          * clear dirty bits for DEV_BSIZE chunks that are fully within
 1556          * the range.
 1557          */
 1558         pagebits = vm_page_bits(base, size);
 1559         m->valid |= pagebits;
 1560 #if 0   /* NOT YET */
 1561         if ((frag = base & (DEV_BSIZE - 1)) != 0) {
 1562                 frag = DEV_BSIZE - frag;
 1563                 base += frag;
 1564                 size -= frag;
 1565                 if (size < 0)
 1566                         size = 0;
 1567         }
 1568         pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1));
 1569 #endif
 1570         m->dirty &= ~pagebits;
 1571         if (base == 0 && size == PAGE_SIZE) {
 1572                 pmap_clear_modify(m);
 1573                 vm_page_flag_clear(m, PG_NOSYNC);
 1574         }
 1575 }
 1576 
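      /*
       * Example of the partial-block zeroing above (editor's addition,
       * with hypothetical arguments): given DEV_BSIZE = 512, the call
       *
       *      vm_page_set_validclean(m, 100, 400);
       *
       * touches only block 0.  If block 0's valid bit is clear, bytes
       * 0-99 (frag = 0, base - frag = 100) and bytes 500-511 (endoff =
       * 500, DEV_BSIZE - (500 & 511) = 12) are zeroed first, so that
       * marking the whole block valid does not expose stale data.
       */
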
 1577 void
 1578 vm_page_clear_dirty(vm_page_t m, int base, int size)
 1579 {
 1580 
 1581         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1582         m->dirty &= ~vm_page_bits(base, size);
 1583 }
 1584 
 1585 /*
 1586  *      vm_page_set_invalid:
 1587  *
 1588  *      Invalidates DEV_BSIZE'd chunks within a page.  Both the
 1589  *      valid and dirty bits for the affected areas are cleared.
 1590  *
 1591  *      May not block.
 1592  */
 1593 void
 1594 vm_page_set_invalid(vm_page_t m, int base, int size)
 1595 {
 1596         int bits;
 1597 
 1598         VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 1599         bits = vm_page_bits(base, size);
 1600         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1601         m->valid &= ~bits;
 1602         m->dirty &= ~bits;
 1603         m->object->generation++;
 1604 }
 1605 
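      /*
       * Example (editor's addition): with DEV_BSIZE = 512, the call
       * vm_page_set_invalid(m, 1024, 512) clears valid and dirty bit 2
       * only.  The generation bump also kicks any vm_page_grab() caller
       * sleeping on a page of this object out of its wait loop, forcing
       * it to redo the lookup.
       */
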
 1606 /*
 1607  * vm_page_zero_invalid()
 1608  *
 1609  *      The kernel assumes that the invalid portions of a page contain 
 1610  *      garbage, but such pages can be mapped into memory by user code.
 1611  *      When this occurs, we must zero out the non-valid portions of the
 1612  *      page so user code sees what it expects.
 1613  *
 1614  *      Pages are most often semi-valid when the end of a file is mapped 
 1615  *      into memory and the file's size is not page aligned.
 1616  */
 1617 void
 1618 vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
 1619 {
 1620         int b;
 1621         int i;
 1622 
 1623         VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 1624         /*
 1625          * Scan the valid bits looking for invalid sections that
 1626          * must be zeroed.  Invalid sub-DEV_BSIZE'd areas (where the
 1627          * valid bit may be set) have already been zeroed by
 1628          * vm_page_set_validclean().
 1629          */
 1630         for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
 1631                 if (i == (PAGE_SIZE / DEV_BSIZE) || 
 1632                     (m->valid & (1 << i))
 1633                 ) {
 1634                         if (i > b) {
 1635                                 pmap_zero_page_area(m, 
 1636                                     b << DEV_BSHIFT, (i - b) << DEV_BSHIFT);
 1637                         }
 1638                         b = i + 1;
 1639                 }
 1640         }
 1641 
 1642         /*
 1643          * setvalid is TRUE when we can safely set the zeroed areas
 1644          * as being valid.  We can do this if there are no cache consistency
 1645          * issues, e.g., it is ok to do with UFS, but not with NFS.
 1646          */
 1647         if (setvalid)
 1648                 m->valid = VM_PAGE_BITS_ALL;
 1649 }
 1650 
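      /*
       * Example (editor's addition): suppose a 4096-byte page backs the
       * last 1300 bytes of a file and DEV_BSIZE = 512, so blocks 0-2
       * are valid (m->valid == 0x07).  The scan above finds the invalid
       * run from b = 3 to the terminating index i = 8 and zeroes bytes
       * 1536-4095 with a single pmap_zero_page_area() call.
       */
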
 1651 /*
 1652  *      vm_page_is_valid:
 1653  *
 1654  *      Is (partial) page valid?  Note that in the degenerate case
 1655  *      where size == 0, this returns FALSE if the page is entirely
 1656  *      invalid and TRUE otherwise.
 1657  *
 1658  *      May not block.
 1659  */
 1660 int
 1661 vm_page_is_valid(vm_page_t m, int base, int size)
 1662 {
 1663         int bits = vm_page_bits(base, size);
 1664 
 1665         VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 1666         if (m->valid && ((m->valid & bits) == bits))
 1667                 return 1;
 1668         else
 1669                 return 0;
 1670 }
 1671 
 1672 /*
 1673  * update dirty bits from pmap/mmu.  May not block.
 1674  */
 1675 void
 1676 vm_page_test_dirty(vm_page_t m)
 1677 {
 1678         if ((m->dirty != VM_PAGE_BITS_ALL) && pmap_is_modified(m)) {
 1679                 vm_page_dirty(m);
 1680         }
 1681 }
 1682 
 1683 int so_zerocp_fullpage = 0;
 1684 
 1685 void
 1686 vm_page_cowfault(vm_page_t m)
 1687 {
 1688         vm_page_t mnew;
 1689         vm_object_t object;
 1690         vm_pindex_t pindex;
 1691 
 1692         object = m->object;
 1693         pindex = m->pindex;
 1694         vm_page_busy(m);
 1695 
 1696  retry_alloc:
 1697         vm_page_remove(m);
 1698         /*
 1699          * An interrupt allocation is requested because the page
 1700          * queues lock is held. 
 1701          */
 1702         mnew = vm_page_alloc(object, pindex, VM_ALLOC_INTERRUPT);
 1703         if (mnew == NULL) {
 1704                 vm_page_insert(m, object, pindex);
 1705                 vm_page_unlock_queues();
 1706                 VM_OBJECT_UNLOCK(object);
 1707                 VM_WAIT;
 1708                 VM_OBJECT_LOCK(object);
 1709                 vm_page_lock_queues();
 1710                 goto retry_alloc;
 1711         }
 1712 
 1713         if (m->cow == 0) {
 1714                 /*
 1715                  * Check to see if we raced with an xmit complete while
 1716                  * waiting to allocate a page.  If so, put things back
 1717                  * the way they were.
 1718                  */
 1719                 vm_page_busy(mnew);
 1720                 vm_page_free(mnew);
 1721                 vm_page_insert(m, object, pindex);
 1722         } else { /* clear COW & copy page */
 1723                 if (!so_zerocp_fullpage)
 1724                         pmap_copy_page(m, mnew);
 1725                 mnew->valid = VM_PAGE_BITS_ALL;
 1726                 vm_page_dirty(mnew);
 1727                 vm_page_flag_clear(mnew, PG_BUSY);
 1728         }
 1729 }
 1730 
 1731 void 
 1732 vm_page_cowclear(vm_page_t m)
 1733 {
 1734 
 1735         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1736         if (m->cow) {
 1737                 m->cow--;
 1738                 /*
 1739                  * Let vm_fault add back write permission lazily.
 1740                  */
 1741         }
 1742         /*
 1743          * sf_buf_free() will free the page, so we needn't do it here.
 1744          */
 1745 }
 1746 
 1747 void
 1748 vm_page_cowsetup(vm_page_t m)
 1749 {
 1750 
 1751         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1752         m->cow++;
 1753         pmap_page_protect(m, VM_PROT_READ);
 1754 }
 1755 
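      /*
       * Lifecycle sketch for the three COW helpers above (editor's
       * addition; the transmit-path placement reflects the zero-copy
       * socket-send code and is stated here only as an assumption):
       * a page is write-protected before being handed to the network
       * stack and released when the transmit completes, while
       * vm_page_cowfault() copies it only if the process writes to it
       * while a transmit is still in flight (m->cow != 0).
       *
       *      vm_page_lock_queues();
       *      vm_page_cowsetup(m);    -- before queuing m for transmit
       *      vm_page_unlock_queues();
       *      ...
       *      vm_page_lock_queues();
       *      vm_page_cowclear(m);    -- when the transmit completes
       *      vm_page_unlock_queues();
       */
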
 1756 #include "opt_ddb.h"
 1757 #ifdef DDB
 1758 #include <sys/kernel.h>
 1759 
 1760 #include <ddb/ddb.h>
 1761 
 1762 DB_SHOW_COMMAND(page, vm_page_print_page_info)
 1763 {
 1764         db_printf("cnt.v_free_count: %d\n", cnt.v_free_count);
 1765         db_printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
 1766         db_printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
 1767         db_printf("cnt.v_active_count: %d\n", cnt.v_active_count);
 1768         db_printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
 1769         db_printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
 1770         db_printf("cnt.v_free_min: %d\n", cnt.v_free_min);
 1771         db_printf("cnt.v_free_target: %d\n", cnt.v_free_target);
 1772         db_printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
 1773         db_printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
 1774 }
 1775 
 1776 DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
 1777 {
 1778         int i;
 1779         db_printf("PQ_FREE:");
 1780         for (i = 0; i < PQ_L2_SIZE; i++) {
 1781                 db_printf(" %d", vm_page_queues[PQ_FREE + i].lcnt);
 1782         }
 1783         db_printf("\n");
 1784 
 1785         db_printf("PQ_CACHE:");
 1786         for (i = 0; i < PQ_L2_SIZE; i++) {
 1787                 db_printf(" %d", vm_page_queues[PQ_CACHE + i].lcnt);
 1788         }
 1789         db_printf("\n");
 1790 
 1791         db_printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n",
 1792                 vm_page_queues[PQ_ACTIVE].lcnt,
 1793                 vm_page_queues[PQ_INACTIVE].lcnt);
 1794 }
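
      /*
       * Usage note (editor's addition): DB_SHOW_COMMAND() registers the
       * handlers above under the debugger's "show" verb, so from the
       * ddb prompt they are invoked as:
       *
       *      db> show page
       *      db> show pageq
       */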
 1795 #endif /* DDB */
