FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_pageout.c


    1 /*
    2  * Copyright (c) 1991 Regents of the University of California.
    3  * All rights reserved.
    4  * Copyright (c) 1994 John S. Dyson
    5  * All rights reserved.
    6  * Copyright (c) 1994 David Greenman
    7  * All rights reserved.
    8  *
    9  * This code is derived from software contributed to Berkeley by
   10  * The Mach Operating System project at Carnegie-Mellon University.
   11  *
   12  * Redistribution and use in source and binary forms, with or without
   13  * modification, are permitted provided that the following conditions
   14  * are met:
   15  * 1. Redistributions of source code must retain the above copyright
   16  *    notice, this list of conditions and the following disclaimer.
   17  * 2. Redistributions in binary form must reproduce the above copyright
   18  *    notice, this list of conditions and the following disclaimer in the
   19  *    documentation and/or other materials provided with the distribution.
   20  * 3. All advertising materials mentioning features or use of this software
   21  *    must display the following acknowledgement:
   22  *      This product includes software developed by the University of
   23  *      California, Berkeley and its contributors.
   24  * 4. Neither the name of the University nor the names of its contributors
   25  *    may be used to endorse or promote products derived from this software
   26  *    without specific prior written permission.
   27  *
   28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   38  * SUCH DAMAGE.
   39  *
   40  *      from: @(#)vm_pageout.c  7.4 (Berkeley) 5/7/91
   41  *
   42  *
   43  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
   44  * All rights reserved.
   45  *
   46  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
   47  *
   48  * Permission to use, copy, modify and distribute this software and
   49  * its documentation is hereby granted, provided that both the copyright
   50  * notice and this permission notice appear in all copies of the
   51  * software, derivative works or modified versions, and any portions
   52  * thereof, and that both notices appear in supporting documentation.
   53  *
   54  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
   55  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
   56  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
   57  *
   58  * Carnegie Mellon requests users of this software to return to
   59  *
   60  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
   61  *  School of Computer Science
   62  *  Carnegie Mellon University
   63  *  Pittsburgh PA 15213-3890
   64  *
   65  * any improvements or extensions that they make and grant Carnegie the
   66  * rights to redistribute these changes.
   67  */
   68 
   69 /*
   70  *      The proverbial page-out daemon.
   71  */
   72 
   73 #include <sys/cdefs.h>
   74 __FBSDID("$FreeBSD: releng/5.2/sys/vm/vm_pageout.c 121455 2003-10-24 06:43:04Z alc $");
   75 
   76 #include "opt_vm.h"
   77 #include <sys/param.h>
   78 #include <sys/systm.h>
   79 #include <sys/kernel.h>
   80 #include <sys/eventhandler.h>
   81 #include <sys/lock.h>
   82 #include <sys/mutex.h>
   83 #include <sys/proc.h>
   84 #include <sys/kthread.h>
   85 #include <sys/ktr.h>
   86 #include <sys/resourcevar.h>
   87 #include <sys/sched.h>
   88 #include <sys/signalvar.h>
   89 #include <sys/vnode.h>
   90 #include <sys/vmmeter.h>
   91 #include <sys/sx.h>
   92 #include <sys/sysctl.h>
   93 
   94 #include <vm/vm.h>
   95 #include <vm/vm_param.h>
   96 #include <vm/vm_object.h>
   97 #include <vm/vm_page.h>
   98 #include <vm/vm_map.h>
   99 #include <vm/vm_pageout.h>
  100 #include <vm/vm_pager.h>
  101 #include <vm/swap_pager.h>
  102 #include <vm/vm_extern.h>
  103 #include <vm/uma.h>
  104 
  105 #include <machine/mutex.h>
  106 
  107 /*
  108  * System initialization
  109  */
  110 
   111 /* the kernel process "vm_pageout" */
  112 static void vm_pageout(void);
  113 static int vm_pageout_clean(vm_page_t);
  114 static void vm_pageout_pmap_collect(void);
  115 static void vm_pageout_scan(int pass);
  116 
  117 struct proc *pageproc;
  118 
  119 static struct kproc_desc page_kp = {
  120         "pagedaemon",
  121         vm_pageout,
  122         &pageproc
  123 };
  124 SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)
  125 
  126 #if !defined(NO_SWAPPING)
   127 /* the kernel process "vm_daemon" */
  128 static void vm_daemon(void);
  129 static struct   proc *vmproc;
  130 
  131 static struct kproc_desc vm_kp = {
  132         "vmdaemon",
  133         vm_daemon,
  134         &vmproc
  135 };
  136 SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
  137 #endif
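
/*
 * An illustrative sketch (not part of the indexed source): the
 * kproc_desc/SYSINIT pairing above is the stock recipe for starting a
 * kernel process at boot -- kproc_start() receives the descriptor and
 * forks the named process running the given entry function.  All names
 * below are invented for illustration.
 */
#if 0   /* illustrative only */
static struct proc *example_proc;

static void
example_daemon(void)
{
        for (;;) {
                /* Do periodic work, then sleep for roughly one second. */
                tsleep(&example_proc, PVM, "exdaemon", hz);
        }
}

static struct kproc_desc example_kp = {
        "exampledaemon",        /* process name, as shown by ps(1) */
        example_daemon,         /* entry point */
        &example_proc           /* where kproc_start() stores the new proc */
};
SYSINIT(exampledaemon, SI_SUB_KTHREAD_VM, SI_ORDER_ANY, kproc_start, &example_kp)
#endif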
  138 
  139 
  140 int vm_pages_needed;            /* Event on which pageout daemon sleeps */
   141 int vm_pageout_deficit;         /* Estimated number of pages in deficit */
   142 int vm_pageout_pages_needed;    /* Flag: the pageout daemon needs pages */
  143 
  144 #if !defined(NO_SWAPPING)
  145 static int vm_pageout_req_swapout;      /* XXX */
  146 static int vm_daemon_needed;
  147 #endif
  148 static int vm_max_launder = 32;
   149 static int vm_pageout_stats_max = 0, vm_pageout_stats_interval = 0;
  150 static int vm_pageout_full_stats_interval = 0;
   151 static int vm_pageout_stats_free_max = 0, vm_pageout_algorithm = 0;
   152 static int defer_swap_pageouts = 0;
   153 static int disable_swap_pageouts = 0;
  154 
  155 #if defined(NO_SWAPPING)
   156 static int vm_swap_enabled = 0;
   157 static int vm_swap_idle_enabled = 0;
   158 #else
   159 static int vm_swap_enabled = 1;
   160 static int vm_swap_idle_enabled = 0;
  161 #endif
  162 
  163 SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
  164         CTLFLAG_RW, &vm_pageout_algorithm, 0, "LRU page mgmt");
  165 
  166 SYSCTL_INT(_vm, OID_AUTO, max_launder,
  167         CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");
  168 
  169 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
  170         CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");
  171 
  172 SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
  173         CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");
  174 
  175 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
  176         CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");
  177 
  178 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
  179         CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");
  180 
  181 #if defined(NO_SWAPPING)
  182 SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
  183         CTLFLAG_RD, &vm_swap_enabled, 0, "");
  184 SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
  185         CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
  186 #else
  187 SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
  188         CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
  189 SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
  190         CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
  191 #endif
  192 
  193 SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
  194         CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");
  195 
  196 SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
  197         CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");
  198 
  199 static int pageout_lock_miss;
  200 SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
  201         CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");
  202 
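/*
 * All of the CTLFLAG_RW knobs above are runtime-tunable with sysctl(8);
 * for example (values here are illustrative, not recommendations):
 *
 *      sysctl vm.max_launder=64        # permit more dirty flushes per scan
 *      sysctl vm.pageout_algorithm=1   # favor strict LRU page management
 *      sysctl vm.pageout_lock_miss     # read-only: vget() misses in pageout
 */
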
  203 #define VM_PAGEOUT_PAGE_COUNT 16
  204 int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;
  205 
  206 int vm_page_max_wired;          /* XXX max # of wired pages system-wide */
  207 
  208 #if !defined(NO_SWAPPING)
  209 static void vm_pageout_map_deactivate_pages(vm_map_t, long);
  210 static void vm_pageout_object_deactivate_pages(pmap_t, vm_object_t, long);
  211 static void vm_req_vmdaemon(void);
  212 #endif
  213 static void vm_pageout_page_stats(void);
  214 
  215 /*
  216  * vm_pageout_clean:
  217  *
  218  * Clean the page and remove it from the laundry.
  219  * 
  220  * We set the busy bit to cause potential page faults on this page to
   221  * block.  Note the careful timing, however: the busy bit isn't set until
   222  * late, and until then we cannot do anything that would mess with the page.
  223  */
  224 static int
  225 vm_pageout_clean(m)
  226         vm_page_t m;
  227 {
  228         vm_object_t object;
  229         vm_page_t mc[2*vm_pageout_page_count];
  230         int pageout_count;
  231         int ib, is, page_base;
  232         vm_pindex_t pindex = m->pindex;
  233 
  234         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
  235         VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
  236 
  237         /*
  238          * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
  239          * with the new swapper, but we could have serious problems paging
  240          * out other object types if there is insufficient memory.  
  241          *
  242          * Unfortunately, checking free memory here is far too late, so the
  243          * check has been moved up a procedural level.
  244          */
  245 
  246         /*
  247          * Don't mess with the page if it's busy, held, or special
  248          */
  249         if ((m->hold_count != 0) ||
  250             ((m->busy != 0) || (m->flags & (PG_BUSY|PG_UNMANAGED)))) {
  251                 return 0;
  252         }
  253 
  254         mc[vm_pageout_page_count] = m;
  255         pageout_count = 1;
  256         page_base = vm_pageout_page_count;
  257         ib = 1;
  258         is = 1;
  259 
  260         /*
  261          * Scan object for clusterable pages.
  262          *
   263  * We can cluster ONLY if the page is NOT
   264  * clean, wired, busy, held, or mapped into a
   265  * buffer, and one of the following holds:
   266  * 1) The page is inactive, or a seldom used
   267  *    active page.
   268  * -or-
   269  * 2) we force the issue.
   270  *
   271  * During heavy mmap/modification loads the pageout
   272  * daemon can really fragment the underlying file
   273  * due to flushing pages out of order and not trying
   274  * to align the clusters (which leaves sporadic out-of-order
   275  * holes).  To solve this problem we do the reverse scan
   276  * first and attempt to align our cluster, then do a
   277  * forward scan if room remains.
  278          */
  279         object = m->object;
  280 more:
  281         while (ib && pageout_count < vm_pageout_page_count) {
  282                 vm_page_t p;
  283 
  284                 if (ib > pindex) {
  285                         ib = 0;
  286                         break;
  287                 }
  288 
  289                 if ((p = vm_page_lookup(object, pindex - ib)) == NULL) {
  290                         ib = 0;
  291                         break;
  292                 }
  293                 if (((p->queue - p->pc) == PQ_CACHE) ||
  294                     (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
  295                         ib = 0;
  296                         break;
  297                 }
  298                 vm_page_test_dirty(p);
  299                 if ((p->dirty & p->valid) == 0 ||
  300                     p->queue != PQ_INACTIVE ||
  301                     p->wire_count != 0 ||       /* may be held by buf cache */
  302                     p->hold_count != 0) {       /* may be undergoing I/O */
  303                         ib = 0;
  304                         break;
  305                 }
  306                 mc[--page_base] = p;
  307                 ++pageout_count;
  308                 ++ib;
  309                 /*
   310                  * alignment boundary; stop here and switch directions.  Do
  311                  * not clear ib.
  312                  */
  313                 if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
  314                         break;
  315         }
  316 
  317         while (pageout_count < vm_pageout_page_count && 
  318             pindex + is < object->size) {
  319                 vm_page_t p;
  320 
  321                 if ((p = vm_page_lookup(object, pindex + is)) == NULL)
  322                         break;
  323                 if (((p->queue - p->pc) == PQ_CACHE) ||
  324                     (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
  325                         break;
  326                 }
  327                 vm_page_test_dirty(p);
  328                 if ((p->dirty & p->valid) == 0 ||
  329                     p->queue != PQ_INACTIVE ||
  330                     p->wire_count != 0 ||       /* may be held by buf cache */
  331                     p->hold_count != 0) {       /* may be undergoing I/O */
  332                         break;
  333                 }
  334                 mc[page_base + pageout_count] = p;
  335                 ++pageout_count;
  336                 ++is;
  337         }
  338 
  339         /*
  340          * If we exhausted our forward scan, continue with the reverse scan
   341          * when possible, even past a page boundary.  This catches boundary
  342          * conditions.
  343          */
  344         if (ib && pageout_count < vm_pageout_page_count)
  345                 goto more;
  346 
  347         /*
   348          * We allow reads during pageouts...
  349          */
  350         return (vm_pageout_flush(&mc[page_base], pageout_count, 0));
  351 }
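
/*
 * An illustrative sketch (not part of the indexed source) of the
 * cluster-alignment rule in the reverse scan above: we step backwards from
 * the dirty page until the just-added page index lands on a
 * vm_pageout_page_count boundary, so clusters tend to start at aligned
 * file offsets.  The pindex value is invented for illustration.
 */
#if 0   /* illustrative only; compiles standalone in userland */
#include <stdio.h>

#define CLUSTER 16                      /* stands in for vm_pageout_page_count */

int
main(void)
{
        unsigned pindex = 37, ib;

        /* Mirrors the (pindex - (ib - 1)) % vm_pageout_page_count test. */
        for (ib = 1; ib <= pindex; ib++)
                if ((pindex - (ib - 1)) % CLUSTER == 0)
                        break;
        printf("reverse scan would stop at pindex %u\n", pindex - (ib - 1));
        return (0);
}
#endif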
  352 
  353 /*
  354  * vm_pageout_flush() - launder the given pages
  355  *
   356  *      The given pages are laundered.  Note that we set up for the start of
   357  *      I/O (i.e., busy the page), mark it read-only, and bump the object
   358  *      reference count all in here rather than in the parent.  If we want
  359  *      the parent to do more sophisticated things we may have to change
  360  *      the ordering.
  361  */
  362 int
  363 vm_pageout_flush(vm_page_t *mc, int count, int flags)
  364 {
  365         vm_object_t object = mc[0]->object;
  366         int pageout_status[count];
  367         int numpagedout = 0;
  368         int i;
  369 
  370         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
  371         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
  372         /*
  373          * Initiate I/O.  Bump the vm_page_t->busy counter and
  374          * mark the pages read-only.
  375          *
   376          * We do not have to fix up the clean/dirty bits here... we can
  377          * allow the pager to do it after the I/O completes.
  378          *
  379          * NOTE! mc[i]->dirty may be partial or fragmented due to an
  380          * edge case with file fragments.
  381          */
  382         for (i = 0; i < count; i++) {
  383                 KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
  384                     ("vm_pageout_flush: partially invalid page %p index %d/%d",
  385                         mc[i], i, count));
  386                 vm_page_io_start(mc[i]);
  387                 pmap_page_protect(mc[i], VM_PROT_READ);
  388         }
  389         vm_page_unlock_queues();
  390         vm_object_pip_add(object, count);
  391 
  392         vm_pager_put_pages(object, mc, count,
  393             (flags | ((object == kernel_object) ? VM_PAGER_PUT_SYNC : 0)),
  394             pageout_status);
  395 
  396         vm_page_lock_queues();
  397         for (i = 0; i < count; i++) {
  398                 vm_page_t mt = mc[i];
  399 
  400                 switch (pageout_status[i]) {
  401                 case VM_PAGER_OK:
  402                 case VM_PAGER_PEND:
  403                         numpagedout++;
  404                         break;
  405                 case VM_PAGER_BAD:
  406                         /*
  407                          * Page outside of range of object. Right now we
  408                          * essentially lose the changes by pretending it
  409                          * worked.
  410                          */
  411                         pmap_clear_modify(mt);
  412                         vm_page_undirty(mt);
  413                         break;
  414                 case VM_PAGER_ERROR:
  415                 case VM_PAGER_FAIL:
  416                         /*
   417                          * If the page couldn't be paged out, reactivate it
   418                          * so it doesn't clog the inactive list.  (We will
   419                          * try paging it out again later.)
  420                          */
  421                         vm_page_activate(mt);
  422                         break;
  423                 case VM_PAGER_AGAIN:
  424                         break;
  425                 }
  426 
  427                 /*
  428                  * If the operation is still going, leave the page busy to
  429                  * block all other accesses. Also, leave the paging in
  430                  * progress indicator set so that we don't attempt an object
  431                  * collapse.
  432                  */
  433                 if (pageout_status[i] != VM_PAGER_PEND) {
  434                         vm_object_pip_wakeup(object);
  435                         vm_page_io_finish(mt);
  436                         if (!vm_page_count_severe() || !vm_page_try_to_cache(mt))
  437                                 pmap_page_protect(mt, VM_PROT_READ);
  438                 }
  439         }
  440         return numpagedout;
  441 }
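
/*
 * Note that only VM_PAGER_OK and VM_PAGER_PEND count toward the value
 * vm_pageout_flush() returns; errors reactivate the page instead.  A tiny
 * standalone sketch of that tally (the status array is invented for
 * illustration -- real values come from vm_pager_put_pages()):
 */
#if 0   /* illustrative only; compiles standalone in userland */
#include <stdio.h>

enum { ST_OK, ST_PEND, ST_BAD, ST_ERROR, ST_FAIL, ST_AGAIN };

int
main(void)
{
        int status[4] = { ST_OK, ST_PEND, ST_ERROR, ST_AGAIN };
        int i, numpagedout = 0;

        /* Only OK and PEND represent progress, as in vm_pageout_flush(). */
        for (i = 0; i < 4; i++)
                if (status[i] == ST_OK || status[i] == ST_PEND)
                        numpagedout++;
        printf("numpagedout = %d of 4\n", numpagedout);
        return (0);
}
#endif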
  442 
  443 #if !defined(NO_SWAPPING)
  444 /*
  445  *      vm_pageout_object_deactivate_pages
  446  *
  447  *      deactivate enough pages to satisfy the inactive target
  448  *      requirements or if vm_page_proc_limit is set, then
  449  *      deactivate all of the pages in the object and its
  450  *      backing_objects.
  451  *
  452  *      The object and map must be locked.
  453  */
  454 static void
  455 vm_pageout_object_deactivate_pages(pmap, first_object, desired)
  456         pmap_t pmap;
  457         vm_object_t first_object;
  458         long desired;
  459 {
  460         vm_object_t backing_object, object;
  461         vm_page_t p, next;
  462         int actcount, rcount, remove_mode;
  463 
  464         VM_OBJECT_LOCK_ASSERT(first_object, MA_OWNED);
  465         if (first_object->type == OBJT_DEVICE || first_object->type == OBJT_PHYS)
  466                 return;
  467         for (object = first_object;; object = backing_object) {
  468                 if (pmap_resident_count(pmap) <= desired)
  469                         goto unlock_return;
  470                 if (object->paging_in_progress)
  471                         goto unlock_return;
  472 
  473                 remove_mode = 0;
  474                 if (object->shadow_count > 1)
  475                         remove_mode = 1;
  476                 /*
   477                  * Scan the object's entire memory queue.
  478                  */
  479                 rcount = object->resident_page_count;
  480                 p = TAILQ_FIRST(&object->memq);
  481                 vm_page_lock_queues();
  482                 while (p && (rcount-- > 0)) {
  483                         if (pmap_resident_count(pmap) <= desired) {
  484                                 vm_page_unlock_queues();
  485                                 goto unlock_return;
  486                         }
  487                         next = TAILQ_NEXT(p, listq);
  488                         cnt.v_pdpages++;
  489                         if (p->wire_count != 0 ||
  490                             p->hold_count != 0 ||
  491                             p->busy != 0 ||
  492                             (p->flags & (PG_BUSY|PG_UNMANAGED)) ||
  493                             !pmap_page_exists_quick(pmap, p)) {
  494                                 p = next;
  495                                 continue;
  496                         }
  497                         actcount = pmap_ts_referenced(p);
  498                         if (actcount) {
  499                                 vm_page_flag_set(p, PG_REFERENCED);
  500                         } else if (p->flags & PG_REFERENCED) {
  501                                 actcount = 1;
  502                         }
  503                         if ((p->queue != PQ_ACTIVE) &&
  504                                 (p->flags & PG_REFERENCED)) {
  505                                 vm_page_activate(p);
  506                                 p->act_count += actcount;
  507                                 vm_page_flag_clear(p, PG_REFERENCED);
  508                         } else if (p->queue == PQ_ACTIVE) {
  509                                 if ((p->flags & PG_REFERENCED) == 0) {
  510                                         p->act_count -= min(p->act_count, ACT_DECLINE);
  511                                         if (!remove_mode && (vm_pageout_algorithm || (p->act_count == 0))) {
  512                                                 pmap_remove_all(p);
  513                                                 vm_page_deactivate(p);
  514                                         } else {
  515                                                 vm_pageq_requeue(p);
  516                                         }
  517                                 } else {
  518                                         vm_page_activate(p);
  519                                         vm_page_flag_clear(p, PG_REFERENCED);
  520                                         if (p->act_count < (ACT_MAX - ACT_ADVANCE))
  521                                                 p->act_count += ACT_ADVANCE;
  522                                         vm_pageq_requeue(p);
  523                                 }
  524                         } else if (p->queue == PQ_INACTIVE) {
  525                                 pmap_remove_all(p);
  526                         }
  527                         p = next;
  528                 }
  529                 vm_page_unlock_queues();
  530                 if ((backing_object = object->backing_object) == NULL)
  531                         goto unlock_return;
  532                 VM_OBJECT_LOCK(backing_object);
  533                 if (object != first_object)
  534                         VM_OBJECT_UNLOCK(object);
  535         }
  536 unlock_return:
  537         if (object != first_object)
  538                 VM_OBJECT_UNLOCK(object);
  539 }
  540 
  541 /*
   542  * Deactivate some number of pages in a map; try to do it fairly, though
   543  * that is really hard to do.
  544  */
  545 static void
  546 vm_pageout_map_deactivate_pages(map, desired)
  547         vm_map_t map;
  548         long desired;
  549 {
  550         vm_map_entry_t tmpe;
  551         vm_object_t obj, bigobj;
  552         int nothingwired;
  553 
  554         if (!vm_map_trylock(map))
  555                 return;
  556 
  557         bigobj = NULL;
  558         nothingwired = TRUE;
  559 
  560         /*
  561          * first, search out the biggest object, and try to free pages from
  562          * that.
  563          */
  564         tmpe = map->header.next;
  565         while (tmpe != &map->header) {
  566                 if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
  567                         obj = tmpe->object.vm_object;
  568                         if (obj != NULL && VM_OBJECT_TRYLOCK(obj)) {
  569                                 if (obj->shadow_count <= 1 &&
  570                                     (bigobj == NULL ||
  571                                      bigobj->resident_page_count < obj->resident_page_count)) {
  572                                         if (bigobj != NULL)
  573                                                 VM_OBJECT_UNLOCK(bigobj);
  574                                         bigobj = obj;
  575                                 } else
  576                                         VM_OBJECT_UNLOCK(obj);
  577                         }
  578                 }
  579                 if (tmpe->wired_count > 0)
  580                         nothingwired = FALSE;
  581                 tmpe = tmpe->next;
  582         }
  583 
  584         if (bigobj != NULL) {
  585                 vm_pageout_object_deactivate_pages(map->pmap, bigobj, desired);
  586                 VM_OBJECT_UNLOCK(bigobj);
  587         }
  588         /*
  589          * Next, hunt around for other pages to deactivate.  We actually
  590          * do this search sort of wrong -- .text first is not the best idea.
  591          */
  592         tmpe = map->header.next;
  593         while (tmpe != &map->header) {
  594                 if (pmap_resident_count(vm_map_pmap(map)) <= desired)
  595                         break;
  596                 if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
  597                         obj = tmpe->object.vm_object;
  598                         if (obj != NULL) {
  599                                 VM_OBJECT_LOCK(obj);
  600                                 vm_pageout_object_deactivate_pages(map->pmap, obj, desired);
  601                                 VM_OBJECT_UNLOCK(obj);
  602                         }
  603                 }
  604                 tmpe = tmpe->next;
  605         }
  606 
  607         /*
   608          * Remove all mappings if a process is swapped out; this will free page
  609          * table pages.
  610          */
  611         if (desired == 0 && nothingwired) {
  612                 GIANT_REQUIRED;
  613                 vm_page_lock_queues();
  614                 pmap_remove(vm_map_pmap(map), vm_map_min(map),
  615                     vm_map_max(map));
  616                 vm_page_unlock_queues();
  617         }
  618         vm_map_unlock(map);
  619 }
  620 #endif          /* !defined(NO_SWAPPING) */
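
/*
 * An illustrative sketch (not part of the indexed source) of the
 * biggest-object-first selection in vm_pageout_map_deactivate_pages():
 * heavily shadowed objects are skipped and the largest remaining resident
 * footprint wins.  The object table below is invented for illustration.
 */
#if 0   /* illustrative only; compiles standalone in userland */
#include <stdio.h>

struct obj { int shadow_count; int resident_page_count; };

int
main(void)
{
        struct obj objs[] = { {1, 40}, {3, 900}, {1, 250}, {0, 120} };
        struct obj *bigobj = NULL;
        int i;

        for (i = 0; i < 4; i++) {
                /* Skip heavily shared objects, as the kernel scan does. */
                if (objs[i].shadow_count > 1)
                        continue;
                if (bigobj == NULL ||
                    bigobj->resident_page_count < objs[i].resident_page_count)
                        bigobj = &objs[i];
        }
        printf("would deactivate from the object with %d resident pages\n",
            bigobj->resident_page_count);
        return (0);
}
#endif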
  621 
  622 /*
  623  * This routine is very drastic, but can save the system
  624  * in a pinch.
  625  */
  626 static void
  627 vm_pageout_pmap_collect(void)
  628 {
  629         int i;
  630         vm_page_t m;
  631         static int warningdone;
  632 
  633         if (pmap_pagedaemon_waken == 0)
  634                 return;
  635         if (warningdone < 5) {
  636                 printf("collecting pv entries -- suggest increasing PMAP_SHPGPERPROC\n");
  637                 warningdone++;
  638         }
  639         vm_page_lock_queues();
  640         for (i = 0; i < vm_page_array_size; i++) {
  641                 m = &vm_page_array[i];
  642                 if (m->wire_count || m->hold_count || m->busy ||
  643                     (m->flags & (PG_BUSY | PG_UNMANAGED)))
  644                         continue;
  645                 pmap_remove_all(m);
  646         }
  647         vm_page_unlock_queues();
  648         pmap_pagedaemon_waken = 0;
  649 }
  650         
  651 /*
  652  *      vm_pageout_scan does the dirty work for the pageout daemon.
  653  */
  654 static void
  655 vm_pageout_scan(int pass)
  656 {
  657         vm_page_t m, next;
  658         struct vm_page marker;
  659         int page_shortage, maxscan, pcount;
  660         int addl_page_shortage, addl_page_shortage_init;
  661         struct proc *p, *bigproc;
  662         vm_offset_t size, bigsize;
  663         vm_object_t object;
  664         int actcount;
  665         int vnodes_skipped = 0;
  666         int maxlaunder;
  667         int s;
  668         struct thread *td;
  669 
  670         mtx_lock(&Giant);
  671         /*
  672          * Decrease registered cache sizes.
  673          */
  674         EVENTHANDLER_INVOKE(vm_lowmem, 0);
  675         /*
  676          * We do this explicitly after the caches have been drained above.
  677          */
  678         uma_reclaim();
  679         /*
  680          * Do whatever cleanup that the pmap code can.
  681          */
  682         vm_pageout_pmap_collect();
  683 
  684         addl_page_shortage_init = atomic_readandclear_int(&vm_pageout_deficit);
  685 
  686         /*
  687          * Calculate the number of pages we want to either free or move
  688          * to the cache.
  689          */
  690         page_shortage = vm_paging_target() + addl_page_shortage_init;
  691 
  692         /*
  693          * Initialize our marker
  694          */
  695         bzero(&marker, sizeof(marker));
  696         marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
  697         marker.queue = PQ_INACTIVE;
  698         marker.wire_count = 1;
  699 
  700         /*
  701          * Start scanning the inactive queue for pages we can move to the
  702          * cache or free.  The scan will stop when the target is reached or
  703          * we have scanned the entire inactive queue.  Note that m->act_count
  704          * is not used to form decisions for the inactive queue, only for the
  705          * active queue.
  706          *
  707          * maxlaunder limits the number of dirty pages we flush per scan.
  708          * For most systems a smaller value (16 or 32) is more robust under
  709          * extreme memory and disk pressure because any unnecessary writes
   710          * to disk can result in extreme performance degradation.  However,
  711          * systems with excessive dirty pages (especially when MAP_NOSYNC is
  712          * used) will die horribly with limited laundering.  If the pageout
  713          * daemon cannot clean enough pages in the first pass, we let it go
  714          * all out in succeeding passes.
  715          */
  716         if ((maxlaunder = vm_max_launder) <= 1)
  717                 maxlaunder = 1;
  718         if (pass)
  719                 maxlaunder = 10000;
  720         vm_page_lock_queues();
  721 rescan0:
  722         addl_page_shortage = addl_page_shortage_init;
  723         maxscan = cnt.v_inactive_count;
  724 
  725         for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl);
  726              m != NULL && maxscan-- > 0 && page_shortage > 0;
  727              m = next) {
  728 
  729                 cnt.v_pdpages++;
  730 
  731                 if (m->queue != PQ_INACTIVE) {
  732                         goto rescan0;
  733                 }
  734 
  735                 next = TAILQ_NEXT(m, pageq);
  736 
  737                 /*
  738                  * skip marker pages
  739                  */
  740                 if (m->flags & PG_MARKER)
  741                         continue;
  742 
  743                 /*
  744                  * A held page may be undergoing I/O, so skip it.
  745                  */
  746                 if (m->hold_count) {
  747                         vm_pageq_requeue(m);
  748                         addl_page_shortage++;
  749                         continue;
  750                 }
  751                 /*
   752                  * Don't mess with busy pages; keep them at the front of
   753                  * the queue, as they are most likely being paged out.
  754                  */
  755                 if (m->busy || (m->flags & PG_BUSY)) {
  756                         addl_page_shortage++;
  757                         continue;
  758                 }
  759 
  760                 /*
  761                  * If the object is not being used, we ignore previous 
  762                  * references.
  763                  */
  764                 if (m->object->ref_count == 0) {
  765                         vm_page_flag_clear(m, PG_REFERENCED);
  766                         pmap_clear_reference(m);
  767 
  768                 /*
   769                  * Otherwise, if the page has been referenced while in the
   770                  * inactive queue, we bump the "activation count" upwards,
   771                  * making it less likely that the page will be added back to
   772                  * the inactive queue prematurely again.  Here we check the
   773                  * page tables (or emulated bits, if any), since the upper
   774                  * level VM system knows nothing about existing
   775                  * references.
  776                  */
  777                 } else if (((m->flags & PG_REFERENCED) == 0) &&
  778                         (actcount = pmap_ts_referenced(m))) {
  779                         vm_page_activate(m);
  780                         m->act_count += (actcount + ACT_ADVANCE);
  781                         continue;
  782                 }
  783 
  784                 /*
   785                  * If the upper level VM system knows about any page
   786                  * references, we activate the page.  We also set the
   787                  * "activation count" higher than normal so that we are less
   788                  * likely to place pages back onto the inactive queue again.
  789                  */
  790                 if ((m->flags & PG_REFERENCED) != 0) {
  791                         vm_page_flag_clear(m, PG_REFERENCED);
  792                         actcount = pmap_ts_referenced(m);
  793                         vm_page_activate(m);
  794                         m->act_count += (actcount + ACT_ADVANCE + 1);
  795                         continue;
  796                 }
  797 
  798                 /*
  799                  * If the upper level VM system doesn't know anything about 
  800                  * the page being dirty, we have to check for it again.  As 
  801                  * far as the VM code knows, any partially dirty pages are 
  802                  * fully dirty.
  803                  */
  804                 if (m->dirty == 0) {
  805                         vm_page_test_dirty(m);
  806                 } else {
  807                         vm_page_dirty(m);
  808                 }
  809                 object = m->object;
  810                 if (!VM_OBJECT_TRYLOCK(object))
  811                         continue;
  812                 if (m->valid == 0) {
  813                         /*
  814                          * Invalid pages can be easily freed
  815                          */
  816                         vm_page_busy(m);
  817                         pmap_remove_all(m);
  818                         vm_page_free(m);
  819                         cnt.v_dfree++;
  820                         --page_shortage;
  821                 } else if (m->dirty == 0) {
  822                         /*
  823                          * Clean pages can be placed onto the cache queue.
  824                          * This effectively frees them.
  825                          */
  826                         vm_page_cache(m);
  827                         --page_shortage;
  828                 } else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
  829                         /*
  830                          * Dirty pages need to be paged out, but flushing
   831                          * a page is extremely expensive versus freeing
   832                          * a clean page.  Rather than artificially limiting
  833                          * the number of pages we can flush, we instead give
  834                          * dirty pages extra priority on the inactive queue
  835                          * by forcing them to be cycled through the queue
  836                          * twice before being flushed, after which the
  837                          * (now clean) page will cycle through once more
  838                          * before being freed.  This significantly extends
  839                          * the thrash point for a heavily loaded machine.
  840                          */
  841                         vm_page_flag_set(m, PG_WINATCFLS);
  842                         vm_pageq_requeue(m);
  843                 } else if (maxlaunder > 0) {
  844                         /*
  845                          * We always want to try to flush some dirty pages if
  846                          * we encounter them, to keep the system stable.
  847                          * Normally this number is small, but under extreme
  848                          * pressure where there are insufficient clean pages
  849                          * on the inactive queue, we may have to go all out.
  850                          */
  851                         int swap_pageouts_ok;
  852                         struct vnode *vp = NULL;
  853                         struct mount *mp;
  854 
  855                         if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
  856                                 swap_pageouts_ok = 1;
  857                         } else {
  858                                 swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
  859                                 swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
   860                                     vm_page_count_min());
   861 
  862                         }
  863 
  864                         /*
  865                          * We don't bother paging objects that are "dead".  
  866                          * Those objects are in a "rundown" state.
  867                          */
  868                         if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
  869                                 VM_OBJECT_UNLOCK(object);
  870                                 vm_pageq_requeue(m);
  871                                 continue;
  872                         }
  873 
  874                         /*
  875                          * The object is already known NOT to be dead.   It
  876                          * is possible for the vget() to block the whole
  877                          * pageout daemon, but the new low-memory handling
  878                          * code should prevent it.
  879                          *
  880                          * The previous code skipped locked vnodes and, worse,
  881                          * reordered pages in the queue.  This results in
  882                          * completely non-deterministic operation and, on a
  883                          * busy system, can lead to extremely non-optimal
  884                          * pageouts.  For example, it can cause clean pages
  885                          * to be freed and dirty pages to be moved to the end
  886                          * of the queue.  Since dirty pages are also moved to
  887                          * the end of the queue once-cleaned, this gives
   888                          * way too large a weighting to deferring the freeing
  889                          * of dirty pages.
  890                          *
  891                          * We can't wait forever for the vnode lock, we might
  892                          * deadlock due to a vn_read() getting stuck in
  893                          * vm_wait while holding this vnode.  We skip the 
  894                          * vnode if we can't get it in a reasonable amount
  895                          * of time.
  896                          */
  897                         if (object->type == OBJT_VNODE) {
  898                                 vp = object->handle;
  899                                 mp = NULL;
  900                                 if (vp->v_type == VREG)
  901                                         vn_start_write(vp, &mp, V_NOWAIT);
  902                                 vm_page_unlock_queues();
  903                                 VI_LOCK(vp);
  904                                 VM_OBJECT_UNLOCK(object);
  905                                 if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK |
  906                                     LK_TIMELOCK, curthread)) {
  907                                         VM_OBJECT_LOCK(object);
  908                                         vm_page_lock_queues();
  909                                         ++pageout_lock_miss;
  910                                         vn_finished_write(mp);
  911                                         if (object->flags & OBJ_MIGHTBEDIRTY)
  912                                                 vnodes_skipped++;
  913                                         VM_OBJECT_UNLOCK(object);
  914                                         continue;
  915                                 }
  916                                 VM_OBJECT_LOCK(object);
  917                                 vm_page_lock_queues();
  918                                 /*
  919                                  * The page might have been moved to another
  920                                  * queue during potential blocking in vget()
  921                                  * above.  The page might have been freed and
  922                                  * reused for another vnode.  The object might
  923                                  * have been reused for another vnode.
  924                                  */
  925                                 if (m->queue != PQ_INACTIVE ||
  926                                     m->object != object ||
  927                                     object->handle != vp) {
  928                                         if (object->flags & OBJ_MIGHTBEDIRTY)
  929                                                 vnodes_skipped++;
  930                                         goto unlock_and_continue;
  931                                 }
  932         
  933                                 /*
   934                                  * The page may have been busied during the
   935                                  * blocking in vget().  We don't move the
   936                                  * page back onto the end of the queue;
   937                                  * statistics are more correct if we don't.
  938                                  */
  939                                 if (m->busy || (m->flags & PG_BUSY)) {
  940                                         goto unlock_and_continue;
  941                                 }
  942 
  943                                 /*
   944                                  * If the page has become held, it might
   945                                  * be undergoing I/O, so skip it.
  946                                  */
  947                                 if (m->hold_count) {
  948                                         vm_pageq_requeue(m);
  949                                         if (object->flags & OBJ_MIGHTBEDIRTY)
  950                                                 vnodes_skipped++;
  951                                         goto unlock_and_continue;
  952                                 }
  953                         }
  954 
  955                         /*
  956                          * If a page is dirty, then it is either being washed
  957                          * (but not yet cleaned) or it is still in the
  958                          * laundry.  If it is still in the laundry, then we
  959                          * start the cleaning operation. 
  960                          *
  961                          * This operation may cluster, invalidating the 'next'
  962                          * pointer.  To prevent an inordinate number of
  963                          * restarts we use our marker to remember our place.
  964                          *
   965                          * We decrement page_shortage on success to account
   966                          * for the (future) cleaned page; otherwise we could
   967                          * wind up laundering or cleaning too many pages.
  968                          */
  969                         s = splvm();
  970                         TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE].pl, m, &marker, pageq);
  971                         splx(s);
  972                         if (vm_pageout_clean(m) != 0) {
  973                                 --page_shortage;
  974                                 --maxlaunder;
  975                         }
  976                         s = splvm();
  977                         next = TAILQ_NEXT(&marker, pageq);
  978                         TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, &marker, pageq);
  979                         splx(s);
  980 unlock_and_continue:
  981                         VM_OBJECT_UNLOCK(object);
  982                         if (vp) {
  983                                 vm_page_unlock_queues();
  984                                 vput(vp);
  985                                 vn_finished_write(mp);
  986                                 vm_page_lock_queues();
  987                         }
  988                         continue;
  989                 }
  990                 VM_OBJECT_UNLOCK(object);
  991         }
  992 
  993         /*
  994          * Compute the number of pages we want to try to move from the
  995          * active queue to the inactive queue.
  996          */
  997         page_shortage = vm_paging_target() +
  998                 cnt.v_inactive_target - cnt.v_inactive_count;
  999         page_shortage += addl_page_shortage;
 1000 
 1001         /*
 1002          * Scan the active queue for things we can deactivate. We nominally
 1003          * track the per-page activity counter and use it to locate
 1004          * deactivation candidates.
 1005          */
 1006         pcount = cnt.v_active_count;
 1007         m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
 1008 
 1009         while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {
 1010 
 1011                 KASSERT(m->queue == PQ_ACTIVE,
 1012                     ("vm_pageout_scan: page %p isn't active", m));
 1013 
 1014                 next = TAILQ_NEXT(m, pageq);
 1015                 /*
 1016                  * Don't deactivate pages that are busy.
 1017                  */
 1018                 if ((m->busy != 0) ||
 1019                     (m->flags & PG_BUSY) ||
 1020                     (m->hold_count != 0)) {
 1021                         vm_pageq_requeue(m);
 1022                         m = next;
 1023                         continue;
 1024                 }
 1025 
 1026                 /*
 1027                  * The count for pagedaemon pages is done after checking the
 1028                  * page for eligibility...
 1029                  */
 1030                 cnt.v_pdpages++;
 1031 
 1032                 /*
 1033                  * Check to see "how much" the page has been used.
 1034                  */
 1035                 actcount = 0;
 1036                 if (m->object->ref_count != 0) {
 1037                         if (m->flags & PG_REFERENCED) {
 1038                                 actcount += 1;
 1039                         }
 1040                         actcount += pmap_ts_referenced(m);
 1041                         if (actcount) {
 1042                                 m->act_count += ACT_ADVANCE + actcount;
 1043                                 if (m->act_count > ACT_MAX)
 1044                                         m->act_count = ACT_MAX;
 1045                         }
 1046                 }
 1047 
 1048                 /*
 1049                  * Since we have "tested" this bit, we need to clear it now.
 1050                  */
 1051                 vm_page_flag_clear(m, PG_REFERENCED);
 1052 
 1053                 /*
  1054                  * Only if an object is currently being used do we use
  1055                  * the page activation count stats.
 1056                  */
 1057                 if (actcount && (m->object->ref_count != 0)) {
 1058                         vm_pageq_requeue(m);
 1059                 } else {
 1060                         m->act_count -= min(m->act_count, ACT_DECLINE);
 1061                         if (vm_pageout_algorithm ||
 1062                             m->object->ref_count == 0 ||
 1063                             m->act_count == 0) {
 1064                                 page_shortage--;
 1065                                 if (m->object->ref_count == 0) {
 1066                                         pmap_remove_all(m);
 1067                                         if (m->dirty == 0)
 1068                                                 vm_page_cache(m);
 1069                                         else
 1070                                                 vm_page_deactivate(m);
 1071                                 } else {
 1072                                         vm_page_deactivate(m);
 1073                                 }
 1074                         } else {
 1075                                 vm_pageq_requeue(m);
 1076                         }
 1077                 }
 1078                 m = next;
 1079         }
 1080         s = splvm();
 1081 
 1082         /*
  1083          * We try to maintain some *really* free pages; this allows interrupt
  1084          * code to be guaranteed space.  Since both cache and free queues
  1085          * are considered basically 'free', moving pages from cache to free
  1086          * does not affect other calculations.
 1087          */
 1088         while (cnt.v_free_count < cnt.v_free_reserved) {
 1089                 static int cache_rover = 0;
 1090 
 1091                 if ((m = vm_page_select_cache(cache_rover)) == NULL)
 1092                         break;
 1093                 cache_rover = (m->pc + PQ_PRIME2) & PQ_L2_MASK;
 1094                 object = m->object;
 1095                 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
 1096                 vm_page_busy(m);
 1097                 pmap_remove_all(m);
 1098                 vm_page_free(m);
 1099                 VM_OBJECT_UNLOCK(object);
 1100                 cnt.v_dfree++;
 1101         }
 1102         splx(s);
 1103         vm_page_unlock_queues();
 1104 #if !defined(NO_SWAPPING)
 1105         /*
 1106          * Idle process swapout -- run once per second.
 1107          */
 1108         if (vm_swap_idle_enabled) {
 1109                 static long lsec;
 1110                 if (time_second != lsec) {
 1111                         vm_pageout_req_swapout |= VM_SWAP_IDLE;
 1112                         vm_req_vmdaemon();
 1113                         lsec = time_second;
 1114                 }
 1115         }
 1116 #endif
 1117                 
 1118         /*
  1119          * If we didn't get enough free pages and we have skipped a vnode
  1120          * in a writeable object, wake up the sync daemon.  Also kick
  1121          * swapout if we did not get enough free pages.
 1122          */
 1123         if (vm_paging_target() > 0) {
 1124                 if (vnodes_skipped && vm_page_count_min())
 1125                         (void) speedup_syncer();
 1126 #if !defined(NO_SWAPPING)
 1127                 if (vm_swap_enabled && vm_page_count_target()) {
 1128                         vm_req_vmdaemon();
 1129                         vm_pageout_req_swapout |= VM_SWAP_NORMAL;
 1130                 }
 1131 #endif
 1132         }
 1133 
 1134         /*
 1135          * If we are critically low on one of RAM or swap and low on
 1136          * the other, kill the largest process.  However, we avoid
 1137          * doing this on the first pass in order to give ourselves a
 1138          * chance to flush out dirty vnode-backed pages and to allow
 1139          * active pages to be moved to the inactive queue and reclaimed.
 1140          *
 1141          * We keep the process bigproc locked once we find it to keep anyone
 1142          * from messing with it; however, there is a possibility of
  1143          * deadlock if process B is bigproc and one of its child processes
 1144          * attempts to propagate a signal to B while we are waiting for A's
 1145          * lock while walking this list.  To avoid this, we don't block on
 1146          * the process lock but just skip a process if it is already locked.
 1147          */
 1148         if (pass != 0 &&
 1149             ((swap_pager_avail < 64 && vm_page_count_min()) ||
 1150              (swap_pager_full && vm_paging_target() > 0))) {
 1151                 bigproc = NULL;
 1152                 bigsize = 0;
 1153                 sx_slock(&allproc_lock);
 1154                 FOREACH_PROC_IN_SYSTEM(p) {
 1155                         int breakout;
 1156                         /*
 1157                          * If this process is already locked, skip it.
 1158                          */
 1159                         if (PROC_TRYLOCK(p) == 0)
 1160                                 continue;
 1161                         /*
 1162                          * If this is a system or protected process, skip it.
 1163                          */
 1164                         if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
 1165                             (p->p_flag & P_PROTECTED) ||
 1166                             ((p->p_pid < 48) && (swap_pager_avail != 0))) {
 1167                                 PROC_UNLOCK(p);
 1168                                 continue;
 1169                         }
 1170                         /*
  1171                          * If the process is in a non-running state, don't
  1172                          * touch it.  Check all the threads individually.
 1173                          */
 1174                         mtx_lock_spin(&sched_lock);
 1175                         breakout = 0;
 1176                         FOREACH_THREAD_IN_PROC(p, td) {
 1177                                 if (!TD_ON_RUNQ(td) &&
 1178                                     !TD_IS_RUNNING(td) &&
 1179                                     !TD_IS_SLEEPING(td)) {
 1180                                         breakout = 1;
 1181                                         break;
 1182                                 }
 1183                         }
 1184                         if (breakout) {
 1185                                 mtx_unlock_spin(&sched_lock);
 1186                                 PROC_UNLOCK(p);
 1187                                 continue;
 1188                         }
 1189                         mtx_unlock_spin(&sched_lock);
 1190                         /*
 1191                          * get the process size
 1192                          */
 1193                         if (!vm_map_trylock_read(&p->p_vmspace->vm_map)) {
 1194                                 PROC_UNLOCK(p);
 1195                                 continue;
 1196                         }
 1197                         size = vmspace_swap_count(p->p_vmspace);
 1198                         vm_map_unlock_read(&p->p_vmspace->vm_map);
 1199                         size += vmspace_resident_count(p->p_vmspace);
 1200                         /*
  1201                          * If this process is bigger than the biggest one
  1202                          * so far, remember it.
 1203                          */
 1204                         if (size > bigsize) {
 1205                                 if (bigproc != NULL)
 1206                                         PROC_UNLOCK(bigproc);
 1207                                 bigproc = p;
 1208                                 bigsize = size;
 1209                         } else
 1210                                 PROC_UNLOCK(p);
 1211                 }
 1212                 sx_sunlock(&allproc_lock);
 1213                 if (bigproc != NULL) {
 1214                         struct ksegrp *kg;
 1215                         killproc(bigproc, "out of swap space");
 1216                         mtx_lock_spin(&sched_lock);
 1217                         FOREACH_KSEGRP_IN_PROC(bigproc, kg) {
 1218                                 sched_nice(kg, PRIO_MIN); /* XXXKSE ??? */
 1219                         }
 1220                         mtx_unlock_spin(&sched_lock);
 1221                         PROC_UNLOCK(bigproc);
 1222                         wakeup(&cnt.v_free_count);
 1223                 }
 1224         }
 1225         mtx_unlock(&Giant);
 1226 }
 1227 
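/*
 * A minimal userland sketch (not part of vm_pageout.c) of the two locking
 * ideas used in the bigproc scan above: PROC_TRYLOCK() is approximated by
 * pthread_mutex_trylock() so a busy entry is skipped instead of risking
 * deadlock, and at most one candidate -- the current biggest -- is kept
 * locked at a time.  proc_t and pick_biggest() are hypothetical names.
 */
#include <pthread.h>
#include <stddef.h>

typedef struct proc_s {
        pthread_mutex_t lock;
        size_t          size;           /* resident + swapped pages */
        struct proc_s   *next;
} proc_t;

static proc_t *
pick_biggest(proc_t *head)
{
        proc_t *big = NULL, *p;
        size_t bigsize = 0;

        for (p = head; p != NULL; p = p->next) {
                /* Skip rather than block: avoids lock-order deadlock. */
                if (pthread_mutex_trylock(&p->lock) != 0)
                        continue;
                if (p->size > bigsize) {
                        /* Hand the "kept locked" role to the new winner. */
                        if (big != NULL)
                                pthread_mutex_unlock(&big->lock);
                        big = p;
                        bigsize = p->size;
                } else
                        pthread_mutex_unlock(&p->lock);
        }
        return (big);                   /* returned locked, as above */
}
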
 1228 /*
 1229  * This routine tries to maintain the pseudo-LRU active queue so that
 1230  * some statistics accumulation still occurs during long periods when
 1231  * there is no paging.  This helps the transition when paging first
 1232  * starts to occur.
 1233  */
 1234 static void
 1235 vm_pageout_page_stats()
 1236 {
 1237         vm_page_t m, next;
 1238         int pcount, tpcount;            /* Number of pages to check */
 1239         static int fullintervalcount = 0;
 1240         int page_shortage;
 1241         int s0;
 1242 
 1243         page_shortage = 
 1244             (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) -
 1245             (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
 1246 
 1247         if (page_shortage <= 0)
 1248                 return;
 1249 
 1250         s0 = splvm();
 1251         vm_page_lock_queues();
 1252         pcount = cnt.v_active_count;
 1253         fullintervalcount += vm_pageout_stats_interval;
 1254         if (fullintervalcount < vm_pageout_full_stats_interval) {
 1255                 tpcount = (vm_pageout_stats_max * cnt.v_active_count) / cnt.v_page_count;
 1256                 if (pcount > tpcount)
 1257                         pcount = tpcount;
 1258         } else {
 1259                 fullintervalcount = 0;
 1260         }
 1261 
 1262         m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
 1263         while ((m != NULL) && (pcount-- > 0)) {
 1264                 int actcount;
 1265 
 1266                 KASSERT(m->queue == PQ_ACTIVE,
 1267                     ("vm_pageout_page_stats: page %p isn't active", m));
 1268 
 1269                 next = TAILQ_NEXT(m, pageq);
 1270                 /*
 1271                  * Don't deactivate pages that are busy.
 1272                  */
 1273                 if ((m->busy != 0) ||
 1274                     (m->flags & PG_BUSY) ||
 1275                     (m->hold_count != 0)) {
 1276                         vm_pageq_requeue(m);
 1277                         m = next;
 1278                         continue;
 1279                 }
 1280 
 1281                 actcount = 0;
 1282                 if (m->flags & PG_REFERENCED) {
 1283                         vm_page_flag_clear(m, PG_REFERENCED);
 1284                         actcount += 1;
 1285                 }
 1286 
 1287                 actcount += pmap_ts_referenced(m);
 1288                 if (actcount) {
 1289                         m->act_count += ACT_ADVANCE + actcount;
 1290                         if (m->act_count > ACT_MAX)
 1291                                 m->act_count = ACT_MAX;
 1292                         vm_pageq_requeue(m);
 1293                 } else {
 1294                         if (m->act_count == 0) {
 1295                                 /*
 1296                                  * We turn off page access so that we have
 1297                                  * more accurate RSS stats.  We don't do this
 1298                                  * in the normal page deactivation path when
 1299                                  * the system is under VM load, because the
 1300                                  * cost of the large number of page-protect
 1301                                  * operations would outweigh the benefit of
 1302                                  * doing them.
 1303                                  */
 1304                                 pmap_remove_all(m);
 1305                                 vm_page_deactivate(m);
 1306                         } else {
 1307                                 m->act_count -= min(m->act_count, ACT_DECLINE);
 1308                                 vm_pageq_requeue(m);
 1309                         }
 1310                 }
 1311 
 1312                 m = next;
 1313         }
 1314         vm_page_unlock_queues();
 1315         splx(s0);
 1316 }
 1317 
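/*
 * A minimal sketch (not part of vm_pageout.c) of the act_count aging
 * performed above, assuming the historical vm_page.h constants
 * ACT_ADVANCE == 3, ACT_DECLINE == 1 and ACT_MAX == 64: a referenced
 * page moves up by ACT_ADVANCE plus the number of references seen,
 * clamped at ACT_MAX; an unreferenced page decays by ACT_DECLINE and
 * is deactivated only once its count reaches zero.
 */
#define ACT_DECLINE     1
#define ACT_ADVANCE     3
#define ACT_MAX         64

static int
age_act_count(int act_count, int refs_seen)
{
        if (refs_seen != 0) {
                act_count += ACT_ADVANCE + refs_seen;
                if (act_count > ACT_MAX)
                        act_count = ACT_MAX;
        } else if (act_count > 0)
                act_count -= (act_count < ACT_DECLINE) ?
                    act_count : ACT_DECLINE;
        /* A result of 0 corresponds to deactivating the page. */
        return (act_count);
}
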
 1318 /*
 1319  *      vm_pageout is the high level pageout daemon.
 1320  */
 1321 static void
 1322 vm_pageout()
 1323 {
 1324         int error, pass, s;
 1325 
 1326         /*
 1327          * Initialize some paging parameters.
 1328          */
 1329         cnt.v_interrupt_free_min = 2;
 1330         if (cnt.v_page_count < 2000)
 1331                 vm_pageout_page_count = 8;
 1332 
 1333         /*
 1334          * v_free_reserved needs to include enough for the largest
 1335          * swap pager structures plus enough for any pv_entry structs
 1336          * when paging. 
 1337          */
 1338         if (cnt.v_page_count > 1024)
 1339                 cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
 1340         else
 1341                 cnt.v_free_min = 4;
 1342         cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
 1343             cnt.v_interrupt_free_min;
 1344         cnt.v_free_reserved = vm_pageout_page_count +
 1345             cnt.v_pageout_free_min + (cnt.v_page_count / 768) + PQ_L2_SIZE;
 1346         cnt.v_free_severe = cnt.v_free_min / 2;
 1347         cnt.v_free_min += cnt.v_free_reserved;
 1348         cnt.v_free_severe += cnt.v_free_reserved;
 1349 
 1350         /*
 1351          * v_free_target and v_cache_min control pageout hysteresis.  Note
 1352          * that these are more a measure of the VM cache queue hysteresis
 1353          * than the VM free queue.  Specifically, v_free_target is the
 1354          * high water mark (free+cache pages).
 1355          *
 1356          * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
 1357          * low water mark, while v_free_min is the stop.  v_cache_min must
 1358          * be big enough to handle memory needs while the pageout daemon
 1359          * is signalled and run to free more pages.
 1360          */
 1361         if (cnt.v_free_count > 6144)
 1362                 cnt.v_free_target = 4 * cnt.v_free_min + cnt.v_free_reserved;
 1363         else
 1364                 cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;
 1365 
 1366         if (cnt.v_free_count > 2048) {
 1367                 cnt.v_cache_min = cnt.v_free_target;
 1368                 cnt.v_cache_max = 2 * cnt.v_cache_min;
 1369                 cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
 1370         } else {
 1371                 cnt.v_cache_min = 0;
 1372                 cnt.v_cache_max = 0;
 1373                 cnt.v_inactive_target = cnt.v_free_count / 4;
 1374         }
 1375         if (cnt.v_inactive_target > cnt.v_free_count / 3)
 1376                 cnt.v_inactive_target = cnt.v_free_count / 3;
 1377 
 1378         /* XXX does not really belong here */
 1379         if (vm_page_max_wired == 0)
 1380                 vm_page_max_wired = cnt.v_free_count / 3;
 1381 
 1382         if (vm_pageout_stats_max == 0)
 1383                 vm_pageout_stats_max = cnt.v_free_target;
 1384 
 1385         /*
 1386          * Set interval in seconds for stats scan.
 1387          */
 1388         if (vm_pageout_stats_interval == 0)
 1389                 vm_pageout_stats_interval = 5;
 1390         if (vm_pageout_full_stats_interval == 0)
 1391                 vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;
 1392 
 1393         /*
 1394          * Set maximum free per pass
 1395          */
 1396         if (vm_pageout_stats_free_max == 0)
 1397                 vm_pageout_stats_free_max = 5;
 1398 
 1399         swap_pager_swap_init();
 1400         pass = 0;
 1401         /*
 1402          * The pageout daemon is never done, so loop forever.
 1403          */
 1404         while (TRUE) {
 1405                 s = splvm();
 1406                 vm_page_lock_queues();
 1407                 /*
 1408                  * If we have enough free memory, wakeup waiters.  Do
 1409                  * not clear vm_pages_needed until we reach our target,
 1410                  * otherwise we may be woken up over and over again and
 1411                  * waste a lot of cpu.
 1412                  */
 1413                 if (vm_pages_needed && !vm_page_count_min()) {
 1414                         if (!vm_paging_needed())
 1415                                 vm_pages_needed = 0;
 1416                         wakeup(&cnt.v_free_count);
 1417                 }
 1418                 if (vm_pages_needed) {
 1419                         /*
 1420                          * Still not done, take a second pass without waiting
 1421                          * (unlimited dirty cleaning), otherwise sleep a bit
 1422                          * and try again.
 1423                          */
 1424                         ++pass;
 1425                         if (pass > 1)
 1426                                 msleep(&vm_pages_needed, &vm_page_queue_mtx, PVM,
 1427                                        "psleep", hz/2);
 1428                 } else {
 1429                         /*
 1430                          * Good enough, sleep & handle stats.  Prime the pass
 1431                          * for the next run.
 1432                          */
 1433                         if (pass > 1)
 1434                                 pass = 1;
 1435                         else
 1436                                 pass = 0;
 1437                         error = msleep(&vm_pages_needed, &vm_page_queue_mtx, PVM,
 1438                                     "psleep", vm_pageout_stats_interval * hz);
 1439                         if (error && !vm_pages_needed) {
 1440                                 vm_page_unlock_queues();
 1441                                 splx(s);
 1442                                 pass = 0;
 1443                                 vm_pageout_page_stats();
 1444                                 continue;
 1445                         }
 1446                 }
 1447                 if (vm_pages_needed)
 1448                         cnt.v_pdwakeups++;
 1449                 vm_page_unlock_queues();
 1450                 splx(s);
 1451                 vm_pageout_scan(pass);
 1452         }
 1453 }
 1454 
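/*
 * Worked example (not part of vm_pageout.c) of the watermark arithmetic
 * above, assuming 262144 pages of 4 KB (1 GB), MAXBSIZE == 65536,
 * PQ_L2_SIZE == 256 and vm_pageout_page_count == 16 -- all illustrative
 * values:
 *
 *   v_free_min         = 4 + (262144 - 1024) / 200      = 1309
 *   v_pageout_free_min = (2 * 65536) / 4096 + 2         = 34
 *   v_free_reserved    = 16 + 34 + 262144 / 768 + 256   = 647
 *   v_free_severe      = 1309 / 2 + 647                 = 1301
 *   v_free_min        += 647                            => 1956
 *   v_free_target      = 4 * 1956 + 647                 = 8471
 *                        (free count > 6144, so the 4x rule applies)
 */
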
 1455 /*
 1456  * Unless the page queue lock is held by the caller, this function
 1457  * should be regarded as advisory.  Specifically, the caller should
 1458  * not msleep() on &cnt.v_free_count following this function unless
 1459  * the page queue lock is held until the msleep() is performed.
 1460  */
 1461 void
 1462 pagedaemon_wakeup()
 1463 {
 1464 
 1465         if (!vm_pages_needed && curthread->td_proc != pageproc) {
 1466                 vm_pages_needed = 1;
 1467                 wakeup(&vm_pages_needed);
 1468         }
 1469 }
 1470 
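/*
 * A sketch (not part of vm_pageout.c) of the lost-wakeup race that the
 * comment above warns about.  Without holding the page queue lock from
 * the shortage check through the sleep, the daemon's wakeup can fire in
 * the window between the two steps and be lost:
 *
 *   1. caller observes a page shortage        (lock not held)
 *        -- daemon frees pages and calls wakeup(&cnt.v_free_count);
 *           nobody is asleep yet, so the wakeup is discarded --
 *   2. caller msleep()s on &cnt.v_free_count and may sleep forever
 *
 * Holding vm_page_queue_mtx across both steps closes the window,
 * because the daemon's main loop takes the same lock before its
 * wakeup() call.
 */
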
 1471 #if !defined(NO_SWAPPING)
 1472 static void
 1473 vm_req_vmdaemon()
 1474 {
 1475         static int lastrun = 0;
 1476 
 1477         if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
 1478                 wakeup(&vm_daemon_needed);
 1479                 lastrun = ticks;
 1480         }
 1481 }
 1482 
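/*
 * A minimal sketch (not part of vm_pageout.c) of the rate-limiting idiom
 * in vm_req_vmdaemon() above: act at most once per second of ticks, and
 * treat a tick counter that has moved backwards (wrapped around) as
 * "long enough ago".  TICKS_PER_SEC, ticks and poke_daemon() here are
 * hypothetical stand-ins for hz, the kernel tick counter and
 * wakeup(&vm_daemon_needed).
 */
#include <stdio.h>

#define TICKS_PER_SEC   100             /* assumed hz */

static int ticks;                       /* advanced by a clock elsewhere */

static void
poke_daemon(void)
{
        printf("daemon poked at tick %d\n", ticks);
}

static void
rate_limited_poke(void)
{
        static int lastrun = 0;

        /* Wrap check: a counter that moved backwards has overflowed. */
        if (ticks > lastrun + TICKS_PER_SEC || ticks < lastrun) {
                poke_daemon();
                lastrun = ticks;
        }
}
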
 1483 static void
 1484 vm_daemon()
 1485 {
 1486         struct proc *p;
 1487         int breakout;
 1488         struct thread *td;
 1489 
 1490         mtx_lock(&Giant);
 1491         while (TRUE) {
 1492                 tsleep(&vm_daemon_needed, PPAUSE, "psleep", 0);
 1493                 if (vm_pageout_req_swapout) {
 1494                         swapout_procs(vm_pageout_req_swapout);
 1495                         vm_pageout_req_swapout = 0;
 1496                 }
 1497                 /*
 1498                  * Scan the processes for those exceeding their rlimits
 1499                  * or swapped out -- deactivate their pages.
 1500                  */
 1501                 sx_slock(&allproc_lock);
 1502                 LIST_FOREACH(p, &allproc, p_list) {
 1503                         vm_pindex_t limit, size;
 1504 
 1505                         /*
 1506                          * If this is a system process or one that is
 1507                          * exiting, skip it.
 1508                          */
 1509                         PROC_LOCK(p);
 1510                         if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
 1511                                 PROC_UNLOCK(p);
 1512                                 continue;
 1513                         }
 1514                         /*
 1515                          * If the process is in a non-runnable state,
 1516                          * don't touch it.
 1517                          */
 1518                         mtx_lock_spin(&sched_lock);
 1519                         breakout = 0;
 1520                         FOREACH_THREAD_IN_PROC(p, td) {
 1521                                 if (!TD_ON_RUNQ(td) &&
 1522                                     !TD_IS_RUNNING(td) &&
 1523                                     !TD_IS_SLEEPING(td)) {
 1524                                         breakout = 1;
 1525                                         break;
 1526                                 }
 1527                         }
 1528                         mtx_unlock_spin(&sched_lock);
 1529                         if (breakout) {
 1530                                 PROC_UNLOCK(p);
 1531                                 continue;
 1532                         }
 1533                         /*
 1534                          * Get the RSS limit in pages.
 1535                          */
 1536                         limit = OFF_TO_IDX(
 1537                             qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
 1538                                 p->p_rlimit[RLIMIT_RSS].rlim_max));
 1539 
 1540                         /*
 1541                          * Let processes that are swapped out really be
 1542                          * swapped out: set the limit to nothing (this
 1543                          * will force a swap-out).
 1544                          */
 1545                         if ((p->p_sflag & PS_INMEM) == 0)
 1546                                 limit = 0;      /* XXX */
 1547                         PROC_UNLOCK(p);
 1548 
 1549                         size = vmspace_resident_count(p->p_vmspace);
 1550                         if (size >= limit) {
 1551                                 vm_pageout_map_deactivate_pages(
 1552                                     &p->p_vmspace->vm_map, limit);
 1553                         }
 1554                 }
 1555                 sx_sunlock(&allproc_lock);
 1556         }
 1557 }
 1558 #endif                  /* !defined(NO_SWAPPING) */
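/*
 * A minimal sketch (not part of vm_pageout.c) of the RSS limit
 * computation in vm_daemon() above: take the smaller of the soft and
 * hard RLIMIT_RSS values (in bytes) and convert to a page count, as
 * OFF_TO_IDX() does with a right shift by PAGE_SHIFT.  The 4 KB page
 * size and rss_limit_pages() are assumptions for illustration.
 */
#include <stdint.h>

#define PAGE_SHIFT      12              /* assumes 4 KB pages */

static uint64_t
rss_limit_pages(uint64_t rlim_cur, uint64_t rlim_max)
{
        uint64_t bytes = (rlim_cur < rlim_max) ? rlim_cur : rlim_max;

        return (bytes >> PAGE_SHIFT);   /* e.g. 64 MB -> 16384 pages */
}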
