FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_pageout.c


    1 /*-
    2  * Copyright (c) 1991 Regents of the University of California.
    3  * All rights reserved.
    4  * Copyright (c) 1994 John S. Dyson
    5  * All rights reserved.
    6  * Copyright (c) 1994 David Greenman
    7  * All rights reserved.
    8  * Copyright (c) 2005 Yahoo! Technologies Norway AS
    9  * All rights reserved.
   10  *
   11  * This code is derived from software contributed to Berkeley by
   12  * The Mach Operating System project at Carnegie-Mellon University.
   13  *
   14  * Redistribution and use in source and binary forms, with or without
   15  * modification, are permitted provided that the following conditions
   16  * are met:
   17  * 1. Redistributions of source code must retain the above copyright
   18  *    notice, this list of conditions and the following disclaimer.
   19  * 2. Redistributions in binary form must reproduce the above copyright
   20  *    notice, this list of conditions and the following disclaimer in the
   21  *    documentation and/or other materials provided with the distribution.
   22  * 3. All advertising materials mentioning features or use of this software
   23  *    must display the following acknowledgement:
   24  *      This product includes software developed by the University of
   25  *      California, Berkeley and its contributors.
   26  * 4. Neither the name of the University nor the names of its contributors
   27  *    may be used to endorse or promote products derived from this software
   28  *    without specific prior written permission.
   29  *
   30  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   31  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   32  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   33  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   34  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   35  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   36  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   37  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   38  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   39  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   40  * SUCH DAMAGE.
   41  *
   42  *      from: @(#)vm_pageout.c  7.4 (Berkeley) 5/7/91
   43  *
   44  *
   45  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
   46  * All rights reserved.
   47  *
   48  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
   49  *
   50  * Permission to use, copy, modify and distribute this software and
   51  * its documentation is hereby granted, provided that both the copyright
   52  * notice and this permission notice appear in all copies of the
   53  * software, derivative works or modified versions, and any portions
   54  * thereof, and that both notices appear in supporting documentation.
   55  *
   56  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
   57  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
   58  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
   59  *
   60  * Carnegie Mellon requests users of this software to return to
   61  *
   62  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
   63  *  School of Computer Science
   64  *  Carnegie Mellon University
   65  *  Pittsburgh PA 15213-3890
   66  *
   67  * any improvements or extensions that they make and grant Carnegie the
   68  * rights to redistribute these changes.
   69  */
   70 
   71 /*
   72  *      The proverbial page-out daemon.
   73  */
   74 
   75 #include <sys/cdefs.h>
   76 __FBSDID("$FreeBSD: releng/8.1/sys/vm/vm_pageout.c 208046 2010-05-13 20:31:24Z kib $");
   77 
   78 #include "opt_vm.h"
   79 #include <sys/param.h>
   80 #include <sys/systm.h>
   81 #include <sys/kernel.h>
   82 #include <sys/eventhandler.h>
   83 #include <sys/lock.h>
   84 #include <sys/mutex.h>
   85 #include <sys/proc.h>
   86 #include <sys/kthread.h>
   87 #include <sys/ktr.h>
   88 #include <sys/mount.h>
   89 #include <sys/resourcevar.h>
   90 #include <sys/sched.h>
   91 #include <sys/signalvar.h>
   92 #include <sys/vnode.h>
   93 #include <sys/vmmeter.h>
   94 #include <sys/sx.h>
   95 #include <sys/sysctl.h>
   96 
   97 #include <vm/vm.h>
   98 #include <vm/vm_param.h>
   99 #include <vm/vm_object.h>
  100 #include <vm/vm_page.h>
  101 #include <vm/vm_map.h>
  102 #include <vm/vm_pageout.h>
  103 #include <vm/vm_pager.h>
  104 #include <vm/swap_pager.h>
  105 #include <vm/vm_extern.h>
  106 #include <vm/uma.h>
  107 
  108 /*
  109  * System initialization
  110  */
  111 
  112 /* the kernel process "vm_pageout"*/
  113 static void vm_pageout(void);
  114 static int vm_pageout_clean(vm_page_t);
  115 static void vm_pageout_scan(int pass);
  116 
  117 struct proc *pageproc;
  118 
  119 static struct kproc_desc page_kp = {
  120         "pagedaemon",
  121         vm_pageout,
  122         &pageproc
  123 };
  124 SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start,
  125     &page_kp);
  126 
  127 #if !defined(NO_SWAPPING)
  128 /* the kernel process "vm_daemon"*/
  129 static void vm_daemon(void);
  130 static struct   proc *vmproc;
  131 
  132 static struct kproc_desc vm_kp = {
  133         "vmdaemon",
  134         vm_daemon,
  135         &vmproc
  136 };
  137 SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp);
  138 #endif
  139 
  140 
  141 int vm_pages_needed;            /* Event on which pageout daemon sleeps */
  142 int vm_pageout_deficit;         /* Estimated number of pages deficit */
  143 int vm_pageout_pages_needed;    /* flag saying that the pageout daemon needs pages */
  144 
  145 #if !defined(NO_SWAPPING)
  146 static int vm_pageout_req_swapout;      /* XXX */
  147 static int vm_daemon_needed;
  148 static struct mtx vm_daemon_mtx;
  149 /* Allow for use by vm_pageout before vm_daemon is initialized. */
  150 MTX_SYSINIT(vm_daemon, &vm_daemon_mtx, "vm daemon", MTX_DEF);
  151 #endif
  152 static int vm_max_launder = 32;
  153 static int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0;
  154 static int vm_pageout_full_stats_interval = 0;
  155 static int vm_pageout_algorithm=0;
  156 static int defer_swap_pageouts=0;
  157 static int disable_swap_pageouts=0;
  158 
  159 #if defined(NO_SWAPPING)
  160 static int vm_swap_enabled=0;
  161 static int vm_swap_idle_enabled=0;
  162 #else
  163 static int vm_swap_enabled=1;
  164 static int vm_swap_idle_enabled=0;
  165 #endif
  166 
  167 SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
  168         CTLFLAG_RW, &vm_pageout_algorithm, 0, "LRU page mgmt");
  169 
  170 SYSCTL_INT(_vm, OID_AUTO, max_launder,
  171         CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");
  172 
  173 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
  174         CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");
  175 
  176 SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
  177         CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");
  178 
  179 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
  180         CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");
  181 
  182 #if defined(NO_SWAPPING)
  183 SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
  184         CTLFLAG_RD, &vm_swap_enabled, 0, "Enable entire process swapout");
  185 SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
  186         CTLFLAG_RD, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
  187 #else
  188 SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
  189         CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
  190 SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
  191         CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
  192 #endif
  193 
  194 SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
  195         CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");
  196 
  197 SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
  198         CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");
  199 
  200 static int pageout_lock_miss;
  201 SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
  202         CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");
  203 
  204 #define VM_PAGEOUT_PAGE_COUNT 16
  205 int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;
  206 
  207 int vm_page_max_wired;          /* XXX max # of wired pages system-wide */
  208 SYSCTL_INT(_vm, OID_AUTO, max_wired,
  209         CTLFLAG_RW, &vm_page_max_wired, 0, "System-wide limit to wired page count");
  210 
  211 #if !defined(NO_SWAPPING)
  212 static void vm_pageout_map_deactivate_pages(vm_map_t, long);
  213 static void vm_pageout_object_deactivate_pages(pmap_t, vm_object_t, long);
  214 static void vm_req_vmdaemon(int req);
  215 #endif
  216 static void vm_pageout_page_stats(void);
  217 
  218 /*
  219  * vm_pageout_fallback_object_lock:
  220  * 
  221  * Lock vm object currently associated with `m'. VM_OBJECT_TRYLOCK is
  222  * known to have failed and page queue must be either PQ_ACTIVE or
  223  * PQ_INACTIVE.  To avoid lock order violation, unlock the page queues
  224  * while locking the vm object.  Use marker page to detect page queue
  225  * changes and maintain notion of next page on page queue.  Return
  226  * TRUE if no changes were detected, FALSE otherwise.  vm object is
  227  * locked on return.
  228  * 
  229  * This function depends on both the lock portion of struct vm_object
  230  * and normal struct vm_page being type stable.
  231  */
  232 boolean_t
  233 vm_pageout_fallback_object_lock(vm_page_t m, vm_page_t *next)
  234 {
  235         struct vm_page marker;
  236         boolean_t unchanged;
  237         u_short queue;
  238         vm_object_t object;
  239 
  240         /*
  241          * Initialize our marker
  242          */
  243         bzero(&marker, sizeof(marker));
  244         marker.flags = PG_FICTITIOUS | PG_MARKER;
  245         marker.oflags = VPO_BUSY;
  246         marker.queue = m->queue;
  247         marker.wire_count = 1;
  248 
  249         queue = m->queue;
  250         object = m->object;
  251         
  252         TAILQ_INSERT_AFTER(&vm_page_queues[queue].pl,
  253                            m, &marker, pageq);
  254         vm_page_unlock_queues();
  255         VM_OBJECT_LOCK(object);
  256         vm_page_lock_queues();
  257 
  258         /* Page queue might have changed. */
  259         *next = TAILQ_NEXT(&marker, pageq);
  260         unchanged = (m->queue == queue &&
  261                      m->object == object &&
  262                      &marker == TAILQ_NEXT(m, pageq));
  263         TAILQ_REMOVE(&vm_page_queues[queue].pl,
  264                      &marker, pageq);
  265         return (unchanged);
  266 }
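
The marker dance above is a general pattern for keeping one's place in a
mutable queue while a lock must be dropped.  A minimal userland sketch of
the same idea using <sys/queue.h> follows; the names are illustrative, not
kernel API:

        #include <sys/queue.h>

        struct entry {
                int is_marker;                  /* set on the marker node */
                TAILQ_ENTRY(entry) link;
        };
        TAILQ_HEAD(entry_list, entry);

        /*
         * Insert a marker after the current entry, open a window in which
         * a concurrent actor may edit the list (in the kernel, this is
         * where the page queue lock is dropped), then resume from the
         * marker and unlink it.
         */
        static struct entry *
        resume_after(struct entry_list *q, struct entry *cur,
            struct entry *marker)
        {
                struct entry *next;

                TAILQ_INSERT_AFTER(q, cur, marker, link);
                /* ... the "unlocked" window goes here ... */
                next = TAILQ_NEXT(marker, link);
                TAILQ_REMOVE(q, marker, link);
                return (next);
        }
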
  267 
  268 /*
  269  * vm_pageout_clean:
  270  *
  271  * Clean the page and remove it from the laundry.
  272  * 
  273  * We set the busy bit to cause potential page faults on this page to
   274  * block.  Note the careful timing, however: the busy bit isn't set until
   275  * late, and until then we cannot do anything that would mess with the page.
  276  */
  277 static int
  278 vm_pageout_clean(m)
  279         vm_page_t m;
  280 {
  281         vm_object_t object;
  282         vm_page_t mc[2*vm_pageout_page_count];
  283         int pageout_count;
  284         int ib, is, page_base;
  285         vm_pindex_t pindex = m->pindex;
  286 
  287         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
  288         VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
  289 
  290         /*
  291          * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
  292          * with the new swapper, but we could have serious problems paging
  293          * out other object types if there is insufficient memory.  
  294          *
  295          * Unfortunately, checking free memory here is far too late, so the
  296          * check has been moved up a procedural level.
  297          */
  298 
  299         /*
  300          * Can't clean the page if it's busy or held.
  301          */
  302         if ((m->hold_count != 0) ||
  303             ((m->busy != 0) || (m->oflags & VPO_BUSY))) {
  304                 return 0;
  305         }
  306 
  307         mc[vm_pageout_page_count] = m;
  308         pageout_count = 1;
  309         page_base = vm_pageout_page_count;
  310         ib = 1;
  311         is = 1;
  312 
  313         /*
  314          * Scan object for clusterable pages.
  315          *
  316          * We can cluster ONLY if: ->> the page is NOT
  317          * clean, wired, busy, held, or mapped into a
  318          * buffer, and one of the following:
  319          * 1) The page is inactive, or a seldom used
  320          *    active page.
  321          * -or-
  322          * 2) we force the issue.
  323          *
  324          * During heavy mmap/modification loads the pageout
  325          * daemon can really fragment the underlying file
   326  * due to flushing pages out of order and not trying to
   327  * align the clusters (which leaves sporadic out-of-order
   328  * holes).  To solve this problem we do the reverse scan
  329          * first and attempt to align our cluster, then do a 
  330          * forward scan if room remains.
  331          */
  332         object = m->object;
  333 more:
  334         while (ib && pageout_count < vm_pageout_page_count) {
  335                 vm_page_t p;
  336 
  337                 if (ib > pindex) {
  338                         ib = 0;
  339                         break;
  340                 }
  341 
  342                 if ((p = vm_page_lookup(object, pindex - ib)) == NULL) {
  343                         ib = 0;
  344                         break;
  345                 }
  346                 if ((p->oflags & VPO_BUSY) || p->busy) {
  347                         ib = 0;
  348                         break;
  349                 }
  350                 vm_page_test_dirty(p);
  351                 if (p->dirty == 0 ||
  352                     p->queue != PQ_INACTIVE ||
  353                     p->hold_count != 0) {       /* may be undergoing I/O */
  354                         ib = 0;
  355                         break;
  356                 }
  357                 mc[--page_base] = p;
  358                 ++pageout_count;
  359                 ++ib;
  360                 /*
   361                  * alignment boundary; stop here and switch directions.  Do
  362                  * not clear ib.
  363                  */
  364                 if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
  365                         break;
  366         }
  367 
  368         while (pageout_count < vm_pageout_page_count && 
  369             pindex + is < object->size) {
  370                 vm_page_t p;
  371 
  372                 if ((p = vm_page_lookup(object, pindex + is)) == NULL)
  373                         break;
  374                 if ((p->oflags & VPO_BUSY) || p->busy) {
  375                         break;
  376                 }
  377                 vm_page_test_dirty(p);
  378                 if (p->dirty == 0 ||
  379                     p->queue != PQ_INACTIVE ||
  380                     p->hold_count != 0) {       /* may be undergoing I/O */
  381                         break;
  382                 }
  383                 mc[page_base + pageout_count] = p;
  384                 ++pageout_count;
  385                 ++is;
  386         }
  387 
  388         /*
  389          * If we exhausted our forward scan, continue with the reverse scan
   390          * when possible, even past a page boundary.  This catches boundary
   391          * conditions.
  392          */
  393         if (ib && pageout_count < vm_pageout_page_count)
  394                 goto more;
  395 
  396         /*
  397          * we allow reads during pageouts...
  398          */
  399         return (vm_pageout_flush(&mc[page_base], pageout_count, 0));
  400 }
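
The reverse-then-forward scan above exists to align flush clusters on
vm_pageout_page_count boundaries so the pager sees contiguous, ordered
runs.  A self-contained sketch of the arithmetic, assuming every
neighboring page were eligible (CLUSTER stands in for
vm_pageout_page_count):

        #include <stdio.h>

        #define CLUSTER 16              /* cf. VM_PAGEOUT_PAGE_COUNT */

        /*
         * The reverse scan stops at the previous CLUSTER-aligned index
         * and the forward scan fills whatever room remains, so the ideal
         * cluster around pindex is the aligned window containing it.
         */
        static void
        cluster_window(unsigned long pindex, unsigned long *lo,
            unsigned long *hi)
        {
                *lo = pindex - (pindex % CLUSTER);      /* aligned start */
                *hi = *lo + CLUSTER - 1;                /* inclusive end */
        }

        int
        main(void)
        {
                unsigned long lo, hi;

                cluster_window(37, &lo, &hi);
                printf("cluster around 37: [%lu, %lu]\n", lo, hi);
                return (0);                     /* prints [32, 47] */
        }
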
  401 
  402 /*
  403  * vm_pageout_flush() - launder the given pages
  404  *
   405  *      The given pages are laundered.  Note that we set up for the start of
   406  *      I/O (i.e. busy the page), mark it read-only, and bump the object
   407  *      reference count all in here rather than in the parent.  If we want
  408  *      the parent to do more sophisticated things we may have to change
  409  *      the ordering.
  410  */
  411 int
  412 vm_pageout_flush(vm_page_t *mc, int count, int flags)
  413 {
  414         vm_object_t object = mc[0]->object;
  415         int pageout_status[count];
  416         int numpagedout = 0;
  417         int i;
  418 
  419         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
  420         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
  421         /*
  422          * Initiate I/O.  Bump the vm_page_t->busy counter and
  423          * mark the pages read-only.
  424          *
   425  * We do not have to fix up the clean/dirty bits here... we can
  426          * allow the pager to do it after the I/O completes.
  427          *
  428          * NOTE! mc[i]->dirty may be partial or fragmented due to an
  429          * edge case with file fragments.
  430          */
  431         for (i = 0; i < count; i++) {
  432                 KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
  433                     ("vm_pageout_flush: partially invalid page %p index %d/%d",
  434                         mc[i], i, count));
  435                 vm_page_io_start(mc[i]);
  436                 pmap_remove_write(mc[i]);
  437         }
  438         vm_page_unlock_queues();
  439         vm_object_pip_add(object, count);
  440 
  441         vm_pager_put_pages(object, mc, count, flags, pageout_status);
  442 
  443         vm_page_lock_queues();
  444         for (i = 0; i < count; i++) {
  445                 vm_page_t mt = mc[i];
  446 
  447                 KASSERT(pageout_status[i] == VM_PAGER_PEND ||
  448                     (mt->flags & PG_WRITEABLE) == 0,
  449                     ("vm_pageout_flush: page %p is not write protected", mt));
  450                 switch (pageout_status[i]) {
  451                 case VM_PAGER_OK:
  452                 case VM_PAGER_PEND:
  453                         numpagedout++;
  454                         break;
  455                 case VM_PAGER_BAD:
  456                         /*
  457                          * Page outside of range of object. Right now we
  458                          * essentially lose the changes by pretending it
  459                          * worked.
  460                          */
  461                         vm_page_undirty(mt);
  462                         break;
  463                 case VM_PAGER_ERROR:
  464                 case VM_PAGER_FAIL:
  465                         /*
  466                          * If page couldn't be paged out, then reactivate the
   467                          * page so it doesn't clog the inactive list.  (We
   468                          * will try paging it out again later.)
  469                          */
  470                         vm_page_activate(mt);
  471                         break;
  472                 case VM_PAGER_AGAIN:
  473                         break;
  474                 }
  475 
  476                 /*
  477                  * If the operation is still going, leave the page busy to
  478                  * block all other accesses. Also, leave the paging in
  479                  * progress indicator set so that we don't attempt an object
  480                  * collapse.
  481                  */
  482                 if (pageout_status[i] != VM_PAGER_PEND) {
  483                         vm_object_pip_wakeup(object);
  484                         vm_page_io_finish(mt);
  485                         if (vm_page_count_severe())
  486                                 vm_page_try_to_cache(mt);
  487                 }
  488         }
  489         return numpagedout;
  490 }
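
The per-page dispatch in vm_pageout_flush() is a collect-then-classify
pattern: every status is handled, but only synchronous and pending
completions count as progress.  A hedged userland sketch, with an enum
standing in for the kernel's VM_PAGER_* codes:

        /* Stand-ins for a subset of the kernel's VM_PAGER_* results. */
        enum pager_status { PS_OK, PS_PEND, PS_BAD, PS_ERROR, PS_AGAIN };

        /*
         * Mirror of the loop above: OK and still-pending writes count,
         * everything else is handled (undirty, reactivate, or requeue)
         * without counting.
         */
        static int
        count_paged_out(const enum pager_status *status, int count)
        {
                int i, numpagedout = 0;

                for (i = 0; i < count; i++) {
                        switch (status[i]) {
                        case PS_OK:
                        case PS_PEND:
                                numpagedout++;
                                break;
                        case PS_BAD:    /* outside object: changes dropped */
                        case PS_ERROR:  /* reactivated for a later retry */
                        case PS_AGAIN:  /* transient; left on the queue */
                                break;
                        }
                }
                return (numpagedout);
        }
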
  491 
  492 #if !defined(NO_SWAPPING)
  493 /*
  494  *      vm_pageout_object_deactivate_pages
  495  *
   496  *      Deactivate enough pages to satisfy the inactive target
   497  *      requirements; or, if vm_page_proc_limit is set,
   498  *      deactivate all of the pages in the object and its
   499  *      backing_objects.
  500  *
  501  *      The object and map must be locked.
  502  */
  503 static void
  504 vm_pageout_object_deactivate_pages(pmap, first_object, desired)
  505         pmap_t pmap;
  506         vm_object_t first_object;
  507         long desired;
  508 {
  509         vm_object_t backing_object, object;
  510         vm_page_t p, next;
  511         int actcount, rcount, remove_mode;
  512 
  513         VM_OBJECT_LOCK_ASSERT(first_object, MA_OWNED);
  514         if (first_object->type == OBJT_DEVICE ||
  515             first_object->type == OBJT_SG ||
  516             first_object->type == OBJT_PHYS)
  517                 return;
  518         for (object = first_object;; object = backing_object) {
  519                 if (pmap_resident_count(pmap) <= desired)
  520                         goto unlock_return;
  521                 if (object->paging_in_progress)
  522                         goto unlock_return;
  523 
  524                 remove_mode = 0;
  525                 if (object->shadow_count > 1)
  526                         remove_mode = 1;
  527                 /*
   528                  * scan the object's entire memory queue
  529                  */
  530                 rcount = object->resident_page_count;
  531                 p = TAILQ_FIRST(&object->memq);
  532                 vm_page_lock_queues();
  533                 while (p && (rcount-- > 0)) {
  534                         if (pmap_resident_count(pmap) <= desired) {
  535                                 vm_page_unlock_queues();
  536                                 goto unlock_return;
  537                         }
  538                         next = TAILQ_NEXT(p, listq);
  539                         cnt.v_pdpages++;
  540                         if (p->wire_count != 0 ||
  541                             p->hold_count != 0 ||
  542                             p->busy != 0 ||
  543                             (p->oflags & VPO_BUSY) ||
  544                             (p->flags & PG_UNMANAGED) ||
  545                             !pmap_page_exists_quick(pmap, p)) {
  546                                 p = next;
  547                                 continue;
  548                         }
  549                         actcount = pmap_ts_referenced(p);
  550                         if (actcount) {
  551                                 vm_page_flag_set(p, PG_REFERENCED);
  552                         } else if (p->flags & PG_REFERENCED) {
  553                                 actcount = 1;
  554                         }
  555                         if ((p->queue != PQ_ACTIVE) &&
  556                                 (p->flags & PG_REFERENCED)) {
  557                                 vm_page_activate(p);
  558                                 p->act_count += actcount;
  559                                 vm_page_flag_clear(p, PG_REFERENCED);
  560                         } else if (p->queue == PQ_ACTIVE) {
  561                                 if ((p->flags & PG_REFERENCED) == 0) {
  562                                         p->act_count -= min(p->act_count, ACT_DECLINE);
  563                                         if (!remove_mode && (vm_pageout_algorithm || (p->act_count == 0))) {
  564                                                 pmap_remove_all(p);
  565                                                 vm_page_deactivate(p);
  566                                         } else {
  567                                                 vm_page_requeue(p);
  568                                         }
  569                                 } else {
  570                                         vm_page_activate(p);
  571                                         vm_page_flag_clear(p, PG_REFERENCED);
  572                                         if (p->act_count < (ACT_MAX - ACT_ADVANCE))
  573                                                 p->act_count += ACT_ADVANCE;
  574                                         vm_page_requeue(p);
  575                                 }
  576                         } else if (p->queue == PQ_INACTIVE) {
  577                                 pmap_remove_all(p);
  578                         }
  579                         p = next;
  580                 }
  581                 vm_page_unlock_queues();
  582                 if ((backing_object = object->backing_object) == NULL)
  583                         goto unlock_return;
  584                 VM_OBJECT_LOCK(backing_object);
  585                 if (object != first_object)
  586                         VM_OBJECT_UNLOCK(object);
  587         }
  588 unlock_return:
  589         if (object != first_object)
  590                 VM_OBJECT_UNLOCK(object);
  591 }
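
The act_count arithmetic above is the aging half of the pseudo-LRU: a
referenced page gains ACT_ADVANCE (capped at ACT_MAX), while an
unreferenced page loses ACT_DECLINE and becomes a deactivation candidate
at zero.  A sketch of the rule in isolation (the constant values here are
illustrative; the kernel defines them in vm_page.h):

        #define ACT_DECLINE     1
        #define ACT_ADVANCE     3
        #define ACT_MAX         64

        /* One aging step for a page, given its referenced bit. */
        static int
        age_page(int act_count, int referenced)
        {
                if (referenced) {
                        if (act_count < ACT_MAX - ACT_ADVANCE)
                                act_count += ACT_ADVANCE;
                } else {
                        act_count -= (act_count < ACT_DECLINE ?
                            act_count : ACT_DECLINE);
                }
                return (act_count);
        }
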
  592 
  593 /*
   594  * Deactivate some number of pages in a map; we try to do it fairly, but
   595  * that is really hard to do.
  596  */
  597 static void
  598 vm_pageout_map_deactivate_pages(map, desired)
  599         vm_map_t map;
  600         long desired;
  601 {
  602         vm_map_entry_t tmpe;
  603         vm_object_t obj, bigobj;
  604         int nothingwired;
  605 
  606         if (!vm_map_trylock(map))
  607                 return;
  608 
  609         bigobj = NULL;
  610         nothingwired = TRUE;
  611 
  612         /*
  613          * first, search out the biggest object, and try to free pages from
  614          * that.
  615          */
  616         tmpe = map->header.next;
  617         while (tmpe != &map->header) {
  618                 if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
  619                         obj = tmpe->object.vm_object;
  620                         if (obj != NULL && VM_OBJECT_TRYLOCK(obj)) {
  621                                 if (obj->shadow_count <= 1 &&
  622                                     (bigobj == NULL ||
  623                                      bigobj->resident_page_count < obj->resident_page_count)) {
  624                                         if (bigobj != NULL)
  625                                                 VM_OBJECT_UNLOCK(bigobj);
  626                                         bigobj = obj;
  627                                 } else
  628                                         VM_OBJECT_UNLOCK(obj);
  629                         }
  630                 }
  631                 if (tmpe->wired_count > 0)
  632                         nothingwired = FALSE;
  633                 tmpe = tmpe->next;
  634         }
  635 
  636         if (bigobj != NULL) {
  637                 vm_pageout_object_deactivate_pages(map->pmap, bigobj, desired);
  638                 VM_OBJECT_UNLOCK(bigobj);
  639         }
  640         /*
  641          * Next, hunt around for other pages to deactivate.  We actually
  642          * do this search sort of wrong -- .text first is not the best idea.
  643          */
  644         tmpe = map->header.next;
  645         while (tmpe != &map->header) {
  646                 if (pmap_resident_count(vm_map_pmap(map)) <= desired)
  647                         break;
  648                 if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
  649                         obj = tmpe->object.vm_object;
  650                         if (obj != NULL) {
  651                                 VM_OBJECT_LOCK(obj);
  652                                 vm_pageout_object_deactivate_pages(map->pmap, obj, desired);
  653                                 VM_OBJECT_UNLOCK(obj);
  654                         }
  655                 }
  656                 tmpe = tmpe->next;
  657         }
  658 
  659         /*
   660          * Remove all mappings if a process is swapped out; this will free
   661          * page table pages.
  662          */
  663         if (desired == 0 && nothingwired) {
  664                 pmap_remove(vm_map_pmap(map), vm_map_min(map),
  665                     vm_map_max(map));
  666         }
  667         vm_map_unlock(map);
  668 }
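
The "biggest object first" pass above keeps at most one candidate locked
at a time and never sleeps on a lock.  A minimal sketch of that idiom;
try_lock()/unlock() are hypothetical stand-ins for VM_OBJECT_TRYLOCK()
and VM_OBJECT_UNLOCK():

        #include <stdbool.h>
        #include <stddef.h>

        struct obj {
                size_t resident;        /* cf. resident_page_count */
                struct obj *next;
        };

        static bool try_lock(struct obj *o) { (void)o; return (true); }
        static void unlock(struct obj *o) { (void)o; }

        /*
         * Walk the list; when a bigger object is locked, release the
         * previous best, and skip anything that cannot be locked
         * without sleeping.
         */
        static struct obj *
        pick_biggest(struct obj *head)
        {
                struct obj *big = NULL, *o;

                for (o = head; o != NULL; o = o->next) {
                        if (!try_lock(o))
                                continue;
                        if (big == NULL || big->resident < o->resident) {
                                if (big != NULL)
                                        unlock(big);
                                big = o;        /* stays locked */
                        } else
                                unlock(o);
                }
                return (big);           /* locked, or NULL */
        }
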
  669 #endif          /* !defined(NO_SWAPPING) */
  670 
  671 /*
  672  *      vm_pageout_scan does the dirty work for the pageout daemon.
  673  */
  674 static void
  675 vm_pageout_scan(int pass)
  676 {
  677         vm_page_t m, next;
  678         struct vm_page marker;
  679         int page_shortage, maxscan, pcount;
  680         int addl_page_shortage, addl_page_shortage_init;
  681         vm_object_t object;
  682         int actcount;
  683         int vnodes_skipped = 0;
  684         int maxlaunder;
  685 
  686         /*
  687          * Decrease registered cache sizes.
  688          */
  689         EVENTHANDLER_INVOKE(vm_lowmem, 0);
  690         /*
  691          * We do this explicitly after the caches have been drained above.
  692          */
  693         uma_reclaim();
  694 
  695         addl_page_shortage_init = atomic_readandclear_int(&vm_pageout_deficit);
  696 
  697         /*
  698          * Calculate the number of pages we want to either free or move
  699          * to the cache.
  700          */
  701         page_shortage = vm_paging_target() + addl_page_shortage_init;
  702 
  703         /*
  704          * Initialize our marker
  705          */
  706         bzero(&marker, sizeof(marker));
  707         marker.flags = PG_FICTITIOUS | PG_MARKER;
  708         marker.oflags = VPO_BUSY;
  709         marker.queue = PQ_INACTIVE;
  710         marker.wire_count = 1;
  711 
  712         /*
  713          * Start scanning the inactive queue for pages we can move to the
  714          * cache or free.  The scan will stop when the target is reached or
  715          * we have scanned the entire inactive queue.  Note that m->act_count
  716          * is not used to form decisions for the inactive queue, only for the
  717          * active queue.
  718          *
  719          * maxlaunder limits the number of dirty pages we flush per scan.
  720          * For most systems a smaller value (16 or 32) is more robust under
  721          * extreme memory and disk pressure because any unnecessary writes
   722          * to disk can result in extreme performance degradation.  However,
  723          * systems with excessive dirty pages (especially when MAP_NOSYNC is
  724          * used) will die horribly with limited laundering.  If the pageout
  725          * daemon cannot clean enough pages in the first pass, we let it go
  726          * all out in succeeding passes.
  727          */
  728         if ((maxlaunder = vm_max_launder) <= 1)
  729                 maxlaunder = 1;
  730         if (pass)
  731                 maxlaunder = 10000;
  732         vm_page_lock_queues();
  733 rescan0:
  734         addl_page_shortage = addl_page_shortage_init;
  735         maxscan = cnt.v_inactive_count;
  736 
  737         for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl);
  738              m != NULL && maxscan-- > 0 && page_shortage > 0;
  739              m = next) {
  740 
  741                 cnt.v_pdpages++;
  742 
  743                 if (VM_PAGE_GETQUEUE(m) != PQ_INACTIVE) {
  744                         goto rescan0;
  745                 }
  746 
  747                 next = TAILQ_NEXT(m, pageq);
  748                 object = m->object;
  749 
  750                 /*
  751                  * skip marker pages
  752                  */
  753                 if (m->flags & PG_MARKER)
  754                         continue;
  755 
  756                 /*
  757                  * A held page may be undergoing I/O, so skip it.
  758                  */
  759                 if (m->hold_count) {
  760                         vm_page_requeue(m);
  761                         addl_page_shortage++;
  762                         continue;
  763                 }
  764                 /*
   765                  * Don't mess with busy pages; keep them at the front of the
   766                  * queue, since they are most likely being paged out.
  767                  */
  768                 if (!VM_OBJECT_TRYLOCK(object) &&
  769                     (!vm_pageout_fallback_object_lock(m, &next) ||
  770                      m->hold_count != 0)) {
  771                         VM_OBJECT_UNLOCK(object);
  772                         addl_page_shortage++;
  773                         continue;
  774                 }
  775                 if (m->busy || (m->oflags & VPO_BUSY)) {
  776                         VM_OBJECT_UNLOCK(object);
  777                         addl_page_shortage++;
  778                         continue;
  779                 }
  780 
  781                 /*
  782                  * If the object is not being used, we ignore previous 
  783                  * references.
  784                  */
  785                 if (object->ref_count == 0) {
  786                         vm_page_flag_clear(m, PG_REFERENCED);
  787                         KASSERT(!pmap_page_is_mapped(m),
  788                             ("vm_pageout_scan: page %p is mapped", m));
  789 
  790                 /*
  791                  * Otherwise, if the page has been referenced while in the 
  792                  * inactive queue, we bump the "activation count" upwards, 
  793                  * making it less likely that the page will be added back to 
   794                  * the inactive queue prematurely again.  Here we check the
   795                  * page tables (or emulated bits, if any), since the upper
   796                  * level VM system knows nothing about existing
   797                  * references.
  798                  */
  799                 } else if (((m->flags & PG_REFERENCED) == 0) &&
  800                         (actcount = pmap_ts_referenced(m))) {
  801                         vm_page_activate(m);
  802                         VM_OBJECT_UNLOCK(object);
  803                         m->act_count += (actcount + ACT_ADVANCE);
  804                         continue;
  805                 }
  806 
  807                 /*
  808                  * If the upper level VM system knows about any page 
  809                  * references, we activate the page.  We also set the 
   810                  * "activation count" higher than normal so that we are less
   811                  * likely to place pages back onto the inactive queue again.
  812                  */
  813                 if ((m->flags & PG_REFERENCED) != 0) {
  814                         vm_page_flag_clear(m, PG_REFERENCED);
  815                         actcount = pmap_ts_referenced(m);
  816                         vm_page_activate(m);
  817                         VM_OBJECT_UNLOCK(object);
  818                         m->act_count += (actcount + ACT_ADVANCE + 1);
  819                         continue;
  820                 }
  821 
  822                 /*
  823                  * If the upper level VM system does not believe that the page
  824                  * is fully dirty, but it is mapped for write access, then we
  825                  * consult the pmap to see if the page's dirty status should
  826                  * be updated.
  827                  */
  828                 if (m->dirty != VM_PAGE_BITS_ALL &&
  829                     (m->flags & PG_WRITEABLE) != 0) {
  830                         /*
  831                          * Avoid a race condition: Unless write access is
  832                          * removed from the page, another processor could
  833                          * modify it before all access is removed by the call
  834                          * to vm_page_cache() below.  If vm_page_cache() finds
  835                          * that the page has been modified when it removes all
  836                          * access, it panics because it cannot cache dirty
  837                          * pages.  In principle, we could eliminate just write
  838                          * access here rather than all access.  In the expected
  839                          * case, when there are no last instant modifications
  840                          * to the page, removing all access will be cheaper
  841                          * overall.
  842                          */
  843                         if (pmap_is_modified(m))
  844                                 vm_page_dirty(m);
  845                         else if (m->dirty == 0)
  846                                 pmap_remove_all(m);
  847                 }
  848 
  849                 if (m->valid == 0) {
  850                         /*
  851                          * Invalid pages can be easily freed
  852                          */
  853                         vm_page_free(m);
  854                         cnt.v_dfree++;
  855                         --page_shortage;
  856                 } else if (m->dirty == 0) {
  857                         /*
  858                          * Clean pages can be placed onto the cache queue.
  859                          * This effectively frees them.
  860                          */
  861                         vm_page_cache(m);
  862                         --page_shortage;
  863                 } else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
  864                         /*
  865                          * Dirty pages need to be paged out, but flushing
   866                          * a page is extremely expensive versus freeing
   867                          * a clean page.  Rather than artificially limiting
  868                          * the number of pages we can flush, we instead give
  869                          * dirty pages extra priority on the inactive queue
  870                          * by forcing them to be cycled through the queue
  871                          * twice before being flushed, after which the
  872                          * (now clean) page will cycle through once more
  873                          * before being freed.  This significantly extends
  874                          * the thrash point for a heavily loaded machine.
  875                          */
  876                         vm_page_flag_set(m, PG_WINATCFLS);
  877                         vm_page_requeue(m);
  878                 } else if (maxlaunder > 0) {
  879                         /*
  880                          * We always want to try to flush some dirty pages if
  881                          * we encounter them, to keep the system stable.
  882                          * Normally this number is small, but under extreme
  883                          * pressure where there are insufficient clean pages
  884                          * on the inactive queue, we may have to go all out.
  885                          */
  886                         int swap_pageouts_ok, vfslocked = 0;
  887                         struct vnode *vp = NULL;
  888                         struct mount *mp = NULL;
  889 
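                        /*
                         * In plain terms: vnode-backed pages can always
                         * be laundered; swap-backed pages can be
                         * laundered only if pageouts to swap are neither
                         * deferred nor disabled, except that a deferral
                         * is overridden once free memory is critically
                         * low.
                         */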
  890                         if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
  891                                 swap_pageouts_ok = 1;
  892                         } else {
   893                                 swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
   894                                 swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
   895                                     vm_page_count_min());
   896 
  897                         }
  898 
  899                         /*
  900                          * We don't bother paging objects that are "dead".  
  901                          * Those objects are in a "rundown" state.
  902                          */
  903                         if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
  904                                 VM_OBJECT_UNLOCK(object);
  905                                 vm_page_requeue(m);
  906                                 continue;
  907                         }
  908 
  909                         /*
  910                          * Following operations may unlock
  911                          * vm_page_queue_mtx, invalidating the 'next'
  912                          * pointer.  To prevent an inordinate number
  913                          * of restarts we use our marker to remember
  914                          * our place.
  915                          *
  916                          */
  917                         TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE].pl,
  918                                            m, &marker, pageq);
  919                         /*
  920                          * The object is already known NOT to be dead.   It
  921                          * is possible for the vget() to block the whole
  922                          * pageout daemon, but the new low-memory handling
  923                          * code should prevent it.
  924                          *
  925                          * The previous code skipped locked vnodes and, worse,
  926                          * reordered pages in the queue.  This results in
  927                          * completely non-deterministic operation and, on a
  928                          * busy system, can lead to extremely non-optimal
  929                          * pageouts.  For example, it can cause clean pages
  930                          * to be freed and dirty pages to be moved to the end
  931                          * of the queue.  Since dirty pages are also moved to
  932                          * the end of the queue once-cleaned, this gives
   933                          * way too large a weighting to deferring the freeing
  934                          * of dirty pages.
  935                          *
  936                          * We can't wait forever for the vnode lock, we might
  937                          * deadlock due to a vn_read() getting stuck in
  938                          * vm_wait while holding this vnode.  We skip the 
  939                          * vnode if we can't get it in a reasonable amount
  940                          * of time.
  941                          */
  942                         if (object->type == OBJT_VNODE) {
  943                                 vp = object->handle;
  944                                 if (vp->v_type == VREG &&
  945                                     vn_start_write(vp, &mp, V_NOWAIT) != 0) {
  946                                         mp = NULL;
  947                                         ++pageout_lock_miss;
  948                                         if (object->flags & OBJ_MIGHTBEDIRTY)
  949                                                 vnodes_skipped++;
  950                                         goto unlock_and_continue;
  951                                 }
  952                                 KASSERT(mp != NULL,
  953                                     ("vp %p with NULL v_mount", vp));
  954                                 vm_page_unlock_queues();
  955                                 vm_object_reference_locked(object);
  956                                 VM_OBJECT_UNLOCK(object);
  957                                 vfslocked = VFS_LOCK_GIANT(vp->v_mount);
  958                                 if (vget(vp, LK_EXCLUSIVE | LK_TIMELOCK,
  959                                     curthread)) {
  960                                         VM_OBJECT_LOCK(object);
  961                                         vm_page_lock_queues();
  962                                         ++pageout_lock_miss;
  963                                         if (object->flags & OBJ_MIGHTBEDIRTY)
  964                                                 vnodes_skipped++;
  965                                         vp = NULL;
  966                                         goto unlock_and_continue;
  967                                 }
  968                                 VM_OBJECT_LOCK(object);
  969                                 vm_page_lock_queues();
  970                                 /*
  971                                  * The page might have been moved to another
  972                                  * queue during potential blocking in vget()
  973                                  * above.  The page might have been freed and
  974                                  * reused for another vnode.
  975                                  */
  976                                 if (VM_PAGE_GETQUEUE(m) != PQ_INACTIVE ||
  977                                     m->object != object ||
  978                                     TAILQ_NEXT(m, pageq) != &marker) {
  979                                         if (object->flags & OBJ_MIGHTBEDIRTY)
  980                                                 vnodes_skipped++;
  981                                         goto unlock_and_continue;
  982                                 }
  983         
  984                                 /*
  985                                  * The page may have been busied during the
  986                                  * blocking in vget().  We don't move the
   987                                  * page back onto the end of the queue;
   988                                  * the statistics are more correct if we don't.
  989                                  */
  990                                 if (m->busy || (m->oflags & VPO_BUSY)) {
  991                                         goto unlock_and_continue;
  992                                 }
  993 
  994                                 /*
  995                                  * If the page has become held it might
  996                                  * be undergoing I/O, so skip it
  997                                  */
  998                                 if (m->hold_count) {
  999                                         vm_page_requeue(m);
 1000                                         if (object->flags & OBJ_MIGHTBEDIRTY)
 1001                                                 vnodes_skipped++;
 1002                                         goto unlock_and_continue;
 1003                                 }
 1004                         }
 1005 
 1006                         /*
 1007                          * If a page is dirty, then it is either being washed
 1008                          * (but not yet cleaned) or it is still in the
 1009                          * laundry.  If it is still in the laundry, then we
 1010                          * start the cleaning operation. 
 1011                          *
  1012                          * Decrement page_shortage on success to account for
 1013                          * the (future) cleaned page.  Otherwise we could wind
 1014                          * up laundering or cleaning too many pages.
 1015                          */
 1016                         if (vm_pageout_clean(m) != 0) {
 1017                                 --page_shortage;
 1018                                 --maxlaunder;
 1019                         }
 1020 unlock_and_continue:
 1021                         VM_OBJECT_UNLOCK(object);
 1022                         if (mp != NULL) {
 1023                                 vm_page_unlock_queues();
 1024                                 if (vp != NULL)
 1025                                         vput(vp);
 1026                                 VFS_UNLOCK_GIANT(vfslocked);
 1027                                 vm_object_deallocate(object);
 1028                                 vn_finished_write(mp);
 1029                                 vm_page_lock_queues();
 1030                         }
 1031                         next = TAILQ_NEXT(&marker, pageq);
 1032                         TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl,
 1033                                      &marker, pageq);
 1034                         continue;
 1035                 }
 1036                 VM_OBJECT_UNLOCK(object);
 1037         }
 1038 
 1039         /*
 1040          * Compute the number of pages we want to try to move from the
 1041          * active queue to the inactive queue.
 1042          */
 1043         page_shortage = vm_paging_target() +
 1044                 cnt.v_inactive_target - cnt.v_inactive_count;
 1045         page_shortage += addl_page_shortage;
 1046 
 1047         /*
 1048          * Scan the active queue for things we can deactivate. We nominally
 1049          * track the per-page activity counter and use it to locate
 1050          * deactivation candidates.
 1051          */
 1052         pcount = cnt.v_active_count;
 1053         m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
 1054 
 1055         while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {
 1056 
 1057                 KASSERT(VM_PAGE_INQUEUE2(m, PQ_ACTIVE),
 1058                     ("vm_pageout_scan: page %p isn't active", m));
 1059 
 1060                 next = TAILQ_NEXT(m, pageq);
 1061                 object = m->object;
 1062                 if ((m->flags & PG_MARKER) != 0) {
 1063                         m = next;
 1064                         continue;
 1065                 }
 1066                 if (!VM_OBJECT_TRYLOCK(object) &&
 1067                     !vm_pageout_fallback_object_lock(m, &next)) {
 1068                         VM_OBJECT_UNLOCK(object);
 1069                         m = next;
 1070                         continue;
 1071                 }
 1072 
 1073                 /*
 1074                  * Don't deactivate pages that are busy.
 1075                  */
 1076                 if ((m->busy != 0) ||
 1077                     (m->oflags & VPO_BUSY) ||
 1078                     (m->hold_count != 0)) {
 1079                         VM_OBJECT_UNLOCK(object);
 1080                         vm_page_requeue(m);
 1081                         m = next;
 1082                         continue;
 1083                 }
 1084 
 1085                 /*
 1086                  * The count for pagedaemon pages is done after checking the
 1087                  * page for eligibility...
 1088                  */
 1089                 cnt.v_pdpages++;
 1090 
 1091                 /*
 1092                  * Check to see "how much" the page has been used.
 1093                  */
 1094                 actcount = 0;
 1095                 if (object->ref_count != 0) {
 1096                         if (m->flags & PG_REFERENCED) {
 1097                                 actcount += 1;
 1098                         }
 1099                         actcount += pmap_ts_referenced(m);
 1100                         if (actcount) {
 1101                                 m->act_count += ACT_ADVANCE + actcount;
 1102                                 if (m->act_count > ACT_MAX)
 1103                                         m->act_count = ACT_MAX;
 1104                         }
 1105                 }
 1106 
 1107                 /*
 1108                  * Since we have "tested" this bit, we need to clear it now.
 1109                  */
 1110                 vm_page_flag_clear(m, PG_REFERENCED);
 1111 
 1112                 /*
  1113                  * Only if an object is currently being used do we use the
  1114                  * page activation count stats.
 1115                  */
 1116                 if (actcount && (object->ref_count != 0)) {
 1117                         vm_page_requeue(m);
 1118                 } else {
 1119                         m->act_count -= min(m->act_count, ACT_DECLINE);
 1120                         if (vm_pageout_algorithm ||
 1121                             object->ref_count == 0 ||
 1122                             m->act_count == 0) {
 1123                                 page_shortage--;
 1124                                 if (object->ref_count == 0) {
 1125                                         pmap_remove_all(m);
 1126                                         if (m->dirty == 0)
 1127                                                 vm_page_cache(m);
 1128                                         else
 1129                                                 vm_page_deactivate(m);
 1130                                 } else {
 1131                                         vm_page_deactivate(m);
 1132                                 }
 1133                         } else {
 1134                                 vm_page_requeue(m);
 1135                         }
 1136                 }
 1137                 VM_OBJECT_UNLOCK(object);
 1138                 m = next;
 1139         }
 1140         vm_page_unlock_queues();
 1141 #if !defined(NO_SWAPPING)
 1142         /*
 1143          * Idle process swapout -- run once per second.
 1144          */
 1145         if (vm_swap_idle_enabled) {
 1146                 static long lsec;
 1147                 if (time_second != lsec) {
 1148                         vm_req_vmdaemon(VM_SWAP_IDLE);
 1149                         lsec = time_second;
 1150                 }
 1151         }
 1152 #endif
 1153                 
 1154         /*
 1155          * If we didn't get enough free pages, and we have skipped a vnode
  1156          * in a writeable object, wake up the sync daemon.  And kick swapout
 1157          * if we did not get enough free pages.
 1158          */
 1159         if (vm_paging_target() > 0) {
 1160                 if (vnodes_skipped && vm_page_count_min())
 1161                         (void) speedup_syncer();
 1162 #if !defined(NO_SWAPPING)
 1163                 if (vm_swap_enabled && vm_page_count_target())
 1164                         vm_req_vmdaemon(VM_SWAP_NORMAL);
 1165 #endif
 1166         }
 1167 
 1168         /*
 1169          * If we are critically low on one of RAM or swap and low on
 1170          * the other, kill the largest process.  However, we avoid
 1171          * doing this on the first pass in order to give ourselves a
 1172          * chance to flush out dirty vnode-backed pages and to allow
 1173          * active pages to be moved to the inactive queue and reclaimed.
 1174          */
 1175         if (pass != 0 &&
 1176             ((swap_pager_avail < 64 && vm_page_count_min()) ||
 1177              (swap_pager_full && vm_paging_target() > 0)))
 1178                 vm_pageout_oom(VM_OOM_MEM);
 1179 }
 1180 
 1181 
 1182 void
 1183 vm_pageout_oom(int shortage)
 1184 {
 1185         struct proc *p, *bigproc;
 1186         vm_offset_t size, bigsize;
 1187         struct thread *td;
 1188         struct vmspace *vm;
 1189 
 1190         /*
 1191          * We keep the process bigproc locked once we find it to keep anyone
 1192          * from messing with it; however, there is a possibility of
  1193          * deadlock if process B is bigproc and one of its child processes
  1194          * attempts to propagate a signal to B while we are waiting for another
  1195          * process's lock while walking this list.  To avoid this, we don't
  1196          * block on the process lock but just skip a process if it is locked.
 1197          */
 1198         bigproc = NULL;
 1199         bigsize = 0;
 1200         sx_slock(&allproc_lock);
 1201         FOREACH_PROC_IN_SYSTEM(p) {
 1202                 int breakout;
 1203 
 1204                 if (PROC_TRYLOCK(p) == 0)
 1205                         continue;
 1206                 /*
 1207                  * Skip system, protected, killed, and (while swap remains) low-pid processes.
 1208                  */
 1209                 if ((p->p_flag & (P_INEXEC | P_PROTECTED | P_SYSTEM)) ||
 1210                     (p->p_pid == 1) || P_KILLED(p) ||
 1211                     ((p->p_pid < 48) && (swap_pager_avail != 0))) {
 1212                         PROC_UNLOCK(p);
 1213                         continue;
 1214                 }
 1215                 /*
 1216                  * If the process is in a non-running type state,
 1217                  * don't touch it.  Check all the threads individually.
 1218                  */
 1219                 breakout = 0;
 1220                 FOREACH_THREAD_IN_PROC(p, td) {
 1221                         thread_lock(td);
 1222                         if (!TD_ON_RUNQ(td) &&
 1223                             !TD_IS_RUNNING(td) &&
 1224                             !TD_IS_SLEEPING(td)) {
 1225                                 thread_unlock(td);
 1226                                 breakout = 1;
 1227                                 break;
 1228                         }
 1229                         thread_unlock(td);
 1230                 }
 1231                 if (breakout) {
 1232                         PROC_UNLOCK(p);
 1233                         continue;
 1234                 }
 1235                 /*
 1236                  * get the process size
 1237                  */
 1238                 vm = vmspace_acquire_ref(p);
 1239                 if (vm == NULL) {
 1240                         PROC_UNLOCK(p);
 1241                         continue;
 1242                 }
 1243                 if (!vm_map_trylock_read(&vm->vm_map)) {
 1244                         vmspace_free(vm);
 1245                         PROC_UNLOCK(p);
 1246                         continue;
 1247                 }
 1248                 size = vmspace_swap_count(vm);
 1249                 vm_map_unlock_read(&vm->vm_map);
 1250                 if (shortage == VM_OOM_MEM)
 1251                         size += vmspace_resident_count(vm);
 1252                 vmspace_free(vm);
 1253                 /*
 1254                  * If this process is bigger than the biggest one
 1255                  * found so far, remember it.
 1256                  */
 1257                 if (size > bigsize) {
 1258                         if (bigproc != NULL)
 1259                                 PROC_UNLOCK(bigproc);
 1260                         bigproc = p;
 1261                         bigsize = size;
 1262                 } else
 1263                         PROC_UNLOCK(p);
 1264         }
 1265         sx_sunlock(&allproc_lock);
 1266         if (bigproc != NULL) {
 1267                 killproc(bigproc, "out of swap space");
 1268                 sched_nice(bigproc, PRIO_MIN);
 1269                 PROC_UNLOCK(bigproc);
 1270                 wakeup(&cnt.v_free_count);
 1271         }
 1272 }
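/*
 * The deadlock-avoidance pattern above, reduced to a standalone sketch
 * (illustrative only; "struct node" and this use of mtx_trylock() are
 * generic, not the kernel's proc locking).  The current best candidate
 * stays locked, but every other lock is only try-acquired, so the scan
 * can never block against a thread that is waiting on the candidate.
 */
struct node {
        struct mtx       lock;
        long             size;
        struct node     *next;
};

static struct node *
pick_biggest(struct node *head)
{
        struct node *n, *best;
        long best_size;

        best = NULL;
        best_size = 0;
        for (n = head; n != NULL; n = n->next) {
                if (mtx_trylock(&n->lock) == 0)
                        continue;       /* Already locked: skip, never block. */
                if (n->size > best_size) {
                        if (best != NULL)
                                mtx_unlock(&best->lock);
                        best = n;
                        best_size = n->size;
                } else
                        mtx_unlock(&n->lock);
        }
        return (best);          /* Still locked, or NULL. */
}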
 1273 
 1274 /*
 1275  * This routine tries to maintain the pseudo-LRU ordering of the
 1276  * active queue, so that some statistics accumulation still occurs
 1277  * during long periods when there is no paging.  This keeps the
 1278  * ordering reasonable for when paging does start to occur.
 1279  */
 1280 static void
 1281 vm_pageout_page_stats(void)
 1282 {
 1283         vm_object_t object;
 1284         vm_page_t m, next;
 1285         int pcount, tpcount;            /* Number of pages to check */
 1286         static int fullintervalcount = 0;
 1287         int page_shortage;
 1288 
 1289         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1290         page_shortage = 
 1291             (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) -
 1292             (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
 1293 
 1294         if (page_shortage <= 0)
 1295                 return;
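        /*
         * Illustrative numbers (assumed, not from the source): with
         * v_inactive_target = 8192, v_cache_max = 4096 and v_free_min =
         * 1024, the scan below runs only while free + inactive + cache
         * < 13312 pages; above that the queues are already well
         * populated and we return early.
         */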
 1296 
 1297         pcount = cnt.v_active_count;
 1298         fullintervalcount += vm_pageout_stats_interval;
 1299         if (fullintervalcount < vm_pageout_full_stats_interval) {
 1300                 tpcount = (int64_t)vm_pageout_stats_max * cnt.v_active_count /
 1301                     cnt.v_page_count;
 1302                 if (pcount > tpcount)
 1303                         pcount = tpcount;
 1304         } else {
 1305                 fullintervalcount = 0;
 1306         }
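        /*
         * A worked example of the partial scan above (illustrative
         * numbers): with vm_pageout_stats_max = 8192, v_active_count =
         * 4096 and v_page_count = 65536, a partial pass checks tpcount =
         * 8192 * 4096 / 65536 = 512 of the active pages; a full pass over
         * all of them happens only once every
         * vm_pageout_full_stats_interval seconds.
         */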
 1307 
 1308         m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
 1309         while ((m != NULL) && (pcount-- > 0)) {
 1310                 int actcount;
 1311 
 1312                 KASSERT(VM_PAGE_INQUEUE2(m, PQ_ACTIVE),
 1313                     ("vm_pageout_page_stats: page %p isn't active", m));
 1314 
 1315                 next = TAILQ_NEXT(m, pageq);
 1316                 object = m->object;
 1317 
 1318                 if ((m->flags & PG_MARKER) != 0) {
 1319                         m = next;
 1320                         continue;
 1321                 }
 1322                 if (!VM_OBJECT_TRYLOCK(object) &&
 1323                     !vm_pageout_fallback_object_lock(m, &next)) {
 1324                         VM_OBJECT_UNLOCK(object);
 1325                         m = next;
 1326                         continue;
 1327                 }
 1328 
 1329                 /*
 1330                  * Don't deactivate pages that are busy.
 1331                  */
 1332                 if ((m->busy != 0) ||
 1333                     (m->oflags & VPO_BUSY) ||
 1334                     (m->hold_count != 0)) {
 1335                         VM_OBJECT_UNLOCK(object);
 1336                         vm_page_requeue(m);
 1337                         m = next;
 1338                         continue;
 1339                 }
 1340 
 1341                 actcount = 0;
 1342                 if (m->flags & PG_REFERENCED) {
 1343                         vm_page_flag_clear(m, PG_REFERENCED);
 1344                         actcount += 1;
 1345                 }
 1346 
 1347                 actcount += pmap_ts_referenced(m);
 1348                 if (actcount) {
 1349                         m->act_count += ACT_ADVANCE + actcount;
 1350                         if (m->act_count > ACT_MAX)
 1351                                 m->act_count = ACT_MAX;
 1352                         vm_page_requeue(m);
 1353                 } else {
 1354                         if (m->act_count == 0) {
 1355                                 /*
 1356                                  * We turn off page access, so that we have
 1357                                  * more accurate RSS stats.  We don't do this
 1358                                  * in the normal page deactivation when the
 1359                                  * system is under VM load, because the cost
 1360                                  * of the many page protect operations would
 1361                                  * outweigh the benefit of the more accurate
 1362                                  * statistics.
 1363                                  */
 1364                                 pmap_remove_all(m);
 1365                                 vm_page_deactivate(m);
 1366                         } else {
 1367                                 m->act_count -= min(m->act_count, ACT_DECLINE);
 1368                                 vm_page_requeue(m);
 1369                         }
 1370                 }
 1371                 VM_OBJECT_UNLOCK(object);
 1372                 m = next;
 1373         }
 1374 }
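/*
 * A standalone model of the act_count aging performed above (illustrative,
 * not part of the source; age_act_count() is a hypothetical helper).  The
 * ACT_* default values cited in the comments are the historical ones from
 * <vm/vm_page.h> and should be treated as assumptions.
 */
static int
age_act_count(int act_count, int references)
{

        if (references > 0) {
                /* Referenced: credit the page (ACT_ADVANCE defaults to 3). */
                act_count += ACT_ADVANCE + references;
                if (act_count > ACT_MAX)        /* ACT_MAX defaults to 64. */
                        act_count = ACT_MAX;
        } else if (act_count > 0)
                /* Idle: decay by ACT_DECLINE (1), toward deactivation at 0. */
                act_count -= min(act_count, ACT_DECLINE);
        return (act_count);
}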
 1375 
 1376 /*
 1377  *      vm_pageout is the high level pageout daemon.
 1378  */
 1379 static void
 1380 vm_pageout(void)
 1381 {
 1382         int error, pass;
 1383 
 1384         /*
 1385          * Initialize some paging parameters.
 1386          */
 1387         cnt.v_interrupt_free_min = 2;
 1388         if (cnt.v_page_count < 2000)
 1389                 vm_pageout_page_count = 8;
 1390 
 1391         /*
 1392          * v_free_reserved needs to include enough for the largest
 1393          * swap pager structures plus enough for any pv_entry structs
 1394          * when paging. 
 1395          */
 1396         if (cnt.v_page_count > 1024)
 1397                 cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
 1398         else
 1399                 cnt.v_free_min = 4;
 1400         cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
 1401             cnt.v_interrupt_free_min;
 1402         cnt.v_free_reserved = vm_pageout_page_count +
 1403             cnt.v_pageout_free_min + (cnt.v_page_count / 768);
 1404         cnt.v_free_severe = cnt.v_free_min / 2;
 1405         cnt.v_free_min += cnt.v_free_reserved;
 1406         cnt.v_free_severe += cnt.v_free_reserved;
 1407 
 1408         /*
 1409          * v_free_target and v_cache_min control pageout hysteresis.  Note
 1410          * that these are more a measure of the VM cache queue hysteresis
 1411          * than the VM free queue.  Specifically, v_free_target is the
 1412          * high water mark (free+cache pages).
 1413          *
 1414          * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
 1415          * low water mark, while v_free_min is the stop.  v_cache_min must
 1416          * be big enough to handle memory needs while the pageout daemon
 1417          * is signalled and run to free more pages.
 1418          */
 1419         if (cnt.v_free_count > 6144)
 1420                 cnt.v_free_target = 4 * cnt.v_free_min + cnt.v_free_reserved;
 1421         else
 1422                 cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;
 1423 
 1424         if (cnt.v_free_count > 2048) {
 1425                 cnt.v_cache_min = cnt.v_free_target;
 1426                 cnt.v_cache_max = 2 * cnt.v_cache_min;
 1427                 cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
 1428         } else {
 1429                 cnt.v_cache_min = 0;
 1430                 cnt.v_cache_max = 0;
 1431                 cnt.v_inactive_target = cnt.v_free_count / 4;
 1432         }
 1433         if (cnt.v_inactive_target > cnt.v_free_count / 3)
 1434                 cnt.v_inactive_target = cnt.v_free_count / 3;
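        /*
         * A worked example of the setup above (illustrative; assumes 4 KB
         * pages, MAXBSIZE = 64 KB, vm_pageout_page_count = 16, and more
         * than 6144 free pages at initialization).  On a machine with
         * 262144 pages (1 GB):
         *
         *      v_free_min         = 4 + (262144 - 1024) / 200  = 1309
         *      v_pageout_free_min = (2 * 65536) / 4096 + 2     = 34
         *      v_free_reserved    = 16 + 34 + 262144 / 768     = 391
         *      v_free_severe      = 1309 / 2 + 391             = 1045
         *      v_free_min (final) = 1309 + 391                 = 1700
         *      v_free_target      = 4 * 1700 + 391             = 7191
         *
         * so the daemon starts reclaiming well before memory is exhausted.
         */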
 1435 
 1436         /* XXX does not really belong here */
 1437         if (vm_page_max_wired == 0)
 1438                 vm_page_max_wired = cnt.v_free_count / 3;
 1439 
 1440         if (vm_pageout_stats_max == 0)
 1441                 vm_pageout_stats_max = cnt.v_free_target;
 1442 
 1443         /*
 1444          * Set interval in seconds for stats scan.
 1445          */
 1446         if (vm_pageout_stats_interval == 0)
 1447                 vm_pageout_stats_interval = 5;
 1448         if (vm_pageout_full_stats_interval == 0)
 1449                 vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;
 1450 
 1451         swap_pager_swap_init();
 1452         pass = 0;
 1453         /*
 1454          * The pageout daemon is never done, so loop forever.
 1455          */
 1456         while (TRUE) {
 1457                 /*
 1458                  * If we have enough free memory, wakeup waiters.  Do
 1459                  * not clear vm_pages_needed until we reach our target,
 1460                  * otherwise we may be woken up over and over again and
 1461                  * waste a lot of cpu.
 1462                  */
 1463                 mtx_lock(&vm_page_queue_free_mtx);
 1464                 if (vm_pages_needed && !vm_page_count_min()) {
 1465                         if (!vm_paging_needed())
 1466                                 vm_pages_needed = 0;
 1467                         wakeup(&cnt.v_free_count);
 1468                 }
 1469                 if (vm_pages_needed) {
 1470                         /*
 1471                          * Still not done, take a second pass without waiting
 1472                          * (unlimited dirty cleaning), otherwise sleep a bit
 1473                          * and try again.
 1474                          */
 1475                         ++pass;
 1476                         if (pass > 1)
 1477                                 msleep(&vm_pages_needed,
 1478                                     &vm_page_queue_free_mtx, PVM, "psleep",
 1479                                     hz / 2);
 1480                 } else {
 1481                         /*
 1482                          * Good enough, sleep & handle stats.  Prime the pass
 1483                          * for the next run.
 1484                          */
 1485                         if (pass > 1)
 1486                                 pass = 1;
 1487                         else
 1488                                 pass = 0;
 1489                         error = msleep(&vm_pages_needed,
 1490                             &vm_page_queue_free_mtx, PVM, "psleep",
 1491                             vm_pageout_stats_interval * hz);
 1492                         if (error && !vm_pages_needed) {
 1493                                 mtx_unlock(&vm_page_queue_free_mtx);
 1494                                 pass = 0;
 1495                                 vm_page_lock_queues();
 1496                                 vm_pageout_page_stats();
 1497                                 vm_page_unlock_queues();
 1498                                 continue;
 1499                         }
 1500                 }
 1501                 if (vm_pages_needed)
 1502                         cnt.v_pdwakeups++;
 1503                 mtx_unlock(&vm_page_queue_free_mtx);
 1504                 vm_pageout_scan(pass);
 1505         }
 1506 }
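/*
 * Summarizing the loop above (illustrative): each iteration either
 * (a) wakes threads sleeping on the free-page count once it has recovered,
 * (b) escalates to pass > 1, napping only briefly between aggressive scans
 * so dirty pages can be laundered, or (c) sleeps for
 * vm_pageout_stats_interval seconds and, when that sleep times out with no
 * shortage, only refreshes page statistics instead of scanning.
 */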
 1507 
 1508 /*
 1509  * Unless the free page queue lock is held by the caller, this function
 1510  * should be regarded as advisory.  Specifically, the caller should
 1511  * not msleep() on &cnt.v_free_count following this function unless
 1512  * the free page queue lock is held until the msleep() is performed.
 1513  */
 1514 void
 1515 pagedaemon_wakeup(void)
 1516 {
 1517 
 1518         if (!vm_pages_needed && curthread->td_proc != pageproc) {
 1519                 vm_pages_needed = 1;
 1520                 wakeup(&vm_pages_needed);
 1521         }
 1522 }
 1523 
 1524 #if !defined(NO_SWAPPING)
 1525 static void
 1526 vm_req_vmdaemon(int req)
 1527 {
 1528         static int lastrun = 0;
 1529 
 1530         mtx_lock(&vm_daemon_mtx);
 1531         vm_pageout_req_swapout |= req;
 1532         if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
 1533                 wakeup(&vm_daemon_needed);
 1534                 lastrun = ticks;
 1535         }
 1536         mtx_unlock(&vm_daemon_mtx);
 1537 }
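/*
 * The once-per-second gate above, as a standalone sketch (illustrative;
 * should_wake() is a hypothetical helper).  The global "ticks" advances hz
 * times per second and may wrap around; the "now < last" test treats a
 * wrap as "long enough ago", since otherwise a wrap could suppress
 * wakeups for a very long time.
 */
static int
should_wake(int now, int *last, int one_sec)
{

        if (now > *last + one_sec || now < *last) {
                *last = now;
                return (1);
        }
        return (0);
}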
 1538 
 1539 static void
 1540 vm_daemon(void)
 1541 {
 1542         struct rlimit rsslim;
 1543         struct proc *p;
 1544         struct thread *td;
 1545         struct vmspace *vm;
 1546         int breakout, swapout_flags;
 1547 
 1548         while (TRUE) {
 1549                 mtx_lock(&vm_daemon_mtx);
 1550                 msleep(&vm_daemon_needed, &vm_daemon_mtx, PPAUSE, "psleep", 0);
 1551                 swapout_flags = vm_pageout_req_swapout;
 1552                 vm_pageout_req_swapout = 0;
 1553                 mtx_unlock(&vm_daemon_mtx);
 1554                 if (swapout_flags)
 1555                         swapout_procs(swapout_flags);
 1556 
 1557                 /*
 1558                  * Scan the processes for those exceeding their RSS
 1559                  * limit or swapped out -- deactivate their pages.
 1560                  */
 1561                 sx_slock(&allproc_lock);
 1562                 FOREACH_PROC_IN_SYSTEM(p) {
 1563                         vm_pindex_t limit, size;
 1564 
 1565                         /*
 1566                          * if this is a system process or if we have already
 1567                          * looked at this process, skip it.
 1568                          */
 1569                         PROC_LOCK(p);
 1570                         if (p->p_flag & (P_INEXEC | P_SYSTEM | P_WEXIT)) {
 1571                                 PROC_UNLOCK(p);
 1572                                 continue;
 1573                         }
 1574                         /*
 1575                          * if the process is in a non-running type state,
 1576                          * don't touch it.
 1577                          */
 1578                         breakout = 0;
 1579                         FOREACH_THREAD_IN_PROC(p, td) {
 1580                                 thread_lock(td);
 1581                                 if (!TD_ON_RUNQ(td) &&
 1582                                     !TD_IS_RUNNING(td) &&
 1583                                     !TD_IS_SLEEPING(td)) {
 1584                                         thread_unlock(td);
 1585                                         breakout = 1;
 1586                                         break;
 1587                                 }
 1588                                 thread_unlock(td);
 1589                         }
 1590                         if (breakout) {
 1591                                 PROC_UNLOCK(p);
 1592                                 continue;
 1593                         }
 1594                         /*
 1595                          * get a limit
 1596                          */
 1597                         lim_rlimit(p, RLIMIT_RSS, &rsslim);
 1598                         limit = OFF_TO_IDX(
 1599                             qmin(rsslim.rlim_cur, rsslim.rlim_max));
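                        /*
                         * Illustrative: OFF_TO_IDX converts a byte count to
                         * a page index by shifting right by PAGE_SHIFT, so
                         * with 4 KB pages a 64 MB RSS limit becomes
                         * 67108864 >> 12 = 16384 pages.
                         */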
 1600 
 1601                         /*
 1602                          * Let processes that are swapped out really be
 1603                          * swapped out: set the limit to nothing (this
 1604                          * will force a swap-out).
 1605                          */
 1606                         if ((p->p_flag & P_INMEM) == 0)
 1607                                 limit = 0;      /* XXX */
 1608                         vm = vmspace_acquire_ref(p);
 1609                         PROC_UNLOCK(p);
 1610                         if (vm == NULL)
 1611                                 continue;
 1612 
 1613                         size = vmspace_resident_count(vm);
 1614                         if (size >= limit) {
 1615                                 vm_pageout_map_deactivate_pages(
 1616                                     &vm->vm_map, limit);
 1617                         }
 1618                         vmspace_free(vm);
 1619                 }
 1620                 sx_sunlock(&allproc_lock);
 1621         }
 1622 }
 1623 #endif                  /* !defined(NO_SWAPPING) */
