FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_swapout.c


/*-
 * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 * Copyright (c) 2005 Yahoo! Technologies Norway AS
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)vm_pageout.c  7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/12.0/sys/vm/vm_swapout.c 339968 2018-10-31 19:28:05Z markj $");

#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/_kstack_cache.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/mount.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

/* the kernel process "vm_daemon" */
static void vm_daemon(void);
static struct proc *vmproc;

static struct kproc_desc vm_kp = {
        "vmdaemon",
        vm_daemon,
        &vmproc
};
SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp);
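
/*
 * The SYSINIT above makes kproc_start() create the "vmdaemon" kernel
 * process at the SI_SUB_KTHREAD_VM stage of boot; its main loop,
 * vm_daemon() below, never returns.
 */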

static int vm_swap_enabled = 1;
static int vm_swap_idle_enabled = 0;

SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled, CTLFLAG_RW,
    &vm_swap_enabled, 0,
    "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled, CTLFLAG_RW,
    &vm_swap_idle_enabled, 0,
    "Allow swapout on idle criteria");
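
/*
 * Usage sketch (shell commands, not part of this file): both knobs are
 * ordinary read-write sysctls, so whole-process swapout policy can be
 * changed at runtime, e.g.:
 *
 *      # sysctl vm.swap_enabled=1
 *      # sysctl vm.swap_idle_enabled=1
 */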

/*
 * swap_idle_threshold1 is the guaranteed time (in seconds) that a
 * process remains swapped in before it becomes eligible for swapout.
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
    &swap_idle_threshold1, 0,
    "Guaranteed swapped in time for a process");

/*
 * swap_idle_threshold2 is the time (in seconds) that a process can be
 * idle before it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
    &swap_idle_threshold2, 0,
    "Time before a process will be swapped out");

static int vm_pageout_req_swapout;      /* XXX */
static int vm_daemon_needed;
static struct mtx vm_daemon_mtx;
/* Allow for use by vm_pageout before vm_daemon is initialized. */
MTX_SYSINIT(vm_daemon, &vm_daemon_mtx, "vm daemon", MTX_DEF);

static int swapped_cnt;
static int swap_inprogress;     /* Pending swap-ins done outside swapper. */
static int last_swapin;

static void swapclear(struct proc *);
static int swapout(struct proc *);
static void vm_swapout_map_deactivate_pages(vm_map_t, long);
static void vm_swapout_object_deactivate_pages(pmap_t, vm_object_t, long);
static void swapout_procs(int action);
static void vm_req_vmdaemon(int req);
static void vm_thread_swapout(struct thread *td);

/*
 *      vm_swapout_object_deactivate_pages
 *
 *      Deactivate enough pages to satisfy the inactive target
 *      requirements.
 *
 *      The object and map must be locked.
 */
static void
vm_swapout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object,
    long desired)
{
        vm_object_t backing_object, object;
        vm_page_t p;
        int act_delta, remove_mode;

        VM_OBJECT_ASSERT_LOCKED(first_object);
        if ((first_object->flags & OBJ_FICTITIOUS) != 0)
                return;
        for (object = first_object;; object = backing_object) {
                if (pmap_resident_count(pmap) <= desired)
                        goto unlock_return;
                VM_OBJECT_ASSERT_LOCKED(object);
                if ((object->flags & OBJ_UNMANAGED) != 0 ||
                    object->paging_in_progress != 0)
                        goto unlock_return;

                remove_mode = 0;
                if (object->shadow_count > 1)
                        remove_mode = 1;
                /*
                 * Scan the object's entire memory queue.
                 */
                TAILQ_FOREACH(p, &object->memq, listq) {
                        if (pmap_resident_count(pmap) <= desired)
                                goto unlock_return;
                        if (should_yield())
                                goto unlock_return;
                        if (vm_page_busied(p))
                                continue;
                        VM_CNT_INC(v_pdpages);
                        vm_page_lock(p);
                        if (vm_page_held(p) ||
                            !pmap_page_exists_quick(pmap, p)) {
                                vm_page_unlock(p);
                                continue;
                        }
                        act_delta = pmap_ts_referenced(p);
                        if ((p->aflags & PGA_REFERENCED) != 0) {
                                if (act_delta == 0)
                                        act_delta = 1;
                                vm_page_aflag_clear(p, PGA_REFERENCED);
                        }
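                        /*
                         * act_delta now counts recent references seen by
                         * the pmap (pmap_ts_referenced()) plus the
                         * PGA_REFERENCED soft flag.  Referenced pages are
                         * (re)activated and aged up; unreferenced active
                         * pages age down by ACT_DECLINE and are
                         * deactivated once act_count reaches zero.
                         */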
                        if (!vm_page_active(p) && act_delta != 0) {
                                vm_page_activate(p);
                                p->act_count += act_delta;
                        } else if (vm_page_active(p)) {
                                if (act_delta == 0) {
                                        p->act_count -= min(p->act_count,
                                            ACT_DECLINE);
                                        if (!remove_mode && p->act_count == 0) {
                                                pmap_remove_all(p);
                                                vm_page_deactivate(p);
                                        } else
                                                vm_page_requeue(p);
                                } else {
                                        vm_page_activate(p);
                                        if (p->act_count < ACT_MAX -
                                            ACT_ADVANCE)
                                                p->act_count += ACT_ADVANCE;
                                        vm_page_requeue(p);
                                }
                        } else if (vm_page_inactive(p))
                                pmap_remove_all(p);
                        vm_page_unlock(p);
                }
                if ((backing_object = object->backing_object) == NULL)
                        goto unlock_return;
                VM_OBJECT_RLOCK(backing_object);
                if (object != first_object)
                        VM_OBJECT_RUNLOCK(object);
        }
unlock_return:
        if (object != first_object)
                VM_OBJECT_RUNLOCK(object);
}

/*
 * Deactivate some number of pages in a map; try to do it fairly, though
 * that is hard to do.
 */
static void
vm_swapout_map_deactivate_pages(vm_map_t map, long desired)
{
        vm_map_entry_t tmpe;
        vm_object_t obj, bigobj;
        int nothingwired;

        if (!vm_map_trylock_read(map))
                return;

        bigobj = NULL;
        nothingwired = TRUE;

        /*
         * first, search out the biggest object, and try to free pages from
         * that.
         */
        tmpe = map->header.next;
        while (tmpe != &map->header) {
                if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
                        obj = tmpe->object.vm_object;
                        if (obj != NULL && VM_OBJECT_TRYRLOCK(obj)) {
                                if (obj->shadow_count <= 1 &&
                                    (bigobj == NULL ||
                                     bigobj->resident_page_count <
                                     obj->resident_page_count)) {
                                        if (bigobj != NULL)
                                                VM_OBJECT_RUNLOCK(bigobj);
                                        bigobj = obj;
                                } else
                                        VM_OBJECT_RUNLOCK(obj);
                        }
                }
                if (tmpe->wired_count > 0)
                        nothingwired = FALSE;
                tmpe = tmpe->next;
        }

        if (bigobj != NULL) {
                vm_swapout_object_deactivate_pages(map->pmap, bigobj, desired);
                VM_OBJECT_RUNLOCK(bigobj);
        }
        /*
         * Next, hunt around for other pages to deactivate.  We actually
         * do this search sort of wrong -- .text first is not the best idea.
         */
        tmpe = map->header.next;
        while (tmpe != &map->header) {
                if (pmap_resident_count(vm_map_pmap(map)) <= desired)
                        break;
                if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
                        obj = tmpe->object.vm_object;
                        if (obj != NULL) {
                                VM_OBJECT_RLOCK(obj);
                                vm_swapout_object_deactivate_pages(map->pmap,
                                    obj, desired);
                                VM_OBJECT_RUNLOCK(obj);
                        }
                }
                tmpe = tmpe->next;
        }

        /*
         * Remove all mappings if a process is swapped out; this will free
         * page table pages.
         */
        if (desired == 0 && nothingwired) {
                pmap_remove(vm_map_pmap(map), vm_map_min(map),
                    vm_map_max(map));
        }

        vm_map_unlock_read(map);
}

/*
 * Swap out requests
 */
#define VM_SWAP_NORMAL 1
#define VM_SWAP_IDLE 2

void
vm_swapout_run(void)
{

        if (vm_swap_enabled)
                vm_req_vmdaemon(VM_SWAP_NORMAL);
}

/*
 * Idle process swapout -- run once per second when pagedaemons are
 * reclaiming pages.
 */
void
vm_swapout_run_idle(void)
{
        static long lsec;

        if (!vm_swap_idle_enabled || time_second == lsec)
                return;
        vm_req_vmdaemon(VM_SWAP_IDLE);
        lsec = time_second;
}

static void
vm_req_vmdaemon(int req)
{
        static int lastrun = 0;

        mtx_lock(&vm_daemon_mtx);
        vm_pageout_req_swapout |= req;
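        /*
         * Rate-limit wakeups of the vm daemon to about one per second;
         * the "ticks < lastrun" test restarts the interval when the
         * ticks counter wraps around.
         */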
        if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
                wakeup(&vm_daemon_needed);
                lastrun = ticks;
        }
        mtx_unlock(&vm_daemon_mtx);
}

static void
vm_daemon(void)
{
        struct rlimit rsslim;
        struct proc *p;
        struct thread *td;
        struct vmspace *vm;
        int breakout, swapout_flags, tryagain, attempts;
#ifdef RACCT
        uint64_t rsize, ravailable;
#endif

        while (TRUE) {
                mtx_lock(&vm_daemon_mtx);
                msleep(&vm_daemon_needed, &vm_daemon_mtx, PPAUSE, "psleep",
#ifdef RACCT
                    racct_enable ? hz : 0
#else
                    0
#endif
                );
                swapout_flags = vm_pageout_req_swapout;
                vm_pageout_req_swapout = 0;
                mtx_unlock(&vm_daemon_mtx);
                if (swapout_flags != 0) {
                        /*
                         * Drain the per-CPU page queue batches as a deadlock
                         * avoidance measure.
                         */
                        if ((swapout_flags & VM_SWAP_NORMAL) != 0)
                                vm_page_drain_pqbatch();
                        swapout_procs(swapout_flags);
                }

                /*
                 * Scan the processes for those exceeding their RSS rlimit
                 * or that are swapped out, and deactivate their pages.
                 */
                tryagain = 0;
                attempts = 0;
again:
                attempts++;
                sx_slock(&allproc_lock);
                FOREACH_PROC_IN_SYSTEM(p) {
                        vm_pindex_t limit, size;

                        /*
                         * if this is a system process or if we have already
                         * looked at this process, skip it.
                         */
                        PROC_LOCK(p);
                        if (p->p_state != PRS_NORMAL ||
                            p->p_flag & (P_INEXEC | P_SYSTEM | P_WEXIT)) {
                                PROC_UNLOCK(p);
                                continue;
                        }
                        /*
                         * If any thread is in an unexpected transient
                         * state (not running, on a run queue, sleeping,
                         * or suspended), don't touch the process.
                         */
                        breakout = 0;
                        FOREACH_THREAD_IN_PROC(p, td) {
                                thread_lock(td);
                                if (!TD_ON_RUNQ(td) &&
                                    !TD_IS_RUNNING(td) &&
                                    !TD_IS_SLEEPING(td) &&
                                    !TD_IS_SUSPENDED(td)) {
                                        thread_unlock(td);
                                        breakout = 1;
                                        break;
                                }
                                thread_unlock(td);
                        }
                        if (breakout) {
                                PROC_UNLOCK(p);
                                continue;
                        }
                        /*
                         * get a limit
                         */
                        lim_rlimit_proc(p, RLIMIT_RSS, &rsslim);
                        limit = OFF_TO_IDX(
                            qmin(rsslim.rlim_cur, rsslim.rlim_max));

                        /*
                         * Let processes that are swapped out really be
                         * swapped out: set the limit to nothing, which will
                         * force a swap-out.
                         */
                        if ((p->p_flag & P_INMEM) == 0)
                                limit = 0;      /* XXX */
                        vm = vmspace_acquire_ref(p);
                        _PHOLD_LITE(p);
                        PROC_UNLOCK(p);
                        if (vm == NULL) {
                                PRELE(p);
                                continue;
                        }
                        sx_sunlock(&allproc_lock);

                        size = vmspace_resident_count(vm);
                        if (size >= limit) {
                                vm_swapout_map_deactivate_pages(
                                    &vm->vm_map, limit);
                                size = vmspace_resident_count(vm);
                        }
#ifdef RACCT
                        if (racct_enable) {
                                rsize = IDX_TO_OFF(size);
                                PROC_LOCK(p);
                                if (p->p_state == PRS_NORMAL)
                                        racct_set(p, RACCT_RSS, rsize);
                                ravailable = racct_get_available(p, RACCT_RSS);
                                PROC_UNLOCK(p);
                                if (rsize > ravailable) {
                                        /*
                                         * Don't be overly aggressive; this
                                         * might be an innocent process,
                                         * and the limit could've been exceeded
                                         * by some memory hog.  Don't try
                                         * to deactivate more than 1/4th
                                         * of the process's resident set size.
                                         */
                                        if (attempts <= 8) {
                                                if (ravailable < rsize -
                                                    (rsize / 4)) {
                                                        ravailable = rsize -
                                                            (rsize / 4);
                                                }
                                        }
                                        vm_swapout_map_deactivate_pages(
                                            &vm->vm_map,
                                            OFF_TO_IDX(ravailable));
                                        /* Update RSS usage after paging out. */
                                        size = vmspace_resident_count(vm);
                                        rsize = IDX_TO_OFF(size);
                                        PROC_LOCK(p);
                                        if (p->p_state == PRS_NORMAL)
                                                racct_set(p, RACCT_RSS, rsize);
                                        PROC_UNLOCK(p);
                                        if (rsize > ravailable)
                                                tryagain = 1;
                                }
                        }
#endif
                        vmspace_free(vm);
                        sx_slock(&allproc_lock);
                        PRELE(p);
                }
                sx_sunlock(&allproc_lock);
                if (tryagain != 0 && attempts <= 10) {
                        maybe_yield();
                        goto again;
                }
        }
}

/*
 * Allow a thread's kernel stack to be paged out.
 */
static void
vm_thread_swapout(struct thread *td)
{
        vm_object_t ksobj;
        vm_page_t m;
        int i, pages;

        cpu_thread_swapout(td);
        pages = td->td_kstack_pages;
        ksobj = td->td_kstack_obj;
        pmap_qremove(td->td_kstack, pages);
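        /*
         * The KVA mappings are gone; mark each stack page dirty and
         * release its wiring into the laundry queue below so that the
         * laundry thread eventually writes it to swap.
         */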
        VM_OBJECT_WLOCK(ksobj);
        for (i = 0; i < pages; i++) {
                m = vm_page_lookup(ksobj, i);
                if (m == NULL)
                        panic("vm_thread_swapout: kstack already missing?");
                vm_page_dirty(m);
                vm_page_lock(m);
                vm_page_unwire(m, PQ_LAUNDRY);
                vm_page_unlock(m);
        }
        VM_OBJECT_WUNLOCK(ksobj);
}

/*
 * Bring the kernel stack for a specified thread back in.
 */
static void
vm_thread_swapin(struct thread *td, int oom_alloc)
{
        vm_object_t ksobj;
        vm_page_t ma[KSTACK_MAX_PAGES];
        int a, count, i, j, pages, rv;

        pages = td->td_kstack_pages;
        ksobj = td->td_kstack_obj;
        VM_OBJECT_WLOCK(ksobj);
        (void)vm_page_grab_pages(ksobj, 0, oom_alloc | VM_ALLOC_WIRED, ma,
            pages);
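        /*
         * Pages that are already valid are simply unbusied; runs of
         * invalid pages are clustered into single pager reads below,
         * each bounded by the pager's read-ahead count "a".
         */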
        for (i = 0; i < pages;) {
                vm_page_assert_xbusied(ma[i]);
                if (ma[i]->valid == VM_PAGE_BITS_ALL) {
                        vm_page_xunbusy(ma[i]);
                        i++;
                        continue;
                }
                vm_object_pip_add(ksobj, 1);
                for (j = i + 1; j < pages; j++)
                        if (ma[j]->valid == VM_PAGE_BITS_ALL)
                                break;
                rv = vm_pager_has_page(ksobj, ma[i]->pindex, NULL, &a);
                KASSERT(rv == 1, ("%s: missing page %p", __func__, ma[i]));
                count = min(a + 1, j - i);
                rv = vm_pager_get_pages(ksobj, ma + i, count, NULL, NULL);
                KASSERT(rv == VM_PAGER_OK, ("%s: cannot get kstack for proc %d",
                    __func__, td->td_proc->p_pid));
                vm_object_pip_wakeup(ksobj);
                for (j = i; j < i + count; j++)
                        vm_page_xunbusy(ma[j]);
                i += count;
        }
        VM_OBJECT_WUNLOCK(ksobj);
        pmap_qenter(td->td_kstack, ma, pages);
        cpu_thread_swapin(td);
}

void
faultin(struct proc *p)
{
        struct thread *td;
        int oom_alloc;

        PROC_LOCK_ASSERT(p, MA_OWNED);

        /*
         * If another process is swapping in this process,
         * just wait until it finishes.
         */
        if (p->p_flag & P_SWAPPINGIN) {
                while (p->p_flag & P_SWAPPINGIN)
                        msleep(&p->p_flag, &p->p_mtx, PVM, "faultin", 0);
                return;
        }

        if ((p->p_flag & P_INMEM) == 0) {
                oom_alloc = (p->p_flag & P_WKILLED) != 0 ? VM_ALLOC_SYSTEM :
                    VM_ALLOC_NORMAL;
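                /*
                 * A process marked P_WKILLED is being killed to reclaim
                 * memory; VM_ALLOC_SYSTEM lets its stack pages come from
                 * the system reserve so that the process can run to exit
                 * promptly.
                 */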

                /*
                 * Don't let another thread swap process p out while we are
                 * busy swapping it in.
                 */
                ++p->p_lock;
                p->p_flag |= P_SWAPPINGIN;
                PROC_UNLOCK(p);
                sx_xlock(&allproc_lock);
                MPASS(swapped_cnt > 0);
                swapped_cnt--;
                if (curthread != &thread0)
                        swap_inprogress++;
                sx_xunlock(&allproc_lock);

                /*
                 * We hold no lock here because the list of threads
                 * cannot change while all threads in the process are
                 * swapped out.
                 */
                FOREACH_THREAD_IN_PROC(p, td)
                        vm_thread_swapin(td, oom_alloc);

                if (curthread != &thread0) {
                        sx_xlock(&allproc_lock);
                        MPASS(swap_inprogress > 0);
                        swap_inprogress--;
                        last_swapin = ticks;
                        sx_xunlock(&allproc_lock);
                }
                PROC_LOCK(p);
                swapclear(p);
                p->p_swtick = ticks;

                /* Allow other threads to swap p out now. */
                wakeup(&p->p_flag);
                --p->p_lock;
        }
}

/*
 * This swapin algorithm attempts to swap in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 */

static struct proc *
swapper_selector(bool wkilled_only)
{
        struct proc *p, *res;
        struct thread *td;
        int ppri, pri, slptime, swtime;

        sx_assert(&allproc_lock, SA_SLOCKED);
        if (swapped_cnt == 0)
                return (NULL);
        res = NULL;
        ppri = INT_MIN;
        FOREACH_PROC_IN_SYSTEM(p) {
                PROC_LOCK(p);
                if (p->p_state == PRS_NEW || (p->p_flag & (P_SWAPPINGOUT |
                    P_SWAPPINGIN | P_INMEM)) != 0) {
                        PROC_UNLOCK(p);
                        continue;
                }
                if (p->p_state == PRS_NORMAL && (p->p_flag & P_WKILLED) != 0) {
                        /*
                         * A swapped-out process might have mapped a
                         * large portion of the system's pages as
                         * anonymous memory.  There is no way to release
                         * the memory other than to kill the process,
                         * for which we need to swap it in.
                         */
                        return (p);
                }
                if (wkilled_only) {
                        PROC_UNLOCK(p);
                        continue;
                }
                swtime = (ticks - p->p_swtick) / hz;
                FOREACH_THREAD_IN_PROC(p, td) {
                        /*
                         * An otherwise runnable thread of a process
                         * swapped out has only the TDI_SWAPPED bit set.
                         */
                        thread_lock(td);
                        if (td->td_inhibitors == TDI_SWAPPED) {
                                slptime = (ticks - td->td_slptick) / hz;
                                pri = swtime + slptime;
                                if ((td->td_flags & TDF_SWAPINREQ) == 0)
                                        pri -= p->p_nice * 8;
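                                /*
                                 * Worked example: a process swapped out
                                 * for 10 s whose thread has slept 5 s at
                                 * nice 0 scores 15.  A positive nice
                                 * lowers the score, and the nice penalty
                                 * is waived when swapin was explicitly
                                 * requested via TDF_SWAPINREQ.
                                 */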
                                /*
                                 * if this thread is higher priority
                                 * and there is enough space, then select
                                 * this process instead of the previous
                                 * selection.
                                 */
                                if (pri > ppri) {
                                        res = p;
                                        ppri = pri;
                                }
                        }
                        thread_unlock(td);
                }
                PROC_UNLOCK(p);
        }

        if (res != NULL)
                PROC_LOCK(res);
        return (res);
}

#define SWAPIN_INTERVAL (MAXSLP * hz / 2)
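
/*
 * MAXSLP is historically 20 seconds, so this works out to roughly ten
 * seconds' worth of ticks.
 */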

/*
 * Limit the swapper to swapping in one non-WKILLED process per
 * MAXSLP/2-second interval, and only when:
 * - at least one domain is not suffering from a shortage of free
 *   memory;
 * - there are no parallel swap-ins;
 * - there has been no other swap-in in the current SWAPIN_INTERVAL.
 */
static bool
swapper_wkilled_only(void)
{

        return (vm_page_count_min_set(&all_domains) || swap_inprogress > 0 ||
            (u_int)(ticks - last_swapin) < SWAPIN_INTERVAL);
}

void
swapper(void)
{
        struct proc *p;

        for (;;) {
                sx_slock(&allproc_lock);
                p = swapper_selector(swapper_wkilled_only());
                sx_sunlock(&allproc_lock);

                if (p == NULL) {
                        tsleep(&proc0, PVM, "swapin", SWAPIN_INTERVAL);
                } else {
                        PROC_LOCK_ASSERT(p, MA_OWNED);

                        /*
                         * Another process may be bringing or may have
                         * already brought this process in while we
                         * traverse all threads.  Or, this process may
                         * have exited or even be swapped out
                         * again.
                         */
                        if (p->p_state == PRS_NORMAL && (p->p_flag & (P_INMEM |
                            P_SWAPPINGOUT | P_SWAPPINGIN)) == 0) {
                                faultin(p);
                        }
                        PROC_UNLOCK(p);
                }
        }
}

/*
 * Swap out eligible processes.  Under memory pressure (VM_SWAP_NORMAL),
 * a process is eligible once all of its threads have been sleeping or
 * stopped for at least "swap_idle_threshold1" seconds.  For idle-criteria
 * swapouts (VM_SWAP_IDLE alone), threads must also have been idle for at
 * least "swap_idle_threshold2" seconds.
 */
static void
swapout_procs(int action)
{
        struct proc *p;
        struct thread *td;
        int slptime;
        bool didswap, doswap;

        MPASS((action & (VM_SWAP_NORMAL | VM_SWAP_IDLE)) != 0);

        didswap = false;
        sx_slock(&allproc_lock);
        FOREACH_PROC_IN_SYSTEM(p) {
                /*
                 * Filter out not yet fully constructed processes.  Do
                 * not swap out held processes.  Avoid processes which
                 * are system, exiting, execing, traced, already swapped
                 * out or are in the process of being swapped in or out.
                 */
                PROC_LOCK(p);
                if (p->p_state != PRS_NORMAL || p->p_lock != 0 || (p->p_flag &
                    (P_SYSTEM | P_WEXIT | P_INEXEC | P_STOPPED_SINGLE |
                    P_TRACED | P_SWAPPINGOUT | P_SWAPPINGIN | P_INMEM)) !=
                    P_INMEM) {
                        PROC_UNLOCK(p);
                        continue;
                }

                /*
                 * Further consideration of this process for swap out
                 * requires iterating over its threads.  We release
                 * allproc_lock here so that process creation and
                 * destruction are not blocked while we iterate.
                 *
                 * To later reacquire allproc_lock and resume
                 * iteration over the allproc list, we will first have
                 * to release the lock on the process.  We place a
                 * hold on the process so that it remains in the
                 * allproc list while it is unlocked.
                 */
                _PHOLD_LITE(p);
                sx_sunlock(&allproc_lock);

                /*
                 * Do not swapout a realtime process.
                 * Guarantee swap_idle_threshold1 time in memory.
                 * If the system is under memory stress, or if we are
                 * swapping idle processes >= swap_idle_threshold2,
                 * then swap the process out.
                 */
                doswap = true;
                FOREACH_THREAD_IN_PROC(p, td) {
                        thread_lock(td);
                        slptime = (ticks - td->td_slptick) / hz;
                        if (PRI_IS_REALTIME(td->td_pri_class) ||
                            slptime < swap_idle_threshold1 ||
                            !thread_safetoswapout(td) ||
                            ((action & VM_SWAP_NORMAL) == 0 &&
                            slptime < swap_idle_threshold2))
                                doswap = false;
                        thread_unlock(td);
                        if (!doswap)
                                break;
                }
                doswap = doswap && swapout(p) == 0;
                if (doswap)
                        didswap = true;

                PROC_UNLOCK(p);
                /*
                 * Count this process in swapped_cnt only if it was
                 * swapped out on this iteration; testing the sticky
                 * "didswap" flag here would over-count swapped-out
                 * processes once a single swapout had succeeded.
                 */
                if (doswap) {
                        sx_xlock(&allproc_lock);
                        swapped_cnt++;
                        sx_downgrade(&allproc_lock);
                } else
                        sx_slock(&allproc_lock);
                PRELE(p);
        }
        sx_sunlock(&allproc_lock);

        /*
         * If we swapped something out, wake up the swapper process, which
         * sleeps on &proc0, in case another process needs memory.
         */
        if (didswap)
                wakeup(&proc0);
}

static void
swapclear(struct proc *p)
{
        struct thread *td;

        PROC_LOCK_ASSERT(p, MA_OWNED);

        FOREACH_THREAD_IN_PROC(p, td) {
                thread_lock(td);
                td->td_flags |= TDF_INMEM;
                td->td_flags &= ~TDF_SWAPINREQ;
                TD_CLR_SWAPPED(td);
                if (TD_CAN_RUN(td))
                        if (setrunnable(td)) {
#ifdef INVARIANTS
                                /*
                                 * XXX: We just cleared TDI_SWAPPED
                                 * above and set TDF_INMEM, so this
                                 * should never happen.
                                 */
                                panic("not waking up swapper");
#endif
                        }
                thread_unlock(td);
        }
        p->p_flag &= ~(P_SWAPPINGIN | P_SWAPPINGOUT);
        p->p_flag |= P_INMEM;
}

static int
swapout(struct proc *p)
{
        struct thread *td;

        PROC_LOCK_ASSERT(p, MA_OWNED);

        /*
         * The states of this process and its threads may have changed
         * by now.  Assuming that there is only one pageout daemon thread,
         * this process should still be in memory.
         */
        KASSERT((p->p_flag & (P_INMEM | P_SWAPPINGOUT | P_SWAPPINGIN)) ==
            P_INMEM, ("swapout: lost a swapout race?"));

        /*
         * Remember the resident count.
         */
        p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);

        /*
         * Check and mark all threads before we proceed.
         */
        p->p_flag &= ~P_INMEM;
        p->p_flag |= P_SWAPPINGOUT;
        FOREACH_THREAD_IN_PROC(p, td) {
                thread_lock(td);
                if (!thread_safetoswapout(td)) {
                        thread_unlock(td);
                        swapclear(p);
                        return (EBUSY);
                }
                td->td_flags &= ~TDF_INMEM;
                TD_SET_SWAPPED(td);
                thread_unlock(td);
        }
        td = FIRST_THREAD_IN_PROC(p);
        ++td->td_ru.ru_nswap;
        PROC_UNLOCK(p);

        /*
         * This list is stable because all threads are now prevented from
         * running.  The list is only modified in the context of a running
         * thread in this process.
         */
        FOREACH_THREAD_IN_PROC(p, td)
                vm_thread_swapout(td);

        PROC_LOCK(p);
        p->p_flag &= ~P_SWAPPINGOUT;
        p->p_swtick = ticks;
        return (0);
}
