
FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_glue.c


    1 /*-
    2  * Copyright (c) 1991, 1993
    3  *      The Regents of the University of California.  All rights reserved.
    4  *
    5  * This code is derived from software contributed to Berkeley by
    6  * The Mach Operating System project at Carnegie-Mellon University.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  * 4. Neither the name of the University nor the names of its contributors
   17  *    may be used to endorse or promote products derived from this software
   18  *    without specific prior written permission.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   30  * SUCH DAMAGE.
   31  *
   32  *      from: @(#)vm_glue.c     8.6 (Berkeley) 1/5/94
   33  *
   34  *
   35  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
   36  * All rights reserved.
   37  *
   38  * Permission to use, copy, modify and distribute this software and
   39  * its documentation is hereby granted, provided that both the copyright
   40  * notice and this permission notice appear in all copies of the
   41  * software, derivative works or modified versions, and any portions
   42  * thereof, and that both notices appear in supporting documentation.
   43  *
   44  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
   45  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
   46  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
   47  *
   48  * Carnegie Mellon requests users of this software to return to
   49  *
   50  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
   51  *  School of Computer Science
   52  *  Carnegie Mellon University
   53  *  Pittsburgh PA 15213-3890
   54  *
   55  * any improvements or extensions that they make and grant Carnegie the
   56  * rights to redistribute these changes.
   57  */
   58 
   59 #include <sys/cdefs.h>
   60 __FBSDID("$FreeBSD$");
   61 
   62 #include "opt_vm.h"
   63 #include "opt_kstack_pages.h"
   64 #include "opt_kstack_max_pages.h"
   65 
   66 #include <sys/param.h>
   67 #include <sys/systm.h>
   68 #include <sys/limits.h>
   69 #include <sys/lock.h>
   70 #include <sys/mutex.h>
   71 #include <sys/proc.h>
   72 #include <sys/resourcevar.h>
   73 #include <sys/sched.h>
   74 #include <sys/sf_buf.h>
   75 #include <sys/shm.h>
   76 #include <sys/vmmeter.h>
   77 #include <sys/sx.h>
   78 #include <sys/sysctl.h>
   79 
   80 #include <sys/kernel.h>
   81 #include <sys/ktr.h>
   82 #include <sys/unistd.h>
   83 
   84 #include <vm/vm.h>
   85 #include <vm/vm_param.h>
   86 #include <vm/pmap.h>
   87 #include <vm/vm_map.h>
   88 #include <vm/vm_page.h>
   89 #include <vm/vm_pageout.h>
   90 #include <vm/vm_object.h>
   91 #include <vm/vm_kern.h>
   92 #include <vm/vm_extern.h>
   93 #include <vm/vm_pager.h>
   94 #include <vm/swap_pager.h>
   95 
   96 extern int maxslp;
   97 
   98 /*
   99  * System initialization
  100  *
  101  * Note: proc0 from proc.h
  102  */
  103 static void vm_init_limits(void *);
  104 SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0)
  105 
  106 /*
  107  * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
  108  *
  109  * Note: run scheduling should be divorced from the vm system.
  110  */
  111 static void scheduler(void *);
  112 SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_ANY, scheduler, NULL)
  113 
  114 #ifndef NO_SWAPPING
  115 static void swapout(struct proc *);
  116 #endif
  117 
  118 
  119 static volatile int proc0_rescan;
  120 
  121 
  122 /*
  123  * MPSAFE
  124  *
  125  * WARNING!  This code calls vm_map_check_protection() which only checks
  126  * the associated vm_map_entry range.  It does not determine whether the
  127  * contents of the memory are actually readable or writable.  In most cases
  128  * just checking the vm_map_entry is sufficient within the kernel's address
  129  * space.
  130  */
  131 int
  132 kernacc(addr, len, rw)
  133         void *addr;
  134         int len, rw;
  135 {
  136         boolean_t rv;
  137         vm_offset_t saddr, eaddr;
  138         vm_prot_t prot;
  139 
  140         KASSERT((rw & ~VM_PROT_ALL) == 0,
  141             ("illegal ``rw'' argument to kernacc (%x)\n", rw));
  142 
  143         if ((vm_offset_t)addr + len > kernel_map->max_offset ||
  144             (vm_offset_t)addr + len < (vm_offset_t)addr)
  145                 return (FALSE);
  146 
  147         prot = rw;
  148         saddr = trunc_page((vm_offset_t)addr);
  149         eaddr = round_page((vm_offset_t)addr + len);
  150         vm_map_lock_read(kernel_map);
  151         rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
  152         vm_map_unlock_read(kernel_map);
  153         return (rv == TRUE);
  154 }
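      /*
       * A minimal usage sketch (hypothetical caller, not taken from
       * this file): validate a kernel-space range before touching it,
       * bearing in mind the warning above that only the map entries
       * are checked:
       *
       *      if (!kernacc(kva, len, VM_PROT_READ | VM_PROT_WRITE))
       *              return (EFAULT);
       */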
  155 
  156 /*
  157  * MPSAFE
  158  *
  159  * WARNING!  This code calls vm_map_check_protection() which only checks
  160  * the associated vm_map_entry range.  It does not determine whether the
  161  * contents of the memory are actually readable or writable.  vmapbuf(),
  162  * vm_fault_quick(), or the copyin()/copyout()/su*()/fu*() functions should
  163  * be used in conjunction with this call.
  164  */
  165 int
  166 useracc(addr, len, rw)
  167         void *addr;
  168         int len, rw;
  169 {
  170         boolean_t rv;
  171         vm_prot_t prot;
  172         vm_map_t map;
  173 
  174         KASSERT((rw & ~VM_PROT_ALL) == 0,
  175             ("illegal ``rw'' argument to useracc (%x)\n", rw));
  176         prot = rw;
  177         map = &curproc->p_vmspace->vm_map;
  178         if ((vm_offset_t)addr + len > vm_map_max(map) ||
  179             (vm_offset_t)addr + len < (vm_offset_t)addr) {
  180                 return (FALSE);
  181         }
  182         vm_map_lock_read(map);
  183         rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
  184             round_page((vm_offset_t)addr + len), prot);
  185         vm_map_unlock_read(map);
  186         return (rv == TRUE);
  187 }
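      /*
       * A minimal usage sketch (hypothetical driver fragment): pair
       * useracc() with copyin()/copyout() or vm_fault_quick(), as the
       * warning above suggests, since useracc() alone does not fault
       * any pages in:
       *
       *      if (!useracc(uaddr, len, VM_PROT_WRITE))
       *              return (EFAULT);
       *      error = copyout(kbuf, uaddr, len);
       */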
  188 
  189 int
  190 vslock(void *addr, size_t len)
  191 {
  192         vm_offset_t end, last, start;
  193         vm_size_t npages;
  194         int error;
  195 
  196         last = (vm_offset_t)addr + len;
  197         start = trunc_page((vm_offset_t)addr);
  198         end = round_page(last);
  199         if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)
  200                 return (EINVAL);
  201         npages = atop(end - start);
  202         if (npages > vm_page_max_wired)
  203                 return (ENOMEM);
  204         PROC_LOCK(curproc);
  205         if (ptoa(npages +
  206             pmap_wired_count(vm_map_pmap(&curproc->p_vmspace->vm_map))) >
  207             lim_cur(curproc, RLIMIT_MEMLOCK)) {
  208                 PROC_UNLOCK(curproc);
  209                 return (ENOMEM);
  210         }
  211         PROC_UNLOCK(curproc);
  212 #if 0
  213         /*
  214          * XXX - not yet
  215          *
  216          * The limit for transient usage of wired pages should be
  217          * larger than for "permanent" wired pages (mlock()).
  218          *
  219          * Also, the sysctl code, which is the only present user
  220          * of vslock(), does a hard loop on EAGAIN.
  221          */
  222         if (npages + cnt.v_wire_count > vm_page_max_wired)
  223                 return (EAGAIN);
  224 #endif
  225         error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
  226             VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
  227         /*
  228          * Return EFAULT on error to match copy{in,out}() behaviour
  229          * rather than returning ENOMEM like mlock() would.
  230          */
  231         return (error == KERN_SUCCESS ? 0 : EFAULT);
  232 }
  233 
  234 void
  235 vsunlock(void *addr, size_t len)
  236 {
  237 
  238         /* Rely on the parameter sanity checks performed by vslock(). */
  239         (void)vm_map_unwire(&curproc->p_vmspace->vm_map,
  240             trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
  241             VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
  242 }
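      /*
       * A sketch of the intended vslock()/vsunlock() pairing, modeled
       * on the sysctl code that is the main caller of these routines
       * (buffer names are hypothetical): wire the user pages first so
       * a later copyout() cannot fault, then unwire them when done:
       *
       *      error = vslock(uaddr, len);
       *      if (error != 0)
       *              return (error);
       *      error = copyout(kbuf, uaddr, len);
       *      vsunlock(uaddr, len);
       */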
  243 
  244 /*
  245  * Pin the page contained within the given object at the given offset.  If the
  246  * page is not resident, allocate and load it using the given object's pager.
  247  * Return the pinned page if successful; otherwise, return NULL.
  248  */
  249 static vm_page_t
  250 vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
  251 {
  252         vm_page_t m, ma[1];
  253         vm_pindex_t pindex;
  254         int rv;
  255 
  256         VM_OBJECT_LOCK(object);
  257         pindex = OFF_TO_IDX(offset);
  258         m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
  259         if ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
  260                 ma[0] = m;
  261                 rv = vm_pager_get_pages(object, ma, 1, 0);
  262                 m = vm_page_lookup(object, pindex);
  263                 if (m == NULL)
  264                         goto out;
  265                 if (m->valid == 0 || rv != VM_PAGER_OK) {
  266                         vm_page_lock_queues();
  267                         vm_page_free(m);
  268                         vm_page_unlock_queues();
  269                         m = NULL;
  270                         goto out;
  271                 }
  272         }
  273         vm_page_lock_queues();
  274         vm_page_hold(m);
  275         vm_page_wakeup(m);
  276         vm_page_unlock_queues();
  277 out:
  278         VM_OBJECT_UNLOCK(object);
  279         return (m);
  280 }
  281 
  282 /*
  283  * Return a CPU private mapping to the page at the given offset within the
  284  * given object.  The page is pinned before it is mapped.
  285  */
  286 struct sf_buf *
  287 vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset)
  288 {
  289         vm_page_t m;
  290 
  291         m = vm_imgact_hold_page(object, offset);
  292         if (m == NULL)
  293                 return (NULL);
  294         sched_pin();
  295         return (sf_buf_alloc(m, SFB_CPUPRIVATE));
  296 }
  297 
  298 /*
  299  * Destroy the given CPU private mapping and unpin the page that it mapped.
  300  */
  301 void
  302 vm_imgact_unmap_page(struct sf_buf *sf)
  303 {
  304         vm_page_t m;
  305 
  306         m = sf_buf_page(sf);
  307         sf_buf_free(sf);
  308         sched_unpin();
  309         vm_page_lock_queues();
  310         vm_page_unhold(m);
  311         vm_page_unlock_queues();
  312 }
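      /*
       * A usage sketch for the two routines above (hypothetical
       * image-activator fragment, assuming the standard sf_buf_kva()
       * accessor): map a page of the executable, copy out of it, then
       * drop both the mapping and the hold:
       *
       *      struct sf_buf *sf;
       *
       *      sf = vm_imgact_map_page(object, offset);
       *      if (sf == NULL)
       *              return (EIO);
       *      bcopy((void *)sf_buf_kva(sf), dest, PAGE_SIZE);
       *      vm_imgact_unmap_page(sf);
       */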
  313 
  314 #ifndef KSTACK_MAX_PAGES
  315 #define KSTACK_MAX_PAGES 32
  316 #endif
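      /*
       * For scale (assuming the common 4KB PAGE_SIZE, which is
       * machine-dependent): this cap bounds a kernel stack at
       * 32 * 4KB = 128KB, in addition to the unmapped guard pages
       * placed below the stack.
       */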
  317 
  318 /*
  319  * Create the kernel stack (including pcb for i386) for a new thread.
  320  * This routine directly affects fork performance for a process and
  321  * creation performance for a thread.
  322  */
  323 void
  324 vm_thread_new(struct thread *td, int pages)
  325 {
  326         vm_object_t ksobj;
  327         vm_offset_t ks;
  328         vm_page_t m, ma[KSTACK_MAX_PAGES];
  329         int i;
  330 
  331         /* Bounds check */
  332         if (pages <= 1)
  333                 pages = KSTACK_PAGES;
  334         else if (pages > KSTACK_MAX_PAGES)
  335                 pages = KSTACK_MAX_PAGES;
  336         /*
  337          * Allocate an object for the kstack.
  338          */
  339         ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
  340         td->td_kstack_obj = ksobj;
  341         /*
  342          * Get a kernel virtual address for this thread's kstack.
  343          */
  344         ks = kmem_alloc_nofault(kernel_map,
  345            (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
  346         if (ks == 0)
  347                 panic("vm_thread_new: kstack allocation failed");
  348         if (KSTACK_GUARD_PAGES != 0) {
  349                 pmap_qremove(ks, KSTACK_GUARD_PAGES);
  350                 ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
  351         }
  352         td->td_kstack = ks;
  353         /*
  354          * Knowing the number of pages allocated is useful when you
  355          * want to deallocate them.
  356          */
  357         td->td_kstack_pages = pages;
  358         /* 
  359  * For the length of the stack, link in a real page of RAM for each
  360          * page of stack.
  361          */
  362         VM_OBJECT_LOCK(ksobj);
  363         for (i = 0; i < pages; i++) {
  364                 /*
  365                  * Get a kernel stack page.
  366                  */
  367                 m = vm_page_grab(ksobj, i, VM_ALLOC_NOBUSY |
  368                     VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
  369                 ma[i] = m;
  370                 m->valid = VM_PAGE_BITS_ALL;
  371         }
  372         VM_OBJECT_UNLOCK(ksobj);
  373         pmap_qenter(ks, ma, pages);
  374 }
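      /*
       * The resulting KVA layout, sketched here for the common case of
       * KSTACK_GUARD_PAGES = 1 (the guard count is machine-dependent):
       *
       *      ks - PAGE_SIZE   ks                  ks + pages * PAGE_SIZE
       *            |          |                              |
       *            +----------+------------------------------+
       *            |  guard   |      wired stack pages       |
       *            | unmapped |      (stack grows down)      |
       *            +----------+------------------------------+
       *
       * An overflow past the low end of the stack hits the unmapped
       * guard page and faults instead of silently corrupting the
       * adjacent kernel allocation.
       */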
  375 
  376 /*
  377  * Dispose of a thread's kernel stack.
  378  */
  379 void
  380 vm_thread_dispose(struct thread *td)
  381 {
  382         vm_object_t ksobj;
  383         vm_offset_t ks;
  384         vm_page_t m;
  385         int i, pages;
  386 
  387         pages = td->td_kstack_pages;
  388         ksobj = td->td_kstack_obj;
  389         ks = td->td_kstack;
  390         pmap_qremove(ks, pages);
  391         VM_OBJECT_LOCK(ksobj);
  392         for (i = 0; i < pages; i++) {
  393                 m = vm_page_lookup(ksobj, i);
  394                 if (m == NULL)
  395                         panic("vm_thread_dispose: kstack already missing?");
  396                 vm_page_lock_queues();
  397                 vm_page_unwire(m, 0);
  398                 vm_page_free(m);
  399                 vm_page_unlock_queues();
  400         }
  401         VM_OBJECT_UNLOCK(ksobj);
  402         vm_object_deallocate(ksobj);
  403         kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
  404             (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
  405 }
  406 
  407 /*
  408  * Allow a thread's kernel stack to be paged out.
  409  */
  410 void
  411 vm_thread_swapout(struct thread *td)
  412 {
  413         vm_object_t ksobj;
  414         vm_page_t m;
  415         int i, pages;
  416 
  417         cpu_thread_swapout(td);
  418         pages = td->td_kstack_pages;
  419         ksobj = td->td_kstack_obj;
  420         pmap_qremove(td->td_kstack, pages);
  421         VM_OBJECT_LOCK(ksobj);
  422         for (i = 0; i < pages; i++) {
  423                 m = vm_page_lookup(ksobj, i);
  424                 if (m == NULL)
  425                         panic("vm_thread_swapout: kstack already missing?");
  426                 vm_page_lock_queues();
  427                 vm_page_dirty(m);
  428                 vm_page_unwire(m, 0);
  429                 vm_page_unlock_queues();
  430         }
  431         VM_OBJECT_UNLOCK(ksobj);
  432 }
  433 
  434 /*
  435  * Bring the kernel stack for a specified thread back in.
  436  */
  437 void
  438 vm_thread_swapin(struct thread *td)
  439 {
  440         vm_object_t ksobj;
  441         vm_page_t m, ma[KSTACK_MAX_PAGES];
  442         int i, pages, rv;
  443 
  444         pages = td->td_kstack_pages;
  445         ksobj = td->td_kstack_obj;
  446         VM_OBJECT_LOCK(ksobj);
  447         for (i = 0; i < pages; i++) {
  448                 m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
  449                 if (m->valid != VM_PAGE_BITS_ALL) {
  450                         rv = vm_pager_get_pages(ksobj, &m, 1, 0);
  451                         if (rv != VM_PAGER_OK)
  452                                 panic("vm_thread_swapin: cannot get kstack for proc: %d", td->td_proc->p_pid);
  453                         m = vm_page_lookup(ksobj, i);
  454                         m->valid = VM_PAGE_BITS_ALL;
  455                 }
  456                 ma[i] = m;
  457                 vm_page_lock_queues();
  458                 vm_page_wire(m);
  459                 vm_page_wakeup(m);
  460                 vm_page_unlock_queues();
  461         }
  462         VM_OBJECT_UNLOCK(ksobj);
  463         pmap_qenter(td->td_kstack, ma, pages);
  464         cpu_thread_swapin(td);
  465 }
  466 
  467 /*
  468  * Set up a variable-sized alternate kstack.
  469  */
  470 void
  471 vm_thread_new_altkstack(struct thread *td, int pages)
  472 {
  473 
  474         td->td_altkstack = td->td_kstack;
  475         td->td_altkstack_obj = td->td_kstack_obj;
  476         td->td_altkstack_pages = td->td_kstack_pages;
  477 
  478         vm_thread_new(td, pages);
  479 }
  480 
  481 /*
  482  * Restore the original kstack.
  483  */
  484 void
  485 vm_thread_dispose_altkstack(struct thread *td)
  486 {
  487 
  488         vm_thread_dispose(td);
  489 
  490         td->td_kstack = td->td_altkstack;
  491         td->td_kstack_obj = td->td_altkstack_obj;
  492         td->td_kstack_pages = td->td_altkstack_pages;
  493         td->td_altkstack = 0;
  494         td->td_altkstack_obj = NULL;
  495         td->td_altkstack_pages = 0;
  496 }
  497 
  498 /*
  499  * Implement fork's actions on an address space.
  500  * Here we arrange for the address space to be copied or referenced,
  501  * allocate a user struct (pcb and kernel stack), then call the
  502  * machine-dependent layer to fill those in and make the new process
  503  * ready to run.  The new process is set up so that it returns directly
  504  * to user mode to avoid stack copying and relocation problems.
  505  */
  506 void
  507 vm_forkproc(td, p2, td2, flags)
  508         struct thread *td;
  509         struct proc *p2;
  510         struct thread *td2;
  511         int flags;
  512 {
  513         struct proc *p1 = td->td_proc;
  514 
  515         if ((flags & RFPROC) == 0) {
  516                 /*
  517  * Divorce the memory, if it is shared; essentially
  518  * this changes memory shared amongst threads into
  519  * COW locally.
  520                  */
  521                 if ((flags & RFMEM) == 0) {
  522                         if (p1->p_vmspace->vm_refcnt > 1) {
  523                                 vmspace_unshare(p1);
  524                         }
  525                 }
  526                 cpu_fork(td, p2, td2, flags);
  527                 return;
  528         }
  529 
  530         if (flags & RFMEM) {
  531                 p2->p_vmspace = p1->p_vmspace;
  532                 atomic_add_int(&p1->p_vmspace->vm_refcnt, 1);
  533         }
  534 
  535         while (vm_page_count_severe()) {
  536                 VM_WAIT;
  537         }
  538 
  539         if ((flags & RFMEM) == 0) {
  540                 p2->p_vmspace = vmspace_fork(p1->p_vmspace);
  541                 if (p1->p_vmspace->vm_shm)
  542                         shmfork(p1, p2);
  543         }
  544 
  545         /*
  546          * cpu_fork will copy and update the pcb, set up the kernel stack,
  547          * and make the child ready to run.
  548          */
  549         cpu_fork(td, p2, td2, flags);
  550 }
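      /*
       * Summary of the flag combinations handled above (cf. rfork(2)):
       *
       *      RFPROC|RFMEM    child shares p1's vmspace (vm_refcnt++)
       *      RFPROC only     child gets a COW copy via vmspace_fork()
       *      RFMEM only      no new process; the vmspace is untouched
       *      neither         no new process; a shared vmspace is first
       *                      unshared into a private COW copy
       */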
  551 
  552 /*
  553  * Called after a process has been wait(2)'ed upon and is being reaped.
  554  * The idea is to reclaim resources that we could not reclaim while
  555  * the process was still executing.
  556  */
  557 void
  558 vm_waitproc(p)
  559         struct proc *p;
  560 {
  561 
  562         vmspace_exitfree(p);            /* and clean-out the vmspace */
  563 }
  564 
  565 /*
  566  * Set default limits for VM system.
  567  * Called for proc 0, and then inherited by all others.
  568  *
  569  * XXX should probably act directly on proc0.
  570  */
  571 static void
  572 vm_init_limits(udata)
  573         void *udata;
  574 {
  575         struct proc *p = udata;
  576         struct plimit *limp;
  577         int rss_limit;
  578 
  579         /*
  580          * Set up the initial limits on process VM. Set the maximum resident
  581          * set size to be half of (reasonably) available memory.  Since this
  582          * is a soft limit, it comes into effect only when the system is out
  583          * of memory - half of main memory helps to favor smaller processes,
  584          * and reduces thrashing of the object cache.
  585          */
  586         limp = p->p_limit;
  587         limp->pl_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
  588         limp->pl_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
  589         limp->pl_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
  590         limp->pl_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
  591         /* floor the soft RSS limit at 2MB (512 pages) */
  592         rss_limit = max(cnt.v_free_count, 512);
  593         limp->pl_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
  594         limp->pl_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
  595 }
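      /*
       * Worked example (assuming a 4KB PAGE_SIZE): the 512-page floor
       * above corresponds to ptoa(512) = 512 * 4096 bytes = 2MB, which
       * is where the figure in the comment comes from.
       */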
  596 
  597 void
  598 faultin(p)
  599         struct proc *p;
  600 {
  601 #ifdef NO_SWAPPING
  602 
  603         PROC_LOCK_ASSERT(p, MA_OWNED);
  604         if ((p->p_sflag & PS_INMEM) == 0)
  605                 panic("faultin: proc swapped out with NO_SWAPPING!");
  606 #else /* !NO_SWAPPING */
  607         struct thread *td;
  608 
  609         PROC_LOCK_ASSERT(p, MA_OWNED);
  610         /*
  611          * If another process is swapping in this process,
  612          * just wait until it finishes.
  613          */
  614         if (p->p_sflag & PS_SWAPPINGIN)
  615                 msleep(&p->p_sflag, &p->p_mtx, PVM, "faultin", 0);
  616         else if ((p->p_sflag & PS_INMEM) == 0) {
  617                 /*
  618                  * Don't let another thread swap process p out while we are
  619                  * busy swapping it in.
  620                  */
  621                 ++p->p_lock;
  622                 mtx_lock_spin(&sched_lock);
  623                 p->p_sflag |= PS_SWAPPINGIN;
  624                 mtx_unlock_spin(&sched_lock);
  625                 PROC_UNLOCK(p);
  626 
  627                 FOREACH_THREAD_IN_PROC(p, td)
  628                         vm_thread_swapin(td);
  629 
  630                 PROC_LOCK(p);
  631                 mtx_lock_spin(&sched_lock);
  632                 p->p_sflag &= ~PS_SWAPPINGIN;
  633                 p->p_sflag |= PS_INMEM;
  634                 FOREACH_THREAD_IN_PROC(p, td) {
  635                         TD_CLR_SWAPPED(td);
  636                         if (TD_CAN_RUN(td))
  637                                 setrunnable(td);
  638                 }
  639                 mtx_unlock_spin(&sched_lock);
  640 
  641                 wakeup(&p->p_sflag);
  642 
  643                 /* Allow other threads to swap p out now. */
  644                 --p->p_lock;
  645         }
  646 #endif /* NO_SWAPPING */
  647 }
  648 
  649 /*
  650  * This swapin algorithm attempts to swap-in processes only if there
  651  * is enough space for them.  Of course, if a process waits for a long
  652  * time, it will be swapped in anyway.
  653  *
  654  *  XXXKSE - a process is ranked by its highest-priority thread.
  655  *
  656  * Giant is held on entry.
  657  */
  658 /* ARGSUSED*/
  659 static void
  660 scheduler(dummy)
  661         void *dummy;
  662 {
  663         struct proc *p;
  664         struct thread *td;
  665         int pri;
  666         struct proc *pp;
  667         int ppri;
  668 
  669         mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
  670         mtx_unlock(&Giant);
  671 
  672 loop:
  673         if (vm_page_count_min()) {
  674                 VM_WAIT;
  675                 mtx_lock_spin(&sched_lock);
  676                 proc0_rescan = 0;
  677                 mtx_unlock_spin(&sched_lock);
  678                 goto loop;
  679         }
  680 
  681         pp = NULL;
  682         ppri = INT_MIN;
  683         sx_slock(&allproc_lock);
  684         FOREACH_PROC_IN_SYSTEM(p) {
  685                 struct ksegrp *kg;
  686                 if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
  687                         continue;
  688                 }
  689                 mtx_lock_spin(&sched_lock);
  690                 FOREACH_THREAD_IN_PROC(p, td) {
  691                         /*
  692                          * An otherwise runnable thread of a process
  693                          * swapped out has only the TDI_SWAPPED bit set.
  694                          * 
  695                          */
  696                         if (td->td_inhibitors == TDI_SWAPPED) {
  697                                 kg = td->td_ksegrp;
  698                                 pri = p->p_swtime + kg->kg_slptime;
  699                                 if ((p->p_sflag & PS_SWAPINREQ) == 0) {
  700                                         pri -= p->p_nice * 8;
  701                                 }
  702 
  703                                 /*
  704                                  * if this ksegrp is higher priority
  705                                  * and there is enough space, then select
  706                                  * this process instead of the previous
  707                                  * selection.
  708                                  */
  709                                 if (pri > ppri) {
  710                                         pp = p;
  711                                         ppri = pri;
  712                                 }
  713                         }
  714                 }
  715                 mtx_unlock_spin(&sched_lock);
  716         }
  717         sx_sunlock(&allproc_lock);
  718 
  719         /*
  720          * Nothing to do, back to sleep.
  721          */
  722         if ((p = pp) == NULL) {
  723                 mtx_lock_spin(&sched_lock);
  724                 if (!proc0_rescan) {
  725                         TD_SET_IWAIT(&thread0);
  726                         mi_switch(SW_VOL, NULL);
  727                 }
  728                 proc0_rescan = 0;
  729                 mtx_unlock_spin(&sched_lock);
  730                 goto loop;
  731         }
  732         PROC_LOCK(p);
  733 
  734         /*
  735          * Another process may be bringing or may have already
  736          * brought this process in while we traverse all threads.
  737          * Or, this process may even be being swapped out again.
  738          */
  739         if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
  740                 PROC_UNLOCK(p);
  741                 mtx_lock_spin(&sched_lock);
  742                 proc0_rescan = 0;
  743                 mtx_unlock_spin(&sched_lock);
  744                 goto loop;
  745         }
  746 
  747         mtx_lock_spin(&sched_lock);
  748         p->p_sflag &= ~PS_SWAPINREQ;
  749         mtx_unlock_spin(&sched_lock);
  750 
  751         /*
  752  * We would like to bring someone in (only if there is space).
  753  * [XXX: what checks the space?]
  754          */
  755         faultin(p);
  756         PROC_UNLOCK(p);
  757         mtx_lock_spin(&sched_lock);
  758         p->p_swtime = 0;
  759         proc0_rescan = 0;
  760         mtx_unlock_spin(&sched_lock);
  761         goto loop;
  762 }
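      /*
       * A worked example of the selection heuristic above, using
       * hypothetical numbers: a process swapped out for p_swtime = 20
       * whose most deserving thread has kg_slptime = 10 starts at
       * pri = 30; with no PS_SWAPINREQ pending and p_nice = -5, it
       * scores 30 - (-5 * 8) = 70.  The highest score wins, so
       * processes with negative nice values tend to be swapped back
       * in sooner.
       */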
  763 
  764 void kick_proc0(void)
  765 {
  766         struct thread *td = &thread0;
  767 
  768 
  769         if (TD_AWAITING_INTR(td)) {
  770                 CTR2(KTR_INTR, "%s: setrunqueue %d", __func__, 0);
  771                 TD_CLR_IWAIT(td);
  772                 setrunqueue(td, SRQ_INTR);
  773         } else {
  774                 proc0_rescan = 1;
  775                 CTR2(KTR_INTR, "%s: state %d",
  776                     __func__, td->td_state);
  777         }
  778 
  779 }
  780 
  781 
  782 #ifndef NO_SWAPPING
  783 
  784 /*
  785  * Swap_idle_threshold1 is the guaranteed swapped-in time for a process.
  786  */
  787 static int swap_idle_threshold1 = 2;
  788 SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
  789     &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process");
  790 
  791 /*
  792  * Swap_idle_threshold2 is the time that a process can be idle before
  793  * it will be swapped out, if idle swapping is enabled.
  794  */
  795 static int swap_idle_threshold2 = 10;
  796 SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
  797     &swap_idle_threshold2, 0, "Time before a process will be swapped out");
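      /*
       * Both knobs are exported read-write, so they can also be tuned
       * from userland through the standard sysctlbyname(3) interface;
       * a sketch (error handling elided) that raises the idle
       * threshold to a hypothetical 20 seconds:
       *
       *      int secs = 20;
       *      sysctlbyname("vm.swap_idle_threshold2", NULL, NULL,
       *          &secs, sizeof(secs));
       */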
  798 
  799 /*
  800  * Swapout is driven by the pageout daemon.  Very simply, we find eligible
  801  * procs and unwire their u-areas.  We try to always "swap" at least one
  802  * process in case we need the room for a swapin.
  803  * If any procs have been sleeping/stopped for at least maxslp seconds,
  804  * they are swapped.  Else, we swap the longest-sleeping or stopped process,
  805  * if any, otherwise the longest-resident process.
  806  */
  807 void
  808 swapout_procs(action)
  809 int action;
  810 {
  811         struct proc *p;
  812         struct thread *td;
  813         struct ksegrp *kg;
  814         int didswap = 0;
  815 
  816 retry:
  817         sx_slock(&allproc_lock);
  818         FOREACH_PROC_IN_SYSTEM(p) {
  819                 struct vmspace *vm;
  820                 int minslptime = 100000;
  821                 
  822                 /*
  823                  * Watch out for a process in
  824                  * creation.  It may have no
  825                  * address space or lock yet.
  826                  */
  827                 mtx_lock_spin(&sched_lock);
  828                 if (p->p_state == PRS_NEW) {
  829                         mtx_unlock_spin(&sched_lock);
  830                         continue;
  831                 }
  832                 mtx_unlock_spin(&sched_lock);
  833 
  834                 /*
  835                  * An aio daemon switches its
  836                  * address space while running.
  837                  * Perform a quick check whether
  838                  * a process has P_SYSTEM.
  839                  */
  840                 if ((p->p_flag & P_SYSTEM) != 0)
  841                         continue;
  842 
  843                 /*
  844                  * Do not swapout a process that
  845                  * is waiting for VM data
  846                  * structures as there is a possible
  847                  * deadlock.  Test this first as
  848                  * this may block.
  849                  *
  850                  * Lock the map until swapout
  851                  * finishes, or a thread of this
  852                  * process may attempt to alter
  853                  * the map.
  854                  */
  855                 vm = vmspace_acquire_ref(p);
  856                 if (vm == NULL)
  857                         continue;
  858                 if (!vm_map_trylock(&vm->vm_map))
  859                         goto nextproc1;
  860 
  861                 PROC_LOCK(p);
  862                 if (p->p_lock != 0 ||
  863                     (p->p_flag & (P_STOPPED_SINGLE|P_TRACED|P_SYSTEM|P_WEXIT)
  864                     ) != 0) {
  865                         goto nextproc2;
  866                 }
  867                 /*
  868  * Only the aio daemon changes the vmspace; however, it will be
  869  * skipped because of the if statement above checking
  870  * for P_SYSTEM.
  871                  */
  872                 if ((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) != PS_INMEM)
  873                         goto nextproc2;
  874 
  875                 switch (p->p_state) {
  876                 default:
  877                         /* Don't swap out processes in any sort
  878                          * of 'special' state. */
  879                         break;
  880 
  881                 case PRS_NORMAL:
  882                         mtx_lock_spin(&sched_lock);
  883                         /*
  884                          * do not swapout a realtime process
  885                          * Check all the thread groups..
  886                          */
  887                         FOREACH_KSEGRP_IN_PROC(p, kg) {
  888                                 if (PRI_IS_REALTIME(kg->kg_pri_class))
  889                                         goto nextproc;
  890 
  891                                 /*
  892                                  * Guarantee swap_idle_threshold1
  893                                  * time in memory.
  894                                  */
  895                                 if (kg->kg_slptime < swap_idle_threshold1)
  896                                         goto nextproc;
  897 
  898                                 /*
  899                                  * Do not swapout a process if it is
  900                                  * waiting on a critical event of some
  901                                  * kind or there is a thread whose
  902                                  * pageable memory may be accessed.
  903                                  *
  904                                  * This could be refined to support
  905                                  * swapping out a thread.
  906                                  */
  907                                 FOREACH_THREAD_IN_GROUP(kg, td) {
  908                                         if ((td->td_priority) < PSOCK ||
  909                                             !thread_safetoswapout(td))
  910                                                 goto nextproc;
  911                                 }
  912                                 /*
  913                                  * If the system is under memory stress,
  914                                  * or if we are swapping
  915                                  * idle processes >= swap_idle_threshold2,
  916                                  * then swap the process out.
  917                                  */
  918                                 if (((action & VM_SWAP_NORMAL) == 0) &&
  919                                     (((action & VM_SWAP_IDLE) == 0) ||
  920                                     (kg->kg_slptime < swap_idle_threshold2)))
  921                                         goto nextproc;
  922 
  923                                 if (minslptime > kg->kg_slptime)
  924                                         minslptime = kg->kg_slptime;
  925                         }
  926 
  927                         /*
  928                          * If the pageout daemon didn't free enough pages,
  929                          * or if this process is idle and the system is
  930                          * configured to swap proactively, swap it out.
  931                          */
  932                         if ((action & VM_SWAP_NORMAL) ||
  933                                 ((action & VM_SWAP_IDLE) &&
  934                                  (minslptime > swap_idle_threshold2))) {
  935                                 swapout(p);
  936                                 didswap++;
  937                                 mtx_unlock_spin(&sched_lock);
  938                                 PROC_UNLOCK(p);
  939                                 vm_map_unlock(&vm->vm_map);
  940                                 vmspace_free(vm);
  941                                 sx_sunlock(&allproc_lock);
  942                                 goto retry;
  943                         }
  944 nextproc:                       
  945                         mtx_unlock_spin(&sched_lock);
  946                 }
  947 nextproc2:
  948                 PROC_UNLOCK(p);
  949                 vm_map_unlock(&vm->vm_map);
  950 nextproc1:
  951                 vmspace_free(vm);
  952                 continue;
  953         }
  954         sx_sunlock(&allproc_lock);
  955         /*
  956          * If we swapped something out, and another process needed memory,
  957          * then wakeup the sched process.
  958          */
  959         if (didswap)
  960                 wakeup(&proc0);
  961 }
  962 
  963 static void
  964 swapout(p)
  965         struct proc *p;
  966 {
  967         struct thread *td;
  968 
  969         PROC_LOCK_ASSERT(p, MA_OWNED);
  970         mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
  971 #if defined(SWAP_DEBUG)
  972         printf("swapping out %d\n", p->p_pid);
  973 #endif
  974 
  975         /*
  976          * The states of this process and its threads may have changed
  977          * by now.  Assuming that there is only one pageout daemon thread,
  978          * this process should still be in memory.
  979          */
  980         KASSERT((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) == PS_INMEM,
  981                 ("swapout: lost a swapout race?"));
  982 
  983 #if defined(INVARIANTS)
  984         /*
  985          * Make sure that all threads are safe to be swapped out.
  986          *
  987          * Alternatively, we could swap out only safe threads.
  988          */
  989         FOREACH_THREAD_IN_PROC(p, td) {
  990                 KASSERT(thread_safetoswapout(td),
  991                         ("swapout: there is a thread not safe for swapout"));
  992         }
  993 #endif /* INVARIANTS */
  994 
  995         ++p->p_stats->p_ru.ru_nswap;
  996         /*
  997          * remember the process resident count
  998          */
  999         p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);
 1000 
 1001         p->p_sflag &= ~PS_INMEM;
 1002         p->p_sflag |= PS_SWAPPINGOUT;
 1003         PROC_UNLOCK(p);
 1004         FOREACH_THREAD_IN_PROC(p, td)
 1005                 TD_SET_SWAPPED(td);
 1006         mtx_unlock_spin(&sched_lock);
 1007 
 1008         FOREACH_THREAD_IN_PROC(p, td)
 1009                 vm_thread_swapout(td);
 1010 
 1011         PROC_LOCK(p);
 1012         mtx_lock_spin(&sched_lock);
 1013         p->p_sflag &= ~PS_SWAPPINGOUT;
 1014         p->p_swtime = 0;
 1015 }
 1016 #endif /* !NO_SWAPPING */
