FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_glue.c

/*-
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)vm_glue.c     8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/6.0/sys/vm/vm_glue.c 146554 2005-05-23 23:01:53Z ups $");

#include "opt_vm.h"
#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

extern int maxslp;

/*
 * System initialization
 *
 * Note: proc0 from proc.h
 */
static void vm_init_limits(void *);
SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0)

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler(void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_ANY, scheduler, NULL)

#ifndef NO_SWAPPING
static void swapout(struct proc *);
#endif

static volatile int proc0_rescan;

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  In most cases
 * just checking the vm_map_entry is sufficient within the kernel's address
 * space.
 */
int
kernacc(addr, len, rw)
        void *addr;
        int len, rw;
{
        boolean_t rv;
        vm_offset_t saddr, eaddr;
        vm_prot_t prot;

        KASSERT((rw & ~VM_PROT_ALL) == 0,
            ("illegal ``rw'' argument to kernacc (%x)\n", rw));

        if ((vm_offset_t)addr + len > kernel_map->max_offset ||
            (vm_offset_t)addr + len < (vm_offset_t)addr)
                return (FALSE);

        prot = rw;
        saddr = trunc_page((vm_offset_t)addr);
        eaddr = round_page((vm_offset_t)addr + len);
        vm_map_lock_read(kernel_map);
        rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
        vm_map_unlock_read(kernel_map);
        return (rv == TRUE);
}
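
/*
 * Editor's sketch (not part of the original file): a caller typically
 * validates a kernel-space buffer with kernacc() before touching it.
 * "buf" and "len" below are hypothetical.
 */
#if 0
        if (!kernacc(buf, len, VM_PROT_READ | VM_PROT_WRITE))
                panic("buffer not mapped read/write in kernel_map");
#endif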

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
 * used in conjunction with this call.
 */
int
useracc(addr, len, rw)
        void *addr;
        int len, rw;
{
        boolean_t rv;
        vm_prot_t prot;
        vm_map_t map;

        KASSERT((rw & ~VM_PROT_ALL) == 0,
            ("illegal ``rw'' argument to useracc (%x)\n", rw));
        prot = rw;
        map = &curproc->p_vmspace->vm_map;
        if ((vm_offset_t)addr + len > vm_map_max(map) ||
            (vm_offset_t)addr + len < (vm_offset_t)addr) {
                return (FALSE);
        }
        vm_map_lock_read(map);
        rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
            round_page((vm_offset_t)addr + len), prot);
        vm_map_unlock_read(map);
        return (rv == TRUE);
}
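
/*
 * Editor's sketch (not part of the original file): because useracc()
 * only inspects map entries, the subsequent copy can still fault, so
 * its error return must be honoured as well.  "uaddr", "kbuf" and
 * "len" are hypothetical.
 */
#if 0
        if (!useracc(uaddr, len, VM_PROT_READ))
                return (EFAULT);        /* range not mapped readable at all */
        if ((error = copyin(uaddr, kbuf, len)) != 0)
                return (error);         /* the pages may still be unreadable */
#endif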

int
vslock(void *addr, size_t len)
{
        vm_offset_t end, last, start;
        vm_size_t npages;
        int error;

        last = (vm_offset_t)addr + len;
        start = trunc_page((vm_offset_t)addr);
        end = round_page(last);
        if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)
                return (EINVAL);
        npages = atop(end - start);
        if (npages > vm_page_max_wired)
                return (ENOMEM);
        PROC_LOCK(curproc);
        if (ptoa(npages +
            pmap_wired_count(vm_map_pmap(&curproc->p_vmspace->vm_map))) >
            lim_cur(curproc, RLIMIT_MEMLOCK)) {
                PROC_UNLOCK(curproc);
                return (ENOMEM);
        }
        PROC_UNLOCK(curproc);
#if 0
        /*
         * XXX - not yet
         *
         * The limit for transient usage of wired pages should be
         * larger than for "permanent" wired pages (mlock()).
         *
         * Also, the sysctl code, which is the only present user
         * of vslock(), does a hard loop on EAGAIN.
         */
        if (npages + cnt.v_wire_count > vm_page_max_wired)
                return (EAGAIN);
#endif
        error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
            VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
        /*
         * Return EFAULT on error to match copy{in,out}() behaviour
         * rather than returning ENOMEM like mlock() would.
         */
        return (error == KERN_SUCCESS ? 0 : EFAULT);
}

void
vsunlock(void *addr, size_t len)
{

        /* Rely on the parameter sanity checks performed by vslock(). */
        (void)vm_map_unwire(&curproc->p_vmspace->vm_map,
            trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
            VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
}
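
/*
 * Editor's sketch (not part of the original file): the sysctl code,
 * the main consumer of vslock()/vsunlock(), wires the user buffer so
 * that the copy cannot fault while locks are held, then unwires it.
 * The identifiers below are hypothetical.
 */
#if 0
        if ((error = vslock(uaddr, len)) != 0)
                return (error);                 /* EINVAL, ENOMEM or EFAULT */
        error = copyout(kbuf, uaddr, len);      /* cannot block on a fault */
        vsunlock(uaddr, len);
#endif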

#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif
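
/*
 * Editor's note: KSTACK_PAGES and the KSTACK_MAX_PAGES cap above can
 * be overridden from a kernel configuration file; the options are
 * emitted into opt_kstack_pages.h and opt_kstack_max_pages.h, which
 * are included at the top of this file.  For example:
 *
 *      options KSTACK_PAGES=4
 *      options KSTACK_MAX_PAGES=32
 */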

/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 * This routine directly affects fork performance for a process and
 * creation performance for a thread.
 */
void
vm_thread_new(struct thread *td, int pages)
{
        vm_object_t ksobj;
        vm_offset_t ks;
        vm_page_t m, ma[KSTACK_MAX_PAGES];
        int i;

        /* Bounds check */
        if (pages <= 1)
                pages = KSTACK_PAGES;
        else if (pages > KSTACK_MAX_PAGES)
                pages = KSTACK_MAX_PAGES;
        /*
         * Allocate an object for the kstack.
         */
        ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
        td->td_kstack_obj = ksobj;
        /*
         * Get a kernel virtual address for this thread's kstack.
         */
        ks = kmem_alloc_nofault(kernel_map,
           (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
        if (ks == 0)
                panic("vm_thread_new: kstack allocation failed");
        if (KSTACK_GUARD_PAGES != 0) {
                pmap_qremove(ks, KSTACK_GUARD_PAGES);
                ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
        }
        td->td_kstack = ks;
        /*
         * Knowing the number of pages allocated is useful when you
         * want to deallocate them.
         */
        td->td_kstack_pages = pages;
        /*
         * For the length of the stack, link in a real page of ram for each
         * page of stack.
         */
        VM_OBJECT_LOCK(ksobj);
        for (i = 0; i < pages; i++) {
                /*
                 * Get a kernel stack page.
                 */
                m = vm_page_grab(ksobj, i, VM_ALLOC_NOBUSY |
                    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
                ma[i] = m;
                m->valid = VM_PAGE_BITS_ALL;
        }
        VM_OBJECT_UNLOCK(ksobj);
        pmap_qenter(ks, ma, pages);
}
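
/*
 * Editor's sketch of the layout built by vm_thread_new(), assuming
 * (hypothetically) PAGE_SIZE = 4K, pages = 2 and KSTACK_GUARD_PAGES = 1:
 *
 *      base = kmem_alloc_nofault(kernel_map, 3 * 4K)
 *      [base, base + 4K)        guard page, unmapped by pmap_qremove()
 *      [base + 4K, base + 12K)  stack pages, wired and mapped by pmap_qenter()
 *      td_kstack = base + 4K
 *
 * A stack overflow (the stack grows down) therefore faults on the
 * unmapped guard page instead of silently corrupting adjacent memory.
 */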

/*
 * Dispose of a thread's kernel stack.
 */
void
vm_thread_dispose(struct thread *td)
{
        vm_object_t ksobj;
        vm_offset_t ks;
        vm_page_t m;
        int i, pages;

        pages = td->td_kstack_pages;
        ksobj = td->td_kstack_obj;
        ks = td->td_kstack;
        pmap_qremove(ks, pages);
        VM_OBJECT_LOCK(ksobj);
        for (i = 0; i < pages; i++) {
                m = vm_page_lookup(ksobj, i);
                if (m == NULL)
                        panic("vm_thread_dispose: kstack already missing?");
                vm_page_lock_queues();
                vm_page_unwire(m, 0);
                vm_page_free(m);
                vm_page_unlock_queues();
        }
        VM_OBJECT_UNLOCK(ksobj);
        vm_object_deallocate(ksobj);
        kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
            (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
}

/*
 * Allow a thread's kernel stack to be paged out.
 */
void
vm_thread_swapout(struct thread *td)
{
        vm_object_t ksobj;
        vm_page_t m;
        int i, pages;

        cpu_thread_swapout(td);
        pages = td->td_kstack_pages;
        ksobj = td->td_kstack_obj;
        pmap_qremove(td->td_kstack, pages);
        VM_OBJECT_LOCK(ksobj);
        for (i = 0; i < pages; i++) {
                m = vm_page_lookup(ksobj, i);
                if (m == NULL)
                        panic("vm_thread_swapout: kstack already missing?");
                vm_page_lock_queues();
                vm_page_dirty(m);
                vm_page_unwire(m, 0);
                vm_page_unlock_queues();
        }
        VM_OBJECT_UNLOCK(ksobj);
}

/*
 * Bring the kernel stack for a specified thread back in.
 */
void
vm_thread_swapin(struct thread *td)
{
        vm_object_t ksobj;
        vm_page_t m, ma[KSTACK_MAX_PAGES];
        int i, pages, rv;

        pages = td->td_kstack_pages;
        ksobj = td->td_kstack_obj;
        VM_OBJECT_LOCK(ksobj);
        for (i = 0; i < pages; i++) {
                m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
                if (m->valid != VM_PAGE_BITS_ALL) {
                        rv = vm_pager_get_pages(ksobj, &m, 1, 0);
                        if (rv != VM_PAGER_OK)
                                panic("vm_thread_swapin: cannot get kstack for proc: %d",
                                    td->td_proc->p_pid);
                        m = vm_page_lookup(ksobj, i);
                        m->valid = VM_PAGE_BITS_ALL;
                }
                ma[i] = m;
                vm_page_lock_queues();
                vm_page_wire(m);
                vm_page_wakeup(m);
                vm_page_unlock_queues();
        }
        VM_OBJECT_UNLOCK(ksobj);
        pmap_qenter(td->td_kstack, ma, pages);
        cpu_thread_swapin(td);
}

/*
 * Set up a variable-sized alternate kstack.
 */
void
vm_thread_new_altkstack(struct thread *td, int pages)
{

        td->td_altkstack = td->td_kstack;
        td->td_altkstack_obj = td->td_kstack_obj;
        td->td_altkstack_pages = td->td_kstack_pages;

        vm_thread_new(td, pages);
}

/*
 * Restore the original kstack.
 */
void
vm_thread_dispose_altkstack(struct thread *td)
{

        vm_thread_dispose(td);

        td->td_kstack = td->td_altkstack;
        td->td_kstack_obj = td->td_altkstack_obj;
        td->td_kstack_pages = td->td_altkstack_pages;
        td->td_altkstack = 0;
        td->td_altkstack_obj = NULL;
        td->td_altkstack_pages = 0;
}

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
void
vm_forkproc(td, p2, td2, flags)
        struct thread *td;
        struct proc *p2;
        struct thread *td2;
        int flags;
{
        struct proc *p1 = td->td_proc;

        if ((flags & RFPROC) == 0) {
                /*
                 * Divorce the memory: if it is shared, this
                 * essentially turns memory shared amongst threads
                 * into local copy-on-write copies.
                 */
                if ((flags & RFMEM) == 0) {
                        if (p1->p_vmspace->vm_refcnt > 1) {
                                vmspace_unshare(p1);
                        }
                }
                cpu_fork(td, p2, td2, flags);
                return;
        }

        if (flags & RFMEM) {
                p2->p_vmspace = p1->p_vmspace;
                atomic_add_int(&p1->p_vmspace->vm_refcnt, 1);
        }

        while (vm_page_count_severe()) {
                VM_WAIT;
        }

        if ((flags & RFMEM) == 0) {
                p2->p_vmspace = vmspace_fork(p1->p_vmspace);
                if (p1->p_vmspace->vm_shm)
                        shmfork(p1, p2);
        }

        /*
         * cpu_fork will copy and update the pcb, set up the kernel stack,
         * and make the child ready to run.
         */
        cpu_fork(td, p2, td2, flags);
}
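
/*
 * Editor's note: "flags" above are the rfork(2) flags, so the paths
 * correspond to (a sketch, not an exhaustive list):
 *
 *      RFPROC | RFMEM  new process sharing the parent's vmspace
 *                      (vm_refcnt bumped, no copy made)
 *      RFPROC          classic fork(2) semantics; vmspace_fork()
 *                      builds a copy-on-write copy of the address space
 *      neither flag    no new process; a shared vmspace is first
 *                      divorced into a private COW copy
 */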

/*
 * Called after a process has been wait(2)ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(p)
        struct proc *p;
{

        vmspace_exitfree(p);            /* and clean-out the vmspace */
}

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 *
 * XXX should probably act directly on proc0.
 */
static void
vm_init_limits(udata)
        void *udata;
{
        struct proc *p = udata;
        struct plimit *limp;
        int rss_limit;

        /*
         * Set up the initial limits on process VM. Set the maximum resident
         * set size to be half of (reasonably) available memory.  Since this
         * is a soft limit, it comes into effect only when the system is out
         * of memory - half of main memory helps to favor smaller processes,
         * and reduces thrashing of the object cache.
         */
        limp = p->p_limit;
        limp->pl_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
        limp->pl_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
        limp->pl_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
        limp->pl_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
        /* limit the limit to no less than 2MB (512 pages with 4KB pages) */
        rss_limit = max(cnt.v_free_count, 512);
        limp->pl_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
        limp->pl_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}

void
faultin(p)
        struct proc *p;
{
#ifdef NO_SWAPPING

        PROC_LOCK_ASSERT(p, MA_OWNED);
        if ((p->p_sflag & PS_INMEM) == 0)
                panic("faultin: proc swapped out with NO_SWAPPING!");
#else /* !NO_SWAPPING */
        struct thread *td;

        PROC_LOCK_ASSERT(p, MA_OWNED);
        /*
         * If another process is swapping in this process,
         * just wait until it finishes.
         */
        if (p->p_sflag & PS_SWAPPINGIN)
                msleep(&p->p_sflag, &p->p_mtx, PVM, "faultin", 0);
        else if ((p->p_sflag & PS_INMEM) == 0) {
                /*
                 * Don't let another thread swap process p out while we are
                 * busy swapping it in.
                 */
                ++p->p_lock;
                mtx_lock_spin(&sched_lock);
                p->p_sflag |= PS_SWAPPINGIN;
                mtx_unlock_spin(&sched_lock);
                PROC_UNLOCK(p);

                FOREACH_THREAD_IN_PROC(p, td)
                        vm_thread_swapin(td);

                PROC_LOCK(p);
                mtx_lock_spin(&sched_lock);
                p->p_sflag &= ~PS_SWAPPINGIN;
                p->p_sflag |= PS_INMEM;
                FOREACH_THREAD_IN_PROC(p, td) {
                        TD_CLR_SWAPPED(td);
                        if (TD_CAN_RUN(td))
                                setrunnable(td);
                }
                mtx_unlock_spin(&sched_lock);

                wakeup(&p->p_sflag);

                /* Allow other threads to swap p out now. */
                --p->p_lock;
        }
#endif /* NO_SWAPPING */
}
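
/*
 * Editor's note (an assumption from sys/proc.h of this era, verify
 * against your tree): the p_lock hold count incremented above is the
 * one PHOLD()/PRELE() maintain, and _PHOLD() itself calls faultin()
 * when PS_INMEM is clear, so most code pins a process in core via:
 */
#if 0
        PROC_LOCK(p);
        _PHOLD(p);      /* may sleep in faultin() if p is swapped out */
        /* ... access the process's kernel stacks safely ... */
        _PRELE(p);
        PROC_UNLOCK(p);
#endif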

/*
 * This swapin algorithm attempts to swap in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 *
 * XXXKSE - the process with the highest-priority thread counts.
 *
 * Giant is held on entry.
 */
/* ARGSUSED*/
static void
scheduler(dummy)
        void *dummy;
{
        struct proc *p;
        struct thread *td;
        int pri;
        struct proc *pp;
        int ppri;

        mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
        mtx_unlock(&Giant);

loop:
        if (vm_page_count_min()) {
                VM_WAIT;
                mtx_lock_spin(&sched_lock);
                proc0_rescan = 0;
                mtx_unlock_spin(&sched_lock);
                goto loop;
        }

        pp = NULL;
        ppri = INT_MIN;
        sx_slock(&allproc_lock);
        FOREACH_PROC_IN_SYSTEM(p) {
                struct ksegrp *kg;
                if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
                        continue;
                }
                mtx_lock_spin(&sched_lock);
                FOREACH_THREAD_IN_PROC(p, td) {
                        /*
                         * An otherwise runnable thread of a process
                         * swapped out has only the TDI_SWAPPED bit set.
                         */
                        if (td->td_inhibitors == TDI_SWAPPED) {
                                kg = td->td_ksegrp;
                                pri = p->p_swtime + kg->kg_slptime;
                                if ((p->p_sflag & PS_SWAPINREQ) == 0) {
                                        pri -= p->p_nice * 8;
                                }

                                /*
                                 * If this ksegrp has a higher priority
                                 * and there is enough space, then select
                                 * this process instead of the previous
                                 * selection.
                                 */
                                if (pri > ppri) {
                                        pp = p;
                                        ppri = pri;
                                }
                        }
                }
                mtx_unlock_spin(&sched_lock);
        }
        sx_sunlock(&allproc_lock);

        /*
         * Nothing to do, back to sleep.
         */
        if ((p = pp) == NULL) {
                mtx_lock_spin(&sched_lock);
                if (!proc0_rescan) {
                        TD_SET_IWAIT(&thread0);
                        mi_switch(SW_VOL, NULL);
                }
                proc0_rescan = 0;
                mtx_unlock_spin(&sched_lock);
                goto loop;
        }
        PROC_LOCK(p);

        /*
         * Another process may be bringing or may have already
         * brought this process in while we traverse all threads.
         * Or, this process may even be being swapped out again.
         */
        if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
                PROC_UNLOCK(p);
                mtx_lock_spin(&sched_lock);
                proc0_rescan = 0;
                mtx_unlock_spin(&sched_lock);
                goto loop;
        }

        mtx_lock_spin(&sched_lock);
        p->p_sflag &= ~PS_SWAPINREQ;
        mtx_unlock_spin(&sched_lock);

        /*
         * We would like to bring someone in (only if there is space).
         * [XXX: what checks the space?]
         */
        faultin(p);
        PROC_UNLOCK(p);
        mtx_lock_spin(&sched_lock);
        p->p_swtime = 0;
        proc0_rescan = 0;
        mtx_unlock_spin(&sched_lock);
        goto loop;
}
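
/*
 * Editor's worked example of the selection loop above (hypothetical
 * numbers): a process swapped out for 30 seconds (p_swtime) whose most
 * deserving ksegrp has slept 10 seconds scores pri = 30 + 10 = 40.
 * If no PS_SWAPINREQ is pending, the nice value is charged, so at
 * nice +20 the score becomes 40 - 20 * 8 = -120; long-swapped and
 * explicitly requested processes therefore win the swap-in.
 */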

void
kick_proc0(void)
{
        struct thread *td = &thread0;

        if (TD_AWAITING_INTR(td)) {
                CTR2(KTR_INTR, "%s: setrunqueue %d", __func__, 0);
                TD_CLR_IWAIT(td);
                setrunqueue(td, SRQ_INTR);
        } else {
                proc0_rescan = 1;
                CTR2(KTR_INTR, "%s: state %d",
                    __func__, td->td_state);
        }
}
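
/*
 * Editor's note: proc0_rescan is the handshake between kick_proc0()
 * and scheduler() above.  If proc0 is parked in IWAIT it is made
 * runnable directly; otherwise the flag records that a rescan was
 * requested while scheduler() was busy, so scheduler() loops once
 * more instead of going back to sleep.
 */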

#ifndef NO_SWAPPING

/*
 * Swap_idle_threshold1 is the guaranteed swapped-in time for a process.
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
    &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process");

/*
 * Swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
    &swap_idle_threshold2, 0, "Time before a process will be swapped out");
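
/*
 * Editor's sketch: both thresholds are CTLFLAG_RW and can be tuned at
 * run time; the values are compared against kg_slptime (seconds of
 * sleep time) below.  For example, from userland:
 *
 *      sysctl vm.swap_idle_threshold1=5
 *      sysctl vm.swap_idle_threshold2=30
 */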

/*
 * Swapout is driven by the pageout daemon.  Very simple: we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
void
swapout_procs(action)
        int action;
{
        struct proc *p;
        struct thread *td;
        struct ksegrp *kg;
        int didswap = 0;

retry:
        sx_slock(&allproc_lock);
        FOREACH_PROC_IN_SYSTEM(p) {
                struct vmspace *vm;
                int minslptime = 100000;

                /*
                 * Watch out for a process in
                 * creation.  It may have no
                 * address space or lock yet.
                 */
                mtx_lock_spin(&sched_lock);
                if (p->p_state == PRS_NEW) {
                        mtx_unlock_spin(&sched_lock);
                        continue;
                }
                mtx_unlock_spin(&sched_lock);

                /*
                 * An aio daemon switches its
                 * address space while running.
                 * Perform a quick check whether
                 * a process has P_SYSTEM.
                 */
                if ((p->p_flag & P_SYSTEM) != 0)
                        continue;

                /*
                 * Do not swap out a process that
                 * is waiting for VM data
                 * structures as there is a possible
                 * deadlock.  Test this first as
                 * this may block.
                 *
                 * Lock the map until swapout
                 * finishes, or a thread of this
                 * process may attempt to alter
                 * the map.
                 */
                PROC_LOCK(p);
                vm = p->p_vmspace;
                KASSERT(vm != NULL,
                        ("swapout_procs: a process has no address space"));
                atomic_add_int(&vm->vm_refcnt, 1);
                PROC_UNLOCK(p);
                if (!vm_map_trylock(&vm->vm_map))
                        goto nextproc1;

                PROC_LOCK(p);
                if (p->p_lock != 0 ||
                    (p->p_flag & (P_STOPPED_SINGLE|P_TRACED|P_SYSTEM|P_WEXIT)) != 0) {
                        goto nextproc2;
                }
                /*
                 * Only aiod changes its vmspace; however, it will be
                 * skipped because of the P_SYSTEM check above.
                 */
                if ((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) != PS_INMEM)
                        goto nextproc2;

                switch (p->p_state) {
                default:
                        /*
                         * Don't swap out processes in any sort
                         * of 'special' state.
                         */
                        break;

                case PRS_NORMAL:
                        mtx_lock_spin(&sched_lock);
                        /*
                         * Do not swap out a realtime process.
                         * Check all the thread groups.
                         */
                        FOREACH_KSEGRP_IN_PROC(p, kg) {
                                if (PRI_IS_REALTIME(kg->kg_pri_class))
                                        goto nextproc;

                                /*
                                 * Guarantee swap_idle_threshold1
                                 * time in memory.
                                 */
                                if (kg->kg_slptime < swap_idle_threshold1)
                                        goto nextproc;

                                /*
                                 * Do not swap out a process if it is
                                 * waiting on a critical event of some
                                 * kind or there is a thread whose
                                 * pageable memory may be accessed.
                                 *
                                 * This could be refined to support
                                 * swapping out a thread.
                                 */
                                FOREACH_THREAD_IN_GROUP(kg, td) {
                                        if ((td->td_priority) < PSOCK ||
                                            !thread_safetoswapout(td))
                                                goto nextproc;
                                }
                                /*
                                 * If the system is under memory stress,
                                 * or if we are swapping out processes
                                 * idle for at least swap_idle_threshold2
                                 * seconds, then swap the process out.
                                 */
                                if (((action & VM_SWAP_NORMAL) == 0) &&
                                    (((action & VM_SWAP_IDLE) == 0) ||
                                    (kg->kg_slptime < swap_idle_threshold2)))
                                        goto nextproc;

                                if (minslptime > kg->kg_slptime)
                                        minslptime = kg->kg_slptime;
                        }

                        /*
                         * If the pageout daemon didn't free enough pages,
                         * or if this process is idle and the system is
                         * configured to swap proactively, swap it out.
                         */
                        if ((action & VM_SWAP_NORMAL) ||
                                ((action & VM_SWAP_IDLE) &&
                                 (minslptime > swap_idle_threshold2))) {
                                swapout(p);
                                didswap++;
                                mtx_unlock_spin(&sched_lock);
                                PROC_UNLOCK(p);
                                vm_map_unlock(&vm->vm_map);
                                vmspace_free(vm);
                                sx_sunlock(&allproc_lock);
                                goto retry;
                        }
nextproc:
                        mtx_unlock_spin(&sched_lock);
                }
nextproc2:
                PROC_UNLOCK(p);
                vm_map_unlock(&vm->vm_map);
nextproc1:
                vmspace_free(vm);
                continue;
        }
        sx_sunlock(&allproc_lock);
        /*
         * If we swapped something out, and another process needed memory,
         * then wake up the sched process.
         */
        if (didswap)
                wakeup(&proc0);
}

static void
swapout(p)
        struct proc *p;
{
        struct thread *td;

        PROC_LOCK_ASSERT(p, MA_OWNED);
        mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
#if defined(SWAP_DEBUG)
        printf("swapping out %d\n", p->p_pid);
#endif

        /*
         * The states of this process and its threads may have changed
         * by now.  Assuming that there is only one pageout daemon thread,
         * this process should still be in memory.
         */
        KASSERT((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) == PS_INMEM,
                ("swapout: lost a swapout race?"));

#if defined(INVARIANTS)
        /*
         * Make sure that all threads are safe to be swapped out.
         *
         * Alternatively, we could swap out only safe threads.
         */
        FOREACH_THREAD_IN_PROC(p, td) {
                KASSERT(thread_safetoswapout(td),
                        ("swapout: there is a thread not safe for swapout"));
        }
#endif /* INVARIANTS */

        ++p->p_stats->p_ru.ru_nswap;
        /*
         * remember the process resident count
         */
        p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);

        p->p_sflag &= ~PS_INMEM;
        p->p_sflag |= PS_SWAPPINGOUT;
        PROC_UNLOCK(p);
        FOREACH_THREAD_IN_PROC(p, td)
                TD_SET_SWAPPED(td);
        mtx_unlock_spin(&sched_lock);

        FOREACH_THREAD_IN_PROC(p, td)
                vm_thread_swapout(td);

        PROC_LOCK(p);
        mtx_lock_spin(&sched_lock);
        p->p_sflag &= ~PS_SWAPPINGOUT;
        p->p_swtime = 0;
}
#endif /* !NO_SWAPPING */