FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_glue.c

/*-
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)vm_glue.c     8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: src/sys/vm/vm_glue.c,v 1.202.2.5 2005/02/23 06:41:44 alc Exp $");

#include "opt_vm.h"
#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

extern int maxslp;

/*
 * System initialization
 *
 * Note: proc0 from proc.h
 */
static void vm_init_limits(void *);
SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0)

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler(void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_ANY, scheduler, NULL)

#ifndef NO_SWAPPING
static void swapout(struct proc *);
#endif

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  In most cases
 * just checking the vm_map_entry is sufficient within the kernel's address
 * space.
 */
int
kernacc(void *addr, int len, int rw)
{
        boolean_t rv;
        vm_offset_t saddr, eaddr;
        vm_prot_t prot;

        KASSERT((rw & ~VM_PROT_ALL) == 0,
            ("illegal ``rw'' argument to kernacc (%x)\n", rw));

        if ((vm_offset_t)addr + len > kernel_map->max_offset ||
            (vm_offset_t)addr + len < (vm_offset_t)addr)
                return (FALSE);

        prot = rw;
        saddr = trunc_page((vm_offset_t)addr);
        eaddr = round_page((vm_offset_t)addr + len);
        vm_map_lock_read(kernel_map);
        rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
        vm_map_unlock_read(kernel_map);
        return (rv == TRUE);
}
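
/*
 * Editor's sketch (not part of the original file): a typical caller vets
 * a kernel buffer with kernacc() before touching it.  The function
 * example_fill() and its arguments are hypothetical.
 */
#if 0
static int
example_fill(void *buf, int len)
{

        if (!kernacc(buf, len, VM_PROT_WRITE))
                return (EFAULT);
        /* The map entries allow writes; the page contents were not probed. */
        bzero(buf, len);
        return (0);
}
#endif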

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
 * used in conjunction with this call.
 */
int
useracc(void *addr, int len, int rw)
{
        boolean_t rv;
        vm_prot_t prot;
        vm_map_t map;

        KASSERT((rw & ~VM_PROT_ALL) == 0,
            ("illegal ``rw'' argument to useracc (%x)\n", rw));
        prot = rw;
        map = &curproc->p_vmspace->vm_map;
        if ((vm_offset_t)addr + len > vm_map_max(map) ||
            (vm_offset_t)addr + len < (vm_offset_t)addr) {
                return (FALSE);
        }
        vm_map_lock_read(map);
        rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
            round_page((vm_offset_t)addr + len), prot);
        vm_map_unlock_read(map);
        return (rv == TRUE);
}
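
/*
 * Editor's sketch (not part of the original file): useracc() is paired
 * with copyin()/copyout(), which perform the real access and can still
 * fault on the page contents.  example_read() and "uaddr" are
 * hypothetical names.
 */
#if 0
static int
example_read(void *uaddr, void *kbuf, int len)
{

        if (!useracc(uaddr, len, VM_PROT_READ))
                return (EFAULT);
        return (copyin(uaddr, kbuf, (size_t)len));
}
#endif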

int
vslock(void *addr, size_t len)
{
        vm_offset_t end, last, start;
        vm_size_t npages;
        int error;

        last = (vm_offset_t)addr + len;
        start = trunc_page((vm_offset_t)addr);
        end = round_page(last);
        if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)
                return (EINVAL);
        npages = atop(end - start);
        if (npages > vm_page_max_wired)
                return (ENOMEM);
        PROC_LOCK(curproc);
        if (ptoa(npages +
            pmap_wired_count(vm_map_pmap(&curproc->p_vmspace->vm_map))) >
            lim_cur(curproc, RLIMIT_MEMLOCK)) {
                PROC_UNLOCK(curproc);
                return (ENOMEM);
        }
        PROC_UNLOCK(curproc);
#if 0
        /*
         * XXX - not yet
         *
         * The limit for transient usage of wired pages should be
         * larger than for "permanent" wired pages (mlock()).
         *
         * Also, the sysctl code, which is the only present user
         * of vslock(), does a hard loop on EAGAIN.
         */
        if (npages + cnt.v_wire_count > vm_page_max_wired)
                return (EAGAIN);
#endif
        error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
            VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
        /*
         * Return EFAULT on error to match copy{in,out}() behaviour
         * rather than returning ENOMEM like mlock() would.
         */
        return (error == KERN_SUCCESS ? 0 : EFAULT);
}
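
/*
 * Editor's sketch (not part of the original file): the sysctl code, the
 * only present user of vslock(), wires the user buffer so that a later
 * copyout() cannot fault while kernel locks are held.  The function name
 * example_copyout_wired() is hypothetical.
 */
#if 0
static int
example_copyout_wired(void *kbuf, void *uaddr, size_t len)
{
        int error;

        error = vslock(uaddr, len);
        if (error != 0)
                return (error);
        error = copyout(kbuf, uaddr, len);
        vsunlock(uaddr, len);
        return (error);
}
#endif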

void
vsunlock(void *addr, size_t len)
{

        /* Rely on the parameter sanity checks performed by vslock(). */
        (void)vm_map_unwire(&curproc->p_vmspace->vm_map,
            trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
            VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
}

#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif

/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 * This routine directly affects the performance of fork for a process
 * and of creation for a thread.
 */
void
vm_thread_new(struct thread *td, int pages)
{
        vm_object_t ksobj;
        vm_offset_t ks;
        vm_page_t m, ma[KSTACK_MAX_PAGES];
        int i;

        /* Bounds check */
        if (pages <= 1)
                pages = KSTACK_PAGES;
        else if (pages > KSTACK_MAX_PAGES)
                pages = KSTACK_MAX_PAGES;
        /*
         * Allocate an object for the kstack.
         */
        ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
        td->td_kstack_obj = ksobj;
        /*
         * Get a kernel virtual address for this thread's kstack.
         */
        ks = kmem_alloc_nofault(kernel_map,
           (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
        if (ks == 0)
                panic("vm_thread_new: kstack allocation failed");
        if (KSTACK_GUARD_PAGES != 0) {
                pmap_qremove(ks, KSTACK_GUARD_PAGES);
                ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
        }
        td->td_kstack = ks;
        /*
         * Knowing the number of pages allocated is useful when you
         * want to deallocate them.
         */
        td->td_kstack_pages = pages;
        /*
         * For the length of the stack, link in a real page of ram for each
         * page of stack.
         */
        VM_OBJECT_LOCK(ksobj);
        for (i = 0; i < pages; i++) {
                /*
                 * Get a kernel stack page.
                 */
                m = vm_page_grab(ksobj, i, VM_ALLOC_NOBUSY |
                    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
                ma[i] = m;
                m->valid = VM_PAGE_BITS_ALL;
        }
        VM_OBJECT_UNLOCK(ksobj);
        pmap_qenter(ks, ma, pages);
}
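
/*
 * Editor's illustration (not part of the original file): with
 * KSTACK_GUARD_PAGES = 1 and pages = 4, the allocation made above is laid
 * out as below.  pmap_qremove() unmaps the guard page so that a stack
 * overflow faults instead of silently corrupting the adjacent allocation:
 *
 *      ks (kmem_alloc_nofault)  td_kstack
 *      |                        |
 *      v                        v
 *      +--------+--------+--------+--------+--------+
 *      | guard  | stack  | stack  | stack  | stack  |
 *      +--------+--------+--------+--------+--------+
 *               <-- the stack grows down toward the guard page
 */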

/*
 * Dispose of a thread's kernel stack.
 */
void
vm_thread_dispose(struct thread *td)
{
        vm_object_t ksobj;
        vm_offset_t ks;
        vm_page_t m;
        int i, pages;

        pages = td->td_kstack_pages;
        ksobj = td->td_kstack_obj;
        ks = td->td_kstack;
        pmap_qremove(ks, pages);
        VM_OBJECT_LOCK(ksobj);
        for (i = 0; i < pages; i++) {
                m = vm_page_lookup(ksobj, i);
                if (m == NULL)
                        panic("vm_thread_dispose: kstack already missing?");
                vm_page_lock_queues();
                vm_page_unwire(m, 0);
                vm_page_free(m);
                vm_page_unlock_queues();
        }
        VM_OBJECT_UNLOCK(ksobj);
        vm_object_deallocate(ksobj);
        kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
            (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
}

/*
 * Allow a thread's kernel stack to be paged out.
 */
void
vm_thread_swapout(struct thread *td)
{
        vm_object_t ksobj;
        vm_page_t m;
        int i, pages;

        cpu_thread_swapout(td);
        pages = td->td_kstack_pages;
        ksobj = td->td_kstack_obj;
        pmap_qremove(td->td_kstack, pages);
        VM_OBJECT_LOCK(ksobj);
        for (i = 0; i < pages; i++) {
                m = vm_page_lookup(ksobj, i);
                if (m == NULL)
                        panic("vm_thread_swapout: kstack already missing?");
                vm_page_lock_queues();
                vm_page_dirty(m);
                vm_page_unwire(m, 0);
                vm_page_unlock_queues();
        }
        VM_OBJECT_UNLOCK(ksobj);
}

/*
 * Bring the kernel stack for a specified thread back in.
 */
void
vm_thread_swapin(struct thread *td)
{
        vm_object_t ksobj;
        vm_page_t m, ma[KSTACK_MAX_PAGES];
        int i, pages, rv;

        pages = td->td_kstack_pages;
        ksobj = td->td_kstack_obj;
        VM_OBJECT_LOCK(ksobj);
        for (i = 0; i < pages; i++) {
                m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
                if (m->valid != VM_PAGE_BITS_ALL) {
                        rv = vm_pager_get_pages(ksobj, &m, 1, 0);
                        if (rv != VM_PAGER_OK)
                                panic("vm_thread_swapin: cannot get kstack for proc: %d",
                                    td->td_proc->p_pid);
                        m = vm_page_lookup(ksobj, i);
                        m->valid = VM_PAGE_BITS_ALL;
                }
                ma[i] = m;
                vm_page_lock_queues();
                vm_page_wire(m);
                vm_page_wakeup(m);
                vm_page_unlock_queues();
        }
        VM_OBJECT_UNLOCK(ksobj);
        pmap_qenter(td->td_kstack, ma, pages);
        cpu_thread_swapin(td);
}

/*
 * Set up a variable-sized alternate kstack.
 */
void
vm_thread_new_altkstack(struct thread *td, int pages)
{

        td->td_altkstack = td->td_kstack;
        td->td_altkstack_obj = td->td_kstack_obj;
        td->td_altkstack_pages = td->td_kstack_pages;

        vm_thread_new(td, pages);
}

/*
 * Restore the original kstack.
 */
void
vm_thread_dispose_altkstack(struct thread *td)
{

        vm_thread_dispose(td);

        td->td_kstack = td->td_altkstack;
        td->td_kstack_obj = td->td_altkstack_obj;
        td->td_kstack_pages = td->td_altkstack_pages;
        td->td_altkstack = 0;
        td->td_altkstack_obj = NULL;
        td->td_altkstack_pages = 0;
}

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
void
vm_forkproc(struct thread *td, struct proc *p2, struct thread *td2, int flags)
{
        struct proc *p1 = td->td_proc;

        if ((flags & RFPROC) == 0) {
                /*
                 * Divorce the memory, if it is shared: essentially
                 * this changes memory shared amongst threads into
                 * locally copy-on-write memory.
                 */
                if ((flags & RFMEM) == 0) {
                        if (p1->p_vmspace->vm_refcnt > 1) {
                                vmspace_unshare(p1);
                        }
                }
                cpu_fork(td, p2, td2, flags);
                return;
        }

        if (flags & RFMEM) {
                p2->p_vmspace = p1->p_vmspace;
                atomic_add_int(&p1->p_vmspace->vm_refcnt, 1);
        }

        while (vm_page_count_severe()) {
                VM_WAIT;
        }

        if ((flags & RFMEM) == 0) {
                p2->p_vmspace = vmspace_fork(p1->p_vmspace);
                if (p1->p_vmspace->vm_shm)
                        shmfork(p1, p2);
        }

        /*
         * cpu_fork will copy and update the pcb, set up the kernel stack,
         * and make the child ready to run.
         */
        cpu_fork(td, p2, td2, flags);
}
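
/*
 * Editor's note (not part of the original file): the flag combinations
 * correspond to the classic entry points; the calls below are an
 * illustrative sketch of how fork1() passes them through.
 */
#if 0
        vm_forkproc(td, p2, td2, RFFDG | RFPROC);       /* fork(2): copy */
        vm_forkproc(td, p2, td2,
            RFFDG | RFPROC | RFPPWAIT | RFMEM);         /* vfork(2): share */
#endif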

/*
 * Called after a process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(struct proc *p)
{

        vmspace_exitfree(p);            /* and clean-out the vmspace */
}

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 *
 * XXX should probably act directly on proc0.
 */
static void
vm_init_limits(void *udata)
{
        struct proc *p = udata;
        struct plimit *limp;
        int rss_limit;

        /*
         * Set up the initial limits on process VM. Set the maximum resident
         * set size to be half of (reasonably) available memory.  Since this
         * is a soft limit, it comes into effect only when the system is out
         * of memory - half of main memory helps to favor smaller processes,
         * and reduces thrashing of the object cache.
         */
        limp = p->p_limit;
        limp->pl_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
        limp->pl_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
        limp->pl_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
        limp->pl_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
        /* limit the limit to no less than 2MB */
        rss_limit = max(cnt.v_free_count, 512);
        limp->pl_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
        limp->pl_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}
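
/*
 * Editor's worked example (not part of the original file): with 4 KB
 * pages, the floor of 512 pages above is ptoa(512) = 512 * 4096 = 2 MB,
 * the "no less than 2MB" from the comment; on a machine with ample free
 * memory the soft RSS limit becomes ptoa(cnt.v_free_count) instead.
 */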

void
faultin(struct proc *p)
{
#ifdef NO_SWAPPING

        PROC_LOCK_ASSERT(p, MA_OWNED);
        if ((p->p_sflag & PS_INMEM) == 0)
                panic("faultin: proc swapped out with NO_SWAPPING!");
#else /* !NO_SWAPPING */
        struct thread *td;

        GIANT_REQUIRED;
        PROC_LOCK_ASSERT(p, MA_OWNED);
        /*
         * If another process is swapping in this process,
         * just wait until it finishes.
         */
        if (p->p_sflag & PS_SWAPPINGIN)
                msleep(&p->p_sflag, &p->p_mtx, PVM, "faultin", 0);
        else if ((p->p_sflag & PS_INMEM) == 0) {
                /*
                 * Don't let another thread swap process p out while we are
                 * busy swapping it in.
                 */
                ++p->p_lock;
                mtx_lock_spin(&sched_lock);
                p->p_sflag |= PS_SWAPPINGIN;
                mtx_unlock_spin(&sched_lock);
                PROC_UNLOCK(p);

                FOREACH_THREAD_IN_PROC(p, td)
                        vm_thread_swapin(td);

                PROC_LOCK(p);
                mtx_lock_spin(&sched_lock);
                p->p_sflag &= ~PS_SWAPPINGIN;
                p->p_sflag |= PS_INMEM;
                FOREACH_THREAD_IN_PROC(p, td) {
                        TD_CLR_SWAPPED(td);
                        if (TD_CAN_RUN(td))
                                setrunnable(td);
                }
                mtx_unlock_spin(&sched_lock);

                wakeup(&p->p_sflag);

                /* Allow other threads to swap p out now. */
                --p->p_lock;
        }
#endif /* NO_SWAPPING */
}

/*
 * This swapin algorithm attempts to swap-in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 *
 * XXXKSE - the process with the highest-priority thread counts.
 *
 * Giant is still held at this point, to be released in tsleep.
 */
/* ARGSUSED*/
static void
scheduler(void *dummy)
{
        struct proc *p;
        struct thread *td;
        int pri;
        struct proc *pp;
        int ppri;

        mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
        /* GIANT_REQUIRED */

loop:
        if (vm_page_count_min()) {
                VM_WAIT;
                goto loop;
        }

        pp = NULL;
        ppri = INT_MIN;
        sx_slock(&allproc_lock);
        FOREACH_PROC_IN_SYSTEM(p) {
                struct ksegrp *kg;
                if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
                        continue;
                }
                mtx_lock_spin(&sched_lock);
                FOREACH_THREAD_IN_PROC(p, td) {
                        /*
                         * An otherwise runnable thread of a process
                         * swapped out has only the TDI_SWAPPED bit set.
                         */
                        if (td->td_inhibitors == TDI_SWAPPED) {
                                kg = td->td_ksegrp;
                                pri = p->p_swtime + kg->kg_slptime;
                                if ((p->p_sflag & PS_SWAPINREQ) == 0) {
                                        pri -= p->p_nice * 8;
                                }

                                /*
                                 * If this ksegrp has a higher priority
                                 * and there is enough space, then select
                                 * this process instead of the previous
                                 * selection.
                                 */
                                if (pri > ppri) {
                                        pp = p;
                                        ppri = pri;
                                }
                        }
                }
                mtx_unlock_spin(&sched_lock);
        }
        sx_sunlock(&allproc_lock);

        /*
         * Nothing to do, back to sleep.
         */
        if ((p = pp) == NULL) {
                tsleep(&proc0, PVM, "sched", maxslp * hz / 2);
                goto loop;
        }
        PROC_LOCK(p);

        /*
         * Another process may be bringing or may have already
         * brought this process in while we traverse all threads.
         * Or, this process may even be being swapped out again.
         */
        if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
                PROC_UNLOCK(p);
                goto loop;
        }

        mtx_lock_spin(&sched_lock);
        p->p_sflag &= ~PS_SWAPINREQ;
        mtx_unlock_spin(&sched_lock);

        /*
         * We would like to bring someone in (only if there is space).
         * [What checks the space?]
         */
        faultin(p);
        PROC_UNLOCK(p);
        mtx_lock_spin(&sched_lock);
        p->p_swtime = 0;
        mtx_unlock_spin(&sched_lock);
        goto loop;
}
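
/*
 * Editor's worked example (not part of the original file): a process
 * swapped out for p_swtime = 10 seconds whose chosen ksegrp has
 * kg_slptime = 5 starts at pri = 10 + 5 = 15.  If no explicit swapin was
 * requested (PS_SWAPINREQ clear) and p_nice = -20, then pri -= (-20 * 8)
 * raises it to 175, so long-swapped, high-priority processes win the
 * selection.
 */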

#ifndef NO_SWAPPING

/*
 * Swap_idle_threshold1 is the guaranteed swapped-in time for a process.
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
    &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process");

/*
 * Swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
    &swap_idle_threshold2, 0, "Time before a process will be swapped out");

/*
 * Swapout is driven by the pageout daemon.  Very simply, we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
void
swapout_procs(int action)
{
        struct proc *p;
        struct thread *td;
        struct ksegrp *kg;
        int didswap = 0;

        GIANT_REQUIRED;

retry:
        sx_slock(&allproc_lock);
        FOREACH_PROC_IN_SYSTEM(p) {
                struct vmspace *vm;
                int minslptime = 100000;

                /*
                 * Watch out for a process in
                 * creation.  It may have no
                 * address space or lock yet.
                 */
                mtx_lock_spin(&sched_lock);
                if (p->p_state == PRS_NEW) {
                        mtx_unlock_spin(&sched_lock);
                        continue;
                }
                mtx_unlock_spin(&sched_lock);

                /*
                 * An aio daemon switches its
                 * address space while running.
                 * Perform a quick check whether
                 * a process has P_SYSTEM.
                 */
                if ((p->p_flag & P_SYSTEM) != 0)
                        continue;

                /*
                 * Do not swap out a process that
                 * is waiting for VM data
                 * structures as there is a possible
                 * deadlock.  Test this first as
                 * this may block.
                 *
                 * Lock the map until swapout
                 * finishes, or a thread of this
                 * process may attempt to alter
                 * the map.
                 */
                PROC_LOCK(p);
                vm = p->p_vmspace;
                KASSERT(vm != NULL,
                        ("swapout_procs: a process has no address space"));
                atomic_add_int(&vm->vm_refcnt, 1);
                PROC_UNLOCK(p);
                if (!vm_map_trylock(&vm->vm_map))
                        goto nextproc1;

                PROC_LOCK(p);
                if (p->p_lock != 0 ||
                    (p->p_flag & (P_STOPPED_SINGLE|P_TRACED|P_SYSTEM|P_WEXIT)
                    ) != 0) {
                        goto nextproc2;
                }
                /*
                 * Only aiod changes vmspace; however, it will be
                 * skipped because of the if statement above checking
                 * for P_SYSTEM.
                 */
                if ((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) != PS_INMEM)
                        goto nextproc2;

                switch (p->p_state) {
                default:
                        /*
                         * Don't swap out processes in any sort
                         * of 'special' state.
                         */
                        break;

                case PRS_NORMAL:
                        mtx_lock_spin(&sched_lock);
                        /*
                         * Do not swap out a realtime process.
                         * Check all the thread groups.
                         */
                        FOREACH_KSEGRP_IN_PROC(p, kg) {
                                if (PRI_IS_REALTIME(kg->kg_pri_class))
                                        goto nextproc;

                                /*
                                 * Guarantee swap_idle_threshold1
                                 * time in memory.
                                 */
                                if (kg->kg_slptime < swap_idle_threshold1)
                                        goto nextproc;

                                /*
                                 * Do not swap out a process if it is
                                 * waiting on a critical event of some
                                 * kind or there is a thread whose
                                 * pageable memory may be accessed.
                                 *
                                 * This could be refined to support
                                 * swapping out a thread.
                                 */
                                FOREACH_THREAD_IN_GROUP(kg, td) {
                                        if ((td->td_priority) < PSOCK ||
                                            !thread_safetoswapout(td))
                                                goto nextproc;
                                }
                                /*
                                 * If the system is under memory stress,
                                 * or if we are swapping
                                 * idle processes >= swap_idle_threshold2,
                                 * then swap the process out.
                                 */
                                if (((action & VM_SWAP_NORMAL) == 0) &&
                                    (((action & VM_SWAP_IDLE) == 0) ||
                                    (kg->kg_slptime < swap_idle_threshold2)))
                                        goto nextproc;

                                if (minslptime > kg->kg_slptime)
                                        minslptime = kg->kg_slptime;
                        }

                        /*
                         * If the pageout daemon didn't free enough pages,
                         * or if this process is idle and the system is
                         * configured to swap proactively, swap it out.
                         */
                        if ((action & VM_SWAP_NORMAL) ||
                                ((action & VM_SWAP_IDLE) &&
                                 (minslptime > swap_idle_threshold2))) {
                                swapout(p);
                                didswap++;
                                mtx_unlock_spin(&sched_lock);
                                PROC_UNLOCK(p);
                                vm_map_unlock(&vm->vm_map);
                                vmspace_free(vm);
                                sx_sunlock(&allproc_lock);
                                goto retry;
                        }
nextproc:
                        mtx_unlock_spin(&sched_lock);
                }
nextproc2:
                PROC_UNLOCK(p);
                vm_map_unlock(&vm->vm_map);
nextproc1:
                vmspace_free(vm);
                continue;
        }
        sx_sunlock(&allproc_lock);
        /*
         * If we swapped something out, and another process needed memory,
         * then wakeup the sched process.
         */
        if (didswap)
                wakeup(&proc0);
}
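
/*
 * Editor's note (not part of the original file): this routine is driven
 * from the VM daemon, e.g. swapout_procs(VM_SWAP_NORMAL) under memory
 * pressure, or swapout_procs(VM_SWAP_IDLE) when proactive idle swapping
 * has been enabled via the vm.swap_idle_enabled sysctl.
 */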

static void
swapout(struct proc *p)
{
        struct thread *td;

        PROC_LOCK_ASSERT(p, MA_OWNED);
        mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
#if defined(SWAP_DEBUG)
        printf("swapping out %d\n", p->p_pid);
#endif

        /*
         * The states of this process and its threads may have changed
         * by now.  Assuming that there is only one pageout daemon thread,
         * this process should still be in memory.
         */
        KASSERT((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) == PS_INMEM,
                ("swapout: lost a swapout race?"));

#if defined(INVARIANTS)
        /*
         * Make sure that all threads are safe to be swapped out.
         *
         * Alternatively, we could swap out only safe threads.
         */
        FOREACH_THREAD_IN_PROC(p, td) {
                KASSERT(thread_safetoswapout(td),
                        ("swapout: there is a thread not safe for swapout"));
        }
#endif /* INVARIANTS */

        ++p->p_stats->p_ru.ru_nswap;
        /*
         * Remember the process resident count.
         */
        p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);

        p->p_sflag &= ~PS_INMEM;
        p->p_sflag |= PS_SWAPPINGOUT;
        PROC_UNLOCK(p);
        FOREACH_THREAD_IN_PROC(p, td)
                TD_SET_SWAPPED(td);
        mtx_unlock_spin(&sched_lock);

        FOREACH_THREAD_IN_PROC(p, td)
                vm_thread_swapout(td);

        PROC_LOCK(p);
        mtx_lock_spin(&sched_lock);
        p->p_sflag &= ~PS_SWAPPINGOUT;
        p->p_swtime = 0;
}
#endif /* !NO_SWAPPING */
