FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_glue.c

/*-
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)vm_glue.c     8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"
#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

extern int maxslp;

/*
 * System initialization
 *
 * Note: proc0 from proc.h
 */
static void vm_init_limits(void *);
SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0)

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler(void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_ANY, scheduler, NULL)

#ifndef NO_SWAPPING
static int swapout(struct proc *);
static void swapclear(struct proc *);
#endif

static volatile int proc0_rescan;

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  In most cases
 * just checking the vm_map_entry is sufficient within the kernel's address
 * space.
 */
int
kernacc(void *addr, int len, int rw)
{
        boolean_t rv;
        vm_offset_t saddr, eaddr;
        vm_prot_t prot;

        KASSERT((rw & ~VM_PROT_ALL) == 0,
            ("illegal ``rw'' argument to kernacc (%x)\n", rw));

        if ((vm_offset_t)addr + len > kernel_map->max_offset ||
            (vm_offset_t)addr + len < (vm_offset_t)addr)
                return (FALSE);

        prot = rw;
        saddr = trunc_page((vm_offset_t)addr);
        eaddr = round_page((vm_offset_t)addr + len);
        vm_map_lock_read(kernel_map);
        rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
        vm_map_unlock_read(kernel_map);
        return (rv == TRUE);
}

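/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * in-kernel consumer might use kernacc() as a cheap sanity check before
 * touching a kernel-space buffer handed to it.  The function name below
 * is made up for the example.
 */
#if 0
static int
example_validate_kbuf(void *buf, int len)
{
        /*
         * Only the vm_map_entry protections are checked, per the
         * warning above; the pages themselves are not faulted in.
         */
        if (!kernacc(buf, len, VM_PROT_READ | VM_PROT_WRITE))
                return (EFAULT);
        return (0);
}
#endif
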
/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
 * used in conjunction with this call.
 */
int
useracc(void *addr, int len, int rw)
{
        boolean_t rv;
        vm_prot_t prot;
        vm_map_t map;

        KASSERT((rw & ~VM_PROT_ALL) == 0,
            ("illegal ``rw'' argument to useracc (%x)\n", rw));
        prot = rw;
        map = &curproc->p_vmspace->vm_map;
        if ((vm_offset_t)addr + len > vm_map_max(map) ||
            (vm_offset_t)addr + len < (vm_offset_t)addr) {
                return (FALSE);
        }
        vm_map_lock_read(map);
        rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
            round_page((vm_offset_t)addr + len), prot);
        vm_map_unlock_read(map);
        return (rv == TRUE);
}

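/*
 * Illustrative sketch (hypothetical, not from this file): because
 * useracc() only inspects map protections, an actual data transfer
 * still goes through copyin()/copyout(), which perform the real fault
 * handling and fail safely on unmapped pages.
 */
#if 0
static int
example_fetch_user_word(const void *uaddr, int *out)
{
        if (!useracc(__DECONST(void *, uaddr), sizeof(int), VM_PROT_READ))
                return (EFAULT);
        return (copyin(uaddr, out, sizeof(int)));
}
#endif
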
int
vslock(void *addr, size_t len)
{
        vm_offset_t end, last, start;
        vm_size_t npages;
        int error;

        last = (vm_offset_t)addr + len;
        start = trunc_page((vm_offset_t)addr);
        end = round_page(last);
        if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)
                return (EINVAL);
        npages = atop(end - start);
        if (npages > vm_page_max_wired)
                return (ENOMEM);
        PROC_LOCK(curproc);
        if (ptoa(npages +
            pmap_wired_count(vm_map_pmap(&curproc->p_vmspace->vm_map))) >
            lim_cur(curproc, RLIMIT_MEMLOCK)) {
                PROC_UNLOCK(curproc);
                return (ENOMEM);
        }
        PROC_UNLOCK(curproc);
#if 0
        /*
         * XXX - not yet
         *
         * The limit for transient usage of wired pages should be
         * larger than for "permanent" wired pages (mlock()).
         *
         * Also, the sysctl code, which is the only present user
         * of vslock(), does a hard loop on EAGAIN.
         */
        if (npages + cnt.v_wire_count > vm_page_max_wired)
                return (EAGAIN);
#endif
        error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
            VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
        /*
         * Return EFAULT on error to match copy{in,out}() behaviour
         * rather than returning ENOMEM like mlock() would.
         */
        return (error == KERN_SUCCESS ? 0 : EFAULT);
}

void
vsunlock(void *addr, size_t len)
{

        /* Rely on the parameter sanity checks performed by vslock(). */
        (void)vm_map_unwire(&curproc->p_vmspace->vm_map,
            trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
            VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
}

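/*
 * Illustrative sketch (hypothetical): the wire/copy/unwire pattern used
 * by sysctl-style consumers of vslock()/vsunlock().  Wiring the user
 * buffer guarantees that the subsequent copyout() cannot fault while
 * kernel data structures are locked.
 */
#if 0
static int
example_copyout_wired(void *kbuf, void *ubuf, size_t len)
{
        int error;

        error = vslock(ubuf, len);
        if (error != 0)
                return (error);
        error = copyout(kbuf, ubuf, len);
        vsunlock(ubuf, len);
        return (error);
}
#endif
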
/*
 * Pin the page contained within the given object at the given offset.  If the
 * page is not resident, allocate and load it using the given object's pager.
 * Return the pinned page if successful; otherwise, return NULL.
 */
static vm_page_t
vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
{
        vm_page_t m, ma[1];
        vm_pindex_t pindex;
        int rv;

        VM_OBJECT_LOCK(object);
        pindex = OFF_TO_IDX(offset);
        m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
        if ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
                ma[0] = m;
                rv = vm_pager_get_pages(object, ma, 1, 0);
                m = vm_page_lookup(object, pindex);
                if (m == NULL)
                        goto out;
                if (m->valid == 0 || rv != VM_PAGER_OK) {
                        vm_page_lock_queues();
                        vm_page_free(m);
                        vm_page_unlock_queues();
                        m = NULL;
                        goto out;
                }
        }
        vm_page_lock_queues();
        vm_page_hold(m);
        vm_page_unlock_queues();
        vm_page_wakeup(m);
out:
        VM_OBJECT_UNLOCK(object);
        return (m);
}

/*
 * Return a CPU private mapping to the page at the given offset within the
 * given object.  The page is pinned before it is mapped.
 */
struct sf_buf *
vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset)
{
        vm_page_t m;

        m = vm_imgact_hold_page(object, offset);
        if (m == NULL)
                return (NULL);
        sched_pin();
        return (sf_buf_alloc(m, SFB_CPUPRIVATE));
}

/*
 * Destroy the given CPU private mapping and unpin the page that it mapped.
 */
void
vm_imgact_unmap_page(struct sf_buf *sf)
{
        vm_page_t m;

        m = sf_buf_page(sf);
        sf_buf_free(sf);
        sched_unpin();
        vm_page_lock_queues();
        vm_page_unhold(m);
        vm_page_unlock_queues();
}

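/*
 * Illustrative sketch (hypothetical): how an image activator might use
 * the pair above to peek at a file header through the page cache.
 * sf_buf_kva() returns the kernel virtual address of the mapping.
 */
#if 0
static int
example_read_image_magic(vm_object_t obj, uint32_t *magic)
{
        struct sf_buf *sf;

        sf = vm_imgact_map_page(obj, 0);
        if (sf == NULL)
                return (EIO);
        *magic = *(uint32_t *)sf_buf_kva(sf);
        vm_imgact_unmap_page(sf);
        return (0);
}
#endif
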
#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif

/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 * This routine directly affects the fork performance for a process and
 * the creation performance for a thread.
 */
int
vm_thread_new(struct thread *td, int pages)
{
        vm_object_t ksobj;
        vm_offset_t ks;
        vm_page_t m, ma[KSTACK_MAX_PAGES];
        int i;

        /* Bounds check */
        if (pages <= 1)
                pages = KSTACK_PAGES;
        else if (pages > KSTACK_MAX_PAGES)
                pages = KSTACK_MAX_PAGES;
        /*
         * Allocate an object for the kstack.
         */
        ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
        /*
         * Get a kernel virtual address for this thread's kstack.
         */
        ks = kmem_alloc_nofault(kernel_map,
            (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
        if (ks == 0) {
                printf("vm_thread_new: kstack allocation failed\n");
                vm_object_deallocate(ksobj);
                return (0);
        }

        if (KSTACK_GUARD_PAGES != 0) {
                pmap_qremove(ks, KSTACK_GUARD_PAGES);
                ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
        }
        td->td_kstack_obj = ksobj;
        td->td_kstack = ks;
        /*
         * Knowing the number of pages allocated is useful when you
         * want to deallocate them.
         */
        td->td_kstack_pages = pages;
        /*
         * For the length of the stack, link in a real page of ram for each
         * page of stack.
         */
        VM_OBJECT_LOCK(ksobj);
        for (i = 0; i < pages; i++) {
                /*
                 * Get a kernel stack page.
                 */
                m = vm_page_grab(ksobj, i, VM_ALLOC_NOBUSY |
                    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
                ma[i] = m;
                m->valid = VM_PAGE_BITS_ALL;
        }
        VM_OBJECT_UNLOCK(ksobj);
        pmap_qenter(ks, ma, pages);
        return (1);
}

/*
 * Dispose of a thread's kernel stack.
 */
void
vm_thread_dispose(struct thread *td)
{
        vm_object_t ksobj;
        vm_offset_t ks;
        vm_page_t m;
        int i, pages;

        pages = td->td_kstack_pages;
        ksobj = td->td_kstack_obj;
        ks = td->td_kstack;
        pmap_qremove(ks, pages);
        VM_OBJECT_LOCK(ksobj);
        for (i = 0; i < pages; i++) {
                m = vm_page_lookup(ksobj, i);
                if (m == NULL)
                        panic("vm_thread_dispose: kstack already missing?");
                vm_page_lock_queues();
                vm_page_unwire(m, 0);
                vm_page_free(m);
                vm_page_unlock_queues();
        }
        VM_OBJECT_UNLOCK(ksobj);
        vm_object_deallocate(ksobj);
        kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
            (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
        td->td_kstack = 0;
}

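/*
 * Illustrative lifecycle sketch (hypothetical): a kstack is created when
 * a thread is set up and disposed when the thread is reaped.  Passing 0
 * for pages selects the KSTACK_PAGES default; the address placed in
 * td->td_kstack points just above the unmapped guard pages.
 */
#if 0
if (vm_thread_new(td, 0) == 0)
        panic("no memory for thread kstack");
/* ... thread runs ... */
vm_thread_dispose(td);
#endif
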
/*
 * Allow a thread's kernel stack to be paged out.
 */
void
vm_thread_swapout(struct thread *td)
{
        vm_object_t ksobj;
        vm_page_t m;
        int i, pages;

        cpu_thread_swapout(td);
        pages = td->td_kstack_pages;
        ksobj = td->td_kstack_obj;
        pmap_qremove(td->td_kstack, pages);
        VM_OBJECT_LOCK(ksobj);
        for (i = 0; i < pages; i++) {
                m = vm_page_lookup(ksobj, i);
                if (m == NULL)
                        panic("vm_thread_swapout: kstack already missing?");
                vm_page_lock_queues();
                vm_page_dirty(m);
                vm_page_unwire(m, 0);
                vm_page_unlock_queues();
        }
        VM_OBJECT_UNLOCK(ksobj);
}

/*
 * Bring the kernel stack for a specified thread back in.
 */
void
vm_thread_swapin(struct thread *td)
{
        vm_object_t ksobj;
        vm_page_t m, ma[KSTACK_MAX_PAGES];
        int i, pages, rv;

        pages = td->td_kstack_pages;
        ksobj = td->td_kstack_obj;
        VM_OBJECT_LOCK(ksobj);
        for (i = 0; i < pages; i++) {
                m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
                if (m->valid != VM_PAGE_BITS_ALL) {
                        rv = vm_pager_get_pages(ksobj, &m, 1, 0);
                        if (rv != VM_PAGER_OK)
                                panic("vm_thread_swapin: cannot get kstack for proc: %d",
                                    td->td_proc->p_pid);
                        m = vm_page_lookup(ksobj, i);
                        m->valid = VM_PAGE_BITS_ALL;
                }
                ma[i] = m;
                vm_page_lock_queues();
                vm_page_wire(m);
                vm_page_unlock_queues();
                vm_page_wakeup(m);
        }
        VM_OBJECT_UNLOCK(ksobj);
        pmap_qenter(td->td_kstack, ma, pages);
        cpu_thread_swapin(td);
}

/*
 * Set up a variable-sized alternate kstack.
 */
int
vm_thread_new_altkstack(struct thread *td, int pages)
{

        td->td_altkstack = td->td_kstack;
        td->td_altkstack_obj = td->td_kstack_obj;
        td->td_altkstack_pages = td->td_kstack_pages;

        return (vm_thread_new(td, pages));
}

/*
 * Restore the original kstack.
 */
void
vm_thread_dispose_altkstack(struct thread *td)
{

        vm_thread_dispose(td);

        td->td_kstack = td->td_altkstack;
        td->td_kstack_obj = td->td_altkstack_obj;
        td->td_kstack_pages = td->td_altkstack_pages;
        td->td_altkstack = 0;
        td->td_altkstack_obj = NULL;
        td->td_altkstack_pages = 0;
}

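/*
 * Illustrative sketch (hypothetical): the alternate-kstack pair acts as
 * a save/restore around a temporary, differently sized stack.
 */
#if 0
vm_thread_new_altkstack(td, 4);         /* stash current stack, make a 4-page one */
/* ... run on the larger stack ... */
vm_thread_dispose_altkstack(td);        /* free it and restore the original */
#endif
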
/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
int
vm_forkproc(struct thread *td, struct proc *p2, struct thread *td2,
    struct vmspace *vm2, int flags)
{
        struct proc *p1 = td->td_proc;
        int error;

        if ((flags & RFPROC) == 0) {
                /*
                 * Divorce the memory, if it is shared: essentially
                 * this changes memory shared amongst threads into
                 * local copy-on-write.
                 */
                if ((flags & RFMEM) == 0) {
                        if (p1->p_vmspace->vm_refcnt > 1) {
                                error = vmspace_unshare(p1);
                                if (error)
                                        return (error);
                        }
                }
                cpu_fork(td, p2, td2, flags);
                return (0);
        }

        if (flags & RFMEM) {
                p2->p_vmspace = p1->p_vmspace;
                atomic_add_int(&p1->p_vmspace->vm_refcnt, 1);
        }

        while (vm_page_count_severe()) {
                VM_WAIT;
        }

        if ((flags & RFMEM) == 0) {
                p2->p_vmspace = vm2;
                if (p1->p_vmspace->vm_shm)
                        shmfork(p1, p2);
        }

        /*
         * cpu_fork will copy and update the pcb, set up the kernel stack,
         * and make the child ready to run.
         */
        cpu_fork(td, p2, td2, flags);
        return (0);
}

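/*
 * Illustrative caller sketch (hypothetical, fork1()-like; the variables
 * are assumed from that context).  When the child is to get its own
 * address space, the caller is assumed to have prepared a copy-on-write
 * copy with vmspace_fork(); with RFMEM the parent's vmspace is shared
 * by reference instead and vm2 goes unused.
 */
#if 0
struct vmspace *vm2;

vm2 = (flags & RFMEM) ? NULL : vmspace_fork(p1->p_vmspace);
error = vm_forkproc(td, p2, td2, vm2, flags);
#endif
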
/*
 * Called after a process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(struct proc *p)
{

        vmspace_exitfree(p);            /* and clean-out the vmspace */
}

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 *
 * XXX should probably act directly on proc0.
 */
static void
vm_init_limits(void *udata)
{
        struct proc *p = udata;
        struct plimit *limp;
        int rss_limit;

        /*
         * Set up the initial limits on process VM. Set the maximum resident
         * set size to be half of (reasonably) available memory.  Since this
         * is a soft limit, it comes into effect only when the system is out
         * of memory - half of main memory helps to favor smaller processes,
         * and reduces thrashing of the object cache.
         */
        limp = p->p_limit;
        limp->pl_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
        limp->pl_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
        limp->pl_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
        limp->pl_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
        /* Limit the limit to no less than 2MB (512 pages at 4KB each). */
        rss_limit = max(cnt.v_free_count, 512);
        limp->pl_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
        limp->pl_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}

void
faultin(struct proc *p)
{
#ifdef NO_SWAPPING

        PROC_LOCK_ASSERT(p, MA_OWNED);
        if ((p->p_flag & P_INMEM) == 0)
                panic("faultin: proc swapped out with NO_SWAPPING!");
#else /* !NO_SWAPPING */
        struct thread *td;

        PROC_LOCK_ASSERT(p, MA_OWNED);
        /*
         * If another process is swapping in this process,
         * just wait until it finishes.
         */
        if (p->p_flag & P_SWAPPINGIN) {
                while (p->p_flag & P_SWAPPINGIN)
                        msleep(&p->p_flag, &p->p_mtx, PVM, "faultin", 0);
                return;
        }
        if ((p->p_flag & P_INMEM) == 0) {
                /*
                 * Don't let another thread swap process p out while we are
                 * busy swapping it in.
                 */
                ++p->p_lock;
                p->p_flag |= P_SWAPPINGIN;
                PROC_UNLOCK(p);

                /*
                 * We hold no lock here because the list of threads
                 * can not change while all threads in the process are
                 * swapped out.
                 */
                FOREACH_THREAD_IN_PROC(p, td)
                        vm_thread_swapin(td);
                PROC_LOCK(p);
                PROC_SLOCK(p);
                swapclear(p);
                p->p_swtick = ticks;
                PROC_SUNLOCK(p);

                wakeup(&p->p_flag);

                /* Allow other threads to swap p out now. */
                --p->p_lock;
        }
#endif /* NO_SWAPPING */
}

/*
 * This swapin algorithm attempts to swap in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 *
 * XXXKSE - the process with the highest-priority thread counts.
 *
 * Giant is held on entry.
 */
/* ARGSUSED*/
static void
scheduler(void *dummy)
{
        struct proc *p;
        struct thread *td;
        struct proc *pp;
        int slptime;
        int swtime;
        int ppri;
        int pri;

        mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
        mtx_unlock(&Giant);

loop:
        if (vm_page_count_min()) {
                VM_WAIT;
                thread_lock(&thread0);
                proc0_rescan = 0;
                thread_unlock(&thread0);
                goto loop;
        }

        pp = NULL;
        ppri = INT_MIN;
        sx_slock(&allproc_lock);
        FOREACH_PROC_IN_SYSTEM(p) {
                PROC_LOCK(p);
                if (p->p_flag & (P_SWAPPINGOUT | P_SWAPPINGIN | P_INMEM)) {
                        PROC_UNLOCK(p);
                        continue;
                }
                swtime = (ticks - p->p_swtick) / hz;
                PROC_SLOCK(p);
                FOREACH_THREAD_IN_PROC(p, td) {
                        /*
                         * An otherwise runnable thread of a process
                         * swapped out has only the TDI_SWAPPED bit set.
                         */
                        thread_lock(td);
                        if (td->td_inhibitors == TDI_SWAPPED) {
                                slptime = (ticks - td->td_slptick) / hz;
                                pri = swtime + slptime;
                                if ((td->td_flags & TDF_SWAPINREQ) == 0)
                                        pri -= p->p_nice * 8;
                                /*
                                 * If this thread is higher priority
                                 * and there is enough space, then select
                                 * this process instead of the previous
                                 * selection.
                                 */
                                if (pri > ppri) {
                                        pp = p;
                                        ppri = pri;
                                }
                        }
                        thread_unlock(td);
                }
                PROC_SUNLOCK(p);
                PROC_UNLOCK(p);
        }
        sx_sunlock(&allproc_lock);

        /*
         * Nothing to do, back to sleep.
         */
        if ((p = pp) == NULL) {
                thread_lock(&thread0);
                if (!proc0_rescan) {
                        TD_SET_IWAIT(&thread0);
                        mi_switch(SW_VOL, NULL);
                }
                proc0_rescan = 0;
                thread_unlock(&thread0);
                goto loop;
        }
        PROC_LOCK(p);

        /*
         * Another process may be bringing or may have already
         * brought this process in while we traverse all threads.
         * Or, this process may even be being swapped out again.
         */
        if (p->p_flag & (P_INMEM | P_SWAPPINGOUT | P_SWAPPINGIN)) {
                PROC_UNLOCK(p);
                thread_lock(&thread0);
                proc0_rescan = 0;
                thread_unlock(&thread0);
                goto loop;
        }

        /*
         * We would like to bring someone in (only if there is space).
         * [XXX: what checks the space?]
         */
        faultin(p);
        PROC_UNLOCK(p);
        thread_lock(&thread0);
        proc0_rescan = 0;
        thread_unlock(&thread0);
        goto loop;
}

void
kick_proc0(void)
{
        struct thread *td = &thread0;

        /* XXX This will probably cause a LOR in some cases */
        thread_lock(td);
        if (TD_AWAITING_INTR(td)) {
                CTR2(KTR_INTR, "%s: sched_add %d", __func__, 0);
                TD_CLR_IWAIT(td);
                sched_add(td, SRQ_INTR);
        } else {
                proc0_rescan = 1;
                CTR2(KTR_INTR, "%s: state %d",
                    __func__, td->td_state);
        }
        thread_unlock(td);
}

#ifndef NO_SWAPPING

/*
 * swap_idle_threshold1 is the guaranteed time (in seconds) a process is
 * kept in memory before it becomes eligible for swapout.
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
    &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process");

/*
 * swap_idle_threshold2 is the time (in seconds) that a process can be
 * idle before it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
    &swap_idle_threshold2, 0, "Time before a process will be swapped out");

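/*
 * Illustrative tuning sketch (hypothetical, userland): both knobs are
 * ordinary read-write sysctls, so they can be inspected or adjusted with
 * sysctlbyname(3), e.g. to make idle swapping more aggressive.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>

int cur, new = 30;
size_t len = sizeof(cur);

/* Read the old value and install a new one in a single call. */
sysctlbyname("vm.swap_idle_threshold2", &cur, &len, &new, sizeof(new));
#endif
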
/*
 * Swapout is driven by the pageout daemon.  Very simple: we find eligible
 * procs and swap out their stacks.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
void
swapout_procs(int action)
{
        struct proc *p;
        struct thread *td;
        int didswap = 0;

retry:
        sx_slock(&allproc_lock);
        FOREACH_PROC_IN_SYSTEM(p) {
                struct vmspace *vm;
                int minslptime = 100000;
                int slptime;

                /*
                 * Watch out for a process in
                 * creation.  It may have no
                 * address space or lock yet.
                 */
                if (p->p_state == PRS_NEW)
                        continue;
                /*
                 * An aio daemon switches its
                 * address space while running.
                 * Perform a quick check whether
                 * a process has P_SYSTEM.
                 */
                if ((p->p_flag & P_SYSTEM) != 0)
                        continue;
                /*
                 * Do not swap out a process that
                 * is waiting for VM data
                 * structures as there is a possible
                 * deadlock.  Test this first as
                 * this may block.
                 *
                 * Lock the map until swapout
                 * finishes, or a thread of this
                 * process may attempt to alter
                 * the map.
                 */
                vm = vmspace_acquire_ref(p);
                if (vm == NULL)
                        continue;
                if (!vm_map_trylock(&vm->vm_map))
                        goto nextproc1;

                PROC_LOCK(p);
                if (p->p_lock != 0 ||
                    (p->p_flag & (P_STOPPED_SINGLE | P_TRACED | P_SYSTEM |
                    P_WEXIT)) != 0) {
                        goto nextproc2;
                }
                /*
                 * Only aiod changes vmspace; however, it will be
                 * skipped because of the if statement above checking
                 * for P_SYSTEM.
                 */
                if ((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) != P_INMEM)
                        goto nextproc2;

                switch (p->p_state) {
                default:
                        /*
                         * Don't swap out processes in any sort
                         * of 'special' state.
                         */
                        break;

                case PRS_NORMAL:
                        PROC_SLOCK(p);
                        /*
                         * Do not swap out a realtime process.
                         * Check all the thread groups.
                         */
                        FOREACH_THREAD_IN_PROC(p, td) {
                                thread_lock(td);
                                if (PRI_IS_REALTIME(td->td_pri_class)) {
                                        thread_unlock(td);
                                        goto nextproc;
                                }
                                slptime = (ticks - td->td_slptick) / hz;
                                /*
                                 * Guarantee swap_idle_threshold1
                                 * time in memory.
                                 */
                                if (slptime < swap_idle_threshold1) {
                                        thread_unlock(td);
                                        goto nextproc;
                                }

                                /*
                                 * Do not swap out a process if it is
                                 * waiting on a critical event of some
                                 * kind or there is a thread whose
                                 * pageable memory may be accessed.
                                 *
                                 * This could be refined to support
                                 * swapping out a thread.
                                 */
                                if (td->td_priority < PSOCK ||
                                    !thread_safetoswapout(td)) {
                                        thread_unlock(td);
                                        goto nextproc;
                                }
                                /*
                                 * If the system is under memory stress,
                                 * or if we are swapping
                                 * idle processes >= swap_idle_threshold2,
                                 * then swap the process out.
                                 */
                                if (((action & VM_SWAP_NORMAL) == 0) &&
                                    (((action & VM_SWAP_IDLE) == 0) ||
                                    (slptime < swap_idle_threshold2))) {
                                        thread_unlock(td);
                                        goto nextproc;
                                }

                                if (minslptime > slptime)
                                        minslptime = slptime;
                                thread_unlock(td);
                        }

                        /*
                         * If the pageout daemon didn't free enough pages,
                         * or if this process is idle and the system is
                         * configured to swap proactively, swap it out.
                         */
                        if ((action & VM_SWAP_NORMAL) ||
                            ((action & VM_SWAP_IDLE) &&
                            (minslptime > swap_idle_threshold2))) {
                                if (swapout(p) == 0)
                                        didswap++;
                                PROC_SUNLOCK(p);
                                PROC_UNLOCK(p);
                                vm_map_unlock(&vm->vm_map);
                                vmspace_free(vm);
                                sx_sunlock(&allproc_lock);
                                goto retry;
                        }
nextproc:
                        PROC_SUNLOCK(p);
                }
nextproc2:
                PROC_UNLOCK(p);
                vm_map_unlock(&vm->vm_map);
nextproc1:
                vmspace_free(vm);
                continue;
        }
        sx_sunlock(&allproc_lock);
        /*
         * If we swapped something out, and another process needed memory,
         * then wakeup the sched process.
         */
        if (didswap)
                wakeup(&proc0);
}

static void
swapclear(struct proc *p)
{
        struct thread *td;

        PROC_LOCK_ASSERT(p, MA_OWNED);
        PROC_SLOCK_ASSERT(p, MA_OWNED);

        FOREACH_THREAD_IN_PROC(p, td) {
                thread_lock(td);
                td->td_flags |= TDF_INMEM;
                td->td_flags &= ~TDF_SWAPINREQ;
                TD_CLR_SWAPPED(td);
                if (TD_CAN_RUN(td))
                        setrunnable(td);
                thread_unlock(td);
        }
        p->p_flag &= ~(P_SWAPPINGIN|P_SWAPPINGOUT);
        p->p_flag |= P_INMEM;
}

static int
swapout(struct proc *p)
{
        struct thread *td;

        PROC_LOCK_ASSERT(p, MA_OWNED);
        PROC_SLOCK_ASSERT(p, MA_OWNED | MA_NOTRECURSED);
#if defined(SWAP_DEBUG)
        printf("swapping out %d\n", p->p_pid);
#endif

        /*
         * The states of this process and its threads may have changed
         * by now.  Assuming that there is only one pageout daemon thread,
         * this process should still be in memory.
         */
        KASSERT((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) == P_INMEM,
                ("swapout: lost a swapout race?"));

        /*
         * Remember the process resident count.
         */
        p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);
        /*
         * Check and mark all threads before we proceed.
         */
        p->p_flag &= ~P_INMEM;
        p->p_flag |= P_SWAPPINGOUT;
        FOREACH_THREAD_IN_PROC(p, td) {
                thread_lock(td);
                if (!thread_safetoswapout(td)) {
                        thread_unlock(td);
                        swapclear(p);
                        return (EBUSY);
                }
                td->td_flags &= ~TDF_INMEM;
                TD_SET_SWAPPED(td);
                thread_unlock(td);
        }
        td = FIRST_THREAD_IN_PROC(p);
        ++td->td_ru.ru_nswap;
        PROC_SUNLOCK(p);
        PROC_UNLOCK(p);

        /*
         * This list is stable because all threads are now prevented from
         * running.  The list is only modified in the context of a running
         * thread in this process.
         */
        FOREACH_THREAD_IN_PROC(p, td)
                vm_thread_swapout(td);

        PROC_LOCK(p);
        p->p_flag &= ~P_SWAPPINGOUT;
        PROC_SLOCK(p);
        p->p_swtick = ticks;
        return (0);
}
#endif /* !NO_SWAPPING */
