FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_glue.c (FreeBSD releng/9.0)

/*-
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)vm_glue.c     8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/9.0/sys/vm/vm_glue.c 223825 2011-07-06 20:06:44Z trasz $");

#include "opt_vm.h"
#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

/*
 * System initialization
 *
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler(void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_ANY, scheduler, NULL);

#ifndef NO_SWAPPING
static int swapout(struct proc *);
static void swapclear(struct proc *);
static void vm_thread_swapin(struct thread *td);
static void vm_thread_swapout(struct thread *td);
#endif

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  In most cases
 * just checking the vm_map_entry is sufficient within the kernel's address
 * space.
 */
int
kernacc(void *addr, int len, int rw)
{
        boolean_t rv;
        vm_offset_t saddr, eaddr;
        vm_prot_t prot;

        KASSERT((rw & ~VM_PROT_ALL) == 0,
            ("illegal ``rw'' argument to kernacc (%x)\n", rw));

        if ((vm_offset_t)addr + len > kernel_map->max_offset ||
            (vm_offset_t)addr + len < (vm_offset_t)addr)
                return (FALSE);

        prot = rw;
        saddr = trunc_page((vm_offset_t)addr);
        eaddr = round_page((vm_offset_t)addr + len);
        vm_map_lock_read(kernel_map);
        rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
        vm_map_unlock_read(kernel_map);
        return (rv == TRUE);
}

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
 * used in conjunction with this call.
 */
int
useracc(void *addr, int len, int rw)
{
        boolean_t rv;
        vm_prot_t prot;
        vm_map_t map;

        KASSERT((rw & ~VM_PROT_ALL) == 0,
            ("illegal ``rw'' argument to useracc (%x)\n", rw));
        prot = rw;
        map = &curproc->p_vmspace->vm_map;
        if ((vm_offset_t)addr + len > vm_map_max(map) ||
            (vm_offset_t)addr + len < (vm_offset_t)addr) {
                return (FALSE);
        }
        vm_map_lock_read(map);
        rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
            round_page((vm_offset_t)addr + len), prot);
        vm_map_unlock_read(map);
        return (rv == TRUE);
}
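
/*
 * Illustrative sketch (not part of this file): useracc() only validates
 * the map-entry protections, so the actual access must still go through
 * a fault-tolerant primitive such as copyin().  The function and buffer
 * names below are hypothetical.
 */
#if 0
static int
example_user_read(void *uaddr, void *kbuf, int len)
{

        if (!useracc(uaddr, len, VM_PROT_READ))
                return (EFAULT);        /* range is not mapped readable */
        /* copyin() copes with pages that still turn out to be unreadable. */
        return (copyin(uaddr, kbuf, len));
}
#endif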

int
vslock(void *addr, size_t len)
{
        vm_offset_t end, last, start;
        unsigned long nsize;
        vm_size_t npages;
        int error;

        last = (vm_offset_t)addr + len;
        start = trunc_page((vm_offset_t)addr);
        end = round_page(last);
        if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)
                return (EINVAL);
        npages = atop(end - start);
        if (npages > vm_page_max_wired)
                return (ENOMEM);
        PROC_LOCK(curproc);
        nsize = ptoa(npages +
            pmap_wired_count(vm_map_pmap(&curproc->p_vmspace->vm_map)));
        if (nsize > lim_cur(curproc, RLIMIT_MEMLOCK)) {
                PROC_UNLOCK(curproc);
                return (ENOMEM);
        }
        if (racct_set(curproc, RACCT_MEMLOCK, nsize)) {
                PROC_UNLOCK(curproc);
                return (ENOMEM);
        }
        PROC_UNLOCK(curproc);
#if 0
        /*
         * XXX - not yet
         *
         * The limit for transient usage of wired pages should be
         * larger than for "permanent" wired pages (mlock()).
         *
         * Also, the sysctl code, which is the only present user
         * of vslock(), does a hard loop on EAGAIN.
         */
        if (npages + cnt.v_wire_count > vm_page_max_wired)
                return (EAGAIN);
#endif
        error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
            VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
#ifdef RACCT
        if (error != KERN_SUCCESS) {
                PROC_LOCK(curproc);
                racct_set(curproc, RACCT_MEMLOCK,
                    ptoa(pmap_wired_count(vm_map_pmap(&curproc->p_vmspace->vm_map))));
                PROC_UNLOCK(curproc);
        }
#endif
        /*
         * Return EFAULT on error to match copy{in,out}() behaviour
         * rather than returning ENOMEM like mlock() would.
         */
        return (error == KERN_SUCCESS ? 0 : EFAULT);
}

void
vsunlock(void *addr, size_t len)
{

        /* Rely on the parameter sanity checks performed by vslock(). */
        (void)vm_map_unwire(&curproc->p_vmspace->vm_map,
            trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
            VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);

#ifdef RACCT
        PROC_LOCK(curproc);
        racct_set(curproc, RACCT_MEMLOCK,
            ptoa(pmap_wired_count(vm_map_pmap(&curproc->p_vmspace->vm_map))));
        PROC_UNLOCK(curproc);
#endif
}
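
/*
 * Illustrative sketch (not part of this file): the wire/copy/unwire
 * pattern that a vslock() caller, such as the sysctl code mentioned
 * above, is expected to follow.  The function names are hypothetical.
 */
#if 0
static int
example_wired_copyin(void *uaddr, void *kbuf, size_t len)
{
        int error;

        /* Wire the user pages so the copy cannot fault them out. */
        error = vslock(uaddr, len);
        if (error != 0)
                return (error);         /* EINVAL, ENOMEM, or EFAULT */
        error = copyin(uaddr, kbuf, len);
        /* Always undo the wiring, even if the copy failed. */
        vsunlock(uaddr, len);
        return (error);
}
#endif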

/*
 * Pin the page contained within the given object at the given offset.  If the
 * page is not resident, allocate and load it using the given object's pager.
 * Return the pinned page if successful; otherwise, return NULL.
 */
static vm_page_t
vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
{
        vm_page_t m, ma[1];
        vm_pindex_t pindex;
        int rv;

        VM_OBJECT_LOCK(object);
        pindex = OFF_TO_IDX(offset);
        m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
        if (m->valid != VM_PAGE_BITS_ALL) {
                ma[0] = m;
                rv = vm_pager_get_pages(object, ma, 1, 0);
                m = vm_page_lookup(object, pindex);
                if (m == NULL)
                        goto out;
                if (rv != VM_PAGER_OK) {
                        vm_page_lock(m);
                        vm_page_free(m);
                        vm_page_unlock(m);
                        m = NULL;
                        goto out;
                }
        }
        vm_page_lock(m);
        vm_page_hold(m);
        vm_page_unlock(m);
        vm_page_wakeup(m);
out:
        VM_OBJECT_UNLOCK(object);
        return (m);
}

/*
 * Return a CPU private mapping to the page at the given offset within the
 * given object.  The page is pinned before it is mapped.
 */
struct sf_buf *
vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset)
{
        vm_page_t m;

        m = vm_imgact_hold_page(object, offset);
        if (m == NULL)
                return (NULL);
        sched_pin();
        return (sf_buf_alloc(m, SFB_CPUPRIVATE));
}

/*
 * Destroy the given CPU private mapping and unpin the page that it mapped.
 */
void
vm_imgact_unmap_page(struct sf_buf *sf)
{
        vm_page_t m;

        m = sf_buf_page(sf);
        sf_buf_free(sf);
        sched_unpin();
        vm_page_lock(m);
        vm_page_unhold(m);
        vm_page_unlock(m);
}
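
/*
 * Illustrative sketch (not part of this file): how an image activator
 * might use the pair above to fill a page of an executable's VM object.
 * The object, offset, and payload names are hypothetical.
 */
#if 0
static int
example_fill_page(vm_object_t obj, vm_ooffset_t off, const void *payload,
    size_t len)
{
        struct sf_buf *sf;

        sf = vm_imgact_map_page(obj, off);      /* pins and maps the page */
        if (sf == NULL)
                return (EIO);
        /* The mapping is CPU-private; vm_imgact_map_page() pinned us. */
        bcopy(payload, (void *)sf_buf_kva(sf), len);
        vm_imgact_unmap_page(sf);               /* unmaps and unpins */
        return (0);
}
#endif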

void
vm_sync_icache(vm_map_t map, vm_offset_t va, vm_offset_t sz)
{

        pmap_sync_icache(map->pmap, va, sz);
}

struct kstack_cache_entry {
        vm_object_t ksobj;
        struct kstack_cache_entry *next_ks_entry;
};
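
/*
 * Free stacks are kept on a singly-linked list.  A cached stack's kernel
 * mapping is left intact, and the kstack_cache_entry is stored in the
 * first bytes of the stack itself, so caching requires no separate
 * allocation (see vm_thread_new() and vm_thread_dispose()).
 */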

static struct kstack_cache_entry *kstack_cache;
static int kstack_cache_size = 128;
static int kstacks;
static struct mtx kstack_cache_mtx;
SYSCTL_INT(_vm, OID_AUTO, kstack_cache_size, CTLFLAG_RW, &kstack_cache_size, 0,
    "Maximum number of cached kernel stacks");
SYSCTL_INT(_vm, OID_AUTO, kstacks, CTLFLAG_RD, &kstacks, 0,
    "Number of kernel stacks currently allocated");

#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif

/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 * This routine directly affects the performance of fork for a process
 * and of the creation of a new thread.
 */
int
vm_thread_new(struct thread *td, int pages)
{
        vm_object_t ksobj;
        vm_offset_t ks;
        vm_page_t m, ma[KSTACK_MAX_PAGES];
        struct kstack_cache_entry *ks_ce;
        int i;

        /* Bounds check */
        if (pages <= 1)
                pages = KSTACK_PAGES;
        else if (pages > KSTACK_MAX_PAGES)
                pages = KSTACK_MAX_PAGES;

        if (pages == KSTACK_PAGES) {
                mtx_lock(&kstack_cache_mtx);
                if (kstack_cache != NULL) {
                        ks_ce = kstack_cache;
                        kstack_cache = ks_ce->next_ks_entry;
                        mtx_unlock(&kstack_cache_mtx);

                        td->td_kstack_obj = ks_ce->ksobj;
                        td->td_kstack = (vm_offset_t)ks_ce;
                        td->td_kstack_pages = KSTACK_PAGES;
                        return (1);
                }
                mtx_unlock(&kstack_cache_mtx);
        }

        /*
         * Allocate an object for the kstack.
         */
        ksobj = vm_object_allocate(OBJT_DEFAULT, pages);

        /*
         * Get a kernel virtual address for this thread's kstack.
         */
#if defined(__mips__)
        /*
         * We need to align the kstack's mapped address to fit within
         * a single TLB entry.
         */
        ks = kmem_alloc_nofault_space(kernel_map,
            (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE, VMFS_TLB_ALIGNED_SPACE);
#else
        ks = kmem_alloc_nofault(kernel_map,
            (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
#endif
        if (ks == 0) {
                printf("vm_thread_new: kstack allocation failed\n");
                vm_object_deallocate(ksobj);
                return (0);
        }

        atomic_add_int(&kstacks, 1);
        if (KSTACK_GUARD_PAGES != 0) {
                pmap_qremove(ks, KSTACK_GUARD_PAGES);
                ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
        }
        td->td_kstack_obj = ksobj;
        td->td_kstack = ks;
        /*
         * Knowing the number of pages allocated is useful when you
         * want to deallocate them.
         */
        td->td_kstack_pages = pages;
        /*
         * For the length of the stack, link in a real page of ram for each
         * page of stack.
         */
        VM_OBJECT_LOCK(ksobj);
        for (i = 0; i < pages; i++) {
                /*
                 * Get a kernel stack page.
                 */
                m = vm_page_grab(ksobj, i, VM_ALLOC_NOBUSY |
                    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
                ma[i] = m;
                m->valid = VM_PAGE_BITS_ALL;
        }
        VM_OBJECT_UNLOCK(ksobj);
        pmap_qenter(ks, ma, pages);
        return (1);
}
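
/*
 * The resulting layout (the guard pages sit below the stack, so on
 * architectures where stacks grow downward an overflow faults on an
 * unmapped guard page instead of silently corrupting the neighboring
 * allocation):
 *
 *      ks - KSTACK_GUARD_PAGES * PAGE_SIZE     unmapped guard page(s)
 *      ks ... ks + pages * PAGE_SIZE - 1       wired, mapped stack pages
 */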

static void
vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages)
{
        vm_page_t m;
        int i;

        atomic_add_int(&kstacks, -1);
        pmap_qremove(ks, pages);
        VM_OBJECT_LOCK(ksobj);
        for (i = 0; i < pages; i++) {
                m = vm_page_lookup(ksobj, i);
                if (m == NULL)
                        panic("vm_thread_dispose: kstack already missing?");
                vm_page_lock(m);
                vm_page_unwire(m, 0);
                vm_page_free(m);
                vm_page_unlock(m);
        }
        VM_OBJECT_UNLOCK(ksobj);
        vm_object_deallocate(ksobj);
        kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
            (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
}

/*
 * Dispose of a thread's kernel stack.
 */
void
vm_thread_dispose(struct thread *td)
{
        vm_object_t ksobj;
        vm_offset_t ks;
        struct kstack_cache_entry *ks_ce;
        int pages;

        pages = td->td_kstack_pages;
        ksobj = td->td_kstack_obj;
        ks = td->td_kstack;
        td->td_kstack = 0;
        td->td_kstack_pages = 0;
        if (pages == KSTACK_PAGES && kstacks <= kstack_cache_size) {
                ks_ce = (struct kstack_cache_entry *)ks;
                ks_ce->ksobj = ksobj;
                mtx_lock(&kstack_cache_mtx);
                ks_ce->next_ks_entry = kstack_cache;
                kstack_cache = ks_ce;
                mtx_unlock(&kstack_cache_mtx);
                return;
        }
        vm_thread_stack_dispose(ksobj, ks, pages);
}

static void
vm_thread_stack_lowmem(void *nulll)
{
        struct kstack_cache_entry *ks_ce, *ks_ce1;

        mtx_lock(&kstack_cache_mtx);
        ks_ce = kstack_cache;
        kstack_cache = NULL;
        mtx_unlock(&kstack_cache_mtx);

        while (ks_ce != NULL) {
                ks_ce1 = ks_ce;
                ks_ce = ks_ce->next_ks_entry;

                vm_thread_stack_dispose(ks_ce1->ksobj, (vm_offset_t)ks_ce1,
                    KSTACK_PAGES);
        }
}

static void
kstack_cache_init(void *nulll)
{

        EVENTHANDLER_REGISTER(vm_lowmem, vm_thread_stack_lowmem, NULL,
            EVENTHANDLER_PRI_ANY);
}

MTX_SYSINIT(kstack_cache, &kstack_cache_mtx, "kstkch", MTX_DEF);
SYSINIT(vm_kstacks, SI_SUB_KTHREAD_INIT, SI_ORDER_ANY, kstack_cache_init, NULL);

#ifndef NO_SWAPPING
/*
 * Allow a thread's kernel stack to be paged out.
 */
static void
vm_thread_swapout(struct thread *td)
{
        vm_object_t ksobj;
        vm_page_t m;
        int i, pages;

        cpu_thread_swapout(td);
        pages = td->td_kstack_pages;
        ksobj = td->td_kstack_obj;
        pmap_qremove(td->td_kstack, pages);
        VM_OBJECT_LOCK(ksobj);
        for (i = 0; i < pages; i++) {
                m = vm_page_lookup(ksobj, i);
                if (m == NULL)
                        panic("vm_thread_swapout: kstack already missing?");
                vm_page_dirty(m);
                vm_page_lock(m);
                vm_page_unwire(m, 0);
                vm_page_unlock(m);
        }
        VM_OBJECT_UNLOCK(ksobj);
}

/*
 * Bring the kernel stack for a specified thread back in.
 */
static void
vm_thread_swapin(struct thread *td)
{
        vm_object_t ksobj;
        vm_page_t ma[KSTACK_MAX_PAGES];
        int i, j, k, pages, rv;

        pages = td->td_kstack_pages;
        ksobj = td->td_kstack_obj;
        VM_OBJECT_LOCK(ksobj);
        for (i = 0; i < pages; i++)
                ma[i] = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY |
                    VM_ALLOC_WIRED);
        for (i = 0; i < pages; i++) {
                if (ma[i]->valid != VM_PAGE_BITS_ALL) {
                        KASSERT(ma[i]->oflags & VPO_BUSY,
                            ("lost busy 1"));
                        vm_object_pip_add(ksobj, 1);
                        for (j = i + 1; j < pages; j++) {
                                KASSERT(ma[j]->valid == VM_PAGE_BITS_ALL ||
                                    (ma[j]->oflags & VPO_BUSY),
                                    ("lost busy 2"));
                                if (ma[j]->valid == VM_PAGE_BITS_ALL)
                                        break;
                        }
                        rv = vm_pager_get_pages(ksobj, ma + i, j - i, 0);
                        if (rv != VM_PAGER_OK)
                                panic("vm_thread_swapin: cannot get kstack for proc: %d",
                                    td->td_proc->p_pid);
                        vm_object_pip_wakeup(ksobj);
                        for (k = i; k < j; k++)
                                ma[k] = vm_page_lookup(ksobj, k);
                        vm_page_wakeup(ma[i]);
                } else if (ma[i]->oflags & VPO_BUSY)
                        vm_page_wakeup(ma[i]);
        }
        VM_OBJECT_UNLOCK(ksobj);
        pmap_qenter(td->td_kstack, ma, pages);
        cpu_thread_swapin(td);
}
#endif /* !NO_SWAPPING */

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
int
vm_forkproc(struct thread *td, struct proc *p2, struct thread *td2,
    struct vmspace *vm2, int flags)
{
        struct proc *p1 = td->td_proc;
        int error;

        if ((flags & RFPROC) == 0) {
                /*
                 * Divorce the memory, if it is shared: essentially this
                 * changes memory that is shared amongst threads into
                 * COW locally.
                 */
                if ((flags & RFMEM) == 0) {
                        if (p1->p_vmspace->vm_refcnt > 1) {
                                error = vmspace_unshare(p1);
                                if (error)
                                        return (error);
                        }
                }
                cpu_fork(td, p2, td2, flags);
                return (0);
        }

        if (flags & RFMEM) {
                p2->p_vmspace = p1->p_vmspace;
                atomic_add_int(&p1->p_vmspace->vm_refcnt, 1);
        }

        while (vm_page_count_severe()) {
                VM_WAIT;
        }

        if ((flags & RFMEM) == 0) {
                p2->p_vmspace = vm2;
                if (p1->p_vmspace->vm_shm)
                        shmfork(p1, p2);
        }

        /*
         * cpu_fork will copy and update the pcb, set up the kernel stack,
         * and make the child ready to run.
         */
        cpu_fork(td, p2, td2, flags);
        return (0);
}
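
/*
 * Summary of the flag handling above (see rfork(2) for the flag
 * definitions):
 *
 *      RFPROC clear:           no new process; clearing RFMEM as well
 *                              un-shares the caller's vmspace into a
 *                              private COW copy.
 *      RFPROC | RFMEM:         the child shares the parent's vmspace.
 *      RFPROC without RFMEM:   the child receives the pre-copied
 *                              vmspace passed in as vm2.
 */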

/*
 * Called after a process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(struct proc *p)
{

        vmspace_exitfree(p);            /* and clean-out the vmspace */
}

void
faultin(struct proc *p)
{
#ifdef NO_SWAPPING

        PROC_LOCK_ASSERT(p, MA_OWNED);
        if ((p->p_flag & P_INMEM) == 0)
                panic("faultin: proc swapped out with NO_SWAPPING!");
#else /* !NO_SWAPPING */
        struct thread *td;

        PROC_LOCK_ASSERT(p, MA_OWNED);
        /*
         * If another process is swapping in this process,
         * just wait until it finishes.
         */
        if (p->p_flag & P_SWAPPINGIN) {
                while (p->p_flag & P_SWAPPINGIN)
                        msleep(&p->p_flag, &p->p_mtx, PVM, "faultin", 0);
                return;
        }
        if ((p->p_flag & P_INMEM) == 0) {
                /*
                 * Don't let another thread swap process p out while we are
                 * busy swapping it in.
                 */
                ++p->p_lock;
                p->p_flag |= P_SWAPPINGIN;
                PROC_UNLOCK(p);

                /*
                 * We hold no lock here because the list of threads
                 * cannot change while all threads in the process are
                 * swapped out.
                 */
                FOREACH_THREAD_IN_PROC(p, td)
                        vm_thread_swapin(td);
                PROC_LOCK(p);
                swapclear(p);
                p->p_swtick = ticks;

                wakeup(&p->p_flag);

                /* Allow other threads to swap p out now. */
                --p->p_lock;
        }
#endif /* NO_SWAPPING */
}

/*
 * This swapin algorithm attempts to swap in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 *
 * Giant is held on entry.
 */
/* ARGSUSED*/
static void
scheduler(void *dummy)
{
        struct proc *p;
        struct thread *td;
        struct proc *pp;
        int slptime;
        int swtime;
        int ppri;
        int pri;

        mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
        mtx_unlock(&Giant);

loop:
        if (vm_page_count_min()) {
                VM_WAIT;
                goto loop;
        }

        pp = NULL;
        ppri = INT_MIN;
        sx_slock(&allproc_lock);
        FOREACH_PROC_IN_SYSTEM(p) {
                PROC_LOCK(p);
                if (p->p_state == PRS_NEW ||
                    p->p_flag & (P_SWAPPINGOUT | P_SWAPPINGIN | P_INMEM)) {
                        PROC_UNLOCK(p);
                        continue;
                }
                swtime = (ticks - p->p_swtick) / hz;
                FOREACH_THREAD_IN_PROC(p, td) {
                        /*
                         * An otherwise runnable thread of a process
                         * swapped out has only the TDI_SWAPPED bit set.
                         */
                        thread_lock(td);
                        if (td->td_inhibitors == TDI_SWAPPED) {
                                slptime = (ticks - td->td_slptick) / hz;
                                pri = swtime + slptime;
                                if ((td->td_flags & TDF_SWAPINREQ) == 0)
                                        pri -= p->p_nice * 8;
                                /*
                                 * If this thread is higher priority
                                 * and there is enough space, then select
                                 * this process instead of the previous
                                 * selection.
                                 */
                                if (pri > ppri) {
                                        pp = p;
                                        ppri = pri;
                                }
                        }
                        thread_unlock(td);
                }
                PROC_UNLOCK(p);
        }
        sx_sunlock(&allproc_lock);

        /*
         * Nothing to do, back to sleep.
         */
        if ((p = pp) == NULL) {
                tsleep(&proc0, PVM, "sched", MAXSLP * hz / 2);
                goto loop;
        }
        PROC_LOCK(p);

        /*
         * Another process may be bringing or may have already
         * brought this process in while we traverse all threads.
         * Or, this process may even be being swapped out again.
         */
        if (p->p_flag & (P_INMEM | P_SWAPPINGOUT | P_SWAPPINGIN)) {
                PROC_UNLOCK(p);
                goto loop;
        }

        /*
         * We would like to bring someone in (only if there is space).
         * [What checks the space?]
         */
        faultin(p);
        PROC_UNLOCK(p);
        goto loop;
}
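
/*
 * Worked example of the selection above: a process swapped out for 30
 * seconds (swtime = 30) with a thread asleep for 10 seconds
 * (slptime = 10) at nice +5 scores pri = 30 + 10 - 5 * 8 = 0, while the
 * same process at nice 0 scores 40.  The highest score wins, and a
 * pending TDF_SWAPINREQ skips the nice penalty entirely.
 */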

void
kick_proc0(void)
{

        wakeup(&proc0);
}

#ifndef NO_SWAPPING

/*
 * swap_idle_threshold1 is the guaranteed swapped-in time for a process.
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
    &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process");

/*
 * swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
    &swap_idle_threshold2, 0, "Time before a process will be swapped out");

/*
 * First, if any processes have been sleeping or stopped for at least
 * "swap_idle_threshold1" seconds, they are swapped out.  If, however,
 * no such processes exist, then the longest-sleeping or stopped
 * process is swapped out.  Finally, and only as a last resort, if
 * there are no sleeping or stopped processes, the longest-resident
 * process is swapped out.
 */
void
swapout_procs(int action)
{
        struct proc *p;
        struct thread *td;
        int didswap = 0;

retry:
        sx_slock(&allproc_lock);
        FOREACH_PROC_IN_SYSTEM(p) {
                struct vmspace *vm;
                int minslptime = 100000;
                int slptime;

                /*
                 * Watch out for a process in
                 * creation.  It may have no
                 * address space or lock yet.
                 */
                if (p->p_state == PRS_NEW)
                        continue;
                /*
                 * An aio daemon switches its
                 * address space while running.
                 * Perform a quick check whether
                 * a process has P_SYSTEM.
                 */
                if ((p->p_flag & P_SYSTEM) != 0)
                        continue;
                /*
                 * Do not swap out a process that
                 * is waiting for VM data
                 * structures as there is a possible
                 * deadlock.  Test this first as
                 * this may block.
                 *
                 * Lock the map until swapout
                 * finishes, or a thread of this
                 * process may attempt to alter
                 * the map.
                 */
                vm = vmspace_acquire_ref(p);
                if (vm == NULL)
                        continue;
                if (!vm_map_trylock(&vm->vm_map))
                        goto nextproc1;

                PROC_LOCK(p);
                if (p->p_lock != 0 ||
                    (p->p_flag & (P_STOPPED_SINGLE|P_TRACED|P_SYSTEM|P_WEXIT)
                    ) != 0) {
                        goto nextproc;
                }
                /*
                 * Only aiod changes vmspace; however, it will be
                 * skipped because of the if statement above checking
                 * for P_SYSTEM.
                 */
                if ((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) != P_INMEM)
                        goto nextproc;

                switch (p->p_state) {
                default:
                        /*
                         * Don't swap out processes in any sort
                         * of 'special' state.
                         */
                        break;

                case PRS_NORMAL:
                        /*
                         * Do not swap out a realtime process.
                         * Check all the thread groups.
                         */
                        FOREACH_THREAD_IN_PROC(p, td) {
                                thread_lock(td);
                                if (PRI_IS_REALTIME(td->td_pri_class)) {
                                        thread_unlock(td);
                                        goto nextproc;
                                }
                                slptime = (ticks - td->td_slptick) / hz;
                                /*
                                 * Guarantee swap_idle_threshold1
                                 * time in memory.
                                 */
                                if (slptime < swap_idle_threshold1) {
                                        thread_unlock(td);
                                        goto nextproc;
                                }

                                /*
                                 * Do not swap out a process if it is
                                 * waiting on a critical event of some
                                 * kind or there is a thread whose
                                 * pageable memory may be accessed.
                                 *
                                 * This could be refined to support
                                 * swapping out a thread.
                                 */
                                if (!thread_safetoswapout(td)) {
                                        thread_unlock(td);
                                        goto nextproc;
                                }
                                /*
                                 * If the system is under memory stress,
                                 * or if we are swapping
                                 * idle processes >= swap_idle_threshold2,
                                 * then swap the process out.
                                 */
                                if (((action & VM_SWAP_NORMAL) == 0) &&
                                    (((action & VM_SWAP_IDLE) == 0) ||
                                    (slptime < swap_idle_threshold2))) {
                                        thread_unlock(td);
                                        goto nextproc;
                                }

                                if (minslptime > slptime)
                                        minslptime = slptime;
                                thread_unlock(td);
                        }

                        /*
                         * If the pageout daemon didn't free enough pages,
                         * or if this process is idle and the system is
                         * configured to swap proactively, swap it out.
                         */
                        if ((action & VM_SWAP_NORMAL) ||
                                ((action & VM_SWAP_IDLE) &&
                                 (minslptime > swap_idle_threshold2))) {
                                if (swapout(p) == 0)
                                        didswap++;
                                PROC_UNLOCK(p);
                                vm_map_unlock(&vm->vm_map);
                                vmspace_free(vm);
                                sx_sunlock(&allproc_lock);
                                goto retry;
                        }
                }
nextproc:
                PROC_UNLOCK(p);
                vm_map_unlock(&vm->vm_map);
nextproc1:
                vmspace_free(vm);
                continue;
        }
        sx_sunlock(&allproc_lock);
        /*
         * If we swapped something out, and another process needed memory,
         * then wakeup the sched process.
         */
        if (didswap)
                wakeup(&proc0);
}

static void
swapclear(struct proc *p)
{
        struct thread *td;

        PROC_LOCK_ASSERT(p, MA_OWNED);

        FOREACH_THREAD_IN_PROC(p, td) {
                thread_lock(td);
                td->td_flags |= TDF_INMEM;
                td->td_flags &= ~TDF_SWAPINREQ;
                TD_CLR_SWAPPED(td);
                if (TD_CAN_RUN(td))
                        if (setrunnable(td)) {
#ifdef INVARIANTS
                                /*
                                 * XXX: We just cleared TDI_SWAPPED
                                 * above and set TDF_INMEM, so this
                                 * should never happen.
                                 */
                                panic("not waking up swapper");
#endif
                        }
                thread_unlock(td);
        }
        p->p_flag &= ~(P_SWAPPINGIN|P_SWAPPINGOUT);
        p->p_flag |= P_INMEM;
}

static int
swapout(struct proc *p)
{
        struct thread *td;

        PROC_LOCK_ASSERT(p, MA_OWNED);
#if defined(SWAP_DEBUG)
        printf("swapping out %d\n", p->p_pid);
#endif

        /*
         * The states of this process and its threads may have changed
         * by now.  Assuming that there is only one pageout daemon thread,
         * this process should still be in memory.
         */
        KASSERT((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) == P_INMEM,
                ("swapout: lost a swapout race?"));

        /*
         * Remember the process resident count.
         */
        p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);
        /*
         * Check and mark all threads before we proceed.
         */
        p->p_flag &= ~P_INMEM;
        p->p_flag |= P_SWAPPINGOUT;
        FOREACH_THREAD_IN_PROC(p, td) {
                thread_lock(td);
                if (!thread_safetoswapout(td)) {
                        thread_unlock(td);
                        swapclear(p);
                        return (EBUSY);
                }
                td->td_flags &= ~TDF_INMEM;
                TD_SET_SWAPPED(td);
                thread_unlock(td);
        }
        td = FIRST_THREAD_IN_PROC(p);
        ++td->td_ru.ru_nswap;
        PROC_UNLOCK(p);

        /*
         * This list is stable because all threads are now prevented from
         * running.  The list is only modified in the context of a running
         * thread in this process.
         */
        FOREACH_THREAD_IN_PROC(p, td)
                vm_thread_swapout(td);

        PROC_LOCK(p);
        p->p_flag &= ~P_SWAPPINGOUT;
        p->p_swtick = ticks;
        return (0);
}
#endif /* !NO_SWAPPING */
