
FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_glue.c


/*-
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)vm_glue.c     8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"
#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/_kstack_cache.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

#ifndef NO_SWAPPING
static int swapout(struct proc *);
static void swapclear(struct proc *);
static void vm_thread_swapin(struct thread *td);
static void vm_thread_swapout(struct thread *td);
#endif

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  In most cases
 * just checking the vm_map_entry is sufficient within the kernel's address
 * space.
 */
int
kernacc(void *addr, int len, int rw)
{
        boolean_t rv;
        vm_offset_t saddr, eaddr;
        vm_prot_t prot;

        KASSERT((rw & ~VM_PROT_ALL) == 0,
            ("illegal ``rw'' argument to kernacc (%x)\n", rw));

        if ((vm_offset_t)addr + len > kernel_map->max_offset ||
            (vm_offset_t)addr + len < (vm_offset_t)addr)
                return (FALSE);

        prot = rw;
        saddr = trunc_page((vm_offset_t)addr);
        eaddr = round_page((vm_offset_t)addr + len);
        vm_map_lock_read(kernel_map);
        rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
        vm_map_unlock_read(kernel_map);
        return (rv == TRUE);
}

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
 * used in conjunction with this call.
 */
int
useracc(void *addr, int len, int rw)
{
        boolean_t rv;
        vm_prot_t prot;
        vm_map_t map;

        KASSERT((rw & ~VM_PROT_ALL) == 0,
            ("illegal ``rw'' argument to useracc (%x)\n", rw));
        prot = rw;
        map = &curproc->p_vmspace->vm_map;
        if ((vm_offset_t)addr + len > vm_map_max(map) ||
            (vm_offset_t)addr + len < (vm_offset_t)addr) {
                return (FALSE);
        }
        vm_map_lock_read(map);
        rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
            round_page((vm_offset_t)addr + len), prot);
        vm_map_unlock_read(map);
        return (rv == TRUE);
}
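
/*
 * Usage sketch (illustrative only; "uap", "kbuf" and the surrounding
 * handler are hypothetical, not part of this file): a driver may use
 * useracc() as an advisory precheck of a user buffer.  Because only the
 * vm_map_entry protections are checked, copyout() must still be
 * prepared to fail with EFAULT:
 *
 *      if (!useracc(uap->buf, uap->len, VM_PROT_WRITE))
 *              return (EFAULT);
 *      error = copyout(kbuf, uap->buf, uap->len);
 *      return (error);
 */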

int
vslock(void *addr, size_t len)
{
        vm_offset_t end, last, start;
        vm_size_t npages;
        int error;

        last = (vm_offset_t)addr + len;
        start = trunc_page((vm_offset_t)addr);
        end = round_page(last);
        if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)
                return (EINVAL);
        npages = atop(end - start);
        if (npages > vm_page_max_wired)
                return (ENOMEM);
#if 0
        /*
         * XXX - not yet
         *
         * The limit for transient usage of wired pages should be
         * larger than for "permanent" wired pages (mlock()).
         *
         * Also, the sysctl code, which is the only present user
         * of vslock(), does a hard loop on EAGAIN.
         */
        if (npages + cnt.v_wire_count > vm_page_max_wired)
                return (EAGAIN);
#endif
        error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
            VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
        /*
         * Return EFAULT on error to match copy{in,out}() behaviour
         * rather than returning ENOMEM like mlock() would.
         */
        return (error == KERN_SUCCESS ? 0 : EFAULT);
}

void
vsunlock(void *addr, size_t len)
{

        /* Rely on the parameter sanity checks performed by vslock(). */
        (void)vm_map_unwire(&curproc->p_vmspace->vm_map,
            trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
            VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
}
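
/*
 * Usage sketch (illustrative; "uaddr", "kbuf" and "len" are
 * hypothetical): the sysctl code, noted above as the only present user
 * of vslock(), wires a user buffer so that a later copyout() cannot
 * fault.  The calls always pair vslock() ... vsunlock():
 *
 *      error = vslock(uaddr, len);
 *      if (error != 0)
 *              return (error);
 *      error = copyout(kbuf, uaddr, len);
 *      vsunlock(uaddr, len);
 *      return (error);
 */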

/*
 * Pin the page contained within the given object at the given offset.  If the
 * page is not resident, allocate and load it using the given object's pager.
 * Return the pinned page if successful; otherwise, return NULL.
 */
static vm_page_t
vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
{
        vm_page_t m, ma[1];
        vm_pindex_t pindex;
        int rv;

        VM_OBJECT_LOCK(object);
        pindex = OFF_TO_IDX(offset);
        m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
        if (m->valid != VM_PAGE_BITS_ALL) {
                ma[0] = m;
                rv = vm_pager_get_pages(object, ma, 1, 0);
                m = vm_page_lookup(object, pindex);
                if (m == NULL)
                        goto out;
                if (rv != VM_PAGER_OK) {
                        vm_page_lock(m);
                        vm_page_free(m);
                        vm_page_unlock(m);
                        m = NULL;
                        goto out;
                }
        }
        vm_page_lock(m);
        vm_page_hold(m);
        vm_page_unlock(m);
        vm_page_wakeup(m);
out:
        VM_OBJECT_UNLOCK(object);
        return (m);
}

/*
 * Return a CPU private mapping to the page at the given offset within the
 * given object.  The page is pinned before it is mapped.
 */
struct sf_buf *
vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset)
{
        vm_page_t m;

        m = vm_imgact_hold_page(object, offset);
        if (m == NULL)
                return (NULL);
        sched_pin();
        return (sf_buf_alloc(m, SFB_CPUPRIVATE));
}

/*
 * Destroy the given CPU private mapping and unpin the page that it mapped.
 */
void
vm_imgact_unmap_page(struct sf_buf *sf)
{
        vm_page_t m;

        m = sf_buf_page(sf);
        sf_buf_free(sf);
        sched_unpin();
        vm_page_lock(m);
        vm_page_unhold(m);
        vm_page_unlock(m);
}
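
/*
 * Usage sketch (illustrative; "object", "offset" and "dst" are
 * hypothetical): image activators pair these calls to copy data from a
 * pager-backed object through a CPU-private sf_buf window.  The thread
 * remains pinned to its CPU between the two calls (sched_pin() above),
 * so no blocking operations should occur while the mapping is held:
 *
 *      sf = vm_imgact_map_page(object, offset);
 *      if (sf == NULL)
 *              return (EIO);
 *      bcopy((void *)sf_buf_kva(sf), dst, PAGE_SIZE);
 *      vm_imgact_unmap_page(sf);
 */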

void
vm_sync_icache(vm_map_t map, vm_offset_t va, vm_offset_t sz)
{

        pmap_sync_icache(map->pmap, va, sz);
}

struct kstack_cache_entry *kstack_cache;
static int kstack_cache_size = 128;
static int kstacks;
static struct mtx kstack_cache_mtx;
SYSCTL_INT(_vm, OID_AUTO, kstack_cache_size, CTLFLAG_RW, &kstack_cache_size, 0,
    "Maximum number of cached kernel stacks");
SYSCTL_INT(_vm, OID_AUTO, kstacks, CTLFLAG_RD, &kstacks, 0,
    "Number of kernel stacks currently allocated");

#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif

/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 * This routine directly affects fork performance for a process and
 * creation performance for a thread.
 */
int
vm_thread_new(struct thread *td, int pages)
{
        vm_object_t ksobj;
        vm_offset_t ks;
        vm_page_t m, ma[KSTACK_MAX_PAGES];
        struct kstack_cache_entry *ks_ce;
        int i;

        /* Bounds check */
        if (pages <= 1)
                pages = KSTACK_PAGES;
        else if (pages > KSTACK_MAX_PAGES)
                pages = KSTACK_MAX_PAGES;

        if (pages == KSTACK_PAGES) {
                mtx_lock(&kstack_cache_mtx);
                if (kstack_cache != NULL) {
                        ks_ce = kstack_cache;
                        kstack_cache = ks_ce->next_ks_entry;
                        mtx_unlock(&kstack_cache_mtx);

                        td->td_kstack_obj = ks_ce->ksobj;
                        td->td_kstack = (vm_offset_t)ks_ce;
                        td->td_kstack_pages = KSTACK_PAGES;
                        return (1);
                }
                mtx_unlock(&kstack_cache_mtx);
        }

        /*
         * Allocate an object for the kstack.
         */
        ksobj = vm_object_allocate(OBJT_DEFAULT, pages);

        /*
         * Get a kernel virtual address for this thread's kstack.
         */
#if defined(__mips__)
        /*
         * We need to align the kstack's mapped address to fit within
         * a single TLB entry.
         */
        ks = kmem_alloc_nofault_space(kernel_map,
            (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE, VMFS_TLB_ALIGNED_SPACE);
#else
        ks = kmem_alloc_nofault(kernel_map,
            (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
#endif
        if (ks == 0) {
                printf("vm_thread_new: kstack allocation failed\n");
                vm_object_deallocate(ksobj);
                return (0);
        }

        atomic_add_int(&kstacks, 1);
        if (KSTACK_GUARD_PAGES != 0) {
                pmap_qremove(ks, KSTACK_GUARD_PAGES);
                ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
        }
        td->td_kstack_obj = ksobj;
        td->td_kstack = ks;
        /*
         * Knowing the number of pages allocated is useful when you
         * want to deallocate them.
         */
        td->td_kstack_pages = pages;
        /*
         * For the length of the stack, link in a real page of ram for each
         * page of stack.
         */
        VM_OBJECT_LOCK(ksobj);
        for (i = 0; i < pages; i++) {
                /*
                 * Get a kernel stack page.
                 */
                m = vm_page_grab(ksobj, i, VM_ALLOC_NOBUSY |
                    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
                ma[i] = m;
                m->valid = VM_PAGE_BITS_ALL;
        }
        VM_OBJECT_UNLOCK(ksobj);
        pmap_qenter(ks, ma, pages);
        return (1);
}
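
/*
 * The resulting layout (a sketch of the address arithmetic above, for
 * configurations where KSTACK_GUARD_PAGES != 0): the guard pages at the
 * low end of the allocation are left unmapped by pmap_qremove(), so a
 * runaway stack faults instead of silently overwriting adjacent kernel
 * memory, and td_kstack points just past them:
 *
 *      base of allocation                 td->td_kstack
 *      v                                  v
 *      | KSTACK_GUARD_PAGES (unmapped)    | pages * PAGE_SIZE (wired) |
 */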

static void
vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages)
{
        vm_page_t m;
        int i;

        atomic_add_int(&kstacks, -1);
        pmap_qremove(ks, pages);
        VM_OBJECT_LOCK(ksobj);
        for (i = 0; i < pages; i++) {
                m = vm_page_lookup(ksobj, i);
                if (m == NULL)
                        panic("vm_thread_stack_dispose: kstack already missing?");
                vm_page_lock(m);
                vm_page_unwire(m, 0);
                vm_page_free(m);
                vm_page_unlock(m);
        }
        VM_OBJECT_UNLOCK(ksobj);
        vm_object_deallocate(ksobj);
        kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
            (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
}

/*
 * Dispose of a thread's kernel stack.
 */
void
vm_thread_dispose(struct thread *td)
{
        vm_object_t ksobj;
        vm_offset_t ks;
        struct kstack_cache_entry *ks_ce;
        int pages;

        pages = td->td_kstack_pages;
        ksobj = td->td_kstack_obj;
        ks = td->td_kstack;
        td->td_kstack = 0;
        td->td_kstack_pages = 0;
        if (pages == KSTACK_PAGES && kstacks <= kstack_cache_size) {
                /*
                 * The cache entry is stored in the now-unused stack
                 * memory itself; only the object pointer needs saving.
                 */
                ks_ce = (struct kstack_cache_entry *)ks;
                ks_ce->ksobj = ksobj;
                mtx_lock(&kstack_cache_mtx);
                ks_ce->next_ks_entry = kstack_cache;
                kstack_cache = ks_ce;
                mtx_unlock(&kstack_cache_mtx);
                return;
        }
        vm_thread_stack_dispose(ksobj, ks, pages);
}

static void
vm_thread_stack_lowmem(void *nulll)
{
        struct kstack_cache_entry *ks_ce, *ks_ce1;

        mtx_lock(&kstack_cache_mtx);
        ks_ce = kstack_cache;
        kstack_cache = NULL;
        mtx_unlock(&kstack_cache_mtx);

        while (ks_ce != NULL) {
                ks_ce1 = ks_ce;
                ks_ce = ks_ce->next_ks_entry;

                vm_thread_stack_dispose(ks_ce1->ksobj, (vm_offset_t)ks_ce1,
                    KSTACK_PAGES);
        }
}

static void
kstack_cache_init(void *nulll)
{

        EVENTHANDLER_REGISTER(vm_lowmem, vm_thread_stack_lowmem, NULL,
            EVENTHANDLER_PRI_ANY);
}

MTX_SYSINIT(kstack_cache, &kstack_cache_mtx, "kstkch", MTX_DEF);
SYSINIT(vm_kstacks, SI_SUB_KTHREAD_INIT, SI_ORDER_ANY, kstack_cache_init, NULL);

#ifndef NO_SWAPPING
/*
 * Allow a thread's kernel stack to be paged out.
 */
static void
vm_thread_swapout(struct thread *td)
{
        vm_object_t ksobj;
        vm_page_t m;
        int i, pages;

        cpu_thread_swapout(td);
        pages = td->td_kstack_pages;
        ksobj = td->td_kstack_obj;
        pmap_qremove(td->td_kstack, pages);
        VM_OBJECT_LOCK(ksobj);
        for (i = 0; i < pages; i++) {
                m = vm_page_lookup(ksobj, i);
                if (m == NULL)
                        panic("vm_thread_swapout: kstack already missing?");
                vm_page_dirty(m);
                vm_page_lock(m);
                vm_page_unwire(m, 0);
                vm_page_unlock(m);
        }
        VM_OBJECT_UNLOCK(ksobj);
}

/*
 * Bring the kernel stack for a specified thread back in.
 */
static void
vm_thread_swapin(struct thread *td)
{
        vm_object_t ksobj;
        vm_page_t ma[KSTACK_MAX_PAGES];
        int i, j, k, pages, rv;

        pages = td->td_kstack_pages;
        ksobj = td->td_kstack_obj;
        VM_OBJECT_LOCK(ksobj);
        for (i = 0; i < pages; i++)
                ma[i] = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY |
                    VM_ALLOC_WIRED);
        for (i = 0; i < pages; i++) {
                if (ma[i]->valid != VM_PAGE_BITS_ALL) {
                        KASSERT(ma[i]->oflags & VPO_BUSY,
                            ("lost busy 1"));
                        vm_object_pip_add(ksobj, 1);
                        /*
                         * Find the run of consecutive invalid pages
                         * starting at i, so that they can be read with
                         * a single pager request.
                         */
                        for (j = i + 1; j < pages; j++) {
                                KASSERT(ma[j]->valid == VM_PAGE_BITS_ALL ||
                                    (ma[j]->oflags & VPO_BUSY),
                                    ("lost busy 2"));
                                if (ma[j]->valid == VM_PAGE_BITS_ALL)
                                        break;
                        }
                        rv = vm_pager_get_pages(ksobj, ma + i, j - i, 0);
                        if (rv != VM_PAGER_OK)
                                panic("vm_thread_swapin: cannot get kstack for proc: %d",
                                    td->td_proc->p_pid);
                        vm_object_pip_wakeup(ksobj);
                        /*
                         * The pager may have replaced the pages; look
                         * them up again before mapping the stack.
                         */
                        for (k = i; k < j; k++)
                                ma[k] = vm_page_lookup(ksobj, k);
                        vm_page_wakeup(ma[i]);
                } else if (ma[i]->oflags & VPO_BUSY)
                        vm_page_wakeup(ma[i]);
        }
        VM_OBJECT_UNLOCK(ksobj);
        pmap_qenter(td->td_kstack, ma, pages);
        cpu_thread_swapin(td);
}
#endif /* !NO_SWAPPING */

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
int
vm_forkproc(struct thread *td, struct proc *p2, struct thread *td2,
    struct vmspace *vm2, int flags)
{
        struct proc *p1 = td->td_proc;
        int error;

        if ((flags & RFPROC) == 0) {
                /*
                 * If the memory is shared, divorce it: this converts
                 * memory shared amongst the threads into local
                 * copy-on-write memory.
                 */
                if ((flags & RFMEM) == 0) {
                        if (p1->p_vmspace->vm_refcnt > 1) {
                                error = vmspace_unshare(p1);
                                if (error)
                                        return (error);
                        }
                }
                cpu_fork(td, p2, td2, flags);
                return (0);
        }

        if (flags & RFMEM) {
                p2->p_vmspace = p1->p_vmspace;
                atomic_add_int(&p1->p_vmspace->vm_refcnt, 1);
        }

        while (vm_page_count_severe()) {
                VM_WAIT;
        }

        if ((flags & RFMEM) == 0) {
                p2->p_vmspace = vm2;
                if (p1->p_vmspace->vm_shm)
                        shmfork(p1, p2);
        }

        /*
         * cpu_fork will copy and update the pcb, set up the kernel stack,
         * and make the child ready to run.
         */
        cpu_fork(td, p2, td2, flags);
        return (0);
}
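
/*
 * Reference sketch of how the rfork(2)-style flags select the cases
 * above (derived only from the code in this function):
 *
 *      RFPROC | RFMEM  new process shares p1's vmspace (refcount bump)
 *      RFPROC alone    new process receives its own vmspace (vm2)
 *      neither flag    the current process's shared memory is divorced
 *                      in place via vmspace_unshare()
 */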

/*
 * Called after a process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(struct proc *p)
{

        vmspace_exitfree(p);            /* and clean-out the vmspace */
}

void
faultin(struct proc *p)
{
#ifdef NO_SWAPPING

        PROC_LOCK_ASSERT(p, MA_OWNED);
        if ((p->p_flag & P_INMEM) == 0)
                panic("faultin: proc swapped out with NO_SWAPPING!");
#else /* !NO_SWAPPING */
        struct thread *td;

        PROC_LOCK_ASSERT(p, MA_OWNED);
        /*
         * If another process is swapping in this process,
         * just wait until it finishes.
         */
        if (p->p_flag & P_SWAPPINGIN) {
                while (p->p_flag & P_SWAPPINGIN)
                        msleep(&p->p_flag, &p->p_mtx, PVM, "faultin", 0);
                return;
        }
        if ((p->p_flag & P_INMEM) == 0) {
                /*
                 * Don't let another thread swap process p out while we are
                 * busy swapping it in.
                 */
                ++p->p_lock;
                p->p_flag |= P_SWAPPINGIN;
                PROC_UNLOCK(p);

                /*
                 * We hold no lock here because the list of threads
                 * can not change while all threads in the process are
                 * swapped out.
                 */
                FOREACH_THREAD_IN_PROC(p, td)
                        vm_thread_swapin(td);
                PROC_LOCK(p);
                swapclear(p);
                p->p_swtick = ticks;

                wakeup(&p->p_flag);

                /* Allow other threads to swap p out now. */
                --p->p_lock;
        }
#endif /* NO_SWAPPING */
}

/*
 * This swapin algorithm attempts to swap in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 *
 * Giant is held on entry.
 */
void
swapper(void)
{
        struct proc *p;
        struct thread *td;
        struct proc *pp;
        int slptime;
        int swtime;
        int ppri;
        int pri;

loop:
        if (vm_page_count_min()) {
                VM_WAIT;
                goto loop;
        }

        pp = NULL;
        ppri = INT_MIN;
        sx_slock(&allproc_lock);
        FOREACH_PROC_IN_SYSTEM(p) {
                PROC_LOCK(p);
                if (p->p_state == PRS_NEW ||
                    p->p_flag & (P_SWAPPINGOUT | P_SWAPPINGIN | P_INMEM)) {
                        PROC_UNLOCK(p);
                        continue;
                }
                swtime = (ticks - p->p_swtick) / hz;
                FOREACH_THREAD_IN_PROC(p, td) {
                        /*
                         * An otherwise runnable thread of a process
                         * swapped out has only the TDI_SWAPPED bit set.
                         */
                        thread_lock(td);
                        if (td->td_inhibitors == TDI_SWAPPED) {
                                slptime = (ticks - td->td_slptick) / hz;
                                pri = swtime + slptime;
                                if ((td->td_flags & TDF_SWAPINREQ) == 0)
                                        pri -= p->p_nice * 8;
                                /*
                                 * If this thread is higher priority
                                 * and there is enough space, then select
                                 * this process instead of the previous
                                 * selection.
                                 */
                                if (pri > ppri) {
                                        pp = p;
                                        ppri = pri;
                                }
                        }
                        thread_unlock(td);
                }
                PROC_UNLOCK(p);
        }
        sx_sunlock(&allproc_lock);

        /*
         * Nothing to do, back to sleep.
         */
        if ((p = pp) == NULL) {
                tsleep(&proc0, PVM, "swapin", MAXSLP * hz / 2);
                goto loop;
        }
        PROC_LOCK(p);

        /*
         * Another process may be bringing or may have already
         * brought this process in while we traverse all threads.
         * Or, this process may even be being swapped out again.
         */
        if (p->p_flag & (P_INMEM | P_SWAPPINGOUT | P_SWAPPINGIN)) {
                PROC_UNLOCK(p);
                goto loop;
        }

        /*
         * We would like to bring someone in (only if there is space).
         * [What checks the space? ]
         */
        faultin(p);
        PROC_UNLOCK(p);
        goto loop;
}
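
/*
 * Worked example of the selection arithmetic above (the numbers are
 * hypothetical): for a process swapped out for 30 seconds (swtime = 30)
 * whose thread has been asleep for 10 seconds (slptime = 10), with a
 * nice value of -5 and TDF_SWAPINREQ clear:
 *
 *      pri = 30 + 10 - (-5 * 8) = 80
 *
 * so the processes that have been swapped out longest, slept longest,
 * and have the most favorable nice values accumulate the highest pri
 * and are faulted back in first.
 */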

void
kick_proc0(void)
{

        wakeup(&proc0);
}

#ifndef NO_SWAPPING

/*
 * swap_idle_threshold1 is the guaranteed time (in seconds) that a
 * process remains swapped in before it becomes a candidate for
 * swapping out.
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
    &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process");

/*
 * swap_idle_threshold2 is the time (in seconds) that a process can be
 * idle before it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
    &swap_idle_threshold2, 0, "Time before a process will be swapped out");
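
/*
 * Tuning sketch (illustrative): both knobs are writable at run time
 * under the vm sysctl tree, e.g. "sysctl vm.swap_idle_threshold2=30"
 * would let idle processes stay in memory for 30 seconds before idle
 * swapping (VM_SWAP_IDLE) may evict them; see the slptime checks in
 * swapout_procs() below.
 */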

/*
 * First, if any processes have been sleeping or stopped for at least
 * "swap_idle_threshold1" seconds, they are swapped out.  If, however,
 * no such processes exist, then the longest-sleeping or stopped
 * process is swapped out.  Finally, and only as a last resort, if
 * there are no sleeping or stopped processes, the longest-resident
 * process is swapped out.
 */
void
swapout_procs(int action)
{
        struct proc *p;
        struct thread *td;
        int didswap = 0;

retry:
        sx_slock(&allproc_lock);
        FOREACH_PROC_IN_SYSTEM(p) {
                struct vmspace *vm;
                int minslptime = 100000;
                int slptime;

                /*
                 * Watch out for a process in
                 * creation.  It may have no
                 * address space or lock yet.
                 */
                if (p->p_state == PRS_NEW)
                        continue;
                /*
                 * An aio daemon switches its
                 * address space while running.
                 * Perform a quick check whether
                 * a process has P_SYSTEM.
                 */
                if ((p->p_flag & P_SYSTEM) != 0)
                        continue;
                /*
                 * Do not swap out a process that
                 * is waiting for VM data
                 * structures as there is a possible
                 * deadlock.  Test this first as
                 * this may block.
                 *
                 * Lock the map until swapout
                 * finishes, or a thread of this
                 * process may attempt to alter
                 * the map.
                 */
                vm = vmspace_acquire_ref(p);
                if (vm == NULL)
                        continue;
                if (!vm_map_trylock(&vm->vm_map))
                        goto nextproc1;

                PROC_LOCK(p);
                if (p->p_lock != 0 ||
                    (p->p_flag & (P_STOPPED_SINGLE|P_TRACED|P_SYSTEM|P_WEXIT)
                    ) != 0) {
                        goto nextproc;
                }
                /*
                 * Only an aiod changes its vmspace; however, it will
                 * have been skipped by the P_SYSTEM check above.
                 */
                if ((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) != P_INMEM)
                        goto nextproc;

                switch (p->p_state) {
                default:
                        /*
                         * Don't swap out processes in any sort
                         * of 'special' state.
                         */
                        break;

                case PRS_NORMAL:
                        /*
                         * Do not swap out a realtime process.
                         * Check all of the process's threads.
                         */
                        FOREACH_THREAD_IN_PROC(p, td) {
                                thread_lock(td);
                                if (PRI_IS_REALTIME(td->td_pri_class)) {
                                        thread_unlock(td);
                                        goto nextproc;
                                }
                                slptime = (ticks - td->td_slptick) / hz;
                                /*
                                 * Guarantee swap_idle_threshold1
                                 * time in memory.
                                 */
                                if (slptime < swap_idle_threshold1) {
                                        thread_unlock(td);
                                        goto nextproc;
                                }

                                /*
                                 * Do not swap out a process if it is
                                 * waiting on a critical event of some
                                 * kind or there is a thread whose
                                 * pageable memory may be accessed.
                                 *
                                 * This could be refined to support
                                 * swapping out a thread.
                                 */
                                if (!thread_safetoswapout(td)) {
                                        thread_unlock(td);
                                        goto nextproc;
                                }
                                /*
                                 * If the system is under memory stress,
                                 * or if we are swapping
                                 * idle processes >= swap_idle_threshold2,
                                 * then swap the process out.
                                 */
                                if (((action & VM_SWAP_NORMAL) == 0) &&
                                    (((action & VM_SWAP_IDLE) == 0) ||
                                    (slptime < swap_idle_threshold2))) {
                                        thread_unlock(td);
                                        goto nextproc;
                                }

                                if (minslptime > slptime)
                                        minslptime = slptime;
                                thread_unlock(td);
                        }

                        /*
                         * If the pageout daemon didn't free enough pages,
                         * or if this process is idle and the system is
                         * configured to swap proactively, swap it out.
                         */
                        if ((action & VM_SWAP_NORMAL) ||
                                ((action & VM_SWAP_IDLE) &&
                                 (minslptime > swap_idle_threshold2))) {
                                if (swapout(p) == 0)
                                        didswap++;
                                PROC_UNLOCK(p);
                                vm_map_unlock(&vm->vm_map);
                                vmspace_free(vm);
                                sx_sunlock(&allproc_lock);
                                goto retry;
                        }
                }
nextproc:
                PROC_UNLOCK(p);
                vm_map_unlock(&vm->vm_map);
nextproc1:
                vmspace_free(vm);
                continue;
        }
        sx_sunlock(&allproc_lock);
        /*
         * If we swapped something out and another process needed memory,
         * then wake up the sched process.
         */
        if (didswap)
                wakeup(&proc0);
}

static void
swapclear(struct proc *p)
{
        struct thread *td;

        PROC_LOCK_ASSERT(p, MA_OWNED);

        FOREACH_THREAD_IN_PROC(p, td) {
                thread_lock(td);
                td->td_flags |= TDF_INMEM;
                td->td_flags &= ~TDF_SWAPINREQ;
                TD_CLR_SWAPPED(td);
                if (TD_CAN_RUN(td))
                        if (setrunnable(td)) {
#ifdef INVARIANTS
                                /*
                                 * XXX: We just cleared TDI_SWAPPED
                                 * above and set TDF_INMEM, so this
                                 * should never happen.
                                 */
                                panic("not waking up swapper");
#endif
                        }
                thread_unlock(td);
        }
        p->p_flag &= ~(P_SWAPPINGIN|P_SWAPPINGOUT);
        p->p_flag |= P_INMEM;
}

static int
swapout(struct proc *p)
{
        struct thread *td;

        PROC_LOCK_ASSERT(p, MA_OWNED);
#if defined(SWAP_DEBUG)
        printf("swapping out %d\n", p->p_pid);
#endif

        /*
         * The states of this process and its threads may have changed
         * by now.  Assuming that there is only one pageout daemon thread,
         * this process should still be in memory.
         */
        KASSERT((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) == P_INMEM,
                ("swapout: lost a swapout race?"));

        /*
         * Remember the process resident count.
         */
        p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);
        /*
         * Check and mark all threads before we proceed.
         */
        p->p_flag &= ~P_INMEM;
        p->p_flag |= P_SWAPPINGOUT;
        FOREACH_THREAD_IN_PROC(p, td) {
                thread_lock(td);
                if (!thread_safetoswapout(td)) {
                        thread_unlock(td);
                        swapclear(p);
                        return (EBUSY);
                }
                td->td_flags &= ~TDF_INMEM;
                TD_SET_SWAPPED(td);
                thread_unlock(td);
        }
        td = FIRST_THREAD_IN_PROC(p);
        ++td->td_ru.ru_nswap;
        PROC_UNLOCK(p);

        /*
         * This list is stable because all threads are now prevented from
         * running.  The list is only modified in the context of a running
         * thread in this process.
         */
        FOREACH_THREAD_IN_PROC(p, td)
                vm_thread_swapout(td);

        PROC_LOCK(p);
        p->p_flag &= ~P_SWAPPINGOUT;
        p->p_swtick = ticks;
        return (0);
}
#endif /* !NO_SWAPPING */
