
FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_glue.c


    1 /*-
    2  * Copyright (c) 1991, 1993
    3  *      The Regents of the University of California.  All rights reserved.
    4  *
    5  * This code is derived from software contributed to Berkeley by
    6  * The Mach Operating System project at Carnegie-Mellon University.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  * 4. Neither the name of the University nor the names of its contributors
   17  *    may be used to endorse or promote products derived from this software
   18  *    without specific prior written permission.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   30  * SUCH DAMAGE.
   31  *
   32  *      from: @(#)vm_glue.c     8.6 (Berkeley) 1/5/94
   33  *
   34  *
   35  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
   36  * All rights reserved.
   37  *
   38  * Permission to use, copy, modify and distribute this software and
   39  * its documentation is hereby granted, provided that both the copyright
   40  * notice and this permission notice appear in all copies of the
   41  * software, derivative works or modified versions, and any portions
   42  * thereof, and that both notices appear in supporting documentation.
   43  *
   44  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
   45  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
   46  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
   47  *
   48  * Carnegie Mellon requests users of this software to return to
   49  *
   50  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
   51  *  School of Computer Science
   52  *  Carnegie Mellon University
   53  *  Pittsburgh PA 15213-3890
   54  *
   55  * any improvements or extensions that they make and grant Carnegie the
   56  * rights to redistribute these changes.
   57  */
   58 
   59 #include <sys/cdefs.h>
   60 __FBSDID("$FreeBSD: releng/10.0/sys/vm/vm_glue.c 254649 2013-08-22 07:39:53Z kib $");
   61 
   62 #include "opt_vm.h"
   63 #include "opt_kstack_pages.h"
   64 #include "opt_kstack_max_pages.h"
   65 
   66 #include <sys/param.h>
   67 #include <sys/systm.h>
   68 #include <sys/limits.h>
   69 #include <sys/lock.h>
   70 #include <sys/malloc.h>
   71 #include <sys/mutex.h>
   72 #include <sys/proc.h>
   73 #include <sys/racct.h>
   74 #include <sys/resourcevar.h>
   75 #include <sys/rwlock.h>
   76 #include <sys/sched.h>
   77 #include <sys/sf_buf.h>
   78 #include <sys/shm.h>
   79 #include <sys/vmmeter.h>
   80 #include <sys/vmem.h>
   81 #include <sys/sx.h>
   82 #include <sys/sysctl.h>
   83 #include <sys/_kstack_cache.h>
   84 #include <sys/eventhandler.h>
   85 #include <sys/kernel.h>
   86 #include <sys/ktr.h>
   87 #include <sys/unistd.h>
   88 
   89 #include <vm/vm.h>
   90 #include <vm/vm_param.h>
   91 #include <vm/pmap.h>
   92 #include <vm/vm_map.h>
   93 #include <vm/vm_page.h>
   94 #include <vm/vm_pageout.h>
   95 #include <vm/vm_object.h>
   96 #include <vm/vm_kern.h>
   97 #include <vm/vm_extern.h>
   98 #include <vm/vm_pager.h>
   99 #include <vm/swap_pager.h>
  100 
  101 #ifndef NO_SWAPPING
  102 static int swapout(struct proc *);
  103 static void swapclear(struct proc *);
  104 static void vm_thread_swapin(struct thread *td);
  105 static void vm_thread_swapout(struct thread *td);
  106 #endif
  107 
  108 /*
  109  * MPSAFE
  110  *
  111  * WARNING!  This code calls vm_map_check_protection() which only checks
  112  * the associated vm_map_entry range.  It does not determine whether the
  113  * contents of the memory are actually readable or writable.  In most cases
  114  * just checking the vm_map_entry is sufficient within the kernel's address
  115  * space.
  116  */
  117 int
  118 kernacc(addr, len, rw)
  119         void *addr;
  120         int len, rw;
  121 {
  122         boolean_t rv;
  123         vm_offset_t saddr, eaddr;
  124         vm_prot_t prot;
  125 
  126         KASSERT((rw & ~VM_PROT_ALL) == 0,
  127             ("illegal ``rw'' argument to kernacc (%x)\n", rw));
  128 
  129         if ((vm_offset_t)addr + len > kernel_map->max_offset ||
  130             (vm_offset_t)addr + len < (vm_offset_t)addr)
  131                 return (FALSE);
  132 
  133         prot = rw;
  134         saddr = trunc_page((vm_offset_t)addr);
  135         eaddr = round_page((vm_offset_t)addr + len);
  136         vm_map_lock_read(kernel_map);
  137         rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
  138         vm_map_unlock_read(kernel_map);
  139         return (rv == TRUE);
  140 }
  141 
  142 /*
  143  * MPSAFE
  144  *
  145  * WARNING!  This code calls vm_map_check_protection() which only checks
  146  * the associated vm_map_entry range.  It does not determine whether the
  147  * contents of the memory are actually readable or writable.  vmapbuf(),
  148  * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
  149  * used in conjunction with this call.
  150  */
  151 int
  152 useracc(addr, len, rw)
  153         void *addr;
  154         int len, rw;
  155 {
  156         boolean_t rv;
  157         vm_prot_t prot;
  158         vm_map_t map;
  159 
  160         KASSERT((rw & ~VM_PROT_ALL) == 0,
  161             ("illegal ``rw'' argument to useracc (%x)\n", rw));
  162         prot = rw;
  163         map = &curproc->p_vmspace->vm_map;
  164         if ((vm_offset_t)addr + len > vm_map_max(map) ||
  165             (vm_offset_t)addr + len < (vm_offset_t)addr) {
  166                 return (FALSE);
  167         }
  168         vm_map_lock_read(map);
  169         rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
  170             round_page((vm_offset_t)addr + len), prot);
  171         vm_map_unlock_read(map);
  172         return (rv == TRUE);
  173 }
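
A minimal sketch of how a caller might consult these checks before a copy; the function and its parameters are hypothetical and not part of this file, and the check remains advisory, since the map can change before copyin() runs:

	int
	example_read_user(void *uaddr, int len, char *kbuf)
	{

		if (!useracc(uaddr, len, VM_PROT_READ))
			return (EFAULT);	/* range is not mapped readable */
		/* copyin() still handles faults; useracc() was only a pre-check */
		return (copyin(uaddr, kbuf, (size_t)len));
	}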
  174 
  175 int
  176 vslock(void *addr, size_t len)
  177 {
  178         vm_offset_t end, last, start;
  179         vm_size_t npages;
  180         int error;
  181 
  182         last = (vm_offset_t)addr + len;
  183         start = trunc_page((vm_offset_t)addr);
  184         end = round_page(last);
  185         if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)
  186                 return (EINVAL);
  187         npages = atop(end - start);
  188         if (npages > vm_page_max_wired)
  189                 return (ENOMEM);
  190 #if 0
  191         /*
  192          * XXX - not yet
  193          *
  194          * The limit for transient usage of wired pages should be
  195          * larger than for "permanent" wired pages (mlock()).
  196          *
  197          * Also, the sysctl code, which is the only present user
  198          * of vslock(), does a hard loop on EAGAIN.
  199          */
  200         if (npages + cnt.v_wire_count > vm_page_max_wired)
  201                 return (EAGAIN);
  202 #endif
  203         error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
  204             VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
  205         /*
  206          * Return EFAULT on error to match copy{in,out}() behaviour
  207          * rather than returning ENOMEM like mlock() would.
  208          */
  209         return (error == KERN_SUCCESS ? 0 : EFAULT);
  210 }
  211 
  212 void
  213 vsunlock(void *addr, size_t len)
  214 {
  215 
  216         /* Rely on the parameter sanity checks performed by vslock(). */
  217         (void)vm_map_unwire(&curproc->p_vmspace->vm_map,
  218             trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
  219             VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
  220 }
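
A sketch of the intended vslock()/vsunlock() pairing, in the style of the sysctl code that is vslock()'s main consumer (the variable names are assumed for illustration):

	error = vslock(uaddr, len);	/* wire the pages: no faults during access */
	if (error != 0)
		return (error);		/* EFAULT, or ENOMEM if over vm_page_max_wired */
	error = copyout(kbuf, uaddr, len);
	vsunlock(uaddr, len);		/* unwire; relies on vslock()'s sanity checks */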
  221 
  222 /*
  223  * Pin the page contained within the given object at the given offset.  If the
  224  * page is not resident, allocate and load it using the given object's pager.
  225  * Return the pinned page if successful; otherwise, return NULL.
  226  */
  227 static vm_page_t
  228 vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
  229 {
  230         vm_page_t m, ma[1];
  231         vm_pindex_t pindex;
  232         int rv;
  233 
  234         VM_OBJECT_WLOCK(object);
  235         pindex = OFF_TO_IDX(offset);
  236         m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
  237         if (m->valid != VM_PAGE_BITS_ALL) {
  238                 ma[0] = m;
  239                 rv = vm_pager_get_pages(object, ma, 1, 0);
  240                 m = vm_page_lookup(object, pindex);
  241                 if (m == NULL)
  242                         goto out;
  243                 if (rv != VM_PAGER_OK) {
  244                         vm_page_lock(m);
  245                         vm_page_free(m);
  246                         vm_page_unlock(m);
  247                         m = NULL;
  248                         goto out;
  249                 }
  250         }
  251         vm_page_xunbusy(m);
  252         vm_page_lock(m);
  253         vm_page_hold(m);
  254         vm_page_unlock(m);
  255 out:
  256         VM_OBJECT_WUNLOCK(object);
  257         return (m);
  258 }
  259 
  260 /*
  261  * Return a CPU private mapping to the page at the given offset within the
  262  * given object.  The page is pinned before it is mapped.
  263  */
  264 struct sf_buf *
  265 vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset)
  266 {
  267         vm_page_t m;
  268 
  269         m = vm_imgact_hold_page(object, offset);
  270         if (m == NULL)
  271                 return (NULL);
  272         sched_pin();
  273         return (sf_buf_alloc(m, SFB_CPUPRIVATE));
  274 }
  275 
  276 /*
  277  * Destroy the given CPU private mapping and unpin the page that it mapped.
  278  */
  279 void
  280 vm_imgact_unmap_page(struct sf_buf *sf)
  281 {
  282         vm_page_t m;
  283 
  284         m = sf_buf_page(sf);
  285         sf_buf_free(sf);
  286         sched_unpin();
  287         vm_page_lock(m);
  288         vm_page_unhold(m);
  289         vm_page_unlock(m);
  290 }
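
Loosely modeled on how image activators probe an executable's header (compare imgact_elf.c), a hypothetical caller might pair these two functions as follows, with error handling abbreviated:

	struct sf_buf *sf;
	const char *hdr;

	sf = vm_imgact_map_page(object, offset);
	if (sf == NULL)
		return (EIO);
	hdr = (const char *)sf_buf_kva(sf);	/* CPU-private KVA of the pinned page */
	/* ... inspect the header bytes ... */
	vm_imgact_unmap_page(sf);		/* drop the mapping and unpin the page */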
  291 
  292 void
  293 vm_sync_icache(vm_map_t map, vm_offset_t va, vm_offset_t sz)
  294 {
  295 
  296         pmap_sync_icache(map->pmap, va, sz);
  297 }
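
One plausible caller, assuming machine-dependent code that has just patched instruction bytes in another process (the names here are illustrative, not taken from this file):

	/* bytes at 'va' in process 'p' were rewritten, e.g. to plant a breakpoint */
	vm_sync_icache(&p->p_vmspace->vm_map, va, sizeof(uint32_t));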
  298 
  299 struct kstack_cache_entry *kstack_cache;
  300 static int kstack_cache_size = 128;
  301 static int kstacks;
  302 static struct mtx kstack_cache_mtx;
  303 MTX_SYSINIT(kstack_cache, &kstack_cache_mtx, "kstkch", MTX_DEF);
  304 
  305 SYSCTL_INT(_vm, OID_AUTO, kstack_cache_size, CTLFLAG_RW, &kstack_cache_size, 0,
  306     "Maximum number of cached kernel stacks");
  307 SYSCTL_INT(_vm, OID_AUTO, kstacks, CTLFLAG_RD, &kstacks, 0,
  308     "Number of kernel stacks allocated");
  309 
  310 #ifndef KSTACK_MAX_PAGES
  311 #define KSTACK_MAX_PAGES 32
  312 #endif
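
The stack cache costs no extra memory: a freed stack's own kernel virtual address doubles as the list node. The entry layout, approximately as declared in sys/_kstack_cache.h, is:

	struct kstack_cache_entry {
		vm_object_t ksobj;				/* backing object of the cached stack */
		struct kstack_cache_entry *next_ks_entry;	/* singly linked free list */
	};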
  313 
  314 /*
  315  * Create the kernel stack (including pcb for i386) for a new thread.
  316  * This routine directly affects fork performance for a process
  317  * and thread-creation performance.
  318  */
  319 int
  320 vm_thread_new(struct thread *td, int pages)
  321 {
  322         vm_object_t ksobj;
  323         vm_offset_t ks;
  324         vm_page_t m, ma[KSTACK_MAX_PAGES];
  325         struct kstack_cache_entry *ks_ce;
  326         int i;
  327 
  328         /* Bounds check */
  329         if (pages <= 1)
  330                 pages = KSTACK_PAGES;
  331         else if (pages > KSTACK_MAX_PAGES)
  332                 pages = KSTACK_MAX_PAGES;
  333 
  334         if (pages == KSTACK_PAGES) {
  335                 mtx_lock(&kstack_cache_mtx);
  336                 if (kstack_cache != NULL) {
  337                         ks_ce = kstack_cache;
  338                         kstack_cache = ks_ce->next_ks_entry;
  339                         mtx_unlock(&kstack_cache_mtx);
  340 
  341                         td->td_kstack_obj = ks_ce->ksobj;
  342                         td->td_kstack = (vm_offset_t)ks_ce;
  343                         td->td_kstack_pages = KSTACK_PAGES;
  344                         return (1);
  345                 }
  346                 mtx_unlock(&kstack_cache_mtx);
  347         }
  348 
  349         /*
  350          * Allocate an object for the kstack.
  351          */
  352         ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
  353         
  354         /*
  355          * Get a kernel virtual address for this thread's kstack.
  356          */
  357 #if defined(__mips__)
  358         /*
  359          * We need to align the kstack's mapped address to fit within
  360          * a single TLB entry.
  361          */
  362         if (vmem_xalloc(kernel_arena, (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE,
  363             PAGE_SIZE * 2, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
  364             M_BESTFIT | M_NOWAIT, &ks)) {
  365                 ks = 0;
  366         }
  367 #else
  368         ks = kva_alloc((pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
  369 #endif
  370         if (ks == 0) {
  371                 printf("vm_thread_new: kstack allocation failed\n");
  372                 vm_object_deallocate(ksobj);
  373                 return (0);
  374         }
  375 
  376         atomic_add_int(&kstacks, 1);
  377         if (KSTACK_GUARD_PAGES != 0) {
  378                 pmap_qremove(ks, KSTACK_GUARD_PAGES);
  379                 ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
  380         }
  381         td->td_kstack_obj = ksobj;
  382         td->td_kstack = ks;
  383         /*
  384          * Knowing the number of pages allocated is useful when you
  385          * want to deallocate them.
  386          */
  387         td->td_kstack_pages = pages;
  388         /* 
  389          * For the length of the stack, link in a real page of RAM for each
  390          * page of stack.
  391          */
  392         VM_OBJECT_WLOCK(ksobj);
  393         for (i = 0; i < pages; i++) {
  394                 /*
  395                  * Get a kernel stack page.
  396                  */
  397                 m = vm_page_grab(ksobj, i, VM_ALLOC_NOBUSY |
  398                     VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
  399                 ma[i] = m;
  400                 m->valid = VM_PAGE_BITS_ALL;
  401         }
  402         VM_OBJECT_WUNLOCK(ksobj);
  403         pmap_qenter(ks, ma, pages);
  404         return (1);
  405 }
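
A sketch of the kernel virtual layout that vm_thread_new() produces, assuming the common case of a single guard page (KSTACK_GUARD_PAGES varies by architecture):

	/*
	 * base                  base + PAGE_SIZE                         top
	 *  |  guard (unmapped)  |  pages * PAGE_SIZE, wired and mapped  |
	 *
	 * td_kstack points just above the guard page, so a stack overflow
	 * that runs below td_kstack faults instead of silently corrupting
	 * adjacent kernel memory.
	 */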
  406 
  407 static void
  408 vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages)
  409 {
  410         vm_page_t m;
  411         int i;
  412 
  413         atomic_add_int(&kstacks, -1);
  414         pmap_qremove(ks, pages);
  415         VM_OBJECT_WLOCK(ksobj);
  416         for (i = 0; i < pages; i++) {
  417                 m = vm_page_lookup(ksobj, i);
  418                 if (m == NULL)
  419                         panic("vm_thread_stack_dispose: kstack already missing?");
  420                 vm_page_lock(m);
  421                 vm_page_unwire(m, 0);
  422                 vm_page_free(m);
  423                 vm_page_unlock(m);
  424         }
  425         VM_OBJECT_WUNLOCK(ksobj);
  426         vm_object_deallocate(ksobj);
  427         kva_free(ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
  428             (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
  429 }
  430 
  431 /*
  432  * Dispose of a thread's kernel stack.
  433  */
  434 void
  435 vm_thread_dispose(struct thread *td)
  436 {
  437         vm_object_t ksobj;
  438         vm_offset_t ks;
  439         struct kstack_cache_entry *ks_ce;
  440         int pages;
  441 
  442         pages = td->td_kstack_pages;
  443         ksobj = td->td_kstack_obj;
  444         ks = td->td_kstack;
  445         td->td_kstack = 0;
  446         td->td_kstack_pages = 0;
  447         if (pages == KSTACK_PAGES && kstacks <= kstack_cache_size) {
  448                 ks_ce = (struct kstack_cache_entry *)ks;
  449                 ks_ce->ksobj = ksobj;
  450                 mtx_lock(&kstack_cache_mtx);
  451                 ks_ce->next_ks_entry = kstack_cache;
  452                 kstack_cache = ks_ce;
  453                 mtx_unlock(&kstack_cache_mtx);
  454                 return;
  455         }
  456         vm_thread_stack_dispose(ksobj, ks, pages);
  457 }
  458 
  459 static void
  460 vm_thread_stack_lowmem(void *null)
  461 {
  462         struct kstack_cache_entry *ks_ce, *ks_ce1;
  463 
  464         mtx_lock(&kstack_cache_mtx);
  465         ks_ce = kstack_cache;
  466         kstack_cache = NULL;
  467         mtx_unlock(&kstack_cache_mtx);
  468 
  469         while (ks_ce != NULL) {
  470                 ks_ce1 = ks_ce;
  471                 ks_ce = ks_ce->next_ks_entry;
  472 
  473                 vm_thread_stack_dispose(ks_ce1->ksobj, (vm_offset_t)ks_ce1,
  474                     KSTACK_PAGES);
  475         }
  476 }
  477 
  478 static void
  479 kstack_cache_init(void *null)
  480 {
  481 
  482         EVENTHANDLER_REGISTER(vm_lowmem, vm_thread_stack_lowmem, NULL,
  483             EVENTHANDLER_PRI_ANY);
  484 }
  485 
  486 SYSINIT(vm_kstacks, SI_SUB_KTHREAD_INIT, SI_ORDER_ANY, kstack_cache_init, NULL);
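
The handler registered above runs when the page daemon announces memory pressure; the invocation site (in vm_pageout.c) is roughly:

	EVENTHANDLER_INVOKE(vm_lowmem, 0);	/* frees every stack cached above */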
  487 
  488 #ifndef NO_SWAPPING
  489 /*
  490  * Allow a thread's kernel stack to be paged out.
  491  */
  492 static void
  493 vm_thread_swapout(struct thread *td)
  494 {
  495         vm_object_t ksobj;
  496         vm_page_t m;
  497         int i, pages;
  498 
  499         cpu_thread_swapout(td);
  500         pages = td->td_kstack_pages;
  501         ksobj = td->td_kstack_obj;
  502         pmap_qremove(td->td_kstack, pages);
  503         VM_OBJECT_WLOCK(ksobj);
  504         for (i = 0; i < pages; i++) {
  505                 m = vm_page_lookup(ksobj, i);
  506                 if (m == NULL)
  507                         panic("vm_thread_swapout: kstack already missing?");
  508                 vm_page_dirty(m);
  509                 vm_page_lock(m);
  510                 vm_page_unwire(m, 0);
  511                 vm_page_unlock(m);
  512         }
  513         VM_OBJECT_WUNLOCK(ksobj);
  514 }
  515 
  516 /*
  517  * Bring the kernel stack for a specified thread back in.
  518  */
  519 static void
  520 vm_thread_swapin(struct thread *td)
  521 {
  522         vm_object_t ksobj;
  523         vm_page_t ma[KSTACK_MAX_PAGES];
  524         int i, j, k, pages, rv;
  525 
  526         pages = td->td_kstack_pages;
  527         ksobj = td->td_kstack_obj;
  528         VM_OBJECT_WLOCK(ksobj);
  529         for (i = 0; i < pages; i++)
  530                 ma[i] = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL |
  531                     VM_ALLOC_WIRED);
  532         for (i = 0; i < pages; i++) {
  533                 if (ma[i]->valid != VM_PAGE_BITS_ALL) {
  534                         vm_page_assert_xbusied(ma[i]);
  535                         vm_object_pip_add(ksobj, 1);
  536                         for (j = i + 1; j < pages; j++) {
  537                                 if (ma[j]->valid != VM_PAGE_BITS_ALL)
  538                                         vm_page_assert_xbusied(ma[j]);
  539                                 if (ma[j]->valid == VM_PAGE_BITS_ALL)
  540                                         break;
  541                         }
  542                         rv = vm_pager_get_pages(ksobj, ma + i, j - i, 0);
  543                         if (rv != VM_PAGER_OK)
  544                                 panic("vm_thread_swapin: cannot get kstack for proc: %d",
  545                                     td->td_proc->p_pid);
  546                         vm_object_pip_wakeup(ksobj);
  547                         for (k = i; k < j; k++)
  548                                 ma[k] = vm_page_lookup(ksobj, k);
  549                         vm_page_xunbusy(ma[i]);
  550                 } else if (vm_page_xbusied(ma[i]))
  551                         vm_page_xunbusy(ma[i]);
  552         }
  553         VM_OBJECT_WUNLOCK(ksobj);
  554         pmap_qenter(td->td_kstack, ma, pages);
  555         cpu_thread_swapin(td);
  556 }
  557 #endif /* !NO_SWAPPING */
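
As a worked example of the clustering loop in vm_thread_swapin(): with pages = 4, where pages 0 and 1 were paged out but page 2 is still valid, the scan starting at i = 0 stops the inner loop at j = 2, so a single vm_pager_get_pages() request brings pages 0 and 1 back in together before the outer loop resumes at page 2.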
  558 
  559 /*
  560  * Implement fork's actions on an address space.
  561  * Here we arrange for the address space to be copied or referenced,
  562  * allocate a user struct (pcb and kernel stack), then call the
  563  * machine-dependent layer to fill those in and make the new process
  564  * ready to run.  The new process is set up so that it returns directly
  565  * to user mode to avoid stack copying and relocation problems.
  566  */
  567 int
  568 vm_forkproc(td, p2, td2, vm2, flags)
  569         struct thread *td;
  570         struct proc *p2;
  571         struct thread *td2;
  572         struct vmspace *vm2;
  573         int flags;
  574 {
  575         struct proc *p1 = td->td_proc;
  576         int error;
  577 
  578         if ((flags & RFPROC) == 0) {
  579                 /*
  580                  * Divorce the memory: if it is shared, this
  581                  * changes memory shared amongst the threads
  582                  * into COW locally.
  583                  */
  584                 if ((flags & RFMEM) == 0) {
  585                         if (p1->p_vmspace->vm_refcnt > 1) {
  586                                 error = vmspace_unshare(p1);
  587                                 if (error)
  588                                         return (error);
  589                         }
  590                 }
  591                 cpu_fork(td, p2, td2, flags);
  592                 return (0);
  593         }
  594 
  595         if (flags & RFMEM) {
  596                 p2->p_vmspace = p1->p_vmspace;
  597                 atomic_add_int(&p1->p_vmspace->vm_refcnt, 1);
  598         }
  599 
  600         while (vm_page_count_severe()) {
  601                 VM_WAIT;
  602         }
  603 
  604         if ((flags & RFMEM) == 0) {
  605                 p2->p_vmspace = vm2;
  606                 if (p1->p_vmspace->vm_shm)
  607                         shmfork(p1, p2);
  608         }
  609 
  610         /*
  611          * cpu_fork will copy and update the pcb, set up the kernel stack,
  612          * and make the child ready to run.
  613          */
  614         cpu_fork(td, p2, td2, flags);
  615         return (0);
  616 }
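
For orientation, the flag combinations handled above correspond to rfork(2); plain fork(2) is equivalent to rfork(RFFDG | RFPROC). A userland sketch:

	#include <unistd.h>

	pid_t child;

	child = rfork(RFPROC);		/* child gets a copy-on-write copy of the vmspace */
	child = rfork(RFPROC | RFMEM);	/* child shares the parent's vmspace */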
  617 
  618 /*
  619  * Called after a process has been wait(2)'ed upon and is being reaped.
  620  * The idea is to reclaim resources that we could not reclaim while
  621  * the process was still executing.
  622  */
  623 void
  624 vm_waitproc(p)
  625         struct proc *p;
  626 {
  627 
  628         vmspace_exitfree(p);            /* and clean-out the vmspace */
  629 }
  630 
  631 void
  632 faultin(p)
  633         struct proc *p;
  634 {
  635 #ifdef NO_SWAPPING
  636 
  637         PROC_LOCK_ASSERT(p, MA_OWNED);
  638         if ((p->p_flag & P_INMEM) == 0)
  639                 panic("faultin: proc swapped out with NO_SWAPPING!");
  640 #else /* !NO_SWAPPING */
  641         struct thread *td;
  642 
  643         PROC_LOCK_ASSERT(p, MA_OWNED);
  644         /*
  645          * If another process is swapping in this process,
  646          * just wait until it finishes.
  647          */
  648         if (p->p_flag & P_SWAPPINGIN) {
  649                 while (p->p_flag & P_SWAPPINGIN)
  650                         msleep(&p->p_flag, &p->p_mtx, PVM, "faultin", 0);
  651                 return;
  652         }
  653         if ((p->p_flag & P_INMEM) == 0) {
  654                 /*
  655                  * Don't let another thread swap process p out while we are
  656                  * busy swapping it in.
  657                  */
  658                 ++p->p_lock;
  659                 p->p_flag |= P_SWAPPINGIN;
  660                 PROC_UNLOCK(p);
  661 
  662                 /*
  663                  * We hold no lock here because the list of threads
  664          * cannot change while all threads in the process are
  665                  * swapped out.
  666                  */
  667                 FOREACH_THREAD_IN_PROC(p, td)
  668                         vm_thread_swapin(td);
  669                 PROC_LOCK(p);
  670                 swapclear(p);
  671                 p->p_swtick = ticks;
  672 
  673                 wakeup(&p->p_flag);
  674 
  675                 /* Allow other threads to swap p out now. */
  676                 --p->p_lock;
  677         }
  678 #endif /* NO_SWAPPING */
  679 }
  680 
  681 /*
  682  * This swapin algorithm attempts to swap in processes only if there
  683  * is enough space for them.  Of course, if a process waits for a long
  684  * time, it will be swapped in anyway.
  685  *
  686  * Giant is held on entry.
  687  */
  688 void
  689 swapper(void)
  690 {
  691         struct proc *p;
  692         struct thread *td;
  693         struct proc *pp;
  694         int slptime;
  695         int swtime;
  696         int ppri;
  697         int pri;
  698 
  699 loop:
  700         if (vm_page_count_min()) {
  701                 VM_WAIT;
  702                 goto loop;
  703         }
  704 
  705         pp = NULL;
  706         ppri = INT_MIN;
  707         sx_slock(&allproc_lock);
  708         FOREACH_PROC_IN_SYSTEM(p) {
  709                 PROC_LOCK(p);
  710                 if (p->p_state == PRS_NEW ||
  711                     p->p_flag & (P_SWAPPINGOUT | P_SWAPPINGIN | P_INMEM)) {
  712                         PROC_UNLOCK(p);
  713                         continue;
  714                 }
  715                 swtime = (ticks - p->p_swtick) / hz;
  716                 FOREACH_THREAD_IN_PROC(p, td) {
  717                         /*
  718                          * An otherwise runnable thread of a process
  719                          * that has been swapped out has only the
  720                          * TDI_SWAPPED inhibitor bit set.
  721                          */
  722                         thread_lock(td);
  723                         if (td->td_inhibitors == TDI_SWAPPED) {
  724                                 slptime = (ticks - td->td_slptick) / hz;
  725                                 pri = swtime + slptime;
  726                                 if ((td->td_flags & TDF_SWAPINREQ) == 0)
  727                                         pri -= p->p_nice * 8;
  728                                 /*
  729                                  * if this thread is higher priority
  730                                  * and there is enough space, then select
  731                                  * this process instead of the previous
  732                                  * selection.
  733                                  */
  734                                 if (pri > ppri) {
  735                                         pp = p;
  736                                         ppri = pri;
  737                                 }
  738                         }
  739                         thread_unlock(td);
  740                 }
  741                 PROC_UNLOCK(p);
  742         }
  743         sx_sunlock(&allproc_lock);
  744 
  745         /*
  746          * Nothing to do, back to sleep.
  747          */
  748         if ((p = pp) == NULL) {
  749                 tsleep(&proc0, PVM, "swapin", MAXSLP * hz / 2);
  750                 goto loop;
  751         }
  752         PROC_LOCK(p);
  753 
  754         /*
  755          * Another process may be bringing or may have already
  756          * brought this process in while we traverse all threads.
  757          * Or, this process may even be being swapped out again.
  758          */
  759         if (p->p_flag & (P_INMEM | P_SWAPPINGOUT | P_SWAPPINGIN)) {
  760                 PROC_UNLOCK(p);
  761                 goto loop;
  762         }
  763 
  764         /*
  765          * We would like to bring someone in (only if there is space).
  766          * [What checks the space?]
  767          */
  768         faultin(p);
  769         PROC_UNLOCK(p);
  770         goto loop;
  771 }
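
As a worked example of the selection score: a process swapped out for 30 seconds whose most eligible thread has slept for 10 seconds scores 30 + 10 - 8 * p_nice, so at nice 0 it scores 40, while at nice 20 it scores -120; heavily niced processes therefore wait longest to be brought back in, except that the nice adjustment is skipped entirely when TDF_SWAPINREQ is set.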
  772 
  773 void
  774 kick_proc0(void)
  775 {
  776 
  777         wakeup(&proc0);
  778 }
  779 
  780 #ifndef NO_SWAPPING
  781 
  782 /*
  783  * swap_idle_threshold1 is the guaranteed swapped-in time (in seconds) for a process.
  784  */
  785 static int swap_idle_threshold1 = 2;
  786 SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
  787     &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process");
  788 
  789 /*
  790  * Swap_idle_threshold2 is the time that a process can be idle before
  791  * it will be swapped out, if idle swapping is enabled.
  792  */
  793 static int swap_idle_threshold2 = 10;
  794 SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
  795     &swap_idle_threshold2, 0, "Time before a process will be swapped out");
  796 
  797 /*
  798  * First, if any processes have been sleeping or stopped for at least
  799  * "swap_idle_threshold1" seconds, they are swapped out.  If, however,
  800  * no such processes exist, then the longest-sleeping or stopped
  801  * process is swapped out.  Finally, and only as a last resort, if
  802  * there are no sleeping or stopped processes, the longest-resident
  803  * process is swapped out.
  804  */
  805 void
  806 swapout_procs(action)
  807 int action;
  808 {
  809         struct proc *p;
  810         struct thread *td;
  811         int didswap = 0;
  812 
  813 retry:
  814         sx_slock(&allproc_lock);
  815         FOREACH_PROC_IN_SYSTEM(p) {
  816                 struct vmspace *vm;
  817                 int minslptime = 100000;
  818                 int slptime;
  819                 
  820                 /*
  821                  * Watch out for a process in
  822                  * creation.  It may have no
  823                  * address space or lock yet.
  824                  */
  825                 if (p->p_state == PRS_NEW)
  826                         continue;
  827                 /*
  828                  * An aio daemon switches its
  829                  * address space while running.
  830                  * Perform a quick check whether
  831                  * a process has P_SYSTEM.
  832                  */
  833                 if ((p->p_flag & P_SYSTEM) != 0)
  834                         continue;
  835                 /*
  836                  * Do not swapout a process that
  837                  * is waiting for VM data
  838                  * structures as there is a possible
  839                  * deadlock.  Test this first as
  840                  * this may block.
  841                  *
  842                  * Lock the map until swapout
  843                  * finishes, or a thread of this
  844                  * process may attempt to alter
  845                  * the map.
  846                  */
  847                 vm = vmspace_acquire_ref(p);
  848                 if (vm == NULL)
  849                         continue;
  850                 if (!vm_map_trylock(&vm->vm_map))
  851                         goto nextproc1;
  852 
  853                 PROC_LOCK(p);
  854                 if (p->p_lock != 0 ||
  855                     (p->p_flag & (P_STOPPED_SINGLE|P_TRACED|P_SYSTEM|P_WEXIT)
  856                     ) != 0) {
  857                         goto nextproc;
  858                 }
  859                 /*
  860                  * Only aiod changes its vmspace; however, it will
  861                  * be skipped because of the P_SYSTEM check in the
  862                  * if statement above.
  863                  */
  864                 if ((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) != P_INMEM)
  865                         goto nextproc;
  866 
  867                 switch (p->p_state) {
  868                 default:
  869                         /* Don't swap out processes in any sort
  870                          * of 'special' state. */
  871                         break;
  872 
  873                 case PRS_NORMAL:
  874                         /*
  875                          * do not swapout a realtime process
  876                          * Check all the thread groups..
  877                          */
  878                         FOREACH_THREAD_IN_PROC(p, td) {
  879                                 thread_lock(td);
  880                                 if (PRI_IS_REALTIME(td->td_pri_class)) {
  881                                         thread_unlock(td);
  882                                         goto nextproc;
  883                                 }
  884                                 slptime = (ticks - td->td_slptick) / hz;
  885                                 /*
  886                                  * Guarantee swap_idle_threshold1
  887                                  * time in memory.
  888                                  */
  889                                 if (slptime < swap_idle_threshold1) {
  890                                         thread_unlock(td);
  891                                         goto nextproc;
  892                                 }
  893 
  894                                 /*
  895                                  * Do not swapout a process if it is
  896                                  * waiting on a critical event of some
  897                                  * kind or there is a thread whose
  898                                  * pageable memory may be accessed.
  899                                  *
  900                                  * This could be refined to support
  901                                  * swapping out a thread.
  902                                  */
  903                                 if (!thread_safetoswapout(td)) {
  904                                         thread_unlock(td);
  905                                         goto nextproc;
  906                                 }
  907                                 /*
  908                                  * If the system is under memory stress,
  909                                  * or if we are swapping
  910                                  * idle processes >= swap_idle_threshold2,
  911                                  * then swap the process out.
  912                                  */
  913                                 if (((action & VM_SWAP_NORMAL) == 0) &&
  914                                     (((action & VM_SWAP_IDLE) == 0) ||
  915                                     (slptime < swap_idle_threshold2))) {
  916                                         thread_unlock(td);
  917                                         goto nextproc;
  918                                 }
  919 
  920                                 if (minslptime > slptime)
  921                                         minslptime = slptime;
  922                                 thread_unlock(td);
  923                         }
  924 
  925                         /*
  926                          * If the pageout daemon didn't free enough pages,
  927                          * or if this process is idle and the system is
  928                          * configured to swap proactively, swap it out.
  929                          */
  930                         if ((action & VM_SWAP_NORMAL) ||
  931                                 ((action & VM_SWAP_IDLE) &&
  932                                  (minslptime > swap_idle_threshold2))) {
  933                                 if (swapout(p) == 0)
  934                                         didswap++;
  935                                 PROC_UNLOCK(p);
  936                                 vm_map_unlock(&vm->vm_map);
  937                                 vmspace_free(vm);
  938                                 sx_sunlock(&allproc_lock);
  939                                 goto retry;
  940                         }
  941                 }
  942 nextproc:
  943                 PROC_UNLOCK(p);
  944                 vm_map_unlock(&vm->vm_map);
  945 nextproc1:
  946                 vmspace_free(vm);
  947                 continue;
  948         }
  949         sx_sunlock(&allproc_lock);
  950         /*
  951          * If we swapped something out and another process needed memory,
  952          * then wake up the swapper (proc0).
  953          */
  954         if (didswap)
  955                 wakeup(&proc0);
  956 }
  957 
  958 static void
  959 swapclear(p)
  960         struct proc *p;
  961 {
  962         struct thread *td;
  963 
  964         PROC_LOCK_ASSERT(p, MA_OWNED);
  965 
  966         FOREACH_THREAD_IN_PROC(p, td) {
  967                 thread_lock(td);
  968                 td->td_flags |= TDF_INMEM;
  969                 td->td_flags &= ~TDF_SWAPINREQ;
  970                 TD_CLR_SWAPPED(td);
  971                 if (TD_CAN_RUN(td))
  972                         if (setrunnable(td)) {
  973 #ifdef INVARIANTS
  974                                 /*
  975                                  * XXX: We just cleared TDI_SWAPPED
  976                                  * above and set TDF_INMEM, so this
  977                                  * should never happen.
  978                                  */
  979                                 panic("not waking up swapper");
  980 #endif
  981                         }
  982                 thread_unlock(td);
  983         }
  984         p->p_flag &= ~(P_SWAPPINGIN|P_SWAPPINGOUT);
  985         p->p_flag |= P_INMEM;
  986 }
  987 
  988 static int
  989 swapout(p)
  990         struct proc *p;
  991 {
  992         struct thread *td;
  993 
  994         PROC_LOCK_ASSERT(p, MA_OWNED);
  995 #if defined(SWAP_DEBUG)
  996         printf("swapping out %d\n", p->p_pid);
  997 #endif
  998 
  999         /*
 1000          * The states of this process and its threads may have changed
 1001          * by now.  Assuming that there is only one pageout daemon thread,
 1002          * this process should still be in memory.
 1003          */
 1004         KASSERT((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) == P_INMEM,
 1005                 ("swapout: lost a swapout race?"));
 1006 
 1007         /*
 1008          * remember the process resident count
 1009          */
 1010         p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);
 1011         /*
 1012          * Check and mark all threads before we proceed.
 1013          */
 1014         p->p_flag &= ~P_INMEM;
 1015         p->p_flag |= P_SWAPPINGOUT;
 1016         FOREACH_THREAD_IN_PROC(p, td) {
 1017                 thread_lock(td);
 1018                 if (!thread_safetoswapout(td)) {
 1019                         thread_unlock(td);
 1020                         swapclear(p);
 1021                         return (EBUSY);
 1022                 }
 1023                 td->td_flags &= ~TDF_INMEM;
 1024                 TD_SET_SWAPPED(td);
 1025                 thread_unlock(td);
 1026         }
 1027         td = FIRST_THREAD_IN_PROC(p);
 1028         ++td->td_ru.ru_nswap;
 1029         PROC_UNLOCK(p);
 1030 
 1031         /*
 1032          * This list is stable because all threads are now prevented from
 1033          * running.  The list is only modified in the context of a running
 1034          * thread in this process.
 1035          */
 1036         FOREACH_THREAD_IN_PROC(p, td)
 1037                 vm_thread_swapout(td);
 1038 
 1039         PROC_LOCK(p);
 1040         p->p_flag &= ~P_SWAPPINGOUT;
 1041         p->p_swtick = ticks;
 1042         return (0);
 1043 }
 1044 #endif /* !NO_SWAPPING */
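
Summarizing the flag transitions implemented above, as a reading aid rather than anything taken from the source:

	/*
	 *  P_INMEM --swapout()--> P_SWAPPINGOUT --> (neither flag: swapped out)
	 *     ^                                                |
	 *     +--- swapclear() <-- P_SWAPPINGIN <-- faultin() -+
	 */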





This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.