FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_glue.c


    1 /*-
    2  * Copyright (c) 1991, 1993
    3  *      The Regents of the University of California.  All rights reserved.
    4  *
    5  * This code is derived from software contributed to Berkeley by
    6  * The Mach Operating System project at Carnegie-Mellon University.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  * 4. Neither the name of the University nor the names of its contributors
   17  *    may be used to endorse or promote products derived from this software
   18  *    without specific prior written permission.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   30  * SUCH DAMAGE.
   31  *
   32  *      from: @(#)vm_glue.c     8.6 (Berkeley) 1/5/94
   33  *
   34  *
   35  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
   36  * All rights reserved.
   37  *
   38  * Permission to use, copy, modify and distribute this software and
   39  * its documentation is hereby granted, provided that both the copyright
   40  * notice and this permission notice appear in all copies of the
   41  * software, derivative works or modified versions, and any portions
   42  * thereof, and that both notices appear in supporting documentation.
   43  *
   44  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
   45  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
   46  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
   47  *
   48  * Carnegie Mellon requests users of this software to return to
   49  *
   50  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
   51  *  School of Computer Science
   52  *  Carnegie Mellon University
   53  *  Pittsburgh PA 15213-3890
   54  *
   55  * any improvements or extensions that they make and grant Carnegie the
   56  * rights to redistribute these changes.
   57  */
   58 
   59 #include <sys/cdefs.h>
   60 __FBSDID("$FreeBSD: releng/11.0/sys/vm/vm_glue.c 302089 2016-06-22 20:15:37Z kib $");
   61 
   62 #include "opt_vm.h"
   63 #include "opt_kstack_pages.h"
   64 #include "opt_kstack_max_pages.h"
   65 #include "opt_kstack_usage_prof.h"
   66 
   67 #include <sys/param.h>
   68 #include <sys/systm.h>
   69 #include <sys/limits.h>
   70 #include <sys/lock.h>
   71 #include <sys/malloc.h>
   72 #include <sys/mutex.h>
   73 #include <sys/proc.h>
   74 #include <sys/racct.h>
   75 #include <sys/resourcevar.h>
   76 #include <sys/rwlock.h>
   77 #include <sys/sched.h>
   78 #include <sys/sf_buf.h>
   79 #include <sys/shm.h>
   80 #include <sys/vmmeter.h>
   81 #include <sys/vmem.h>
   82 #include <sys/sx.h>
   83 #include <sys/sysctl.h>
   84 #include <sys/_kstack_cache.h>
   85 #include <sys/eventhandler.h>
   86 #include <sys/kernel.h>
   87 #include <sys/ktr.h>
   88 #include <sys/unistd.h>
   89 
   90 #include <vm/vm.h>
   91 #include <vm/vm_param.h>
   92 #include <vm/pmap.h>
   93 #include <vm/vm_map.h>
   94 #include <vm/vm_page.h>
   95 #include <vm/vm_pageout.h>
   96 #include <vm/vm_object.h>
   97 #include <vm/vm_kern.h>
   98 #include <vm/vm_extern.h>
   99 #include <vm/vm_pager.h>
  100 #include <vm/swap_pager.h>
  101 
  102 #include <machine/cpu.h>
  103 
  104 #ifndef NO_SWAPPING
  105 static int swapout(struct proc *);
  106 static void swapclear(struct proc *);
  107 static void vm_thread_swapin(struct thread *td);
  108 static void vm_thread_swapout(struct thread *td);
  109 #endif
  110 
  111 /*
  112  * MPSAFE
  113  *
   114  * WARNING!  This code calls vm_map_check_protection() which only checks
   115  * the associated vm_map_entry range.  It does not determine whether the
   116  * contents of the memory are actually readable or writable.  In most cases
   117  * just checking the vm_map_entry is sufficient within the kernel's address
   118  * space.
  119  */
  120 int
  121 kernacc(addr, len, rw)
  122         void *addr;
  123         int len, rw;
  124 {
  125         boolean_t rv;
  126         vm_offset_t saddr, eaddr;
  127         vm_prot_t prot;
  128 
  129         KASSERT((rw & ~VM_PROT_ALL) == 0,
  130             ("illegal ``rw'' argument to kernacc (%x)\n", rw));
  131 
  132         if ((vm_offset_t)addr + len > kernel_map->max_offset ||
  133             (vm_offset_t)addr + len < (vm_offset_t)addr)
  134                 return (FALSE);
  135 
  136         prot = rw;
  137         saddr = trunc_page((vm_offset_t)addr);
  138         eaddr = round_page((vm_offset_t)addr + len);
  139         vm_map_lock_read(kernel_map);
  140         rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
  141         vm_map_unlock_read(kernel_map);
  142         return (rv == TRUE);
  143 }
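/*
 * A minimal usage sketch (hypothetical caller; "buf" and the error
 * handling are illustrative, not part of this file): verify that a
 * kernel buffer is mapped with the required protection before use.
 */
#if 0
	static char buf[128];

	if (!kernacc(buf, sizeof(buf), VM_PROT_READ | VM_PROT_WRITE))
		return (EFAULT);	/* range not mapped read/write */
#endif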
  144 
  145 /*
  146  * MPSAFE
  147  *
   148  * WARNING!  This code calls vm_map_check_protection() which only checks
   149  * the associated vm_map_entry range.  It does not determine whether the
   150  * contents of the memory are actually readable or writable.  vmapbuf(),
   151  * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
   152  * used in conjunction with this call.
  153  */
  154 int
  155 useracc(addr, len, rw)
  156         void *addr;
  157         int len, rw;
  158 {
  159         boolean_t rv;
  160         vm_prot_t prot;
  161         vm_map_t map;
  162 
  163         KASSERT((rw & ~VM_PROT_ALL) == 0,
  164             ("illegal ``rw'' argument to useracc (%x)\n", rw));
  165         prot = rw;
  166         map = &curproc->p_vmspace->vm_map;
  167         if ((vm_offset_t)addr + len > vm_map_max(map) ||
  168             (vm_offset_t)addr + len < (vm_offset_t)addr) {
  169                 return (FALSE);
  170         }
  171         vm_map_lock_read(map);
  172         rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
  173             round_page((vm_offset_t)addr + len), prot);
  174         vm_map_unlock_read(map);
  175         return (rv == TRUE);
  176 }
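/*
 * Hedged example of the caveat above: useracc() validates only the
 * map-entry protection, so the access itself should still go through
 * copyin()/copyout(), which fault and fail safely ("uaddr", "kbuf" and
 * "ulen" are illustrative).
 */
#if 0
	if (!useracc(uaddr, ulen, VM_PROT_READ))
		return (EFAULT);	/* cheap pre-check only */
	error = copyin(uaddr, kbuf, ulen);	/* may still return EFAULT */
#endif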
  177 
  178 int
  179 vslock(void *addr, size_t len)
  180 {
  181         vm_offset_t end, last, start;
  182         vm_size_t npages;
  183         int error;
  184 
  185         last = (vm_offset_t)addr + len;
  186         start = trunc_page((vm_offset_t)addr);
  187         end = round_page(last);
  188         if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)
  189                 return (EINVAL);
  190         npages = atop(end - start);
  191         if (npages > vm_page_max_wired)
  192                 return (ENOMEM);
  193 #if 0
  194         /*
  195          * XXX - not yet
  196          *
  197          * The limit for transient usage of wired pages should be
  198          * larger than for "permanent" wired pages (mlock()).
  199          *
  200          * Also, the sysctl code, which is the only present user
  201          * of vslock(), does a hard loop on EAGAIN.
  202          */
  203         if (npages + vm_cnt.v_wire_count > vm_page_max_wired)
  204                 return (EAGAIN);
  205 #endif
  206         error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
  207             VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
  208         /*
  209          * Return EFAULT on error to match copy{in,out}() behaviour
  210          * rather than returning ENOMEM like mlock() would.
  211          */
  212         return (error == KERN_SUCCESS ? 0 : EFAULT);
  213 }
  214 
  215 void
  216 vsunlock(void *addr, size_t len)
  217 {
  218 
  219         /* Rely on the parameter sanity checks performed by vslock(). */
  220         (void)vm_map_unwire(&curproc->p_vmspace->vm_map,
  221             trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
  222             VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
  223 }
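/*
 * Sketch of the intended vslock()/vsunlock() pairing (modeled on the
 * sysctl(9) old-buffer wiring that is the present user of vslock();
 * "udata", "kdata" and "ulen" are illustrative): wire the user pages so
 * a later copyout() cannot fault while kernel locks are held, then
 * unwire.
 */
#if 0
	error = vslock(udata, ulen);
	if (error != 0)
		return (error);
	error = copyout(kdata, udata, ulen);
	vsunlock(udata, ulen);
#endif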
  224 
  225 /*
  226  * Pin the page contained within the given object at the given offset.  If the
  227  * page is not resident, allocate and load it using the given object's pager.
  228  * Return the pinned page if successful; otherwise, return NULL.
  229  */
  230 static vm_page_t
  231 vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
  232 {
  233         vm_page_t m;
  234         vm_pindex_t pindex;
  235         int rv;
  236 
  237         VM_OBJECT_WLOCK(object);
  238         pindex = OFF_TO_IDX(offset);
  239         m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
  240         if (m->valid != VM_PAGE_BITS_ALL) {
  241                 rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
  242                 if (rv != VM_PAGER_OK) {
  243                         vm_page_lock(m);
  244                         vm_page_free(m);
  245                         vm_page_unlock(m);
  246                         m = NULL;
  247                         goto out;
  248                 }
  249         }
  250         vm_page_xunbusy(m);
  251         vm_page_lock(m);
  252         vm_page_hold(m);
  253         vm_page_activate(m);
  254         vm_page_unlock(m);
  255 out:
  256         VM_OBJECT_WUNLOCK(object);
  257         return (m);
  258 }
  259 
  260 /*
  261  * Return a CPU private mapping to the page at the given offset within the
  262  * given object.  The page is pinned before it is mapped.
  263  */
  264 struct sf_buf *
  265 vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset)
  266 {
  267         vm_page_t m;
  268 
  269         m = vm_imgact_hold_page(object, offset);
  270         if (m == NULL)
  271                 return (NULL);
  272         sched_pin();
  273         return (sf_buf_alloc(m, SFB_CPUPRIVATE));
  274 }
  275 
  276 /*
  277  * Destroy the given CPU private mapping and unpin the page that it mapped.
  278  */
  279 void
  280 vm_imgact_unmap_page(struct sf_buf *sf)
  281 {
  282         vm_page_t m;
  283 
  284         m = sf_buf_page(sf);
  285         sf_buf_free(sf);
  286         sched_unpin();
  287         vm_page_lock(m);
  288         vm_page_unhold(m);
  289         vm_page_unlock(m);
  290 }
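/*
 * Usage sketch for the image-activator helpers (hypothetical caller such
 * as an ELF image activator; "object", "offset" and "dst" are
 * illustrative): map, copy, unmap.
 */
#if 0
	struct sf_buf *sf;

	sf = vm_imgact_map_page(object, offset);
	if (sf == NULL)
		return (EIO);
	bcopy((void *)sf_buf_kva(sf), dst, PAGE_SIZE);
	vm_imgact_unmap_page(sf);
#endif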
  291 
  292 void
  293 vm_sync_icache(vm_map_t map, vm_offset_t va, vm_offset_t sz)
  294 {
  295 
  296         pmap_sync_icache(map->pmap, va, sz);
  297 }
  298 
  299 struct kstack_cache_entry *kstack_cache;
  300 static int kstack_cache_size = 128;
  301 static int kstacks;
  302 static struct mtx kstack_cache_mtx;
  303 MTX_SYSINIT(kstack_cache, &kstack_cache_mtx, "kstkch", MTX_DEF);
  304 
   305 SYSCTL_INT(_vm, OID_AUTO, kstack_cache_size, CTLFLAG_RW, &kstack_cache_size, 0,
   306     "Size of the kernel stack cache");
   307 SYSCTL_INT(_vm, OID_AUTO, kstacks, CTLFLAG_RD, &kstacks, 0,
   308     "Number of kernel stacks allocated");
  309 
  310 #ifndef KSTACK_MAX_PAGES
  311 #define KSTACK_MAX_PAGES 32
  312 #endif
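/*
 * The kstack cache declared above threads its free list through the
 * stacks themselves: the first words of a freed stack are reused as the
 * list node.  The entry layout, reproduced from <sys/_kstack_cache.h>
 * for illustration (see that header for the authoritative definition):
 */
#if 0
struct kstack_cache_entry {
	vm_object_t ksobj;			/* backing VM object */
	struct kstack_cache_entry *next_ks_entry; /* next cached stack */
};
#endif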
  313 
  314 /*
  315  * Create the kernel stack (including pcb for i386) for a new thread.
   316  * This routine directly affects fork performance for a process and
   317  * creation performance for a thread.
  318  */
  319 int
  320 vm_thread_new(struct thread *td, int pages)
  321 {
  322         vm_object_t ksobj;
  323         vm_offset_t ks;
  324         vm_page_t m, ma[KSTACK_MAX_PAGES];
  325         struct kstack_cache_entry *ks_ce;
  326         int i;
  327 
  328         /* Bounds check */
  329         if (pages <= 1)
  330                 pages = kstack_pages;
  331         else if (pages > KSTACK_MAX_PAGES)
  332                 pages = KSTACK_MAX_PAGES;
  333 
  334         if (pages == kstack_pages) {
  335                 mtx_lock(&kstack_cache_mtx);
  336                 if (kstack_cache != NULL) {
  337                         ks_ce = kstack_cache;
  338                         kstack_cache = ks_ce->next_ks_entry;
  339                         mtx_unlock(&kstack_cache_mtx);
  340 
  341                         td->td_kstack_obj = ks_ce->ksobj;
  342                         td->td_kstack = (vm_offset_t)ks_ce;
  343                         td->td_kstack_pages = kstack_pages;
  344                         return (1);
  345                 }
  346                 mtx_unlock(&kstack_cache_mtx);
  347         }
  348 
  349         /*
  350          * Allocate an object for the kstack.
  351          */
  352         ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
  353         
  354         /*
  355          * Get a kernel virtual address for this thread's kstack.
  356          */
  357 #if defined(__mips__)
  358         /*
  359          * We need to align the kstack's mapped address to fit within
  360          * a single TLB entry.
  361          */
  362         if (vmem_xalloc(kernel_arena, (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE,
  363             PAGE_SIZE * 2, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
  364             M_BESTFIT | M_NOWAIT, &ks)) {
  365                 ks = 0;
  366         }
  367 #else
  368         ks = kva_alloc((pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
  369 #endif
  370         if (ks == 0) {
  371                 printf("vm_thread_new: kstack allocation failed\n");
  372                 vm_object_deallocate(ksobj);
  373                 return (0);
  374         }
  375 
  376         atomic_add_int(&kstacks, 1);
  377         if (KSTACK_GUARD_PAGES != 0) {
  378                 pmap_qremove(ks, KSTACK_GUARD_PAGES);
  379                 ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
  380         }
  381         td->td_kstack_obj = ksobj;
  382         td->td_kstack = ks;
  383         /*
  384          * Knowing the number of pages allocated is useful when you
  385          * want to deallocate them.
  386          */
  387         td->td_kstack_pages = pages;
  388         /* 
  389          * For the length of the stack, link in a real page of ram for each
  390          * page of stack.
  391          */
  392         VM_OBJECT_WLOCK(ksobj);
  393         for (i = 0; i < pages; i++) {
  394                 /*
  395                  * Get a kernel stack page.
  396                  */
  397                 m = vm_page_grab(ksobj, i, VM_ALLOC_NOBUSY |
  398                     VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
  399                 ma[i] = m;
  400                 m->valid = VM_PAGE_BITS_ALL;
  401         }
  402         VM_OBJECT_WUNLOCK(ksobj);
  403         pmap_qenter(ks, ma, pages);
  404         return (1);
  405 }
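/*
 * Caller sketch (cf. thread_alloc() in kern/kern_thread.c; hedged, with
 * the surrounding logic elided): a zero return means the stack could not
 * be allocated and the thread must be freed.
 */
#if 0
	if (!vm_thread_new(td, pages)) {
		uma_zfree(thread_zone, td);
		return (NULL);
	}
	cpu_thread_alloc(td);
#endif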
  406 
  407 static void
  408 vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages)
  409 {
  410         vm_page_t m;
  411         int i;
  412 
  413         atomic_add_int(&kstacks, -1);
  414         pmap_qremove(ks, pages);
  415         VM_OBJECT_WLOCK(ksobj);
  416         for (i = 0; i < pages; i++) {
  417                 m = vm_page_lookup(ksobj, i);
  418                 if (m == NULL)
  419                         panic("vm_thread_dispose: kstack already missing?");
  420                 vm_page_lock(m);
  421                 vm_page_unwire(m, PQ_NONE);
  422                 vm_page_free(m);
  423                 vm_page_unlock(m);
  424         }
  425         VM_OBJECT_WUNLOCK(ksobj);
  426         vm_object_deallocate(ksobj);
  427         kva_free(ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
  428             (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
  429 }
  430 
  431 /*
  432  * Dispose of a thread's kernel stack.
  433  */
  434 void
  435 vm_thread_dispose(struct thread *td)
  436 {
  437         vm_object_t ksobj;
  438         vm_offset_t ks;
  439         struct kstack_cache_entry *ks_ce;
  440         int pages;
  441 
  442         pages = td->td_kstack_pages;
  443         ksobj = td->td_kstack_obj;
  444         ks = td->td_kstack;
  445         td->td_kstack = 0;
  446         td->td_kstack_pages = 0;
  447         if (pages == kstack_pages && kstacks <= kstack_cache_size) {
  448                 ks_ce = (struct kstack_cache_entry *)ks;
  449                 ks_ce->ksobj = ksobj;
  450                 mtx_lock(&kstack_cache_mtx);
  451                 ks_ce->next_ks_entry = kstack_cache;
  452                 kstack_cache = ks_ce;
  453                 mtx_unlock(&kstack_cache_mtx);
  454                 return;
  455         }
  456         vm_thread_stack_dispose(ksobj, ks, pages);
  457 }
  458 
  459 static void
  460 vm_thread_stack_lowmem(void *nulll)
  461 {
  462         struct kstack_cache_entry *ks_ce, *ks_ce1;
  463 
  464         mtx_lock(&kstack_cache_mtx);
  465         ks_ce = kstack_cache;
  466         kstack_cache = NULL;
  467         mtx_unlock(&kstack_cache_mtx);
  468 
  469         while (ks_ce != NULL) {
  470                 ks_ce1 = ks_ce;
  471                 ks_ce = ks_ce->next_ks_entry;
  472 
  473                 vm_thread_stack_dispose(ks_ce1->ksobj, (vm_offset_t)ks_ce1,
  474                     kstack_pages);
  475         }
  476 }
  477 
  478 static void
  479 kstack_cache_init(void *nulll)
  480 {
  481 
  482         EVENTHANDLER_REGISTER(vm_lowmem, vm_thread_stack_lowmem, NULL,
  483             EVENTHANDLER_PRI_ANY);
  484 }
  485 
  486 SYSINIT(vm_kstacks, SI_SUB_KTHREAD_INIT, SI_ORDER_ANY, kstack_cache_init, NULL);
  487 
  488 #ifdef KSTACK_USAGE_PROF
  489 /*
  490  * Track maximum stack used by a thread in kernel.
  491  */
  492 static int max_kstack_used;
  493 
  494 SYSCTL_INT(_debug, OID_AUTO, max_kstack_used, CTLFLAG_RD,
  495     &max_kstack_used, 0,
  496     "Maxiumum stack depth used by a thread in kernel");
  497 
  498 void
  499 intr_prof_stack_use(struct thread *td, struct trapframe *frame)
  500 {
  501         vm_offset_t stack_top;
  502         vm_offset_t current;
  503         int used, prev_used;
  504 
  505         /*
  506          * Testing for interrupted kernel mode isn't strictly
  507          * needed. It optimizes the execution, since interrupts from
  508          * usermode will have only the trap frame on the stack.
  509          */
  510         if (TRAPF_USERMODE(frame))
  511                 return;
  512 
  513         stack_top = td->td_kstack + td->td_kstack_pages * PAGE_SIZE;
  514         current = (vm_offset_t)(uintptr_t)&stack_top;
  515 
  516         /*
  517          * Try to detect if interrupt is using kernel thread stack.
  518          * Hardware could use a dedicated stack for interrupt handling.
  519          */
  520         if (stack_top <= current || current < td->td_kstack)
  521                 return;
  522 
  523         used = stack_top - current;
  524         for (;;) {
  525                 prev_used = max_kstack_used;
  526                 if (prev_used >= used)
  527                         break;
  528                 if (atomic_cmpset_int(&max_kstack_used, prev_used, used))
  529                         break;
  530         }
  531 }
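/*
 * The loop above is the standard lock-free "monotonic maximum" idiom:
 * retry the compare-and-set until either our sample is no longer the
 * larger value or the update lands.  An equivalent standalone sketch
 * (assuming the same atomic_cmpset_int(9) primitive):
 */
#if 0
static void
atomic_max_int(volatile int *max, int val)
{
	int cur;

	do {
		cur = *max;
		if (cur >= val)
			return;	/* a larger value is already recorded */
	} while (atomic_cmpset_int(max, cur, val) == 0);
}
#endif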
  532 #endif /* KSTACK_USAGE_PROF */
  533 
  534 #ifndef NO_SWAPPING
  535 /*
  536  * Allow a thread's kernel stack to be paged out.
  537  */
  538 static void
  539 vm_thread_swapout(struct thread *td)
  540 {
  541         vm_object_t ksobj;
  542         vm_page_t m;
  543         int i, pages;
  544 
  545         cpu_thread_swapout(td);
  546         pages = td->td_kstack_pages;
  547         ksobj = td->td_kstack_obj;
  548         pmap_qremove(td->td_kstack, pages);
  549         VM_OBJECT_WLOCK(ksobj);
  550         for (i = 0; i < pages; i++) {
  551                 m = vm_page_lookup(ksobj, i);
  552                 if (m == NULL)
  553                         panic("vm_thread_swapout: kstack already missing?");
  554                 vm_page_dirty(m);
  555                 vm_page_lock(m);
  556                 vm_page_unwire(m, PQ_INACTIVE);
  557                 vm_page_unlock(m);
  558         }
  559         VM_OBJECT_WUNLOCK(ksobj);
  560 }
  561 
  562 /*
  563  * Bring the kernel stack for a specified thread back in.
  564  */
  565 static void
  566 vm_thread_swapin(struct thread *td)
  567 {
  568         vm_object_t ksobj;
  569         vm_page_t ma[KSTACK_MAX_PAGES];
  570         int pages;
  571 
  572         pages = td->td_kstack_pages;
  573         ksobj = td->td_kstack_obj;
  574         VM_OBJECT_WLOCK(ksobj);
  575         for (int i = 0; i < pages; i++)
  576                 ma[i] = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL |
  577                     VM_ALLOC_WIRED);
  578         for (int i = 0; i < pages;) {
  579                 int j, a, count, rv;
  580 
  581                 vm_page_assert_xbusied(ma[i]);
  582                 if (ma[i]->valid == VM_PAGE_BITS_ALL) {
  583                         vm_page_xunbusy(ma[i]);
  584                         i++;
  585                         continue;
  586                 }
  587                 vm_object_pip_add(ksobj, 1);
  588                 for (j = i + 1; j < pages; j++)
  589                         if (ma[j]->valid == VM_PAGE_BITS_ALL)
  590                                 break;
  591                 rv = vm_pager_has_page(ksobj, ma[i]->pindex, NULL, &a);
  592                 KASSERT(rv == 1, ("%s: missing page %p", __func__, ma[i]));
  593                 count = min(a + 1, j - i);
  594                 rv = vm_pager_get_pages(ksobj, ma + i, count, NULL, NULL);
  595                 KASSERT(rv == VM_PAGER_OK, ("%s: cannot get kstack for proc %d",
  596                     __func__, td->td_proc->p_pid));
  597                 vm_object_pip_wakeup(ksobj);
  598                 for (j = i; j < i + count; j++)
  599                         vm_page_xunbusy(ma[j]);
  600                 i += count;
  601         }
  602         VM_OBJECT_WUNLOCK(ksobj);
  603         pmap_qenter(td->td_kstack, ma, pages);
  604         cpu_thread_swapin(td);
  605 }
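/*
 * Worked example of the clustering loop above: with pages = 4 and only
 * page 2 still valid, the first pass finds j = 2 and issues one pager
 * request for pages 0-1 (count is capped by both vm_pager_has_page()'s
 * read-ahead answer and j - i), the second pass skips the already valid
 * page 2, and the third issues a single-page request for page 3.
 */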
  606 #endif /* !NO_SWAPPING */
  607 
  608 /*
  609  * Implement fork's actions on an address space.
  610  * Here we arrange for the address space to be copied or referenced,
  611  * allocate a user struct (pcb and kernel stack), then call the
  612  * machine-dependent layer to fill those in and make the new process
  613  * ready to run.  The new process is set up so that it returns directly
  614  * to user mode to avoid stack copying and relocation problems.
  615  */
  616 int
  617 vm_forkproc(td, p2, td2, vm2, flags)
  618         struct thread *td;
  619         struct proc *p2;
  620         struct thread *td2;
  621         struct vmspace *vm2;
  622         int flags;
  623 {
  624         struct proc *p1 = td->td_proc;
  625         int error;
  626 
  627         if ((flags & RFPROC) == 0) {
  628                 /*
   629                  * Divorce the memory if it is shared: this changes
   630                  * memory shared amongst threads into memory that is
   631                  * copy-on-write (COW) locally.
  632                  */
  633                 if ((flags & RFMEM) == 0) {
  634                         if (p1->p_vmspace->vm_refcnt > 1) {
  635                                 error = vmspace_unshare(p1);
  636                                 if (error)
  637                                         return (error);
  638                         }
  639                 }
  640                 cpu_fork(td, p2, td2, flags);
  641                 return (0);
  642         }
  643 
  644         if (flags & RFMEM) {
  645                 p2->p_vmspace = p1->p_vmspace;
  646                 atomic_add_int(&p1->p_vmspace->vm_refcnt, 1);
  647         }
  648 
  649         while (vm_page_count_severe()) {
  650                 VM_WAIT;
  651         }
  652 
  653         if ((flags & RFMEM) == 0) {
  654                 p2->p_vmspace = vm2;
  655                 if (p1->p_vmspace->vm_shm)
  656                         shmfork(p1, p2);
  657         }
  658 
  659         /*
  660          * cpu_fork will copy and update the pcb, set up the kernel stack,
  661          * and make the child ready to run.
  662          */
  663         cpu_fork(td, p2, td2, flags);
  664         return (0);
  665 }
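/*
 * Flag sketch (per rfork(2); hedged, since the exact fork1() call sites
 * differ): fork(2) arrives here as RFFDG | RFPROC with vm2 preallocated
 * by vmspace_fork(), while vfork(2) adds RFMEM so the parent's vmspace
 * is shared by reference instead of copied.
 */
#if 0
	/* fork(2)-style: copy-on-write duplicate of the address space. */
	error = vm_forkproc(td, p2, td2, vm2, RFFDG | RFPROC);
	/* vfork(2)-style: share the parent's address space. */
	error = vm_forkproc(td, p2, td2, NULL, RFFDG | RFPROC | RFMEM);
#endif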
  666 
  667 /*
  668  * Called after process has been wait(2)'ed upon and is being reaped.
  669  * The idea is to reclaim resources that we could not reclaim while
  670  * the process was still executing.
  671  */
  672 void
  673 vm_waitproc(p)
  674         struct proc *p;
  675 {
  676 
  677         vmspace_exitfree(p);            /* and clean-out the vmspace */
  678 }
  679 
  680 void
  681 faultin(p)
  682         struct proc *p;
  683 {
  684 #ifdef NO_SWAPPING
  685 
  686         PROC_LOCK_ASSERT(p, MA_OWNED);
  687         if ((p->p_flag & P_INMEM) == 0)
  688                 panic("faultin: proc swapped out with NO_SWAPPING!");
  689 #else /* !NO_SWAPPING */
  690         struct thread *td;
  691 
  692         PROC_LOCK_ASSERT(p, MA_OWNED);
  693         /*
  694          * If another process is swapping in this process,
  695          * just wait until it finishes.
  696          */
  697         if (p->p_flag & P_SWAPPINGIN) {
  698                 while (p->p_flag & P_SWAPPINGIN)
  699                         msleep(&p->p_flag, &p->p_mtx, PVM, "faultin", 0);
  700                 return;
  701         }
  702         if ((p->p_flag & P_INMEM) == 0) {
  703                 /*
  704                  * Don't let another thread swap process p out while we are
  705                  * busy swapping it in.
  706                  */
  707                 ++p->p_lock;
  708                 p->p_flag |= P_SWAPPINGIN;
  709                 PROC_UNLOCK(p);
  710 
  711                 /*
  712                  * We hold no lock here because the list of threads
  713                  * can not change while all threads in the process are
  714                  * swapped out.
  715                  */
  716                 FOREACH_THREAD_IN_PROC(p, td)
  717                         vm_thread_swapin(td);
  718                 PROC_LOCK(p);
  719                 swapclear(p);
  720                 p->p_swtick = ticks;
  721 
  722                 wakeup(&p->p_flag);
  723 
  724                 /* Allow other threads to swap p out now. */
  725                 --p->p_lock;
  726         }
  727 #endif /* NO_SWAPPING */
  728 }
  729 
  730 /*
  731  * This swapin algorithm attempts to swap-in processes only if there
  732  * is enough space for them.  Of course, if a process waits for a long
  733  * time, it will be swapped in anyway.
  734  */
  735 void
  736 swapper(void)
  737 {
  738         struct proc *p;
  739         struct thread *td;
  740         struct proc *pp;
  741         int slptime;
  742         int swtime;
  743         int ppri;
  744         int pri;
  745 
  746 loop:
  747         if (vm_page_count_min()) {
  748                 VM_WAIT;
  749                 goto loop;
  750         }
  751 
  752         pp = NULL;
  753         ppri = INT_MIN;
  754         sx_slock(&allproc_lock);
  755         FOREACH_PROC_IN_SYSTEM(p) {
  756                 PROC_LOCK(p);
  757                 if (p->p_state == PRS_NEW ||
  758                     p->p_flag & (P_SWAPPINGOUT | P_SWAPPINGIN | P_INMEM)) {
  759                         PROC_UNLOCK(p);
  760                         continue;
  761                 }
  762                 swtime = (ticks - p->p_swtick) / hz;
  763                 FOREACH_THREAD_IN_PROC(p, td) {
  764                         /*
  765                          * An otherwise runnable thread of a process
  766                          * swapped out has only the TDI_SWAPPED bit set.
  767                          * 
  768                          */
  769                         thread_lock(td);
  770                         if (td->td_inhibitors == TDI_SWAPPED) {
  771                                 slptime = (ticks - td->td_slptick) / hz;
  772                                 pri = swtime + slptime;
  773                                 if ((td->td_flags & TDF_SWAPINREQ) == 0)
  774                                         pri -= p->p_nice * 8;
  775                                 /*
  776                                  * if this thread is higher priority
  777                                  * and there is enough space, then select
  778                                  * this process instead of the previous
  779                                  * selection.
  780                                  */
  781                                 if (pri > ppri) {
  782                                         pp = p;
  783                                         ppri = pri;
  784                                 }
  785                         }
  786                         thread_unlock(td);
  787                 }
  788                 PROC_UNLOCK(p);
  789         }
  790         sx_sunlock(&allproc_lock);
  791 
  792         /*
  793          * Nothing to do, back to sleep.
  794          */
  795         if ((p = pp) == NULL) {
  796                 tsleep(&proc0, PVM, "swapin", MAXSLP * hz / 2);
  797                 goto loop;
  798         }
  799         PROC_LOCK(p);
  800 
  801         /*
  802          * Another process may be bringing or may have already
  803          * brought this process in while we traverse all threads.
   804          * Or, this process may meanwhile be swapped out again.
  805          */
  806         if (p->p_flag & (P_INMEM | P_SWAPPINGOUT | P_SWAPPINGIN)) {
  807                 PROC_UNLOCK(p);
  808                 goto loop;
  809         }
  810 
  811         /*
   812          * We would like to bring someone in, but only if there is space;
   813          * the vm_page_count_min() check at the top of the loop covers that.
  814          */
  815         faultin(p);
  816         PROC_UNLOCK(p);
  817         goto loop;
  818 }
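/*
 * Worked example of the selection score above: with swtime = 20s swapped
 * out and slptime = 10s asleep, a nice -5 process scores
 * pri = 20 + 10 - (-5 * 8) = 70, while a nice +5 process with the same
 * times scores 30 - 40 = -10; the higher score wins, so "nicer"
 * processes wait longer to be brought back in.
 */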
  819 
  820 void
  821 kick_proc0(void)
  822 {
  823 
  824         wakeup(&proc0);
  825 }
  826 
  827 #ifndef NO_SWAPPING
  828 
  829 /*
   830  * swap_idle_threshold1 is the guaranteed swapped-in time for a process.
  831  */
  832 static int swap_idle_threshold1 = 2;
  833 SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
  834     &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process");
  835 
  836 /*
   837  * swap_idle_threshold2 is the time that a process can be idle before
  838  * it will be swapped out, if idle swapping is enabled.
  839  */
  840 static int swap_idle_threshold2 = 10;
  841 SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
  842     &swap_idle_threshold2, 0, "Time before a process will be swapped out");
  843 
  844 /*
  845  * First, if any processes have been sleeping or stopped for at least
  846  * "swap_idle_threshold1" seconds, they are swapped out.  If, however,
  847  * no such processes exist, then the longest-sleeping or stopped
  848  * process is swapped out.  Finally, and only as a last resort, if
  849  * there are no sleeping or stopped processes, the longest-resident
  850  * process is swapped out.
  851  */
  852 void
  853 swapout_procs(action)
  854 int action;
  855 {
  856         struct proc *p;
  857         struct thread *td;
  858         int didswap = 0;
  859 
  860 retry:
  861         sx_slock(&allproc_lock);
  862         FOREACH_PROC_IN_SYSTEM(p) {
  863                 struct vmspace *vm;
  864                 int minslptime = 100000;
  865                 int slptime;
  866 
  867                 PROC_LOCK(p);
  868                 /*
  869                  * Watch out for a process in
  870                  * creation.  It may have no
  871                  * address space or lock yet.
  872                  */
  873                 if (p->p_state == PRS_NEW) {
  874                         PROC_UNLOCK(p);
  875                         continue;
  876                 }
  877                 /*
  878                  * An aio daemon switches its
  879                  * address space while running.
  880                  * Perform a quick check whether
  881                  * a process has P_SYSTEM.
  882                  * Filter out exiting processes.
  883                  */
  884                 if ((p->p_flag & (P_SYSTEM | P_WEXIT)) != 0) {
  885                         PROC_UNLOCK(p);
  886                         continue;
  887                 }
  888                 _PHOLD_LITE(p);
  889                 PROC_UNLOCK(p);
  890                 sx_sunlock(&allproc_lock);
  891 
  892                 /*
  893                  * Do not swapout a process that
  894                  * is waiting for VM data
  895                  * structures as there is a possible
  896                  * deadlock.  Test this first as
  897                  * this may block.
  898                  *
  899                  * Lock the map until swapout
  900                  * finishes, or a thread of this
  901                  * process may attempt to alter
  902                  * the map.
  903                  */
  904                 vm = vmspace_acquire_ref(p);
  905                 if (vm == NULL)
  906                         goto nextproc2;
  907                 if (!vm_map_trylock(&vm->vm_map))
  908                         goto nextproc1;
  909 
  910                 PROC_LOCK(p);
  911                 if (p->p_lock != 1 || (p->p_flag & (P_STOPPED_SINGLE |
  912                     P_TRACED | P_SYSTEM)) != 0)
  913                         goto nextproc;
  914 
  915                 /*
   916          * Only aiod changes its vmspace; however, it will be
   917          * skipped because the if statement above checks
   918          * for P_SYSTEM.
  919                  */
  920                 if ((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) != P_INMEM)
  921                         goto nextproc;
  922 
  923                 switch (p->p_state) {
  924                 default:
  925                         /* Don't swap out processes in any sort
  926                          * of 'special' state. */
  927                         break;
  928 
  929                 case PRS_NORMAL:
  930                         /*
  931                          * do not swapout a realtime process
  932                          * Check all the thread groups..
  933                          */
  934                         FOREACH_THREAD_IN_PROC(p, td) {
  935                                 thread_lock(td);
  936                                 if (PRI_IS_REALTIME(td->td_pri_class)) {
  937                                         thread_unlock(td);
  938                                         goto nextproc;
  939                                 }
  940                                 slptime = (ticks - td->td_slptick) / hz;
  941                                 /*
  942                                  * Guarantee swap_idle_threshold1
  943                                  * time in memory.
  944                                  */
  945                                 if (slptime < swap_idle_threshold1) {
  946                                         thread_unlock(td);
  947                                         goto nextproc;
  948                                 }
  949 
  950                                 /*
  951                                  * Do not swapout a process if it is
  952                                  * waiting on a critical event of some
  953                                  * kind or there is a thread whose
  954                                  * pageable memory may be accessed.
  955                                  *
  956                                  * This could be refined to support
  957                                  * swapping out a thread.
  958                                  */
  959                                 if (!thread_safetoswapout(td)) {
  960                                         thread_unlock(td);
  961                                         goto nextproc;
  962                                 }
  963                                 /*
  964                                  * If the system is under memory stress,
  965                                  * or if we are swapping
  966                                  * idle processes >= swap_idle_threshold2,
  967                                  * then swap the process out.
  968                                  */
  969                                 if (((action & VM_SWAP_NORMAL) == 0) &&
  970                                     (((action & VM_SWAP_IDLE) == 0) ||
  971                                     (slptime < swap_idle_threshold2))) {
  972                                         thread_unlock(td);
  973                                         goto nextproc;
  974                                 }
  975 
  976                                 if (minslptime > slptime)
  977                                         minslptime = slptime;
  978                                 thread_unlock(td);
  979                         }
  980 
  981                         /*
  982                          * If the pageout daemon didn't free enough pages,
  983                          * or if this process is idle and the system is
  984                          * configured to swap proactively, swap it out.
  985                          */
  986                         if ((action & VM_SWAP_NORMAL) ||
  987                                 ((action & VM_SWAP_IDLE) &&
  988                                  (minslptime > swap_idle_threshold2))) {
  989                                 _PRELE(p);
  990                                 if (swapout(p) == 0)
  991                                         didswap++;
  992                                 PROC_UNLOCK(p);
  993                                 vm_map_unlock(&vm->vm_map);
  994                                 vmspace_free(vm);
  995                                 goto retry;
  996                         }
  997                 }
  998 nextproc:
  999                 PROC_UNLOCK(p);
 1000                 vm_map_unlock(&vm->vm_map);
 1001 nextproc1:
 1002                 vmspace_free(vm);
 1003 nextproc2:
 1004                 sx_slock(&allproc_lock);
 1005                 PRELE(p);
 1006         }
 1007         sx_sunlock(&allproc_lock);
 1008         /*
 1009          * If we swapped something out, and another process needed memory,
 1010          * then wakeup the sched process.
 1011          */
 1012         if (didswap)
 1013                 wakeup(&proc0);
 1014 }
 1015 
 1016 static void
 1017 swapclear(p)
 1018         struct proc *p;
 1019 {
 1020         struct thread *td;
 1021 
 1022         PROC_LOCK_ASSERT(p, MA_OWNED);
 1023 
 1024         FOREACH_THREAD_IN_PROC(p, td) {
 1025                 thread_lock(td);
 1026                 td->td_flags |= TDF_INMEM;
 1027                 td->td_flags &= ~TDF_SWAPINREQ;
 1028                 TD_CLR_SWAPPED(td);
 1029                 if (TD_CAN_RUN(td))
 1030                         if (setrunnable(td)) {
 1031 #ifdef INVARIANTS
 1032                                 /*
 1033                                  * XXX: We just cleared TDI_SWAPPED
 1034                                  * above and set TDF_INMEM, so this
 1035                                  * should never happen.
 1036                                  */
 1037                                 panic("not waking up swapper");
 1038 #endif
 1039                         }
 1040                 thread_unlock(td);
 1041         }
 1042         p->p_flag &= ~(P_SWAPPINGIN|P_SWAPPINGOUT);
 1043         p->p_flag |= P_INMEM;
 1044 }
 1045 
 1046 static int
 1047 swapout(p)
 1048         struct proc *p;
 1049 {
 1050         struct thread *td;
 1051 
 1052         PROC_LOCK_ASSERT(p, MA_OWNED);
 1053 #if defined(SWAP_DEBUG)
 1054         printf("swapping out %d\n", p->p_pid);
 1055 #endif
 1056 
 1057         /*
 1058          * The states of this process and its threads may have changed
 1059          * by now.  Assuming that there is only one pageout daemon thread,
 1060          * this process should still be in memory.
 1061          */
 1062         KASSERT((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) == P_INMEM,
 1063                 ("swapout: lost a swapout race?"));
 1064 
 1065         /*
 1066          * remember the process resident count
 1067          */
 1068         p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);
 1069         /*
 1070          * Check and mark all threads before we proceed.
 1071          */
 1072         p->p_flag &= ~P_INMEM;
 1073         p->p_flag |= P_SWAPPINGOUT;
 1074         FOREACH_THREAD_IN_PROC(p, td) {
 1075                 thread_lock(td);
 1076                 if (!thread_safetoswapout(td)) {
 1077                         thread_unlock(td);
 1078                         swapclear(p);
 1079                         return (EBUSY);
 1080                 }
 1081                 td->td_flags &= ~TDF_INMEM;
 1082                 TD_SET_SWAPPED(td);
 1083                 thread_unlock(td);
 1084         }
 1085         td = FIRST_THREAD_IN_PROC(p);
 1086         ++td->td_ru.ru_nswap;
 1087         PROC_UNLOCK(p);
 1088 
 1089         /*
 1090          * This list is stable because all threads are now prevented from
 1091          * running.  The list is only modified in the context of a running
 1092          * thread in this process.
 1093          */
 1094         FOREACH_THREAD_IN_PROC(p, td)
 1095                 vm_thread_swapout(td);
 1096 
 1097         PROC_LOCK(p);
 1098         p->p_flag &= ~P_SWAPPINGOUT;
 1099         p->p_swtick = ticks;
 1100         return (0);
 1101 }
 1102 #endif /* !NO_SWAPPING */
