


FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_glue.c


/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)vm_glue.c     8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"
#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"
#include "opt_kstack_usage_prof.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/domainset.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/shm.h>
#include <sys/smp.h>
#include <sys/vmmeter.h>
#include <sys/vmem.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_domainset.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

#include <machine/cpu.h>

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  In most cases
 * just checking the vm_map_entry is sufficient within the kernel's address
 * space.
 */
int
kernacc(void *addr, int len, int rw)
{
        boolean_t rv;
        vm_offset_t saddr, eaddr;
        vm_prot_t prot;

        KASSERT((rw & ~VM_PROT_ALL) == 0,
            ("illegal ``rw'' argument to kernacc (%x)\n", rw));

        if ((vm_offset_t)addr + len > vm_map_max(kernel_map) ||
            (vm_offset_t)addr + len < (vm_offset_t)addr)
                return (FALSE);

        prot = rw;
        saddr = trunc_page((vm_offset_t)addr);
        eaddr = round_page((vm_offset_t)addr + len);
        vm_map_lock_read(kernel_map);
        rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
        vm_map_unlock_read(kernel_map);
        return (rv == TRUE);
}

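/*
 * Illustrative usage sketch (not part of vm_glue.c): a kernel component
 * might use kernacc() to sanity-check a kernel buffer before touching
 * it.  The helper name and the EXAMPLE_USAGE guard are hypothetical.
 */
#ifdef EXAMPLE_USAGE
static int
example_check_kernel_buffer(void *buf, int len)
{

        /* Only proves the map entries permit access, per the WARNING above. */
        if (!kernacc(buf, len, VM_PROT_READ | VM_PROT_WRITE))
                return (EFAULT);
        return (0);
}
#endif
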
/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
 * used in conjunction with this call.
 */
int
useracc(void *addr, int len, int rw)
{
        boolean_t rv;
        vm_prot_t prot;
        vm_map_t map;

        KASSERT((rw & ~VM_PROT_ALL) == 0,
            ("illegal ``rw'' argument to useracc (%x)\n", rw));
        prot = rw;
        map = &curproc->p_vmspace->vm_map;
        if ((vm_offset_t)addr + len > vm_map_max(map) ||
            (vm_offset_t)addr + len < (vm_offset_t)addr) {
                return (FALSE);
        }
        vm_map_lock_read(map);
        rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
            round_page((vm_offset_t)addr + len), prot);
        vm_map_unlock_read(map);
        return (rv == TRUE);
}

int
vslock(void *addr, size_t len)
{
        vm_offset_t end, last, start;
        vm_size_t npages;
        int error;

        last = (vm_offset_t)addr + len;
        start = trunc_page((vm_offset_t)addr);
        end = round_page(last);
        if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)
                return (EINVAL);
        npages = atop(end - start);
        if (npages > vm_page_max_user_wired)
                return (ENOMEM);
        error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
            VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
        if (error == KERN_SUCCESS) {
                curthread->td_vslock_sz += len;
                return (0);
        }

        /*
         * Return EFAULT on error to match copy{in,out}() behaviour
         * rather than returning ENOMEM like mlock() would.
         */
        return (EFAULT);
}

void
vsunlock(void *addr, size_t len)
{

        /* Rely on the parameter sanity checks performed by vslock(). */
        MPASS(curthread->td_vslock_sz >= len);
        curthread->td_vslock_sz -= len;
        (void)vm_map_unwire(&curproc->p_vmspace->vm_map,
            trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
            VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
}

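/*
 * Illustrative usage sketch (not part of vm_glue.c): wire a user buffer
 * with vslock() so that a later copyout() cannot take a page fault that
 * sleeps for pages, then unwire it with vsunlock().  This mirrors how
 * sysctl(9) wires old buffers; the helper itself is hypothetical.
 */
#ifdef EXAMPLE_USAGE
static int
example_copyout_wired(const void *kbuf, void *ubuf, size_t len)
{
        int error;

        error = vslock(ubuf, len);      /* wire the user pages */
        if (error != 0)
                return (error);
        error = copyout(kbuf, ubuf, len);
        vsunlock(ubuf, len);            /* unwire them again */
        return (error);
}
#endif
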
/*
 * Pin the page contained within the given object at the given offset.  If the
 * page is not resident, allocate and load it using the given object's pager.
 * Return the pinned page if successful; otherwise, return NULL.
 */
static vm_page_t
vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
{
        vm_page_t m;
        vm_pindex_t pindex;

        pindex = OFF_TO_IDX(offset);
        (void)vm_page_grab_valid_unlocked(&m, object, pindex,
            VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED);
        return (m);
}

/*
 * Return a CPU private mapping to the page at the given offset within the
 * given object.  The page is pinned before it is mapped.
 */
struct sf_buf *
vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset)
{
        vm_page_t m;

        m = vm_imgact_hold_page(object, offset);
        if (m == NULL)
                return (NULL);
        sched_pin();
        return (sf_buf_alloc(m, SFB_CPUPRIVATE));
}

/*
 * Destroy the given CPU private mapping and unpin the page that it mapped.
 */
void
vm_imgact_unmap_page(struct sf_buf *sf)
{
        vm_page_t m;

        m = sf_buf_page(sf);
        sf_buf_free(sf);
        sched_unpin();
        vm_page_unwire(m, PQ_ACTIVE);
}

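/*
 * Illustrative usage sketch (not part of vm_glue.c): an image activator
 * such as the ELF loader maps a page of the executable, copies from it
 * through the sf_buf's kernel virtual address, and releases the mapping.
 * The helper is hypothetical and assumes off is page aligned; real
 * callers add the offset within the page to the mapped address.
 */
#ifdef EXAMPLE_USAGE
static int
example_read_page(vm_object_t object, vm_ooffset_t off, void *dst,
    size_t len)
{
        struct sf_buf *sf;

        sf = vm_imgact_map_page(object, off);   /* pins and maps the page */
        if (sf == NULL)
                return (EIO);
        bcopy((void *)sf_buf_kva(sf), dst, len);
        vm_imgact_unmap_page(sf);               /* unmaps and unpins */
        return (0);
}
#endif
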
void
vm_sync_icache(vm_map_t map, vm_offset_t va, vm_offset_t sz)
{

        pmap_sync_icache(map->pmap, va, sz);
}

vm_object_t kstack_object;
static uma_zone_t kstack_cache;
static int kstack_cache_size;

static int
sysctl_kstack_cache_size(SYSCTL_HANDLER_ARGS)
{
        int error, oldsize;

        oldsize = kstack_cache_size;
        error = sysctl_handle_int(oidp, arg1, arg2, req);
        if (error == 0 && req->newptr && oldsize != kstack_cache_size)
                uma_zone_set_maxcache(kstack_cache, kstack_cache_size);
        return (error);
}
SYSCTL_PROC(_vm, OID_AUTO, kstack_cache_size,
    CTLTYPE_INT|CTLFLAG_MPSAFE|CTLFLAG_RW, &kstack_cache_size, 0,
    sysctl_kstack_cache_size, "IU", "Maximum number of cached kernel stacks");

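/*
 * The knob above is exported as vm.kstack_cache_size and can be read or
 * tuned from userland.  A minimal sketch using sysctlbyname(3)
 * (userland code, not part of this file):
 *
 *      int size;
 *      size_t sz = sizeof(size);
 *
 *      if (sysctlbyname("vm.kstack_cache_size", &size, &sz, NULL, 0) == 0)
 *              printf("cached kstacks capped at %d\n", size);
 */
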
/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 */
static vm_offset_t
vm_thread_stack_create(struct domainset *ds, int pages)
{
        vm_page_t ma[KSTACK_MAX_PAGES];
        vm_offset_t ks;
        int i;

        /*
         * Get a kernel virtual address for this thread's kstack.
         */
#if defined(__mips__)
        /*
         * We need to align the kstack's mapped address to fit within
         * a single TLB entry.
         */
        if (vmem_xalloc(kernel_arena, (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE,
            PAGE_SIZE * 2, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
            M_BESTFIT | M_NOWAIT, &ks)) {
                ks = 0;
        }
#else
        ks = kva_alloc((pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
#endif
        if (ks == 0) {
                printf("%s: kstack allocation failed\n", __func__);
                return (0);
        }

        if (KSTACK_GUARD_PAGES != 0) {
                pmap_qremove(ks, KSTACK_GUARD_PAGES);
                ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
        }

        /*
         * Allocate physical pages to back the stack.
         */
        vm_thread_stack_back(ds, ks, ma, pages, VM_ALLOC_NORMAL);
        for (i = 0; i < pages; i++)
                vm_page_valid(ma[i]);
        pmap_qenter(ks, ma, pages);

        return (ks);
}

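/*
 * Worked example of the layout above (illustrative): with pages = 4 and
 * KSTACK_GUARD_PAGES = 1, the KVA allocation covers 5 pages.  The lowest
 * page is left unmapped (pmap_qremove()), so a stack overflow faults
 * immediately instead of silently corrupting the adjacent allocation,
 * and ks is advanced past the guard to the 4 usable pages that
 * pmap_qenter() maps.
 */
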
static void
vm_thread_stack_dispose(vm_offset_t ks, int pages)
{
        vm_page_t m;
        vm_pindex_t pindex;
        int i;

        pindex = atop(ks - VM_MIN_KERNEL_ADDRESS);

        pmap_qremove(ks, pages);
        VM_OBJECT_WLOCK(kstack_object);
        for (i = 0; i < pages; i++) {
                m = vm_page_lookup(kstack_object, pindex + i);
                if (m == NULL)
                        panic("%s: kstack already missing?", __func__);
                vm_page_xbusy_claim(m);
                vm_page_unwire_noq(m);
                vm_page_free(m);
        }
        VM_OBJECT_WUNLOCK(kstack_object);
        kva_free(ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
            (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
}

/*
 * Allocate the kernel stack for a new thread.
 */
int
vm_thread_new(struct thread *td, int pages)
{
        vm_offset_t ks;

        /* Bounds check */
        if (pages <= 1)
                pages = kstack_pages;
        else if (pages > KSTACK_MAX_PAGES)
                pages = KSTACK_MAX_PAGES;

        ks = 0;
        if (pages == kstack_pages && kstack_cache != NULL)
                ks = (vm_offset_t)uma_zalloc(kstack_cache, M_NOWAIT);

        /*
         * Ensure that kstack objects can draw pages from any memory
         * domain.  Otherwise a local memory shortage can block a process
         * swap-in.
         */
        if (ks == 0)
                ks = vm_thread_stack_create(DOMAINSET_PREF(PCPU_GET(domain)),
                    pages);
        if (ks == 0)
                return (0);
        td->td_kstack = ks;
        td->td_kstack_pages = pages;
        return (1);
}

/*
 * Dispose of a thread's kernel stack.
 */
void
vm_thread_dispose(struct thread *td)
{
        vm_offset_t ks;
        int pages;

        pages = td->td_kstack_pages;
        ks = td->td_kstack;
        td->td_kstack = 0;
        td->td_kstack_pages = 0;
        if (pages == kstack_pages)
                uma_zfree(kstack_cache, (void *)ks);
        else
                vm_thread_stack_dispose(ks, pages);
}

/*
 * Allocate physical pages, following the specified NUMA policy, to back a
 * kernel stack.
 */
void
vm_thread_stack_back(struct domainset *ds, vm_offset_t ks, vm_page_t ma[],
    int npages, int req_class)
{
        vm_pindex_t pindex;
        int n;

        pindex = atop(ks - VM_MIN_KERNEL_ADDRESS);

        VM_OBJECT_WLOCK(kstack_object);
        for (n = 0; n < npages;) {
                if (vm_ndomains > 1)
                        kstack_object->domain.dr_policy = ds;

                /*
                 * Use WAITFAIL to force a reset of the domain selection policy
                 * if we had to sleep for pages.
                 */
                n += vm_page_grab_pages(kstack_object, pindex + n,
                    req_class | VM_ALLOC_WIRED | VM_ALLOC_WAITFAIL,
                    &ma[n], npages - n);
        }
        VM_OBJECT_WUNLOCK(kstack_object);
}

static int
kstack_import(void *arg, void **store, int cnt, int domain, int flags)
{
        struct domainset *ds;
        int i;

        if (domain == UMA_ANYDOMAIN)
                ds = DOMAINSET_RR();
        else
                ds = DOMAINSET_PREF(domain);

        for (i = 0; i < cnt; i++) {
                store[i] = (void *)vm_thread_stack_create(ds, kstack_pages);
                if (store[i] == NULL)
                        break;
        }
        return (i);
}

static void
kstack_release(void *arg, void **store, int cnt)
{
        vm_offset_t ks;
        int i;

        for (i = 0; i < cnt; i++) {
                ks = (vm_offset_t)store[i];
                vm_thread_stack_dispose(ks, kstack_pages);
        }
}

static void
kstack_cache_init(void *null)
{
        kstack_object = vm_object_allocate(OBJT_SWAP,
            atop(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS));
        kstack_cache = uma_zcache_create("kstack_cache",
            kstack_pages * PAGE_SIZE, NULL, NULL, NULL, NULL,
            kstack_import, kstack_release, NULL,
            UMA_ZONE_FIRSTTOUCH);
        kstack_cache_size = imax(128, mp_ncpus * 4);
        uma_zone_set_maxcache(kstack_cache, kstack_cache_size);
}
SYSINIT(vm_kstacks, SI_SUB_KMEM, SI_ORDER_ANY, kstack_cache_init, NULL);

#ifdef KSTACK_USAGE_PROF
/*
 * Track maximum stack used by a thread in kernel.
 */
static int max_kstack_used;

SYSCTL_INT(_debug, OID_AUTO, max_kstack_used, CTLFLAG_RD,
    &max_kstack_used, 0,
    "Maximum stack depth used by a thread in kernel");

void
intr_prof_stack_use(struct thread *td, struct trapframe *frame)
{
        vm_offset_t stack_top;
        vm_offset_t current;
        int used, prev_used;

        /*
         * Testing for interrupted kernel mode isn't strictly
         * needed. It optimizes the execution, since interrupts from
         * usermode will have only the trap frame on the stack.
         */
        if (TRAPF_USERMODE(frame))
                return;

        stack_top = td->td_kstack + td->td_kstack_pages * PAGE_SIZE;
        current = (vm_offset_t)(uintptr_t)&stack_top;

        /*
         * Try to detect if interrupt is using kernel thread stack.
         * Hardware could use a dedicated stack for interrupt handling.
         */
        if (stack_top <= current || current < td->td_kstack)
                return;

        used = stack_top - current;
        for (;;) {
                prev_used = max_kstack_used;
                if (prev_used >= used)
                        break;
                if (atomic_cmpset_int(&max_kstack_used, prev_used, used))
                        break;
        }
}
#endif /* KSTACK_USAGE_PROF */

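/*
 * The loop above implements a lock-free "atomic max": re-read the
 * current maximum and try to install the larger value until either the
 * compare-and-set succeeds or another CPU has already recorded a deeper
 * stack.  A generic sketch of the same pattern (the helper and the
 * EXAMPLE_USAGE guard are hypothetical):
 */
#ifdef EXAMPLE_USAGE
static void
example_atomic_max(volatile u_int *max, u_int val)
{
        u_int old;

        for (;;) {
                old = *max;
                if (old >= val)
                        break;  /* an equal or larger value is already stored */
                if (atomic_cmpset_int(max, old, val))
                        break;  /* we installed the new maximum */
        }
}
#endif
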
/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
int
vm_forkproc(struct thread *td, struct proc *p2, struct thread *td2,
    struct vmspace *vm2, int flags)
{
        struct proc *p1 = td->td_proc;
        struct domainset *dset;
        int error;

        if ((flags & RFPROC) == 0) {
                /*
                 * Divorce the memory, if it is shared.  Essentially,
                 * this changes memory shared amongst threads into
                 * memory that is copy-on-write locally.
                 */
                if ((flags & RFMEM) == 0) {
                        if (refcount_load(&p1->p_vmspace->vm_refcnt) > 1) {
                                error = vmspace_unshare(p1);
                                if (error)
                                        return (error);
                        }
                }
                cpu_fork(td, p2, td2, flags);
                return (0);
        }

        if (flags & RFMEM) {
                p2->p_vmspace = p1->p_vmspace;
                refcount_acquire(&p1->p_vmspace->vm_refcnt);
        }
        dset = td2->td_domain.dr_policy;
        while (vm_page_count_severe_set(&dset->ds_mask)) {
                vm_wait_doms(&dset->ds_mask, 0);
        }

        if ((flags & RFMEM) == 0) {
                p2->p_vmspace = vm2;
                if (p1->p_vmspace->vm_shm)
                        shmfork(p1, p2);
        }

        /*
         * cpu_fork will copy and update the pcb, set up the kernel stack,
         * and make the child ready to run.
         */
        cpu_fork(td, p2, td2, flags);
        return (0);
}

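/*
 * From userland these paths are reached through fork(2) and rfork(2).
 * An illustrative summary of the memory behaviour only:
 *
 *      rfork(RFPROC);          child gets a copy-on-write copy of the
 *                              parent's address space, as with fork()
 *      rfork(RFPROC | RFMEM);  child shares the parent's vmspace
 */
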
/*
 * Called after a process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(struct proc *p)
{

        vmspace_exitfree(p);            /* and clean-out the vmspace */
}

void
kick_proc0(void)
{

        wakeup(&proc0);
}
