FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_map.c

    1 /*-
    2  * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
    3  *
    4  * Copyright (c) 1991, 1993
    5  *      The Regents of the University of California.  All rights reserved.
    6  *
    7  * This code is derived from software contributed to Berkeley by
    8  * The Mach Operating System project at Carnegie-Mellon University.
    9  *
   10  * Redistribution and use in source and binary forms, with or without
   11  * modification, are permitted provided that the following conditions
   12  * are met:
   13  * 1. Redistributions of source code must retain the above copyright
   14  *    notice, this list of conditions and the following disclaimer.
   15  * 2. Redistributions in binary form must reproduce the above copyright
   16  *    notice, this list of conditions and the following disclaimer in the
   17  *    documentation and/or other materials provided with the distribution.
   18  * 3. Neither the name of the University nor the names of its contributors
   19  *    may be used to endorse or promote products derived from this software
   20  *    without specific prior written permission.
   21  *
   22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   32  * SUCH DAMAGE.
   33  *
   34  *      from: @(#)vm_map.c      8.3 (Berkeley) 1/12/94
   35  *
   36  *
   37  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
   38  * All rights reserved.
   39  *
   40  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
   41  *
   42  * Permission to use, copy, modify and distribute this software and
   43  * its documentation is hereby granted, provided that both the copyright
   44  * notice and this permission notice appear in all copies of the
   45  * software, derivative works or modified versions, and any portions
   46  * thereof, and that both notices appear in supporting documentation.
   47  *
   48  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
   49  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
   50  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
   51  *
   52  * Carnegie Mellon requests users of this software to return to
   53  *
   54  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
   55  *  School of Computer Science
   56  *  Carnegie Mellon University
   57  *  Pittsburgh PA 15213-3890
   58  *
   59  * any improvements or extensions that they make and grant Carnegie the
   60  * rights to redistribute these changes.
   61  */
   62 
   63 /*
   64  *      Virtual memory mapping module.
   65  */
   66 
   67 #include <sys/cdefs.h>
   68 __FBSDID("$FreeBSD: head/sys/vm/vm_map.c 340064 2018-11-02 16:26:44Z markj $");
   69 
   70 #include <sys/param.h>
   71 #include <sys/systm.h>
   72 #include <sys/kernel.h>
   73 #include <sys/ktr.h>
   74 #include <sys/lock.h>
   75 #include <sys/mutex.h>
   76 #include <sys/proc.h>
   77 #include <sys/vmmeter.h>
   78 #include <sys/mman.h>
   79 #include <sys/vnode.h>
   80 #include <sys/racct.h>
   81 #include <sys/resourcevar.h>
   82 #include <sys/rwlock.h>
   83 #include <sys/file.h>
   84 #include <sys/sysctl.h>
   85 #include <sys/sysent.h>
   86 #include <sys/shm.h>
   87 
   88 #include <vm/vm.h>
   89 #include <vm/vm_param.h>
   90 #include <vm/pmap.h>
   91 #include <vm/vm_map.h>
   92 #include <vm/vm_page.h>
   93 #include <vm/vm_object.h>
   94 #include <vm/vm_pager.h>
   95 #include <vm/vm_kern.h>
   96 #include <vm/vm_extern.h>
   97 #include <vm/vnode_pager.h>
   98 #include <vm/swap_pager.h>
   99 #include <vm/uma.h>
  100 
  101 /*
  102  *      Virtual memory maps provide for the mapping, protection,
  103  *      and sharing of virtual memory objects.  In addition,
  104  *      this module provides for an efficient virtual copy of
  105  *      memory from one map to another.
  106  *
  107  *      Synchronization is required prior to most operations.
  108  *
  109  *      Maps consist of an ordered doubly-linked list of simple
  110  *      entries; a self-adjusting binary search tree of these
  111  *      entries is used to speed up lookups.
  112  *
  113  *      Since portions of maps are specified by start/end addresses,
  114  *      which may not align with existing map entries, all
  115  *      routines merely "clip" entries to these start/end values.
  116  *      [That is, an entry is split into two, bordering at a
  117  *      start or end value.]  Note that these clippings may not
  118  *      always be necessary (as the two resulting entries are then
  119  *      not changed); however, the clipping is done for convenience.
  120  *
  121  *      As mentioned above, virtual copy operations are performed
  122  *      by copying VM object references from one map to
  123  *      another, and then marking both regions as copy-on-write.
  124  */
  125 
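To make the clipping described above concrete, here is a minimal standalone
sketch.  The struct is a hypothetical stand-in that models only the address
range of a map entry, not the real vm_map_entry:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for a map entry: just an address range. */
struct range_entry {
	unsigned long start, end;
};

/*
 * Split *e at "addr": *e is trimmed to [start, addr) and a new entry
 * covering [addr, end) is returned, mirroring how a map entry is
 * clipped at a start or end boundary.
 */
static struct range_entry *
clip_at(struct range_entry *e, unsigned long addr)
{
	struct range_entry *hi = malloc(sizeof(*hi));

	hi->start = addr;
	hi->end = e->end;
	e->end = addr;
	return (hi);
}

int
main(void)
{
	struct range_entry lo = { 0x1000, 0x5000 };
	struct range_entry *hi = clip_at(&lo, 0x3000);

	printf("[%#lx, %#lx) and [%#lx, %#lx)\n",
	    lo.start, lo.end, hi->start, hi->end);
	free(hi);
	return (0);
}
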
  126 static struct mtx map_sleep_mtx;
  127 static uma_zone_t mapentzone;
  128 static uma_zone_t kmapentzone;
  129 static uma_zone_t mapzone;
  130 static uma_zone_t vmspace_zone;
  131 static int vmspace_zinit(void *mem, int size, int flags);
   132 static int vm_map_zinit(void *mem, int size, int flags);
  133 static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min,
  134     vm_offset_t max);
  135 static int vm_map_alignspace(vm_map_t map, vm_object_t object,
  136     vm_ooffset_t offset, vm_offset_t *addr, vm_size_t length,
  137     vm_offset_t max_addr, vm_offset_t alignment);
  138 static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
  139 static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
  140 static void vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry);
  141 static int vm_map_growstack(vm_map_t map, vm_offset_t addr,
  142     vm_map_entry_t gap_entry);
  143 static void vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
  144     vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags);
  145 #ifdef INVARIANTS
  146 static void vm_map_zdtor(void *mem, int size, void *arg);
  147 static void vmspace_zdtor(void *mem, int size, void *arg);
  148 #endif
  149 static int vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos,
  150     vm_size_t max_ssize, vm_size_t growsize, vm_prot_t prot, vm_prot_t max,
  151     int cow);
  152 static void vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
  153     vm_offset_t failed_addr);
  154 
  155 #define ENTRY_CHARGED(e) ((e)->cred != NULL || \
  156     ((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \
  157      !((e)->eflags & MAP_ENTRY_NEEDS_COPY)))
  158 
  159 /* 
  160  * PROC_VMSPACE_{UN,}LOCK() can be a noop as long as vmspaces are type
  161  * stable.
  162  */
  163 #define PROC_VMSPACE_LOCK(p) do { } while (0)
  164 #define PROC_VMSPACE_UNLOCK(p) do { } while (0)
  165 
  166 /*
  167  *      VM_MAP_RANGE_CHECK:     [ internal use only ]
  168  *
  169  *      Asserts that the starting and ending region
  170  *      addresses fall within the valid range of the map.
  171  */
  172 #define VM_MAP_RANGE_CHECK(map, start, end)             \
  173                 {                                       \
  174                 if (start < vm_map_min(map))            \
  175                         start = vm_map_min(map);        \
  176                 if (end > vm_map_max(map))              \
  177                         end = vm_map_max(map);          \
  178                 if (start > end)                        \
  179                         start = end;                    \
  180                 }
  181 
  182 /*
  183  *      vm_map_startup:
  184  *
  185  *      Initialize the vm_map module.  Must be called before
  186  *      any other vm_map routines.
  187  *
  188  *      Map and entry structures are allocated from the general
  189  *      purpose memory pool with some exceptions:
  190  *
  191  *      - The kernel map and kmem submap are allocated statically.
  192  *      - Kernel map entries are allocated out of a static pool.
  193  *
  194  *      These restrictions are necessary since malloc() uses the
  195  *      maps and requires map entries.
  196  */
  197 
  198 void
  199 vm_map_startup(void)
  200 {
  201         mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
  202         mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
  203 #ifdef INVARIANTS
  204             vm_map_zdtor,
  205 #else
  206             NULL,
  207 #endif
  208             vm_map_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
  209         uma_prealloc(mapzone, MAX_KMAP);
  210         kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
  211             NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
  212             UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
  213         mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
  214             NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
  215         vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
  216 #ifdef INVARIANTS
  217             vmspace_zdtor,
  218 #else
  219             NULL,
  220 #endif
  221             vmspace_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
  222 }
  223 
  224 static int
  225 vmspace_zinit(void *mem, int size, int flags)
  226 {
  227         struct vmspace *vm;
  228 
  229         vm = (struct vmspace *)mem;
  230 
  231         vm->vm_map.pmap = NULL;
  232         (void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags);
  233         PMAP_LOCK_INIT(vmspace_pmap(vm));
  234         return (0);
  235 }
  236 
  237 static int
  238 vm_map_zinit(void *mem, int size, int flags)
  239 {
  240         vm_map_t map;
  241 
  242         map = (vm_map_t)mem;
  243         memset(map, 0, sizeof(*map));
  244         mtx_init(&map->system_mtx, "vm map (system)", NULL, MTX_DEF | MTX_DUPOK);
  245         sx_init(&map->lock, "vm map (user)");
  246         return (0);
  247 }
  248 
  249 #ifdef INVARIANTS
  250 static void
  251 vmspace_zdtor(void *mem, int size, void *arg)
  252 {
  253         struct vmspace *vm;
  254 
  255         vm = (struct vmspace *)mem;
  256 
  257         vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
  258 }
  259 static void
  260 vm_map_zdtor(void *mem, int size, void *arg)
  261 {
  262         vm_map_t map;
  263 
  264         map = (vm_map_t)mem;
  265         KASSERT(map->nentries == 0,
  266             ("map %p nentries == %d on free.",
  267             map, map->nentries));
  268         KASSERT(map->size == 0,
  269             ("map %p size == %lu on free.",
  270             map, (unsigned long)map->size));
  271 }
  272 #endif  /* INVARIANTS */
  273 
  274 /*
  275  * Allocate a vmspace structure, including a vm_map and pmap,
  276  * and initialize those structures.  The refcnt is set to 1.
  277  *
  278  * If 'pinit' is NULL then the embedded pmap is initialized via pmap_pinit().
  279  */
  280 struct vmspace *
  281 vmspace_alloc(vm_offset_t min, vm_offset_t max, pmap_pinit_t pinit)
  282 {
  283         struct vmspace *vm;
  284 
  285         vm = uma_zalloc(vmspace_zone, M_WAITOK);
  286 
  287         KASSERT(vm->vm_map.pmap == NULL, ("vm_map.pmap must be NULL"));
  288 
  289         if (pinit == NULL)
  290                 pinit = &pmap_pinit;
  291 
  292         if (!pinit(vmspace_pmap(vm))) {
  293                 uma_zfree(vmspace_zone, vm);
  294                 return (NULL);
  295         }
  296         CTR1(KTR_VM, "vmspace_alloc: %p", vm);
  297         _vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max);
  298         vm->vm_refcnt = 1;
  299         vm->vm_shm = NULL;
  300         vm->vm_swrss = 0;
  301         vm->vm_tsize = 0;
  302         vm->vm_dsize = 0;
  303         vm->vm_ssize = 0;
  304         vm->vm_taddr = 0;
  305         vm->vm_daddr = 0;
  306         vm->vm_maxsaddr = 0;
  307         return (vm);
  308 }
  309 
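A hedged usage sketch for the function above; MINUSER and MAXUSER are
placeholder bounds, not real symbols:

	struct vmspace *vm;

	vm = vmspace_alloc(MINUSER, MAXUSER, NULL); /* NULL => pmap_pinit() */
	if (vm == NULL)
		return (NULL);		/* pmap initialization failed */
	/* ... populate vm->vm_map ... */
	vmspace_free(vm);		/* drops the refcnt that was set to 1 */
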
  310 #ifdef RACCT
  311 static void
  312 vmspace_container_reset(struct proc *p)
  313 {
  314 
  315         PROC_LOCK(p);
  316         racct_set(p, RACCT_DATA, 0);
  317         racct_set(p, RACCT_STACK, 0);
  318         racct_set(p, RACCT_RSS, 0);
  319         racct_set(p, RACCT_MEMLOCK, 0);
  320         racct_set(p, RACCT_VMEM, 0);
  321         PROC_UNLOCK(p);
  322 }
  323 #endif
  324 
  325 static inline void
  326 vmspace_dofree(struct vmspace *vm)
  327 {
  328 
  329         CTR1(KTR_VM, "vmspace_free: %p", vm);
  330 
  331         /*
  332          * Make sure any SysV shm is freed, it might not have been in
  333          * exit1().
  334          */
  335         shmexit(vm);
  336 
  337         /*
  338          * Lock the map, to wait out all other references to it.
  339          * Delete all of the mappings and pages they hold, then call
  340          * the pmap module to reclaim anything left.
  341          */
  342         (void)vm_map_remove(&vm->vm_map, vm_map_min(&vm->vm_map),
  343             vm_map_max(&vm->vm_map));
  344 
  345         pmap_release(vmspace_pmap(vm));
  346         vm->vm_map.pmap = NULL;
  347         uma_zfree(vmspace_zone, vm);
  348 }
  349 
  350 void
  351 vmspace_free(struct vmspace *vm)
  352 {
  353 
  354         WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
  355             "vmspace_free() called");
  356 
  357         if (vm->vm_refcnt == 0)
  358                 panic("vmspace_free: attempt to free already freed vmspace");
  359 
  360         if (atomic_fetchadd_int(&vm->vm_refcnt, -1) == 1)
  361                 vmspace_dofree(vm);
  362 }
  363 
  364 void
  365 vmspace_exitfree(struct proc *p)
  366 {
  367         struct vmspace *vm;
  368 
  369         PROC_VMSPACE_LOCK(p);
  370         vm = p->p_vmspace;
  371         p->p_vmspace = NULL;
  372         PROC_VMSPACE_UNLOCK(p);
  373         KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace"));
  374         vmspace_free(vm);
  375 }
  376 
  377 void
  378 vmspace_exit(struct thread *td)
  379 {
  380         int refcnt;
  381         struct vmspace *vm;
  382         struct proc *p;
  383 
  384         /*
  385          * Release user portion of address space.
  386          * This releases references to vnodes,
  387          * which could cause I/O if the file has been unlinked.
  388          * Need to do this early enough that we can still sleep.
  389          *
  390          * The last exiting process to reach this point releases as
  391          * much of the environment as it can. vmspace_dofree() is the
  392          * slower fallback in case another process had a temporary
  393          * reference to the vmspace.
  394          */
  395 
  396         p = td->td_proc;
  397         vm = p->p_vmspace;
  398         atomic_add_int(&vmspace0.vm_refcnt, 1);
  399         do {
  400                 refcnt = vm->vm_refcnt;
  401                 if (refcnt > 1 && p->p_vmspace != &vmspace0) {
  402                         /* Switch now since other proc might free vmspace */
  403                         PROC_VMSPACE_LOCK(p);
  404                         p->p_vmspace = &vmspace0;
  405                         PROC_VMSPACE_UNLOCK(p);
  406                         pmap_activate(td);
  407                 }
  408         } while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt - 1));
  409         if (refcnt == 1) {
  410                 if (p->p_vmspace != vm) {
  411                         /* vmspace not yet freed, switch back */
  412                         PROC_VMSPACE_LOCK(p);
  413                         p->p_vmspace = vm;
  414                         PROC_VMSPACE_UNLOCK(p);
  415                         pmap_activate(td);
  416                 }
  417                 pmap_remove_pages(vmspace_pmap(vm));
  418                 /* Switch now since this proc will free vmspace */
  419                 PROC_VMSPACE_LOCK(p);
  420                 p->p_vmspace = &vmspace0;
  421                 PROC_VMSPACE_UNLOCK(p);
  422                 pmap_activate(td);
  423                 vmspace_dofree(vm);
  424         }
  425 #ifdef RACCT
  426         if (racct_enable)
  427                 vmspace_container_reset(p);
  428 #endif
  429 }
  430 
  431 /* Acquire reference to vmspace owned by another process. */
  432 
  433 struct vmspace *
  434 vmspace_acquire_ref(struct proc *p)
  435 {
  436         struct vmspace *vm;
  437         int refcnt;
  438 
  439         PROC_VMSPACE_LOCK(p);
  440         vm = p->p_vmspace;
  441         if (vm == NULL) {
  442                 PROC_VMSPACE_UNLOCK(p);
  443                 return (NULL);
  444         }
  445         do {
  446                 refcnt = vm->vm_refcnt;
  447                 if (refcnt <= 0) {      /* Avoid 0->1 transition */
  448                         PROC_VMSPACE_UNLOCK(p);
  449                         return (NULL);
  450                 }
  451         } while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt + 1));
  452         if (vm != p->p_vmspace) {
  453                 PROC_VMSPACE_UNLOCK(p);
  454                 vmspace_free(vm);
  455                 return (NULL);
  456         }
  457         PROC_VMSPACE_UNLOCK(p);
  458         return (vm);
  459 }
  460 
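The guard against the 0->1 transition is a general refcounting pattern; a
self-contained userspace analogue using C11 atomics (not the kernel's
atomic_cmpset_int) might look like this:

#include <stdatomic.h>
#include <stdbool.h>

/*
 * Try to take a reference, but never resurrect a count that has
 * already reached zero: by then the object may be freed, so the
 * acquisition must fail instead.
 */
static bool
try_ref(atomic_int *refcnt)
{
	int old = atomic_load(refcnt);

	do {
		if (old <= 0)
			return (false);	/* avoid 0->1 transition */
	} while (!atomic_compare_exchange_weak(refcnt, &old, old + 1));
	return (true);
}
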
  461 /*
  462  * Switch between vmspaces in an AIO kernel process.
  463  *
  464  * The AIO kernel processes switch to and from a user process's
  465  * vmspace while performing an I/O operation on behalf of a user
  466  * process.  The new vmspace is either the vmspace of a user process
  467  * obtained from an active AIO request or the initial vmspace of the
  468  * AIO kernel process (when it is idling).  Because user processes
  469  * will block to drain any active AIO requests before proceeding in
  470  * exit() or execve(), the vmspace reference count for these vmspaces
  471  * can never be 0.  This allows for a much simpler implementation than
  472  * the loop in vmspace_acquire_ref() above.  Similarly, AIO kernel
  473  * processes hold an extra reference on their initial vmspace for the
  474  * life of the process so that this guarantee is true for any vmspace
  475  * passed as 'newvm'.
  476  */
  477 void
  478 vmspace_switch_aio(struct vmspace *newvm)
  479 {
  480         struct vmspace *oldvm;
  481 
  482         /* XXX: Need some way to assert that this is an aio daemon. */
  483 
  484         KASSERT(newvm->vm_refcnt > 0,
  485             ("vmspace_switch_aio: newvm unreferenced"));
  486 
  487         oldvm = curproc->p_vmspace;
  488         if (oldvm == newvm)
  489                 return;
  490 
  491         /*
  492          * Point to the new address space and refer to it.
  493          */
  494         curproc->p_vmspace = newvm;
  495         atomic_add_int(&newvm->vm_refcnt, 1);
  496 
  497         /* Activate the new mapping. */
  498         pmap_activate(curthread);
  499 
  500         /* Remove the daemon's reference to the old address space. */
  501         KASSERT(oldvm->vm_refcnt > 1,
  502             ("vmspace_switch_aio: oldvm dropping last reference"));
  503         vmspace_free(oldvm);
  504 }
  505 
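A hedged sketch of the switching pattern this enables; job_vmspace and myvm
are illustrative names for a request's vmspace and the daemon's own:

	vmspace_switch_aio(job_vmspace); /* run in the user's address space */
	/* ... perform the I/O on the user's behalf ... */
	vmspace_switch_aio(myvm);	 /* return to the daemon's vmspace */
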
  506 void
  507 _vm_map_lock(vm_map_t map, const char *file, int line)
  508 {
  509 
  510         if (map->system_map)
  511                 mtx_lock_flags_(&map->system_mtx, 0, file, line);
  512         else
  513                 sx_xlock_(&map->lock, file, line);
  514         map->timestamp++;
  515 }
  516 
  517 static void
  518 vm_map_process_deferred(void)
  519 {
  520         struct thread *td;
  521         vm_map_entry_t entry, next;
  522         vm_object_t object;
  523 
  524         td = curthread;
  525         entry = td->td_map_def_user;
  526         td->td_map_def_user = NULL;
  527         while (entry != NULL) {
  528                 next = entry->next;
  529                 if ((entry->eflags & MAP_ENTRY_VN_WRITECNT) != 0) {
  530                         /*
  531                          * Decrement the object's writemappings and
  532                          * possibly the vnode's v_writecount.
  533                          */
  534                         KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
  535                             ("Submap with writecount"));
  536                         object = entry->object.vm_object;
  537                         KASSERT(object != NULL, ("No object for writecount"));
  538                         vnode_pager_release_writecount(object, entry->start,
  539                             entry->end);
  540                 }
  541                 vm_map_entry_deallocate(entry, FALSE);
  542                 entry = next;
  543         }
  544 }
  545 
  546 void
  547 _vm_map_unlock(vm_map_t map, const char *file, int line)
  548 {
  549 
  550         if (map->system_map)
  551                 mtx_unlock_flags_(&map->system_mtx, 0, file, line);
  552         else {
  553                 sx_xunlock_(&map->lock, file, line);
  554                 vm_map_process_deferred();
  555         }
  556 }
  557 
  558 void
  559 _vm_map_lock_read(vm_map_t map, const char *file, int line)
  560 {
  561 
  562         if (map->system_map)
  563                 mtx_lock_flags_(&map->system_mtx, 0, file, line);
  564         else
  565                 sx_slock_(&map->lock, file, line);
  566 }
  567 
  568 void
  569 _vm_map_unlock_read(vm_map_t map, const char *file, int line)
  570 {
  571 
  572         if (map->system_map)
  573                 mtx_unlock_flags_(&map->system_mtx, 0, file, line);
  574         else {
  575                 sx_sunlock_(&map->lock, file, line);
  576                 vm_map_process_deferred();
  577         }
  578 }
  579 
  580 int
  581 _vm_map_trylock(vm_map_t map, const char *file, int line)
  582 {
  583         int error;
  584 
  585         error = map->system_map ?
  586             !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
  587             !sx_try_xlock_(&map->lock, file, line);
  588         if (error == 0)
  589                 map->timestamp++;
  590         return (error == 0);
  591 }
  592 
  593 int
  594 _vm_map_trylock_read(vm_map_t map, const char *file, int line)
  595 {
  596         int error;
  597 
  598         error = map->system_map ?
  599             !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
  600             !sx_try_slock_(&map->lock, file, line);
  601         return (error == 0);
  602 }
  603 
  604 /*
  605  *      _vm_map_lock_upgrade:   [ internal use only ]
  606  *
  607  *      Tries to upgrade a read (shared) lock on the specified map to a write
   608  *      (exclusive) lock.  Returns the value "0" if the upgrade succeeds and a
  609  *      non-zero value if the upgrade fails.  If the upgrade fails, the map is
  610  *      returned without a read or write lock held.
  611  *
  612  *      Requires that the map be read locked.
  613  */
  614 int
  615 _vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
  616 {
  617         unsigned int last_timestamp;
  618 
  619         if (map->system_map) {
  620                 mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
  621         } else {
  622                 if (!sx_try_upgrade_(&map->lock, file, line)) {
  623                         last_timestamp = map->timestamp;
  624                         sx_sunlock_(&map->lock, file, line);
  625                         vm_map_process_deferred();
  626                         /*
  627                          * If the map's timestamp does not change while the
  628                          * map is unlocked, then the upgrade succeeds.
  629                          */
  630                         sx_xlock_(&map->lock, file, line);
  631                         if (last_timestamp != map->timestamp) {
  632                                 sx_xunlock_(&map->lock, file, line);
  633                                 return (1);
  634                         }
  635                 }
  636         }
  637         map->timestamp++;
  638         return (0);
  639 }
  640 
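A hedged caller sketch of the retry obligation, assuming the
vm_map_lock_read()/vm_map_lock_upgrade() wrappers from vm_map.h; on a failed
upgrade no lock is held and any prior lookup is stale:

	restart:
		vm_map_lock_read(map);
		if (!vm_map_lookup_entry(map, addr, &entry)) {
			vm_map_unlock_read(map);
			return (KERN_INVALID_ADDRESS);
		}
		if (vm_map_lock_upgrade(map) != 0)
			goto restart;	/* lock dropped; "entry" is stale */
		/* ... modify under the write lock; vm_map_unlock() when done. */
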
  641 void
  642 _vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
  643 {
  644 
  645         if (map->system_map) {
  646                 mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
  647         } else
  648                 sx_downgrade_(&map->lock, file, line);
  649 }
  650 
  651 /*
  652  *      vm_map_locked:
  653  *
  654  *      Returns a non-zero value if the caller holds a write (exclusive) lock
   655  *      on the specified map and the value "0" otherwise.
  656  */
  657 int
  658 vm_map_locked(vm_map_t map)
  659 {
  660 
  661         if (map->system_map)
  662                 return (mtx_owned(&map->system_mtx));
  663         else
  664                 return (sx_xlocked(&map->lock));
  665 }
  666 
  667 #ifdef INVARIANTS
  668 static void
  669 _vm_map_assert_locked(vm_map_t map, const char *file, int line)
  670 {
  671 
  672         if (map->system_map)
  673                 mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
  674         else
  675                 sx_assert_(&map->lock, SA_XLOCKED, file, line);
  676 }
  677 
  678 #define VM_MAP_ASSERT_LOCKED(map) \
  679     _vm_map_assert_locked(map, LOCK_FILE, LOCK_LINE)
  680 #else
  681 #define VM_MAP_ASSERT_LOCKED(map)
  682 #endif
  683 
  684 /*
  685  *      _vm_map_unlock_and_wait:
  686  *
  687  *      Atomically releases the lock on the specified map and puts the calling
  688  *      thread to sleep.  The calling thread will remain asleep until either
  689  *      vm_map_wakeup() is performed on the map or the specified timeout is
  690  *      exceeded.
  691  *
  692  *      WARNING!  This function does not perform deferred deallocations of
  693  *      objects and map entries.  Therefore, the calling thread is expected to
  694  *      reacquire the map lock after reawakening and later perform an ordinary
  695  *      unlock operation, such as vm_map_unlock(), before completing its
  696  *      operation on the map.
  697  */
  698 int
  699 _vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line)
  700 {
  701 
  702         mtx_lock(&map_sleep_mtx);
  703         if (map->system_map)
  704                 mtx_unlock_flags_(&map->system_mtx, 0, file, line);
  705         else
  706                 sx_xunlock_(&map->lock, file, line);
  707         return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps",
  708             timo));
  709 }
  710 
  711 /*
  712  *      vm_map_wakeup:
  713  *
  714  *      Awaken any threads that have slept on the map using
  715  *      vm_map_unlock_and_wait().
  716  */
  717 void
  718 vm_map_wakeup(vm_map_t map)
  719 {
  720 
  721         /*
  722          * Acquire and release map_sleep_mtx to prevent a wakeup()
  723          * from being performed (and lost) between the map unlock
  724          * and the msleep() in _vm_map_unlock_and_wait().
  725          */
  726         mtx_lock(&map_sleep_mtx);
  727         mtx_unlock(&map_sleep_mtx);
  728         wakeup(&map->root);
  729 }
  730 
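A hedged sketch of how the two halves pair up, modeled on the needs_wakeup
convention used elsewhere in this file:

	/* Waiter: note interest, then atomically unlock and sleep. */
	map->needs_wakeup = TRUE;
	(void)vm_map_unlock_and_wait(map, 0);
	vm_map_lock(map);	/* re-acquire and re-check the condition */

	/* Waker: with the map locked, wake any sleepers after a change. */
	if (map->needs_wakeup) {
		map->needs_wakeup = FALSE;
		vm_map_wakeup(map);
	}
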
  731 void
  732 vm_map_busy(vm_map_t map)
  733 {
  734 
  735         VM_MAP_ASSERT_LOCKED(map);
  736         map->busy++;
  737 }
  738 
  739 void
  740 vm_map_unbusy(vm_map_t map)
  741 {
  742 
  743         VM_MAP_ASSERT_LOCKED(map);
  744         KASSERT(map->busy, ("vm_map_unbusy: not busy"));
  745         if (--map->busy == 0 && (map->flags & MAP_BUSY_WAKEUP)) {
  746                 vm_map_modflags(map, 0, MAP_BUSY_WAKEUP);
  747                 wakeup(&map->busy);
  748         }
  749 }
  750 
  751 void 
  752 vm_map_wait_busy(vm_map_t map)
  753 {
  754 
  755         VM_MAP_ASSERT_LOCKED(map);
  756         while (map->busy) {
  757                 vm_map_modflags(map, MAP_BUSY_WAKEUP, 0);
  758                 if (map->system_map)
  759                         msleep(&map->busy, &map->system_mtx, 0, "mbusy", 0);
  760                 else
  761                         sx_sleep(&map->busy, &map->lock, 0, "mbusy", 0);
  762         }
  763         map->timestamp++;
  764 }
  765 
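One plausible pattern, inferred from the primitives above: the busy count is
taken under the lock, which can then be dropped while the count holds off
threads in vm_map_wait_busy():

	vm_map_lock(map);
	vm_map_busy(map);
	vm_map_unlock(map);
	/* ... unlocked work that waiters must not run concurrently with ... */
	vm_map_lock(map);
	vm_map_unbusy(map);
	vm_map_unlock(map);
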
  766 long
  767 vmspace_resident_count(struct vmspace *vmspace)
  768 {
  769         return pmap_resident_count(vmspace_pmap(vmspace));
  770 }
  771 
  772 /*
  773  *      vm_map_create:
  774  *
  775  *      Creates and returns a new empty VM map with
  776  *      the given physical map structure, and having
  777  *      the given lower and upper address bounds.
  778  */
  779 vm_map_t
  780 vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
  781 {
  782         vm_map_t result;
  783 
  784         result = uma_zalloc(mapzone, M_WAITOK);
  785         CTR1(KTR_VM, "vm_map_create: %p", result);
  786         _vm_map_init(result, pmap, min, max);
  787         return (result);
  788 }
  789 
  790 /*
  791  * Initialize an existing vm_map structure
  792  * such as that in the vmspace structure.
  793  */
  794 static void
  795 _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
  796 {
  797 
  798         map->header.next = map->header.prev = &map->header;
  799         map->header.eflags = MAP_ENTRY_HEADER;
  800         map->needs_wakeup = FALSE;
  801         map->system_map = 0;
  802         map->pmap = pmap;
  803         map->header.end = min;
  804         map->header.start = max;
  805         map->flags = 0;
  806         map->root = NULL;
  807         map->timestamp = 0;
  808         map->busy = 0;
  809 }
  810 
  811 void
  812 vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
  813 {
  814 
  815         _vm_map_init(map, pmap, min, max);
  816         mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
  817         sx_init(&map->lock, "user map");
  818 }
  819 
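A hedged bootstrap sketch: vm_map_init() suits statically allocated maps such
as the kernel map; the bounds shown are machine-dependent placeholders:

	static struct vm_map static_map;

	vm_map_init(&static_map, kernel_pmap, VM_MIN_KERNEL_ADDRESS,
	    VM_MAX_KERNEL_ADDRESS);
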
  820 /*
  821  *      vm_map_entry_dispose:   [ internal use only ]
  822  *
  823  *      Inverse of vm_map_entry_create.
  824  */
  825 static void
  826 vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
  827 {
  828         uma_zfree(map->system_map ? kmapentzone : mapentzone, entry);
  829 }
  830 
  831 /*
  832  *      vm_map_entry_create:    [ internal use only ]
  833  *
  834  *      Allocates a VM map entry for insertion.
  835  *      No entry fields are filled in.
  836  */
  837 static vm_map_entry_t
  838 vm_map_entry_create(vm_map_t map)
  839 {
  840         vm_map_entry_t new_entry;
  841 
  842         if (map->system_map)
  843                 new_entry = uma_zalloc(kmapentzone, M_NOWAIT);
  844         else
  845                 new_entry = uma_zalloc(mapentzone, M_WAITOK);
  846         if (new_entry == NULL)
  847                 panic("vm_map_entry_create: kernel resources exhausted");
  848         return (new_entry);
  849 }
  850 
  851 /*
  852  *      vm_map_entry_set_behavior:
  853  *
  854  *      Set the expected access behavior, either normal, random, or
  855  *      sequential.
  856  */
  857 static inline void
  858 vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior)
  859 {
  860         entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
  861             (behavior & MAP_ENTRY_BEHAV_MASK);
  862 }
  863 
  864 /*
  865  *      vm_map_entry_set_max_free:
  866  *
  867  *      Set the max_free field in a vm_map_entry.
  868  */
  869 static inline void
  870 vm_map_entry_set_max_free(vm_map_entry_t entry)
  871 {
  872 
  873         entry->max_free = entry->adj_free;
  874         if (entry->left != NULL && entry->left->max_free > entry->max_free)
  875                 entry->max_free = entry->left->max_free;
  876         if (entry->right != NULL && entry->right->max_free > entry->max_free)
  877                 entry->max_free = entry->right->max_free;
  878 }
  879 
  880 /*
  881  *      vm_map_entry_splay:
  882  *
  883  *      The Sleator and Tarjan top-down splay algorithm with the
  884  *      following variation.  Max_free must be computed bottom-up, so
  885  *      on the downward pass, maintain the left and right spines in
  886  *      reverse order.  Then, make a second pass up each side to fix
  887  *      the pointers and compute max_free.  The time bound is O(log n)
  888  *      amortized.
  889  *
  890  *      The new root is the vm_map_entry containing "addr", or else an
  891  *      adjacent entry (lower or higher) if addr is not in the tree.
  892  *
  893  *      The map must be locked, and leaves it so.
  894  *
  895  *      Returns: the new root.
  896  */
  897 static vm_map_entry_t
  898 vm_map_entry_splay(vm_offset_t addr, vm_map_entry_t root)
  899 {
  900         vm_map_entry_t llist, rlist;
  901         vm_map_entry_t ltree, rtree;
  902         vm_map_entry_t y;
  903 
  904         /* Special case of empty tree. */
  905         if (root == NULL)
  906                 return (root);
  907 
  908         /*
  909          * Pass One: Splay down the tree until we find addr or a NULL
  910          * pointer where addr would go.  llist and rlist are the two
  911          * sides in reverse order (bottom-up), with llist linked by
  912          * the right pointer and rlist linked by the left pointer in
  913          * the vm_map_entry.  Wait until Pass Two to set max_free on
  914          * the two spines.
  915          */
  916         llist = NULL;
  917         rlist = NULL;
  918         for (;;) {
  919                 /* root is never NULL in here. */
  920                 if (addr < root->start) {
  921                         y = root->left;
  922                         if (y == NULL)
  923                                 break;
  924                         if (addr < y->start && y->left != NULL) {
  925                                 /* Rotate right and put y on rlist. */
  926                                 root->left = y->right;
  927                                 y->right = root;
  928                                 vm_map_entry_set_max_free(root);
  929                                 root = y->left;
  930                                 y->left = rlist;
  931                                 rlist = y;
  932                         } else {
  933                                 /* Put root on rlist. */
  934                                 root->left = rlist;
  935                                 rlist = root;
  936                                 root = y;
  937                         }
  938                 } else if (addr >= root->end) {
  939                         y = root->right;
  940                         if (y == NULL)
  941                                 break;
  942                         if (addr >= y->end && y->right != NULL) {
  943                                 /* Rotate left and put y on llist. */
  944                                 root->right = y->left;
  945                                 y->left = root;
  946                                 vm_map_entry_set_max_free(root);
  947                                 root = y->right;
  948                                 y->right = llist;
  949                                 llist = y;
  950                         } else {
  951                                 /* Put root on llist. */
  952                                 root->right = llist;
  953                                 llist = root;
  954                                 root = y;
  955                         }
  956                 } else
  957                         break;
  958         }
  959 
  960         /*
  961          * Pass Two: Walk back up the two spines, flip the pointers
  962          * and set max_free.  The subtrees of the root go at the
  963          * bottom of llist and rlist.
  964          */
  965         ltree = root->left;
  966         while (llist != NULL) {
  967                 y = llist->right;
  968                 llist->right = ltree;
  969                 vm_map_entry_set_max_free(llist);
  970                 ltree = llist;
  971                 llist = y;
  972         }
  973         rtree = root->right;
  974         while (rlist != NULL) {
  975                 y = rlist->left;
  976                 rlist->left = rtree;
  977                 vm_map_entry_set_max_free(rlist);
  978                 rtree = rlist;
  979                 rlist = y;
  980         }
  981 
  982         /*
  983          * Final assembly: add ltree and rtree as subtrees of root.
  984          */
  985         root->left = ltree;
  986         root->right = rtree;
  987         vm_map_entry_set_max_free(root);
  988 
  989         return (root);
  990 }
  991 
  992 /*
  993  *      vm_map_entry_{un,}link:
  994  *
  995  *      Insert/remove entries from maps.
  996  */
  997 static void
  998 vm_map_entry_link(vm_map_t map,
  999                   vm_map_entry_t after_where,
 1000                   vm_map_entry_t entry)
 1001 {
 1002 
 1003         CTR4(KTR_VM,
 1004             "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map,
 1005             map->nentries, entry, after_where);
 1006         VM_MAP_ASSERT_LOCKED(map);
 1007         KASSERT(after_where->end <= entry->start,
 1008             ("vm_map_entry_link: prev end %jx new start %jx overlap",
 1009             (uintmax_t)after_where->end, (uintmax_t)entry->start));
 1010         KASSERT(entry->end <= after_where->next->start,
 1011             ("vm_map_entry_link: new end %jx next start %jx overlap",
 1012             (uintmax_t)entry->end, (uintmax_t)after_where->next->start));
 1013 
 1014         map->nentries++;
 1015         entry->prev = after_where;
 1016         entry->next = after_where->next;
 1017         entry->next->prev = entry;
 1018         after_where->next = entry;
 1019 
 1020         if (after_where != &map->header) {
 1021                 if (after_where != map->root)
 1022                         vm_map_entry_splay(after_where->start, map->root);
 1023                 entry->right = after_where->right;
 1024                 entry->left = after_where;
 1025                 after_where->right = NULL;
 1026                 after_where->adj_free = entry->start - after_where->end;
 1027                 vm_map_entry_set_max_free(after_where);
 1028         } else {
 1029                 entry->right = map->root;
 1030                 entry->left = NULL;
 1031         }
 1032         entry->adj_free = entry->next->start - entry->end;
 1033         vm_map_entry_set_max_free(entry);
 1034         map->root = entry;
 1035 }
 1036 
 1037 static void
 1038 vm_map_entry_unlink(vm_map_t map,
 1039                     vm_map_entry_t entry)
 1040 {
 1041         vm_map_entry_t next, prev, root;
 1042 
 1043         VM_MAP_ASSERT_LOCKED(map);
 1044         if (entry != map->root)
 1045                 vm_map_entry_splay(entry->start, map->root);
 1046         if (entry->left == NULL)
 1047                 root = entry->right;
 1048         else {
 1049                 root = vm_map_entry_splay(entry->start, entry->left);
 1050                 root->right = entry->right;
 1051                 root->adj_free = entry->next->start - root->end;
 1052                 vm_map_entry_set_max_free(root);
 1053         }
 1054         map->root = root;
 1055 
 1056         prev = entry->prev;
 1057         next = entry->next;
 1058         next->prev = prev;
 1059         prev->next = next;
 1060         map->nentries--;
 1061         CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
 1062             map->nentries, entry);
 1063 }
 1064 
 1065 /*
 1066  *      vm_map_entry_resize_free:
 1067  *
 1068  *      Recompute the amount of free space following a vm_map_entry
 1069  *      and propagate that value up the tree.  Call this function after
 1070  *      resizing a map entry in-place, that is, without a call to
 1071  *      vm_map_entry_link() or _unlink().
 1072  *
 1073  *      The map must be locked, and leaves it so.
 1074  */
 1075 static void
 1076 vm_map_entry_resize_free(vm_map_t map, vm_map_entry_t entry)
 1077 {
 1078 
 1079         /*
 1080          * Using splay trees without parent pointers, propagating
 1081          * max_free up the tree is done by moving the entry to the
 1082          * root and making the change there.
 1083          */
 1084         if (entry != map->root)
 1085                 map->root = vm_map_entry_splay(entry->start, map->root);
 1086 
 1087         entry->adj_free = entry->next->start - entry->end;
 1088         vm_map_entry_set_max_free(entry);
 1089 }
 1090 
 1091 /*
 1092  *      vm_map_lookup_entry:    [ internal use only ]
 1093  *
 1094  *      Finds the map entry containing (or
 1095  *      immediately preceding) the specified address
 1096  *      in the given map; the entry is returned
 1097  *      in the "entry" parameter.  The boolean
 1098  *      result indicates whether the address is
 1099  *      actually contained in the map.
 1100  */
 1101 boolean_t
 1102 vm_map_lookup_entry(
 1103         vm_map_t map,
 1104         vm_offset_t address,
 1105         vm_map_entry_t *entry)  /* OUT */
 1106 {
 1107         vm_map_entry_t cur;
 1108         boolean_t locked;
 1109 
 1110         /*
 1111          * If the map is empty, then the map entry immediately preceding
 1112          * "address" is the map's header.
 1113          */
 1114         cur = map->root;
 1115         if (cur == NULL)
 1116                 *entry = &map->header;
 1117         else if (address >= cur->start && cur->end > address) {
 1118                 *entry = cur;
 1119                 return (TRUE);
 1120         } else if ((locked = vm_map_locked(map)) ||
 1121             sx_try_upgrade(&map->lock)) {
 1122                 /*
 1123                  * Splay requires a write lock on the map.  However, it only
 1124                  * restructures the binary search tree; it does not otherwise
 1125                  * change the map.  Thus, the map's timestamp need not change
 1126                  * on a temporary upgrade.
 1127                  */
 1128                 map->root = cur = vm_map_entry_splay(address, cur);
 1129                 if (!locked)
 1130                         sx_downgrade(&map->lock);
 1131 
 1132                 /*
 1133                  * If "address" is contained within a map entry, the new root
 1134                  * is that map entry.  Otherwise, the new root is a map entry
 1135                  * immediately before or after "address".
 1136                  */
 1137                 if (address >= cur->start) {
 1138                         *entry = cur;
 1139                         if (cur->end > address)
 1140                                 return (TRUE);
 1141                 } else
 1142                         *entry = cur->prev;
 1143         } else
 1144                 /*
 1145                  * Since the map is only locked for read access, perform a
 1146                  * standard binary search tree lookup for "address".
 1147                  */
 1148                 for (;;) {
 1149                         if (address < cur->start) {
 1150                                 if (cur->left == NULL) {
 1151                                         *entry = cur->prev;
 1152                                         break;
 1153                                 }
 1154                                 cur = cur->left;
 1155                         } else if (cur->end > address) {
 1156                                 *entry = cur;
 1157                                 return (TRUE);
 1158                         } else {
 1159                                 if (cur->right == NULL) {
 1160                                         *entry = cur;
 1161                                         break;
 1162                                 }
 1163                                 cur = cur->right;
 1164                         }
 1165                 }
 1166         return (FALSE);
 1167 }
 1168 
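A sketch of the calling convention: on TRUE the returned entry contains the
address; on FALSE "entry" is the closest preceding entry, possibly
&map->header:

	vm_map_entry_t entry;

	/* The map must already be locked (read or write). */
	if (vm_map_lookup_entry(map, addr, &entry)) {
		/* addr lies inside [entry->start, entry->end). */
	} else {
		/*
		 * addr falls in a gap: "entry" precedes it, and
		 * entry->next is the first entry starting above addr.
		 */
	}
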
 1169 /*
 1170  *      vm_map_insert:
 1171  *
 1172  *      Inserts the given whole VM object into the target
 1173  *      map at the specified address range.  The object's
 1174  *      size should match that of the address range.
 1175  *
 1176  *      Requires that the map be locked, and leaves it so.
 1177  *
 1178  *      If object is non-NULL, ref count must be bumped by caller
 1179  *      prior to making call to account for the new entry.
 1180  */
 1181 int
 1182 vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
 1183     vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow)
 1184 {
 1185         vm_map_entry_t new_entry, prev_entry, temp_entry;
 1186         struct ucred *cred;
 1187         vm_eflags_t protoeflags;
 1188         vm_inherit_t inheritance;
 1189 
 1190         VM_MAP_ASSERT_LOCKED(map);
 1191         KASSERT(object != kernel_object ||
 1192             (cow & MAP_COPY_ON_WRITE) == 0,
 1193             ("vm_map_insert: kernel object and COW"));
 1194         KASSERT(object == NULL || (cow & MAP_NOFAULT) == 0,
 1195             ("vm_map_insert: paradoxical MAP_NOFAULT request"));
 1196         KASSERT((prot & ~max) == 0,
 1197             ("prot %#x is not subset of max_prot %#x", prot, max));
 1198 
 1199         /*
 1200          * Check that the start and end points are not bogus.
 1201          */
 1202         if (start < vm_map_min(map) || end > vm_map_max(map) ||
 1203             start >= end)
 1204                 return (KERN_INVALID_ADDRESS);
 1205 
 1206         /*
 1207          * Find the entry prior to the proposed starting address; if it's part
 1208          * of an existing entry, this range is bogus.
 1209          */
 1210         if (vm_map_lookup_entry(map, start, &temp_entry))
 1211                 return (KERN_NO_SPACE);
 1212 
 1213         prev_entry = temp_entry;
 1214 
 1215         /*
 1216          * Assert that the next entry doesn't overlap the end point.
 1217          */
 1218         if (prev_entry->next->start < end)
 1219                 return (KERN_NO_SPACE);
 1220 
 1221         if ((cow & MAP_CREATE_GUARD) != 0 && (object != NULL ||
 1222             max != VM_PROT_NONE))
 1223                 return (KERN_INVALID_ARGUMENT);
 1224 
 1225         protoeflags = 0;
 1226         if (cow & MAP_COPY_ON_WRITE)
 1227                 protoeflags |= MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY;
 1228         if (cow & MAP_NOFAULT)
 1229                 protoeflags |= MAP_ENTRY_NOFAULT;
 1230         if (cow & MAP_DISABLE_SYNCER)
 1231                 protoeflags |= MAP_ENTRY_NOSYNC;
 1232         if (cow & MAP_DISABLE_COREDUMP)
 1233                 protoeflags |= MAP_ENTRY_NOCOREDUMP;
 1234         if (cow & MAP_STACK_GROWS_DOWN)
 1235                 protoeflags |= MAP_ENTRY_GROWS_DOWN;
 1236         if (cow & MAP_STACK_GROWS_UP)
 1237                 protoeflags |= MAP_ENTRY_GROWS_UP;
 1238         if (cow & MAP_VN_WRITECOUNT)
 1239                 protoeflags |= MAP_ENTRY_VN_WRITECNT;
 1240         if ((cow & MAP_CREATE_GUARD) != 0)
 1241                 protoeflags |= MAP_ENTRY_GUARD;
 1242         if ((cow & MAP_CREATE_STACK_GAP_DN) != 0)
 1243                 protoeflags |= MAP_ENTRY_STACK_GAP_DN;
 1244         if ((cow & MAP_CREATE_STACK_GAP_UP) != 0)
 1245                 protoeflags |= MAP_ENTRY_STACK_GAP_UP;
 1246         if (cow & MAP_INHERIT_SHARE)
 1247                 inheritance = VM_INHERIT_SHARE;
 1248         else
 1249                 inheritance = VM_INHERIT_DEFAULT;
 1250 
 1251         cred = NULL;
 1252         if ((cow & (MAP_ACC_NO_CHARGE | MAP_NOFAULT | MAP_CREATE_GUARD)) != 0)
 1253                 goto charged;
 1254         if ((cow & MAP_ACC_CHARGED) || ((prot & VM_PROT_WRITE) &&
 1255             ((protoeflags & MAP_ENTRY_NEEDS_COPY) || object == NULL))) {
 1256                 if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start))
 1257                         return (KERN_RESOURCE_SHORTAGE);
 1258                 KASSERT(object == NULL ||
 1259                     (protoeflags & MAP_ENTRY_NEEDS_COPY) != 0 ||
 1260                     object->cred == NULL,
 1261                     ("overcommit: vm_map_insert o %p", object));
 1262                 cred = curthread->td_ucred;
 1263         }
 1264 
 1265 charged:
 1266         /* Expand the kernel pmap, if necessary. */
 1267         if (map == kernel_map && end > kernel_vm_end)
 1268                 pmap_growkernel(end);
 1269         if (object != NULL) {
 1270                 /*
 1271                  * OBJ_ONEMAPPING must be cleared unless this mapping
 1272                  * is trivially proven to be the only mapping for any
 1273                  * of the object's pages.  (Object granularity
 1274                  * reference counting is insufficient to recognize
 1275                  * aliases with precision.)
 1276                  */
 1277                 VM_OBJECT_WLOCK(object);
 1278                 if (object->ref_count > 1 || object->shadow_count != 0)
 1279                         vm_object_clear_flag(object, OBJ_ONEMAPPING);
 1280                 VM_OBJECT_WUNLOCK(object);
 1281         } else if ((prev_entry->eflags & ~MAP_ENTRY_USER_WIRED) ==
 1282             protoeflags &&
 1283             (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 &&
 1284             prev_entry->end == start && (prev_entry->cred == cred ||
 1285             (prev_entry->object.vm_object != NULL &&
 1286             prev_entry->object.vm_object->cred == cred)) &&
 1287             vm_object_coalesce(prev_entry->object.vm_object,
 1288             prev_entry->offset,
 1289             (vm_size_t)(prev_entry->end - prev_entry->start),
 1290             (vm_size_t)(end - prev_entry->end), cred != NULL &&
 1291             (protoeflags & MAP_ENTRY_NEEDS_COPY) == 0)) {
 1292                 /*
 1293                  * We were able to extend the object.  Determine if we
 1294                  * can extend the previous map entry to include the
 1295                  * new range as well.
 1296                  */
 1297                 if (prev_entry->inheritance == inheritance &&
 1298                     prev_entry->protection == prot &&
 1299                     prev_entry->max_protection == max &&
 1300                     prev_entry->wired_count == 0) {
 1301                         KASSERT((prev_entry->eflags & MAP_ENTRY_USER_WIRED) ==
 1302                             0, ("prev_entry %p has incoherent wiring",
 1303                             prev_entry));
 1304                         if ((prev_entry->eflags & MAP_ENTRY_GUARD) == 0)
 1305                                 map->size += end - prev_entry->end;
 1306                         prev_entry->end = end;
 1307                         vm_map_entry_resize_free(map, prev_entry);
 1308                         vm_map_simplify_entry(map, prev_entry);
 1309                         return (KERN_SUCCESS);
 1310                 }
 1311 
 1312                 /*
 1313                  * If we can extend the object but cannot extend the
 1314                  * map entry, we have to create a new map entry.  We
 1315                  * must bump the ref count on the extended object to
 1316                  * account for it.  object may be NULL.
 1317                  */
 1318                 object = prev_entry->object.vm_object;
 1319                 offset = prev_entry->offset +
 1320                     (prev_entry->end - prev_entry->start);
 1321                 vm_object_reference(object);
 1322                 if (cred != NULL && object != NULL && object->cred != NULL &&
 1323                     !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
 1324                         /* Object already accounts for this uid. */
 1325                         cred = NULL;
 1326                 }
 1327         }
 1328         if (cred != NULL)
 1329                 crhold(cred);
 1330 
 1331         /*
 1332          * Create a new entry
 1333          */
 1334         new_entry = vm_map_entry_create(map);
 1335         new_entry->start = start;
 1336         new_entry->end = end;
 1337         new_entry->cred = NULL;
 1338 
 1339         new_entry->eflags = protoeflags;
 1340         new_entry->object.vm_object = object;
 1341         new_entry->offset = offset;
 1342 
 1343         new_entry->inheritance = inheritance;
 1344         new_entry->protection = prot;
 1345         new_entry->max_protection = max;
 1346         new_entry->wired_count = 0;
 1347         new_entry->wiring_thread = NULL;
 1348         new_entry->read_ahead = VM_FAULT_READ_AHEAD_INIT;
 1349         new_entry->next_read = start;
 1350 
 1351         KASSERT(cred == NULL || !ENTRY_CHARGED(new_entry),
 1352             ("overcommit: vm_map_insert leaks vm_map %p", new_entry));
 1353         new_entry->cred = cred;
 1354 
 1355         /*
 1356          * Insert the new entry into the list
 1357          */
 1358         vm_map_entry_link(map, prev_entry, new_entry);
 1359         if ((new_entry->eflags & MAP_ENTRY_GUARD) == 0)
 1360                 map->size += new_entry->end - new_entry->start;
 1361 
 1362         /*
 1363          * Try to coalesce the new entry with both the previous and next
 1364          * entries in the list.  Previously, we only attempted to coalesce
 1365          * with the previous entry when object is NULL.  Here, we handle the
 1366          * other cases, which are less common.
 1367          */
 1368         vm_map_simplify_entry(map, new_entry);
 1369 
 1370         if ((cow & (MAP_PREFAULT | MAP_PREFAULT_PARTIAL)) != 0) {
 1371                 vm_map_pmap_enter(map, start, prot, object, OFF_TO_IDX(offset),
 1372                     end - start, cow & MAP_PREFAULT_PARTIAL);
 1373         }
 1374 
 1375         return (KERN_SUCCESS);
 1376 }
 1377 
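A minimal caller sketch of the reference-count rule stated in the function's
header comment; the variable names are illustrative:

	int rv;

	vm_object_reference(object);	/* account for the new mapping */
	vm_map_lock(map);
	rv = vm_map_insert(map, object, offset, start, end, prot, max, cow);
	vm_map_unlock(map);
	if (rv != KERN_SUCCESS)
		vm_object_deallocate(object);	/* undo on failure */
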
 1378 /*
 1379  *      vm_map_findspace:
 1380  *
 1381  *      Find the first fit (lowest VM address) for "length" free bytes
 1382  *      beginning at address >= start in the given map.
 1383  *
 1384  *      In a vm_map_entry, "adj_free" is the amount of free space
 1385  *      adjacent (higher address) to this entry, and "max_free" is the
 1386  *      maximum amount of contiguous free space in its subtree.  This
 1387  *      allows finding a free region in one path down the tree, so
 1388  *      O(log n) amortized with splay trees.
 1389  *
 1390  *      The map must be locked, and leaves it so.
 1391  *
 1392  *      Returns: 0 on success, and starting address in *addr,
 1393  *               1 if insufficient space.
 1394  */
 1395 int
 1396 vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length,
 1397     vm_offset_t *addr)  /* OUT */
 1398 {
 1399         vm_map_entry_t entry;
 1400         vm_offset_t st;
 1401 
 1402         /*
 1403          * Request must fit within min/max VM address and must avoid
 1404          * address wrap.
 1405          */
 1406         start = MAX(start, vm_map_min(map));
 1407         if (start + length > vm_map_max(map) || start + length < start)
 1408                 return (1);
 1409 
 1410         /* Empty tree means wide open address space. */
 1411         if (map->root == NULL) {
 1412                 *addr = start;
 1413                 return (0);
 1414         }
 1415 
 1416         /*
 1417          * After splay, if start comes before root node, then there
 1418          * must be a gap from start to the root.
 1419          */
 1420         map->root = vm_map_entry_splay(start, map->root);
 1421         if (start + length <= map->root->start) {
 1422                 *addr = start;
 1423                 return (0);
 1424         }
 1425 
 1426         /*
 1427          * Root is the last node that might begin its gap before
 1428          * start, and this is the last comparison where address
 1429          * wrap might be a problem.
 1430          */
 1431         st = (start > map->root->end) ? start : map->root->end;
 1432         if (length <= map->root->end + map->root->adj_free - st) {
 1433                 *addr = st;
 1434                 return (0);
 1435         }
 1436 
 1437         /* With max_free, can immediately tell if no solution. */
 1438         entry = map->root->right;
 1439         if (entry == NULL || length > entry->max_free)
 1440                 return (1);
 1441 
 1442         /*
 1443          * Search the right subtree in the order: left subtree, root,
 1444          * right subtree (first fit).  The previous splay implies that
 1445          * all regions in the right subtree have addresses > start.
 1446          */
 1447         while (entry != NULL) {
 1448                 if (entry->left != NULL && entry->left->max_free >= length)
 1449                         entry = entry->left;
 1450                 else if (entry->adj_free >= length) {
 1451                         *addr = entry->end;
 1452                         return (0);
 1453                 } else
 1454                         entry = entry->right;
 1455         }
 1456 
 1457         /* Can't get here, so panic if we do. */
 1458         panic("vm_map_findspace: max_free corrupt");
 1459 }
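
/*
 * Usage sketch (hypothetical helper, not part of this file): a bare
 * first-fit lookup.  vm_map_findspace() requires the map lock, and its
 * result is only stable while that lock is held, so real callers insert
 * an entry before unlocking.
 */
static int
example_findspace(vm_map_t map, vm_size_t length, vm_offset_t *addr)
{
        int rv;

        vm_map_lock(map);
        rv = vm_map_findspace(map, vm_map_min(map), length, addr);
        vm_map_unlock(map);
        return (rv == 0 ? KERN_SUCCESS : KERN_NO_SPACE);
}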
 1460 
 1461 int
 1462 vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
 1463     vm_offset_t start, vm_size_t length, vm_prot_t prot,
 1464     vm_prot_t max, int cow)
 1465 {
 1466         vm_offset_t end;
 1467         int result;
 1468 
 1469         end = start + length;
 1470         KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
 1471             object == NULL,
 1472             ("vm_map_fixed: non-NULL backing object for stack"));
 1473         vm_map_lock(map);
 1474         VM_MAP_RANGE_CHECK(map, start, end);
 1475         if ((cow & MAP_CHECK_EXCL) == 0)
 1476                 vm_map_delete(map, start, end);
 1477         if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
 1478                 result = vm_map_stack_locked(map, start, length, sgrowsiz,
 1479                     prot, max, cow);
 1480         } else {
 1481                 result = vm_map_insert(map, object, offset, start, end,
 1482                     prot, max, cow);
 1483         }
 1484         vm_map_unlock(map);
 1485         return (result);
 1486 }
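
/*
 * Usage sketch (hypothetical caller): map "object" read/write at the
 * fixed address "start", replacing whatever was mapped there.  Per the
 * convention noted at vm_map_find(), the caller is assumed to have
 * already referenced "object" for the new entry.
 */
static int
example_map_fixed(vm_map_t map, vm_object_t object, vm_offset_t start,
    vm_size_t length)
{

        return (vm_map_fixed(map, object, 0, start, length,
            VM_PROT_READ | VM_PROT_WRITE, VM_PROT_ALL, 0));
}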
 1487 
 1488 /*
 1489  * Searches for the specified amount of free space in the given map with the
 1490  * specified alignment.  Performs an address-ordered, first-fit search from
 1491  * the given address "*addr", with an optional upper bound "max_addr".  If the
 1492  * parameter "alignment" is zero, then the alignment is computed from the
 1493  * given (object, offset) pair so as to enable the greatest possible use of
 1494  * superpage mappings.  Returns KERN_SUCCESS and the address of the free space
 1495  * in "*addr" if successful.  Otherwise, returns KERN_NO_SPACE.
 1496  *
 1497  * The map must be locked.  Initially, there must be at least "length" bytes
 1498  * of free space at the given address.
 1499  */
 1500 static int
 1501 vm_map_alignspace(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
 1502     vm_offset_t *addr, vm_size_t length, vm_offset_t max_addr,
 1503     vm_offset_t alignment)
 1504 {
 1505         vm_offset_t aligned_addr, free_addr;
 1506 
 1507         VM_MAP_ASSERT_LOCKED(map);
 1508         free_addr = *addr;
 1509         KASSERT(!vm_map_findspace(map, free_addr, length, addr) &&
 1510             free_addr == *addr, ("caller provided insufficient free space"));
 1511         for (;;) {
 1512                 /*
 1513                  * At the start of every iteration, the free space at address
 1514                  * "*addr" is at least "length" bytes.
 1515                  */
 1516                 if (alignment == 0)
 1517                         pmap_align_superpage(object, offset, addr, length);
 1518                 else if ((*addr & (alignment - 1)) != 0) {
 1519                         *addr &= ~(alignment - 1);
 1520                         *addr += alignment;
 1521                 }
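                /*
                 * (Worked example: with a power-of-two alignment such as
                 * 0x1000, an unaligned *addr of 0x12345 is masked down to
                 * 0x12000 and then bumped to 0x13000, the next aligned
                 * address above the original value.)
                 */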
 1522                 aligned_addr = *addr;
 1523                 if (aligned_addr == free_addr) {
 1524                         /*
 1525                          * Alignment did not change "*addr", so "*addr" must
 1526                          * still provide sufficient free space.
 1527                          */
 1528                         return (KERN_SUCCESS);
 1529                 }
 1530 
 1531                 /*
 1532                  * Test for address wrap on "*addr".  A wrapped "*addr" could
 1533                  * be a valid address, in which case vm_map_findspace() cannot
 1534                  * be relied upon to fail.
 1535                  */
 1536                 if (aligned_addr < free_addr ||
 1537                     vm_map_findspace(map, aligned_addr, length, addr) ||
 1538                     (max_addr != 0 && *addr + length > max_addr))
 1539                         return (KERN_NO_SPACE);
 1540                 free_addr = *addr;
 1541                 if (free_addr == aligned_addr) {
 1542                         /*
 1543                          * If a successful call to vm_map_findspace() did not
 1544                          * change "*addr", then "*addr" must still be aligned
 1545                          * and provide sufficient free space.
 1546                          */
 1547                         return (KERN_SUCCESS);
 1548                 }
 1549         }
 1550 }
 1551 
 1552 /*
 1553  *      vm_map_find finds an unallocated region in the target address
 1554  *      map with the given length.  The search is defined to be
 1555  *      first-fit from the specified address; the region found is
 1556  *      returned in the same parameter.
 1557  *
 1558  *      If object is non-NULL, the caller must bump its reference count
 1559  *      before the call to account for the new entry.
 1560  */
 1561 int
 1562 vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
 1563             vm_offset_t *addr,  /* IN/OUT */
 1564             vm_size_t length, vm_offset_t max_addr, int find_space,
 1565             vm_prot_t prot, vm_prot_t max, int cow)
 1566 {
 1567         vm_offset_t alignment, min_addr;
 1568         int rv;
 1569 
 1570         KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
 1571             object == NULL,
 1572             ("vm_map_find: non-NULL backing object for stack"));
 1573         if (find_space == VMFS_OPTIMAL_SPACE && (object == NULL ||
 1574             (object->flags & OBJ_COLORED) == 0))
 1575                 find_space = VMFS_ANY_SPACE;
 1576         if (find_space >> 8 != 0) {
 1577                 KASSERT((find_space & 0xff) == 0, ("bad VMFS flags"));
 1578                 alignment = (vm_offset_t)1 << (find_space >> 8);
 1579         } else
 1580                 alignment = 0;
 1581         vm_map_lock(map);
 1582         if (find_space != VMFS_NO_SPACE) {
 1583                 KASSERT(find_space == VMFS_ANY_SPACE ||
 1584                     find_space == VMFS_OPTIMAL_SPACE ||
 1585                     find_space == VMFS_SUPER_SPACE ||
 1586                     alignment != 0, ("unexpected VMFS flag"));
 1587                 min_addr = *addr;
 1588 again:
 1589                 if (vm_map_findspace(map, min_addr, length, addr) ||
 1590                     (max_addr != 0 && *addr + length > max_addr)) {
 1591                         rv = KERN_NO_SPACE;
 1592                         goto done;
 1593                 }
 1594                 if (find_space != VMFS_ANY_SPACE &&
 1595                     (rv = vm_map_alignspace(map, object, offset, addr, length,
 1596                     max_addr, alignment)) != KERN_SUCCESS) {
 1597                         if (find_space == VMFS_OPTIMAL_SPACE) {
 1598                                 find_space = VMFS_ANY_SPACE;
 1599                                 goto again;
 1600                         }
 1601                         goto done;
 1602                 }
 1603         }
 1604         if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
 1605                 rv = vm_map_stack_locked(map, *addr, length, sgrowsiz, prot,
 1606                     max, cow);
 1607         } else {
 1608                 rv = vm_map_insert(map, object, offset, *addr, *addr + length,
 1609                     prot, max, cow);
 1610         }
 1611 done:
 1612         vm_map_unlock(map);
 1613         return (rv);
 1614 }
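
/*
 * Usage sketch (hypothetical helper): allocate "length" bytes anywhere
 * at or above "*addr" with 2MB alignment.  The shift value 21 and the
 * anonymous (NULL object) mapping are assumptions for illustration;
 * VMFS_ALIGNED_SPACE() stores the alignment shift in the high bits of
 * "find_space", matching the "find_space >> 8" decoding above.
 */
static int
example_find_aligned(vm_map_t map, vm_offset_t *addr, vm_size_t length)
{

        return (vm_map_find(map, NULL, 0, addr, length, 0,
            VMFS_ALIGNED_SPACE(21), VM_PROT_ALL, VM_PROT_ALL, 0));
}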
 1615 
 1616 /*
 1617  *      vm_map_find_min() is a variant of vm_map_find() that takes an
 1618  *      additional parameter (min_addr) and treats the given address
 1619  *      (*addr) differently.  Specifically, it treats *addr as a hint
 1620  *      and not as the minimum address where the mapping is created.
 1621  *
 1622  *      This function works in two phases.  First, it tries to
 1623  *      allocate above the hint.  If that fails and the hint is
 1624  *      greater than min_addr, it performs a second pass, replacing
 1625  *      the hint with min_addr as the minimum address for the
 1626  *      allocation.
 1627  */
 1628 int
 1629 vm_map_find_min(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
 1630     vm_offset_t *addr, vm_size_t length, vm_offset_t min_addr,
 1631     vm_offset_t max_addr, int find_space, vm_prot_t prot, vm_prot_t max,
 1632     int cow)
 1633 {
 1634         vm_offset_t hint;
 1635         int rv;
 1636 
 1637         hint = *addr;
 1638         for (;;) {
 1639                 rv = vm_map_find(map, object, offset, addr, length, max_addr,
 1640                     find_space, prot, max, cow);
 1641                 if (rv == KERN_SUCCESS || min_addr >= hint)
 1642                         return (rv);
 1643                 *addr = hint = min_addr;
 1644         }
 1645 }
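
/*
 * Usage sketch (hypothetical helper): treat "*addr" purely as a hint.
 * If nothing fits at or above the hint, the second pass retries from
 * "min_addr", which must lie below the hint for a retry to occur.
 */
static int
example_find_with_hint(vm_map_t map, vm_offset_t *addr, vm_size_t length,
    vm_offset_t min_addr)
{

        return (vm_map_find_min(map, NULL, 0, addr, length, min_addr, 0,
            VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL, 0));
}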
 1646 
 1647 static bool
 1648 vm_map_mergeable_neighbors(vm_map_entry_t prev, vm_map_entry_t entry)
 1649 {
 1650         vm_size_t prevsize;
 1651 
 1652         prevsize = prev->end - prev->start;
 1653         return (prev->end == entry->start &&
 1654             prev->object.vm_object == entry->object.vm_object &&
 1655             (prev->object.vm_object == NULL ||
 1656             prev->offset + prevsize == entry->offset) &&
 1657             prev->eflags == entry->eflags &&
 1658             prev->protection == entry->protection &&
 1659             prev->max_protection == entry->max_protection &&
 1660             prev->inheritance == entry->inheritance &&
 1661             prev->wired_count == entry->wired_count &&
 1662             prev->cred == entry->cred);
 1663 }
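
/*
 * Merge illustration (hypothetical values): an entry spanning
 * [0x10000, 0x12000) at object offset 0 and a neighbor spanning
 * [0x12000, 0x15000) at offset 0x2000 into the same object satisfy
 * prev->offset + prevsize == entry->offset; with identical eflags,
 * protections, inheritance, wiring, and cred, they coalesce into a
 * single entry spanning [0x10000, 0x15000).
 */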
 1664 
 1665 static void
 1666 vm_map_merged_neighbor_dispose(vm_map_t map, vm_map_entry_t entry)
 1667 {
 1668 
 1669         /*
 1670          * If the backing object is a vnode object,
 1671          * vm_object_deallocate() calls vrele().
 1672          * However, vrele() does not lock the vnode
 1673          * because the vnode has additional
 1674          * references.  Thus, the map lock can be kept
 1675          * without causing a lock-order reversal with
 1676          * the vnode lock.
 1677          *
 1678          * Since we count the number of virtual page
 1679          * mappings in object->un_pager.vnp.writemappings,
 1680          * the writemappings value should not be adjusted
 1681          * when the entry is disposed of.
 1682          */
 1683         if (entry->object.vm_object != NULL)
 1684                 vm_object_deallocate(entry->object.vm_object);
 1685         if (entry->cred != NULL)
 1686                 crfree(entry->cred);
 1687         vm_map_entry_dispose(map, entry);
 1688 }
 1689 
 1690 /*
 1691  *      vm_map_simplify_entry:
 1692  *
 1693  *      Simplify the given map entry by merging with either neighbor.  This
 1694  *      routine also has the ability to merge with both neighbors.
 1695  *
 1696  *      The map must be locked.
 1697  *
 1698  *      This routine guarantees that the passed entry remains valid (though
 1699  *      possibly extended).  When merging, this routine may delete one or
 1700  *      both neighbors.
 1701  */
 1702 void
 1703 vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
 1704 {
 1705         vm_map_entry_t next, prev;
 1706 
 1707         if ((entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP |
 1708             MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP)) != 0)
 1709                 return;
 1710 
 1711         prev = entry->prev;
 1712         if (vm_map_mergeable_neighbors(prev, entry)) {
 1713                 vm_map_entry_unlink(map, prev);
 1714                 entry->start = prev->start;
 1715                 entry->offset = prev->offset;
 1716                 if (entry->prev != &map->header)
 1717                         vm_map_entry_resize_free(map, entry->prev);
 1718                 vm_map_merged_neighbor_dispose(map, prev);
 1719         }
 1720 
 1721         next = entry->next;
 1722         if (vm_map_mergeable_neighbors(entry, next)) {
 1723                 vm_map_entry_unlink(map, next);
 1724                 entry->end = next->end;
 1725                 vm_map_entry_resize_free(map, entry);
 1726                 vm_map_merged_neighbor_dispose(map, next);
 1727         }
 1728 }

 1729 /*
 1730  *      vm_map_clip_start:      [ internal use only ]
 1731  *
 1732  *      Asserts that the given entry begins at or after
 1733  *      the specified address; if necessary,
 1734  *      it splits the entry into two.
 1735  */
 1736 #define vm_map_clip_start(map, entry, startaddr) \
 1737 { \
 1738         if (startaddr > entry->start) \
 1739                 _vm_map_clip_start(map, entry, startaddr); \
 1740 }
 1741 
 1742 /*
 1743  *      This routine is called only when it is known that
 1744  *      the entry must be split.
 1745  */
 1746 static void
 1747 _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
 1748 {
 1749         vm_map_entry_t new_entry;
 1750 
 1751         VM_MAP_ASSERT_LOCKED(map);
 1752         KASSERT(entry->end > start && entry->start < start,
 1753             ("_vm_map_clip_start: invalid clip of entry %p", entry));
 1754 
 1755         /*
 1756          * Split off the front portion -- note that we must insert the new
 1757          * entry BEFORE this one, so that this entry has the specified
 1758          * starting address.
 1759          */
 1760         vm_map_simplify_entry(map, entry);
 1761 
 1762         /*
 1763          * If there is no object backing this entry, we might as well create
 1764          * one now.  If we defer it, an object can get created after the map
 1765          * is clipped, and individual objects will be created for the split-up
 1766          * map.  This is a bit of a hack, but is also about the best place to
 1767          * put this improvement.
 1768          */
 1769         if (entry->object.vm_object == NULL && !map->system_map &&
 1770             (entry->eflags & MAP_ENTRY_GUARD) == 0) {
 1771                 vm_object_t object;
 1772                 object = vm_object_allocate(OBJT_DEFAULT,
 1773                                 atop(entry->end - entry->start));
 1774                 entry->object.vm_object = object;
 1775                 entry->offset = 0;
 1776                 if (entry->cred != NULL) {
 1777                         object->cred = entry->cred;
 1778                         object->charge = entry->end - entry->start;
 1779                         entry->cred = NULL;
 1780                 }
 1781         } else if (entry->object.vm_object != NULL &&
 1782                    ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
 1783                    entry->cred != NULL) {
 1784                 VM_OBJECT_WLOCK(entry->object.vm_object);
 1785                 KASSERT(entry->object.vm_object->cred == NULL,
 1786                     ("OVERCOMMIT: vm_entry_clip_start: both cred e %p", entry));
 1787                 entry->object.vm_object->cred = entry->cred;
 1788                 entry->object.vm_object->charge = entry->end - entry->start;
 1789                 VM_OBJECT_WUNLOCK(entry->object.vm_object);
 1790                 entry->cred = NULL;
 1791         }
 1792 
 1793         new_entry = vm_map_entry_create(map);
 1794         *new_entry = *entry;
 1795 
 1796         new_entry->end = start;
 1797         entry->offset += (start - entry->start);
 1798         entry->start = start;
 1799         if (new_entry->cred != NULL)
 1800                 crhold(entry->cred);
 1801 
 1802         vm_map_entry_link(map, entry->prev, new_entry);
 1803 
 1804         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
 1805                 vm_object_reference(new_entry->object.vm_object);
 1806                 /*
 1807                  * The object->un_pager.vnp.writemappings count for the
 1808                  * object of a MAP_ENTRY_VN_WRITECNT-type entry is
 1809                  * intentionally kept as is here.  The virtual pages are
 1810                  * redistributed among the clipped entries, so the sum
 1811                  * remains the same.
 1812                  */
 1813         }
 1814 }
 1815 
 1816 /*
 1817  *      vm_map_clip_end:        [ internal use only ]
 1818  *
 1819  *      Asserts that the given entry ends at or before
 1820  *      the specified address; if necessary,
 1821  *      it splits the entry into two.
 1822  */
 1823 #define vm_map_clip_end(map, entry, endaddr) \
 1824 { \
 1825         if ((endaddr) < (entry->end)) \
 1826                 _vm_map_clip_end((map), (entry), (endaddr)); \
 1827 }
 1828 
 1829 /*
 1830  *      This routine is called only when it is known that
 1831  *      the entry must be split.
 1832  */
 1833 static void
 1834 _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
 1835 {
 1836         vm_map_entry_t new_entry;
 1837 
 1838         VM_MAP_ASSERT_LOCKED(map);
 1839         KASSERT(entry->start < end && entry->end > end,
 1840             ("_vm_map_clip_end: invalid clip of entry %p", entry));
 1841 
 1842         /*
 1843          * If there is no object backing this entry, we might as well create
 1844          * one now.  If we defer it, an object can get created after the map
 1845          * is clipped, and individual objects will be created for the split-up
 1846          * map.  This is a bit of a hack, but is also about the best place to
 1847          * put this improvement.
 1848          */
 1849         if (entry->object.vm_object == NULL && !map->system_map &&
 1850             (entry->eflags & MAP_ENTRY_GUARD) == 0) {
 1851                 vm_object_t object;
 1852                 object = vm_object_allocate(OBJT_DEFAULT,
 1853                                 atop(entry->end - entry->start));
 1854                 entry->object.vm_object = object;
 1855                 entry->offset = 0;
 1856                 if (entry->cred != NULL) {
 1857                         object->cred = entry->cred;
 1858                         object->charge = entry->end - entry->start;
 1859                         entry->cred = NULL;
 1860                 }
 1861         } else if (entry->object.vm_object != NULL &&
 1862                    ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
 1863                    entry->cred != NULL) {
 1864                 VM_OBJECT_WLOCK(entry->object.vm_object);
 1865                 KASSERT(entry->object.vm_object->cred == NULL,
 1866                     ("OVERCOMMIT: vm_entry_clip_end: both cred e %p", entry));
 1867                 entry->object.vm_object->cred = entry->cred;
 1868                 entry->object.vm_object->charge = entry->end - entry->start;
 1869                 VM_OBJECT_WUNLOCK(entry->object.vm_object);
 1870                 entry->cred = NULL;
 1871         }
 1872 
 1873         /*
 1874          * Create a new entry and insert it AFTER the specified entry
 1875          */
 1876         new_entry = vm_map_entry_create(map);
 1877         *new_entry = *entry;
 1878 
 1879         new_entry->start = entry->end = end;
 1880         new_entry->offset += (end - entry->start);
 1881         if (new_entry->cred != NULL)
 1882                 crhold(entry->cred);
 1883 
 1884         vm_map_entry_link(map, entry, new_entry);
 1885 
 1886         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
 1887                 vm_object_reference(new_entry->object.vm_object);
 1888         }
 1889 }
 1890 
 1891 /*
 1892  *      vm_map_submap:          [ kernel use only ]
 1893  *
 1894  *      Mark the given range as handled by a subordinate map.
 1895  *
 1896  *      This range must have been created with vm_map_find,
 1897  *      and no other operations may have been performed on this
 1898  *      range prior to calling vm_map_submap.
 1899  *
 1900  *      Only a limited number of operations can be performed
 1901  *      within this range after calling vm_map_submap:
 1902  *              vm_fault
 1903  *      [Don't try vm_map_copy!]
 1904  *
 1905  *      To remove a submapping, one must first remove the
 1906  *      range from the superior map, and then destroy the
 1907  *      submap (if desired).  [Better yet, don't try it.]
 1908  */
 1909 int
 1910 vm_map_submap(
 1911         vm_map_t map,
 1912         vm_offset_t start,
 1913         vm_offset_t end,
 1914         vm_map_t submap)
 1915 {
 1916         vm_map_entry_t entry;
 1917         int result = KERN_INVALID_ARGUMENT;
 1918 
 1919         vm_map_lock(map);
 1920 
 1921         VM_MAP_RANGE_CHECK(map, start, end);
 1922 
 1923         if (vm_map_lookup_entry(map, start, &entry)) {
 1924                 vm_map_clip_start(map, entry, start);
 1925         } else
 1926                 entry = entry->next;
 1927 
 1928         vm_map_clip_end(map, entry, end);
 1929 
 1930         if ((entry->start == start) && (entry->end == end) &&
 1931             ((entry->eflags & MAP_ENTRY_COW) == 0) &&
 1932             (entry->object.vm_object == NULL)) {
 1933                 entry->object.sub_map = submap;
 1934                 entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
 1935                 result = KERN_SUCCESS;
 1936         }
 1937         vm_map_unlock(map);
 1938 
 1939         return (result);
 1940 }
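
/*
 * Usage sketch (hypothetical, kernel use only): reserve a range in the
 * parent map at a caller-chosen address with vm_map_find() and then
 * hand it to a subordinate map, per the rules in the comment above.
 */
static int
example_install_submap(vm_map_t parent, vm_map_t submap, vm_offset_t start,
    vm_size_t length)
{
        vm_offset_t addr;
        int rv;

        addr = start;
        rv = vm_map_find(parent, NULL, 0, &addr, length, 0, VMFS_NO_SPACE,
            VM_PROT_ALL, VM_PROT_ALL, 0);
        if (rv != KERN_SUCCESS)
                return (rv);
        return (vm_map_submap(parent, addr, addr + length, submap));
}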
 1941 
 1942 /*
 1943  * The maximum number of pages to map if MAP_PREFAULT_PARTIAL is specified
 1944  */
 1945 #define MAX_INIT_PT     96
 1946 
 1947 /*
 1948  *      vm_map_pmap_enter:
 1949  *
 1950  *      Preload the specified map's pmap with mappings to the specified
 1951  *      object's memory-resident pages.  No further physical pages are
 1952  *      allocated, and no further virtual pages are retrieved from secondary
 1953  *      storage.  If the specified flags include MAP_PREFAULT_PARTIAL, then a
 1954  *      limited number of page mappings are created at the low-end of the
 1955  *      specified address range.  (For this purpose, a superpage mapping
 1956  *      counts as one page mapping.)  Otherwise, all resident pages within
 1957  *      the specified address range are mapped.
 1958  */
 1959 static void
 1960 vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
 1961     vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags)
 1962 {
 1963         vm_offset_t start;
 1964         vm_page_t p, p_start;
 1965         vm_pindex_t mask, psize, threshold, tmpidx;
 1966 
 1967         if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL)
 1968                 return;
 1969         VM_OBJECT_RLOCK(object);
 1970         if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
 1971                 VM_OBJECT_RUNLOCK(object);
 1972                 VM_OBJECT_WLOCK(object);
 1973                 if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
 1974                         pmap_object_init_pt(map->pmap, addr, object, pindex,
 1975                             size);
 1976                         VM_OBJECT_WUNLOCK(object);
 1977                         return;
 1978                 }
 1979                 VM_OBJECT_LOCK_DOWNGRADE(object);
 1980         }
 1981 
 1982         psize = atop(size);
 1983         if (psize + pindex > object->size) {
 1984                 if (object->size < pindex) {
 1985                         VM_OBJECT_RUNLOCK(object);
 1986                         return;
 1987                 }
 1988                 psize = object->size - pindex;
 1989         }
 1990 
 1991         start = 0;
 1992         p_start = NULL;
 1993         threshold = MAX_INIT_PT;
 1994 
 1995         p = vm_page_find_least(object, pindex);
 1996         /*
 1997          * Assert: the variable p is either (1) the page with the
 1998          * least pindex greater than or equal to the parameter pindex
 1999          * or (2) NULL.
 2000          */
 2001         for (;
 2002              p != NULL && (tmpidx = p->pindex - pindex) < psize;
 2003              p = TAILQ_NEXT(p, listq)) {
 2004                 /*
 2005                  * Don't allow madvise() to blow away our really free
 2006                  * pages by allocating pv entries.
 2007                  */
 2008                 if (((flags & MAP_PREFAULT_MADVISE) != 0 &&
 2009                     vm_page_count_severe()) ||
 2010                     ((flags & MAP_PREFAULT_PARTIAL) != 0 &&
 2011                     tmpidx >= threshold)) {
 2012                         psize = tmpidx;
 2013                         break;
 2014                 }
 2015                 if (p->valid == VM_PAGE_BITS_ALL) {
 2016                         if (p_start == NULL) {
 2017                                 start = addr + ptoa(tmpidx);
 2018                                 p_start = p;
 2019                         }
 2020                         /* Jump ahead if a superpage mapping is possible. */
 2021                         if (p->psind > 0 && ((addr + ptoa(tmpidx)) &
 2022                             (pagesizes[p->psind] - 1)) == 0) {
 2023                                 mask = atop(pagesizes[p->psind]) - 1;
 2024                                 if (tmpidx + mask < psize &&
 2025                                     vm_page_ps_test(p, PS_ALL_VALID, NULL)) {
 2026                                         p += mask;
 2027                                         threshold += mask;
 2028                                 }
 2029                         }
 2030                 } else if (p_start != NULL) {
 2031                         pmap_enter_object(map->pmap, start, addr +
 2032                             ptoa(tmpidx), p_start, prot);
 2033                         p_start = NULL;
 2034                 }
 2035         }
 2036         if (p_start != NULL)
 2037                 pmap_enter_object(map->pmap, start, addr + ptoa(psize),
 2038                     p_start, prot);
 2039         VM_OBJECT_RUNLOCK(object);
 2040 }
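
/*
 * Prefault sketch (hypothetical caller): population by
 * vm_map_pmap_enter() is requested at insertion time through the "cow"
 * flags.  MAP_PREFAULT_PARTIAL caps the work at roughly MAX_INIT_PT
 * page mappings, as described above.  The caller is assumed to hold a
 * reference on "object" for the new entry.
 */
static int
example_insert_prefault(vm_map_t map, vm_object_t object, vm_offset_t start,
    vm_size_t length)
{
        int rv;

        vm_map_lock(map);
        rv = vm_map_insert(map, object, 0, start, start + length,
            VM_PROT_READ, VM_PROT_ALL, MAP_PREFAULT_PARTIAL);
        vm_map_unlock(map);
        return (rv);
}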
 2041 
 2042 /*
 2043  *      vm_map_protect:
 2044  *
 2045  *      Sets the protection of the specified address
 2046  *      region in the target map.  If "set_max" is
 2047  *      specified, the maximum protection is to be set;
 2048  *      otherwise, only the current protection is affected.
 2049  */
 2050 int
 2051 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
 2052                vm_prot_t new_prot, boolean_t set_max)
 2053 {
 2054         vm_map_entry_t current, entry;
 2055         vm_object_t obj;
 2056         struct ucred *cred;
 2057         vm_prot_t old_prot;
 2058 
 2059         if (start == end)
 2060                 return (KERN_SUCCESS);
 2061 
 2062         vm_map_lock(map);
 2063 
 2064         /*
 2065          * Ensure that we are not concurrently wiring pages.  vm_map_wire() may
 2066          * need to fault pages into the map and will drop the map lock while
 2067          * doing so, and the VM object may end up in an inconsistent state if we
 2068          * update the protection on the map entry in between faults.
 2069          */
 2070         vm_map_wait_busy(map);
 2071 
 2072         VM_MAP_RANGE_CHECK(map, start, end);
 2073 
 2074         if (vm_map_lookup_entry(map, start, &entry)) {
 2075                 vm_map_clip_start(map, entry, start);
 2076         } else {
 2077                 entry = entry->next;
 2078         }
 2079 
 2080         /*
 2081          * Make a first pass to check for protection violations.
 2082          */
 2083         for (current = entry; current->start < end; current = current->next) {
 2084                 if ((current->eflags & MAP_ENTRY_GUARD) != 0)
 2085                         continue;
 2086                 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
 2087                         vm_map_unlock(map);
 2088                         return (KERN_INVALID_ARGUMENT);
 2089                 }
 2090                 if ((new_prot & current->max_protection) != new_prot) {
 2091                         vm_map_unlock(map);
 2092                         return (KERN_PROTECTION_FAILURE);
 2093                 }
 2094         }
 2095 
 2096         /*
 2097          * Do an accounting pass for private read-only mappings that
 2098          * will now do copy-on-write due to the allowed write access
 2099          * (e.g., a debugger setting a breakpoint on a text segment).
 2100          */
 2101         for (current = entry; current->start < end; current = current->next) {
 2102 
 2103                 vm_map_clip_end(map, current, end);
 2104 
 2105                 if (set_max ||
 2106                     ((new_prot & ~(current->protection)) & VM_PROT_WRITE) == 0 ||
 2107                     ENTRY_CHARGED(current) ||
 2108                     (current->eflags & MAP_ENTRY_GUARD) != 0) {
 2109                         continue;
 2110                 }
 2111 
 2112                 cred = curthread->td_ucred;
 2113                 obj = current->object.vm_object;
 2114 
 2115                 if (obj == NULL || (current->eflags & MAP_ENTRY_NEEDS_COPY)) {
 2116                         if (!swap_reserve(current->end - current->start)) {
 2117                                 vm_map_unlock(map);
 2118                                 return (KERN_RESOURCE_SHORTAGE);
 2119                         }
 2120                         crhold(cred);
 2121                         current->cred = cred;
 2122                         continue;
 2123                 }
 2124 
 2125                 VM_OBJECT_WLOCK(obj);
 2126                 if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) {
 2127                         VM_OBJECT_WUNLOCK(obj);
 2128                         continue;
 2129                 }
 2130 
 2131                 /*
 2132                  * Charge for the whole object allocation now, since
 2133                  * we cannot distinguish between non-charged and
 2134                  * charged clipped mapping of the same object later.
 2135                  */
 2136                 KASSERT(obj->charge == 0,
 2137                     ("vm_map_protect: object %p overcharged (entry %p)",
 2138                     obj, current));
 2139                 if (!swap_reserve(ptoa(obj->size))) {
 2140                         VM_OBJECT_WUNLOCK(obj);
 2141                         vm_map_unlock(map);
 2142                         return (KERN_RESOURCE_SHORTAGE);
 2143                 }
 2144 
 2145                 crhold(cred);
 2146                 obj->cred = cred;
 2147                 obj->charge = ptoa(obj->size);
 2148                 VM_OBJECT_WUNLOCK(obj);
 2149         }
 2150 
 2151         /*
 2152          * Go back and fix up protections. [Note that clipping is not
 2153          * necessary the second time.]
 2154          */
 2155         for (current = entry; current->start < end; current = current->next) {
 2156                 if ((current->eflags & MAP_ENTRY_GUARD) != 0)
 2157                         continue;
 2158 
 2159                 old_prot = current->protection;
 2160 
 2161                 if (set_max)
 2162                         current->protection =
 2163                             (current->max_protection = new_prot) &
 2164                             old_prot;
 2165                 else
 2166                         current->protection = new_prot;
 2167 
 2168                 /*
 2169                  * For user wired map entries, the normal lazy evaluation of
 2170                  * write access upgrades through soft page faults is
 2171                  * undesirable.  Instead, immediately copy any pages that are
 2172                  * copy-on-write and enable write access in the physical map.
 2173                  */
 2174                 if ((current->eflags & MAP_ENTRY_USER_WIRED) != 0 &&
 2175                     (current->protection & VM_PROT_WRITE) != 0 &&
 2176                     (old_prot & VM_PROT_WRITE) == 0)
 2177                         vm_fault_copy_entry(map, map, current, current, NULL);
 2178 
 2179                 /*
 2180                  * When restricting access, update the physical map.  Worry
 2181                  * about copy-on-write here.
 2182                  */
 2183                 if ((old_prot & ~current->protection) != 0) {
 2184 #define MASK(entry)     (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
 2185                                                         VM_PROT_ALL)
 2186                         pmap_protect(map->pmap, current->start,
 2187                             current->end,
 2188                             current->protection & MASK(current));
 2189 #undef  MASK
 2190                 }
 2191                 vm_map_simplify_entry(map, current);
 2192         }
 2193         vm_map_unlock(map);
 2194         return (KERN_SUCCESS);
 2195 }
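
/*
 * Usage sketch (hypothetical helper): drop write access on a range
 * while leaving the maximum protection untouched (set_max == FALSE),
 * the typical mprotect(2)-style downgrade.
 */
static int
example_make_readonly(vm_map_t map, vm_offset_t start, vm_offset_t end)
{

        return (vm_map_protect(map, start, end, VM_PROT_READ, FALSE));
}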
 2196 
 2197 /*
 2198  *      vm_map_madvise:
 2199  *
 2200  *      This routine traverses a process's map, handling the madvise
 2201  *      system call.  Advisories are classified as either those affecting
 2202  *      the vm_map_entry structure or those affecting the underlying
 2203  *      objects.
 2204  */
 2205 int
 2206 vm_map_madvise(
 2207         vm_map_t map,
 2208         vm_offset_t start,
 2209         vm_offset_t end,
 2210         int behav)
 2211 {
 2212         vm_map_entry_t current, entry;
 2213         bool modify_map;
 2214 
 2215         /*
 2216          * Some madvise calls directly modify the vm_map_entry, in which case
 2217          * we need to use an exclusive lock on the map and we need to perform
 2218          * various clipping operations.  Otherwise we only need a read-lock
 2219          * on the map.
 2220          */
 2221         switch (behav) {
 2222         case MADV_NORMAL:
 2223         case MADV_SEQUENTIAL:
 2224         case MADV_RANDOM:
 2225         case MADV_NOSYNC:
 2226         case MADV_AUTOSYNC:
 2227         case MADV_NOCORE:
 2228         case MADV_CORE:
 2229                 if (start == end)
 2230                         return (0);
 2231                 modify_map = true;
 2232                 vm_map_lock(map);
 2233                 break;
 2234         case MADV_WILLNEED:
 2235         case MADV_DONTNEED:
 2236         case MADV_FREE:
 2237                 if (start == end)
 2238                         return (0);
 2239                 modify_map = false;
 2240                 vm_map_lock_read(map);
 2241                 break;
 2242         default:
 2243                 return (EINVAL);
 2244         }
 2245 
 2246         /*
 2247          * Locate starting entry and clip if necessary.
 2248          */
 2249         VM_MAP_RANGE_CHECK(map, start, end);
 2250 
 2251         if (vm_map_lookup_entry(map, start, &entry)) {
 2252                 if (modify_map)
 2253                         vm_map_clip_start(map, entry, start);
 2254         } else {
 2255                 entry = entry->next;
 2256         }
 2257 
 2258         if (modify_map) {
 2259                 /*
 2260                  * madvise behaviors that are implemented in the vm_map_entry.
 2261                  *
 2262                  * We clip the vm_map_entry so that behavioral changes are
 2263                  * limited to the specified address range.
 2264                  */
 2265                 for (current = entry; current->start < end;
 2266                     current = current->next) {
 2267                         if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
 2268                                 continue;
 2269 
 2270                         vm_map_clip_end(map, current, end);
 2271 
 2272                         switch (behav) {
 2273                         case MADV_NORMAL:
 2274                                 vm_map_entry_set_behavior(current,
                                          MAP_ENTRY_BEHAV_NORMAL);
 2275                                 break;
 2276                         case MADV_SEQUENTIAL:
 2277                                 vm_map_entry_set_behavior(current,
                                          MAP_ENTRY_BEHAV_SEQUENTIAL);
 2278                                 break;
 2279                         case MADV_RANDOM:
 2280                                 vm_map_entry_set_behavior(current,
                                          MAP_ENTRY_BEHAV_RANDOM);
 2281                                 break;
 2282                         case MADV_NOSYNC:
 2283                                 current->eflags |= MAP_ENTRY_NOSYNC;
 2284                                 break;
 2285                         case MADV_AUTOSYNC:
 2286                                 current->eflags &= ~MAP_ENTRY_NOSYNC;
 2287                                 break;
 2288                         case MADV_NOCORE:
 2289                                 current->eflags |= MAP_ENTRY_NOCOREDUMP;
 2290                                 break;
 2291                         case MADV_CORE:
 2292                                 current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
 2293                                 break;
 2294                         default:
 2295                                 break;
 2296                         }
 2297                         vm_map_simplify_entry(map, current);
 2298                 }
 2299                 vm_map_unlock(map);
 2300         } else {
 2301                 vm_pindex_t pstart, pend;
 2302 
 2303                 /*
 2304                  * madvise behaviors that are implemented in the underlying
 2305                  * vm_object.
 2306                  *
 2307                  * Since we don't clip the vm_map_entry, we have to clip
 2308                  * the vm_object pindex and count.
 2309                  */
 2310                 for (current = entry; current->start < end;
 2311                     current = current->next) {
 2312                         vm_offset_t useEnd, useStart;
 2313 
 2314                         if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
 2315                                 continue;
 2316 
 2317                         pstart = OFF_TO_IDX(current->offset);
 2318                         pend = pstart + atop(current->end - current->start);
 2319                         useStart = current->start;
 2320                         useEnd = current->end;
 2321 
 2322                         if (current->start < start) {
 2323                                 pstart += atop(start - current->start);
 2324                                 useStart = start;
 2325                         }
 2326                         if (current->end > end) {
 2327                                 pend -= atop(current->end - end);
 2328                                 useEnd = end;
 2329                         }
 2330 
 2331                         if (pstart >= pend)
 2332                                 continue;
 2333 
 2334                         /*
 2335                          * Perform the pmap_advise() before clearing
 2336                          * PGA_REFERENCED in vm_page_advise().  Otherwise, a
 2337                          * concurrent pmap operation, such as pmap_remove(),
 2338                          * could clear a reference in the pmap and set
 2339                          * PGA_REFERENCED on the page before the pmap_advise()
 2340                          * had completed.  Consequently, the page would appear
 2341                          * referenced based upon an old reference that
 2342                          * occurred before this pmap_advise() ran.
 2343                          */
 2344                         if (behav == MADV_DONTNEED || behav == MADV_FREE)
 2345                                 pmap_advise(map->pmap, useStart, useEnd,
 2346                                     behav);
 2347 
 2348                         vm_object_madvise(current->object.vm_object, pstart,
 2349                             pend, behav);
 2350 
 2351                         /*
 2352                          * Pre-populate paging structures in the
 2353                          * WILLNEED case.  For wired entries, the
 2354                          * paging structures are already populated.
 2355                          */
 2356                         if (behav == MADV_WILLNEED &&
 2357                             current->wired_count == 0) {
 2358                                 vm_map_pmap_enter(map,
 2359                                     useStart,
 2360                                     current->protection,
 2361                                     current->object.vm_object,
 2362                                     pstart,
 2363                                     ptoa(pend - pstart),
 2364                                     MAP_PREFAULT_MADVISE
 2365                                 );
 2366                         }
 2367                 }
 2368                 vm_map_unlock_read(map);
 2369         }
 2370         return (0);
 2371 }
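
/*
 * Usage sketch (hypothetical helper): MADV_NOSYNC is an entry-level
 * advisory and takes the exclusive-lock path above, while MADV_WILLNEED
 * is object-level and only read-locks the map.
 */
static void
example_advise(vm_map_t map, vm_offset_t start, vm_offset_t end)
{

        (void)vm_map_madvise(map, start, end, MADV_NOSYNC);
        (void)vm_map_madvise(map, start, end, MADV_WILLNEED);
}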
 2372 
 2374 /*
 2375  *      vm_map_inherit:
 2376  *
 2377  *      Sets the inheritance of the specified address
 2378  *      range in the target map.  Inheritance
 2379  *      affects how the map will be shared with
 2380  *      child maps at the time of vmspace_fork.
 2381  */
 2382 int
 2383 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
 2384                vm_inherit_t new_inheritance)
 2385 {
 2386         vm_map_entry_t entry;
 2387         vm_map_entry_t temp_entry;
 2388 
 2389         switch (new_inheritance) {
 2390         case VM_INHERIT_NONE:
 2391         case VM_INHERIT_COPY:
 2392         case VM_INHERIT_SHARE:
 2393         case VM_INHERIT_ZERO:
 2394                 break;
 2395         default:
 2396                 return (KERN_INVALID_ARGUMENT);
 2397         }
 2398         if (start == end)
 2399                 return (KERN_SUCCESS);
 2400         vm_map_lock(map);
 2401         VM_MAP_RANGE_CHECK(map, start, end);
 2402         if (vm_map_lookup_entry(map, start, &temp_entry)) {
 2403                 entry = temp_entry;
 2404                 vm_map_clip_start(map, entry, start);
 2405         } else
 2406                 entry = temp_entry->next;
 2407         while (entry->start < end) {
 2408                 vm_map_clip_end(map, entry, end);
 2409                 if ((entry->eflags & MAP_ENTRY_GUARD) == 0 ||
 2410                     new_inheritance != VM_INHERIT_ZERO)
 2411                         entry->inheritance = new_inheritance;
 2412                 vm_map_simplify_entry(map, entry);
 2413                 entry = entry->next;
 2414         }
 2415         vm_map_unlock(map);
 2416         return (KERN_SUCCESS);
 2417 }
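
/*
 * Usage sketch (hypothetical helper): keep a range out of child
 * processes entirely, so that vmspace_fork() neither shares nor copies
 * it.
 */
static int
example_no_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end)
{

        return (vm_map_inherit(map, start, end, VM_INHERIT_NONE));
}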
 2418 
 2419 /*
 2420  *      vm_map_unwire:
 2421  *
 2422  *      Implements both kernel and user unwiring.
 2423  */
 2424 int
 2425 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
 2426     int flags)
 2427 {
 2428         vm_map_entry_t entry, first_entry, tmp_entry;
 2429         vm_offset_t saved_start;
 2430         unsigned int last_timestamp;
 2431         int rv;
 2432         boolean_t need_wakeup, result, user_unwire;
 2433 
 2434         if (start == end)
 2435                 return (KERN_SUCCESS);
 2436         user_unwire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
 2437         vm_map_lock(map);
 2438         VM_MAP_RANGE_CHECK(map, start, end);
 2439         if (!vm_map_lookup_entry(map, start, &first_entry)) {
 2440                 if (flags & VM_MAP_WIRE_HOLESOK)
 2441                         first_entry = first_entry->next;
 2442                 else {
 2443                         vm_map_unlock(map);
 2444                         return (KERN_INVALID_ADDRESS);
 2445                 }
 2446         }
 2447         last_timestamp = map->timestamp;
 2448         entry = first_entry;
 2449         while (entry->start < end) {
 2450                 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
 2451                         /*
 2452                          * We have not yet clipped the entry.
 2453                          */
 2454                         saved_start = (start >= entry->start) ? start :
 2455                             entry->start;
 2456                         entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
 2457                         if (vm_map_unlock_and_wait(map, 0)) {
 2458                                 /*
 2459                                  * Allow interruption of user unwiring?
 2460                                  */
 2461                         }
 2462                         vm_map_lock(map);
 2463                         if (last_timestamp+1 != map->timestamp) {
 2464                                 /*
 2465                                  * Look again for the entry because the map was
 2466                                  * modified while it was unlocked.
 2467                                  * Specifically, the entry may have been
 2468                                  * clipped, merged, or deleted.
 2469                                  */
 2470                                 if (!vm_map_lookup_entry(map, saved_start,
 2471                                     &tmp_entry)) {
 2472                                         if (flags & VM_MAP_WIRE_HOLESOK)
 2473                                                 tmp_entry = tmp_entry->next;
 2474                                         else {
 2475                                                 if (saved_start == start) {
 2476                                                         /*
 2477                                                          * First_entry has been deleted.
 2478                                                          * first_entry has been deleted.
 2479                                                         vm_map_unlock(map);
 2480                                                         return (KERN_INVALID_ADDRESS);
 2481                                                 }
 2482                                                 end = saved_start;
 2483                                                 rv = KERN_INVALID_ADDRESS;
 2484                                                 goto done;
 2485                                         }
 2486                                 }
 2487                                 if (entry == first_entry)
 2488                                         first_entry = tmp_entry;
 2489                                 else
 2490                                         first_entry = NULL;
 2491                                 entry = tmp_entry;
 2492                         }
 2493                         last_timestamp = map->timestamp;
 2494                         continue;
 2495                 }
 2496                 vm_map_clip_start(map, entry, start);
 2497                 vm_map_clip_end(map, entry, end);
 2498                 /*
 2499                  * Mark the entry in case the map lock is released.  (See
 2500                  * above.)
 2501                  */
 2502                 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
 2503                     entry->wiring_thread == NULL,
 2504                     ("owned map entry %p", entry));
 2505                 entry->eflags |= MAP_ENTRY_IN_TRANSITION;
 2506                 entry->wiring_thread = curthread;
 2507                 /*
 2508                  * Check the map for holes in the specified region.
 2509                  * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
 2510                  */
 2511                 if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
 2512                     (entry->end < end && entry->next->start > entry->end)) {
 2513                         end = entry->end;
 2514                         rv = KERN_INVALID_ADDRESS;
 2515                         goto done;
 2516                 }
 2517                 /*
 2518                  * If system unwiring, require that the entry is system wired.
 2519                  */
 2520                 if (!user_unwire &&
 2521                     vm_map_entry_system_wired_count(entry) == 0) {
 2522                         end = entry->end;
 2523                         rv = KERN_INVALID_ARGUMENT;
 2524                         goto done;
 2525                 }
 2526                 entry = entry->next;
 2527         }
 2528         rv = KERN_SUCCESS;
 2529 done:
 2530         need_wakeup = FALSE;
 2531         if (first_entry == NULL) {
 2532                 result = vm_map_lookup_entry(map, start, &first_entry);
 2533                 if (!result && (flags & VM_MAP_WIRE_HOLESOK))
 2534                         first_entry = first_entry->next;
 2535                 else
 2536                         KASSERT(result, ("vm_map_unwire: lookup failed"));
 2537         }
 2538         for (entry = first_entry; entry->start < end; entry = entry->next) {
 2539                 /*
 2540                  * If VM_MAP_WIRE_HOLESOK was specified, an empty
 2541                  * space in the unwired region could have been mapped
 2542                  * while the map lock was dropped for draining
 2543                  * MAP_ENTRY_IN_TRANSITION.  Moreover, another thread
 2544                  * could be simultaneously wiring this new mapping
 2545                  * entry.  Detect these cases and skip any entries
 2546                  * marked as in transition by us.
 2547                  */
 2548                 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
 2549                     entry->wiring_thread != curthread) {
 2550                         KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0,
 2551                             ("vm_map_unwire: !HOLESOK and new/changed entry"));
 2552                         continue;
 2553                 }
 2554 
 2555                 if (rv == KERN_SUCCESS && (!user_unwire ||
 2556                     (entry->eflags & MAP_ENTRY_USER_WIRED))) {
 2557                         if (user_unwire)
 2558                                 entry->eflags &= ~MAP_ENTRY_USER_WIRED;
 2559                         if (entry->wired_count == 1)
 2560                                 vm_map_entry_unwire(map, entry);
 2561                         else
 2562                                 entry->wired_count--;
 2563                 }
 2564                 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
 2565                     ("vm_map_unwire: in-transition flag missing %p", entry));
 2566                 KASSERT(entry->wiring_thread == curthread,
 2567                     ("vm_map_unwire: alien wire %p", entry));
 2568                 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
 2569                 entry->wiring_thread = NULL;
 2570                 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
 2571                         entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
 2572                         need_wakeup = TRUE;
 2573                 }
 2574                 vm_map_simplify_entry(map, entry);
 2575         }
 2576         vm_map_unlock(map);
 2577         if (need_wakeup)
 2578                 vm_map_wakeup(map);
 2579         return (rv);
 2580 }
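
/*
 * Usage sketch (hypothetical helper): a user unwire in the style of the
 * munlock(2) path (the flag choice is an assumption), failing if the
 * range contains holes.
 */
static int
example_user_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end)
{

        return (vm_map_unwire(map, start, end,
            VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES));
}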
 2581 
 2582 /*
 2583  *      vm_map_wire_entry_failure:
 2584  *
 2585  *      Handle a wiring failure on the given entry.
 2586  *
 2587  *      The map should be locked.
 2588  */
 2589 static void
 2590 vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
 2591     vm_offset_t failed_addr)
 2592 {
 2593 
 2594         VM_MAP_ASSERT_LOCKED(map);
 2595         KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 &&
 2596             entry->wired_count == 1,
 2597             ("vm_map_wire_entry_failure: entry %p isn't being wired", entry));
 2598         KASSERT(failed_addr < entry->end,
 2599             ("vm_map_wire_entry_failure: entry %p was fully wired", entry));
 2600 
 2601         /*
 2602          * If any pages at the start of this entry were successfully wired,
 2603          * then unwire them.
 2604          */
 2605         if (failed_addr > entry->start) {
 2606                 pmap_unwire(map->pmap, entry->start, failed_addr);
 2607                 vm_object_unwire(entry->object.vm_object, entry->offset,
 2608                     failed_addr - entry->start, PQ_ACTIVE);
 2609         }
 2610 
 2611         /*
 2612          * Assign an out-of-range value to represent the failure to wire this
 2613          * entry.
 2614          */
 2615         entry->wired_count = -1;
 2616 }
 2617 
 2618 /*
 2619  *      vm_map_wire:
 2620  *
 2621  *      Implements both kernel and user wiring.
 2622  */
 2623 int
 2624 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
 2625     int flags)
 2626 {
 2627         vm_map_entry_t entry, first_entry, tmp_entry;
 2628         vm_offset_t faddr, saved_end, saved_start;
 2629         unsigned int last_timestamp;
 2630         int rv;
 2631         boolean_t need_wakeup, result, user_wire;
 2632         vm_prot_t prot;
 2633 
 2634         if (start == end)
 2635                 return (KERN_SUCCESS);
 2636         prot = 0;
 2637         if (flags & VM_MAP_WIRE_WRITE)
 2638                 prot |= VM_PROT_WRITE;
 2639         user_wire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
 2640         vm_map_lock(map);
 2641         VM_MAP_RANGE_CHECK(map, start, end);
 2642         if (!vm_map_lookup_entry(map, start, &first_entry)) {
 2643                 if (flags & VM_MAP_WIRE_HOLESOK)
 2644                         first_entry = first_entry->next;
 2645                 else {
 2646                         vm_map_unlock(map);
 2647                         return (KERN_INVALID_ADDRESS);
 2648                 }
 2649         }
 2650         last_timestamp = map->timestamp;
 2651         entry = first_entry;
 2652         while (entry->start < end) {
 2653                 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
 2654                         /*
 2655                          * We have not yet clipped the entry.
 2656                          */
 2657                         saved_start = (start >= entry->start) ? start :
 2658                             entry->start;
 2659                         entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
 2660                         if (vm_map_unlock_and_wait(map, 0)) {
 2661                                 /*
 2662                                  * Allow interruption of user wiring?
 2663                                  */
 2664                         }
 2665                         vm_map_lock(map);
 2666                         if (last_timestamp + 1 != map->timestamp) {
 2667                                 /*
 2668                                  * Look again for the entry because the map was
 2669                                  * modified while it was unlocked.
 2670                                  * Specifically, the entry may have been
 2671                                  * clipped, merged, or deleted.
 2672                                  */
 2673                                 if (!vm_map_lookup_entry(map, saved_start,
 2674                                     &tmp_entry)) {
 2675                                         if (flags & VM_MAP_WIRE_HOLESOK)
 2676                                                 tmp_entry = tmp_entry->next;
 2677                                         else {
 2678                                                 if (saved_start == start) {
 2679                                                         /*
 2680                                                          * first_entry has been deleted.
 2681                                                          */
 2682                                                         vm_map_unlock(map);
 2683                                                         return (KERN_INVALID_ADDRESS);
 2684                                                 }
 2685                                                 end = saved_start;
 2686                                                 rv = KERN_INVALID_ADDRESS;
 2687                                                 goto done;
 2688                                         }
 2689                                 }
 2690                                 if (entry == first_entry)
 2691                                         first_entry = tmp_entry;
 2692                                 else
 2693                                         first_entry = NULL;
 2694                                 entry = tmp_entry;
 2695                         }
 2696                         last_timestamp = map->timestamp;
 2697                         continue;
 2698                 }
 2699                 vm_map_clip_start(map, entry, start);
 2700                 vm_map_clip_end(map, entry, end);
 2701                 /*
 2702                  * Mark the entry in case the map lock is released.  (See
 2703                  * above.)
 2704                  */
 2705                 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
 2706                     entry->wiring_thread == NULL,
 2707                     ("owned map entry %p", entry));
 2708                 entry->eflags |= MAP_ENTRY_IN_TRANSITION;
 2709                 entry->wiring_thread = curthread;
 2710                 if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 ||
 2711                     (entry->protection & prot) != prot) {
 2712                         entry->eflags |= MAP_ENTRY_WIRE_SKIPPED;
 2713                         if ((flags & VM_MAP_WIRE_HOLESOK) == 0) {
 2714                                 end = entry->end;
 2715                                 rv = KERN_INVALID_ADDRESS;
 2716                                 goto done;
 2717                         }
 2718                         goto next_entry;
 2719                 }
 2720                 if (entry->wired_count == 0) {
 2721                         entry->wired_count++;
 2722                         saved_start = entry->start;
 2723                         saved_end = entry->end;
 2724 
 2725                         /*
 2726                          * Release the map lock, relying on the in-transition
 2727                          * mark.  Mark the map busy for fork.
 2728                          */
 2729                         vm_map_busy(map);
 2730                         vm_map_unlock(map);
 2731 
 2732                         faddr = saved_start;
 2733                         do {
 2734                                 /*
 2735                                  * Simulate a fault to get the page and enter
 2736                                  * it into the physical map.
 2737                                  */
 2738                                 if ((rv = vm_fault(map, faddr, VM_PROT_NONE,
 2739                                     VM_FAULT_WIRE)) != KERN_SUCCESS)
 2740                                         break;
 2741                         } while ((faddr += PAGE_SIZE) < saved_end);
 2742                         vm_map_lock(map);
 2743                         vm_map_unbusy(map);
 2744                         if (last_timestamp + 1 != map->timestamp) {
 2745                                 /*
 2746                                  * Look again for the entry because the map was
 2747                                  * modified while it was unlocked.  The entry
 2748                                  * may have been clipped, but NOT merged or
 2749                                  * deleted.
 2750                                  */
 2751                                 result = vm_map_lookup_entry(map, saved_start,
 2752                                     &tmp_entry);
 2753                                 KASSERT(result, ("vm_map_wire: lookup failed"));
 2754                                 if (entry == first_entry)
 2755                                         first_entry = tmp_entry;
 2756                                 else
 2757                                         first_entry = NULL;
 2758                                 entry = tmp_entry;
 2759                                 while (entry->end < saved_end) {
 2760                                         /*
 2761                                          * In case of failure, handle entries
 2762                                          * that were not fully wired here;
 2763                                          * fully wired entries are handled
 2764                                          * later.
 2765                                          */
 2766                                         if (rv != KERN_SUCCESS &&
 2767                                             faddr < entry->end)
 2768                                                 vm_map_wire_entry_failure(map,
 2769                                                     entry, faddr);
 2770                                         entry = entry->next;
 2771                                 }
 2772                         }
 2773                         last_timestamp = map->timestamp;
 2774                         if (rv != KERN_SUCCESS) {
 2775                                 vm_map_wire_entry_failure(map, entry, faddr);
 2776                                 end = entry->end;
 2777                                 goto done;
 2778                         }
 2779                 } else if (!user_wire ||
 2780                     (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
 2781                         entry->wired_count++;
 2782                 }
 2783                 /*
 2784                  * Check the map for holes in the specified region.
 2785                  * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
 2786                  */
 2787         next_entry:
 2788                 if ((flags & VM_MAP_WIRE_HOLESOK) == 0 &&
 2789                     entry->end < end && entry->next->start > entry->end) {
 2790                         end = entry->end;
 2791                         rv = KERN_INVALID_ADDRESS;
 2792                         goto done;
 2793                 }
 2794                 entry = entry->next;
 2795         }
 2796         rv = KERN_SUCCESS;
 2797 done:
 2798         need_wakeup = FALSE;
 2799         if (first_entry == NULL) {
 2800                 result = vm_map_lookup_entry(map, start, &first_entry);
 2801                 if (!result && (flags & VM_MAP_WIRE_HOLESOK))
 2802                         first_entry = first_entry->next;
 2803                 else
 2804                         KASSERT(result, ("vm_map_wire: lookup failed"));
 2805         }
 2806         for (entry = first_entry; entry->start < end; entry = entry->next) {
 2807                 /*
 2808                  * If VM_MAP_WIRE_HOLESOK was specified, an empty
 2809                  * space in the unwired region could have been mapped
 2810                  * while the map lock was dropped for faulting in the
 2811                  * pages or draining MAP_ENTRY_IN_TRANSITION.
 2812                  * Moreover, another thread could be simultaneously
 2813                  * wiring this new mapping entry.  Detect these cases
 2814                  * and skip any entries marked as in transition not by us.
 2815                  */
 2816                 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
 2817                     entry->wiring_thread != curthread) {
 2818                         KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0,
 2819                             ("vm_map_wire: !HOLESOK and new/changed entry"));
 2820                         continue;
 2821                 }
 2822 
 2823                 if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0)
 2824                         goto next_entry_done;
 2825 
 2826                 if (rv == KERN_SUCCESS) {
 2827                         if (user_wire)
 2828                                 entry->eflags |= MAP_ENTRY_USER_WIRED;
 2829                 } else if (entry->wired_count == -1) {
 2830                         /*
 2831                          * Wiring failed on this entry.  Thus, unwiring is
 2832                          * unnecessary.
 2833                          */
 2834                         entry->wired_count = 0;
 2835                 } else if (!user_wire ||
 2836                     (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
 2837                         /*
 2838                          * Undo the wiring.  Wiring succeeded on this entry
 2839                          * but failed on a later entry.  
 2840                          */
 2841                         if (entry->wired_count == 1)
 2842                                 vm_map_entry_unwire(map, entry);
 2843                         else
 2844                                 entry->wired_count--;
 2845                 }
 2846         next_entry_done:
 2847                 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
 2848                     ("vm_map_wire: in-transition flag missing %p", entry));
 2849                 KASSERT(entry->wiring_thread == curthread,
 2850                     ("vm_map_wire: alien wire %p", entry));
 2851                 entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION |
 2852                     MAP_ENTRY_WIRE_SKIPPED);
 2853                 entry->wiring_thread = NULL;
 2854                 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
 2855                         entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
 2856                         need_wakeup = TRUE;
 2857                 }
 2858                 vm_map_simplify_entry(map, entry);
 2859         }
 2860         vm_map_unlock(map);
 2861         if (need_wakeup)
 2862                 vm_map_wakeup(map);
 2863         return (rv);
 2864 }
 2865 
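      /*
       * Editor's example (hypothetical; not part of vm_map.c): the
       * mlock(2) path is a typical caller of vm_map_wire(), wiring a
       * page-aligned user range with holes disallowed.  A minimal
       * sketch, assuming "p", "addr", and "len" were already validated:
       *
       *         vm_map_t map = &p->p_vmspace->vm_map;
       *         vm_offset_t start = trunc_page(addr);
       *         vm_offset_t end = round_page(addr + len);
       *         int rv;
       *
       *         rv = vm_map_wire(map, start, end,
       *             VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
       *         return (rv == KERN_SUCCESS ? 0 : ENOMEM);
       */
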
 2866 /*
 2867  * vm_map_sync
 2868  *
 2869  * Push any dirty cached pages in the address range to their pager.
 2870  * If syncio is TRUE, dirty pages are written synchronously.
 2871  * If invalidate is TRUE, any cached pages are freed as well.
 2872  *
 2873  * If the size of the region from start to end is zero, we are
 2874  * supposed to flush all modified pages within the region containing
 2875  * start.  Unfortunately, a region can be split or coalesced with
 2876  * neighboring regions, making it difficult to determine what the
 2877  * original region was.  Therefore, we approximate this requirement by
 2878  * flushing the current region containing start.
 2879  *
 2880  * Returns an error if any part of the specified range is not mapped.
 2881  */
 2882 int
 2883 vm_map_sync(
 2884         vm_map_t map,
 2885         vm_offset_t start,
 2886         vm_offset_t end,
 2887         boolean_t syncio,
 2888         boolean_t invalidate)
 2889 {
 2890         vm_map_entry_t current;
 2891         vm_map_entry_t entry;
 2892         vm_size_t size;
 2893         vm_object_t object;
 2894         vm_ooffset_t offset;
 2895         unsigned int last_timestamp;
 2896         boolean_t failed;
 2897 
 2898         vm_map_lock_read(map);
 2899         VM_MAP_RANGE_CHECK(map, start, end);
 2900         if (!vm_map_lookup_entry(map, start, &entry)) {
 2901                 vm_map_unlock_read(map);
 2902                 return (KERN_INVALID_ADDRESS);
 2903         } else if (start == end) {
 2904                 start = entry->start;
 2905                 end = entry->end;
 2906         }
 2907         /*
 2908          * Make a first pass to check for user-wired memory and holes.
 2909          */
 2910         for (current = entry; current->start < end; current = current->next) {
 2911                 if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) {
 2912                         vm_map_unlock_read(map);
 2913                         return (KERN_INVALID_ARGUMENT);
 2914                 }
 2915                 if (end > current->end &&
 2916                     current->end != current->next->start) {
 2917                         vm_map_unlock_read(map);
 2918                         return (KERN_INVALID_ADDRESS);
 2919                 }
 2920         }
 2921 
 2922         if (invalidate)
 2923                 pmap_remove(map->pmap, start, end);
 2924         failed = FALSE;
 2925 
 2926         /*
 2927          * Make a second pass, cleaning/uncaching pages from the indicated
 2928          * objects as we go.
 2929          */
 2930         for (current = entry; current->start < end;) {
 2931                 offset = current->offset + (start - current->start);
 2932                 size = (end <= current->end ? end : current->end) - start;
 2933                 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
 2934                         vm_map_t smap;
 2935                         vm_map_entry_t tentry;
 2936                         vm_size_t tsize;
 2937 
 2938                         smap = current->object.sub_map;
 2939                         vm_map_lock_read(smap);
 2940                         (void) vm_map_lookup_entry(smap, offset, &tentry);
 2941                         tsize = tentry->end - offset;
 2942                         if (tsize < size)
 2943                                 size = tsize;
 2944                         object = tentry->object.vm_object;
 2945                         offset = tentry->offset + (offset - tentry->start);
 2946                         vm_map_unlock_read(smap);
 2947                 } else {
 2948                         object = current->object.vm_object;
 2949                 }
 2950                 vm_object_reference(object);
 2951                 last_timestamp = map->timestamp;
 2952                 vm_map_unlock_read(map);
 2953                 if (!vm_object_sync(object, offset, size, syncio, invalidate))
 2954                         failed = TRUE;
 2955                 start += size;
 2956                 vm_object_deallocate(object);
 2957                 vm_map_lock_read(map);
 2958                 if (last_timestamp == map->timestamp ||
 2959                     !vm_map_lookup_entry(map, start, &current))
 2960                         current = current->next;
 2961         }
 2962 
 2963         vm_map_unlock_read(map);
 2964         return (failed ? KERN_FAILURE : KERN_SUCCESS);
 2965 }
 2966 
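      /*
       * Editor's example (hypothetical; not part of vm_map.c): msync(2)
       * is the natural consumer of vm_map_sync().  A minimal sketch of
       * the flag translation, assuming "addr" and "size" are already
       * page-aligned:
       *
       *         syncio = (flags & MS_ASYNC) == 0;
       *         invalidate = (flags & MS_INVALIDATE) != 0;
       *         rv = vm_map_sync(map, addr, addr + size, syncio,
       *             invalidate);
       *
       * KERN_INVALID_ADDRESS is typically translated to ENOMEM, and
       * KERN_INVALID_ARGUMENT (invalidating user-wired pages) to EBUSY.
       */
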
 2967 /*
 2968  *      vm_map_entry_unwire:    [ internal use only ]
 2969  *
 2970  *      Make the region specified by this entry pageable.
 2971  *
 2972  *      The map in question should be locked.
 2973  *      [This is the reason for this routine's existence.]
 2974  */
 2975 static void
 2976 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
 2977 {
 2978 
 2979         VM_MAP_ASSERT_LOCKED(map);
 2980         KASSERT(entry->wired_count > 0,
 2981             ("vm_map_entry_unwire: entry %p isn't wired", entry));
 2982         pmap_unwire(map->pmap, entry->start, entry->end);
 2983         vm_object_unwire(entry->object.vm_object, entry->offset, entry->end -
 2984             entry->start, PQ_ACTIVE);
 2985         entry->wired_count = 0;
 2986 }
 2987 
 2988 static void
 2989 vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map)
 2990 {
 2991 
 2992         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0)
 2993                 vm_object_deallocate(entry->object.vm_object);
 2994         uma_zfree(system_map ? kmapentzone : mapentzone, entry);
 2995 }
 2996 
 2997 /*
 2998  *      vm_map_entry_delete:    [ internal use only ]
 2999  *
 3000  *      Deallocate the given entry from the target map.
 3001  */
 3002 static void
 3003 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
 3004 {
 3005         vm_object_t object;
 3006         vm_pindex_t offidxstart, offidxend, count, size1;
 3007         vm_size_t size;
 3008 
 3009         vm_map_entry_unlink(map, entry);
 3010         object = entry->object.vm_object;
 3011 
 3012         if ((entry->eflags & MAP_ENTRY_GUARD) != 0) {
 3013                 MPASS(entry->cred == NULL);
 3014                 MPASS((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0);
 3015                 MPASS(object == NULL);
 3016                 vm_map_entry_deallocate(entry, map->system_map);
 3017                 return;
 3018         }
 3019 
 3020         size = entry->end - entry->start;
 3021         map->size -= size;
 3022 
 3023         if (entry->cred != NULL) {
 3024                 swap_release_by_cred(size, entry->cred);
 3025                 crfree(entry->cred);
 3026         }
 3027 
 3028         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
 3029             (object != NULL)) {
 3030                 KASSERT(entry->cred == NULL || object->cred == NULL ||
 3031                     (entry->eflags & MAP_ENTRY_NEEDS_COPY),
 3032                     ("OVERCOMMIT vm_map_entry_delete: both cred %p", entry));
 3033                 count = atop(size);
 3034                 offidxstart = OFF_TO_IDX(entry->offset);
 3035                 offidxend = offidxstart + count;
 3036                 VM_OBJECT_WLOCK(object);
 3037                 if (object->ref_count != 1 && ((object->flags & (OBJ_NOSPLIT |
 3038                     OBJ_ONEMAPPING)) == OBJ_ONEMAPPING ||
 3039                     object == kernel_object)) {
 3040                         vm_object_collapse(object);
 3041 
 3042                         /*
 3043                          * The option OBJPR_NOTMAPPED can be passed here
 3044                          * because vm_map_delete() already performed
 3045                          * pmap_remove() on the only mapping to this range
 3046                          * of pages. 
 3047                          */
 3048                         vm_object_page_remove(object, offidxstart, offidxend,
 3049                             OBJPR_NOTMAPPED);
 3050                         if (object->type == OBJT_SWAP)
 3051                                 swap_pager_freespace(object, offidxstart,
 3052                                     count);
 3053                         if (offidxend >= object->size &&
 3054                             offidxstart < object->size) {
 3055                                 size1 = object->size;
 3056                                 object->size = offidxstart;
 3057                                 if (object->cred != NULL) {
 3058                                         size1 -= object->size;
 3059                                         KASSERT(object->charge >= ptoa(size1),
 3060                                             ("object %p charge < 0", object));
 3061                                         swap_release_by_cred(ptoa(size1),
 3062                                             object->cred);
 3063                                         object->charge -= ptoa(size1);
 3064                                 }
 3065                         }
 3066                 }
 3067                 VM_OBJECT_WUNLOCK(object);
 3068         } else
 3069                 entry->object.vm_object = NULL;
 3070         if (map->system_map)
 3071                 vm_map_entry_deallocate(entry, TRUE);
 3072         else {
 3073                 entry->next = curthread->td_map_def_user;
 3074                 curthread->td_map_def_user = entry;
 3075         }
 3076 }
 3077 
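      /*
       * Editor's note (illustrative): entries queued on td_map_def_user
       * above are drained by vm_map_process_deferred() once all map
       * locks have been dropped.  A simplified sketch of that consumer
       * (the real routine also releases vnode write counts for
       * MAP_ENTRY_VN_WRITECNT entries):
       *
       *         entry = curthread->td_map_def_user;
       *         curthread->td_map_def_user = NULL;
       *         while (entry != NULL) {
       *                 next = entry->next;
       *                 vm_map_entry_deallocate(entry, FALSE);
       *                 entry = next;
       *         }
       */
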
 3078 /*
 3079  *      vm_map_delete:  [ internal use only ]
 3080  *
 3081  *      Deallocates the given address range from the target
 3082  *      map.
 3083  */
 3084 int
 3085 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
 3086 {
 3087         vm_map_entry_t entry;
 3088         vm_map_entry_t first_entry;
 3089 
 3090         VM_MAP_ASSERT_LOCKED(map);
 3091         if (start == end)
 3092                 return (KERN_SUCCESS);
 3093 
 3094         /*
 3095          * Find the start of the region, and clip it
 3096          */
 3097         if (!vm_map_lookup_entry(map, start, &first_entry))
 3098                 entry = first_entry->next;
 3099         else {
 3100                 entry = first_entry;
 3101                 vm_map_clip_start(map, entry, start);
 3102         }
 3103 
 3104         /*
 3105          * Step through all entries in this region
 3106          */
 3107         while (entry->start < end) {
 3108                 vm_map_entry_t next;
 3109 
 3110                 /*
 3111                  * Wait for wiring or unwiring of an entry to complete.
 3112                  * Also wait for any system wirings to disappear on
 3113                  * user maps.
 3114                  */
 3115                 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 ||
 3116                     (vm_map_pmap(map) != kernel_pmap &&
 3117                     vm_map_entry_system_wired_count(entry) != 0)) {
 3118                         unsigned int last_timestamp;
 3119                         vm_offset_t saved_start;
 3120                         vm_map_entry_t tmp_entry;
 3121 
 3122                         saved_start = entry->start;
 3123                         entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
 3124                         last_timestamp = map->timestamp;
 3125                         (void) vm_map_unlock_and_wait(map, 0);
 3126                         vm_map_lock(map);
 3127                         if (last_timestamp + 1 != map->timestamp) {
 3128                                 /*
 3129                                  * Look again for the entry because the map was
 3130                                  * modified while it was unlocked.
 3131                                  * Specifically, the entry may have been
 3132                                  * clipped, merged, or deleted.
 3133                                  */
 3134                                 if (!vm_map_lookup_entry(map, saved_start,
 3135                                     &tmp_entry))
 3136                                         entry = tmp_entry->next;
 3137                                 else {
 3138                                         entry = tmp_entry;
 3139                                         vm_map_clip_start(map, entry,
 3140                                             saved_start);
 3141                                 }
 3142                         }
 3143                         continue;
 3144                 }
 3145                 vm_map_clip_end(map, entry, end);
 3146 
 3147                 next = entry->next;
 3148 
 3149                 /*
 3150                  * Unwire before removing addresses from the pmap; otherwise,
 3151                  * unwiring will put the entries back in the pmap.
 3152                  */
 3153                 if (entry->wired_count != 0)
 3154                         vm_map_entry_unwire(map, entry);
 3155 
 3156                 /*
 3157                  * Remove mappings for the pages, but only if the
 3158                  * mappings could exist.  For instance, it does not
 3159                  * make sense to call pmap_remove() for guard entries.
 3160                  */
 3161                 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 ||
 3162                     entry->object.vm_object != NULL)
 3163                         pmap_remove(map->pmap, entry->start, entry->end);
 3164 
 3165                 /*
 3166                  * Delete the entry only after removing all pmap
 3167                  * entries pointing to its pages.  (Otherwise, its
 3168                  * page frames may be reallocated, and any modify bits
 3169                  * will be set in the wrong object!)
 3170                  */
 3171                 vm_map_entry_delete(map, entry);
 3172                 entry = next;
 3173         }
 3174         return (KERN_SUCCESS);
 3175 }
 3176 
 3177 /*
 3178  *      vm_map_remove:
 3179  *
 3180  *      Remove the given address range from the target map.
 3181  *      This is the exported form of vm_map_delete.
 3182  */
 3183 int
 3184 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
 3185 {
 3186         int result;
 3187 
 3188         vm_map_lock(map);
 3189         VM_MAP_RANGE_CHECK(map, start, end);
 3190         result = vm_map_delete(map, start, end);
 3191         vm_map_unlock(map);
 3192         return (result);
 3193 }
 3194 
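      /*
       * Editor's example (hypothetical; not part of vm_map.c): callers
       * that do not already hold the map lock use vm_map_remove(), e.g.
       * to tear down a kernel mapping of "size" bytes at "addr":
       *
       *         (void)vm_map_remove(kernel_map, addr, addr + size);
       *
       * Callers that hold the map lock, such as the munmap(2) path,
       * call vm_map_delete() directly.
       */
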
 3195 /*
 3196  *      vm_map_check_protection:
 3197  *
 3198  *      Assert that the target map allows the specified privilege on the
 3199  *      entire address region given.  The entire region must be allocated.
 3200  *
 3201  *      WARNING!  This code does not and should not check whether the
 3202  *      contents of the region are accessible.  For example, a smaller file
 3203  *      might be mapped into a larger address space.
 3204  *
 3205  *      NOTE!  This code is also called by munmap().
 3206  *
 3207  *      The map must be locked.  A read lock is sufficient.
 3208  */
 3209 boolean_t
 3210 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
 3211                         vm_prot_t protection)
 3212 {
 3213         vm_map_entry_t entry;
 3214         vm_map_entry_t tmp_entry;
 3215 
 3216         if (!vm_map_lookup_entry(map, start, &tmp_entry))
 3217                 return (FALSE);
 3218         entry = tmp_entry;
 3219 
 3220         while (start < end) {
 3221                 /*
 3222                  * No holes allowed!
 3223                  */
 3224                 if (start < entry->start)
 3225                         return (FALSE);
 3226                 /*
 3227                  * Check protection associated with entry.
 3228                  */
 3229                 if ((entry->protection & protection) != protection)
 3230                         return (FALSE);
 3231                 /* go to next entry */
 3232                 start = entry->end;
 3233                 entry = entry->next;
 3234         }
 3235         return (TRUE);
 3236 }
 3237 
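      /*
       * Editor's example (hypothetical; not part of vm_map.c): a caller
       * verifying that an entire user range is readable before acting
       * on it.  Note that the map read lock must be held:
       *
       *         vm_map_lock_read(map);
       *         ok = vm_map_check_protection(map, start, end,
       *             VM_PROT_READ);
       *         vm_map_unlock_read(map);
       *         if (!ok)
       *                 return (EFAULT);
       */
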
 3238 /*
 3239  *      vm_map_copy_entry:
 3240  *
 3241  *      Copies the contents of the source entry to the destination
 3242  *      entry.  The entries *must* be aligned properly.
 3243  */
 3244 static void
 3245 vm_map_copy_entry(
 3246         vm_map_t src_map,
 3247         vm_map_t dst_map,
 3248         vm_map_entry_t src_entry,
 3249         vm_map_entry_t dst_entry,
 3250         vm_ooffset_t *fork_charge)
 3251 {
 3252         vm_object_t src_object;
 3253         vm_map_entry_t fake_entry;
 3254         vm_offset_t size;
 3255         struct ucred *cred;
 3256         int charged;
 3257 
 3258         VM_MAP_ASSERT_LOCKED(dst_map);
 3259 
 3260         if ((dst_entry->eflags | src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
 3261                 return;
 3262 
 3263         if (src_entry->wired_count == 0 ||
 3264             (src_entry->protection & VM_PROT_WRITE) == 0) {
 3265                 /*
 3266                  * If the source entry is marked needs_copy, it is already
 3267                  * write-protected.
 3268                  */
 3269                 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0 &&
 3270                     (src_entry->protection & VM_PROT_WRITE) != 0) {
 3271                         pmap_protect(src_map->pmap,
 3272                             src_entry->start,
 3273                             src_entry->end,
 3274                             src_entry->protection & ~VM_PROT_WRITE);
 3275                 }
 3276 
 3277                 /*
 3278                  * Make a copy of the object.
 3279                  */
 3280                 size = src_entry->end - src_entry->start;
 3281                 if ((src_object = src_entry->object.vm_object) != NULL) {
 3282                         VM_OBJECT_WLOCK(src_object);
 3283                         charged = ENTRY_CHARGED(src_entry);
 3284                         if (src_object->handle == NULL &&
 3285                             (src_object->type == OBJT_DEFAULT ||
 3286                             src_object->type == OBJT_SWAP)) {
 3287                                 vm_object_collapse(src_object);
 3288                                 if ((src_object->flags & (OBJ_NOSPLIT |
 3289                                     OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
 3290                                         vm_object_split(src_entry);
 3291                                         src_object =
 3292                                             src_entry->object.vm_object;
 3293                                 }
 3294                         }
 3295                         vm_object_reference_locked(src_object);
 3296                         vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
 3297                         if (src_entry->cred != NULL &&
 3298                             !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
 3299                                 KASSERT(src_object->cred == NULL,
 3300                                     ("OVERCOMMIT: vm_map_copy_entry: cred %p",
 3301                                      src_object));
 3302                                 src_object->cred = src_entry->cred;
 3303                                 src_object->charge = size;
 3304                         }
 3305                         VM_OBJECT_WUNLOCK(src_object);
 3306                         dst_entry->object.vm_object = src_object;
 3307                         if (charged) {
 3308                                 cred = curthread->td_ucred;
 3309                                 crhold(cred);
 3310                                 dst_entry->cred = cred;
 3311                                 *fork_charge += size;
 3312                                 if (!(src_entry->eflags &
 3313                                       MAP_ENTRY_NEEDS_COPY)) {
 3314                                         crhold(cred);
 3315                                         src_entry->cred = cred;
 3316                                         *fork_charge += size;
 3317                                 }
 3318                         }
 3319                         src_entry->eflags |= MAP_ENTRY_COW |
 3320                             MAP_ENTRY_NEEDS_COPY;
 3321                         dst_entry->eflags |= MAP_ENTRY_COW |
 3322                             MAP_ENTRY_NEEDS_COPY;
 3323                         dst_entry->offset = src_entry->offset;
 3324                         if (src_entry->eflags & MAP_ENTRY_VN_WRITECNT) {
 3325                                  * MAP_ENTRY_VN_WRITECNT can no longer
 3326                                  * track the write reference from
 3327                                  * src_entry, since the entry is now
 3328                                  * marked as needs copy.  Allocate a
 3329                                  * marked as needs copy.  Allocate a
 3330                                  * fake entry that is used to
 3331                                  * decrement object->un_pager.vnp.writecount
 3332                                  * at the appropriate time.  Attach
 3333                                  * fake_entry to the deferred list.
 3334                                  */
 3335                                 fake_entry = vm_map_entry_create(dst_map);
 3336                                 fake_entry->eflags = MAP_ENTRY_VN_WRITECNT;
 3337                                 src_entry->eflags &= ~MAP_ENTRY_VN_WRITECNT;
 3338                                 vm_object_reference(src_object);
 3339                                 fake_entry->object.vm_object = src_object;
 3340                                 fake_entry->start = src_entry->start;
 3341                                 fake_entry->end = src_entry->end;
 3342                                 fake_entry->next = curthread->td_map_def_user;
 3343                                 curthread->td_map_def_user = fake_entry;
 3344                         }
 3345 
 3346                         pmap_copy(dst_map->pmap, src_map->pmap,
 3347                             dst_entry->start, dst_entry->end - dst_entry->start,
 3348                             src_entry->start);
 3349                 } else {
 3350                         dst_entry->object.vm_object = NULL;
 3351                         dst_entry->offset = 0;
 3352                         if (src_entry->cred != NULL) {
 3353                                 dst_entry->cred = curthread->td_ucred;
 3354                                 crhold(dst_entry->cred);
 3355                                 *fork_charge += size;
 3356                         }
 3357                 }
 3358         } else {
 3359                 /*
 3360                  * We don't want to make writeable wired pages copy-on-write.
 3361                  * Immediately copy these pages into the new map by simulating
 3362                  * page faults.  The new pages are pageable.
 3363                  */
 3364                 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry,
 3365                     fork_charge);
 3366         }
 3367 }
 3368 
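      /*
       * Editor's note (illustrative): for the common unwired,
       * object-backed case above, the effect of vm_map_copy_entry()
       * can be summarized as:
       *
       *         before: src_entry -> object O, possibly writeable
       *         after:  src_entry -> O, eflags |= COW | NEEDS_COPY
       *                 dst_entry -> O, eflags |= COW | NEEDS_COPY
       *
       * Both maps now share O read-only; the first write fault in
       * either map gives that map a private copy of the touched page.
       */
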
 3369 /*
 3370  * vmspace_map_entry_forked:
 3371  * Update the newly-forked vmspace each time a map entry is inherited
 3372  * or copied.  The values for vm_dsize and vm_tsize are approximate
 3373  * (and mostly obsolete in the face of mmap(2) et al.).
 3374  */
 3375 static void
 3376 vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2,
 3377     vm_map_entry_t entry)
 3378 {
 3379         vm_size_t entrysize;
 3380         vm_offset_t newend;
 3381 
 3382         if ((entry->eflags & MAP_ENTRY_GUARD) != 0)
 3383                 return;
 3384         entrysize = entry->end - entry->start;
 3385         vm2->vm_map.size += entrysize;
 3386         if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) {
 3387                 vm2->vm_ssize += btoc(entrysize);
 3388         } else if (entry->start >= (vm_offset_t)vm1->vm_daddr &&
 3389             entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) {
 3390                 newend = MIN(entry->end,
 3391                     (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize));
 3392                 vm2->vm_dsize += btoc(newend - entry->start);
 3393         } else if (entry->start >= (vm_offset_t)vm1->vm_taddr &&
 3394             entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) {
 3395                 newend = MIN(entry->end,
 3396                     (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize));
 3397                 vm2->vm_tsize += btoc(newend - entry->start);
 3398         }
 3399 }
 3400 
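      /*
       * Editor's example (illustrative): for a 3-page entry lying
       * entirely inside [vm_daddr, vm_daddr + ctob(vm_dsize)), the code
       * above adds the entry's byte size to vm2->vm_map.size and, since
       * newend == entry->end, btoc(newend - entry->start) == 3 pages to
       * vm2->vm_dsize.
       */
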
 3401 /*
 3402  * vmspace_fork:
 3403  * Create a new process vmspace structure and vm_map
 3404  * based on those of an existing process.  The new map
 3405  * is based on the old map, according to the inheritance
 3406  * values on the regions in that map.
 3407  *
 3408  * XXX It might be worth coalescing the entries added to the new vmspace.
 3409  *
 3410  * The source map must not be locked.
 3411  */
 3412 struct vmspace *
 3413 vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
 3414 {
 3415         struct vmspace *vm2;
 3416         vm_map_t new_map, old_map;
 3417         vm_map_entry_t new_entry, old_entry;
 3418         vm_object_t object;
 3419         int locked;
 3420         vm_inherit_t inh;
 3421 
 3422         old_map = &vm1->vm_map;
 3423         /* Copy immutable fields of vm1 to vm2. */
 3424         vm2 = vmspace_alloc(vm_map_min(old_map), vm_map_max(old_map), NULL);
 3425         if (vm2 == NULL)
 3426                 return (NULL);
 3427         vm2->vm_taddr = vm1->vm_taddr;
 3428         vm2->vm_daddr = vm1->vm_daddr;
 3429         vm2->vm_maxsaddr = vm1->vm_maxsaddr;
 3430         vm_map_lock(old_map);
 3431         if (old_map->busy)
 3432                 vm_map_wait_busy(old_map);
 3433         new_map = &vm2->vm_map;
 3434         locked = vm_map_trylock(new_map); /* trylock to silence WITNESS */
 3435         KASSERT(locked, ("vmspace_fork: lock failed"));
 3436 
 3437         old_entry = old_map->header.next;
 3438 
 3439         while (old_entry != &old_map->header) {
 3440                 if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
 3441                         panic("vmspace_fork: encountered a submap");
 3442 
 3443                 inh = old_entry->inheritance;
 3444                 if ((old_entry->eflags & MAP_ENTRY_GUARD) != 0 &&
 3445                     inh != VM_INHERIT_NONE)
 3446                         inh = VM_INHERIT_COPY;
 3447 
 3448                 switch (inh) {
 3449                 case VM_INHERIT_NONE:
 3450                         break;
 3451 
 3452                 case VM_INHERIT_SHARE:
 3453                         /*
 3454                          * Clone the entry, creating the shared object if necessary.
 3455                          */
 3456                         object = old_entry->object.vm_object;
 3457                         if (object == NULL) {
 3458                                 object = vm_object_allocate(OBJT_DEFAULT,
 3459                                         atop(old_entry->end - old_entry->start));
 3460                                 old_entry->object.vm_object = object;
 3461                                 old_entry->offset = 0;
 3462                                 if (old_entry->cred != NULL) {
 3463                                         object->cred = old_entry->cred;
 3464                                         object->charge = old_entry->end -
 3465                                             old_entry->start;
 3466                                         old_entry->cred = NULL;
 3467                                 }
 3468                         }
 3469 
 3470                         /*
 3471                          * Add the reference before calling vm_object_shadow
 3472                          * to insure that a shadow object is created.
 3473                          * to ensure that a shadow object is created.
 3474                         vm_object_reference(object);
 3475                         if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
 3476                                 vm_object_shadow(&old_entry->object.vm_object,
 3477                                     &old_entry->offset,
 3478                                     old_entry->end - old_entry->start);
 3479                                 old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
 3480                                 /* Transfer the second reference too. */
 3481                                 vm_object_reference(
 3482                                     old_entry->object.vm_object);
 3483 
 3484                                 /*
 3485                                  * As in vm_map_simplify_entry(), the
 3486                                  * vnode lock will not be acquired in
 3487                                  * this call to vm_object_deallocate().
 3488                                  */
 3489                                 vm_object_deallocate(object);
 3490                                 object = old_entry->object.vm_object;
 3491                         }
 3492                         VM_OBJECT_WLOCK(object);
 3493                         vm_object_clear_flag(object, OBJ_ONEMAPPING);
 3494                         if (old_entry->cred != NULL) {
 3495                                 KASSERT(object->cred == NULL, ("vmspace_fork both cred"));
 3496                                 object->cred = old_entry->cred;
 3497                                 object->charge = old_entry->end - old_entry->start;
 3498                                 old_entry->cred = NULL;
 3499                         }
 3500 
 3501                         /*
 3502                          * Assert the correct state of the vnode
 3503                          * v_writecount while the object is locked, so
 3504                          * that it need not be relocked later just for
 3505                          * the assertion.
 3506                          */
 3507                         if (old_entry->eflags & MAP_ENTRY_VN_WRITECNT &&
 3508                             object->type == OBJT_VNODE) {
 3509                                 KASSERT(((struct vnode *)object->handle)->
 3510                                     v_writecount > 0,
 3511                                     ("vmspace_fork: v_writecount %p", object));
 3512                                 KASSERT(object->un_pager.vnp.writemappings > 0,
 3513                                     ("vmspace_fork: vnp.writecount %p",
 3514                                     object));
 3515                         }
 3516                         VM_OBJECT_WUNLOCK(object);
 3517 
 3518                         /*
 3519                          * Clone the entry, referencing the shared object.
 3520                          */
 3521                         new_entry = vm_map_entry_create(new_map);
 3522                         *new_entry = *old_entry;
 3523                         new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
 3524                             MAP_ENTRY_IN_TRANSITION);
 3525                         new_entry->wiring_thread = NULL;
 3526                         new_entry->wired_count = 0;
 3527                         if (new_entry->eflags & MAP_ENTRY_VN_WRITECNT) {
 3528                                 vnode_pager_update_writecount(object,
 3529                                     new_entry->start, new_entry->end);
 3530                         }
 3531 
 3532                         /*
 3533                          * Insert the entry into the new map -- we know we're
 3534                          * inserting at the end of the new map.
 3535                          */
 3536                         vm_map_entry_link(new_map, new_map->header.prev,
 3537                             new_entry);
 3538                         vmspace_map_entry_forked(vm1, vm2, new_entry);
 3539 
 3540                         /*
 3541                          * Update the physical map
 3542                          */
 3543                         pmap_copy(new_map->pmap, old_map->pmap,
 3544                             new_entry->start,
 3545                             (old_entry->end - old_entry->start),
 3546                             old_entry->start);
 3547                         break;
 3548 
 3549                 case VM_INHERIT_COPY:
 3550                         /*
 3551                          * Clone the entry and link into the map.
 3552                          */
 3553                         new_entry = vm_map_entry_create(new_map);
 3554                         *new_entry = *old_entry;
 3555                         /*
 3556                          * Copied entry is COW over the old object.
 3557                          */
 3558                         new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
 3559                             MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_VN_WRITECNT);
 3560                         new_entry->wiring_thread = NULL;
 3561                         new_entry->wired_count = 0;
 3562                         new_entry->object.vm_object = NULL;
 3563                         new_entry->cred = NULL;
 3564                         vm_map_entry_link(new_map, new_map->header.prev,
 3565                             new_entry);
 3566                         vmspace_map_entry_forked(vm1, vm2, new_entry);
 3567                         vm_map_copy_entry(old_map, new_map, old_entry,
 3568                             new_entry, fork_charge);
 3569                         break;
 3570 
 3571                 case VM_INHERIT_ZERO:
 3572                         /*
 3573                          * Create a new anonymous mapping entry modeled from
 3574                          * the old one.
 3575                          */
 3576                         new_entry = vm_map_entry_create(new_map);
 3577                         memset(new_entry, 0, sizeof(*new_entry));
 3578 
 3579                         new_entry->start = old_entry->start;
 3580                         new_entry->end = old_entry->end;
 3581                         new_entry->eflags = old_entry->eflags &
 3582                             ~(MAP_ENTRY_USER_WIRED | MAP_ENTRY_IN_TRANSITION |
 3583                             MAP_ENTRY_VN_WRITECNT);
 3584                         new_entry->protection = old_entry->protection;
 3585                         new_entry->max_protection = old_entry->max_protection;
 3586                         new_entry->inheritance = VM_INHERIT_ZERO;
 3587 
 3588                         vm_map_entry_link(new_map, new_map->header.prev,
 3589                             new_entry);
 3590                         vmspace_map_entry_forked(vm1, vm2, new_entry);
 3591 
 3592                         new_entry->cred = curthread->td_ucred;
 3593                         crhold(new_entry->cred);
 3594                         *fork_charge += (new_entry->end - new_entry->start);
 3595 
 3596                         break;
 3597                 }
 3598                 old_entry = old_entry->next;
 3599         }
 3600         /*
 3601          * Use inlined vm_map_unlock() to postpone handling the deferred
 3602          * map entries, which cannot be done until both old_map and
 3603          * new_map locks are released.
 3604          */
 3605         sx_xunlock(&old_map->lock);
 3606         sx_xunlock(&new_map->lock);
 3607         vm_map_process_deferred();
 3608 
 3609         return (vm2);
 3610 }
 3611 
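      /*
       * Editor's example (hypothetical; not part of vm_map.c): the
       * fork(2) path uses vmspace_fork() roughly as follows; the out
       * parameter fork_charge accumulates the child's copy-on-write
       * swap charges for the caller to reserve:
       *
       *         vm_ooffset_t fork_charge = 0;
       *
       *         vm2 = vmspace_fork(p1->p_vmspace, &fork_charge);
       *         if (vm2 == NULL)
       *                 return (ENOMEM);
       *         p2->p_vmspace = vm2;
       */
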
 3612 /*
 3613  * Create a process's stack for exec_new_vmspace().  This function is never
 3614  * asked to wire the newly created stack.
 3615  */
 3616 int
 3617 vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
 3618     vm_prot_t prot, vm_prot_t max, int cow)
 3619 {
 3620         vm_size_t growsize, init_ssize;
 3621         rlim_t vmemlim;
 3622         int rv;
 3623 
 3624         MPASS((map->flags & MAP_WIREFUTURE) == 0);
 3625         growsize = sgrowsiz;
 3626         init_ssize = (max_ssize < growsize) ? max_ssize : growsize;
 3627         vm_map_lock(map);
 3628         vmemlim = lim_cur(curthread, RLIMIT_VMEM);
 3629         /* If we would blow our VMEM resource limit, no go */
 3630         if (map->size + init_ssize > vmemlim) {
 3631                 rv = KERN_NO_SPACE;
 3632                 goto out;
 3633         }
 3634         rv = vm_map_stack_locked(map, addrbos, max_ssize, growsize, prot,
 3635             max, cow);
 3636 out:
 3637         vm_map_unlock(map);
 3638         return (rv);
 3639 }
 3640 
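      /*
       * Editor's example (hypothetical; not part of vm_map.c):
       * exec_new_vmspace() creates the initial user stack with a call
       * of roughly this shape, where "sv" is the process sysentvec:
       *
       *         rv = vm_map_stack(map, sv->sv_usrstack - maxssiz,
       *             (vm_size_t)maxssiz, VM_PROT_ALL, VM_PROT_ALL,
       *             MAP_STACK_GROWS_DOWN);
       */
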
 3641 static int stack_guard_page = 1;
 3642 SYSCTL_INT(_security_bsd, OID_AUTO, stack_guard_page, CTLFLAG_RWTUN,
 3643     &stack_guard_page, 0,
 3644     "Specifies the number of guard pages for a stack that grows");
 3645 
 3646 static int
 3647 vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
 3648     vm_size_t growsize, vm_prot_t prot, vm_prot_t max, int cow)
 3649 {
 3650         vm_map_entry_t new_entry, prev_entry;
 3651         vm_offset_t bot, gap_bot, gap_top, top;
 3652         vm_size_t init_ssize, sgp;
 3653         int orient, rv;
 3654 
 3655         /*
 3656          * The stack orientation is piggybacked with the cow argument.
 3657          * Extract it into orient and mask the cow argument so that we
 3658          * don't pass it around further.
 3659          */
 3660         orient = cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP);
 3661         KASSERT(orient != 0, ("No stack grow direction"));
 3662         KASSERT(orient != (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP),
 3663             ("bi-dir stack"));
 3664 
 3665         if (addrbos < vm_map_min(map) ||
 3666             addrbos + max_ssize > vm_map_max(map) ||
 3667             addrbos + max_ssize <= addrbos)
 3668                 return (KERN_INVALID_ADDRESS);
 3669         sgp = (vm_size_t)stack_guard_page * PAGE_SIZE;
 3670         if (sgp >= max_ssize)
 3671                 return (KERN_INVALID_ARGUMENT);
 3672 
 3673         init_ssize = growsize;
 3674         if (max_ssize < init_ssize + sgp)
 3675                 init_ssize = max_ssize - sgp;
 3676 
 3677         /* If addr is already mapped, no go */
 3678         if (vm_map_lookup_entry(map, addrbos, &prev_entry))
 3679                 return (KERN_NO_SPACE);
 3680 
 3681         /*
 3682          * If we can't accommodate max_ssize in the current mapping, no go.
 3683          */
 3684         if (prev_entry->next->start < addrbos + max_ssize)
 3685                 return (KERN_NO_SPACE);
 3686 
 3687         /*
 3688          * We initially map a stack of only init_ssize.  We will grow as
 3689          * needed later.  Depending on the orientation of the stack (i.e.
 3690          * the grow direction) we either map at the top of the range, the
 3691          * bottom of the range or in the middle.
 3692          *
 3693          * Note: we would normally expect prot and max to be VM_PROT_ALL,
 3694          * and cow to be 0.  Possibly we should eliminate these as input
 3695          * parameters, and just pass these values here in the insert call.
 3696          */
 3697         if (orient == MAP_STACK_GROWS_DOWN) {
 3698                 bot = addrbos + max_ssize - init_ssize;
 3699                 top = bot + init_ssize;
 3700                 gap_bot = addrbos;
 3701                 gap_top = bot;
 3702         } else /* if (orient == MAP_STACK_GROWS_UP) */ {
 3703                 bot = addrbos;
 3704                 top = bot + init_ssize;
 3705                 gap_bot = top;
 3706                 gap_top = addrbos + max_ssize;
 3707         }
 3708         rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow);
 3709         if (rv != KERN_SUCCESS)
 3710                 return (rv);
 3711         new_entry = prev_entry->next;
 3712         KASSERT(new_entry->end == top || new_entry->start == bot,
 3713             ("Bad entry start/end for new stack entry"));
 3714         KASSERT((orient & MAP_STACK_GROWS_DOWN) == 0 ||
 3715             (new_entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0,
 3716             ("new entry lacks MAP_ENTRY_GROWS_DOWN"));
 3717         KASSERT((orient & MAP_STACK_GROWS_UP) == 0 ||
 3718             (new_entry->eflags & MAP_ENTRY_GROWS_UP) != 0,
 3719             ("new entry lacks MAP_ENTRY_GROWS_UP"));
 3720         rv = vm_map_insert(map, NULL, 0, gap_bot, gap_top, VM_PROT_NONE,
 3721             VM_PROT_NONE, MAP_CREATE_GUARD | (orient == MAP_STACK_GROWS_DOWN ?
 3722             MAP_CREATE_STACK_GAP_DN : MAP_CREATE_STACK_GAP_UP));
 3723         if (rv != KERN_SUCCESS)
 3724                 (void)vm_map_delete(map, bot, top);
 3725         return (rv);
 3726 }
 3727 
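      /*
       * Editor's example (illustrative): with max_ssize = 8 MB,
       * growsize = sgrowsiz (commonly 128 kB), stack_guard_page = 1,
       * and 4 kB pages, a MAP_STACK_GROWS_DOWN request lays out:
       *
       *         [addrbos, addrbos + 8M - 128k)        gap (guard + growth)
       *         [addrbos + 8M - 128k, addrbos + 8M)   initial stack
       *
       * Later faults into the gap shrink it and extend the stack entry
       * downward via vm_map_growstack(), which never lets the stack
       * bottom come within "sgp" (one page here) of addrbos.
       */
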
 3728 /*
 3729  * Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if we
 3730  * successfully grow the stack.
 3731  */
 3732 static int
 3733 vm_map_growstack(vm_map_t map, vm_offset_t addr, vm_map_entry_t gap_entry)
 3734 {
 3735         vm_map_entry_t stack_entry;
 3736         struct proc *p;
 3737         struct vmspace *vm;
 3738         struct ucred *cred;
 3739         vm_offset_t gap_end, gap_start, grow_start;
 3740         size_t grow_amount, guard, max_grow;
 3741         rlim_t lmemlim, stacklim, vmemlim;
 3742         int rv, rv1;
 3743         bool gap_deleted, grow_down, is_procstack;
 3744 #ifdef notyet
 3745         uint64_t limit;
 3746 #endif
 3747 #ifdef RACCT
 3748         int error;
 3749 #endif
 3750 
 3751         p = curproc;
 3752         vm = p->p_vmspace;
 3753 
 3754         /*
 3755          * Disallow stack growth when the access is performed by a
 3756          * debugger or AIO daemon, because the wrong resource limits
 3757          * would be applied.
 3758          */
 3759         if (map != &p->p_vmspace->vm_map || p->p_textvp == NULL)
 3760                 return (KERN_FAILURE);
 3761 
 3762         MPASS(!map->system_map);
 3763 
 3764         guard = stack_guard_page * PAGE_SIZE;
 3765         lmemlim = lim_cur(curthread, RLIMIT_MEMLOCK);
 3766         stacklim = lim_cur(curthread, RLIMIT_STACK);
 3767         vmemlim = lim_cur(curthread, RLIMIT_VMEM);
 3768 retry:
 3769         /* If addr is not in a hole for a stack grow area, no need to grow. */
 3770         if (gap_entry == NULL && !vm_map_lookup_entry(map, addr, &gap_entry))
 3771                 return (KERN_FAILURE);
 3772         if ((gap_entry->eflags & MAP_ENTRY_GUARD) == 0)
 3773                 return (KERN_SUCCESS);
 3774         if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_DN) != 0) {
 3775                 stack_entry = gap_entry->next;
 3776                 if ((stack_entry->eflags & MAP_ENTRY_GROWS_DOWN) == 0 ||
 3777                     stack_entry->start != gap_entry->end)
 3778                         return (KERN_FAILURE);
 3779                 grow_amount = round_page(stack_entry->start - addr);
 3780                 grow_down = true;
 3781         } else if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_UP) != 0) {
 3782                 stack_entry = gap_entry->prev;
 3783                 if ((stack_entry->eflags & MAP_ENTRY_GROWS_UP) == 0 ||
 3784                     stack_entry->end != gap_entry->start)
 3785                         return (KERN_FAILURE);
 3786                 grow_amount = round_page(addr + 1 - stack_entry->end);
 3787                 grow_down = false;
 3788         } else {
 3789                 return (KERN_FAILURE);
 3790         }
 3791         max_grow = gap_entry->end - gap_entry->start;
 3792         if (guard > max_grow)
 3793                 return (KERN_NO_SPACE);
 3794         max_grow -= guard;
 3795         if (grow_amount > max_grow)
 3796                 return (KERN_NO_SPACE);
 3797 
 3798         /*
 3799          * If this is the main process stack, see if we're over the stack
 3800          * limit.
 3801          */
 3802         is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr &&
 3803             addr < (vm_offset_t)p->p_sysent->sv_usrstack;
 3804         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim))
 3805                 return (KERN_NO_SPACE);
 3806 
 3807 #ifdef RACCT
 3808         if (racct_enable) {
 3809                 PROC_LOCK(p);
 3810                 if (is_procstack && racct_set(p, RACCT_STACK,
 3811                     ctob(vm->vm_ssize) + grow_amount)) {
 3812                         PROC_UNLOCK(p);
 3813                         return (KERN_NO_SPACE);
 3814                 }
 3815                 PROC_UNLOCK(p);
 3816         }
 3817 #endif
 3818 
 3819         grow_amount = roundup(grow_amount, sgrowsiz);
 3820         if (grow_amount > max_grow)
 3821                 grow_amount = max_grow;
 3822         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
 3823                 grow_amount = trunc_page((vm_size_t)stacklim) -
 3824                     ctob(vm->vm_ssize);
 3825         }
 3826 
 3827 #ifdef notyet
 3828         PROC_LOCK(p);
 3829         limit = racct_get_available(p, RACCT_STACK);
 3830         PROC_UNLOCK(p);
 3831         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > limit))
 3832                 grow_amount = limit - ctob(vm->vm_ssize);
 3833 #endif
 3834 
 3835         if (!old_mlock && (map->flags & MAP_WIREFUTURE) != 0) {
 3836                 if (ptoa(pmap_wired_count(map->pmap)) + grow_amount > lmemlim) {
 3837                         rv = KERN_NO_SPACE;
 3838                         goto out;
 3839                 }
 3840 #ifdef RACCT
 3841                 if (racct_enable) {
 3842                         PROC_LOCK(p);
 3843                         if (racct_set(p, RACCT_MEMLOCK,
 3844                             ptoa(pmap_wired_count(map->pmap)) + grow_amount)) {
 3845                                 PROC_UNLOCK(p);
 3846                                 rv = KERN_NO_SPACE;
 3847                                 goto out;
 3848                         }
 3849                         PROC_UNLOCK(p);
 3850                 }
 3851 #endif
 3852         }
 3853 
 3854         /* If we would blow our VMEM resource limit, no go */
 3855         if (map->size + grow_amount > vmemlim) {
 3856                 rv = KERN_NO_SPACE;
 3857                 goto out;
 3858         }
 3859 #ifdef RACCT
 3860         if (racct_enable) {
 3861                 PROC_LOCK(p);
 3862                 if (racct_set(p, RACCT_VMEM, map->size + grow_amount)) {
 3863                         PROC_UNLOCK(p);
 3864                         rv = KERN_NO_SPACE;
 3865                         goto out;
 3866                 }
 3867                 PROC_UNLOCK(p);
 3868         }
 3869 #endif
 3870 
 3871         if (vm_map_lock_upgrade(map)) {
 3872                 gap_entry = NULL;
 3873                 vm_map_lock_read(map);
 3874                 goto retry;
 3875         }
 3876 
 3877         if (grow_down) {
 3878                 grow_start = gap_entry->end - grow_amount;
 3879                 if (gap_entry->start + grow_amount == gap_entry->end) {
 3880                         gap_start = gap_entry->start;
 3881                         gap_end = gap_entry->end;
 3882                         vm_map_entry_delete(map, gap_entry);
 3883                         gap_deleted = true;
 3884                 } else {
 3885                         MPASS(gap_entry->start < gap_entry->end - grow_amount);
 3886                         gap_entry->end -= grow_amount;
 3887                         vm_map_entry_resize_free(map, gap_entry);
 3888                         gap_deleted = false;
 3889                 }
 3890                 rv = vm_map_insert(map, NULL, 0, grow_start,
 3891                     grow_start + grow_amount,
 3892                     stack_entry->protection, stack_entry->max_protection,
 3893                     MAP_STACK_GROWS_DOWN);
 3894                 if (rv != KERN_SUCCESS) {
 3895                         if (gap_deleted) {
 3896                                 rv1 = vm_map_insert(map, NULL, 0, gap_start,
 3897                                     gap_end, VM_PROT_NONE, VM_PROT_NONE,
 3898                                     MAP_CREATE_GUARD | MAP_CREATE_STACK_GAP_DN);
 3899                                 MPASS(rv1 == KERN_SUCCESS);
 3900                         } else {
 3901                                 gap_entry->end += grow_amount;
 3902                                 vm_map_entry_resize_free(map, gap_entry);
 3903                         }
 3904                 }
 3905         } else {
 3906                 grow_start = stack_entry->end;
 3907                 cred = stack_entry->cred;
 3908                 if (cred == NULL && stack_entry->object.vm_object != NULL)
 3909                         cred = stack_entry->object.vm_object->cred;
 3910                 if (cred != NULL && !swap_reserve_by_cred(grow_amount, cred))
 3911                         rv = KERN_NO_SPACE;
 3912                 /* Grow the underlying object if applicable. */
 3913                 else if (stack_entry->object.vm_object == NULL ||
 3914                     vm_object_coalesce(stack_entry->object.vm_object,
 3915                     stack_entry->offset,
 3916                     (vm_size_t)(stack_entry->end - stack_entry->start),
 3917                     (vm_size_t)grow_amount, cred != NULL)) {
 3918                         if (gap_entry->start + grow_amount == gap_entry->end)
 3919                                 vm_map_entry_delete(map, gap_entry);
 3920                         else
 3921                                 gap_entry->start += grow_amount;
 3922                         stack_entry->end += grow_amount;
 3923                         map->size += grow_amount;
 3924                         vm_map_entry_resize_free(map, stack_entry);
 3925                         rv = KERN_SUCCESS;
 3926                 } else
 3927                         rv = KERN_FAILURE;
 3928         }
 3929         if (rv == KERN_SUCCESS && is_procstack)
 3930                 vm->vm_ssize += btoc(grow_amount);
 3931 
 3932         /*
 3933          * Heed the MAP_WIREFUTURE flag if it was set for this process.
 3934          */
 3935         if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE) != 0) {
 3936                 vm_map_unlock(map);
 3937                 vm_map_wire(map, grow_start, grow_start + grow_amount,
 3938                     VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
 3939                 vm_map_lock_read(map);
 3940         } else
 3941                 vm_map_lock_downgrade(map);
 3942 
 3943 out:
 3944 #ifdef RACCT
 3945         if (racct_enable && rv != KERN_SUCCESS) {
 3946                 PROC_LOCK(p);
 3947                 error = racct_set(p, RACCT_VMEM, map->size);
 3948                 KASSERT(error == 0, ("decreasing RACCT_VMEM failed"));
 3949                 if (!old_mlock) {
 3950                         error = racct_set(p, RACCT_MEMLOCK,
 3951                             ptoa(pmap_wired_count(map->pmap)));
 3952                         KASSERT(error == 0, ("decreasing RACCT_MEMLOCK failed"));
 3953                 }
 3954                 error = racct_set(p, RACCT_STACK, ctob(vm->vm_ssize));
 3955                 KASSERT(error == 0, ("decreasing RACCT_STACK failed"));
 3956                 PROC_UNLOCK(p);
 3957         }
 3958 #endif
 3959 
 3960         return (rv);
 3961 }
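/*
 * Illustration of a successful downward grow (addresses increase to the
 * right; sizes illustrative, not part of the source):
 *
 *   before:  [ gap entry .................. ][ stack entry ... ]
 *   after:   [ gap (end -= grow_amount) ][ new stack ][ stack entry ... ]
 *
 * When the grow consumes the entire gap, the gap entry is deleted first
 * and re-inserted as a guard only if inserting the new stack range fails.
 */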
 3962 
 3963 /*
 3964  * Unshare the specified VM space for exec.  If other processes are
 3965  * sharing it, create a new one.  The new vmspace has no mappings.
 3966  */
 3967 int
 3968 vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
 3969 {
 3970         struct vmspace *oldvmspace = p->p_vmspace;
 3971         struct vmspace *newvmspace;
 3972 
 3973         KASSERT((curthread->td_pflags & TDP_EXECVMSPC) == 0,
 3974             ("vmspace_exec recursed"));
 3975         newvmspace = vmspace_alloc(minuser, maxuser, NULL);
 3976         if (newvmspace == NULL)
 3977                 return (ENOMEM);
 3978         newvmspace->vm_swrss = oldvmspace->vm_swrss;
 3979         /*
 3980          * This code is written like this for prototype purposes.  The
 3981          * goal is to avoid running down the vmspace here; instead, the
 3982          * other processes that are still using it run it down when they
 3983          * release their references.  Even though there is little or no
 3984          * chance of blocking here, this form is kept for future changes.
 3985          */
 3986         PROC_VMSPACE_LOCK(p);
 3987         p->p_vmspace = newvmspace;
 3988         PROC_VMSPACE_UNLOCK(p);
 3989         if (p == curthread->td_proc)
 3990                 pmap_activate(curthread);
 3991         curthread->td_pflags |= TDP_EXECVMSPC;
 3992         return (0);
 3993 }
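/*
 * A minimal sketch of the intended calling pattern, loosely modeled on
 * the exec path; the locals, sysentvec fields, and error handling are
 * illustrative, not a copy of kern_exec.c:
 */
#if 0
        struct vmspace *oldvmspace = p->p_vmspace;
        int error;

        error = vmspace_exec(p, sv_minuser, sv_maxuser);
        if (error != 0)
                return (error);
        /* ... build the new image; exec can no longer fail ... */
        if ((curthread->td_pflags & TDP_EXECVMSPC) != 0) {
                curthread->td_pflags &= ~TDP_EXECVMSPC;
                vmspace_free(oldvmspace);       /* drop deferred reference */
        }
#endif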
 3994 
 3995 /*
 3996  * Unshare the specified VM space for forcing COW.  This
 3997  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
 3998  */
 3999 int
 4000 vmspace_unshare(struct proc *p)
 4001 {
 4002         struct vmspace *oldvmspace = p->p_vmspace;
 4003         struct vmspace *newvmspace;
 4004         vm_ooffset_t fork_charge;
 4005 
 4006         if (oldvmspace->vm_refcnt == 1)
 4007                 return (0);
 4008         fork_charge = 0;
 4009         newvmspace = vmspace_fork(oldvmspace, &fork_charge);
 4010         if (newvmspace == NULL)
 4011                 return (ENOMEM);
 4012         if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) {
 4013                 vmspace_free(newvmspace);
 4014                 return (ENOMEM);
 4015         }
 4016         PROC_VMSPACE_LOCK(p);
 4017         p->p_vmspace = newvmspace;
 4018         PROC_VMSPACE_UNLOCK(p);
 4019         if (p == curthread->td_proc)
 4020                 pmap_activate(curthread);
 4021         vmspace_free(oldvmspace);
 4022         return (0);
 4023 }
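/*
 * Usage note (illustrative): rfork(2) with neither RFMEM nor RFPROC set
 * reaches this function to give the caller a private copy of its address
 * space.  The vm_refcnt == 1 fast path means the space is already
 * private; otherwise the fork-style copy charges the swap reservation
 * accumulated in fork_charge before the new vmspace is installed.
 */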
 4024 
 4025 /*
 4026  *      vm_map_lookup:
 4027  *
 4028  *      Finds the VM object, offset, and
 4029  *      protection for a given virtual address in the
 4030  *      specified map, assuming a page fault of the
 4031  *      type specified.
 4032  *
 4033  *      Leaves the map in question locked for read; return
 4034  *      values are guaranteed until a vm_map_lookup_done
 4035  *      call is performed.  Note that the map argument
 4036  *      is in/out; the returned map must be used in
 4037  *      the call to vm_map_lookup_done.
 4038  *
 4039  *      A handle (out_entry) is returned for use in
 4040  *      vm_map_lookup_done, to make that fast.
 4041  *
 4042  *      If a lookup is requested with "write protection"
 4043  *      specified, the map may be changed to perform virtual
 4044  *      copying operations, although the data referenced will
 4045  *      remain the same.
 4046  */
 4047 int
 4048 vm_map_lookup(vm_map_t *var_map,                /* IN/OUT */
 4049               vm_offset_t vaddr,
 4050               vm_prot_t fault_typea,
 4051               vm_map_entry_t *out_entry,        /* OUT */
 4052               vm_object_t *object,              /* OUT */
 4053               vm_pindex_t *pindex,              /* OUT */
 4054               vm_prot_t *out_prot,              /* OUT */
 4055               boolean_t *wired)                 /* OUT */
 4056 {
 4057         vm_map_entry_t entry;
 4058         vm_map_t map = *var_map;
 4059         vm_prot_t prot;
 4060         vm_prot_t fault_type = fault_typea;
 4061         vm_object_t eobject;
 4062         vm_size_t size;
 4063         struct ucred *cred;
 4064 
 4065 RetryLookup:
 4066 
 4067         vm_map_lock_read(map);
 4068 
 4069 RetryLookupLocked:
 4070         /*
 4071          * Lookup the faulting address.
 4072          */
 4073         if (!vm_map_lookup_entry(map, vaddr, out_entry)) {
 4074                 vm_map_unlock_read(map);
 4075                 return (KERN_INVALID_ADDRESS);
 4076         }
 4077 
 4078         entry = *out_entry;
 4079 
 4080         /*
 4081          * Handle submaps.
 4082          */
 4083         if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
 4084                 vm_map_t old_map = map;
 4085 
 4086                 *var_map = map = entry->object.sub_map;
 4087                 vm_map_unlock_read(old_map);
 4088                 goto RetryLookup;
 4089         }
 4090 
 4091         /*
 4092          * Check whether this task is allowed to have this page.
 4093          */
 4094         prot = entry->protection;
 4095         if ((fault_typea & VM_PROT_FAULT_LOOKUP) != 0) {
 4096                 fault_typea &= ~VM_PROT_FAULT_LOOKUP;
 4097                 if (prot == VM_PROT_NONE && map != kernel_map &&
 4098                     (entry->eflags & MAP_ENTRY_GUARD) != 0 &&
 4099                     (entry->eflags & (MAP_ENTRY_STACK_GAP_DN |
 4100                     MAP_ENTRY_STACK_GAP_UP)) != 0 &&
 4101                     vm_map_growstack(map, vaddr, entry) == KERN_SUCCESS)
 4102                         goto RetryLookupLocked;
 4103         }
 4104         fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
 4105         if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) {
 4106                 vm_map_unlock_read(map);
 4107                 return (KERN_PROTECTION_FAILURE);
 4108         }
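        /*
         * Worked example of the check above (values illustrative): for
         * a read-only mapping, prot = VM_PROT_READ; a write fault
         * presents fault_type = VM_PROT_WRITE, so (fault_type & prot)
         * == 0, which differs from fault_type and yields
         * KERN_PROTECTION_FAILURE.
         */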
 4109         KASSERT((prot & VM_PROT_WRITE) == 0 || (entry->eflags &
 4110             (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY)) !=
 4111             (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY),
 4112             ("entry %p flags %x", entry, entry->eflags));
 4113         if ((fault_typea & VM_PROT_COPY) != 0 &&
 4114             (entry->max_protection & VM_PROT_WRITE) == 0 &&
 4115             (entry->eflags & MAP_ENTRY_COW) == 0) {
 4116                 vm_map_unlock_read(map);
 4117                 return (KERN_PROTECTION_FAILURE);
 4118         }
 4119 
 4120         /*
 4121          * If this page is not pageable, we have to get it for all possible
 4122          * accesses.
 4123          */
 4124         *wired = (entry->wired_count != 0);
 4125         if (*wired)
 4126                 fault_type = entry->protection;
 4127         size = entry->end - entry->start;
 4128         /*
 4129          * If the entry is copy-on-write, shadow it now or demote access.
 4130          */
 4131         if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
 4132                 /*
 4133                  * If we want to write the page, we may as well handle that
 4134                  * now since we've got the map locked.
 4135                  *
 4136                  * If we don't need to write the page, we just demote the
 4137                  * permissions allowed.
 4138                  */
 4139                 if ((fault_type & VM_PROT_WRITE) != 0 ||
 4140                     (fault_typea & VM_PROT_COPY) != 0) {
 4141                         /*
 4142                          * Make a new object, and place it in the object
 4143                          * chain.  Note that no new references have appeared
 4144                          * -- one just moved from the map to the new
 4145                          * object.
 4146                          */
 4147                         if (vm_map_lock_upgrade(map))
 4148                                 goto RetryLookup;
 4149 
 4150                         if (entry->cred == NULL) {
 4151                                 /*
 4152                                  * The debugger owner is charged for
 4153                                  * the memory.
 4154                                  */
 4155                                 cred = curthread->td_ucred;
 4156                                 crhold(cred);
 4157                                 if (!swap_reserve_by_cred(size, cred)) {
 4158                                         crfree(cred);
 4159                                         vm_map_unlock(map);
 4160                                         return (KERN_RESOURCE_SHORTAGE);
 4161                                 }
 4162                                 entry->cred = cred;
 4163                         }
 4164                         vm_object_shadow(&entry->object.vm_object,
 4165                             &entry->offset, size);
 4166                         entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
 4167                         eobject = entry->object.vm_object;
 4168                         if (eobject->cred != NULL) {
 4169                                 /*
 4170                                  * The object was not shadowed.
 4171                                  */
 4172                                 swap_release_by_cred(size, entry->cred);
 4173                                 crfree(entry->cred);
 4174                                 entry->cred = NULL;
 4175                         } else if (entry->cred != NULL) {
 4176                                 VM_OBJECT_WLOCK(eobject);
 4177                                 eobject->cred = entry->cred;
 4178                                 eobject->charge = size;
 4179                                 VM_OBJECT_WUNLOCK(eobject);
 4180                                 entry->cred = NULL;
 4181                         }
 4182 
 4183                         vm_map_lock_downgrade(map);
 4184                 } else {
 4185                         /*
 4186                          * We're attempting to read a copy-on-write page --
 4187                          * don't allow writes.
 4188                          */
 4189                         prot &= ~VM_PROT_WRITE;
 4190                 }
 4191         }
 4192 
 4193         /*
 4194          * Create an object if necessary.
 4195          */
 4196         if (entry->object.vm_object == NULL &&
 4197             !map->system_map) {
 4198                 if (vm_map_lock_upgrade(map))
 4199                         goto RetryLookup;
 4200                 entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
 4201                     atop(size));
 4202                 entry->offset = 0;
 4203                 if (entry->cred != NULL) {
 4204                         VM_OBJECT_WLOCK(entry->object.vm_object);
 4205                         entry->object.vm_object->cred = entry->cred;
 4206                         entry->object.vm_object->charge = size;
 4207                         VM_OBJECT_WUNLOCK(entry->object.vm_object);
 4208                         entry->cred = NULL;
 4209                 }
 4210                 vm_map_lock_downgrade(map);
 4211         }
 4212 
 4213         /*
 4214          * Return the object/offset from this entry.  If the entry was
 4215          * copy-on-write or empty, it has been fixed up.
 4216          */
 4217         *pindex = UOFF_TO_IDX((vaddr - entry->start) + entry->offset);
 4218         *object = entry->object.vm_object;
 4219 
 4220         *out_prot = prot;
 4221         return (KERN_SUCCESS);
 4222 }
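/*
 * A minimal sketch of the lookup protocol, loosely modeled on the page
 * fault handler (locals illustrative).  Note that the map pointer may be
 * rewritten by submap traversal, so the post-lookup value is the one
 * that must be handed to vm_map_lookup_done():
 */
#if 0
        vm_map_entry_t entry;
        vm_object_t object;
        vm_pindex_t pindex;
        vm_prot_t prot;
        boolean_t wired;
        int rv;

        rv = vm_map_lookup(&map, vaddr, VM_PROT_READ, &entry, &object,
            &pindex, &prot, &wired);
        if (rv != KERN_SUCCESS)
                return (rv);
        /* ... resolve the fault using object/pindex ... */
        vm_map_lookup_done(map, entry);
#endif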
 4223 
 4224 /*
 4225  *      vm_map_lookup_locked:
 4226  *
 4227  *      Lookup the faulting address.  A version of vm_map_lookup that returns 
 4228  *      KERN_FAILURE instead of blocking on map lock or memory allocation.
 4229  */
 4230 int
 4231 vm_map_lookup_locked(vm_map_t *var_map,         /* IN/OUT */
 4232                      vm_offset_t vaddr,
 4233                      vm_prot_t fault_typea,
 4234                      vm_map_entry_t *out_entry, /* OUT */
 4235                      vm_object_t *object,       /* OUT */
 4236                      vm_pindex_t *pindex,       /* OUT */
 4237                      vm_prot_t *out_prot,       /* OUT */
 4238                      boolean_t *wired)          /* OUT */
 4239 {
 4240         vm_map_entry_t entry;
 4241         vm_map_t map = *var_map;
 4242         vm_prot_t prot;
 4243         vm_prot_t fault_type = fault_typea;
 4244 
 4245         /*
 4246          * Lookup the faulting address.
 4247          */
 4248         if (!vm_map_lookup_entry(map, vaddr, out_entry))
 4249                 return (KERN_INVALID_ADDRESS);
 4250 
 4251         entry = *out_entry;
 4252 
 4253         /*
 4254          * Fail if the entry refers to a submap.
 4255          */
 4256         if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
 4257                 return (KERN_FAILURE);
 4258 
 4259         /*
 4260          * Check whether this task is allowed to have this page.
 4261          */
 4262         prot = entry->protection;
 4263         fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
 4264         if ((fault_type & prot) != fault_type)
 4265                 return (KERN_PROTECTION_FAILURE);
 4266 
 4267         /*
 4268          * If this page is not pageable, we have to get it for all possible
 4269          * accesses.
 4270          */
 4271         *wired = (entry->wired_count != 0);
 4272         if (*wired)
 4273                 fault_type = entry->protection;
 4274 
 4275         if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
 4276                 /*
 4277                  * Fail if the entry was copy-on-write for a write fault.
 4278                  */
 4279                 if (fault_type & VM_PROT_WRITE)
 4280                         return (KERN_FAILURE);
 4281                 /*
 4282                  * We're attempting to read a copy-on-write page --
 4283                  * don't allow writes.
 4284                  */
 4285                 prot &= ~VM_PROT_WRITE;
 4286         }
 4287 
 4288         /*
 4289          * Fail if an object should be created.
 4290          */
 4291         if (entry->object.vm_object == NULL && !map->system_map)
 4292                 return (KERN_FAILURE);
 4293 
 4294         /*
 4295          * Return the object/offset from this entry.  If the entry was
 4296          * copy-on-write or empty, it has been fixed up.
 4297          */
 4298         *pindex = UOFF_TO_IDX((vaddr - entry->start) + entry->offset);
 4299         *object = entry->object.vm_object;
 4300 
 4301         *out_prot = prot;
 4302         return (KERN_SUCCESS);
 4303 }
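/*
 * Usage note: unlike vm_map_lookup(), the _locked variant assumes the
 * caller already holds the map read lock and must not sleep, so every
 * case that would require upgrading the lock or allocating (submaps,
 * copy-on-write resolution for writes, missing backing objects) is
 * reported as KERN_FAILURE and left for the caller to retry via the
 * blocking path.
 */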
 4304 
 4305 /*
 4306  *      vm_map_lookup_done:
 4307  *
 4308  *      Releases locks acquired by a vm_map_lookup
 4309  *      (according to the handle returned by that lookup).
 4310  */
 4311 void
 4312 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
 4313 {
 4314         /*
 4315          * Unlock the main-level map
 4316          */
 4317         vm_map_unlock_read(map);
 4318 }
 4319 
 4320 vm_offset_t
 4321 vm_map_max_KBI(const struct vm_map *map)
 4322 {
 4323 
 4324         return (vm_map_max(map));
 4325 }
 4326 
 4327 vm_offset_t
 4328 vm_map_min_KBI(const struct vm_map *map)
 4329 {
 4330 
 4331         return (vm_map_min(map));
 4332 }
 4333 
 4334 pmap_t
 4335 vm_map_pmap_KBI(vm_map_t map)
 4336 {
 4337 
 4338         return (map->pmap);
 4339 }
 4340 
 4341 #include "opt_ddb.h"
 4342 #ifdef DDB
 4343 #include <sys/kernel.h>
 4344 
 4345 #include <ddb/ddb.h>
 4346 
 4347 static void
 4348 vm_map_print(vm_map_t map)
 4349 {
 4350         vm_map_entry_t entry;
 4351 
 4352         db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
 4353             (void *)map,
 4354             (void *)map->pmap, map->nentries, map->timestamp);
 4355 
 4356         db_indent += 2;
 4357         for (entry = map->header.next; entry != &map->header;
 4358             entry = entry->next) {
 4359                 db_iprintf("map entry %p: start=%p, end=%p, eflags=%#x\n",
 4360                     (void *)entry, (void *)entry->start, (void *)entry->end,
 4361                     entry->eflags);
 4362                 {
 4363                         static char *inheritance_name[4] =
 4364                         {"share", "copy", "none", "donate_copy"};
 4365 
 4366                         db_iprintf(" prot=%x/%x/%s",
 4367                             entry->protection,
 4368                             entry->max_protection,
 4369                             inheritance_name[(int)(unsigned char)entry->inheritance]);
 4370                         if (entry->wired_count != 0)
 4371                                 db_printf(", wired");
 4372                 }
 4373                 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
 4374                         db_printf(", share=%p, offset=0x%jx\n",
 4375                             (void *)entry->object.sub_map,
 4376                             (uintmax_t)entry->offset);
 4377                         if ((entry->prev == &map->header) ||
 4378                             (entry->prev->object.sub_map !=
 4379                                 entry->object.sub_map)) {
 4380                                 db_indent += 2;
 4381                                 vm_map_print((vm_map_t)entry->object.sub_map);
 4382                                 db_indent -= 2;
 4383                         }
 4384                 } else {
 4385                         if (entry->cred != NULL)
 4386                                 db_printf(", ruid %d", entry->cred->cr_ruid);
 4387                         db_printf(", object=%p, offset=0x%jx",
 4388                             (void *)entry->object.vm_object,
 4389                             (uintmax_t)entry->offset);
 4390                         if (entry->object.vm_object && entry->object.vm_object->cred)
 4391                                 db_printf(", obj ruid %d charge %jx",
 4392                                     entry->object.vm_object->cred->cr_ruid,
 4393                                     (uintmax_t)entry->object.vm_object->charge);
 4394                         if (entry->eflags & MAP_ENTRY_COW)
 4395                                 db_printf(", copy (%s)",
 4396                                     (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
 4397                         db_printf("\n");
 4398 
 4399                         if ((entry->prev == &map->header) ||
 4400                             (entry->prev->object.vm_object !=
 4401                                 entry->object.vm_object)) {
 4402                                 db_indent += 2;
 4403                                 vm_object_print((db_expr_t)(intptr_t)
 4404                                                 entry->object.vm_object,
 4405                                                 0, 0, (char *)0);
 4406                                 db_indent -= 2;
 4407                         }
 4408                 }
 4409         }
 4410         db_indent -= 2;
 4411 }
 4412 
 4413 DB_SHOW_COMMAND(map, map)
 4414 {
 4415 
 4416         if (!have_addr) {
 4417                 db_printf("usage: show map <addr>\n");
 4418                 return;
 4419         }
 4420         vm_map_print((vm_map_t)addr);
 4421 }
 4422 
 4423 DB_SHOW_COMMAND(procvm, procvm)
 4424 {
 4425         struct proc *p;
 4426 
 4427         if (have_addr) {
 4428                 p = db_lookup_proc(addr);
 4429         } else {
 4430                 p = curproc;
 4431         }
 4432 
 4433         db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
 4434             (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
 4435             (void *)vmspace_pmap(p->p_vmspace));
 4436 
 4437         vm_map_print((vm_map_t)&p->p_vmspace->vm_map);
 4438 }
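/*
 * Example ddb(4) session (output abbreviated; addresses and counts are
 * illustrative):
 *
 *   db> show procvm
 *   p = 0x..., vmspace = 0x..., map = 0x..., pmap = 0x...
 *   Task map 0x...: pmap=0x..., nentries=24, version=5
 *     map entry 0x...: start=0x..., end=0x..., eflags=0
 *      prot=5/7/copy, object=0x..., offset=0x0
 *   db> show map <addr>
 */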
 4439 
 4440 #endif /* DDB */
