FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_map.c


    1 /*-
    2  * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
    3  *
    4  * Copyright (c) 1991, 1993
    5  *      The Regents of the University of California.  All rights reserved.
    6  *
    7  * This code is derived from software contributed to Berkeley by
    8  * The Mach Operating System project at Carnegie-Mellon University.
    9  *
   10  * Redistribution and use in source and binary forms, with or without
   11  * modification, are permitted provided that the following conditions
   12  * are met:
   13  * 1. Redistributions of source code must retain the above copyright
   14  *    notice, this list of conditions and the following disclaimer.
   15  * 2. Redistributions in binary form must reproduce the above copyright
   16  *    notice, this list of conditions and the following disclaimer in the
   17  *    documentation and/or other materials provided with the distribution.
   18  * 3. Neither the name of the University nor the names of its contributors
   19  *    may be used to endorse or promote products derived from this software
   20  *    without specific prior written permission.
   21  *
   22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   32  * SUCH DAMAGE.
   33  *
   34  *      from: @(#)vm_map.c      8.3 (Berkeley) 1/12/94
   35  *
   36  *
   37  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
   38  * All rights reserved.
   39  *
   40  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
   41  *
   42  * Permission to use, copy, modify and distribute this software and
   43  * its documentation is hereby granted, provided that both the copyright
   44  * notice and this permission notice appear in all copies of the
   45  * software, derivative works or modified versions, and any portions
   46  * thereof, and that both notices appear in supporting documentation.
   47  *
   48  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
   49  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
   50  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
   51  *
   52  * Carnegie Mellon requests users of this software to return to
   53  *
   54  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
   55  *  School of Computer Science
   56  *  Carnegie Mellon University
   57  *  Pittsburgh PA 15213-3890
   58  *
   59  * any improvements or extensions that they make and grant Carnegie the
   60  * rights to redistribute these changes.
   61  */
   62 
   63 /*
   64  *      Virtual memory mapping module.
   65  */
   66 
   67 #include <sys/cdefs.h>
   68 __FBSDID("$FreeBSD: releng/12.0/sys/vm/vm_map.c 338370 2018-08-29 12:24:19Z kib $");
   69 
   70 #include <sys/param.h>
   71 #include <sys/systm.h>
   72 #include <sys/kernel.h>
   73 #include <sys/ktr.h>
   74 #include <sys/lock.h>
   75 #include <sys/mutex.h>
   76 #include <sys/proc.h>
   77 #include <sys/vmmeter.h>
   78 #include <sys/mman.h>
   79 #include <sys/vnode.h>
   80 #include <sys/racct.h>
   81 #include <sys/resourcevar.h>
   82 #include <sys/rwlock.h>
   83 #include <sys/file.h>
   84 #include <sys/sysctl.h>
   85 #include <sys/sysent.h>
   86 #include <sys/shm.h>
   87 
   88 #include <vm/vm.h>
   89 #include <vm/vm_param.h>
   90 #include <vm/pmap.h>
   91 #include <vm/vm_map.h>
   92 #include <vm/vm_page.h>
   93 #include <vm/vm_object.h>
   94 #include <vm/vm_pager.h>
   95 #include <vm/vm_kern.h>
   96 #include <vm/vm_extern.h>
   97 #include <vm/vnode_pager.h>
   98 #include <vm/swap_pager.h>
   99 #include <vm/uma.h>
  100 
  101 /*
  102  *      Virtual memory maps provide for the mapping, protection,
  103  *      and sharing of virtual memory objects.  In addition,
  104  *      this module provides for an efficient virtual copy of
  105  *      memory from one map to another.
  106  *
  107  *      Synchronization is required prior to most operations.
  108  *
  109  *      Maps consist of an ordered doubly-linked list of simple
  110  *      entries; a self-adjusting binary search tree of these
  111  *      entries is used to speed up lookups.
  112  *
  113  *      Since portions of maps are specified by start/end addresses,
  114  *      which may not align with existing map entries, all
  115  *      routines merely "clip" entries to these start/end values.
  116  *      [That is, an entry is split into two, bordering at a
  117  *      start or end value.]  Note that these clippings may not
  118  *      always be necessary (as the two resulting entries are then
  119  *      not changed); however, the clipping is done for convenience.
  120  *
  121  *      As mentioned above, virtual copy operations are performed
  122  *      by copying VM object references from one map to
  123  *      another, and then marking both regions as copy-on-write.
  124  */
  125 
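      /*
       * Editor's illustration (not part of the original source): clipping
       * splits one entry in two at a given address.  An entry covering
       * [0x1000, 0x4000), clipped at 0x2000, becomes two adjacent entries
       * covering the same range:
       *
       *      before:  [0x1000 ................... 0x4000)
       *      after:   [0x1000 ... 0x2000)[0x2000 ... 0x4000)
       *
       * Both halves keep the same object, protection, and inheritance;
       * only start, end, and offset differ.  This is what lets a request
       * such as mprotect() on part of a mapping apply to a sub-range of
       * an existing entry.
       */
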
  126 static struct mtx map_sleep_mtx;
  127 static uma_zone_t mapentzone;
  128 static uma_zone_t kmapentzone;
  129 static uma_zone_t mapzone;
  130 static uma_zone_t vmspace_zone;
  131 static int vmspace_zinit(void *mem, int size, int flags);
  132 static int vm_map_zinit(void *mem, int size, int flags);
  133 static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min,
  134     vm_offset_t max);
  135 static int vm_map_alignspace(vm_map_t map, vm_object_t object,
  136     vm_ooffset_t offset, vm_offset_t *addr, vm_size_t length,
  137     vm_offset_t max_addr, vm_offset_t alignment);
  138 static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
  139 static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
  140 static void vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry);
  141 static int vm_map_growstack(vm_map_t map, vm_offset_t addr,
  142     vm_map_entry_t gap_entry);
  143 static void vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
  144     vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags);
  145 #ifdef INVARIANTS
  146 static void vm_map_zdtor(void *mem, int size, void *arg);
  147 static void vmspace_zdtor(void *mem, int size, void *arg);
  148 #endif
  149 static int vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos,
  150     vm_size_t max_ssize, vm_size_t growsize, vm_prot_t prot, vm_prot_t max,
  151     int cow);
  152 static void vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
  153     vm_offset_t failed_addr);
  154 
  155 #define ENTRY_CHARGED(e) ((e)->cred != NULL || \
  156     ((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \
  157      !((e)->eflags & MAP_ENTRY_NEEDS_COPY)))
  158 
  159 /* 
  160  * PROC_VMSPACE_{UN,}LOCK() can be a noop as long as vmspaces are type
  161  * stable.
  162  */
  163 #define PROC_VMSPACE_LOCK(p) do { } while (0)
  164 #define PROC_VMSPACE_UNLOCK(p) do { } while (0)
  165 
  166 /*
  167  *      VM_MAP_RANGE_CHECK:     [ internal use only ]
  168  *
  169  *      Asserts that the starting and ending region
  170  *      addresses fall within the valid range of the map.
  171  */
  172 #define VM_MAP_RANGE_CHECK(map, start, end)             \
  173                 {                                       \
  174                 if (start < vm_map_min(map))            \
  175                         start = vm_map_min(map);        \
  176                 if (end > vm_map_max(map))              \
  177                         end = vm_map_max(map);          \
  178                 if (start > end)                        \
  179                         start = end;                    \
  180                 }
  181 
  182 /*
  183  *      vm_map_startup:
  184  *
  185  *      Initialize the vm_map module.  Must be called before
  186  *      any other vm_map routines.
  187  *
  188  *      Map and entry structures are allocated from the general
  189  *      purpose memory pool with some exceptions:
  190  *
  191  *      - The kernel map and kmem submap are allocated statically.
  192  *      - Kernel map entries are allocated out of a static pool.
  193  *
  194  *      These restrictions are necessary since malloc() uses the
  195  *      maps and requires map entries.
  196  */
  197 
  198 void
  199 vm_map_startup(void)
  200 {
  201         mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
  202         mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
  203 #ifdef INVARIANTS
  204             vm_map_zdtor,
  205 #else
  206             NULL,
  207 #endif
  208             vm_map_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
  209         uma_prealloc(mapzone, MAX_KMAP);
  210         kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
  211             NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
  212             UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
  213         mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
  214             NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
  215         vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
  216 #ifdef INVARIANTS
  217             vmspace_zdtor,
  218 #else
  219             NULL,
  220 #endif
  221             vmspace_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
  222 }
  223 
  224 static int
  225 vmspace_zinit(void *mem, int size, int flags)
  226 {
  227         struct vmspace *vm;
  228 
  229         vm = (struct vmspace *)mem;
  230 
  231         vm->vm_map.pmap = NULL;
  232         (void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags);
  233         PMAP_LOCK_INIT(vmspace_pmap(vm));
  234         return (0);
  235 }
  236 
  237 static int
  238 vm_map_zinit(void *mem, int size, int flags)
  239 {
  240         vm_map_t map;
  241 
  242         map = (vm_map_t)mem;
  243         memset(map, 0, sizeof(*map));
  244         mtx_init(&map->system_mtx, "vm map (system)", NULL, MTX_DEF | MTX_DUPOK);
  245         sx_init(&map->lock, "vm map (user)");
  246         return (0);
  247 }
  248 
  249 #ifdef INVARIANTS
  250 static void
  251 vmspace_zdtor(void *mem, int size, void *arg)
  252 {
  253         struct vmspace *vm;
  254 
  255         vm = (struct vmspace *)mem;
  256 
  257         vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
  258 }
  259 static void
  260 vm_map_zdtor(void *mem, int size, void *arg)
  261 {
  262         vm_map_t map;
  263 
  264         map = (vm_map_t)mem;
  265         KASSERT(map->nentries == 0,
  266             ("map %p nentries == %d on free.",
  267             map, map->nentries));
  268         KASSERT(map->size == 0,
  269             ("map %p size == %lu on free.",
  270             map, (unsigned long)map->size));
  271 }
  272 #endif  /* INVARIANTS */
  273 
  274 /*
  275  * Allocate a vmspace structure, including a vm_map and pmap,
  276  * and initialize those structures.  The refcnt is set to 1.
  277  *
  278  * If 'pinit' is NULL then the embedded pmap is initialized via pmap_pinit().
  279  */
  280 struct vmspace *
  281 vmspace_alloc(vm_offset_t min, vm_offset_t max, pmap_pinit_t pinit)
  282 {
  283         struct vmspace *vm;
  284 
  285         vm = uma_zalloc(vmspace_zone, M_WAITOK);
  286 
  287         KASSERT(vm->vm_map.pmap == NULL, ("vm_map.pmap must be NULL"));
  288 
  289         if (pinit == NULL)
  290                 pinit = &pmap_pinit;
  291 
  292         if (!pinit(vmspace_pmap(vm))) {
  293                 uma_zfree(vmspace_zone, vm);
  294                 return (NULL);
  295         }
  296         CTR1(KTR_VM, "vmspace_alloc: %p", vm);
  297         _vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max);
  298         vm->vm_refcnt = 1;
  299         vm->vm_shm = NULL;
  300         vm->vm_swrss = 0;
  301         vm->vm_tsize = 0;
  302         vm->vm_dsize = 0;
  303         vm->vm_ssize = 0;
  304         vm->vm_taddr = 0;
  305         vm->vm_daddr = 0;
  306         vm->vm_maxsaddr = 0;
  307         return (vm);
  308 }
  309 
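      /*
       * Editor's sketch of a typical call (hypothetical variable names,
       * not from the original file): a caller creating a fresh user
       * address space might pass its minimum and maximum user addresses
       * and NULL for "pinit", so that the embedded pmap is set up with
       * pmap_pinit():
       *
       *      struct vmspace *vm;
       *
       *      vm = vmspace_alloc(minuser, maxuser, NULL);
       *      if (vm == NULL)
       *              return (ENOMEM);
       *
       * vmspace_alloc() may sleep in uma_zalloc(M_WAITOK), but it still
       * returns NULL if pmap initialization fails.
       */
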
  310 #ifdef RACCT
  311 static void
  312 vmspace_container_reset(struct proc *p)
  313 {
  314 
  315         PROC_LOCK(p);
  316         racct_set(p, RACCT_DATA, 0);
  317         racct_set(p, RACCT_STACK, 0);
  318         racct_set(p, RACCT_RSS, 0);
  319         racct_set(p, RACCT_MEMLOCK, 0);
  320         racct_set(p, RACCT_VMEM, 0);
  321         PROC_UNLOCK(p);
  322 }
  323 #endif
  324 
  325 static inline void
  326 vmspace_dofree(struct vmspace *vm)
  327 {
  328 
  329         CTR1(KTR_VM, "vmspace_free: %p", vm);
  330 
  331         /*
  332          * Make sure any SysV shm is freed, it might not have been in
  333          * exit1().
  334          */
  335         shmexit(vm);
  336 
  337         /*
  338          * Lock the map, to wait out all other references to it.
  339          * Delete all of the mappings and pages they hold, then call
  340          * the pmap module to reclaim anything left.
  341          */
  342         (void)vm_map_remove(&vm->vm_map, vm_map_min(&vm->vm_map),
  343             vm_map_max(&vm->vm_map));
  344 
  345         pmap_release(vmspace_pmap(vm));
  346         vm->vm_map.pmap = NULL;
  347         uma_zfree(vmspace_zone, vm);
  348 }
  349 
  350 void
  351 vmspace_free(struct vmspace *vm)
  352 {
  353 
  354         WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
  355             "vmspace_free() called");
  356 
  357         if (vm->vm_refcnt == 0)
  358                 panic("vmspace_free: attempt to free already freed vmspace");
  359 
  360         if (atomic_fetchadd_int(&vm->vm_refcnt, -1) == 1)
  361                 vmspace_dofree(vm);
  362 }
  363 
  364 void
  365 vmspace_exitfree(struct proc *p)
  366 {
  367         struct vmspace *vm;
  368 
  369         PROC_VMSPACE_LOCK(p);
  370         vm = p->p_vmspace;
  371         p->p_vmspace = NULL;
  372         PROC_VMSPACE_UNLOCK(p);
  373         KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace"));
  374         vmspace_free(vm);
  375 }
  376 
  377 void
  378 vmspace_exit(struct thread *td)
  379 {
  380         int refcnt;
  381         struct vmspace *vm;
  382         struct proc *p;
  383 
  384         /*
  385          * Release user portion of address space.
  386          * This releases references to vnodes,
  387          * which could cause I/O if the file has been unlinked.
  388          * Need to do this early enough that we can still sleep.
  389          *
  390          * The last exiting process to reach this point releases as
  391          * much of the environment as it can. vmspace_dofree() is the
  392          * slower fallback in case another process had a temporary
  393          * reference to the vmspace.
  394          */
  395 
  396         p = td->td_proc;
  397         vm = p->p_vmspace;
  398         atomic_add_int(&vmspace0.vm_refcnt, 1);
  399         do {
  400                 refcnt = vm->vm_refcnt;
  401                 if (refcnt > 1 && p->p_vmspace != &vmspace0) {
  402                         /* Switch now since other proc might free vmspace */
  403                         PROC_VMSPACE_LOCK(p);
  404                         p->p_vmspace = &vmspace0;
  405                         PROC_VMSPACE_UNLOCK(p);
  406                         pmap_activate(td);
  407                 }
  408         } while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt - 1));
  409         if (refcnt == 1) {
  410                 if (p->p_vmspace != vm) {
  411                         /* vmspace not yet freed, switch back */
  412                         PROC_VMSPACE_LOCK(p);
  413                         p->p_vmspace = vm;
  414                         PROC_VMSPACE_UNLOCK(p);
  415                         pmap_activate(td);
  416                 }
  417                 pmap_remove_pages(vmspace_pmap(vm));
  418                 /* Switch now since this proc will free vmspace */
  419                 PROC_VMSPACE_LOCK(p);
  420                 p->p_vmspace = &vmspace0;
  421                 PROC_VMSPACE_UNLOCK(p);
  422                 pmap_activate(td);
  423                 vmspace_dofree(vm);
  424         }
  425 #ifdef RACCT
  426         if (racct_enable)
  427                 vmspace_container_reset(p);
  428 #endif
  429 }
  430 
  431 /* Acquire reference to vmspace owned by another process. */
  432 
  433 struct vmspace *
  434 vmspace_acquire_ref(struct proc *p)
  435 {
  436         struct vmspace *vm;
  437         int refcnt;
  438 
  439         PROC_VMSPACE_LOCK(p);
  440         vm = p->p_vmspace;
  441         if (vm == NULL) {
  442                 PROC_VMSPACE_UNLOCK(p);
  443                 return (NULL);
  444         }
  445         do {
  446                 refcnt = vm->vm_refcnt;
  447                 if (refcnt <= 0) {      /* Avoid 0->1 transition */
  448                         PROC_VMSPACE_UNLOCK(p);
  449                         return (NULL);
  450                 }
  451         } while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt + 1));
  452         if (vm != p->p_vmspace) {
  453                 PROC_VMSPACE_UNLOCK(p);
  454                 vmspace_free(vm);
  455                 return (NULL);
  456         }
  457         PROC_VMSPACE_UNLOCK(p);
  458         return (vm);
  459 }
  460 
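      /*
       * Editor's note: both refcount loops above follow the standard
       * lock-free compare-and-swap idiom, schematically (a sketch of the
       * pattern, not code from this file):
       *
       *      do {
       *              old = *countp;
       *              if (old is a value we must not race past)
       *                      bail out;
       *      } while (!atomic_cmpset_int(countp, old, old + delta));
       *
       * atomic_cmpset_int() stores the new value only if *countp still
       * equals "old"; a concurrent change makes it fail, restarting the
       * loop so the guard is re-evaluated against the fresh value.
       */
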
  461 /*
  462  * Switch between vmspaces in an AIO kernel process.
  463  *
  464  * The AIO kernel processes switch to and from a user process's
  465  * vmspace while performing an I/O operation on behalf of a user
  466  * process.  The new vmspace is either the vmspace of a user process
  467  * obtained from an active AIO request or the initial vmspace of the
  468  * AIO kernel process (when it is idling).  Because user processes
  469  * will block to drain any active AIO requests before proceeding in
  470  * exit() or execve(), the vmspace reference count for these vmspaces
  471  * can never be 0.  This allows for a much simpler implementation than
  472  * the loop in vmspace_acquire_ref() above.  Similarly, AIO kernel
  473  * processes hold an extra reference on their initial vmspace for the
  474  * life of the process so that this guarantee is true for any vmspace
  475  * passed as 'newvm'.
  476  */
  477 void
  478 vmspace_switch_aio(struct vmspace *newvm)
  479 {
  480         struct vmspace *oldvm;
  481 
  482         /* XXX: Need some way to assert that this is an aio daemon. */
  483 
  484         KASSERT(newvm->vm_refcnt > 0,
  485             ("vmspace_switch_aio: newvm unreferenced"));
  486 
  487         oldvm = curproc->p_vmspace;
  488         if (oldvm == newvm)
  489                 return;
  490 
  491         /*
  492          * Point to the new address space and refer to it.
  493          */
  494         curproc->p_vmspace = newvm;
  495         atomic_add_int(&newvm->vm_refcnt, 1);
  496 
  497         /* Activate the new mapping. */
  498         pmap_activate(curthread);
  499 
  500         /* Remove the daemon's reference to the old address space. */
  501         KASSERT(oldvm->vm_refcnt > 1,
  502             ("vmspace_switch_aio: oldvm dropping last reference"));
  503         vmspace_free(oldvm);
  504 }
  505 
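      /*
       * Editor's sketch (hypothetical names, not from this file): an AIO
       * daemon borrows the submitting process's address space for one
       * request and then switches back to its own:
       *
       *      vmspace_switch_aio(userproc->p_vmspace);
       *      ... perform the I/O on behalf of userproc ...
       *      vmspace_switch_aio(myvm);
       *
       * where "myvm" is the daemon's initial vmspace, on which it holds
       * the long-lived extra reference described above.
       */
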
  506 void
  507 _vm_map_lock(vm_map_t map, const char *file, int line)
  508 {
  509 
  510         if (map->system_map)
  511                 mtx_lock_flags_(&map->system_mtx, 0, file, line);
  512         else
  513                 sx_xlock_(&map->lock, file, line);
  514         map->timestamp++;
  515 }
  516 
  517 static void
  518 vm_map_process_deferred(void)
  519 {
  520         struct thread *td;
  521         vm_map_entry_t entry, next;
  522         vm_object_t object;
  523 
  524         td = curthread;
  525         entry = td->td_map_def_user;
  526         td->td_map_def_user = NULL;
  527         while (entry != NULL) {
  528                 next = entry->next;
  529                 if ((entry->eflags & MAP_ENTRY_VN_WRITECNT) != 0) {
  530                         /*
  531                          * Decrement the object's writemappings and
  532                          * possibly the vnode's v_writecount.
  533                          */
  534                         KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
  535                             ("Submap with writecount"));
  536                         object = entry->object.vm_object;
  537                         KASSERT(object != NULL, ("No object for writecount"));
  538                         vnode_pager_release_writecount(object, entry->start,
  539                             entry->end);
  540                 }
  541                 vm_map_entry_deallocate(entry, FALSE);
  542                 entry = next;
  543         }
  544 }
  545 
  546 void
  547 _vm_map_unlock(vm_map_t map, const char *file, int line)
  548 {
  549 
  550         if (map->system_map)
  551                 mtx_unlock_flags_(&map->system_mtx, 0, file, line);
  552         else {
  553                 sx_xunlock_(&map->lock, file, line);
  554                 vm_map_process_deferred();
  555         }
  556 }
  557 
  558 void
  559 _vm_map_lock_read(vm_map_t map, const char *file, int line)
  560 {
  561 
  562         if (map->system_map)
  563                 mtx_lock_flags_(&map->system_mtx, 0, file, line);
  564         else
  565                 sx_slock_(&map->lock, file, line);
  566 }
  567 
  568 void
  569 _vm_map_unlock_read(vm_map_t map, const char *file, int line)
  570 {
  571 
  572         if (map->system_map)
  573                 mtx_unlock_flags_(&map->system_mtx, 0, file, line);
  574         else {
  575                 sx_sunlock_(&map->lock, file, line);
  576                 vm_map_process_deferred();
  577         }
  578 }
  579 
  580 int
  581 _vm_map_trylock(vm_map_t map, const char *file, int line)
  582 {
  583         int error;
  584 
  585         error = map->system_map ?
  586             !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
  587             !sx_try_xlock_(&map->lock, file, line);
  588         if (error == 0)
  589                 map->timestamp++;
  590         return (error == 0);
  591 }
  592 
  593 int
  594 _vm_map_trylock_read(vm_map_t map, const char *file, int line)
  595 {
  596         int error;
  597 
  598         error = map->system_map ?
  599             !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
  600             !sx_try_slock_(&map->lock, file, line);
  601         return (error == 0);
  602 }
  603 
  604 /*
  605  *      _vm_map_lock_upgrade:   [ internal use only ]
  606  *
  607  *      Tries to upgrade a read (shared) lock on the specified map to a write
  608  *      (exclusive) lock.  Returns the value "0" if the upgrade succeeds and a
  609  *      non-zero value if the upgrade fails.  If the upgrade fails, the map is
  610  *      returned without a read or write lock held.
  611  *
  612  *      Requires that the map be read locked.
  613  */
  614 int
  615 _vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
  616 {
  617         unsigned int last_timestamp;
  618 
  619         if (map->system_map) {
  620                 mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
  621         } else {
  622                 if (!sx_try_upgrade_(&map->lock, file, line)) {
  623                         last_timestamp = map->timestamp;
  624                         sx_sunlock_(&map->lock, file, line);
  625                         vm_map_process_deferred();
  626                         /*
  627                          * If the map's timestamp does not change while the
  628                          * map is unlocked, then the upgrade succeeds.
  629                          */
  630                         sx_xlock_(&map->lock, file, line);
  631                         if (last_timestamp != map->timestamp) {
  632                                 sx_xunlock_(&map->lock, file, line);
  633                                 return (1);
  634                         }
  635                 }
  636         }
  637         map->timestamp++;
  638         return (0);
  639 }
  640 
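      /*
       * Editor's sketch of the caller-side protocol (assumed usage, not
       * from this file): because a failed upgrade leaves the map
       * unlocked, the caller must reacquire the lock and revalidate any
       * state derived from the earlier read-locked lookup:
       *
       *      if (vm_map_lock_upgrade(map) != 0) {
       *              vm_map_lock(map);
       *              ... redo the lookup; entries may have changed ...
       *      }
       *      ... the map is now write (exclusively) locked ...
       */
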
  641 void
  642 _vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
  643 {
  644 
  645         if (map->system_map) {
  646                 mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
  647         } else
  648                 sx_downgrade_(&map->lock, file, line);
  649 }
  650 
  651 /*
  652  *      vm_map_locked:
  653  *
  654  *      Returns a non-zero value if the caller holds a write (exclusive) lock
  655  *      on the specified map and the value "0" otherwise.
  656  */
  657 int
  658 vm_map_locked(vm_map_t map)
  659 {
  660 
  661         if (map->system_map)
  662                 return (mtx_owned(&map->system_mtx));
  663         else
  664                 return (sx_xlocked(&map->lock));
  665 }
  666 
  667 #ifdef INVARIANTS
  668 static void
  669 _vm_map_assert_locked(vm_map_t map, const char *file, int line)
  670 {
  671 
  672         if (map->system_map)
  673                 mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
  674         else
  675                 sx_assert_(&map->lock, SA_XLOCKED, file, line);
  676 }
  677 
  678 #define VM_MAP_ASSERT_LOCKED(map) \
  679     _vm_map_assert_locked(map, LOCK_FILE, LOCK_LINE)
  680 #else
  681 #define VM_MAP_ASSERT_LOCKED(map)
  682 #endif
  683 
  684 /*
  685  *      _vm_map_unlock_and_wait:
  686  *
  687  *      Atomically releases the lock on the specified map and puts the calling
  688  *      thread to sleep.  The calling thread will remain asleep until either
  689  *      vm_map_wakeup() is performed on the map or the specified timeout is
  690  *      exceeded.
  691  *
  692  *      WARNING!  This function does not perform deferred deallocations of
  693  *      objects and map entries.  Therefore, the calling thread is expected to
  694  *      reacquire the map lock after reawakening and later perform an ordinary
  695  *      unlock operation, such as vm_map_unlock(), before completing its
  696  *      operation on the map.
  697  */
  698 int
  699 _vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line)
  700 {
  701 
  702         mtx_lock(&map_sleep_mtx);
  703         if (map->system_map)
  704                 mtx_unlock_flags_(&map->system_mtx, 0, file, line);
  705         else
  706                 sx_xunlock_(&map->lock, file, line);
  707         return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps",
  708             timo));
  709 }
  710 
  711 /*
  712  *      vm_map_wakeup:
  713  *
  714  *      Awaken any threads that have slept on the map using
  715  *      vm_map_unlock_and_wait().
  716  */
  717 void
  718 vm_map_wakeup(vm_map_t map)
  719 {
  720 
  721         /*
  722          * Acquire and release map_sleep_mtx to prevent a wakeup()
  723          * from being performed (and lost) between the map unlock
  724          * and the msleep() in _vm_map_unlock_and_wait().
  725          */
  726         mtx_lock(&map_sleep_mtx);
  727         mtx_unlock(&map_sleep_mtx);
  728         wakeup(&map->root);
  729 }
  730 
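      /*
       * Editor's sketch of the pairing (assumed usage, not from this
       * file): a waiter rechecks its condition and reacquires the map
       * lock on every iteration, while the thread that clears the
       * condition calls vm_map_wakeup():
       *
       *      while (condition holds) {
       *              map->needs_wakeup = TRUE;
       *              (void)vm_map_unlock_and_wait(map, 0);
       *              vm_map_lock(map);
       *      }
       *
       * The map_sleep_mtx handoff above is what keeps a wakeup issued
       * between the map unlock and the msleep() from being lost.
       */
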
  731 void
  732 vm_map_busy(vm_map_t map)
  733 {
  734 
  735         VM_MAP_ASSERT_LOCKED(map);
  736         map->busy++;
  737 }
  738 
  739 void
  740 vm_map_unbusy(vm_map_t map)
  741 {
  742 
  743         VM_MAP_ASSERT_LOCKED(map);
  744         KASSERT(map->busy, ("vm_map_unbusy: not busy"));
  745         if (--map->busy == 0 && (map->flags & MAP_BUSY_WAKEUP)) {
  746                 vm_map_modflags(map, 0, MAP_BUSY_WAKEUP);
  747                 wakeup(&map->busy);
  748         }
  749 }
  750 
  751 void 
  752 vm_map_wait_busy(vm_map_t map)
  753 {
  754 
  755         VM_MAP_ASSERT_LOCKED(map);
  756         while (map->busy) {
  757                 vm_map_modflags(map, MAP_BUSY_WAKEUP, 0);
  758                 if (map->system_map)
  759                         msleep(&map->busy, &map->system_mtx, 0, "mbusy", 0);
  760                 else
  761                         sx_sleep(&map->busy, &map->lock, 0, "mbusy", 0);
  762         }
  763         map->timestamp++;
  764 }
  765 
  766 long
  767 vmspace_resident_count(struct vmspace *vmspace)
  768 {
  769         return pmap_resident_count(vmspace_pmap(vmspace));
  770 }
  771 
  772 /*
  773  *      vm_map_create:
  774  *
  775  *      Creates and returns a new empty VM map with
  776  *      the given physical map structure, and having
  777  *      the given lower and upper address bounds.
  778  */
  779 vm_map_t
  780 vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
  781 {
  782         vm_map_t result;
  783 
  784         result = uma_zalloc(mapzone, M_WAITOK);
  785         CTR1(KTR_VM, "vm_map_create: %p", result);
  786         _vm_map_init(result, pmap, min, max);
  787         return (result);
  788 }
  789 
  790 /*
  791  * Initialize an existing vm_map structure
  792  * such as that in the vmspace structure.
  793  */
  794 static void
  795 _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
  796 {
  797 
  798         map->header.next = map->header.prev = &map->header;
  799         map->needs_wakeup = FALSE;
  800         map->system_map = 0;
  801         map->pmap = pmap;
  802         map->header.end = min;
  803         map->header.start = max;
  804         map->flags = 0;
  805         map->root = NULL;
  806         map->timestamp = 0;
  807         map->busy = 0;
  808 }
  809 
  810 void
  811 vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
  812 {
  813 
  814         _vm_map_init(map, pmap, min, max);
  815         mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
  816         sx_init(&map->lock, "user map");
  817 }
  818 
  819 /*
  820  *      vm_map_entry_dispose:   [ internal use only ]
  821  *
  822  *      Inverse of vm_map_entry_create.
  823  */
  824 static void
  825 vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
  826 {
  827         uma_zfree(map->system_map ? kmapentzone : mapentzone, entry);
  828 }
  829 
  830 /*
  831  *      vm_map_entry_create:    [ internal use only ]
  832  *
  833  *      Allocates a VM map entry for insertion.
  834  *      No entry fields are filled in.
  835  */
  836 static vm_map_entry_t
  837 vm_map_entry_create(vm_map_t map)
  838 {
  839         vm_map_entry_t new_entry;
  840 
  841         if (map->system_map)
  842                 new_entry = uma_zalloc(kmapentzone, M_NOWAIT);
  843         else
  844                 new_entry = uma_zalloc(mapentzone, M_WAITOK);
  845         if (new_entry == NULL)
  846                 panic("vm_map_entry_create: kernel resources exhausted");
  847         return (new_entry);
  848 }
  849 
  850 /*
  851  *      vm_map_entry_set_behavior:
  852  *
  853  *      Set the expected access behavior, either normal, random, or
  854  *      sequential.
  855  */
  856 static inline void
  857 vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior)
  858 {
  859         entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
  860             (behavior & MAP_ENTRY_BEHAV_MASK);
  861 }
  862 
  863 /*
  864  *      vm_map_entry_set_max_free:
  865  *
  866  *      Set the max_free field in a vm_map_entry.
  867  */
  868 static inline void
  869 vm_map_entry_set_max_free(vm_map_entry_t entry)
  870 {
  871 
  872         entry->max_free = entry->adj_free;
  873         if (entry->left != NULL && entry->left->max_free > entry->max_free)
  874                 entry->max_free = entry->left->max_free;
  875         if (entry->right != NULL && entry->right->max_free > entry->max_free)
  876                 entry->max_free = entry->right->max_free;
  877 }
  878 
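      /*
       * Editor's illustration (values invented): max_free summarizes the
       * largest adj_free anywhere in an entry's subtree, letting a search
       * skip whole subtrees that cannot satisfy a request:
       *
       *              e (adj_free = 4K)
       *             / \
       *         left   right       left->max_free  = 16K
       *                            right->max_free =  8K
       *
       *      =>  e->max_free = max(4K, 16K, 8K) = 16K
       *
       * A request for 12K can descend into e's left subtree; a request
       * for 32K fails immediately at e.
       */
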
  879 /*
  880  *      vm_map_entry_splay:
  881  *
  882  *      The Sleator and Tarjan top-down splay algorithm with the
  883  *      following variation.  Max_free must be computed bottom-up, so
  884  *      on the downward pass, maintain the left and right spines in
  885  *      reverse order.  Then, make a second pass up each side to fix
  886  *      the pointers and compute max_free.  The time bound is O(log n)
  887  *      amortized.
  888  *
  889  *      The new root is the vm_map_entry containing "addr", or else an
  890  *      adjacent entry (lower or higher) if addr is not in the tree.
  891  *
  892  *      The map must be locked, and leaves it so.
  893  *
  894  *      Returns: the new root.
  895  */
  896 static vm_map_entry_t
  897 vm_map_entry_splay(vm_offset_t addr, vm_map_entry_t root)
  898 {
  899         vm_map_entry_t llist, rlist;
  900         vm_map_entry_t ltree, rtree;
  901         vm_map_entry_t y;
  902 
  903         /* Special case of empty tree. */
  904         if (root == NULL)
  905                 return (root);
  906 
  907         /*
  908          * Pass One: Splay down the tree until we find addr or a NULL
  909          * pointer where addr would go.  llist and rlist are the two
  910          * sides in reverse order (bottom-up), with llist linked by
  911          * the right pointer and rlist linked by the left pointer in
  912          * the vm_map_entry.  Wait until Pass Two to set max_free on
  913          * the two spines.
  914          */
  915         llist = NULL;
  916         rlist = NULL;
  917         for (;;) {
  918                 /* root is never NULL in here. */
  919                 if (addr < root->start) {
  920                         y = root->left;
  921                         if (y == NULL)
  922                                 break;
  923                         if (addr < y->start && y->left != NULL) {
  924                                 /* Rotate right and put y on rlist. */
  925                                 root->left = y->right;
  926                                 y->right = root;
  927                                 vm_map_entry_set_max_free(root);
  928                                 root = y->left;
  929                                 y->left = rlist;
  930                                 rlist = y;
  931                         } else {
  932                                 /* Put root on rlist. */
  933                                 root->left = rlist;
  934                                 rlist = root;
  935                                 root = y;
  936                         }
  937                 } else if (addr >= root->end) {
  938                         y = root->right;
  939                         if (y == NULL)
  940                                 break;
  941                         if (addr >= y->end && y->right != NULL) {
  942                                 /* Rotate left and put y on llist. */
  943                                 root->right = y->left;
  944                                 y->left = root;
  945                                 vm_map_entry_set_max_free(root);
  946                                 root = y->right;
  947                                 y->right = llist;
  948                                 llist = y;
  949                         } else {
  950                                 /* Put root on llist. */
  951                                 root->right = llist;
  952                                 llist = root;
  953                                 root = y;
  954                         }
  955                 } else
  956                         break;
  957         }
  958 
  959         /*
  960          * Pass Two: Walk back up the two spines, flip the pointers
  961          * and set max_free.  The subtrees of the root go at the
  962          * bottom of llist and rlist.
  963          */
  964         ltree = root->left;
  965         while (llist != NULL) {
  966                 y = llist->right;
  967                 llist->right = ltree;
  968                 vm_map_entry_set_max_free(llist);
  969                 ltree = llist;
  970                 llist = y;
  971         }
  972         rtree = root->right;
  973         while (rlist != NULL) {
  974                 y = rlist->left;
  975                 rlist->left = rtree;
  976                 vm_map_entry_set_max_free(rlist);
  977                 rtree = rlist;
  978                 rlist = y;
  979         }
  980 
  981         /*
  982          * Final assembly: add ltree and rtree as subtrees of root.
  983          */
  984         root->left = ltree;
  985         root->right = rtree;
  986         vm_map_entry_set_max_free(root);
  987 
  988         return (root);
  989 }
  990 
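      /*
       * Editor's illustration of one "rotate right" step from Pass One
       * (simplified; the spine bookkeeping is omitted).  When addr lies
       * below y, the rotation lifts y and pushes the old root A down:
       *
       *              A                   y
       *             / \                 / \
       *            y   C      ==>      x   A
       *           / \                     / \
       *          x   B                   B   C
       *
       * Repeated steps like this roughly halve the depth of the access
       * path, which is where the O(log n) amortized bound comes from.
       */
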
  991 /*
  992  *      vm_map_entry_{un,}link:
  993  *
  994  *      Insert/remove entries from maps.
  995  */
  996 static void
  997 vm_map_entry_link(vm_map_t map,
  998                   vm_map_entry_t after_where,
  999                   vm_map_entry_t entry)
 1000 {
 1001 
 1002         CTR4(KTR_VM,
 1003             "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map,
 1004             map->nentries, entry, after_where);
 1005         VM_MAP_ASSERT_LOCKED(map);
 1006         KASSERT(after_where->end <= entry->start,
 1007             ("vm_map_entry_link: prev end %jx new start %jx overlap",
 1008             (uintmax_t)after_where->end, (uintmax_t)entry->start));
 1009         KASSERT(entry->end <= after_where->next->start,
 1010             ("vm_map_entry_link: new end %jx next start %jx overlap",
 1011             (uintmax_t)entry->end, (uintmax_t)after_where->next->start));
 1012 
 1013         map->nentries++;
 1014         entry->prev = after_where;
 1015         entry->next = after_where->next;
 1016         entry->next->prev = entry;
 1017         after_where->next = entry;
 1018 
 1019         if (after_where != &map->header) {
 1020                 if (after_where != map->root)
 1021                         vm_map_entry_splay(after_where->start, map->root);
 1022                 entry->right = after_where->right;
 1023                 entry->left = after_where;
 1024                 after_where->right = NULL;
 1025                 after_where->adj_free = entry->start - after_where->end;
 1026                 vm_map_entry_set_max_free(after_where);
 1027         } else {
 1028                 entry->right = map->root;
 1029                 entry->left = NULL;
 1030         }
 1031         entry->adj_free = entry->next->start - entry->end;
 1032         vm_map_entry_set_max_free(entry);
 1033         map->root = entry;
 1034 }
 1035 
 1036 static void
 1037 vm_map_entry_unlink(vm_map_t map,
 1038                     vm_map_entry_t entry)
 1039 {
 1040         vm_map_entry_t next, prev, root;
 1041 
 1042         VM_MAP_ASSERT_LOCKED(map);
 1043         if (entry != map->root)
 1044                 vm_map_entry_splay(entry->start, map->root);
 1045         if (entry->left == NULL)
 1046                 root = entry->right;
 1047         else {
 1048                 root = vm_map_entry_splay(entry->start, entry->left);
 1049                 root->right = entry->right;
 1050                 root->adj_free = entry->next->start - root->end;
 1051                 vm_map_entry_set_max_free(root);
 1052         }
 1053         map->root = root;
 1054 
 1055         prev = entry->prev;
 1056         next = entry->next;
 1057         next->prev = prev;
 1058         prev->next = next;
 1059         map->nentries--;
 1060         CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
 1061             map->nentries, entry);
 1062 }
 1063 
 1064 /*
 1065  *      vm_map_entry_resize_free:
 1066  *
 1067  *      Recompute the amount of free space following a vm_map_entry
 1068  *      and propagate that value up the tree.  Call this function after
 1069  *      resizing a map entry in-place, that is, without a call to
 1070  *      vm_map_entry_link() or _unlink().
 1071  *
 1072  *      The map must be locked, and leaves it so.
 1073  */
 1074 static void
 1075 vm_map_entry_resize_free(vm_map_t map, vm_map_entry_t entry)
 1076 {
 1077 
 1078         /*
 1079          * Using splay trees without parent pointers, propagating
 1080          * max_free up the tree is done by moving the entry to the
 1081          * root and making the change there.
 1082          */
 1083         if (entry != map->root)
 1084                 map->root = vm_map_entry_splay(entry->start, map->root);
 1085 
 1086         entry->adj_free = entry->next->start - entry->end;
 1087         vm_map_entry_set_max_free(entry);
 1088 }
 1089 
 1090 /*
 1091  *      vm_map_lookup_entry:    [ internal use only ]
 1092  *
 1093  *      Finds the map entry containing (or
 1094  *      immediately preceding) the specified address
 1095  *      in the given map; the entry is returned
 1096  *      in the "entry" parameter.  The boolean
 1097  *      result indicates whether the address is
 1098  *      actually contained in the map.
 1099  */
 1100 boolean_t
 1101 vm_map_lookup_entry(
 1102         vm_map_t map,
 1103         vm_offset_t address,
 1104         vm_map_entry_t *entry)  /* OUT */
 1105 {
 1106         vm_map_entry_t cur;
 1107         boolean_t locked;
 1108 
 1109         /*
 1110          * If the map is empty, then the map entry immediately preceding
 1111          * "address" is the map's header.
 1112          */
 1113         cur = map->root;
 1114         if (cur == NULL)
 1115                 *entry = &map->header;
 1116         else if (address >= cur->start && cur->end > address) {
 1117                 *entry = cur;
 1118                 return (TRUE);
 1119         } else if ((locked = vm_map_locked(map)) ||
 1120             sx_try_upgrade(&map->lock)) {
 1121                 /*
 1122                  * Splay requires a write lock on the map.  However, it only
 1123                  * restructures the binary search tree; it does not otherwise
 1124                  * change the map.  Thus, the map's timestamp need not change
 1125                  * on a temporary upgrade.
 1126                  */
 1127                 map->root = cur = vm_map_entry_splay(address, cur);
 1128                 if (!locked)
 1129                         sx_downgrade(&map->lock);
 1130 
 1131                 /*
 1132                  * If "address" is contained within a map entry, the new root
 1133                  * is that map entry.  Otherwise, the new root is a map entry
 1134                  * immediately before or after "address".
 1135                  */
 1136                 if (address >= cur->start) {
 1137                         *entry = cur;
 1138                         if (cur->end > address)
 1139                                 return (TRUE);
 1140                 } else
 1141                         *entry = cur->prev;
 1142         } else
 1143                 /*
 1144                  * Since the map is only locked for read access, perform a
 1145                  * standard binary search tree lookup for "address".
 1146                  */
 1147                 for (;;) {
 1148                         if (address < cur->start) {
 1149                                 if (cur->left == NULL) {
 1150                                         *entry = cur->prev;
 1151                                         break;
 1152                                 }
 1153                                 cur = cur->left;
 1154                         } else if (cur->end > address) {
 1155                                 *entry = cur;
 1156                                 return (TRUE);
 1157                         } else {
 1158                                 if (cur->right == NULL) {
 1159                                         *entry = cur;
 1160                                         break;
 1161                                 }
 1162                                 cur = cur->right;
 1163                         }
 1164                 }
 1165         return (FALSE);
 1166 }
 1167 
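      /*
       * Editor's sketch of a typical lookup (assumed usage, not from
       * this file):
       *
       *      vm_map_entry_t entry;
       *
       *      if (vm_map_lookup_entry(map, addr, &entry)) {
       *              ... addr lies inside "entry" ...
       *      } else {
       *              ... "entry" is the entry just below addr, or the
       *              ... map header; entry->next is the first entry at
       *              ... a higher address ...
       *      }
       *
       * The map must be locked at least for read; note that even a
       * read-locked lookup may try to upgrade the lock so it can splay
       * the tree.
       */
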
 1168 /*
 1169  *      vm_map_insert:
 1170  *
 1171  *      Inserts the given whole VM object into the target
 1172  *      map at the specified address range.  The object's
 1173  *      size should match that of the address range.
 1174  *
 1175  *      Requires that the map be locked, and leaves it so.
 1176  *
 1177  *      If object is non-NULL, ref count must be bumped by caller
 1178  *      prior to making call to account for the new entry.
 1179  */
 1180 int
 1181 vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
 1182     vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow)
 1183 {
 1184         vm_map_entry_t new_entry, prev_entry, temp_entry;
 1185         struct ucred *cred;
 1186         vm_eflags_t protoeflags;
 1187         vm_inherit_t inheritance;
 1188 
 1189         VM_MAP_ASSERT_LOCKED(map);
 1190         KASSERT(object != kernel_object ||
 1191             (cow & MAP_COPY_ON_WRITE) == 0,
 1192             ("vm_map_insert: kernel object and COW"));
 1193         KASSERT(object == NULL || (cow & MAP_NOFAULT) == 0,
 1194             ("vm_map_insert: paradoxical MAP_NOFAULT request"));
 1195         KASSERT((prot & ~max) == 0,
 1196             ("prot %#x is not subset of max_prot %#x", prot, max));
 1197 
 1198         /*
 1199          * Check that the start and end points are not bogus.
 1200          */
 1201         if (start < vm_map_min(map) || end > vm_map_max(map) ||
 1202             start >= end)
 1203                 return (KERN_INVALID_ADDRESS);
 1204 
 1205         /*
 1206          * Find the entry prior to the proposed starting address; if it's part
 1207          * of an existing entry, this range is bogus.
 1208          */
 1209         if (vm_map_lookup_entry(map, start, &temp_entry))
 1210                 return (KERN_NO_SPACE);
 1211 
 1212         prev_entry = temp_entry;
 1213 
 1214         /*
 1215          * Assert that the next entry doesn't overlap the end point.
 1216          */
 1217         if (prev_entry->next->start < end)
 1218                 return (KERN_NO_SPACE);
 1219 
 1220         if ((cow & MAP_CREATE_GUARD) != 0 && (object != NULL ||
 1221             max != VM_PROT_NONE))
 1222                 return (KERN_INVALID_ARGUMENT);
 1223 
 1224         protoeflags = 0;
 1225         if (cow & MAP_COPY_ON_WRITE)
 1226                 protoeflags |= MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY;
 1227         if (cow & MAP_NOFAULT)
 1228                 protoeflags |= MAP_ENTRY_NOFAULT;
 1229         if (cow & MAP_DISABLE_SYNCER)
 1230                 protoeflags |= MAP_ENTRY_NOSYNC;
 1231         if (cow & MAP_DISABLE_COREDUMP)
 1232                 protoeflags |= MAP_ENTRY_NOCOREDUMP;
 1233         if (cow & MAP_STACK_GROWS_DOWN)
 1234                 protoeflags |= MAP_ENTRY_GROWS_DOWN;
 1235         if (cow & MAP_STACK_GROWS_UP)
 1236                 protoeflags |= MAP_ENTRY_GROWS_UP;
 1237         if (cow & MAP_VN_WRITECOUNT)
 1238                 protoeflags |= MAP_ENTRY_VN_WRITECNT;
 1239         if ((cow & MAP_CREATE_GUARD) != 0)
 1240                 protoeflags |= MAP_ENTRY_GUARD;
 1241         if ((cow & MAP_CREATE_STACK_GAP_DN) != 0)
 1242                 protoeflags |= MAP_ENTRY_STACK_GAP_DN;
 1243         if ((cow & MAP_CREATE_STACK_GAP_UP) != 0)
 1244                 protoeflags |= MAP_ENTRY_STACK_GAP_UP;
 1245         if (cow & MAP_INHERIT_SHARE)
 1246                 inheritance = VM_INHERIT_SHARE;
 1247         else
 1248                 inheritance = VM_INHERIT_DEFAULT;
 1249 
 1250         cred = NULL;
 1251         if ((cow & (MAP_ACC_NO_CHARGE | MAP_NOFAULT | MAP_CREATE_GUARD)) != 0)
 1252                 goto charged;
 1253         if ((cow & MAP_ACC_CHARGED) || ((prot & VM_PROT_WRITE) &&
 1254             ((protoeflags & MAP_ENTRY_NEEDS_COPY) || object == NULL))) {
 1255                 if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start))
 1256                         return (KERN_RESOURCE_SHORTAGE);
 1257                 KASSERT(object == NULL ||
 1258                     (protoeflags & MAP_ENTRY_NEEDS_COPY) != 0 ||
 1259                     object->cred == NULL,
 1260                     ("overcommit: vm_map_insert o %p", object));
 1261                 cred = curthread->td_ucred;
 1262         }
 1263 
 1264 charged:
 1265         /* Expand the kernel pmap, if necessary. */
 1266         if (map == kernel_map && end > kernel_vm_end)
 1267                 pmap_growkernel(end);
 1268         if (object != NULL) {
 1269                 /*
 1270                  * OBJ_ONEMAPPING must be cleared unless this mapping
 1271                  * is trivially proven to be the only mapping for any
 1272                  * of the object's pages.  (Object granularity
 1273                  * reference counting is insufficient to recognize
 1274                  * aliases with precision.)
 1275                  */
 1276                 VM_OBJECT_WLOCK(object);
 1277                 if (object->ref_count > 1 || object->shadow_count != 0)
 1278                         vm_object_clear_flag(object, OBJ_ONEMAPPING);
 1279                 VM_OBJECT_WUNLOCK(object);
 1280         } else if (prev_entry != &map->header &&
 1281             (prev_entry->eflags & ~MAP_ENTRY_USER_WIRED) == protoeflags &&
 1282             (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 &&
 1283             prev_entry->end == start && (prev_entry->cred == cred ||
 1284             (prev_entry->object.vm_object != NULL &&
 1285             prev_entry->object.vm_object->cred == cred)) &&
 1286             vm_object_coalesce(prev_entry->object.vm_object,
 1287             prev_entry->offset,
 1288             (vm_size_t)(prev_entry->end - prev_entry->start),
 1289             (vm_size_t)(end - prev_entry->end), cred != NULL &&
 1290             (protoeflags & MAP_ENTRY_NEEDS_COPY) == 0)) {
 1291                 /*
 1292                  * We were able to extend the object.  Determine if we
 1293                  * can extend the previous map entry to include the
 1294                  * new range as well.
 1295                  */
 1296                 if (prev_entry->inheritance == inheritance &&
 1297                     prev_entry->protection == prot &&
 1298                     prev_entry->max_protection == max &&
 1299                     prev_entry->wired_count == 0) {
 1300                         KASSERT((prev_entry->eflags & MAP_ENTRY_USER_WIRED) ==
 1301                             0, ("prev_entry %p has incoherent wiring",
 1302                             prev_entry));
 1303                         if ((prev_entry->eflags & MAP_ENTRY_GUARD) == 0)
 1304                                 map->size += end - prev_entry->end;
 1305                         prev_entry->end = end;
 1306                         vm_map_entry_resize_free(map, prev_entry);
 1307                         vm_map_simplify_entry(map, prev_entry);
 1308                         return (KERN_SUCCESS);
 1309                 }
 1310 
 1311                 /*
 1312                  * If we can extend the object but cannot extend the
 1313                  * map entry, we have to create a new map entry.  We
 1314                  * must bump the ref count on the extended object to
 1315                  * account for it.  object may be NULL.
 1316                  */
 1317                 object = prev_entry->object.vm_object;
 1318                 offset = prev_entry->offset +
 1319                     (prev_entry->end - prev_entry->start);
 1320                 vm_object_reference(object);
 1321                 if (cred != NULL && object != NULL && object->cred != NULL &&
 1322                     !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
 1323                         /* Object already accounts for this uid. */
 1324                         cred = NULL;
 1325                 }
 1326         }
 1327         if (cred != NULL)
 1328                 crhold(cred);
 1329 
 1330         /*
 1331          * Create a new entry
 1332          */
 1333         new_entry = vm_map_entry_create(map);
 1334         new_entry->start = start;
 1335         new_entry->end = end;
 1336         new_entry->cred = NULL;
 1337 
 1338         new_entry->eflags = protoeflags;
 1339         new_entry->object.vm_object = object;
 1340         new_entry->offset = offset;
 1341 
 1342         new_entry->inheritance = inheritance;
 1343         new_entry->protection = prot;
 1344         new_entry->max_protection = max;
 1345         new_entry->wired_count = 0;
 1346         new_entry->wiring_thread = NULL;
 1347         new_entry->read_ahead = VM_FAULT_READ_AHEAD_INIT;
 1348         new_entry->next_read = start;
 1349 
 1350         KASSERT(cred == NULL || !ENTRY_CHARGED(new_entry),
 1351             ("overcommit: vm_map_insert leaks vm_map %p", new_entry));
 1352         new_entry->cred = cred;
 1353 
 1354         /*
 1355          * Insert the new entry into the list
 1356          */
 1357         vm_map_entry_link(map, prev_entry, new_entry);
 1358         if ((new_entry->eflags & MAP_ENTRY_GUARD) == 0)
 1359                 map->size += new_entry->end - new_entry->start;
 1360 
 1361         /*
 1362          * Try to coalesce the new entry with both the previous and next
 1363          * entries in the list.  Previously, we only attempted to coalesce
 1364          * with the previous entry when object is NULL.  Here, we handle the
 1365          * other cases, which are less common.
 1366          */
 1367         vm_map_simplify_entry(map, new_entry);
 1368 
 1369         if ((cow & (MAP_PREFAULT | MAP_PREFAULT_PARTIAL)) != 0) {
 1370                 vm_map_pmap_enter(map, start, prot, object, OFF_TO_IDX(offset),
 1371                     end - start, cow & MAP_PREFAULT_PARTIAL);
 1372         }
 1373 
 1374         return (KERN_SUCCESS);
 1375 }
 1376 
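      /*
       * Editor's sketch (assumed usage, not from this file): as the
       * header comment requires, the caller takes the object reference
       * before inserting, and drops it again if the insert fails:
       *
       *      vm_object_reference(object);
       *      rv = vm_map_insert(map, object, 0, start, start + size,
       *          VM_PROT_RW, VM_PROT_RW, 0);
       *      if (rv != KERN_SUCCESS)
       *              vm_object_deallocate(object);
       */
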
 1377 /*
 1378  *      vm_map_findspace:
 1379  *
 1380  *      Find the first fit (lowest VM address) for "length" free bytes
 1381  *      beginning at address >= start in the given map.
 1382  *
 1383  *      In a vm_map_entry, "adj_free" is the amount of free space
 1384  *      adjacent (higher address) to this entry, and "max_free" is the
 1385  *      maximum amount of contiguous free space in its subtree.  This
 1386  *      allows finding a free region in one path down the tree, so
 1387  *      O(log n) amortized with splay trees.
 1388  *
 1389  *      The map must be locked, and leaves it so.
 1390  *
 1391  *      Returns: 0 on success, and starting address in *addr,
 1392  *               1 if insufficient space.
 1393  */
 1394 int
 1395 vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length,
 1396     vm_offset_t *addr)  /* OUT */
 1397 {
 1398         vm_map_entry_t entry;
 1399         vm_offset_t st;
 1400 
 1401         /*
 1402          * Request must fit within min/max VM address and must avoid
 1403          * address wrap.
 1404          */
 1405         start = MAX(start, vm_map_min(map));
 1406         if (start + length > vm_map_max(map) || start + length < start)
 1407                 return (1);
 1408 
 1409         /* Empty tree means wide open address space. */
 1410         if (map->root == NULL) {
 1411                 *addr = start;
 1412                 return (0);
 1413         }
 1414 
 1415         /*
 1416          * After splay, if start comes before root node, then there
 1417          * must be a gap from start to the root.
 1418          */
 1419         map->root = vm_map_entry_splay(start, map->root);
 1420         if (start + length <= map->root->start) {
 1421                 *addr = start;
 1422                 return (0);
 1423         }
 1424 
 1425         /*
 1426          * Root is the last node that might begin its gap before
 1427          * start, and this is the last comparison where address
 1428          * wrap might be a problem.
 1429          */
 1430         st = (start > map->root->end) ? start : map->root->end;
 1431         if (length <= map->root->end + map->root->adj_free - st) {
 1432                 *addr = st;
 1433                 return (0);
 1434         }
 1435 
 1436         /* With max_free, can immediately tell if no solution. */
 1437         entry = map->root->right;
 1438         if (entry == NULL || length > entry->max_free)
 1439                 return (1);
 1440 
 1441         /*
 1442          * Search the right subtree in the order: left subtree, root,
 1443          * right subtree (first fit).  The previous splay implies that
 1444          * all regions in the right subtree have addresses > start.
 1445          */
 1446         while (entry != NULL) {
 1447                 if (entry->left != NULL && entry->left->max_free >= length)
 1448                         entry = entry->left;
 1449                 else if (entry->adj_free >= length) {
 1450                         *addr = entry->end;
 1451                         return (0);
 1452                 } else
 1453                         entry = entry->right;
 1454         }
 1455 
 1456         /* Can't get here, so panic if we do. */
 1457         panic("vm_map_findspace: max_free corrupt");
 1458 }
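
/*
 * Editor's note: the descent above relies only on the "adj_free"/"max_free"
 * augmentation, not on the splaying itself.  A minimal standalone sketch of
 * the same first-fit walk, assuming a toy node type (names hypothetical,
 * not part of vm_map.c):
 */
#if 0
#include <stddef.h>

struct toy_entry {
	unsigned long end;		/* end of this allocated range */
	unsigned long adj_free;		/* free space just above this range */
	unsigned long max_free;		/* max contiguous free in subtree */
	struct toy_entry *left, *right;
};

/* Return the lowest address of a gap >= length, or 0 if none exists. */
static unsigned long
toy_first_fit(struct toy_entry *n, unsigned long length)
{
	while (n != NULL) {
		if (n->left != NULL && n->left->max_free >= length)
			n = n->left;		/* a fit exists further left */
		else if (n->adj_free >= length)
			return (n->end);	/* first fit: this node's gap */
		else
			n = n->right;		/* fit, if any, lies right */
	}
	return (0);
}
#endif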
 1459 
 1460 int
 1461 vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
 1462     vm_offset_t start, vm_size_t length, vm_prot_t prot,
 1463     vm_prot_t max, int cow)
 1464 {
 1465         vm_offset_t end;
 1466         int result;
 1467 
 1468         end = start + length;
 1469         KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
 1470             object == NULL,
 1471             ("vm_map_fixed: non-NULL backing object for stack"));
 1472         vm_map_lock(map);
 1473         VM_MAP_RANGE_CHECK(map, start, end);
 1474         if ((cow & MAP_CHECK_EXCL) == 0)
 1475                 vm_map_delete(map, start, end);
 1476         if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
 1477                 result = vm_map_stack_locked(map, start, length, sgrowsiz,
 1478                     prot, max, cow);
 1479         } else {
 1480                 result = vm_map_insert(map, object, offset, start, end,
 1481                     prot, max, cow);
 1482         }
 1483         vm_map_unlock(map);
 1484         return (result);
 1485 }
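
/*
 * Editor's note: a hypothetical caller of vm_map_fixed() (sketch only;
 * "example_map_at" is not part of FreeBSD).  As with vm_map_find() below,
 * the caller must already hold a reference on "obj" for the new entry.
 */
#if 0
static int
example_map_at(vm_map_t map, vm_object_t obj, vm_ooffset_t off,
    vm_offset_t addr, vm_size_t size)
{
	/*
	 * MAP_CHECK_EXCL makes the mapping fail on overlap instead of
	 * deleting whatever is already mapped at [addr, addr + size).
	 */
	return (vm_map_fixed(map, obj, off, addr, size,
	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_ALL, MAP_CHECK_EXCL));
}
#endif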
 1486 
 1487 /*
 1488  * Searches for the specified amount of free space in the given map with the
 1489  * specified alignment.  Performs an address-ordered, first-fit search from
 1490  * the given address "*addr", with an optional upper bound "max_addr".  If the
 1491  * parameter "alignment" is zero, then the alignment is computed from the
 1492  * given (object, offset) pair so as to enable the greatest possible use of
 1493  * superpage mappings.  Returns KERN_SUCCESS and the address of the free space
 1494  * in "*addr" if successful.  Otherwise, returns KERN_NO_SPACE.
 1495  *
 1496  * The map must be locked.  Initially, there must be at least "length" bytes
 1497  * of free space at the given address.
 1498  */
 1499 static int
 1500 vm_map_alignspace(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
 1501     vm_offset_t *addr, vm_size_t length, vm_offset_t max_addr,
 1502     vm_offset_t alignment)
 1503 {
 1504         vm_offset_t aligned_addr, free_addr;
 1505 
 1506         VM_MAP_ASSERT_LOCKED(map);
 1507         free_addr = *addr;
 1508         KASSERT(!vm_map_findspace(map, free_addr, length, addr) &&
 1509             free_addr == *addr, ("caller provided insufficient free space"));
 1510         for (;;) {
 1511                 /*
 1512                  * At the start of every iteration, the free space at address
 1513                  * "*addr" is at least "length" bytes.
 1514                  */
 1515                 if (alignment == 0)
 1516                         pmap_align_superpage(object, offset, addr, length);
 1517                 else if ((*addr & (alignment - 1)) != 0) {
 1518                         *addr &= ~(alignment - 1);
 1519                         *addr += alignment;
 1520                 }
 1521                 aligned_addr = *addr;
 1522                 if (aligned_addr == free_addr) {
 1523                         /*
 1524                          * Alignment did not change "*addr", so "*addr" must
 1525                          * still provide sufficient free space.
 1526                          */
 1527                         return (KERN_SUCCESS);
 1528                 }
 1529 
 1530                 /*
 1531                  * Test for address wrap on "*addr".  A wrapped "*addr" could
 1532                  * be a valid address, in which case vm_map_findspace() cannot
 1533                  * be relied upon to fail.
 1534                  */
 1535                 if (aligned_addr < free_addr ||
 1536                     vm_map_findspace(map, aligned_addr, length, addr) ||
 1537                     (max_addr != 0 && *addr + length > max_addr))
 1538                         return (KERN_NO_SPACE);
 1539                 free_addr = *addr;
 1540                 if (free_addr == aligned_addr) {
 1541                         /*
 1542                          * If a successful call to vm_map_findspace() did not
 1543                          * change "*addr", then "*addr" must still be aligned
 1544                          * and provide sufficient free space.
 1545                          */
 1546                         return (KERN_SUCCESS);
 1547                 }
 1548         }
 1549 }
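
/*
 * Editor's note: the bit trick above rounds "*addr" up to a power-of-two
 * boundary.  A standalone sketch with a worked example (editor's addition):
 */
#if 0
/* Round addr up to the next multiple of alignment (a power of 2). */
static unsigned long
align_up(unsigned long addr, unsigned long alignment)
{
	if ((addr & (alignment - 1)) != 0) {	/* not already aligned */
		addr &= ~(alignment - 1);	/* round down ... */
		addr += alignment;		/* ... then bump up */
	}
	return (addr);
}
/* align_up(0x1234, 0x1000) == 0x2000; align_up(0x2000, 0x1000) == 0x2000 */
#endif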
 1550 
 1551 /*
 1552  *      vm_map_find finds an unallocated region in the target address
 1553  *      map with the given length.  The search is defined to be
 1554  *      first-fit from the specified address; the region found is
 1555  *      returned in the same parameter.
 1556  *
 1557  *      If object is non-NULL, ref count must be bumped by caller
 1558  *      prior to making call to account for the new entry.
 1559  */
 1560 int
 1561 vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
 1562             vm_offset_t *addr,  /* IN/OUT */
 1563             vm_size_t length, vm_offset_t max_addr, int find_space,
 1564             vm_prot_t prot, vm_prot_t max, int cow)
 1565 {
 1566         vm_offset_t alignment, min_addr;
 1567         int rv;
 1568 
 1569         KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
 1570             object == NULL,
 1571             ("vm_map_find: non-NULL backing object for stack"));
 1572         if (find_space == VMFS_OPTIMAL_SPACE && (object == NULL ||
 1573             (object->flags & OBJ_COLORED) == 0))
 1574                 find_space = VMFS_ANY_SPACE;
 1575         if (find_space >> 8 != 0) {
 1576                 KASSERT((find_space & 0xff) == 0, ("bad VMFS flags"));
 1577                 alignment = (vm_offset_t)1 << (find_space >> 8);
 1578         } else
 1579                 alignment = 0;
 1580         vm_map_lock(map);
 1581         if (find_space != VMFS_NO_SPACE) {
 1582                 KASSERT(find_space == VMFS_ANY_SPACE ||
 1583                     find_space == VMFS_OPTIMAL_SPACE ||
 1584                     find_space == VMFS_SUPER_SPACE ||
 1585                     alignment != 0, ("unexpected VMFS flag"));
 1586                 min_addr = *addr;
 1587 again:
 1588                 if (vm_map_findspace(map, min_addr, length, addr) ||
 1589                     (max_addr != 0 && *addr + length > max_addr)) {
 1590                         rv = KERN_NO_SPACE;
 1591                         goto done;
 1592                 }
 1593                 if (find_space != VMFS_ANY_SPACE &&
 1594                     (rv = vm_map_alignspace(map, object, offset, addr, length,
 1595                     max_addr, alignment)) != KERN_SUCCESS) {
 1596                         if (find_space == VMFS_OPTIMAL_SPACE) {
 1597                                 find_space = VMFS_ANY_SPACE;
 1598                                 goto again;
 1599                         }
 1600                         goto done;
 1601                 }
 1602         }
 1603         if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
 1604                 rv = vm_map_stack_locked(map, *addr, length, sgrowsiz, prot,
 1605                     max, cow);
 1606         } else {
 1607                 rv = vm_map_insert(map, object, offset, *addr, *addr + length,
 1608                     prot, max, cow);
 1609         }
 1610 done:
 1611         vm_map_unlock(map);
 1612         return (rv);
 1613 }
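
/*
 * Editor's note: a hypothetical use of vm_map_find() to reserve anonymous
 * address space (sketch only; "example_reserve" is not part of FreeBSD).
 * vm_map_find() acquires and releases the map lock itself.  Requesting an
 * explicit alignment encodes log2(alignment) in the high bits of
 * "find_space" (cf. the "find_space >> 8" test above; FreeBSD wraps this
 * encoding in a VMFS_ALIGNED_SPACE() macro).
 */
#if 0
static int
example_reserve(vm_map_t map, vm_offset_t *addrp, vm_size_t size)
{
	/* First fit at or above *addrp, no upper bound, no backing object. */
	return (vm_map_find(map, NULL, 0, addrp, size, 0, VMFS_ANY_SPACE,
	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_ALL, 0));
}
#endif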
 1614 
 1615 /*
 1616  *      vm_map_find_min() is a variant of vm_map_find() that takes an
 1617  *      additional parameter (min_addr) and treats the given address
 1618  *      (*addr) differently.  Specifically, it treats *addr as a hint
 1619  *      and not as the minimum address where the mapping is created.
 1620  *
 1621  *      This function works in two phases.  First, it tries to
 1622  *      allocate above the hint.  If that fails and the hint is
 1623  *      greater than min_addr, it performs a second pass, replacing
 1624  *      the hint with min_addr as the minimum address for the
 1625  *      allocation.
 1626  */
 1627 int
 1628 vm_map_find_min(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
 1629     vm_offset_t *addr, vm_size_t length, vm_offset_t min_addr,
 1630     vm_offset_t max_addr, int find_space, vm_prot_t prot, vm_prot_t max,
 1631     int cow)
 1632 {
 1633         vm_offset_t hint;
 1634         int rv;
 1635 
 1636         hint = *addr;
 1637         for (;;) {
 1638                 rv = vm_map_find(map, object, offset, addr, length, max_addr,
 1639                     find_space, prot, max, cow);
 1640                 if (rv == KERN_SUCCESS || min_addr >= hint)
 1641                         return (rv);
 1642                 *addr = hint = min_addr;
 1643         }
 1644 }
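
/*
 * Editor's note: a worked illustration of the two-phase search (editor's
 * addition).  With hint == 0x40000000 and min_addr == 0x10000000, a first
 * pass that finds no space at or above the hint is retried once with
 * *addr = 0x10000000, and the result of that second pass is final.  If
 * min_addr >= hint to begin with, no retry is possible and the first
 * failure is returned directly.
 */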
 1645 
 1646 /*
 1647  *      vm_map_simplify_entry:
 1648  *
 1649  *      Simplify the given map entry by merging it with either neighbor;
 1650  *      this routine may merge with both neighbors at once.
 1651  *
 1652  *      The map must be locked.
 1653  *
 1654  *      This routine guarantees that the passed entry remains valid (though
 1655  *      possibly extended).  When merging, this routine may delete one or
 1656  *      both neighbors.
 1657  */
 1658 void
 1659 vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
 1660 {
 1661         vm_map_entry_t next, prev;
 1662         vm_size_t prevsize, esize;
 1663 
 1664         if ((entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP |
 1665             MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP)) != 0)
 1666                 return;
 1667 
 1668         prev = entry->prev;
 1669         if (prev != &map->header) {
 1670                 prevsize = prev->end - prev->start;
 1671                 if ( (prev->end == entry->start) &&
 1672                      (prev->object.vm_object == entry->object.vm_object) &&
 1673                      (!prev->object.vm_object ||
 1674                         (prev->offset + prevsize == entry->offset)) &&
 1675                      (prev->eflags == entry->eflags) &&
 1676                      (prev->protection == entry->protection) &&
 1677                      (prev->max_protection == entry->max_protection) &&
 1678                      (prev->inheritance == entry->inheritance) &&
 1679                      (prev->wired_count == entry->wired_count) &&
 1680                      (prev->cred == entry->cred)) {
 1681                         vm_map_entry_unlink(map, prev);
 1682                         entry->start = prev->start;
 1683                         entry->offset = prev->offset;
 1684                         if (entry->prev != &map->header)
 1685                                 vm_map_entry_resize_free(map, entry->prev);
 1686 
 1687                         /*
 1688                          * If the backing object is a vnode object,
 1689                          * vm_object_deallocate() calls vrele().
 1690                          * However, vrele() does not lock the vnode
 1691                          * because the vnode has additional
 1692                          * references.  Thus, the map lock can be kept
 1693                          * without causing a lock-order reversal with
 1694                          * the vnode lock.
 1695                          *
 1696                          * Since we count the number of virtual page
 1697                          * mappings in object->un_pager.vnp.writemappings,
 1698                          * the writemappings value should not be adjusted
 1699                          * when the entry is disposed of.
 1700                          */
 1701                         if (prev->object.vm_object)
 1702                                 vm_object_deallocate(prev->object.vm_object);
 1703                         if (prev->cred != NULL)
 1704                                 crfree(prev->cred);
 1705                         vm_map_entry_dispose(map, prev);
 1706                 }
 1707         }
 1708 
 1709         next = entry->next;
 1710         if (next != &map->header) {
 1711                 esize = entry->end - entry->start;
 1712                 if ((entry->end == next->start) &&
 1713                     (next->object.vm_object == entry->object.vm_object) &&
 1714                      (!entry->object.vm_object ||
 1715                         (entry->offset + esize == next->offset)) &&
 1716                     (next->eflags == entry->eflags) &&
 1717                     (next->protection == entry->protection) &&
 1718                     (next->max_protection == entry->max_protection) &&
 1719                     (next->inheritance == entry->inheritance) &&
 1720                     (next->wired_count == entry->wired_count) &&
 1721                     (next->cred == entry->cred)) {
 1722                         vm_map_entry_unlink(map, next);
 1723                         entry->end = next->end;
 1724                         vm_map_entry_resize_free(map, entry);
 1725 
 1726                         /*
 1727                          * See comment above.
 1728                          */
 1729                         if (next->object.vm_object)
 1730                                 vm_object_deallocate(next->object.vm_object);
 1731                         if (next->cred != NULL)
 1732                                 crfree(next->cred);
 1733                         vm_map_entry_dispose(map, next);
 1734                 }
 1735         }
 1736 }
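
/*
 * Editor's note: the two symmetric tests above check the same mergeability
 * condition.  Factored as a predicate for clarity (sketch only; "can_merge"
 * is not part of FreeBSD):
 */
#if 0
/* True if entry "a" and the immediately following entry "b" can merge. */
static bool
can_merge(vm_map_entry_t a, vm_map_entry_t b)
{
	return (a->end == b->start &&
	    a->object.vm_object == b->object.vm_object &&
	    (a->object.vm_object == NULL ||
	    a->offset + (a->end - a->start) == b->offset) &&
	    a->eflags == b->eflags &&
	    a->protection == b->protection &&
	    a->max_protection == b->max_protection &&
	    a->inheritance == b->inheritance &&
	    a->wired_count == b->wired_count &&
	    a->cred == b->cred);
}
#endif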
 1737 /*
 1738  *      vm_map_clip_start:      [ internal use only ]
 1739  *
 1740  *      Asserts that the given entry begins at or after
 1741  *      the specified address; if necessary,
 1742  *      it splits the entry into two.
 1743  */
 1744 #define vm_map_clip_start(map, entry, startaddr) \
 1745 { \
 1746         if (startaddr > entry->start) \
 1747                 _vm_map_clip_start(map, entry, startaddr); \
 1748 }
 1749 
 1750 /*
 1751  *      This routine is called only when it is known that
 1752  *      the entry must be split.
 1753  */
 1754 static void
 1755 _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
 1756 {
 1757         vm_map_entry_t new_entry;
 1758 
 1759         VM_MAP_ASSERT_LOCKED(map);
 1760         KASSERT(entry->end > start && entry->start < start,
 1761             ("_vm_map_clip_start: invalid clip of entry %p", entry));
 1762 
 1763         /*
 1764          * Split off the front portion -- note that we must insert the new
 1765          * entry BEFORE this one, so that this entry has the specified
 1766          * starting address.
 1767          */
 1768         vm_map_simplify_entry(map, entry);
 1769 
 1770         /*
 1771          * If there is no object backing this entry, we might as well create
 1772          * one now.  If we defer it, an object can get created after the map
 1773          * is clipped, and individual objects will be created for the split-up
 1774          * map.  This is a bit of a hack, but is also about the best place to
 1775          * put this improvement.
 1776          */
 1777         if (entry->object.vm_object == NULL && !map->system_map &&
 1778             (entry->eflags & MAP_ENTRY_GUARD) == 0) {
 1779                 vm_object_t object;
 1780                 object = vm_object_allocate(OBJT_DEFAULT,
 1781                                 atop(entry->end - entry->start));
 1782                 entry->object.vm_object = object;
 1783                 entry->offset = 0;
 1784                 if (entry->cred != NULL) {
 1785                         object->cred = entry->cred;
 1786                         object->charge = entry->end - entry->start;
 1787                         entry->cred = NULL;
 1788                 }
 1789         } else if (entry->object.vm_object != NULL &&
 1790                    ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
 1791                    entry->cred != NULL) {
 1792                 VM_OBJECT_WLOCK(entry->object.vm_object);
 1793                 KASSERT(entry->object.vm_object->cred == NULL,
 1794                     ("OVERCOMMIT: vm_entry_clip_start: both cred e %p", entry));
 1795                 entry->object.vm_object->cred = entry->cred;
 1796                 entry->object.vm_object->charge = entry->end - entry->start;
 1797                 VM_OBJECT_WUNLOCK(entry->object.vm_object);
 1798                 entry->cred = NULL;
 1799         }
 1800 
 1801         new_entry = vm_map_entry_create(map);
 1802         *new_entry = *entry;
 1803 
 1804         new_entry->end = start;
 1805         entry->offset += (start - entry->start);
 1806         entry->start = start;
 1807         if (new_entry->cred != NULL)
 1808                 crhold(entry->cred);
 1809 
 1810         vm_map_entry_link(map, entry->prev, new_entry);
 1811 
 1812         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
 1813                 vm_object_reference(new_entry->object.vm_object);
 1814                 /*
 1815                  * The object->un_pager.vnp.writemappings for the
 1816                  * object of a MAP_ENTRY_VN_WRITECNT-type entry must be
 1817                  * kept as-is here.  The virtual pages are
 1818                  * re-distributed among the clipped entries, so the sum
 1819                  * is left unchanged.
 1820                  */
 1821         }
 1822 }
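
/*
 * Editor's note: a worked example of the arithmetic above (editor's
 * addition).  Clipping entry [0x2000, 0x6000) with offset 0 at
 * start == 0x3000 produces:
 *
 *	new_entry: [0x2000, 0x3000), offset 0        (front piece, linked
 *	                                              before "entry")
 *	entry:     [0x3000, 0x6000), offset 0x1000   (offset advanced by
 *	                                              start - old start)
 */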
 1823 
 1824 /*
 1825  *      vm_map_clip_end:        [ internal use only ]
 1826  *
 1827  *      Asserts that the given entry ends at or before
 1828  *      the specified address; if necessary,
 1829  *      it splits the entry into two.
 1830  */
 1831 #define vm_map_clip_end(map, entry, endaddr) \
 1832 { \
 1833         if ((endaddr) < (entry->end)) \
 1834                 _vm_map_clip_end((map), (entry), (endaddr)); \
 1835 }
 1836 
 1837 /*
 1838  *      This routine is called only when it is known that
 1839  *      the entry must be split.
 1840  */
 1841 static void
 1842 _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
 1843 {
 1844         vm_map_entry_t new_entry;
 1845 
 1846         VM_MAP_ASSERT_LOCKED(map);
 1847         KASSERT(entry->start < end && entry->end > end,
 1848             ("_vm_map_clip_end: invalid clip of entry %p", entry));
 1849 
 1850         /*
 1851          * If there is no object backing this entry, we might as well create
 1852          * one now.  If we defer it, an object can get created after the map
 1853          * is clipped, and individual objects will be created for the split-up
 1854          * map.  This is a bit of a hack, but is also about the best place to
 1855          * put this improvement.
 1856          */
 1857         if (entry->object.vm_object == NULL && !map->system_map &&
 1858             (entry->eflags & MAP_ENTRY_GUARD) == 0) {
 1859                 vm_object_t object;
 1860                 object = vm_object_allocate(OBJT_DEFAULT,
 1861                                 atop(entry->end - entry->start));
 1862                 entry->object.vm_object = object;
 1863                 entry->offset = 0;
 1864                 if (entry->cred != NULL) {
 1865                         object->cred = entry->cred;
 1866                         object->charge = entry->end - entry->start;
 1867                         entry->cred = NULL;
 1868                 }
 1869         } else if (entry->object.vm_object != NULL &&
 1870                    ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
 1871                    entry->cred != NULL) {
 1872                 VM_OBJECT_WLOCK(entry->object.vm_object);
 1873                 KASSERT(entry->object.vm_object->cred == NULL,
 1874                     ("OVERCOMMIT: vm_entry_clip_end: both cred e %p", entry));
 1875                 entry->object.vm_object->cred = entry->cred;
 1876                 entry->object.vm_object->charge = entry->end - entry->start;
 1877                 VM_OBJECT_WUNLOCK(entry->object.vm_object);
 1878                 entry->cred = NULL;
 1879         }
 1880 
 1881         /*
 1882          * Create a new entry and insert it AFTER the specified entry
 1883          */
 1884         new_entry = vm_map_entry_create(map);
 1885         *new_entry = *entry;
 1886 
 1887         new_entry->start = entry->end = end;
 1888         new_entry->offset += (end - entry->start);
 1889         if (new_entry->cred != NULL)
 1890                 crhold(entry->cred);
 1891 
 1892         vm_map_entry_link(map, entry, new_entry);
 1893 
 1894         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
 1895                 vm_object_reference(new_entry->object.vm_object);
 1896         }
 1897 }
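
/*
 * Editor's note: the mirror-image worked example (editor's addition).
 * Clipping entry [0x3000, 0x6000) with offset 0x1000 at end == 0x5000
 * produces:
 *
 *	entry:     [0x3000, 0x5000), offset 0x1000   (offset unchanged)
 *	new_entry: [0x5000, 0x6000), offset 0x3000   (0x1000 + (end -
 *	                                              old entry start))
 */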
 1898 
 1899 /*
 1900  *      vm_map_submap:          [ kernel use only ]
 1901  *
 1902  *      Mark the given range as handled by a subordinate map.
 1903  *
 1904  *      This range must have been created with vm_map_find,
 1905  *      and no other operations may have been performed on this
 1906  *      range prior to calling vm_map_submap.
 1907  *
 1908  *      Only a limited number of operations can be performed
 1909  *      within this range after calling vm_map_submap:
 1910  *              vm_fault
 1911  *      [Don't try vm_map_copy!]
 1912  *
 1913  *      To remove a submapping, one must first remove the
 1914  *      range from the superior map, and then destroy the
 1915  *      submap (if desired).  [Better yet, don't try it.]
 1916  */
 1917 int
 1918 vm_map_submap(
 1919         vm_map_t map,
 1920         vm_offset_t start,
 1921         vm_offset_t end,
 1922         vm_map_t submap)
 1923 {
 1924         vm_map_entry_t entry;
 1925         int result = KERN_INVALID_ARGUMENT;
 1926 
 1927         vm_map_lock(map);
 1928 
 1929         VM_MAP_RANGE_CHECK(map, start, end);
 1930 
 1931         if (vm_map_lookup_entry(map, start, &entry)) {
 1932                 vm_map_clip_start(map, entry, start);
 1933         } else
 1934                 entry = entry->next;
 1935 
 1936         vm_map_clip_end(map, entry, end);
 1937 
 1938         if ((entry->start == start) && (entry->end == end) &&
 1939             ((entry->eflags & MAP_ENTRY_COW) == 0) &&
 1940             (entry->object.vm_object == NULL)) {
 1941                 entry->object.sub_map = submap;
 1942                 entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
 1943                 result = KERN_SUCCESS;
 1944         }
 1945         vm_map_unlock(map);
 1946 
 1947         return (result);
 1948 }
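
/*
 * Editor's note: a hypothetical submap setup (sketch only; modeled on how
 * kernel submaps are created, e.g. by kmem_suballoc()).  vm_map_create()
 * and vm_map_pmap() are assumed from the surrounding VM interfaces.
 */
#if 0
static vm_map_t
example_make_submap(vm_map_t parent, vm_offset_t start, vm_size_t size)
{
	vm_map_t submap;

	/*
	 * [start, start + size) must have been created with vm_map_find()
	 * and left untouched, per the rules above.
	 */
	submap = vm_map_create(vm_map_pmap(parent), start, start + size);
	if (vm_map_submap(parent, start, start + size, submap) !=
	    KERN_SUCCESS)
		panic("example_make_submap: vm_map_submap failed");
	return (submap);
}
#endif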
 1949 
 1950 /*
 1951  * The maximum number of pages to map if MAP_PREFAULT_PARTIAL is specified
 1952  */
 1953 #define MAX_INIT_PT     96
 1954 
 1955 /*
 1956  *      vm_map_pmap_enter:
 1957  *
 1958  *      Preload the specified map's pmap with mappings to the specified
 1959  *      object's memory-resident pages.  No further physical pages are
 1960  *      allocated, and no further virtual pages are retrieved from secondary
 1961  *      storage.  If the specified flags include MAP_PREFAULT_PARTIAL, then a
 1962  *      limited number of page mappings are created at the low-end of the
 1963  *      specified address range.  (For this purpose, a superpage mapping
 1964  *      counts as one page mapping.)  Otherwise, all resident pages within
 1965  *      the specified address range are mapped.
 1966  */
 1967 static void
 1968 vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
 1969     vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags)
 1970 {
 1971         vm_offset_t start;
 1972         vm_page_t p, p_start;
 1973         vm_pindex_t mask, psize, threshold, tmpidx;
 1974 
 1975         if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL)
 1976                 return;
 1977         VM_OBJECT_RLOCK(object);
 1978         if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
 1979                 VM_OBJECT_RUNLOCK(object);
 1980                 VM_OBJECT_WLOCK(object);
 1981                 if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
 1982                         pmap_object_init_pt(map->pmap, addr, object, pindex,
 1983                             size);
 1984                         VM_OBJECT_WUNLOCK(object);
 1985                         return;
 1986                 }
 1987                 VM_OBJECT_LOCK_DOWNGRADE(object);
 1988         }
 1989 
 1990         psize = atop(size);
 1991         if (psize + pindex > object->size) {
 1992                 if (object->size < pindex) {
 1993                         VM_OBJECT_RUNLOCK(object);
 1994                         return;
 1995                 }
 1996                 psize = object->size - pindex;
 1997         }
 1998 
 1999         start = 0;
 2000         p_start = NULL;
 2001         threshold = MAX_INIT_PT;
 2002 
 2003         p = vm_page_find_least(object, pindex);
 2004         /*
 2005          * Assert: the variable p is either (1) the page with the
 2006          * least pindex greater than or equal to the parameter pindex
 2007          * or (2) NULL.
 2008          */
 2009         for (;
 2010              p != NULL && (tmpidx = p->pindex - pindex) < psize;
 2011              p = TAILQ_NEXT(p, listq)) {
 2012                 /*
 2013                  * Don't allow madvise() to blow away our really
 2014                  * free pages by allocating pv entries.
 2015                  */
 2016                 if (((flags & MAP_PREFAULT_MADVISE) != 0 &&
 2017                     vm_page_count_severe()) ||
 2018                     ((flags & MAP_PREFAULT_PARTIAL) != 0 &&
 2019                     tmpidx >= threshold)) {
 2020                         psize = tmpidx;
 2021                         break;
 2022                 }
 2023                 if (p->valid == VM_PAGE_BITS_ALL) {
 2024                         if (p_start == NULL) {
 2025                                 start = addr + ptoa(tmpidx);
 2026                                 p_start = p;
 2027                         }
 2028                         /* Jump ahead if a superpage mapping is possible. */
 2029                         if (p->psind > 0 && ((addr + ptoa(tmpidx)) &
 2030                             (pagesizes[p->psind] - 1)) == 0) {
 2031                                 mask = atop(pagesizes[p->psind]) - 1;
 2032                                 if (tmpidx + mask < psize &&
 2033                                     vm_page_ps_test(p, PS_ALL_VALID, NULL)) {
 2034                                         p += mask;
 2035                                         threshold += mask;
 2036                                 }
 2037                         }
 2038                 } else if (p_start != NULL) {
 2039                         pmap_enter_object(map->pmap, start, addr +
 2040                             ptoa(tmpidx), p_start, prot);
 2041                         p_start = NULL;
 2042                 }
 2043         }
 2044         if (p_start != NULL)
 2045                 pmap_enter_object(map->pmap, start, addr + ptoa(psize),
 2046                     p_start, prot);
 2047         VM_OBJECT_RUNLOCK(object);
 2048 }
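
/*
 * Editor's note: a worked example of the superpage jump above (editor's
 * addition).  With 4 KB base pages and a 2 MB superpage
 * (pagesizes[p->psind] == 0x200000), mask = atop(0x200000) - 1 = 511.
 * If the run is aligned and fully valid, "p" skips ahead 511 list entries
 * and "threshold" grows by the same amount, so the whole superpage consumes
 * one unit of the MAP_PREFAULT_PARTIAL budget rather than 512.
 */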
 2049 
 2050 /*
 2051  *      vm_map_protect:
 2052  *
 2053  *      Sets the protection of the specified address
 2054  *      region in the target map.  If "set_max" is
 2055  *      specified, the maximum protection is to be set;
 2056  *      otherwise, only the current protection is affected.
 2057  */
 2058 int
 2059 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
 2060                vm_prot_t new_prot, boolean_t set_max)
 2061 {
 2062         vm_map_entry_t current, entry;
 2063         vm_object_t obj;
 2064         struct ucred *cred;
 2065         vm_prot_t old_prot;
 2066 
 2067         if (start == end)
 2068                 return (KERN_SUCCESS);
 2069 
 2070         vm_map_lock(map);
 2071 
 2072         /*
 2073          * Ensure that we are not concurrently wiring pages.  vm_map_wire() may
 2074          * need to fault pages into the map and will drop the map lock while
 2075          * doing so, and the VM object may end up in an inconsistent state if we
 2076          * update the protection on the map entry in between faults.
 2077          */
 2078         vm_map_wait_busy(map);
 2079 
 2080         VM_MAP_RANGE_CHECK(map, start, end);
 2081 
 2082         if (vm_map_lookup_entry(map, start, &entry)) {
 2083                 vm_map_clip_start(map, entry, start);
 2084         } else {
 2085                 entry = entry->next;
 2086         }
 2087 
 2088         /*
 2089          * Make a first pass to check for protection violations.
 2090          */
 2091         for (current = entry; current->start < end; current = current->next) {
 2092                 if ((current->eflags & MAP_ENTRY_GUARD) != 0)
 2093                         continue;
 2094                 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
 2095                         vm_map_unlock(map);
 2096                         return (KERN_INVALID_ARGUMENT);
 2097                 }
 2098                 if ((new_prot & current->max_protection) != new_prot) {
 2099                         vm_map_unlock(map);
 2100                         return (KERN_PROTECTION_FAILURE);
 2101                 }
 2102         }
 2103 
 2104         /*
 2105          * Do an accounting pass for private read-only mappings that
 2106          * now will do cow due to allowed write (e.g. debugger sets
 2107          * breakpoint on text segment)
 2108          */
 2109         for (current = entry; current->start < end; current = current->next) {
 2110 
 2111                 vm_map_clip_end(map, current, end);
 2112 
 2113                 if (set_max ||
 2114                     ((new_prot & ~(current->protection)) & VM_PROT_WRITE) == 0 ||
 2115                     ENTRY_CHARGED(current) ||
 2116                     (current->eflags & MAP_ENTRY_GUARD) != 0) {
 2117                         continue;
 2118                 }
 2119 
 2120                 cred = curthread->td_ucred;
 2121                 obj = current->object.vm_object;
 2122 
 2123                 if (obj == NULL || (current->eflags & MAP_ENTRY_NEEDS_COPY)) {
 2124                         if (!swap_reserve(current->end - current->start)) {
 2125                                 vm_map_unlock(map);
 2126                                 return (KERN_RESOURCE_SHORTAGE);
 2127                         }
 2128                         crhold(cred);
 2129                         current->cred = cred;
 2130                         continue;
 2131                 }
 2132 
 2133                 VM_OBJECT_WLOCK(obj);
 2134                 if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) {
 2135                         VM_OBJECT_WUNLOCK(obj);
 2136                         continue;
 2137                 }
 2138 
 2139                 /*
 2140                  * Charge for the whole object allocation now, since
 2141                  * we cannot distinguish between non-charged and
 2142                  * charged clipped mapping of the same object later.
 2143                  */
 2144                 KASSERT(obj->charge == 0,
 2145                     ("vm_map_protect: object %p overcharged (entry %p)",
 2146                     obj, current));
 2147                 if (!swap_reserve(ptoa(obj->size))) {
 2148                         VM_OBJECT_WUNLOCK(obj);
 2149                         vm_map_unlock(map);
 2150                         return (KERN_RESOURCE_SHORTAGE);
 2151                 }
 2152 
 2153                 crhold(cred);
 2154                 obj->cred = cred;
 2155                 obj->charge = ptoa(obj->size);
 2156                 VM_OBJECT_WUNLOCK(obj);
 2157         }
 2158 
 2159         /*
 2160          * Go back and fix up protections. [Note that clipping is not
 2161          * necessary the second time.]
 2162          */
 2163         for (current = entry; current->start < end; current = current->next) {
 2164                 if ((current->eflags & MAP_ENTRY_GUARD) != 0)
 2165                         continue;
 2166 
 2167                 old_prot = current->protection;
 2168 
 2169                 if (set_max)
 2170                         current->protection =
 2171                             (current->max_protection = new_prot) &
 2172                             old_prot;
 2173                 else
 2174                         current->protection = new_prot;
 2175 
 2176                 /*
 2177                  * For user wired map entries, the normal lazy evaluation of
 2178                  * write access upgrades through soft page faults is
 2179                  * undesirable.  Instead, immediately copy any pages that are
 2180                  * copy-on-write and enable write access in the physical map.
 2181                  */
 2182                 if ((current->eflags & MAP_ENTRY_USER_WIRED) != 0 &&
 2183                     (current->protection & VM_PROT_WRITE) != 0 &&
 2184                     (old_prot & VM_PROT_WRITE) == 0)
 2185                         vm_fault_copy_entry(map, map, current, current, NULL);
 2186 
 2187                 /*
 2188                  * When restricting access, update the physical map.  Worry
 2189                  * about copy-on-write here.
 2190                  */
 2191                 if ((old_prot & ~current->protection) != 0) {
 2192 #define MASK(entry)     (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
 2193                                                         VM_PROT_ALL)
 2194                         pmap_protect(map->pmap, current->start,
 2195                             current->end,
 2196                             current->protection & MASK(current));
 2197 #undef  MASK
 2198                 }
 2199                 vm_map_simplify_entry(map, current);
 2200         }
 2201         vm_map_unlock(map);
 2202         return (KERN_SUCCESS);
 2203 }
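
/*
 * Editor's note: a hypothetical mprotect(2)-style caller (sketch only;
 * "example_make_readonly" is not part of FreeBSD).
 */
#if 0
static int
example_make_readonly(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	/*
	 * set_max == FALSE changes only the current protection, leaving
	 * max_protection intact so write access could be restored later.
	 */
	return (vm_map_protect(map, start, end, VM_PROT_READ, FALSE));
}
#endif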
 2204 
 2205 /*
 2206  *      vm_map_madvise:
 2207  *
 2208  *      This routine traverses a process's map, handling the madvise
 2209  *      system call.  Advisories are classified as either those affecting
 2210  *      the vm_map_entry structure or those affecting the underlying
 2211  *      objects.
 2212  */
 2213 int
 2214 vm_map_madvise(
 2215         vm_map_t map,
 2216         vm_offset_t start,
 2217         vm_offset_t end,
 2218         int behav)
 2219 {
 2220         vm_map_entry_t current, entry;
 2221         bool modify_map;
 2222 
 2223         /*
 2224          * Some madvise calls directly modify the vm_map_entry, in which case
 2225          * we need to use an exclusive lock on the map and we need to perform
 2226          * various clipping operations.  Otherwise we only need a read-lock
 2227          * on the map.
 2228          */
 2229         switch(behav) {
 2230         case MADV_NORMAL:
 2231         case MADV_SEQUENTIAL:
 2232         case MADV_RANDOM:
 2233         case MADV_NOSYNC:
 2234         case MADV_AUTOSYNC:
 2235         case MADV_NOCORE:
 2236         case MADV_CORE:
 2237                 if (start == end)
 2238                         return (0);
 2239                 modify_map = true;
 2240                 vm_map_lock(map);
 2241                 break;
 2242         case MADV_WILLNEED:
 2243         case MADV_DONTNEED:
 2244         case MADV_FREE:
 2245                 if (start == end)
 2246                         return (0);
 2247                 modify_map = false;
 2248                 vm_map_lock_read(map);
 2249                 break;
 2250         default:
 2251                 return (EINVAL);
 2252         }
 2253 
 2254         /*
 2255          * Locate starting entry and clip if necessary.
 2256          */
 2257         VM_MAP_RANGE_CHECK(map, start, end);
 2258 
 2259         if (vm_map_lookup_entry(map, start, &entry)) {
 2260                 if (modify_map)
 2261                         vm_map_clip_start(map, entry, start);
 2262         } else {
 2263                 entry = entry->next;
 2264         }
 2265 
 2266         if (modify_map) {
 2267                 /*
 2268                  * madvise behaviors that are implemented in the vm_map_entry.
 2269                  *
 2270                  * We clip the vm_map_entry so that behavioral changes are
 2271                  * limited to the specified address range.
 2272                  */
 2273                 for (current = entry; current->start < end;
 2274                     current = current->next) {
 2275                         if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
 2276                                 continue;
 2277 
 2278                         vm_map_clip_end(map, current, end);
 2279 
 2280                         switch (behav) {
 2281                         case MADV_NORMAL:
 2282                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
 2283                                 break;
 2284                         case MADV_SEQUENTIAL:
 2285                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
 2286                                 break;
 2287                         case MADV_RANDOM:
 2288                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
 2289                                 break;
 2290                         case MADV_NOSYNC:
 2291                                 current->eflags |= MAP_ENTRY_NOSYNC;
 2292                                 break;
 2293                         case MADV_AUTOSYNC:
 2294                                 current->eflags &= ~MAP_ENTRY_NOSYNC;
 2295                                 break;
 2296                         case MADV_NOCORE:
 2297                                 current->eflags |= MAP_ENTRY_NOCOREDUMP;
 2298                                 break;
 2299                         case MADV_CORE:
 2300                                 current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
 2301                                 break;
 2302                         default:
 2303                                 break;
 2304                         }
 2305                         vm_map_simplify_entry(map, current);
 2306                 }
 2307                 vm_map_unlock(map);
 2308         } else {
 2309                 vm_pindex_t pstart, pend;
 2310 
 2311                 /*
 2312                  * madvise behaviors that are implemented in the underlying
 2313                  * vm_object.
 2314                  *
 2315                  * Since we don't clip the vm_map_entry, we have to clip
 2316                  * the vm_object pindex and count.
 2317                  */
 2318                 for (current = entry; current->start < end;
 2319                     current = current->next) {
 2320                         vm_offset_t useEnd, useStart;
 2321 
 2322                         if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
 2323                                 continue;
 2324 
 2325                         pstart = OFF_TO_IDX(current->offset);
 2326                         pend = pstart + atop(current->end - current->start);
 2327                         useStart = current->start;
 2328                         useEnd = current->end;
 2329 
 2330                         if (current->start < start) {
 2331                                 pstart += atop(start - current->start);
 2332                                 useStart = start;
 2333                         }
 2334                         if (current->end > end) {
 2335                                 pend -= atop(current->end - end);
 2336                                 useEnd = end;
 2337                         }
 2338 
 2339                         if (pstart >= pend)
 2340                                 continue;
 2341 
 2342                         /*
 2343                          * Perform the pmap_advise() before clearing
 2344                          * PGA_REFERENCED in vm_page_advise().  Otherwise, a
 2345                          * concurrent pmap operation, such as pmap_remove(),
 2346                          * could clear a reference in the pmap and set
 2347                          * PGA_REFERENCED on the page before the pmap_advise()
 2348                          * had completed.  Consequently, the page would appear
 2349                          * referenced based upon an old reference that
 2350                          * occurred before this pmap_advise() ran.
 2351                          */
 2352                         if (behav == MADV_DONTNEED || behav == MADV_FREE)
 2353                                 pmap_advise(map->pmap, useStart, useEnd,
 2354                                     behav);
 2355 
 2356                         vm_object_madvise(current->object.vm_object, pstart,
 2357                             pend, behav);
 2358 
 2359                         /*
 2360                          * Pre-populate paging structures in the
 2361                          * WILLNEED case.  For wired entries, the
 2362                          * paging structures are already populated.
 2363                          */
 2364                         if (behav == MADV_WILLNEED &&
 2365                             current->wired_count == 0) {
 2366                                 vm_map_pmap_enter(map,
 2367                                     useStart,
 2368                                     current->protection,
 2369                                     current->object.vm_object,
 2370                                     pstart,
 2371                                     ptoa(pend - pstart),
 2372                                     MAP_PREFAULT_MADVISE
 2373                                 );
 2374                         }
 2375                 }
 2376                 vm_map_unlock_read(map);
 2377         }
 2378         return (0);
 2379 }
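
/*
 * Editor's note: a worked example of the pindex clipping above (editor's
 * addition, 4 KB pages).  For an entry mapping [0x10000, 0x20000) at object
 * offset 0x4000, advised over [0x14000, 0x18000):
 *
 *	pstart = OFF_TO_IDX(0x4000) + atop(0x14000 - 0x10000) = 4 + 4 = 8
 *	pend   = (4 + 16) - atop(0x20000 - 0x18000)           = 20 - 8 = 12
 *
 * so vm_object_madvise() sees object pages [8, 12), exactly the four pages
 * backing the advised subrange.
 */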
 2380 
 2381 
 2382 /*
 2383  *      vm_map_inherit:
 2384  *
 2385  *      Sets the inheritance of the specified address
 2386  *      range in the target map.  Inheritance
 2387  *      affects how the map will be shared with
 2388  *      child maps at the time of vmspace_fork.
 2389  */
 2390 int
 2391 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
 2392                vm_inherit_t new_inheritance)
 2393 {
 2394         vm_map_entry_t entry;
 2395         vm_map_entry_t temp_entry;
 2396 
 2397         switch (new_inheritance) {
 2398         case VM_INHERIT_NONE:
 2399         case VM_INHERIT_COPY:
 2400         case VM_INHERIT_SHARE:
 2401         case VM_INHERIT_ZERO:
 2402                 break;
 2403         default:
 2404                 return (KERN_INVALID_ARGUMENT);
 2405         }
 2406         if (start == end)
 2407                 return (KERN_SUCCESS);
 2408         vm_map_lock(map);
 2409         VM_MAP_RANGE_CHECK(map, start, end);
 2410         if (vm_map_lookup_entry(map, start, &temp_entry)) {
 2411                 entry = temp_entry;
 2412                 vm_map_clip_start(map, entry, start);
 2413         } else
 2414                 entry = temp_entry->next;
 2415         while (entry->start < end) {
 2416                 vm_map_clip_end(map, entry, end);
 2417                 if ((entry->eflags & MAP_ENTRY_GUARD) == 0 ||
 2418                     new_inheritance != VM_INHERIT_ZERO)
 2419                         entry->inheritance = new_inheritance;
 2420                 vm_map_simplify_entry(map, entry);
 2421                 entry = entry->next;
 2422         }
 2423         vm_map_unlock(map);
 2424         return (KERN_SUCCESS);
 2425 }
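
/*
 * Editor's note: a hypothetical minherit(2)-style caller (sketch only;
 * "example_share_with_children" is not part of FreeBSD).
 */
#if 0
static int
example_share_with_children(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	/* Children created by vmspace_fork() share, not copy, this range. */
	return (vm_map_inherit(map, start, end, VM_INHERIT_SHARE));
}
#endif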
 2426 
 2427 /*
 2428  *      vm_map_unwire:
 2429  *
 2430  *      Implements both kernel and user unwiring.
 2431  */
 2432 int
 2433 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
 2434     int flags)
 2435 {
 2436         vm_map_entry_t entry, first_entry, tmp_entry;
 2437         vm_offset_t saved_start;
 2438         unsigned int last_timestamp;
 2439         int rv;
 2440         boolean_t need_wakeup, result, user_unwire;
 2441 
 2442         if (start == end)
 2443                 return (KERN_SUCCESS);
 2444         user_unwire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
 2445         vm_map_lock(map);
 2446         VM_MAP_RANGE_CHECK(map, start, end);
 2447         if (!vm_map_lookup_entry(map, start, &first_entry)) {
 2448                 if (flags & VM_MAP_WIRE_HOLESOK)
 2449                         first_entry = first_entry->next;
 2450                 else {
 2451                         vm_map_unlock(map);
 2452                         return (KERN_INVALID_ADDRESS);
 2453                 }
 2454         }
 2455         last_timestamp = map->timestamp;
 2456         entry = first_entry;
 2457         while (entry->start < end) {
 2458                 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
 2459                         /*
 2460                          * We have not yet clipped the entry.
 2461                          */
 2462                         saved_start = (start >= entry->start) ? start :
 2463                             entry->start;
 2464                         entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
 2465                         if (vm_map_unlock_and_wait(map, 0)) {
 2466                                 /*
 2467                                  * Allow interruption of user unwiring?
 2468                                  */
 2469                         }
 2470                         vm_map_lock(map);
 2471                         if (last_timestamp+1 != map->timestamp) {
 2472                                 /*
 2473                                  * Look again for the entry because the map was
 2474                                  * modified while it was unlocked.
 2475                                  * Specifically, the entry may have been
 2476                                  * clipped, merged, or deleted.
 2477                                  */
 2478                                 if (!vm_map_lookup_entry(map, saved_start,
 2479                                     &tmp_entry)) {
 2480                                         if (flags & VM_MAP_WIRE_HOLESOK)
 2481                                                 tmp_entry = tmp_entry->next;
 2482                                         else {
 2483                                                 if (saved_start == start) {
 2484                                                         /*
 2485                                                          * first_entry has been deleted.
 2486                                                          */
 2487                                                         vm_map_unlock(map);
 2488                                                         return (KERN_INVALID_ADDRESS);
 2489                                                 }
 2490                                                 end = saved_start;
 2491                                                 rv = KERN_INVALID_ADDRESS;
 2492                                                 goto done;
 2493                                         }
 2494                                 }
 2495                                 if (entry == first_entry)
 2496                                         first_entry = tmp_entry;
 2497                                 else
 2498                                         first_entry = NULL;
 2499                                 entry = tmp_entry;
 2500                         }
 2501                         last_timestamp = map->timestamp;
 2502                         continue;
 2503                 }
 2504                 vm_map_clip_start(map, entry, start);
 2505                 vm_map_clip_end(map, entry, end);
 2506                 /*
 2507                  * Mark the entry in case the map lock is released.  (See
 2508                  * above.)
 2509                  */
 2510                 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
 2511                     entry->wiring_thread == NULL,
 2512                     ("owned map entry %p", entry));
 2513                 entry->eflags |= MAP_ENTRY_IN_TRANSITION;
 2514                 entry->wiring_thread = curthread;
 2515                 /*
 2516                  * Check the map for holes in the specified region.
 2517                  * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
 2518                  */
 2519                 if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
 2520                     (entry->end < end && entry->next->start > entry->end)) {
 2521                         end = entry->end;
 2522                         rv = KERN_INVALID_ADDRESS;
 2523                         goto done;
 2524                 }
 2525                 /*
 2526                  * If system unwiring, require that the entry is system wired.
 2527                  */
 2528                 if (!user_unwire &&
 2529                     vm_map_entry_system_wired_count(entry) == 0) {
 2530                         end = entry->end;
 2531                         rv = KERN_INVALID_ARGUMENT;
 2532                         goto done;
 2533                 }
 2534                 entry = entry->next;
 2535         }
 2536         rv = KERN_SUCCESS;
 2537 done:
 2538         need_wakeup = FALSE;
 2539         if (first_entry == NULL) {
 2540                 result = vm_map_lookup_entry(map, start, &first_entry);
 2541                 if (!result && (flags & VM_MAP_WIRE_HOLESOK))
 2542                         first_entry = first_entry->next;
 2543                 else
 2544                         KASSERT(result, ("vm_map_unwire: lookup failed"));
 2545         }
 2546         for (entry = first_entry; entry->start < end; entry = entry->next) {
 2547                 /*
 2548                  * If VM_MAP_WIRE_HOLESOK was specified, an empty
 2549                  * space in the unwired region could have been mapped
 2550                  * while the map lock was dropped for draining
 2551                  * MAP_ENTRY_IN_TRANSITION.  Moreover, another thread
 2552                  * could be simultaneously wiring this new mapping
 2553                  * entry.  Detect these cases and skip any entries
 2554                  * marked as in transition by us.
 2555                  */
 2556                 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
 2557                     entry->wiring_thread != curthread) {
 2558                         KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0,
 2559                             ("vm_map_unwire: !HOLESOK and new/changed entry"));
 2560                         continue;
 2561                 }
 2562 
 2563                 if (rv == KERN_SUCCESS && (!user_unwire ||
 2564                     (entry->eflags & MAP_ENTRY_USER_WIRED))) {
 2565                         if (user_unwire)
 2566                                 entry->eflags &= ~MAP_ENTRY_USER_WIRED;
 2567                         if (entry->wired_count == 1)
 2568                                 vm_map_entry_unwire(map, entry);
 2569                         else
 2570                                 entry->wired_count--;
 2571                 }
 2572                 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
 2573                     ("vm_map_unwire: in-transition flag missing %p", entry));
 2574                 KASSERT(entry->wiring_thread == curthread,
 2575                     ("vm_map_unwire: alien wire %p", entry));
 2576                 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
 2577                 entry->wiring_thread = NULL;
 2578                 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
 2579                         entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
 2580                         need_wakeup = TRUE;
 2581                 }
 2582                 vm_map_simplify_entry(map, entry);
 2583         }
 2584         vm_map_unlock(map);
 2585         if (need_wakeup)
 2586                 vm_map_wakeup(map);
 2587         return (rv);
 2588 }
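
/*
 * Editor's note: a hypothetical munlock(2)-style caller (sketch only;
 * "example_user_unwire" is not part of FreeBSD).
 */
#if 0
static int
example_user_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	/*
	 * VM_MAP_WIRE_USER requests user (mlock-style) unwiring;
	 * VM_MAP_WIRE_HOLESOK tolerates unmapped gaps in the range.
	 */
	return (vm_map_unwire(map, start, end,
	    VM_MAP_WIRE_USER | VM_MAP_WIRE_HOLESOK));
}
#endif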
 2589 
 2590 /*
 2591  *      vm_map_wire_entry_failure:
 2592  *
 2593  *      Handle a wiring failure on the given entry.
 2594  *
 2595  *      The map should be locked.
 2596  */
 2597 static void
 2598 vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
 2599     vm_offset_t failed_addr)
 2600 {
 2601 
 2602         VM_MAP_ASSERT_LOCKED(map);
 2603         KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 &&
 2604             entry->wired_count == 1,
 2605             ("vm_map_wire_entry_failure: entry %p isn't being wired", entry));
 2606         KASSERT(failed_addr < entry->end,
 2607             ("vm_map_wire_entry_failure: entry %p was fully wired", entry));
 2608 
 2609         /*
 2610          * If any pages at the start of this entry were successfully wired,
 2611          * then unwire them.
 2612          */
 2613         if (failed_addr > entry->start) {
 2614                 pmap_unwire(map->pmap, entry->start, failed_addr);
 2615                 vm_object_unwire(entry->object.vm_object, entry->offset,
 2616                     failed_addr - entry->start, PQ_ACTIVE);
 2617         }
 2618 
 2619         /*
 2620          * Assign an out-of-range value to represent the failure to wire this
 2621          * entry.
 2622          */
 2623         entry->wired_count = -1;
 2624 }
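
/*
 * Editor's note: the "-1" assigned above is an out-of-range sentinel.
 * Elsewhere, wired_count == 0 means unwired and wired_count > 0 counts
 * nested wirings, so a later cleanup pass over the range (see
 * vm_map_wire() below) can recognize entries whose wiring failed and
 * handle them separately.
 */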
 2625 
 2626 /*
 2627  *      vm_map_wire:
 2628  *
 2629  *      Implements both kernel and user wiring.
 2630  */
 2631 int
 2632 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
 2633     int flags)
 2634 {
 2635         vm_map_entry_t entry, first_entry, tmp_entry;
 2636         vm_offset_t faddr, saved_end, saved_start;
 2637         unsigned int last_timestamp;
 2638         int rv;
 2639         boolean_t need_wakeup, result, user_wire;
 2640         vm_prot_t prot;
 2641 
 2642         if (start == end)
 2643                 return (KERN_SUCCESS);
 2644         prot = 0;
 2645         if (flags & VM_MAP_WIRE_WRITE)
 2646                 prot |= VM_PROT_WRITE;
 2647         user_wire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
 2648         vm_map_lock(map);
 2649         VM_MAP_RANGE_CHECK(map, start, end);
 2650         if (!vm_map_lookup_entry(map, start, &first_entry)) {
 2651                 if (flags & VM_MAP_WIRE_HOLESOK)
 2652                         first_entry = first_entry->next;
 2653                 else {
 2654                         vm_map_unlock(map);
 2655                         return (KERN_INVALID_ADDRESS);
 2656                 }
 2657         }
 2658         last_timestamp = map->timestamp;
 2659         entry = first_entry;
 2660         while (entry->start < end) {
 2661                 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
 2662                         /*
 2663                          * We have not yet clipped the entry.
 2664                          */
 2665                         saved_start = (start >= entry->start) ? start :
 2666                             entry->start;
 2667                         entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
 2668                         if (vm_map_unlock_and_wait(map, 0)) {
 2669                                 /*
 2670                                  * Allow interruption of user wiring?
 2671                                  */
 2672                         }
 2673                         vm_map_lock(map);
 2674                         if (last_timestamp + 1 != map->timestamp) {
 2675                                 /*
 2676                                  * Look again for the entry because the map was
 2677                                  * modified while it was unlocked.
 2678                                  * Specifically, the entry may have been
 2679                                  * clipped, merged, or deleted.
 2680                                  */
 2681                                 if (!vm_map_lookup_entry(map, saved_start,
 2682                                     &tmp_entry)) {
 2683                                         if (flags & VM_MAP_WIRE_HOLESOK)
 2684                                                 tmp_entry = tmp_entry->next;
 2685                                         else {
 2686                                                 if (saved_start == start) {
 2687                                                         /*
 2688                                                          * first_entry has been deleted.
 2689                                                          */
 2690                                                         vm_map_unlock(map);
 2691                                                         return (KERN_INVALID_ADDRESS);
 2692                                                 }
 2693                                                 end = saved_start;
 2694                                                 rv = KERN_INVALID_ADDRESS;
 2695                                                 goto done;
 2696                                         }
 2697                                 }
 2698                                 if (entry == first_entry)
 2699                                         first_entry = tmp_entry;
 2700                                 else
 2701                                         first_entry = NULL;
 2702                                 entry = tmp_entry;
 2703                         }
 2704                         last_timestamp = map->timestamp;
 2705                         continue;
 2706                 }
 2707                 vm_map_clip_start(map, entry, start);
 2708                 vm_map_clip_end(map, entry, end);
 2709                 /*
 2710                  * Mark the entry in case the map lock is released.  (See
 2711                  * above.)
 2712                  */
 2713                 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
 2714                     entry->wiring_thread == NULL,
 2715                     ("owned map entry %p", entry));
 2716                 entry->eflags |= MAP_ENTRY_IN_TRANSITION;
 2717                 entry->wiring_thread = curthread;
 2718                 if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0
 2719                     || (entry->protection & prot) != prot) {
 2720                         entry->eflags |= MAP_ENTRY_WIRE_SKIPPED;
 2721                         if ((flags & VM_MAP_WIRE_HOLESOK) == 0) {
 2722                                 end = entry->end;
 2723                                 rv = KERN_INVALID_ADDRESS;
 2724                                 goto done;
 2725                         }
 2726                         goto next_entry;
 2727                 }
 2728                 if (entry->wired_count == 0) {
 2729                         entry->wired_count++;
 2730                         saved_start = entry->start;
 2731                         saved_end = entry->end;
 2732 
 2733                         /*
 2734                          * Release the map lock, relying on the in-transition
 2735                          * mark.  Mark the map busy for fork.
 2736                          */
 2737                         vm_map_busy(map);
 2738                         vm_map_unlock(map);
 2739 
 2740                         faddr = saved_start;
 2741                         do {
 2742                                 /*
 2743                                  * Simulate a fault to get the page and enter
 2744                                  * it into the physical map.
 2745                                  */
 2746                                 if ((rv = vm_fault(map, faddr, VM_PROT_NONE,
 2747                                     VM_FAULT_WIRE)) != KERN_SUCCESS)
 2748                                         break;
 2749                         } while ((faddr += PAGE_SIZE) < saved_end);
 2750                         vm_map_lock(map);
 2751                         vm_map_unbusy(map);
 2752                         if (last_timestamp + 1 != map->timestamp) {
 2753                                 /*
 2754                                  * Look again for the entry because the map was
 2755                                  * modified while it was unlocked.  The entry
 2756                                  * may have been clipped, but NOT merged or
 2757                                  * deleted.
 2758                                  */
 2759                                 result = vm_map_lookup_entry(map, saved_start,
 2760                                     &tmp_entry);
 2761                                 KASSERT(result, ("vm_map_wire: lookup failed"));
 2762                                 if (entry == first_entry)
 2763                                         first_entry = tmp_entry;
 2764                                 else
 2765                                         first_entry = NULL;
 2766                                 entry = tmp_entry;
 2767                                 while (entry->end < saved_end) {
 2768                                         /*
 2769                                          * In case of failure, handle entries
 2770                                          * that were not fully wired here;
 2771                                          * fully wired entries are handled
 2772                                          * later.
 2773                                          */
 2774                                         if (rv != KERN_SUCCESS &&
 2775                                             faddr < entry->end)
 2776                                                 vm_map_wire_entry_failure(map,
 2777                                                     entry, faddr);
 2778                                         entry = entry->next;
 2779                                 }
 2780                         }
 2781                         last_timestamp = map->timestamp;
 2782                         if (rv != KERN_SUCCESS) {
 2783                                 vm_map_wire_entry_failure(map, entry, faddr);
 2784                                 end = entry->end;
 2785                                 goto done;
 2786                         }
 2787                 } else if (!user_wire ||
 2788                            (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
 2789                         entry->wired_count++;
 2790                 }
 2791                 /*
 2792                  * Check the map for holes in the specified region.
 2793                  * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
 2794                  */
 2795         next_entry:
 2796                 if ((flags & VM_MAP_WIRE_HOLESOK) == 0 &&
 2797                     entry->end < end && entry->next->start > entry->end) {
 2798                         end = entry->end;
 2799                         rv = KERN_INVALID_ADDRESS;
 2800                         goto done;
 2801                 }
 2802                 entry = entry->next;
 2803         }
 2804         rv = KERN_SUCCESS;
 2805 done:
 2806         need_wakeup = FALSE;
 2807         if (first_entry == NULL) {
 2808                 result = vm_map_lookup_entry(map, start, &first_entry);
 2809                 if (!result && (flags & VM_MAP_WIRE_HOLESOK))
 2810                         first_entry = first_entry->next;
 2811                 else
 2812                         KASSERT(result, ("vm_map_wire: lookup failed"));
 2813         }
 2814         for (entry = first_entry; entry->start < end; entry = entry->next) {
 2815                 /*
 2816                  * If VM_MAP_WIRE_HOLESOK was specified, an empty
 2817                  * space in the unwired region could have been mapped
 2818                  * while the map lock was dropped for faulting in the
 2819                  * pages or draining MAP_ENTRY_IN_TRANSITION.
 2820                  * Moreover, another thread could be simultaneously
 2821                  * wiring this new mapping entry.  Detect these cases
 2822                  * and skip any entries marked as in transition by another thread.
 2823                  */
 2824                 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
 2825                     entry->wiring_thread != curthread) {
 2826                         KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0,
 2827                             ("vm_map_wire: !HOLESOK and new/changed entry"));
 2828                         continue;
 2829                 }
 2830 
 2831                 if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0)
 2832                         goto next_entry_done;
 2833 
 2834                 if (rv == KERN_SUCCESS) {
 2835                         if (user_wire)
 2836                                 entry->eflags |= MAP_ENTRY_USER_WIRED;
 2837                 } else if (entry->wired_count == -1) {
 2838                         /*
 2839                          * Wiring failed on this entry.  Thus, unwiring is
 2840                          * unnecessary.
 2841                          */
 2842                         entry->wired_count = 0;
 2843                 } else if (!user_wire ||
 2844                     (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
 2845                         /*
 2846                          * Undo the wiring.  Wiring succeeded on this entry
 2847                          * but failed on a later entry.  
 2848                          */
 2849                         if (entry->wired_count == 1)
 2850                                 vm_map_entry_unwire(map, entry);
 2851                         else
 2852                                 entry->wired_count--;
 2853                 }
 2854         next_entry_done:
 2855                 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
 2856                     ("vm_map_wire: in-transition flag missing %p", entry));
 2857                 KASSERT(entry->wiring_thread == curthread,
 2858                     ("vm_map_wire: alien wire %p", entry));
 2859                 entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION |
 2860                     MAP_ENTRY_WIRE_SKIPPED);
 2861                 entry->wiring_thread = NULL;
 2862                 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
 2863                         entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
 2864                         need_wakeup = TRUE;
 2865                 }
 2866                 vm_map_simplify_entry(map, entry);
 2867         }
 2868         vm_map_unlock(map);
 2869         if (need_wakeup)
 2870                 vm_map_wakeup(map);
 2871         return (rv);
 2872 }
 2873 
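/*
 * Editor's sketch (hypothetical helper, modeled on the mlock(2) path
 * in sys/vm/vm_mmap.c): a typical user-wiring call rounds the range
 * to page boundaries, requests VM_MAP_WIRE_USER without HOLESOK, and
 * converts the KERN_* status to an errno value.
 */
#if 0	/* illustrative only */
static int
example_mlock(struct proc *p, const void *addr0, size_t len)
{
	vm_offset_t start, end;
	int error;

	start = trunc_page((vm_offset_t)addr0);
	end = round_page((vm_offset_t)addr0 + len);
	/* User wiring; any hole in the range is an error. */
	error = vm_map_wire(&p->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}
#endif
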
 2874 /*
 2875  * vm_map_sync
 2876  *
 2877  * Push any dirty cached pages in the address range to their pager.
 2878  * If syncio is TRUE, dirty pages are written synchronously.
 2879  * If invalidate is TRUE, any cached pages are freed as well.
 2880  *
 2881  * If the size of the region from start to end is zero, we are
 2882  * supposed to flush all modified pages within the region containing
 2883  * start.  Unfortunately, a region can be split or coalesced with
 2884  * neighboring regions, making it difficult to determine what the
 2885  * original region was.  Therefore, we approximate this requirement by
 2886  * flushing the current region containing start.
 2887  *
 2888  * Returns an error if any part of the specified range is not mapped.
 2889  */
 2890 int
 2891 vm_map_sync(
 2892         vm_map_t map,
 2893         vm_offset_t start,
 2894         vm_offset_t end,
 2895         boolean_t syncio,
 2896         boolean_t invalidate)
 2897 {
 2898         vm_map_entry_t current;
 2899         vm_map_entry_t entry;
 2900         vm_size_t size;
 2901         vm_object_t object;
 2902         vm_ooffset_t offset;
 2903         unsigned int last_timestamp;
 2904         boolean_t failed;
 2905 
 2906         vm_map_lock_read(map);
 2907         VM_MAP_RANGE_CHECK(map, start, end);
 2908         if (!vm_map_lookup_entry(map, start, &entry)) {
 2909                 vm_map_unlock_read(map);
 2910                 return (KERN_INVALID_ADDRESS);
 2911         } else if (start == end) {
 2912                 start = entry->start;
 2913                 end = entry->end;
 2914         }
 2915         /*
 2916          * Make a first pass to check for user-wired memory and holes.
 2917          */
 2918         for (current = entry; current->start < end; current = current->next) {
 2919                 if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) {
 2920                         vm_map_unlock_read(map);
 2921                         return (KERN_INVALID_ARGUMENT);
 2922                 }
 2923                 if (end > current->end &&
 2924                     current->end != current->next->start) {
 2925                         vm_map_unlock_read(map);
 2926                         return (KERN_INVALID_ADDRESS);
 2927                 }
 2928         }
 2929 
 2930         if (invalidate)
 2931                 pmap_remove(map->pmap, start, end);
 2932         failed = FALSE;
 2933 
 2934         /*
 2935          * Make a second pass, cleaning/uncaching pages from the indicated
 2936          * objects as we go.
 2937          */
 2938         for (current = entry; current->start < end;) {
 2939                 offset = current->offset + (start - current->start);
 2940                 size = (end <= current->end ? end : current->end) - start;
 2941                 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
 2942                         vm_map_t smap;
 2943                         vm_map_entry_t tentry;
 2944                         vm_size_t tsize;
 2945 
 2946                         smap = current->object.sub_map;
 2947                         vm_map_lock_read(smap);
 2948                         (void) vm_map_lookup_entry(smap, offset, &tentry);
 2949                         tsize = tentry->end - offset;
 2950                         if (tsize < size)
 2951                                 size = tsize;
 2952                         object = tentry->object.vm_object;
 2953                         offset = tentry->offset + (offset - tentry->start);
 2954                         vm_map_unlock_read(smap);
 2955                 } else {
 2956                         object = current->object.vm_object;
 2957                 }
 2958                 vm_object_reference(object);
 2959                 last_timestamp = map->timestamp;
 2960                 vm_map_unlock_read(map);
 2961                 if (!vm_object_sync(object, offset, size, syncio, invalidate))
 2962                         failed = TRUE;
 2963                 start += size;
 2964                 vm_object_deallocate(object);
 2965                 vm_map_lock_read(map);
 2966                 if (last_timestamp == map->timestamp ||
 2967                     !vm_map_lookup_entry(map, start, &current))
 2968                         current = current->next;
 2969         }
 2970 
 2971         vm_map_unlock_read(map);
 2972         return (failed ? KERN_FAILURE : KERN_SUCCESS);
 2973 }
 2974 
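/*
 * Editor's sketch (modeled on the msync(2) path in sys/vm/vm_mmap.c):
 * how the syncio/invalidate booleans and the KERN_* return values of
 * vm_map_sync() map onto the system-call interface.
 */
#if 0	/* illustrative only */
	switch (vm_map_sync(map, addr, addr + size,
	    (flags & MS_ASYNC) == 0,		/* syncio */
	    (flags & MS_INVALIDATE) != 0)) {	/* invalidate */
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
		return (ENOMEM);
	case KERN_INVALID_ARGUMENT:
		return (EBUSY);
	case KERN_FAILURE:
		return (EIO);
	default:
		return (EINVAL);
	}
#endif
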
 2975 /*
 2976  *      vm_map_entry_unwire:    [ internal use only ]
 2977  *
 2978  *      Make the region specified by this entry pageable.
 2979  *
 2980  *      The map in question should be locked.
 2981  *      [This is the reason for this routine's existence.]
 2982  */
 2983 static void
 2984 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
 2985 {
 2986 
 2987         VM_MAP_ASSERT_LOCKED(map);
 2988         KASSERT(entry->wired_count > 0,
 2989             ("vm_map_entry_unwire: entry %p isn't wired", entry));
 2990         pmap_unwire(map->pmap, entry->start, entry->end);
 2991         vm_object_unwire(entry->object.vm_object, entry->offset, entry->end -
 2992             entry->start, PQ_ACTIVE);
 2993         entry->wired_count = 0;
 2994 }
 2995 
 2996 static void
 2997 vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map)
 2998 {
 2999 
 3000         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0)
 3001                 vm_object_deallocate(entry->object.vm_object);
 3002         uma_zfree(system_map ? kmapentzone : mapentzone, entry);
 3003 }
 3004 
 3005 /*
 3006  *      vm_map_entry_delete:    [ internal use only ]
 3007  *
 3008  *      Deallocate the given entry from the target map.
 3009  */
 3010 static void
 3011 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
 3012 {
 3013         vm_object_t object;
 3014         vm_pindex_t offidxstart, offidxend, count, size1;
 3015         vm_size_t size;
 3016 
 3017         vm_map_entry_unlink(map, entry);
 3018         object = entry->object.vm_object;
 3019 
 3020         if ((entry->eflags & MAP_ENTRY_GUARD) != 0) {
 3021                 MPASS(entry->cred == NULL);
 3022                 MPASS((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0);
 3023                 MPASS(object == NULL);
 3024                 vm_map_entry_deallocate(entry, map->system_map);
 3025                 return;
 3026         }
 3027 
 3028         size = entry->end - entry->start;
 3029         map->size -= size;
 3030 
 3031         if (entry->cred != NULL) {
 3032                 swap_release_by_cred(size, entry->cred);
 3033                 crfree(entry->cred);
 3034         }
 3035 
 3036         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
 3037             (object != NULL)) {
 3038                 KASSERT(entry->cred == NULL || object->cred == NULL ||
 3039                     (entry->eflags & MAP_ENTRY_NEEDS_COPY),
 3040                     ("OVERCOMMIT vm_map_entry_delete: both cred %p", entry));
 3041                 count = atop(size);
 3042                 offidxstart = OFF_TO_IDX(entry->offset);
 3043                 offidxend = offidxstart + count;
 3044                 VM_OBJECT_WLOCK(object);
 3045                 if (object->ref_count != 1 && ((object->flags & (OBJ_NOSPLIT |
 3046                     OBJ_ONEMAPPING)) == OBJ_ONEMAPPING ||
 3047                     object == kernel_object)) {
 3048                         vm_object_collapse(object);
 3049 
 3050                         /*
 3051                          * The option OBJPR_NOTMAPPED can be passed here
 3052                          * because vm_map_delete() already performed
 3053                          * pmap_remove() on the only mapping to this range
 3054                          * of pages. 
 3055                          */
 3056                         vm_object_page_remove(object, offidxstart, offidxend,
 3057                             OBJPR_NOTMAPPED);
 3058                         if (object->type == OBJT_SWAP)
 3059                                 swap_pager_freespace(object, offidxstart,
 3060                                     count);
 3061                         if (offidxend >= object->size &&
 3062                             offidxstart < object->size) {
 3063                                 size1 = object->size;
 3064                                 object->size = offidxstart;
 3065                                 if (object->cred != NULL) {
 3066                                         size1 -= object->size;
 3067                                         KASSERT(object->charge >= ptoa(size1),
 3068                                             ("object %p charge < 0", object));
 3069                                         swap_release_by_cred(ptoa(size1),
 3070                                             object->cred);
 3071                                         object->charge -= ptoa(size1);
 3072                                 }
 3073                         }
 3074                 }
 3075                 VM_OBJECT_WUNLOCK(object);
 3076         } else
 3077                 entry->object.vm_object = NULL;
 3078         if (map->system_map)
 3079                 vm_map_entry_deallocate(entry, TRUE);
 3080         else {
 3081                 entry->next = curthread->td_map_def_user;
 3082                 curthread->td_map_def_user = entry;
 3083         }
 3084 }
 3085 
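/*
 * Editor's note: for non-system maps the entry is not freed above but
 * queued on curthread->td_map_def_user.  Once the map lock is dropped,
 * vm_map_process_deferred() drains that list and calls
 * vm_map_entry_deallocate() on each entry, so that object teardown
 * never runs while the map is locked (see the unlock sequence at the
 * end of vmspace_fork() below).
 */
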
 3086 /*
 3087  *      vm_map_delete:  [ internal use only ]
 3088  *
 3089  *      Deallocates the given address range from the target
 3090  *      map.
 3091  */
 3092 int
 3093 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
 3094 {
 3095         vm_map_entry_t entry;
 3096         vm_map_entry_t first_entry;
 3097 
 3098         VM_MAP_ASSERT_LOCKED(map);
 3099         if (start == end)
 3100                 return (KERN_SUCCESS);
 3101 
 3102         /*
 3103          * Find the start of the region, and clip it
 3104          */
 3105         if (!vm_map_lookup_entry(map, start, &first_entry))
 3106                 entry = first_entry->next;
 3107         else {
 3108                 entry = first_entry;
 3109                 vm_map_clip_start(map, entry, start);
 3110         }
 3111 
 3112         /*
 3113          * Step through all entries in this region
 3114          */
 3115         while (entry->start < end) {
 3116                 vm_map_entry_t next;
 3117 
 3118                 /*
 3119                  * Wait for wiring or unwiring of an entry to complete.
 3120                  * Also wait for any system wirings to disappear on
 3121                  * user maps.
 3122                  */
 3123                 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 ||
 3124                     (vm_map_pmap(map) != kernel_pmap &&
 3125                     vm_map_entry_system_wired_count(entry) != 0)) {
 3126                         unsigned int last_timestamp;
 3127                         vm_offset_t saved_start;
 3128                         vm_map_entry_t tmp_entry;
 3129 
 3130                         saved_start = entry->start;
 3131                         entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
 3132                         last_timestamp = map->timestamp;
 3133                         (void) vm_map_unlock_and_wait(map, 0);
 3134                         vm_map_lock(map);
 3135                         if (last_timestamp + 1 != map->timestamp) {
 3136                                 /*
 3137                                  * Look again for the entry because the map was
 3138                                  * modified while it was unlocked.
 3139                                  * Specifically, the entry may have been
 3140                                  * clipped, merged, or deleted.
 3141                                  */
 3142                                 if (!vm_map_lookup_entry(map, saved_start,
 3143                                                          &tmp_entry))
 3144                                         entry = tmp_entry->next;
 3145                                 else {
 3146                                         entry = tmp_entry;
 3147                                         vm_map_clip_start(map, entry,
 3148                                                           saved_start);
 3149                                 }
 3150                         }
 3151                         continue;
 3152                 }
 3153                 vm_map_clip_end(map, entry, end);
 3154 
 3155                 next = entry->next;
 3156 
 3157                 /*
 3158                  * Unwire before removing addresses from the pmap; otherwise,
 3159                  * unwiring will put the entries back in the pmap.
 3160                  */
 3161                 if (entry->wired_count != 0)
 3162                         vm_map_entry_unwire(map, entry);
 3163 
 3164                 /*
 3165                  * Remove mappings for the pages, but only if the
 3166                  * mappings could exist.  For instance, it does not
 3167                  * make sense to call pmap_remove() for guard entries.
 3168                  */
 3169                 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 ||
 3170                     entry->object.vm_object != NULL)
 3171                         pmap_remove(map->pmap, entry->start, entry->end);
 3172 
 3173                 /*
 3174                  * Delete the entry only after removing all pmap
 3175                  * entries pointing to its pages.  (Otherwise, its
 3176                  * page frames may be reallocated, and any modify bits
 3177                  * will be set in the wrong object!)
 3178                  */
 3179                 vm_map_entry_delete(map, entry);
 3180                 entry = next;
 3181         }
 3182         return (KERN_SUCCESS);
 3183 }
 3184 
 3185 /*
 3186  *      vm_map_remove:
 3187  *
 3188  *      Remove the given address range from the target map.
 3189  *      This is the exported form of vm_map_delete.
 3190  */
 3191 int
 3192 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
 3193 {
 3194         int result;
 3195 
 3196         vm_map_lock(map);
 3197         VM_MAP_RANGE_CHECK(map, start, end);
 3198         result = vm_map_delete(map, start, end);
 3199         vm_map_unlock(map);
 3200         return (result);
 3201 }
 3202 
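/*
 * Editor's sketch (hypothetical helper, illustrating the calling
 * convention only): unmap a byte range after rounding it to page
 * boundaries, converting the KERN_* status to an errno value.
 */
#if 0	/* illustrative only */
static int
example_unmap(vm_map_t map, vm_offset_t addr, vm_size_t size)
{

	return (vm_map_remove(map, trunc_page(addr),
	    round_page(addr + size)) == KERN_SUCCESS ? 0 : EINVAL);
}
#endif
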
 3203 /*
 3204  *      vm_map_check_protection:
 3205  *
 3206  *      Assert that the target map allows the specified privilege on the
 3207  *      entire address region given.  The entire region must be allocated.
 3208  *
 3209  *      WARNING!  This code does not and should not check whether the
 3210  *      contents of the region are accessible.  For example, a smaller file
 3211  *      might be mapped into a larger address space.
 3212  *
 3213  *      NOTE!  This code is also called by munmap().
 3214  *
 3215  *      The map must be locked.  A read lock is sufficient.
 3216  */
 3217 boolean_t
 3218 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
 3219                         vm_prot_t protection)
 3220 {
 3221         vm_map_entry_t entry;
 3222         vm_map_entry_t tmp_entry;
 3223 
 3224         if (!vm_map_lookup_entry(map, start, &tmp_entry))
 3225                 return (FALSE);
 3226         entry = tmp_entry;
 3227 
 3228         while (start < end) {
 3229                 /*
 3230                  * No holes allowed!
 3231                  */
 3232                 if (start < entry->start)
 3233                         return (FALSE);
 3234                 /*
 3235                  * Check protection associated with entry.
 3236                  */
 3237                 if ((entry->protection & protection) != protection)
 3238                         return (FALSE);
 3239                 /* go to next entry */
 3240                 start = entry->end;
 3241                 entry = entry->next;
 3242         }
 3243         return (TRUE);
 3244 }
 3245 
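/*
 * Editor's sketch (hypothetical caller): verifying that a user range
 * is entirely mapped and readable.  Per the comment above, holding the
 * map lock for read is sufficient.
 */
#if 0	/* illustrative only */
	boolean_t ok;

	vm_map_lock_read(map);
	ok = vm_map_check_protection(map, start, end, VM_PROT_READ);
	vm_map_unlock_read(map);
	if (!ok)
		return (EFAULT);
#endif
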
 3246 /*
 3247  *      vm_map_copy_entry:
 3248  *
 3249  *      Copies the contents of the source entry to the destination
 3250  *      entry.  The entries *must* be aligned properly.
 3251  */
 3252 static void
 3253 vm_map_copy_entry(
 3254         vm_map_t src_map,
 3255         vm_map_t dst_map,
 3256         vm_map_entry_t src_entry,
 3257         vm_map_entry_t dst_entry,
 3258         vm_ooffset_t *fork_charge)
 3259 {
 3260         vm_object_t src_object;
 3261         vm_map_entry_t fake_entry;
 3262         vm_offset_t size;
 3263         struct ucred *cred;
 3264         int charged;
 3265 
 3266         VM_MAP_ASSERT_LOCKED(dst_map);
 3267 
 3268         if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
 3269                 return;
 3270 
 3271         if (src_entry->wired_count == 0 ||
 3272             (src_entry->protection & VM_PROT_WRITE) == 0) {
 3273                 /*
 3274                  * If the source entry is marked needs_copy, it is already
 3275                  * write-protected.
 3276                  */
 3277                 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0 &&
 3278                     (src_entry->protection & VM_PROT_WRITE) != 0) {
 3279                         pmap_protect(src_map->pmap,
 3280                             src_entry->start,
 3281                             src_entry->end,
 3282                             src_entry->protection & ~VM_PROT_WRITE);
 3283                 }
 3284 
 3285                 /*
 3286                  * Make a copy of the object.
 3287                  */
 3288                 size = src_entry->end - src_entry->start;
 3289                 if ((src_object = src_entry->object.vm_object) != NULL) {
 3290                         VM_OBJECT_WLOCK(src_object);
 3291                         charged = ENTRY_CHARGED(src_entry);
 3292                         if (src_object->handle == NULL &&
 3293                             (src_object->type == OBJT_DEFAULT ||
 3294                             src_object->type == OBJT_SWAP)) {
 3295                                 vm_object_collapse(src_object);
 3296                                 if ((src_object->flags & (OBJ_NOSPLIT |
 3297                                     OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
 3298                                         vm_object_split(src_entry);
 3299                                         src_object =
 3300                                             src_entry->object.vm_object;
 3301                                 }
 3302                         }
 3303                         vm_object_reference_locked(src_object);
 3304                         vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
 3305                         if (src_entry->cred != NULL &&
 3306                             !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
 3307                                 KASSERT(src_object->cred == NULL,
 3308                                     ("OVERCOMMIT: vm_map_copy_entry: cred %p",
 3309                                      src_object));
 3310                                 src_object->cred = src_entry->cred;
 3311                                 src_object->charge = size;
 3312                         }
 3313                         VM_OBJECT_WUNLOCK(src_object);
 3314                         dst_entry->object.vm_object = src_object;
 3315                         if (charged) {
 3316                                 cred = curthread->td_ucred;
 3317                                 crhold(cred);
 3318                                 dst_entry->cred = cred;
 3319                                 *fork_charge += size;
 3320                                 if (!(src_entry->eflags &
 3321                                       MAP_ENTRY_NEEDS_COPY)) {
 3322                                         crhold(cred);
 3323                                         src_entry->cred = cred;
 3324                                         *fork_charge += size;
 3325                                 }
 3326                         }
 3327                         src_entry->eflags |= MAP_ENTRY_COW |
 3328                             MAP_ENTRY_NEEDS_COPY;
 3329                         dst_entry->eflags |= MAP_ENTRY_COW |
 3330                             MAP_ENTRY_NEEDS_COPY;
 3331                         dst_entry->offset = src_entry->offset;
 3332                         if (src_entry->eflags & MAP_ENTRY_VN_WRITECNT) {
 3333                                 /*
 3334                                  * MAP_ENTRY_VN_WRITECNT cannot
 3335                                  * indicate write reference from
 3336                                  * src_entry, since the entry is
 3337                                  * marked as needs copy.  Allocate a
 3338                                  * fake entry that is used to
 3339                                  * decrement object->un_pager.vnp.writecount
 3340                                  * at the appropriate time.  Attach
 3341                                  * fake_entry to the deferred list.
 3342                                  */
 3343                                 fake_entry = vm_map_entry_create(dst_map);
 3344                                 fake_entry->eflags = MAP_ENTRY_VN_WRITECNT;
 3345                                 src_entry->eflags &= ~MAP_ENTRY_VN_WRITECNT;
 3346                                 vm_object_reference(src_object);
 3347                                 fake_entry->object.vm_object = src_object;
 3348                                 fake_entry->start = src_entry->start;
 3349                                 fake_entry->end = src_entry->end;
 3350                                 fake_entry->next = curthread->td_map_def_user;
 3351                                 curthread->td_map_def_user = fake_entry;
 3352                         }
 3353 
 3354                         pmap_copy(dst_map->pmap, src_map->pmap,
 3355                             dst_entry->start, dst_entry->end - dst_entry->start,
 3356                             src_entry->start);
 3357                 } else {
 3358                         dst_entry->object.vm_object = NULL;
 3359                         dst_entry->offset = 0;
 3360                         if (src_entry->cred != NULL) {
 3361                                 dst_entry->cred = curthread->td_ucred;
 3362                                 crhold(dst_entry->cred);
 3363                                 *fork_charge += size;
 3364                         }
 3365                 }
 3366         } else {
 3367                 /*
 3368                  * We don't want to make writeable wired pages copy-on-write.
 3369                  * Immediately copy these pages into the new map by simulating
 3370                  * page faults.  The new pages are pageable.
 3371                  */
 3372                 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry,
 3373                     fork_charge);
 3374         }
 3375 }
 3376 
 3377 /*
 3378  * vmspace_map_entry_forked:
 3379  * Update the newly-forked vmspace each time a map entry is inherited
 3380  * or copied.  The values for vm_dsize and vm_tsize are approximate
 3381  * (and mostly-obsolete ideas in the face of mmap(2) et al.)
 3382  */
 3383 static void
 3384 vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2,
 3385     vm_map_entry_t entry)
 3386 {
 3387         vm_size_t entrysize;
 3388         vm_offset_t newend;
 3389 
 3390         if ((entry->eflags & MAP_ENTRY_GUARD) != 0)
 3391                 return;
 3392         entrysize = entry->end - entry->start;
 3393         vm2->vm_map.size += entrysize;
 3394         if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) {
 3395                 vm2->vm_ssize += btoc(entrysize);
 3396         } else if (entry->start >= (vm_offset_t)vm1->vm_daddr &&
 3397             entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) {
 3398                 newend = MIN(entry->end,
 3399                     (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize));
 3400                 vm2->vm_dsize += btoc(newend - entry->start);
 3401         } else if (entry->start >= (vm_offset_t)vm1->vm_taddr &&
 3402             entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) {
 3403                 newend = MIN(entry->end,
 3404                     (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize));
 3405                 vm2->vm_tsize += btoc(newend - entry->start);
 3406         }
 3407 }
 3408 
 3409 /*
 3410  * vmspace_fork:
 3411  * Create a new process vmspace structure and vm_map
 3412  * based on those of an existing process.  The new map
 3413  * is based on the old map, according to the inheritance
 3414  * values on the regions in that map.
 3415  *
 3416  * XXX It might be worth coalescing the entries added to the new vmspace.
 3417  *
 3418  * The source map must not be locked.
 3419  */
 3420 struct vmspace *
 3421 vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
 3422 {
 3423         struct vmspace *vm2;
 3424         vm_map_t new_map, old_map;
 3425         vm_map_entry_t new_entry, old_entry;
 3426         vm_object_t object;
 3427         int locked;
 3428         vm_inherit_t inh;
 3429 
 3430         old_map = &vm1->vm_map;
 3431         /* Copy immutable fields of vm1 to vm2. */
 3432         vm2 = vmspace_alloc(vm_map_min(old_map), vm_map_max(old_map), NULL);
 3433         if (vm2 == NULL)
 3434                 return (NULL);
 3435         vm2->vm_taddr = vm1->vm_taddr;
 3436         vm2->vm_daddr = vm1->vm_daddr;
 3437         vm2->vm_maxsaddr = vm1->vm_maxsaddr;
 3438         vm_map_lock(old_map);
 3439         if (old_map->busy)
 3440                 vm_map_wait_busy(old_map);
 3441         new_map = &vm2->vm_map;
 3442         locked = vm_map_trylock(new_map); /* trylock to silence WITNESS */
 3443         KASSERT(locked, ("vmspace_fork: lock failed"));
 3444 
 3445         old_entry = old_map->header.next;
 3446 
 3447         while (old_entry != &old_map->header) {
 3448                 if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
 3449                         panic("vm_map_fork: encountered a submap");
 3450 
 3451                 inh = old_entry->inheritance;
 3452                 if ((old_entry->eflags & MAP_ENTRY_GUARD) != 0 &&
 3453                     inh != VM_INHERIT_NONE)
 3454                         inh = VM_INHERIT_COPY;
 3455 
 3456                 switch (inh) {
 3457                 case VM_INHERIT_NONE:
 3458                         break;
 3459 
 3460                 case VM_INHERIT_SHARE:
 3461                         /*
 3462                          * Clone the entry, creating the shared object if necessary.
 3463                          */
 3464                         object = old_entry->object.vm_object;
 3465                         if (object == NULL) {
 3466                                 object = vm_object_allocate(OBJT_DEFAULT,
 3467                                         atop(old_entry->end - old_entry->start));
 3468                                 old_entry->object.vm_object = object;
 3469                                 old_entry->offset = 0;
 3470                                 if (old_entry->cred != NULL) {
 3471                                         object->cred = old_entry->cred;
 3472                                         object->charge = old_entry->end -
 3473                                             old_entry->start;
 3474                                         old_entry->cred = NULL;
 3475                                 }
 3476                         }
 3477 
 3478                         /*
 3479                          * Add the reference before calling vm_object_shadow
 3480                          * to ensure that a shadow object is created.
 3481                          */
 3482                         vm_object_reference(object);
 3483                         if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
 3484                                 vm_object_shadow(&old_entry->object.vm_object,
 3485                                     &old_entry->offset,
 3486                                     old_entry->end - old_entry->start);
 3487                                 old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
 3488                                 /* Transfer the second reference too. */
 3489                                 vm_object_reference(
 3490                                     old_entry->object.vm_object);
 3491 
 3492                                 /*
 3493                                  * As in vm_map_simplify_entry(), the
 3494                                  * vnode lock will not be acquired in
 3495                                  * this call to vm_object_deallocate().
 3496                                  */
 3497                                 vm_object_deallocate(object);
 3498                                 object = old_entry->object.vm_object;
 3499                         }
 3500                         VM_OBJECT_WLOCK(object);
 3501                         vm_object_clear_flag(object, OBJ_ONEMAPPING);
 3502                         if (old_entry->cred != NULL) {
 3503                                 KASSERT(object->cred == NULL, ("vmspace_fork both cred"));
 3504                                 object->cred = old_entry->cred;
 3505                                 object->charge = old_entry->end - old_entry->start;
 3506                                 old_entry->cred = NULL;
 3507                         }
 3508 
 3509                         /*
 3510                          * Assert the correct state of the vnode
 3511                          * v_writecount while the object is locked, so
 3512                          * that it need not be relocked later just for
 3513                          * the assertions.
 3514                          */
 3515                         if (old_entry->eflags & MAP_ENTRY_VN_WRITECNT &&
 3516                             object->type == OBJT_VNODE) {
 3517                                 KASSERT(((struct vnode *)object->handle)->
 3518                                     v_writecount > 0,
 3519                                     ("vmspace_fork: v_writecount %p", object));
 3520                                 KASSERT(object->un_pager.vnp.writemappings > 0,
 3521                                     ("vmspace_fork: vnp.writecount %p",
 3522                                     object));
 3523                         }
 3524                         VM_OBJECT_WUNLOCK(object);
 3525 
 3526                         /*
 3527                          * Clone the entry, referencing the shared object.
 3528                          */
 3529                         new_entry = vm_map_entry_create(new_map);
 3530                         *new_entry = *old_entry;
 3531                         new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
 3532                             MAP_ENTRY_IN_TRANSITION);
 3533                         new_entry->wiring_thread = NULL;
 3534                         new_entry->wired_count = 0;
 3535                         if (new_entry->eflags & MAP_ENTRY_VN_WRITECNT) {
 3536                                 vnode_pager_update_writecount(object,
 3537                                     new_entry->start, new_entry->end);
 3538                         }
 3539 
 3540                         /*
 3541                          * Insert the entry into the new map -- we know we're
 3542                          * inserting at the end of the new map.
 3543                          */
 3544                         vm_map_entry_link(new_map, new_map->header.prev,
 3545                             new_entry);
 3546                         vmspace_map_entry_forked(vm1, vm2, new_entry);
 3547 
 3548                         /*
 3549                          * Update the physical map
 3550                          */
 3551                         pmap_copy(new_map->pmap, old_map->pmap,
 3552                             new_entry->start,
 3553                             (old_entry->end - old_entry->start),
 3554                             old_entry->start);
 3555                         break;
 3556 
 3557                 case VM_INHERIT_COPY:
 3558                         /*
 3559                          * Clone the entry and link into the map.
 3560                          */
 3561                         new_entry = vm_map_entry_create(new_map);
 3562                         *new_entry = *old_entry;
 3563                         /*
 3564                          * Copied entry is COW over the old object.
 3565                          */
 3566                         new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
 3567                             MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_VN_WRITECNT);
 3568                         new_entry->wiring_thread = NULL;
 3569                         new_entry->wired_count = 0;
 3570                         new_entry->object.vm_object = NULL;
 3571                         new_entry->cred = NULL;
 3572                         vm_map_entry_link(new_map, new_map->header.prev,
 3573                             new_entry);
 3574                         vmspace_map_entry_forked(vm1, vm2, new_entry);
 3575                         vm_map_copy_entry(old_map, new_map, old_entry,
 3576                             new_entry, fork_charge);
 3577                         break;
 3578 
 3579                 case VM_INHERIT_ZERO:
 3580                         /*
 3581                          * Create a new anonymous mapping entry modelled on
 3582                          * the old one.
 3583                          */
 3584                         new_entry = vm_map_entry_create(new_map);
 3585                         memset(new_entry, 0, sizeof(*new_entry));
 3586 
 3587                         new_entry->start = old_entry->start;
 3588                         new_entry->end = old_entry->end;
 3589                         new_entry->eflags = old_entry->eflags &
 3590                             ~(MAP_ENTRY_USER_WIRED | MAP_ENTRY_IN_TRANSITION |
 3591                             MAP_ENTRY_VN_WRITECNT);
 3592                         new_entry->protection = old_entry->protection;
 3593                         new_entry->max_protection = old_entry->max_protection;
 3594                         new_entry->inheritance = VM_INHERIT_ZERO;
 3595 
 3596                         vm_map_entry_link(new_map, new_map->header.prev,
 3597                             new_entry);
 3598                         vmspace_map_entry_forked(vm1, vm2, new_entry);
 3599 
 3600                         new_entry->cred = curthread->td_ucred;
 3601                         crhold(new_entry->cred);
 3602                         *fork_charge += (new_entry->end - new_entry->start);
 3603 
 3604                         break;
 3605                 }
 3606                 old_entry = old_entry->next;
 3607         }
 3608         /*
 3609          * Use inlined vm_map_unlock() to postpone handling the deferred
 3610          * map entries, which cannot be done until both old_map and
 3611          * new_map locks are released.
 3612          */
 3613         sx_xunlock(&old_map->lock);
 3614         sx_xunlock(&new_map->lock);
 3615         vm_map_process_deferred();
 3616 
 3617         return (vm2);
 3618 }
 3619 
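/*
 * Editor's sketch (condensed, with assumptions, from the fork1() path
 * in sys/kern/kern_fork.c): the caller must reserve the copy-on-write
 * swap charge that vmspace_fork() accumulates in *fork_charge.
 */
#if 0	/* illustrative only */
	vm_ooffset_t fork_charge;
	struct vmspace *vm2;

	fork_charge = 0;
	vm2 = vmspace_fork(p1->p_vmspace, &fork_charge);
	if (vm2 == NULL)
		return (ENOMEM);
	if (!swap_reserve(fork_charge)) {
		vmspace_free(vm2);
		return (ENOMEM);
	}
	p2->p_vmspace = vm2;
#endif
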
 3620 /*
 3621  * Create a process's stack for exec_new_vmspace().  This function is never
 3622  * asked to wire the newly created stack.
 3623  */
 3624 int
 3625 vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
 3626     vm_prot_t prot, vm_prot_t max, int cow)
 3627 {
 3628         vm_size_t growsize, init_ssize;
 3629         rlim_t vmemlim;
 3630         int rv;
 3631 
 3632         MPASS((map->flags & MAP_WIREFUTURE) == 0);
 3633         growsize = sgrowsiz;
 3634         init_ssize = (max_ssize < growsize) ? max_ssize : growsize;
 3635         vm_map_lock(map);
 3636         vmemlim = lim_cur(curthread, RLIMIT_VMEM);
 3637         /* If we would blow our VMEM resource limit, no go */
 3638         if (map->size + init_ssize > vmemlim) {
 3639                 rv = KERN_NO_SPACE;
 3640                 goto out;
 3641         }
 3642         rv = vm_map_stack_locked(map, addrbos, max_ssize, growsize, prot,
 3643             max, cow);
 3644 out:
 3645         vm_map_unlock(map);
 3646         return (rv);
 3647 }
 3648 
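/*
 * Editor's sketch (simplified, with assumptions, from the
 * exec_new_vmspace() path in sys/kern/kern_exec.c): the new process
 * stack is placed below the top of the user address space and created
 * grows-down, never pre-wired (per the MPASS above).
 */
#if 0	/* illustrative only */
	stack_addr = sv->sv_usrstack - maxssiz;
	error = vm_map_stack(map, stack_addr, (vm_size_t)maxssiz,
	    VM_PROT_ALL, VM_PROT_ALL, MAP_STACK_GROWS_DOWN);
	if (error != KERN_SUCCESS)
		return (vm_mmap_to_errno(error));
#endif
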
 3649 static int stack_guard_page = 1;
 3650 SYSCTL_INT(_security_bsd, OID_AUTO, stack_guard_page, CTLFLAG_RWTUN,
 3651     &stack_guard_page, 0,
 3652     "Specifies the number of guard pages for a stack that grows");
 3653 
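/*
 * Editor's note: CTLFLAG_RWTUN makes the knob above available both as
 * a loader tunable and as a runtime sysctl, security.bsd.stack_guard_page,
 * e.g. "sysctl security.bsd.stack_guard_page=1".
 */
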
 3654 static int
 3655 vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
 3656     vm_size_t growsize, vm_prot_t prot, vm_prot_t max, int cow)
 3657 {
 3658         vm_map_entry_t new_entry, prev_entry;
 3659         vm_offset_t bot, gap_bot, gap_top, top;
 3660         vm_size_t init_ssize, sgp;
 3661         int orient, rv;
 3662 
 3663         /*
 3664          * The stack orientation is piggybacked with the cow argument.
 3665          * Extract it into orient and mask the cow argument so that we
 3666          * don't pass it around further.
 3667          */
 3668         orient = cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP);
 3669         KASSERT(orient != 0, ("No stack grow direction"));
 3670         KASSERT(orient != (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP),
 3671             ("bi-dir stack"));
 3672 
 3673         if (addrbos < vm_map_min(map) ||
 3674             addrbos + max_ssize > vm_map_max(map) ||
 3675             addrbos + max_ssize <= addrbos)
 3676                 return (KERN_INVALID_ADDRESS);
 3677         sgp = (vm_size_t)stack_guard_page * PAGE_SIZE;
 3678         if (sgp >= max_ssize)
 3679                 return (KERN_INVALID_ARGUMENT);
 3680 
 3681         init_ssize = growsize;
 3682         if (max_ssize < init_ssize + sgp)
 3683                 init_ssize = max_ssize - sgp;
 3684 
 3685         /* If addr is already mapped, no go */
 3686         if (vm_map_lookup_entry(map, addrbos, &prev_entry))
 3687                 return (KERN_NO_SPACE);
 3688 
 3689         /*
 3690          * If we can't accommodate max_ssize in the current mapping, no go.
 3691          */
 3692         if (prev_entry->next->start < addrbos + max_ssize)
 3693                 return (KERN_NO_SPACE);
 3694 
 3695         /*
 3696          * We initially map a stack of only init_ssize.  We will grow as
 3697          * needed later.  Depending on the orientation of the stack (i.e.
 3698          * the grow direction) we either map at the top of the range, the
 3699          * bottom of the range or in the middle.
 3700          *
 3701          * Note: we would normally expect prot and max to be VM_PROT_ALL,
 3702          * and cow to be 0.  Possibly we should eliminate these as input
 3703          * parameters, and just pass these values here in the insert call.
 3704          */
 3705         if (orient == MAP_STACK_GROWS_DOWN) {
 3706                 bot = addrbos + max_ssize - init_ssize;
 3707                 top = bot + init_ssize;
 3708                 gap_bot = addrbos;
 3709                 gap_top = bot;
 3710         } else /* if (orient == MAP_STACK_GROWS_UP) */ {
 3711                 bot = addrbos;
 3712                 top = bot + init_ssize;
 3713                 gap_bot = top;
 3714                 gap_top = addrbos + max_ssize;
 3715         }
 3716         rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow);
 3717         if (rv != KERN_SUCCESS)
 3718                 return (rv);
 3719         new_entry = prev_entry->next;
 3720         KASSERT(new_entry->end == top || new_entry->start == bot,
 3721             ("Bad entry start/end for new stack entry"));
 3722         KASSERT((orient & MAP_STACK_GROWS_DOWN) == 0 ||
 3723             (new_entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0,
 3724             ("new entry lacks MAP_ENTRY_GROWS_DOWN"));
 3725         KASSERT((orient & MAP_STACK_GROWS_UP) == 0 ||
 3726             (new_entry->eflags & MAP_ENTRY_GROWS_UP) != 0,
 3727             ("new entry lacks MAP_ENTRY_GROWS_UP"));
 3728         rv = vm_map_insert(map, NULL, 0, gap_bot, gap_top, VM_PROT_NONE,
 3729             VM_PROT_NONE, MAP_CREATE_GUARD | (orient == MAP_STACK_GROWS_DOWN ?
 3730             MAP_CREATE_STACK_GAP_DN : MAP_CREATE_STACK_GAP_UP));
 3731         if (rv != KERN_SUCCESS)
 3732                 (void)vm_map_delete(map, bot, top);
 3733         return (rv);
 3734 }
 3735 
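/*
 * Editor's worked example (illustrative numbers): for a grows-down
 * stack with max_ssize = 64 pages, growsize = 32 pages and
 * stack_guard_page = 1, vm_map_stack_locked() computes
 *
 *	sgp        = 1 page, init_ssize = growsize = 32 pages
 *	bot        = addrbos + max_ssize - init_ssize
 *	stack      = [bot, bot + 32 pages), marked MAP_ENTRY_GROWS_DOWN
 *	gap        = [addrbos, bot), a 32-page MAP_CREATE_STACK_GAP_DN
 *		     guard entry
 *
 * A later fault below bot hits the gap entry and is resolved by
 * vm_map_growstack(), which shrinks the gap and inserts new stack
 * pages, always keeping sgp pages of guard.
 */
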
 3736 /*
 3737  * Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if we
 3738  * successfully grow the stack.
 3739  */
 3740 static int
 3741 vm_map_growstack(vm_map_t map, vm_offset_t addr, vm_map_entry_t gap_entry)
 3742 {
 3743         vm_map_entry_t stack_entry;
 3744         struct proc *p;
 3745         struct vmspace *vm;
 3746         struct ucred *cred;
 3747         vm_offset_t gap_end, gap_start, grow_start;
 3748         size_t grow_amount, guard, max_grow;
 3749         rlim_t lmemlim, stacklim, vmemlim;
 3750         int rv, rv1;
 3751         bool gap_deleted, grow_down, is_procstack;
 3752 #ifdef notyet
 3753         uint64_t limit;
 3754 #endif
 3755 #ifdef RACCT
 3756         int error;
 3757 #endif
 3758 
 3759         p = curproc;
 3760         vm = p->p_vmspace;
 3761 
 3762         /*
 3763          * Disallow stack growth when the access is performed by a
 3764          * debugger or AIO daemon.  The reason is that the wrong
 3765          * resource limits are applied.
 3766          */
 3767         if (map != &p->p_vmspace->vm_map || p->p_textvp == NULL)
 3768                 return (KERN_FAILURE);
 3769 
 3770         MPASS(!map->system_map);
 3771 
 3772         guard = stack_guard_page * PAGE_SIZE;
 3773         lmemlim = lim_cur(curthread, RLIMIT_MEMLOCK);
 3774         stacklim = lim_cur(curthread, RLIMIT_STACK);
 3775         vmemlim = lim_cur(curthread, RLIMIT_VMEM);
 3776 retry:
 3777         /* If addr is not in a hole for a stack grow area, no need to grow. */
 3778         if (gap_entry == NULL && !vm_map_lookup_entry(map, addr, &gap_entry))
 3779                 return (KERN_FAILURE);
 3780         if ((gap_entry->eflags & MAP_ENTRY_GUARD) == 0)
 3781                 return (KERN_SUCCESS);
 3782         if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_DN) != 0) {
 3783                 stack_entry = gap_entry->next;
 3784                 if ((stack_entry->eflags & MAP_ENTRY_GROWS_DOWN) == 0 ||
 3785                     stack_entry->start != gap_entry->end)
 3786                         return (KERN_FAILURE);
 3787                 grow_amount = round_page(stack_entry->start - addr);
 3788                 grow_down = true;
 3789         } else if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_UP) != 0) {
 3790                 stack_entry = gap_entry->prev;
 3791                 if ((stack_entry->eflags & MAP_ENTRY_GROWS_UP) == 0 ||
 3792                     stack_entry->end != gap_entry->start)
 3793                         return (KERN_FAILURE);
 3794                 grow_amount = round_page(addr + 1 - stack_entry->end);
 3795                 grow_down = false;
 3796         } else {
 3797                 return (KERN_FAILURE);
 3798         }
 3799         max_grow = gap_entry->end - gap_entry->start;
 3800         if (guard > max_grow)
 3801                 return (KERN_NO_SPACE);
 3802         max_grow -= guard;
 3803         if (grow_amount > max_grow)
 3804                 return (KERN_NO_SPACE);
 3805 
 3806         /*
 3807          * If this is the main process stack, see if we're over the stack
 3808          * limit.
 3809          */
 3810         is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr &&
 3811             addr < (vm_offset_t)p->p_sysent->sv_usrstack;
 3812         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim))
 3813                 return (KERN_NO_SPACE);
 3814 
 3815 #ifdef RACCT
 3816         if (racct_enable) {
 3817                 PROC_LOCK(p);
 3818                 if (is_procstack && racct_set(p, RACCT_STACK,
 3819                     ctob(vm->vm_ssize) + grow_amount)) {
 3820                         PROC_UNLOCK(p);
 3821                         return (KERN_NO_SPACE);
 3822                 }
 3823                 PROC_UNLOCK(p);
 3824         }
 3825 #endif
 3826 
 3827         grow_amount = roundup(grow_amount, sgrowsiz);
 3828         if (grow_amount > max_grow)
 3829                 grow_amount = max_grow;
 3830         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
 3831                 grow_amount = trunc_page((vm_size_t)stacklim) -
 3832                     ctob(vm->vm_ssize);
 3833         }
 3834 
 3835 #ifdef notyet
 3836         PROC_LOCK(p);
 3837         limit = racct_get_available(p, RACCT_STACK);
 3838         PROC_UNLOCK(p);
 3839         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > limit))
 3840                 grow_amount = limit - ctob(vm->vm_ssize);
 3841 #endif
 3842 
 3843         if (!old_mlock && (map->flags & MAP_WIREFUTURE) != 0) {
 3844                 if (ptoa(pmap_wired_count(map->pmap)) + grow_amount > lmemlim) {
 3845                         rv = KERN_NO_SPACE;
 3846                         goto out;
 3847                 }
 3848 #ifdef RACCT
 3849                 if (racct_enable) {
 3850                         PROC_LOCK(p);
 3851                         if (racct_set(p, RACCT_MEMLOCK,
 3852                             ptoa(pmap_wired_count(map->pmap)) + grow_amount)) {
 3853                                 PROC_UNLOCK(p);
 3854                                 rv = KERN_NO_SPACE;
 3855                                 goto out;
 3856                         }
 3857                         PROC_UNLOCK(p);
 3858                 }
 3859 #endif
 3860         }
 3861 
 3862         /* If we would blow our VMEM resource limit, no go */
 3863         if (map->size + grow_amount > vmemlim) {
 3864                 rv = KERN_NO_SPACE;
 3865                 goto out;
 3866         }
 3867 #ifdef RACCT
 3868         if (racct_enable) {
 3869                 PROC_LOCK(p);
 3870                 if (racct_set(p, RACCT_VMEM, map->size + grow_amount)) {
 3871                         PROC_UNLOCK(p);
 3872                         rv = KERN_NO_SPACE;
 3873                         goto out;
 3874                 }
 3875                 PROC_UNLOCK(p);
 3876         }
 3877 #endif
 3878 
 3879         if (vm_map_lock_upgrade(map)) {
 3880                 gap_entry = NULL;
 3881                 vm_map_lock_read(map);
 3882                 goto retry;
 3883         }
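        /*
         * A failed vm_map_lock_upgrade() drops the map lock entirely,
         * so gap_entry and stack_entry are stale; clearing gap_entry
         * and retrying forces the lookup to be redone under a freshly
         * acquired lock.
         */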
 3884 
 3885         if (grow_down) {
 3886                 grow_start = gap_entry->end - grow_amount;
 3887                 if (gap_entry->start + grow_amount == gap_entry->end) {
 3888                         gap_start = gap_entry->start;
 3889                         gap_end = gap_entry->end;
 3890                         vm_map_entry_delete(map, gap_entry);
 3891                         gap_deleted = true;
 3892                 } else {
 3893                         MPASS(gap_entry->start < gap_entry->end - grow_amount);
 3894                         gap_entry->end -= grow_amount;
 3895                         vm_map_entry_resize_free(map, gap_entry);
 3896                         gap_deleted = false;
 3897                 }
 3898                 rv = vm_map_insert(map, NULL, 0, grow_start,
 3899                     grow_start + grow_amount,
 3900                     stack_entry->protection, stack_entry->max_protection,
 3901                     MAP_STACK_GROWS_DOWN);
 3902                 if (rv != KERN_SUCCESS) {
 3903                         if (gap_deleted) {
 3904                                 rv1 = vm_map_insert(map, NULL, 0, gap_start,
 3905                                     gap_end, VM_PROT_NONE, VM_PROT_NONE,
 3906                                     MAP_CREATE_GUARD | MAP_CREATE_STACK_GAP_DN);
 3907                                 MPASS(rv1 == KERN_SUCCESS);
 3908                         } else {
 3909                                 gap_entry->end += grow_amount;
 3910                                 vm_map_entry_resize_free(map, gap_entry);
 3911                         }
 3912                 }
 3913         } else {
 3914                 grow_start = stack_entry->end;
 3915                 cred = stack_entry->cred;
 3916                 if (cred == NULL && stack_entry->object.vm_object != NULL)
 3917                         cred = stack_entry->object.vm_object->cred;
 3918                 if (cred != NULL && !swap_reserve_by_cred(grow_amount, cred))
 3919                         rv = KERN_NO_SPACE;
 3920                 /* Grow the underlying object if applicable. */
 3921                 else if (stack_entry->object.vm_object == NULL ||
 3922                     vm_object_coalesce(stack_entry->object.vm_object,
 3923                     stack_entry->offset,
 3924                     (vm_size_t)(stack_entry->end - stack_entry->start),
 3925                     (vm_size_t)grow_amount, cred != NULL)) {
 3926                         if (gap_entry->start + grow_amount == gap_entry->end)
 3927                                 vm_map_entry_delete(map, gap_entry);
 3928                         else
 3929                                 gap_entry->start += grow_amount;
 3930                         stack_entry->end += grow_amount;
 3931                         map->size += grow_amount;
 3932                         vm_map_entry_resize_free(map, stack_entry);
 3933                         rv = KERN_SUCCESS;
 3934                 } else
 3935                         rv = KERN_FAILURE;
 3936         }
 3937         if (rv == KERN_SUCCESS && is_procstack)
 3938                 vm->vm_ssize += btoc(grow_amount);
 3939 
 3940         /*
 3941          * Heed the MAP_WIREFUTURE flag if it was set for this process.
 3942          */
 3943         if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE) != 0) {
 3944                 vm_map_unlock(map);
 3945                 vm_map_wire(map, grow_start, grow_start + grow_amount,
 3946                     VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
 3947                 vm_map_lock_read(map);
 3948         } else
 3949                 vm_map_lock_downgrade(map);
 3950 
 3951 out:
 3952 #ifdef RACCT
 3953         if (racct_enable && rv != KERN_SUCCESS) {
 3954                 PROC_LOCK(p);
 3955                 error = racct_set(p, RACCT_VMEM, map->size);
 3956                 KASSERT(error == 0, ("decreasing RACCT_VMEM failed"));
 3957                 if (!old_mlock) {
 3958                         error = racct_set(p, RACCT_MEMLOCK,
 3959                             ptoa(pmap_wired_count(map->pmap)));
 3960                         KASSERT(error == 0, ("decreasing RACCT_MEMLOCK failed"));
 3961                 }
 3962                 error = racct_set(p, RACCT_STACK, ctob(vm->vm_ssize));
 3963                 KASSERT(error == 0, ("decreasing RACCT_STACK failed"));
 3964                 PROC_UNLOCK(p);
 3965         }
 3966 #endif
 3967 
 3968         return (rv);
 3969 }
 3970 
 3971 /*
 3972  * Unshare the specified VM space for exec.  If other processes are
 3973  * mapped to it, then create a new one.  The new vmspace is empty.
 3974  */
 3975 int
 3976 vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
 3977 {
 3978         struct vmspace *oldvmspace = p->p_vmspace;
 3979         struct vmspace *newvmspace;
 3980 
 3981         KASSERT((curthread->td_pflags & TDP_EXECVMSPC) == 0,
 3982             ("vmspace_exec recursed"));
 3983         newvmspace = vmspace_alloc(minuser, maxuser, NULL);
 3984         if (newvmspace == NULL)
 3985                 return (ENOMEM);
 3986         newvmspace->vm_swrss = oldvmspace->vm_swrss;
 3987         /*
 3988          * This code is written like this for prototype purposes.  The
 3989          * goal is to avoid running the vmspace down here, but to let the
 3990          * other processes that are still using it run it down when they
 3991          * are done.  Even though there is little or no chance of blocking
 3992          * here, it is a good idea to keep this form for future mods.
 3993          */
 3994         PROC_VMSPACE_LOCK(p);
 3995         p->p_vmspace = newvmspace;
 3996         PROC_VMSPACE_UNLOCK(p);
 3997         if (p == curthread->td_proc)
 3998                 pmap_activate(curthread);
 3999         curthread->td_pflags |= TDP_EXECVMSPC;
 4000         return (0);
 4001 }
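/*
 * Illustrative caller sketch, not part of this file: the cleanup the
 * exec path is expected to perform once TDP_EXECVMSPC has been set
 * above.  "oldspace", "sv_minuser", and "sv_maxuser" are hypothetical
 * names used only for this sketch.
 *
 *	struct vmspace *oldspace = p->p_vmspace;
 *
 *	error = vmspace_exec(p, sv_minuser, sv_maxuser);
 *	if (error != 0)
 *		return (error);
 *	...
 *	if ((curthread->td_pflags & TDP_EXECVMSPC) != 0) {
 *		vmspace_free(oldspace);
 *		curthread->td_pflags &= ~TDP_EXECVMSPC;
 *	}
 */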
 4002 
 4003 /*
 4004  * Unshare the specified VM space for forcing COW.  This
 4005  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
 4006  */
 4007 int
 4008 vmspace_unshare(struct proc *p)
 4009 {
 4010         struct vmspace *oldvmspace = p->p_vmspace;
 4011         struct vmspace *newvmspace;
 4012         vm_ooffset_t fork_charge;
 4013 
 4014         if (oldvmspace->vm_refcnt == 1)
 4015                 return (0);
 4016         fork_charge = 0;
 4017         newvmspace = vmspace_fork(oldvmspace, &fork_charge);
 4018         if (newvmspace == NULL)
 4019                 return (ENOMEM);
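        /*
         * vmspace_fork() accumulates into fork_charge the swap
         * reservation needed for the copied entries; charge it to the
         * process credential below, or back the unshare out.
         */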
 4020         if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) {
 4021                 vmspace_free(newvmspace);
 4022                 return (ENOMEM);
 4023         }
 4024         PROC_VMSPACE_LOCK(p);
 4025         p->p_vmspace = newvmspace;
 4026         PROC_VMSPACE_UNLOCK(p);
 4027         if (p == curthread->td_proc)
 4028                 pmap_activate(curthread);
 4029         vmspace_free(oldvmspace);
 4030         return (0);
 4031 }
 4032 
 4033 /*
 4034  *      vm_map_lookup:
 4035  *
 4036  *      Finds the VM object, offset, and
 4037  *      protection for a given virtual address in the
 4038  *      specified map, assuming a page fault of the
 4039  *      type specified.
 4040  *
 4041  *      Leaves the map in question locked for read; return
 4042  *      values are guaranteed until a vm_map_lookup_done
 4043  *      call is performed.  Note that the map argument
 4044  *      is in/out; the returned map must be used in
 4045  *      the call to vm_map_lookup_done.
 4046  *
 4047  *      A handle (out_entry) is returned for use in
 4048  *      vm_map_lookup_done, to make that fast.
 4049  *
 4050  *      If a lookup is requested with "write protection"
 4051  *      specified, the map may be changed to perform virtual
 4052  *      copying operations, although the data referenced will
 4053  *      remain the same.
 4054  */
 4055 int
 4056 vm_map_lookup(vm_map_t *var_map,                /* IN/OUT */
 4057               vm_offset_t vaddr,
 4058               vm_prot_t fault_typea,
 4059               vm_map_entry_t *out_entry,        /* OUT */
 4060               vm_object_t *object,              /* OUT */
 4061               vm_pindex_t *pindex,              /* OUT */
 4062               vm_prot_t *out_prot,              /* OUT */
 4063               boolean_t *wired)                 /* OUT */
 4064 {
 4065         vm_map_entry_t entry;
 4066         vm_map_t map = *var_map;
 4067         vm_prot_t prot;
 4068         vm_prot_t fault_type = fault_typea;
 4069         vm_object_t eobject;
 4070         vm_size_t size;
 4071         struct ucred *cred;
 4072 
 4073 RetryLookup:
 4074 
 4075         vm_map_lock_read(map);
 4076 
 4077 RetryLookupLocked:
 4078         /*
 4079          * Lookup the faulting address.
 4080          */
 4081         if (!vm_map_lookup_entry(map, vaddr, out_entry)) {
 4082                 vm_map_unlock_read(map);
 4083                 return (KERN_INVALID_ADDRESS);
 4084         }
 4085 
 4086         entry = *out_entry;
 4087 
 4088         /*
 4089          * Handle submaps.
 4090          */
 4091         if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
 4092                 vm_map_t old_map = map;
 4093 
 4094                 *var_map = map = entry->object.sub_map;
 4095                 vm_map_unlock_read(old_map);
 4096                 goto RetryLookup;
 4097         }
 4098 
 4099         /*
 4100          * Check whether this task is allowed to have this page.
 4101          */
 4102         prot = entry->protection;
 4103         if ((fault_typea & VM_PROT_FAULT_LOOKUP) != 0) {
 4104                 fault_typea &= ~VM_PROT_FAULT_LOOKUP;
 4105                 if (prot == VM_PROT_NONE && map != kernel_map &&
 4106                     (entry->eflags & MAP_ENTRY_GUARD) != 0 &&
 4107                     (entry->eflags & (MAP_ENTRY_STACK_GAP_DN |
 4108                     MAP_ENTRY_STACK_GAP_UP)) != 0 &&
 4109                     vm_map_growstack(map, vaddr, entry) == KERN_SUCCESS)
 4110                         goto RetryLookupLocked;
 4111         }
 4112         fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
 4113         if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) {
 4114                 vm_map_unlock_read(map);
 4115                 return (KERN_PROTECTION_FAILURE);
 4116         }
 4117         KASSERT((prot & VM_PROT_WRITE) == 0 || (entry->eflags &
 4118             (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY)) !=
 4119             (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY),
 4120             ("entry %p flags %x", entry, entry->eflags));
 4121         if ((fault_typea & VM_PROT_COPY) != 0 &&
 4122             (entry->max_protection & VM_PROT_WRITE) == 0 &&
 4123             (entry->eflags & MAP_ENTRY_COW) == 0) {
 4124                 vm_map_unlock_read(map);
 4125                 return (KERN_PROTECTION_FAILURE);
 4126         }
 4127 
 4128         /*
 4129          * If this page is not pageable, we have to get it for all possible
 4130          * accesses.
 4131          */
 4132         *wired = (entry->wired_count != 0);
 4133         if (*wired)
 4134                 fault_type = entry->protection;
 4135         size = entry->end - entry->start;
 4136         /*
 4137          * If the entry was copy-on-write, we either copy now or demote access.
 4138          */
 4139         if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
 4140                 /*
 4141                  * If we want to write the page, we may as well handle that
 4142                  * now since we've got the map locked.
 4143                  *
 4144                  * If we don't need to write the page, we just demote the
 4145                  * permissions allowed.
 4146                  */
 4147                 if ((fault_type & VM_PROT_WRITE) != 0 ||
 4148                     (fault_typea & VM_PROT_COPY) != 0) {
 4149                         /*
 4150                          * Make a new object, and place it in the object
 4151                          * chain.  Note that no new references have appeared
 4152                          * -- one just moved from the map to the new
 4153                          * object.
 4154                          */
 4155                         if (vm_map_lock_upgrade(map))
 4156                                 goto RetryLookup;
 4157 
 4158                         if (entry->cred == NULL) {
 4159                                 /*
 4160                                  * The debugger owner is charged for
 4161                                  * the memory.
 4162                                  */
 4163                                 cred = curthread->td_ucred;
 4164                                 crhold(cred);
 4165                                 if (!swap_reserve_by_cred(size, cred)) {
 4166                                         crfree(cred);
 4167                                         vm_map_unlock(map);
 4168                                         return (KERN_RESOURCE_SHORTAGE);
 4169                                 }
 4170                                 entry->cred = cred;
 4171                         }
 4172                         vm_object_shadow(&entry->object.vm_object,
 4173                             &entry->offset, size);
 4174                         entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
 4175                         eobject = entry->object.vm_object;
 4176                         if (eobject->cred != NULL) {
 4177                                 /*
 4178                                  * The object was not shadowed.
 4179                                  */
 4180                                 swap_release_by_cred(size, entry->cred);
 4181                                 crfree(entry->cred);
 4182                                 entry->cred = NULL;
 4183                         } else if (entry->cred != NULL) {
 4184                                 VM_OBJECT_WLOCK(eobject);
 4185                                 eobject->cred = entry->cred;
 4186                                 eobject->charge = size;
 4187                                 VM_OBJECT_WUNLOCK(eobject);
 4188                                 entry->cred = NULL;
 4189                         }
 4190 
 4191                         vm_map_lock_downgrade(map);
 4192                 } else {
 4193                         /*
 4194                          * We're attempting to read a copy-on-write page --
 4195                          * don't allow writes.
 4196                          */
 4197                         prot &= ~VM_PROT_WRITE;
 4198                 }
 4199         }
 4200 
 4201         /*
 4202          * Create an object if necessary.
 4203          */
 4204         if (entry->object.vm_object == NULL &&
 4205             !map->system_map) {
 4206                 if (vm_map_lock_upgrade(map))
 4207                         goto RetryLookup;
 4208                 entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
 4209                     atop(size));
 4210                 entry->offset = 0;
 4211                 if (entry->cred != NULL) {
 4212                         VM_OBJECT_WLOCK(entry->object.vm_object);
 4213                         entry->object.vm_object->cred = entry->cred;
 4214                         entry->object.vm_object->charge = size;
 4215                         VM_OBJECT_WUNLOCK(entry->object.vm_object);
 4216                         entry->cred = NULL;
 4217                 }
 4218                 vm_map_lock_downgrade(map);
 4219         }
 4220 
 4221         /*
 4222          * Return the object/offset from this entry.  If the entry was
 4223          * copy-on-write or empty, it has been fixed up.
 4224          */
 4225         *pindex = UOFF_TO_IDX((vaddr - entry->start) + entry->offset);
 4226         *object = entry->object.vm_object;
 4227 
 4228         *out_prot = prot;
 4229         return (KERN_SUCCESS);
 4230 }
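/*
 * Illustrative sketch, not part of vm_map.c: the lookup/use/done
 * pattern that consumers of vm_map_lookup() follow.  The helper name
 * "example_resolve" is hypothetical; the signatures are taken from the
 * functions in this file.
 */
#if 0	/* example only */
static int
example_resolve(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type)
{
	vm_map_entry_t entry;
	vm_object_t object;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;
	int rv;

	/* May replace map with a submap; returns with the map read-locked. */
	rv = vm_map_lookup(&map, vaddr, fault_type, &entry, &object,
	    &pindex, &prot, &wired);
	if (rv != KERN_SUCCESS)
		return (rv);

	/* ... resolve the fault using (object, pindex, prot, wired) ... */

	/* Drop the read lock taken by vm_map_lookup(). */
	vm_map_lookup_done(map, entry);
	return (KERN_SUCCESS);
}
#endif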
 4231 
 4232 /*
 4233  *      vm_map_lookup_locked:
 4234  *
 4235  *      Lookup the faulting address.  A version of vm_map_lookup that returns 
 4236  *      KERN_FAILURE instead of blocking on map lock or memory allocation.
 4237  */
 4238 int
 4239 vm_map_lookup_locked(vm_map_t *var_map,         /* IN/OUT */
 4240                      vm_offset_t vaddr,
 4241                      vm_prot_t fault_typea,
 4242                      vm_map_entry_t *out_entry, /* OUT */
 4243                      vm_object_t *object,       /* OUT */
 4244                      vm_pindex_t *pindex,       /* OUT */
 4245                      vm_prot_t *out_prot,       /* OUT */
 4246                      boolean_t *wired)          /* OUT */
 4247 {
 4248         vm_map_entry_t entry;
 4249         vm_map_t map = *var_map;
 4250         vm_prot_t prot;
 4251         vm_prot_t fault_type = fault_typea;
 4252 
 4253         /*
 4254          * Lookup the faulting address.
 4255          */
 4256         if (!vm_map_lookup_entry(map, vaddr, out_entry))
 4257                 return (KERN_INVALID_ADDRESS);
 4258 
 4259         entry = *out_entry;
 4260 
 4261         /*
 4262          * Fail if the entry refers to a submap.
 4263          */
 4264         if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
 4265                 return (KERN_FAILURE);
 4266 
 4267         /*
 4268          * Check whether this task is allowed to have this page.
 4269          */
 4270         prot = entry->protection;
 4271         fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
 4272         if ((fault_type & prot) != fault_type)
 4273                 return (KERN_PROTECTION_FAILURE);
 4274 
 4275         /*
 4276          * If this page is not pageable, we have to get it for all possible
 4277          * accesses.
 4278          */
 4279         *wired = (entry->wired_count != 0);
 4280         if (*wired)
 4281                 fault_type = entry->protection;
 4282 
 4283         if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
 4284                 /*
 4285                  * Fail if the entry was copy-on-write for a write fault.
 4286                  */
 4287                 if (fault_type & VM_PROT_WRITE)
 4288                         return (KERN_FAILURE);
 4289                 /*
 4290                  * We're attempting to read a copy-on-write page --
 4291                  * don't allow writes.
 4292                  */
 4293                 prot &= ~VM_PROT_WRITE;
 4294         }
 4295 
 4296         /*
 4297          * Fail if an object should be created.
 4298          */
 4299         if (entry->object.vm_object == NULL && !map->system_map)
 4300                 return (KERN_FAILURE);
 4301 
 4302         /*
 4303          * Return the object/offset from this entry.  If the entry was
 4304          * copy-on-write or empty, it has been fixed up.
 4305          */
 4306         *pindex = UOFF_TO_IDX((vaddr - entry->start) + entry->offset);
 4307         *object = entry->object.vm_object;
 4308 
 4309         *out_prot = prot;
 4310         return (KERN_SUCCESS);
 4311 }
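/*
 * Note: unlike vm_map_lookup(), the locked variant above never upgrades
 * the lock, never allocates a shadow or backing object, and never
 * triggers stack growth; any case that would have required one of
 * those actions returns KERN_FAILURE instead.
 */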
 4312 
 4313 /*
 4314  *      vm_map_lookup_done:
 4315  *
 4316  *      Releases locks acquired by a vm_map_lookup
 4317  *      (according to the handle returned by that lookup).
 4318  */
 4319 void
 4320 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
 4321 {
 4322         /*
 4323          * Unlock the main-level map
 4324          */
 4325         vm_map_unlock_read(map);
 4326 }
 4327 
 4328 vm_offset_t
 4329 vm_map_max_KBI(const struct vm_map *map)
 4330 {
 4331 
 4332         return (vm_map_max(map));
 4333 }
 4334 
 4335 vm_offset_t
 4336 vm_map_min_KBI(const struct vm_map *map)
 4337 {
 4338 
 4339         return (vm_map_min(map));
 4340 }
 4341 
 4342 pmap_t
 4343 vm_map_pmap_KBI(vm_map_t map)
 4344 {
 4345 
 4346         return (map->pmap);
 4347 }
 4348 
 4349 #include "opt_ddb.h"
 4350 #ifdef DDB
 4351 #include <sys/kernel.h>
 4352 
 4353 #include <ddb/ddb.h>
 4354 
 4355 static void
 4356 vm_map_print(vm_map_t map)
 4357 {
 4358         vm_map_entry_t entry;
 4359 
 4360         db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
 4361             (void *)map,
 4362             (void *)map->pmap, map->nentries, map->timestamp);
 4363 
 4364         db_indent += 2;
 4365         for (entry = map->header.next; entry != &map->header;
 4366             entry = entry->next) {
 4367                 db_iprintf("map entry %p: start=%p, end=%p, eflags=%#x, \n",
 4368                     (void *)entry, (void *)entry->start, (void *)entry->end,
 4369                     entry->eflags);
 4370                 {
 4371                         static char *inheritance_name[4] =
 4372                         {"share", "copy", "none", "donate_copy"};
 4373 
 4374                         db_iprintf(" prot=%x/%x/%s",
 4375                             entry->protection,
 4376                             entry->max_protection,
 4377                             inheritance_name[(int)(unsigned char)entry->inheritance]);
 4378                         if (entry->wired_count != 0)
 4379                                 db_printf(", wired");
 4380                 }
 4381                 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
 4382                         db_printf(", share=%p, offset=0x%jx\n",
 4383                             (void *)entry->object.sub_map,
 4384                             (uintmax_t)entry->offset);
 4385                         if ((entry->prev == &map->header) ||
 4386                             (entry->prev->object.sub_map !=
 4387                                 entry->object.sub_map)) {
 4388                                 db_indent += 2;
 4389                                 vm_map_print((vm_map_t)entry->object.sub_map);
 4390                                 db_indent -= 2;
 4391                         }
 4392                 } else {
 4393                         if (entry->cred != NULL)
 4394                                 db_printf(", ruid %d", entry->cred->cr_ruid);
 4395                         db_printf(", object=%p, offset=0x%jx",
 4396                             (void *)entry->object.vm_object,
 4397                             (uintmax_t)entry->offset);
 4398                         if (entry->object.vm_object && entry->object.vm_object->cred)
 4399                                 db_printf(", obj ruid %d charge %jx",
 4400                                     entry->object.vm_object->cred->cr_ruid,
 4401                                     (uintmax_t)entry->object.vm_object->charge);
 4402                         if (entry->eflags & MAP_ENTRY_COW)
 4403                                 db_printf(", copy (%s)",
 4404                                     (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
 4405                         db_printf("\n");
 4406 
 4407                         if ((entry->prev == &map->header) ||
 4408                             (entry->prev->object.vm_object !=
 4409                                 entry->object.vm_object)) {
 4410                                 db_indent += 2;
 4411                                 vm_object_print((db_expr_t)(intptr_t)
 4412                                                 entry->object.vm_object,
 4413                                                 0, 0, (char *)0);
 4414                                 db_indent -= 2;
 4415                         }
 4416                 }
 4417         }
 4418         db_indent -= 2;
 4419 }
 4420 
 4421 DB_SHOW_COMMAND(map, map)
 4422 {
 4423 
 4424         if (!have_addr) {
 4425                 db_printf("usage: show map <addr>\n");
 4426                 return;
 4427         }
 4428         vm_map_print((vm_map_t)addr);
 4429 }
 4430 
 4431 DB_SHOW_COMMAND(procvm, procvm)
 4432 {
 4433         struct proc *p;
 4434 
 4435         if (have_addr) {
 4436                 p = db_lookup_proc(addr);
 4437         } else {
 4438                 p = curproc;
 4439         }
 4440 
 4441         db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
 4442             (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
 4443             (void *)vmspace_pmap(p->p_vmspace));
 4444 
 4445         vm_map_print((vm_map_t)&p->p_vmspace->vm_map);
 4446 }
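/*
 * Example ddb(4) invocations of the commands defined above (the
 * address shown is hypothetical):
 *
 *	db> show map 0xfffff80003a5e000
 *	db> show procvm			(defaults to curproc)
 *	db> show procvm <proc-addr>
 */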
 4447 
 4448 #endif /* DDB */
