FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_map.c


    1 /*
    2  * Copyright (c) 1991, 1993
    3  *      The Regents of the University of California.  All rights reserved.
    4  *
    5  * This code is derived from software contributed to Berkeley by
    6  * The Mach Operating System project at Carnegie-Mellon University.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  * 3. All advertising materials mentioning features or use of this software
   17  *    must display the following acknowledgement:
   18  *      This product includes software developed by the University of
   19  *      California, Berkeley and its contributors.
   20  * 4. Neither the name of the University nor the names of its contributors
   21  *    may be used to endorse or promote products derived from this software
   22  *    without specific prior written permission.
   23  *
   24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   34  * SUCH DAMAGE.
   35  *
   36  *      from: @(#)vm_map.c      8.3 (Berkeley) 1/12/94
   37  *
   38  *
   39  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
   40  * All rights reserved.
   41  *
   42  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
   43  *
   44  * Permission to use, copy, modify and distribute this software and
   45  * its documentation is hereby granted, provided that both the copyright
   46  * notice and this permission notice appear in all copies of the
   47  * software, derivative works or modified versions, and any portions
   48  * thereof, and that both notices appear in supporting documentation.
   49  *
   50  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
   51  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
   52  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
   53  *
   54  * Carnegie Mellon requests users of this software to return to
   55  *
   56  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
   57  *  School of Computer Science
   58  *  Carnegie Mellon University
   59  *  Pittsburgh PA 15213-3890
   60  *
   61  * any improvements or extensions that they make and grant Carnegie the
   62  * rights to redistribute these changes.
   63  */
   64 
   65 /*
   66  *      Virtual memory mapping module.
   67  */
   68 
   69 #include <sys/cdefs.h>
   70 __FBSDID("$FreeBSD: releng/5.2/sys/vm/vm_map.c 122902 2003-11-19 18:48:45Z alc $");
   71 
   72 #include <sys/param.h>
   73 #include <sys/systm.h>
   74 #include <sys/ktr.h>
   75 #include <sys/lock.h>
   76 #include <sys/mutex.h>
   77 #include <sys/proc.h>
   78 #include <sys/vmmeter.h>
   79 #include <sys/mman.h>
   80 #include <sys/vnode.h>
   81 #include <sys/resourcevar.h>
   82 #include <sys/file.h>
   83 #include <sys/sysent.h>
   84 #include <sys/shm.h>
   85 
   86 #include <vm/vm.h>
   87 #include <vm/vm_param.h>
   88 #include <vm/pmap.h>
   89 #include <vm/vm_map.h>
   90 #include <vm/vm_page.h>
   91 #include <vm/vm_object.h>
   92 #include <vm/vm_pager.h>
   93 #include <vm/vm_kern.h>
   94 #include <vm/vm_extern.h>
   95 #include <vm/swap_pager.h>
   96 #include <vm/uma.h>
   97 
   98 /*
   99  *      Virtual memory maps provide for the mapping, protection,
  100  *      and sharing of virtual memory objects.  In addition,
  101  *      this module provides for an efficient virtual copy of
  102  *      memory from one map to another.
  103  *
  104  *      Synchronization is required prior to most operations.
  105  *
  106  *      Maps consist of an ordered doubly-linked list of simple
  107  *      entries; a single hint is used to speed up lookups.
  108  *
  109  *      Since portions of maps are specified by start/end addresses,
  110  *      which may not align with existing map entries, all
  111  *      routines merely "clip" entries to these start/end values.
  112  *      [That is, an entry is split into two, bordering at a
  113  *      start or end value.]  Note that these clippings may not
  114  *      always be necessary (as the two resulting entries are then
  115  *      not changed); however, the clipping is done for convenience.
  116  *
  117  *      As mentioned above, virtual copy operations are performed
  118  *      by copying VM object references from one map to
  119  *      another, and then marking both regions as copy-on-write.
  120  */
  121 
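/*
 * A minimal userland sketch of the clipping idea described above:
 * entries cover [start, end) ranges on a doubly-linked list, and
 * clipping at an address splits one entry into two that border at that
 * address.  Illustrative only; the names (struct clip_entry,
 * clip_start) are hypothetical, and the real _vm_map_clip_start()
 * below must also manage object references and the splay tree.  As in
 * the code below, the list is assumed circular through a header entry,
 * so prev/next are never NULL.
 */
#include <assert.h>
#include <stdlib.h>

struct clip_entry {
        struct clip_entry *prev, *next;
        unsigned long start, end;       /* [start, end) */
        unsigned long offset;           /* offset into the backing object */
};

static void
clip_start(struct clip_entry *entry, unsigned long addr)
{
        struct clip_entry *front;

        assert(addr > entry->start && addr < entry->end);
        if ((front = malloc(sizeof(*front))) == NULL)
                abort();
        *front = *entry;                /* front half keeps the old start */
        front->end = addr;
        entry->offset += addr - entry->start;
        entry->start = addr;            /* back half begins at addr */
        front->prev->next = front;      /* link front just before entry */
        entry->prev = front;
        front->next = entry;
}
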
  122 /*
  123  *      vm_map_startup:
  124  *
  125  *      Initialize the vm_map module.  Must be called before
  126  *      any other vm_map routines.
  127  *
  128  *      Map and entry structures are allocated from the general
  129  *      purpose memory pool with some exceptions:
  130  *
  131  *      - The kernel map and kmem submap are allocated statically.
  132  *      - Kernel map entries are allocated out of a static pool.
  133  *
  134  *      These restrictions are necessary since malloc() uses the
  135  *      maps and requires map entries.
  136  */
  137 
  138 static struct mtx map_sleep_mtx;
  139 static uma_zone_t mapentzone;
  140 static uma_zone_t kmapentzone;
  141 static uma_zone_t mapzone;
  142 static uma_zone_t vmspace_zone;
  143 static struct vm_object kmapentobj;
  144 static void vmspace_zinit(void *mem, int size);
  145 static void vmspace_zfini(void *mem, int size);
  146 static void vm_map_zinit(void *mem, int size);
  147 static void vm_map_zfini(void *mem, int size);
  148 static void _vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max);
  149 
  150 #ifdef INVARIANTS
  151 static void vm_map_zdtor(void *mem, int size, void *arg);
  152 static void vmspace_zdtor(void *mem, int size, void *arg);
  153 #endif
  154 
  155 void
  156 vm_map_startup(void)
  157 {
  158         mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
  159         mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
  160 #ifdef INVARIANTS
  161             vm_map_zdtor,
  162 #else
  163             NULL,
  164 #endif
  165             vm_map_zinit, vm_map_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
  166         uma_prealloc(mapzone, MAX_KMAP);
  167         kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
  168             NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
  169             UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
  170         uma_prealloc(kmapentzone, MAX_KMAPENT);
  171         mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
  172             NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
  173         uma_prealloc(mapentzone, MAX_MAPENT);
  174 }
  175 
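/*
 * A note on the zone setup above: uma_zcreate() takes the zone name
 * and item size followed by a constructor, destructor, item init, item
 * fini, alignment, and flags.  The map zones pass a NULL constructor
 * and, outside of INVARIANTS, a NULL destructor; the zinit/zfini pair
 * creates and destroys each map's embedded locks.  Kernel map entries
 * come from the separate, preallocated kmapentzone (UMA_ZONE_VM,
 * backed by kmapentobj in vm_init2() below) so that allocating a
 * kernel map entry need not recurse into the very maps being modified.
 */
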
  176 static void
  177 vmspace_zfini(void *mem, int size)
  178 {
  179         struct vmspace *vm;
  180 
  181         vm = (struct vmspace *)mem;
  182 
  183         vm_map_zfini(&vm->vm_map, sizeof(vm->vm_map));
  184 }
  185 
  186 static void
  187 vmspace_zinit(void *mem, int size)
  188 {
  189         struct vmspace *vm;
  190 
  191         vm = (struct vmspace *)mem;
  192 
  193         vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map));
  194 }
  195 
  196 static void
  197 vm_map_zfini(void *mem, int size)
  198 {
  199         vm_map_t map;
  200 
  201         map = (vm_map_t)mem;
  202         mtx_destroy(&map->system_mtx);
  203         lockdestroy(&map->lock);
  204 }
  205 
  206 static void
  207 vm_map_zinit(void *mem, int size)
  208 {
  209         vm_map_t map;
  210 
  211         map = (vm_map_t)mem;
  212         map->nentries = 0;
  213         map->size = 0;
  214         map->infork = 0;
  215         mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
  216         lockinit(&map->lock, PVM, "thrd_sleep", 0, LK_NOPAUSE);
  217 }
  218 
  219 #ifdef INVARIANTS
  220 static void
  221 vmspace_zdtor(void *mem, int size, void *arg)
  222 {
  223         struct vmspace *vm;
  224 
  225         vm = (struct vmspace *)mem;
  226 
  227         vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
  228 }
  229 static void
  230 vm_map_zdtor(void *mem, int size, void *arg)
  231 {
  232         vm_map_t map;
  233 
  234         map = (vm_map_t)mem;
  235         KASSERT(map->nentries == 0,
  236             ("map %p nentries == %d on free.",
  237             map, map->nentries));
  238         KASSERT(map->size == 0,
  239             ("map %p size == %lu on free.",
  240             map, (unsigned long)map->size));
  241         KASSERT(map->infork == 0,
  242             ("map %p infork == %d on free.",
  243             map, map->infork));
  244 }
  245 #endif  /* INVARIANTS */
  246 
  247 /*
  248  * Allocate a vmspace structure, including a vm_map and pmap,
  249  * and initialize those structures.  The refcnt is set to 1.
  250  * The remaining fields must be initialized by the caller.
  251  */
  252 struct vmspace *
  253 vmspace_alloc(min, max)
  254         vm_offset_t min, max;
  255 {
  256         struct vmspace *vm;
  257 
  258         vm = uma_zalloc(vmspace_zone, M_WAITOK);
  259         CTR1(KTR_VM, "vmspace_alloc: %p", vm);
  260         _vm_map_init(&vm->vm_map, min, max);
  261         pmap_pinit(vmspace_pmap(vm));
  262         vm->vm_map.pmap = vmspace_pmap(vm);             /* XXX */
  263         vm->vm_refcnt = 1;
  264         vm->vm_shm = NULL;
  265         vm->vm_exitingcnt = 0;
  266         return (vm);
  267 }
  268 
  269 void
  270 vm_init2(void)
  271 {
  272         uma_zone_set_obj(kmapentzone, &kmapentobj, lmin(cnt.v_page_count,
  273             (VM_MAX_KERNEL_ADDRESS - KERNBASE) / PAGE_SIZE) / 8 +
  274              maxproc * 2 + maxfiles);
  275         vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
  276 #ifdef INVARIANTS
  277             vmspace_zdtor,
  278 #else
  279             NULL,
  280 #endif
  281             vmspace_zinit, vmspace_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
  282         pmap_init2();
  283 }
  284 
  285 static __inline void
  286 vmspace_dofree(struct vmspace *vm)
  287 {
  288         CTR1(KTR_VM, "vmspace_free: %p", vm);
  289 
  290         /*
  291          * Make sure any SysV shm is freed, it might not have been in
  292          * exit1().
  293          */
  294         shmexit(vm);
  295 
  296         /*
  297          * Lock the map, to wait out all other references to it.
  298          * Delete all of the mappings and pages they hold, then call
  299          * the pmap module to reclaim anything left.
  300          */
  301         vm_map_lock(&vm->vm_map);
  302         (void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
  303             vm->vm_map.max_offset);
  304         vm_map_unlock(&vm->vm_map);
  305 
  306         pmap_release(vmspace_pmap(vm));
  307         uma_zfree(vmspace_zone, vm);
  308 }
  309 
  310 void
  311 vmspace_free(struct vmspace *vm)
  312 {
  313         GIANT_REQUIRED;
  314 
  315         if (vm->vm_refcnt == 0)
  316                 panic("vmspace_free: attempt to free already freed vmspace");
  317 
  318         if (--vm->vm_refcnt == 0 && vm->vm_exitingcnt == 0)
  319                 vmspace_dofree(vm);
  320 }
  321 
  322 void
  323 vmspace_exitfree(struct proc *p)
  324 {
  325         struct vmspace *vm;
  326 
  327         GIANT_REQUIRED;
  328         vm = p->p_vmspace;
  329         p->p_vmspace = NULL;
  330 
  331         /*
  332          * cleanup by parent process wait()ing on exiting child.  vm_refcnt
  333          * may not be 0 (e.g. fork() and child exits without exec()ing).
  334          * exitingcnt may increment above 0 and drop back down to zero
  335          * several times while vm_refcnt is held non-zero.  vm_refcnt
  336          * may also increment above 0 and drop back down to zero several
  337          * times while vm_exitingcnt is held non-zero.
  338          *
  339          * The last wait on the exiting child's vmspace will clean up
  340          * the remainder of the vmspace.
  341          */
  342         if (--vm->vm_exitingcnt == 0 && vm->vm_refcnt == 0)
  343                 vmspace_dofree(vm);
  344 }
  345 
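/*
 * Both vmspace_free() and vmspace_exitfree() adjust vm_refcnt and
 * vm_exitingcnt with plain, non-atomic arithmetic; the GIANT_REQUIRED
 * assertions above are what make that safe here, since Giant
 * serializes the updates.  Teardown happens in vmspace_dofree() only
 * once both counters are zero, whichever of them drops last.
 */
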
  346 void
  347 _vm_map_lock(vm_map_t map, const char *file, int line)
  348 {
  349         int error;
  350 
  351         if (map->system_map)
  352                 _mtx_lock_flags(&map->system_mtx, 0, file, line);
  353         else {
  354                 error = lockmgr(&map->lock, LK_EXCLUSIVE, NULL, curthread);
  355                 KASSERT(error == 0, ("%s: failed to get lock", __func__));
  356         }
  357         map->timestamp++;
  358 }
  359 
  360 void
  361 _vm_map_unlock(vm_map_t map, const char *file, int line)
  362 {
  363 
  364         if (map->system_map)
  365                 _mtx_unlock_flags(&map->system_mtx, 0, file, line);
  366         else
  367                 lockmgr(&map->lock, LK_RELEASE, NULL, curthread);
  368 }
  369 
  370 void
  371 _vm_map_lock_read(vm_map_t map, const char *file, int line)
  372 {
  373         int error;
  374 
  375         if (map->system_map)
  376                 _mtx_lock_flags(&map->system_mtx, 0, file, line);
  377         else {
  378                 error = lockmgr(&map->lock, LK_EXCLUSIVE, NULL, curthread);
  379                 KASSERT(error == 0, ("%s: failed to get lock", __func__));
  380         }
  381 }
  382 
  383 void
  384 _vm_map_unlock_read(vm_map_t map, const char *file, int line)
  385 {
  386 
  387         if (map->system_map)
  388                 _mtx_unlock_flags(&map->system_mtx, 0, file, line);
  389         else
  390                 lockmgr(&map->lock, LK_RELEASE, NULL, curthread);
  391 }
  392 
  393 int
  394 _vm_map_trylock(vm_map_t map, const char *file, int line)
  395 {
  396         int error;
  397 
  398         error = map->system_map ?
  399             !_mtx_trylock(&map->system_mtx, 0, file, line) :
  400             lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, NULL, curthread);
  401         if (error == 0)
  402                 map->timestamp++;
  403         return (error == 0);
  404 }
  405 
  406 int
  407 _vm_map_trylock_read(vm_map_t map, const char *file, int line)
  408 {
  409         int error;
  410 
  411         error = map->system_map ?
  412             !_mtx_trylock(&map->system_mtx, 0, file, line) :
  413             lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, NULL, curthread);
  414         return (error == 0);
  415 }
  416 
  417 int
  418 _vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
  419 {
  420 
  421         if (map->system_map) {
  422 #ifdef INVARIANTS
  423                 _mtx_assert(&map->system_mtx, MA_OWNED, file, line);
  424 #endif
  425         } else
  426                 KASSERT(lockstatus(&map->lock, curthread) == LK_EXCLUSIVE,
  427                     ("%s: lock not held", __func__));
  428         map->timestamp++;
  429         return (0);
  430 }
  431 
  432 void
  433 _vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
  434 {
  435 
  436         if (map->system_map) {
  437 #ifdef INVARIANTS
  438                 _mtx_assert(&map->system_mtx, MA_OWNED, file, line);
  439 #endif
  440         } else
  441                 KASSERT(lockstatus(&map->lock, curthread) == LK_EXCLUSIVE,
  442                     ("%s: lock not held", __func__));
  443 }
  444 
  445 /*
  446  *      vm_map_unlock_and_wait:
  447  */
  448 int
  449 vm_map_unlock_and_wait(vm_map_t map, boolean_t user_wait)
  450 {
  451 
  452         mtx_lock(&map_sleep_mtx);
  453         vm_map_unlock(map);
  454         return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps", 0));
  455 }
  456 
  457 /*
  458  *      vm_map_wakeup:
  459  */
  460 void
  461 vm_map_wakeup(vm_map_t map)
  462 {
  463 
  464         /*
  465          * Acquire and release map_sleep_mtx to prevent a wakeup()
  466          * from being performed (and lost) between the vm_map_unlock()
  467          * and the msleep() in vm_map_unlock_and_wait().
  468          */
  469         mtx_lock(&map_sleep_mtx);
  470         mtx_unlock(&map_sleep_mtx);
  471         wakeup(&map->root);
  472 }
  473 
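/*
 * A userland analogy of the sleep/wakeup handoff above, using POSIX
 * threads (illustrative only; the kernel primitives differ).  The
 * sleeper holds sleep_mtx across its unlock-then-wait window, and
 * pthread_cond_wait() drops sleep_mtx atomically, just as msleep()
 * drops map_sleep_mtx; PDROP merely tells msleep() not to reacquire
 * the mutex on wakeup, which the pthread version does manually with
 * the trailing unlock.  A waker that locks sleep_mtx before signalling
 * therefore cannot slip its wakeup into the window and have it lost.
 * (Real code would also recheck a predicate around the wait.)
 */
#include <pthread.h>

static pthread_mutex_t map_lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t sleep_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  sleep_cv  = PTHREAD_COND_INITIALIZER;

static void
unlock_and_wait(void)
{
        pthread_mutex_lock(&sleep_mtx);         /* close the race window */
        pthread_mutex_unlock(&map_lock);        /* the vm_map_unlock() step */
        pthread_cond_wait(&sleep_cv, &sleep_mtx);
        pthread_mutex_unlock(&sleep_mtx);       /* PDROP equivalent */
}

static void
wakeup_waiters(void)
{
        pthread_mutex_lock(&sleep_mtx);         /* wait out any sleeper in */
        pthread_mutex_unlock(&sleep_mtx);       /* its unlock/wait window  */
        pthread_cond_signal(&sleep_cv);
}
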
  474 long
  475 vmspace_resident_count(struct vmspace *vmspace)
  476 {
  477         return pmap_resident_count(vmspace_pmap(vmspace));
  478 }
  479 
  480 long
  481 vmspace_wired_count(struct vmspace *vmspace)
  482 {
  483         return pmap_wired_count(vmspace_pmap(vmspace));
  484 }
  485 
  486 /*
  487  *      vm_map_create:
  488  *
  489  *      Creates and returns a new empty VM map with
  490  *      the given physical map structure, and having
  491  *      the given lower and upper address bounds.
  492  */
  493 vm_map_t
  494 vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
  495 {
  496         vm_map_t result;
  497 
  498         result = uma_zalloc(mapzone, M_WAITOK);
  499         CTR1(KTR_VM, "vm_map_create: %p", result);
  500         _vm_map_init(result, min, max);
  501         result->pmap = pmap;
  502         return (result);
  503 }
  504 
  505 /*
  506  * Initialize an existing vm_map structure
  507  * such as that in the vmspace structure.
  508  * The pmap is set elsewhere.
  509  */
  510 static void
  511 _vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max)
  512 {
  513 
  514         map->header.next = map->header.prev = &map->header;
  515         map->needs_wakeup = FALSE;
  516         map->system_map = 0;
  517         map->min_offset = min;
  518         map->max_offset = max;
  519         map->first_free = &map->header;
  520         map->root = NULL;
  521         map->timestamp = 0;
  522 }
  523 
  524 void
  525 vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max)
  526 {
  527         _vm_map_init(map, min, max);
  528         mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
  529         lockinit(&map->lock, PVM, "thrd_sleep", 0, LK_NOPAUSE);
  530 }
  531 
  532 /*
  533  *      vm_map_entry_dispose:   [ internal use only ]
  534  *
  535  *      Inverse of vm_map_entry_create.
  536  */
  537 static void
  538 vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
  539 {
  540         uma_zfree(map->system_map ? kmapentzone : mapentzone, entry);
  541 }
  542 
  543 /*
  544  *      vm_map_entry_create:    [ internal use only ]
  545  *
  546  *      Allocates a VM map entry for insertion.
  547  *      No entry fields are filled in.
  548  */
  549 static vm_map_entry_t
  550 vm_map_entry_create(vm_map_t map)
  551 {
  552         vm_map_entry_t new_entry;
  553 
  554         if (map->system_map)
  555                 new_entry = uma_zalloc(kmapentzone, M_NOWAIT);
  556         else
  557                 new_entry = uma_zalloc(mapentzone, M_WAITOK);
  558         if (new_entry == NULL)
  559                 panic("vm_map_entry_create: kernel resources exhausted");
  560         return (new_entry);
  561 }
  562 
  563 /*
  564  *      vm_map_entry_set_behavior:
  565  *
  566  *      Set the expected access behavior, either normal, random, or
  567  *      sequential.
  568  */
  569 static __inline void
  570 vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior)
  571 {
  572         entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
  573             (behavior & MAP_ENTRY_BEHAV_MASK);
  574 }
  575 
  576 /*
  577  *      vm_map_entry_splay:
  578  *
  579  *      Implements Sleator and Tarjan's top-down splay algorithm.  Returns
  580  *      the vm_map_entry containing the given address.  If, however, that
  581  *      address is not found in the vm_map, returns a vm_map_entry that is
  582  *      adjacent to the address, coming before or after it.
  583  */
  584 static vm_map_entry_t
  585 vm_map_entry_splay(vm_offset_t address, vm_map_entry_t root)
  586 {
  587         struct vm_map_entry dummy;
  588         vm_map_entry_t lefttreemax, righttreemin, y;
  589 
  590         if (root == NULL)
  591                 return (root);
  592         lefttreemax = righttreemin = &dummy;
  593         for (;; root = y) {
  594                 if (address < root->start) {
  595                         if ((y = root->left) == NULL)
  596                                 break;
  597                         if (address < y->start) {
  598                                 /* Rotate right. */
  599                                 root->left = y->right;
  600                                 y->right = root;
  601                                 root = y;
  602                                 if ((y = root->left) == NULL)
  603                                         break;
  604                         }
  605                         /* Link into the new root's right tree. */
  606                         righttreemin->left = root;
  607                         righttreemin = root;
  608                 } else if (address >= root->end) {
  609                         if ((y = root->right) == NULL)
  610                                 break;
  611                         if (address >= y->end) {
  612                                 /* Rotate left. */
  613                                 root->right = y->left;
  614                                 y->left = root;
  615                                 root = y;
  616                                 if ((y = root->right) == NULL)
  617                                         break;
  618                         }
  619                         /* Link into the new root's left tree. */
  620                         lefttreemax->right = root;
  621                         lefttreemax = root;
  622                 } else
  623                         break;
  624         }
  625         /* Assemble the new root. */
  626         lefttreemax->right = root->left;
  627         righttreemin->left = root->right;
  628         root->left = dummy.right;
  629         root->right = dummy.left;
  630         return (root);
  631 }
  632 
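/*
 * A self-contained userland rendition of the same top-down splay,
 * keyed on a single integer rather than an address range (illustrative
 * only; struct ikey_node and ikey_splay are hypothetical names).  The
 * on-stack "dummy" node collects the left and right fringe trees as
 * the search descends, and the final reassembly step mirrors the code
 * above.
 */
#include <stddef.h>

struct ikey_node {
        struct ikey_node *left, *right;
        int key;
};

static struct ikey_node *
ikey_splay(int key, struct ikey_node *root)
{
        struct ikey_node dummy, *ltreemax, *rtreemin, *y;

        if (root == NULL)
                return (NULL);
        dummy.left = dummy.right = NULL;
        ltreemax = rtreemin = &dummy;
        for (;; root = y) {
                if (key < root->key) {
                        if ((y = root->left) == NULL)
                                break;
                        if (key < y->key) {
                                /* Rotate right. */
                                root->left = y->right;
                                y->right = root;
                                root = y;
                                if ((y = root->left) == NULL)
                                        break;
                        }
                        /* Link into the new root's right tree. */
                        rtreemin->left = root;
                        rtreemin = root;
                } else if (key > root->key) {
                        if ((y = root->right) == NULL)
                                break;
                        if (key > y->key) {
                                /* Rotate left. */
                                root->right = y->left;
                                y->left = root;
                                root = y;
                                if ((y = root->right) == NULL)
                                        break;
                        }
                        /* Link into the new root's left tree. */
                        ltreemax->right = root;
                        ltreemax = root;
                } else
                        break;
        }
        /* Assemble the new root. */
        ltreemax->right = root->left;
        rtreemin->left = root->right;
        root->left = dummy.right;
        root->right = dummy.left;
        return (root);
}
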
  633 /*
  634  *      vm_map_entry_{un,}link:
  635  *
  636  *      Insert/remove entries from maps.
  637  */
  638 static void
  639 vm_map_entry_link(vm_map_t map,
  640                   vm_map_entry_t after_where,
  641                   vm_map_entry_t entry)
  642 {
  643 
  644         CTR4(KTR_VM,
  645             "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map,
  646             map->nentries, entry, after_where);
  647         map->nentries++;
  648         entry->prev = after_where;
  649         entry->next = after_where->next;
  650         entry->next->prev = entry;
  651         after_where->next = entry;
  652 
  653         if (after_where != &map->header) {
  654                 if (after_where != map->root)
  655                         vm_map_entry_splay(after_where->start, map->root);
  656                 entry->right = after_where->right;
  657                 entry->left = after_where;
  658                 after_where->right = NULL;
  659         } else {
  660                 entry->right = map->root;
  661                 entry->left = NULL;
  662         }
  663         map->root = entry;
  664 }
  665 
  666 static void
  667 vm_map_entry_unlink(vm_map_t map,
  668                     vm_map_entry_t entry)
  669 {
  670         vm_map_entry_t next, prev, root;
  671 
  672         if (entry != map->root)
  673                 vm_map_entry_splay(entry->start, map->root);
  674         if (entry->left == NULL)
  675                 root = entry->right;
  676         else {
  677                 root = vm_map_entry_splay(entry->start, entry->left);
  678                 root->right = entry->right;
  679         }
  680         map->root = root;
  681 
  682         prev = entry->prev;
  683         next = entry->next;
  684         next->prev = prev;
  685         prev->next = next;
  686         map->nentries--;
  687         CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
  688             map->nentries, entry);
  689 }
  690 
  691 /*
  692  *      vm_map_lookup_entry:    [ internal use only ]
  693  *
  694  *      Finds the map entry containing (or
  695  *      immediately preceding) the specified address
  696  *      in the given map; the entry is returned
  697  *      in the "entry" parameter.  The boolean
  698  *      result indicates whether the address is
  699  *      actually contained in the map.
  700  */
  701 boolean_t
  702 vm_map_lookup_entry(
  703         vm_map_t map,
  704         vm_offset_t address,
  705         vm_map_entry_t *entry)  /* OUT */
  706 {
  707         vm_map_entry_t cur;
  708 
  709         cur = vm_map_entry_splay(address, map->root);
  710         if (cur == NULL)
  711                 *entry = &map->header;
  712         else {
  713                 map->root = cur;
  714 
  715                 if (address >= cur->start) {
  716                         *entry = cur;
  717                         if (cur->end > address)
  718                                 return (TRUE);
  719                 } else
  720                         *entry = cur->prev;
  721         }
  722         return (FALSE);
  723 }
  724 
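/*
 * The typical caller pattern for vm_map_lookup_entry(), as used by
 * vm_map_insert() and vm_map_protect() below:
 *
 *      vm_map_entry_t entry;
 *
 *      if (vm_map_lookup_entry(map, start, &entry)) {
 *              ... start lies within "entry" ...
 *      } else {
 *              ... "entry" immediately precedes start (it may be
 *              &map->header); entry->next is the first entry at or
 *              beyond start ...
 *      }
 */
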
  725 /*
  726  *      vm_map_insert:
  727  *
  728  *      Inserts the given whole VM object into the target
  729  *      map at the specified address range.  The object's
  730  *      size should match that of the address range.
  731  *
  732  *      Requires that the map be locked, and leaves it so.
  733  *
  734  *      If object is non-NULL, ref count must be bumped by caller
  735  *      prior to making call to account for the new entry.
  736  */
  737 int
  738 vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
  739               vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max,
  740               int cow)
  741 {
  742         vm_map_entry_t new_entry;
  743         vm_map_entry_t prev_entry;
  744         vm_map_entry_t temp_entry;
  745         vm_eflags_t protoeflags;
  746 
  747         /*
  748          * Check that the start and end points are not bogus.
  749          */
  750         if ((start < map->min_offset) || (end > map->max_offset) ||
  751             (start >= end))
  752                 return (KERN_INVALID_ADDRESS);
  753 
  754         /*
  755          * Find the entry prior to the proposed starting address; if it's part
  756          * of an existing entry, this range is bogus.
  757          */
  758         if (vm_map_lookup_entry(map, start, &temp_entry))
  759                 return (KERN_NO_SPACE);
  760 
  761         prev_entry = temp_entry;
  762 
  763         /*
  764          * Assert that the next entry doesn't overlap the end point.
  765          */
  766         if ((prev_entry->next != &map->header) &&
  767             (prev_entry->next->start < end))
  768                 return (KERN_NO_SPACE);
  769 
  770         protoeflags = 0;
  771 
  772         if (cow & MAP_COPY_ON_WRITE)
  773                 protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;
  774 
  775         if (cow & MAP_NOFAULT) {
  776                 protoeflags |= MAP_ENTRY_NOFAULT;
  777 
  778                 KASSERT(object == NULL,
  779                         ("vm_map_insert: paradoxical MAP_NOFAULT request"));
  780         }
  781         if (cow & MAP_DISABLE_SYNCER)
  782                 protoeflags |= MAP_ENTRY_NOSYNC;
  783         if (cow & MAP_DISABLE_COREDUMP)
  784                 protoeflags |= MAP_ENTRY_NOCOREDUMP;
  785 
  786         if (object != NULL) {
  787                 /*
  788                  * OBJ_ONEMAPPING must be cleared unless this mapping
  789                  * is trivially proven to be the only mapping for any
  790                  * of the object's pages.  (Object granularity
  791                  * reference counting is insufficient to recognize
  792                  * aliases with precision.)
  793                  */
  794                 VM_OBJECT_LOCK(object);
  795                 if (object->ref_count > 1 || object->shadow_count != 0)
  796                         vm_object_clear_flag(object, OBJ_ONEMAPPING);
  797                 VM_OBJECT_UNLOCK(object);
  798         }
  799         else if ((prev_entry != &map->header) &&
  800                  (prev_entry->eflags == protoeflags) &&
  801                  (prev_entry->end == start) &&
  802                  (prev_entry->wired_count == 0) &&
  803                  ((prev_entry->object.vm_object == NULL) ||
  804                   vm_object_coalesce(prev_entry->object.vm_object,
  805                                      OFF_TO_IDX(prev_entry->offset),
  806                                      (vm_size_t)(prev_entry->end - prev_entry->start),
  807                                      (vm_size_t)(end - prev_entry->end)))) {
  808                 /*
  809                  * We were able to extend the object.  Determine if we
  810                  * can extend the previous map entry to include the
  811                  * new range as well.
  812                  */
  813                 if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
  814                     (prev_entry->protection == prot) &&
  815                     (prev_entry->max_protection == max)) {
  816                         map->size += (end - prev_entry->end);
  817                         prev_entry->end = end;
  818                         vm_map_simplify_entry(map, prev_entry);
  819                         return (KERN_SUCCESS);
  820                 }
  821 
  822                 /*
  823                  * If we can extend the object but cannot extend the
  824                  * map entry, we have to create a new map entry.  We
  825                  * must bump the ref count on the extended object to
  826                  * account for it.  object may be NULL.
  827                  */
  828                 object = prev_entry->object.vm_object;
  829                 offset = prev_entry->offset +
  830                         (prev_entry->end - prev_entry->start);
  831                 vm_object_reference(object);
  832         }
  833 
  834         /*
  835          * NOTE: if conditionals fail, object can be NULL here.  This occurs
  836          * in things like the buffer map where we manage kva but do not manage
  837          * backing objects.
  838          */
  839 
  840         /*
  841          * Create a new entry
  842          */
  843         new_entry = vm_map_entry_create(map);
  844         new_entry->start = start;
  845         new_entry->end = end;
  846 
  847         new_entry->eflags = protoeflags;
  848         new_entry->object.vm_object = object;
  849         new_entry->offset = offset;
  850         new_entry->avail_ssize = 0;
  851 
  852         new_entry->inheritance = VM_INHERIT_DEFAULT;
  853         new_entry->protection = prot;
  854         new_entry->max_protection = max;
  855         new_entry->wired_count = 0;
  856 
  857         /*
  858          * Insert the new entry into the list
  859          */
  860         vm_map_entry_link(map, prev_entry, new_entry);
  861         map->size += new_entry->end - new_entry->start;
  862 
  863         /*
  864          * Update the free space hint
  865          */
  866         if ((map->first_free == prev_entry) &&
  867             (prev_entry->end >= new_entry->start)) {
  868                 map->first_free = new_entry;
  869         }
  870 
  871 #if 0
  872         /*
  873          * Temporarily removed to avoid MAP_STACK panic, due to
  874          * MAP_STACK being a huge hack.  Will be added back in
  875          * when MAP_STACK (and the user stack mapping) is fixed.
  876          */
  877         /*
  878          * It may be possible to simplify the entry
  879          */
  880         vm_map_simplify_entry(map, new_entry);
  881 #endif
  882 
  883         if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) {
  884                 vm_map_pmap_enter(map, start,
  885                                     object, OFF_TO_IDX(offset), end - start,
  886                                     cow & MAP_PREFAULT_PARTIAL);
  887         }
  888 
  889         return (KERN_SUCCESS);
  890 }
  891 
  892 /*
  893  * Find sufficient space for `length' bytes in the given map, starting at
  894  * `start'.  The map must be locked.  Returns 0 on success, 1 on no space.
  895  */
  896 int
  897 vm_map_findspace(
  898         vm_map_t map,
  899         vm_offset_t start,
  900         vm_size_t length,
  901         vm_offset_t *addr)
  902 {
  903         vm_map_entry_t entry, next;
  904         vm_offset_t end;
  905 
  906         if (start < map->min_offset)
  907                 start = map->min_offset;
  908         if (start > map->max_offset)
  909                 return (1);
  910 
  911         /*
  912          * Look for the first possible address; if there's already something
  913          * at this address, we have to start after it.
  914          */
  915         if (start == map->min_offset) {
  916                 if ((entry = map->first_free) != &map->header)
  917                         start = entry->end;
  918         } else {
  919                 vm_map_entry_t tmp;
  920 
  921                 if (vm_map_lookup_entry(map, start, &tmp))
  922                         start = tmp->end;
  923                 entry = tmp;
  924         }
  925 
  926         /*
  927          * Look through the rest of the map, trying to fit a new region in the
  928          * gap between existing regions, or after the very last region.
  929          */
  930         for (;; start = (entry = next)->end) {
  931                 /*
  932                  * Find the end of the proposed new region.  Be sure we didn't
  933                  * go beyond the end of the map, or wrap around the address;
  934                  * if so, we lose.  Otherwise, if this is the last entry, or
  935                  * if the proposed new region fits before the next entry, we
  936                  * win.
  937                  */
  938                 end = start + length;
  939                 if (end > map->max_offset || end < start)
  940                         return (1);
  941                 next = entry->next;
  942                 if (next == &map->header || next->start >= end)
  943                         break;
  944         }
  945         *addr = start;
  946         if (map == kernel_map) {
  947                 vm_offset_t ksize;
  948                 if ((ksize = round_page(start + length)) > kernel_vm_end) {
  949                         pmap_growkernel(ksize);
  950                 }
  951         }
  952         return (0);
  953 }
  954 
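/*
 * The first-fit search above, modeled in userland on a sorted array of
 * busy [start, end) ranges (illustrative only; struct busy_range and
 * findspace are hypothetical names).  Returns 0 and sets *addr on
 * success, 1 if no gap of "length" bytes exists at or after "start"
 * below "limit".
 */
struct busy_range { unsigned long start, end; };

static int
findspace(const struct busy_range *busy, int n, unsigned long start,
    unsigned long length, unsigned long limit, unsigned long *addr)
{
        unsigned long end;
        int i;

        for (i = 0; i < n; i++) {
                if (busy[i].end <= start)
                        continue;               /* range is before start */
                end = start + length;
                if (end > limit || end < start) /* out of room or wrapped */
                        return (1);
                if (end <= busy[i].start)
                        break;                  /* fits in this gap */
                start = busy[i].end;            /* retry after this range */
        }
        end = start + length;
        if (end > limit || end < start)
                return (1);
        *addr = start;
        return (0);
}
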
  955 /*
  956  *      vm_map_find finds an unallocated region in the target address
  957  *      map with the given length.  The search is defined to be
  958  *      first-fit from the specified address; the region found is
  959  *      returned in the same parameter.
  960  *
  961  *      If object is non-NULL, ref count must be bumped by caller
  962  *      prior to making call to account for the new entry.
  963  */
  964 int
  965 vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
  966             vm_offset_t *addr,  /* IN/OUT */
  967             vm_size_t length, boolean_t find_space, vm_prot_t prot,
  968             vm_prot_t max, int cow)
  969 {
  970         vm_offset_t start;
  971         int result, s = 0;
  972 
  973         start = *addr;
  974 
  975         if (map == kmem_map)
  976                 s = splvm();
  977 
  978         vm_map_lock(map);
  979         if (find_space) {
  980                 if (vm_map_findspace(map, start, length, addr)) {
  981                         vm_map_unlock(map);
  982                         if (map == kmem_map)
  983                                 splx(s);
  984                         return (KERN_NO_SPACE);
  985                 }
  986                 start = *addr;
  987         }
  988         result = vm_map_insert(map, object, offset,
  989                 start, start + length, prot, max, cow);
  990         vm_map_unlock(map);
  991 
  992         if (map == kmem_map)
  993                 splx(s);
  994 
  995         return (result);
  996 }
  997 
  998 /*
  999  *      vm_map_simplify_entry:
 1000  *
 1001  *      Simplify the given map entry by merging with either neighbor.  This
 1002  *      routine also has the ability to merge with both neighbors.
 1003  *
 1004  *      The map must be locked.
 1005  *
  1006  *      This routine guarantees that the passed entry remains valid (though
 1007  *      possibly extended).  When merging, this routine may delete one or
 1008  *      both neighbors.
 1009  */
 1010 void
 1011 vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
 1012 {
 1013         vm_map_entry_t next, prev;
 1014         vm_size_t prevsize, esize;
 1015 
 1016         if (entry->eflags & (MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP))
 1017                 return;
 1018 
 1019         prev = entry->prev;
 1020         if (prev != &map->header) {
 1021                 prevsize = prev->end - prev->start;
 1022                 if ( (prev->end == entry->start) &&
 1023                      (prev->object.vm_object == entry->object.vm_object) &&
 1024                      (!prev->object.vm_object ||
 1025                         (prev->offset + prevsize == entry->offset)) &&
 1026                      (prev->eflags == entry->eflags) &&
 1027                      (prev->protection == entry->protection) &&
 1028                      (prev->max_protection == entry->max_protection) &&
 1029                      (prev->inheritance == entry->inheritance) &&
 1030                      (prev->wired_count == entry->wired_count)) {
 1031                         if (map->first_free == prev)
 1032                                 map->first_free = entry;
 1033                         vm_map_entry_unlink(map, prev);
 1034                         entry->start = prev->start;
 1035                         entry->offset = prev->offset;
 1036                         if (prev->object.vm_object)
 1037                                 vm_object_deallocate(prev->object.vm_object);
 1038                         vm_map_entry_dispose(map, prev);
 1039                 }
 1040         }
 1041 
 1042         next = entry->next;
 1043         if (next != &map->header) {
 1044                 esize = entry->end - entry->start;
 1045                 if ((entry->end == next->start) &&
 1046                     (next->object.vm_object == entry->object.vm_object) &&
 1047                      (!entry->object.vm_object ||
 1048                         (entry->offset + esize == next->offset)) &&
 1049                     (next->eflags == entry->eflags) &&
 1050                     (next->protection == entry->protection) &&
 1051                     (next->max_protection == entry->max_protection) &&
 1052                     (next->inheritance == entry->inheritance) &&
 1053                     (next->wired_count == entry->wired_count)) {
 1054                         if (map->first_free == next)
 1055                                 map->first_free = entry;
 1056                         vm_map_entry_unlink(map, next);
 1057                         entry->end = next->end;
 1058                         if (next->object.vm_object)
 1059                                 vm_object_deallocate(next->object.vm_object);
 1060                         vm_map_entry_dispose(map, next);
 1061                 }
 1062         }
 1063 }
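
/*
 * In summary, two neighbors merge only when every attribute that could
 * make the ranges behave differently is identical: the same backing
 * object with contiguous offsets (or no object at all), and matching
 * eflags, protection, max_protection, inheritance, and wired_count.
 * Entries marked MAP_ENTRY_IN_TRANSITION or MAP_ENTRY_IS_SUB_MAP are
 * never simplified.
 */
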
 1064 /*
 1065  *      vm_map_clip_start:      [ internal use only ]
 1066  *
 1067  *      Asserts that the given entry begins at or after
 1068  *      the specified address; if necessary,
 1069  *      it splits the entry into two.
 1070  */
 1071 #define vm_map_clip_start(map, entry, startaddr) \
 1072 { \
 1073         if (startaddr > entry->start) \
 1074                 _vm_map_clip_start(map, entry, startaddr); \
 1075 }
 1076 
 1077 /*
 1078  *      This routine is called only when it is known that
 1079  *      the entry must be split.
 1080  */
 1081 static void
 1082 _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
 1083 {
 1084         vm_map_entry_t new_entry;
 1085 
 1086         /*
 1087          * Split off the front portion -- note that we must insert the new
 1088          * entry BEFORE this one, so that this entry has the specified
 1089          * starting address.
 1090          */
 1091         vm_map_simplify_entry(map, entry);
 1092 
 1093         /*
 1094          * If there is no object backing this entry, we might as well create
 1095          * one now.  If we defer it, an object can get created after the map
 1096          * is clipped, and individual objects will be created for the split-up
 1097          * map.  This is a bit of a hack, but is also about the best place to
 1098          * put this improvement.
 1099          */
 1100         if (entry->object.vm_object == NULL && !map->system_map) {
 1101                 vm_object_t object;
 1102                 object = vm_object_allocate(OBJT_DEFAULT,
 1103                                 atop(entry->end - entry->start));
 1104                 entry->object.vm_object = object;
 1105                 entry->offset = 0;
 1106         }
 1107 
 1108         new_entry = vm_map_entry_create(map);
 1109         *new_entry = *entry;
 1110 
 1111         new_entry->end = start;
 1112         entry->offset += (start - entry->start);
 1113         entry->start = start;
 1114 
 1115         vm_map_entry_link(map, entry->prev, new_entry);
 1116 
 1117         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
 1118                 vm_object_reference(new_entry->object.vm_object);
 1119         }
 1120 }
 1121 
 1122 /*
 1123  *      vm_map_clip_end:        [ internal use only ]
 1124  *
 1125  *      Asserts that the given entry ends at or before
 1126  *      the specified address; if necessary,
 1127  *      it splits the entry into two.
 1128  */
 1129 #define vm_map_clip_end(map, entry, endaddr) \
 1130 { \
 1131         if ((endaddr) < (entry->end)) \
 1132                 _vm_map_clip_end((map), (entry), (endaddr)); \
 1133 }
 1134 
 1135 /*
 1136  *      This routine is called only when it is known that
 1137  *      the entry must be split.
 1138  */
 1139 static void
 1140 _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
 1141 {
 1142         vm_map_entry_t new_entry;
 1143 
 1144         /*
 1145          * If there is no object backing this entry, we might as well create
 1146          * one now.  If we defer it, an object can get created after the map
 1147          * is clipped, and individual objects will be created for the split-up
 1148          * map.  This is a bit of a hack, but is also about the best place to
 1149          * put this improvement.
 1150          */
 1151         if (entry->object.vm_object == NULL && !map->system_map) {
 1152                 vm_object_t object;
 1153                 object = vm_object_allocate(OBJT_DEFAULT,
 1154                                 atop(entry->end - entry->start));
 1155                 entry->object.vm_object = object;
 1156                 entry->offset = 0;
 1157         }
 1158 
 1159         /*
 1160          * Create a new entry and insert it AFTER the specified entry
 1161          */
 1162         new_entry = vm_map_entry_create(map);
 1163         *new_entry = *entry;
 1164 
 1165         new_entry->start = entry->end = end;
 1166         new_entry->offset += (end - entry->start);
 1167 
 1168         vm_map_entry_link(map, entry, new_entry);
 1169 
 1170         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
 1171                 vm_object_reference(new_entry->object.vm_object);
 1172         }
 1173 }
 1174 
 1175 /*
 1176  *      VM_MAP_RANGE_CHECK:     [ internal use only ]
 1177  *
 1178  *      Asserts that the starting and ending region
 1179  *      addresses fall within the valid range of the map.
 1180  */
 1181 #define VM_MAP_RANGE_CHECK(map, start, end)             \
 1182                 {                                       \
 1183                 if (start < vm_map_min(map))            \
 1184                         start = vm_map_min(map);        \
 1185                 if (end > vm_map_max(map))              \
 1186                         end = vm_map_max(map);          \
 1187                 if (start > end)                        \
 1188                         start = end;                    \
 1189                 }
 1190 
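/*
 * Note that VM_MAP_RANGE_CHECK() assigns to its "start" and "end"
 * arguments and evaluates them more than once, so callers must pass
 * plain local variables rather than expressions with side effects.
 */
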
 1191 /*
 1192  *      vm_map_submap:          [ kernel use only ]
 1193  *
 1194  *      Mark the given range as handled by a subordinate map.
 1195  *
 1196  *      This range must have been created with vm_map_find,
 1197  *      and no other operations may have been performed on this
 1198  *      range prior to calling vm_map_submap.
 1199  *
 1200  *      Only a limited number of operations can be performed
  1201  *      within this range after calling vm_map_submap:
 1202  *              vm_fault
 1203  *      [Don't try vm_map_copy!]
 1204  *
 1205  *      To remove a submapping, one must first remove the
 1206  *      range from the superior map, and then destroy the
 1207  *      submap (if desired).  [Better yet, don't try it.]
 1208  */
 1209 int
 1210 vm_map_submap(
 1211         vm_map_t map,
 1212         vm_offset_t start,
 1213         vm_offset_t end,
 1214         vm_map_t submap)
 1215 {
 1216         vm_map_entry_t entry;
 1217         int result = KERN_INVALID_ARGUMENT;
 1218 
 1219         vm_map_lock(map);
 1220 
 1221         VM_MAP_RANGE_CHECK(map, start, end);
 1222 
 1223         if (vm_map_lookup_entry(map, start, &entry)) {
 1224                 vm_map_clip_start(map, entry, start);
 1225         } else
 1226                 entry = entry->next;
 1227 
 1228         vm_map_clip_end(map, entry, end);
 1229 
 1230         if ((entry->start == start) && (entry->end == end) &&
 1231             ((entry->eflags & MAP_ENTRY_COW) == 0) &&
 1232             (entry->object.vm_object == NULL)) {
 1233                 entry->object.sub_map = submap;
 1234                 entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
 1235                 result = KERN_SUCCESS;
 1236         }
 1237         vm_map_unlock(map);
 1238 
 1239         return (result);
 1240 }
 1241 
 1242 /*
 1243  * The maximum number of pages to map
 1244  */
 1245 #define MAX_INIT_PT     96
 1246 
 1247 /*
 1248  *      vm_map_pmap_enter:
 1249  *
 1250  *      Preload the mappings for the given object into the specified
 1251  *      map.  This eliminates the soft faults on process startup and
 1252  *      immediately after an mmap(2).
 1253  */
 1254 void
 1255 vm_map_pmap_enter(vm_map_t map, vm_offset_t addr,
 1256     vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags)
 1257 {
 1258         vm_offset_t tmpidx;
 1259         int psize;
 1260         vm_page_t p, mpte;
 1261 
 1262         if (object == NULL)
 1263                 return;
 1264         mtx_lock(&Giant);
 1265         VM_OBJECT_LOCK(object);
 1266         if (object->type == OBJT_DEVICE) {
 1267                 pmap_object_init_pt(map->pmap, addr, object, pindex, size);
 1268                 goto unlock_return;
 1269         }
 1270 
 1271         psize = atop(size);
 1272 
 1273         if (object->type != OBJT_VNODE ||
 1274             ((flags & MAP_PREFAULT_PARTIAL) && (psize > MAX_INIT_PT) &&
 1275              (object->resident_page_count > MAX_INIT_PT))) {
 1276                 goto unlock_return;
 1277         }
 1278 
 1279         if (psize + pindex > object->size) {
 1280                 if (object->size < pindex)
 1281                         goto unlock_return;
 1282                 psize = object->size - pindex;
 1283         }
 1284 
 1285         mpte = NULL;
 1286 
 1287         if ((p = TAILQ_FIRST(&object->memq)) != NULL) {
 1288                 if (p->pindex < pindex) {
 1289                         p = vm_page_splay(pindex, object->root);
 1290                         if ((object->root = p)->pindex < pindex)
 1291                                 p = TAILQ_NEXT(p, listq);
 1292                 }
 1293         }
 1294         /*
 1295          * Assert: the variable p is either (1) the page with the
 1296          * least pindex greater than or equal to the parameter pindex
 1297          * or (2) NULL.
 1298          */
 1299         for (;
 1300              p != NULL && (tmpidx = p->pindex - pindex) < psize;
 1301              p = TAILQ_NEXT(p, listq)) {
 1302                 /*
 1303                  * don't allow an madvise to blow away our really
 1304                  * free pages allocating pv entries.
 1305                  */
 1306                 if ((flags & MAP_PREFAULT_MADVISE) &&
 1307                     cnt.v_free_count < cnt.v_free_reserved) {
 1308                         break;
 1309                 }
 1310                 vm_page_lock_queues();
 1311                 if ((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL &&
 1312                     (p->busy == 0) &&
 1313                     (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
 1314                         if ((p->queue - p->pc) == PQ_CACHE)
 1315                                 vm_page_deactivate(p);
 1316                         vm_page_busy(p);
 1317                         vm_page_unlock_queues();
 1318                         VM_OBJECT_UNLOCK(object);
 1319                         mpte = pmap_enter_quick(map->pmap,
 1320                                 addr + ptoa(tmpidx), p, mpte);
 1321                         VM_OBJECT_LOCK(object);
 1322                         vm_page_lock_queues();
 1323                         vm_page_wakeup(p);
 1324                 }
 1325                 vm_page_unlock_queues();
 1326         }
 1327 unlock_return:
 1328         VM_OBJECT_UNLOCK(object);
 1329         mtx_unlock(&Giant);
 1330 }
 1331 
 1332 /*
 1333  *      vm_map_protect:
 1334  *
 1335  *      Sets the protection of the specified address
 1336  *      region in the target map.  If "set_max" is
 1337  *      specified, the maximum protection is to be set;
 1338  *      otherwise, only the current protection is affected.
 1339  */
 1340 int
 1341 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
 1342                vm_prot_t new_prot, boolean_t set_max)
 1343 {
 1344         vm_map_entry_t current;
 1345         vm_map_entry_t entry;
 1346 
 1347         vm_map_lock(map);
 1348 
 1349         VM_MAP_RANGE_CHECK(map, start, end);
 1350 
 1351         if (vm_map_lookup_entry(map, start, &entry)) {
 1352                 vm_map_clip_start(map, entry, start);
 1353         } else {
 1354                 entry = entry->next;
 1355         }
 1356 
 1357         /*
 1358          * Make a first pass to check for protection violations.
 1359          */
 1360         current = entry;
 1361         while ((current != &map->header) && (current->start < end)) {
 1362                 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
 1363                         vm_map_unlock(map);
 1364                         return (KERN_INVALID_ARGUMENT);
 1365                 }
 1366                 if ((new_prot & current->max_protection) != new_prot) {
 1367                         vm_map_unlock(map);
 1368                         return (KERN_PROTECTION_FAILURE);
 1369                 }
 1370                 current = current->next;
 1371         }
 1372 
 1373         /*
 1374          * Go back and fix up protections. [Note that clipping is not
 1375          * necessary the second time.]
 1376          */
 1377         current = entry;
 1378         while ((current != &map->header) && (current->start < end)) {
 1379                 vm_prot_t old_prot;
 1380 
 1381                 vm_map_clip_end(map, current, end);
 1382 
 1383                 old_prot = current->protection;
 1384                 if (set_max)
 1385                         current->protection =
 1386                             (current->max_protection = new_prot) &
 1387                             old_prot;
 1388                 else
 1389                         current->protection = new_prot;
 1390 
 1391                 /*
 1392                  * Update physical map if necessary. Worry about copy-on-write
 1393                  * here -- CHECK THIS XXX
 1394                  */
 1395                 if (current->protection != old_prot) {
 1396                         mtx_lock(&Giant);
 1397                         vm_page_lock_queues();
 1398 #define MASK(entry)     (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
 1399                                                         VM_PROT_ALL)
 1400                         pmap_protect(map->pmap, current->start,
 1401                             current->end,
 1402                             current->protection & MASK(current));
 1403 #undef  MASK
 1404                         vm_page_unlock_queues();
 1405                         mtx_unlock(&Giant);
 1406                 }
 1407                 vm_map_simplify_entry(map, current);
 1408                 current = current->next;
 1409         }
 1410         vm_map_unlock(map);
 1411         return (KERN_SUCCESS);
 1412 }
 1413 
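/*
 * A small worked example of the set_max arithmetic above.  With the
 * standard protection bits (VM_PROT_READ 0x1, VM_PROT_WRITE 0x2,
 * VM_PROT_EXECUTE 0x4), lowering max_protection to read-only on an
 * entry that is currently read/write also strips the write bit from
 * the current protection:
 *
 *      old_prot       = VM_PROT_READ | VM_PROT_WRITE;  (0x3)
 *      new_prot (max) = VM_PROT_READ;                  (0x1)
 *      max_protection = new_prot;                      (0x1)
 *      protection     = new_prot & old_prot;           (0x1)
 *
 * so the entry can never again be made writable through this map.
 */
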
 1414 /*
 1415  *      vm_map_madvise:
 1416  *
  1417  *      This routine traverses a process's map, handling the madvise
  1418  *      system call.  Advisories are classified as either those affecting
  1419  *      the vm_map_entry structure or those affecting the underlying
 1420  *      objects.
 1421  */
 1422 int
 1423 vm_map_madvise(
 1424         vm_map_t map,
 1425         vm_offset_t start,
 1426         vm_offset_t end,
 1427         int behav)
 1428 {
 1429         vm_map_entry_t current, entry;
 1430         int modify_map = 0;
 1431 
 1432         /*
 1433          * Some madvise calls directly modify the vm_map_entry, in which case
 1434          * we need to use an exclusive lock on the map and we need to perform
 1435          * various clipping operations.  Otherwise we only need a read-lock
 1436          * on the map.
 1437          */
 1438         switch(behav) {
 1439         case MADV_NORMAL:
 1440         case MADV_SEQUENTIAL:
 1441         case MADV_RANDOM:
 1442         case MADV_NOSYNC:
 1443         case MADV_AUTOSYNC:
 1444         case MADV_NOCORE:
 1445         case MADV_CORE:
 1446                 modify_map = 1;
 1447                 vm_map_lock(map);
 1448                 break;
 1449         case MADV_WILLNEED:
 1450         case MADV_DONTNEED:
 1451         case MADV_FREE:
 1452                 vm_map_lock_read(map);
 1453                 break;
 1454         default:
 1455                 return (KERN_INVALID_ARGUMENT);
 1456         }
 1457 
 1458         /*
 1459          * Locate starting entry and clip if necessary.
 1460          */
 1461         VM_MAP_RANGE_CHECK(map, start, end);
 1462 
 1463         if (vm_map_lookup_entry(map, start, &entry)) {
 1464                 if (modify_map)
 1465                         vm_map_clip_start(map, entry, start);
 1466         } else {
 1467                 entry = entry->next;
 1468         }
 1469 
 1470         if (modify_map) {
 1471                 /*
 1472                  * madvise behaviors that are implemented in the vm_map_entry.
 1473                  *
 1474                  * We clip the vm_map_entry so that behavioral changes are
 1475                  * limited to the specified address range.
 1476                  */
 1477                 for (current = entry;
 1478                      (current != &map->header) && (current->start < end);
 1479                      current = current->next
 1480                 ) {
 1481                         if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
 1482                                 continue;
 1483 
 1484                         vm_map_clip_end(map, current, end);
 1485 
 1486                         switch (behav) {
 1487                         case MADV_NORMAL:
 1488                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
 1489                                 break;
 1490                         case MADV_SEQUENTIAL:
 1491                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
 1492                                 break;
 1493                         case MADV_RANDOM:
 1494                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
 1495                                 break;
 1496                         case MADV_NOSYNC:
 1497                                 current->eflags |= MAP_ENTRY_NOSYNC;
 1498                                 break;
 1499                         case MADV_AUTOSYNC:
 1500                                 current->eflags &= ~MAP_ENTRY_NOSYNC;
 1501                                 break;
 1502                         case MADV_NOCORE:
 1503                                 current->eflags |= MAP_ENTRY_NOCOREDUMP;
 1504                                 break;
 1505                         case MADV_CORE:
 1506                                 current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
 1507                                 break;
 1508                         default:
 1509                                 break;
 1510                         }
 1511                         vm_map_simplify_entry(map, current);
 1512                 }
 1513                 vm_map_unlock(map);
 1514         } else {
 1515                 vm_pindex_t pindex;
 1516                 int count;
 1517 
 1518                 /*
 1519                  * madvise behaviors that are implemented in the underlying
 1520                  * vm_object.
 1521                  *
 1522                  * Since we don't clip the vm_map_entry, we have to clip
 1523                  * the vm_object pindex and count.
 1524                  */
 1525                 for (current = entry;
 1526                      (current != &map->header) && (current->start < end);
 1527                      current = current->next
 1528                 ) {
 1529                         vm_offset_t useStart;
 1530 
 1531                         if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
 1532                                 continue;
 1533 
 1534                         pindex = OFF_TO_IDX(current->offset);
 1535                         count = atop(current->end - current->start);
 1536                         useStart = current->start;
 1537 
 1538                         if (current->start < start) {
 1539                                 pindex += atop(start - current->start);
 1540                                 count -= atop(start - current->start);
 1541                                 useStart = start;
 1542                         }
 1543                         if (current->end > end)
 1544                                 count -= atop(current->end - end);
 1545 
 1546                         if (count <= 0)
 1547                                 continue;
 1548 
 1549                         vm_object_madvise(current->object.vm_object,
 1550                                           pindex, count, behav);
 1551                         if (behav == MADV_WILLNEED) {
 1552                                 vm_map_pmap_enter(map,
 1553                                     useStart,
 1554                                     current->object.vm_object,
 1555                                     pindex,
 1556                                     (count << PAGE_SHIFT),
 1557                                     MAP_PREFAULT_MADVISE
 1558                                 );
 1559                         }
 1560                 }
 1561                 vm_map_unlock_read(map);
 1562         }
 1563         return (0);
 1564 }
 1565 
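/*
 * Editor's note (standalone worked example, not part of vm_map.c): the
 * pindex/count clipping above can be checked in userland.  The values
 * below (a 32-page entry, with a madvise range overlapping its middle)
 * are made up for illustration.
 */
#if 0
#include <stdio.h>

#define EX_PAGE_SHIFT   12                      /* assume 4 KB pages */
#define ex_atop(x)      ((x) >> EX_PAGE_SHIFT)  /* bytes -> pages */

int
main(void)
{
        unsigned long e_start = 0x10000, e_end = 0x30000; /* entry range */
        unsigned long offset = 0x4000;          /* entry's object offset */
        unsigned long start = 0x18000, end = 0x28000; /* madvise range */
        unsigned long pindex = ex_atop(offset);
        long count = ex_atop(e_end - e_start);

        if (e_start < start) {                  /* clip the front */
                pindex += ex_atop(start - e_start);
                count -= ex_atop(start - e_start);
        }
        if (e_end > end)                        /* clip the back */
                count -= ex_atop(e_end - end);
        printf("pindex %lu count %ld\n", pindex, count); /* 12, 16 */
        return (0);
}
#endif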
 1566 
 1567 /*
 1568  *      vm_map_inherit:
 1569  *
 1570  *      Sets the inheritance of the specified address
 1571  *      range in the target map.  Inheritance
 1572  *      affects how the map will be shared with
 1573  *      child maps at the time of vm_map_fork.
 1574  */
 1575 int
 1576 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
 1577                vm_inherit_t new_inheritance)
 1578 {
 1579         vm_map_entry_t entry;
 1580         vm_map_entry_t temp_entry;
 1581 
 1582         switch (new_inheritance) {
 1583         case VM_INHERIT_NONE:
 1584         case VM_INHERIT_COPY:
 1585         case VM_INHERIT_SHARE:
 1586                 break;
 1587         default:
 1588                 return (KERN_INVALID_ARGUMENT);
 1589         }
 1590         vm_map_lock(map);
 1591         VM_MAP_RANGE_CHECK(map, start, end);
 1592         if (vm_map_lookup_entry(map, start, &temp_entry)) {
 1593                 entry = temp_entry;
 1594                 vm_map_clip_start(map, entry, start);
 1595         } else
 1596                 entry = temp_entry->next;
 1597         while ((entry != &map->header) && (entry->start < end)) {
 1598                 vm_map_clip_end(map, entry, end);
 1599                 entry->inheritance = new_inheritance;
 1600                 vm_map_simplify_entry(map, entry);
 1601                 entry = entry->next;
 1602         }
 1603         vm_map_unlock(map);
 1604         return (KERN_SUCCESS);
 1605 }
 1606 
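/*
 * Editor's note (hypothetical caller sketch, not part of vm_map.c): a
 * minherit(2)-style caller would page-align the range and invoke
 * vm_map_inherit() on the current process's map, roughly:
 */
#if 0
        rv = vm_map_inherit(&p->p_vmspace->vm_map,
            trunc_page(addr), round_page(addr + len), VM_INHERIT_SHARE);
        if (rv != KERN_SUCCESS)
                return (EINVAL);        /* e.g., KERN_INVALID_ARGUMENT */
#endif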
 1607 /*
 1608  *      vm_map_unwire:
 1609  *
 1610  *      Implements both kernel and user unwiring.
 1611  */
 1612 int
 1613 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
 1614     int flags)
 1615 {
 1616         vm_map_entry_t entry, first_entry, tmp_entry;
 1617         vm_offset_t saved_start;
 1618         unsigned int last_timestamp;
 1619         int rv;
 1620         boolean_t need_wakeup, result, user_unwire;
 1621 
 1622         user_unwire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
 1623         vm_map_lock(map);
 1624         VM_MAP_RANGE_CHECK(map, start, end);
 1625         if (!vm_map_lookup_entry(map, start, &first_entry)) {
 1626                 if (flags & VM_MAP_WIRE_HOLESOK)
 1627                         first_entry = first_entry->next;
 1628                 else {
 1629                         vm_map_unlock(map);
 1630                         return (KERN_INVALID_ADDRESS);
 1631                 }
 1632         }
 1633         last_timestamp = map->timestamp;
 1634         entry = first_entry;
 1635         while (entry != &map->header && entry->start < end) {
 1636                 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
 1637                         /*
 1638                          * We have not yet clipped the entry.
 1639                          */
 1640                         saved_start = (start >= entry->start) ? start :
 1641                             entry->start;
 1642                         entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
 1643                         if (vm_map_unlock_and_wait(map, user_unwire)) {
 1644                                 /*
 1645                                  * Allow interruption of user unwiring?
 1646                                  */
 1647                         }
 1648                         vm_map_lock(map);
 1649                         if (last_timestamp+1 != map->timestamp) {
 1650                                 /*
 1651                                  * Look again for the entry because the map was
 1652                                  * modified while it was unlocked.
 1653                                  * Specifically, the entry may have been
 1654                                  * clipped, merged, or deleted.
 1655                                  */
 1656                                 if (!vm_map_lookup_entry(map, saved_start,
 1657                                     &tmp_entry)) {
 1658                                         if (flags & VM_MAP_WIRE_HOLESOK)
 1659                                                 tmp_entry = tmp_entry->next;
 1660                                         else {
 1661                                                 if (saved_start == start) {
 1662                                                         /*
 1663                                                          * first_entry has been deleted.
 1664                                                          */
 1665                                                         vm_map_unlock(map);
 1666                                                         return (KERN_INVALID_ADDRESS);
 1667                                                 }
 1668                                                 end = saved_start;
 1669                                                 rv = KERN_INVALID_ADDRESS;
 1670                                                 goto done;
 1671                                         }
 1672                                 }
 1673                                 if (entry == first_entry)
 1674                                         first_entry = tmp_entry;
 1675                                 else
 1676                                         first_entry = NULL;
 1677                                 entry = tmp_entry;
 1678                         }
 1679                         last_timestamp = map->timestamp;
 1680                         continue;
 1681                 }
 1682                 vm_map_clip_start(map, entry, start);
 1683                 vm_map_clip_end(map, entry, end);
 1684                 /*
 1685                  * Mark the entry in case the map lock is released.  (See
 1686                  * above.)
 1687                  */
 1688                 entry->eflags |= MAP_ENTRY_IN_TRANSITION;
 1689                 /*
 1690                  * Check the map for holes in the specified region.
 1691                  * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
 1692                  */
 1693                 if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
 1694                     (entry->end < end && (entry->next == &map->header ||
 1695                     entry->next->start > entry->end))) {
 1696                         end = entry->end;
 1697                         rv = KERN_INVALID_ADDRESS;
 1698                         goto done;
 1699                 }
 1700                 /*
 1701                  * Require that the entry is wired.
 1702                  */
 1703                 if (entry->wired_count == 0 || (user_unwire &&
 1704                     (entry->eflags & MAP_ENTRY_USER_WIRED) == 0)) {
 1705                         end = entry->end;
 1706                         rv = KERN_INVALID_ARGUMENT;
 1707                         goto done;
 1708                 }
 1709                 entry = entry->next;
 1710         }
 1711         rv = KERN_SUCCESS;
 1712 done:
 1713         need_wakeup = FALSE;
 1714         if (first_entry == NULL) {
 1715                 result = vm_map_lookup_entry(map, start, &first_entry);
 1716                 if (!result && (flags & VM_MAP_WIRE_HOLESOK))
 1717                         first_entry = first_entry->next;
 1718                 else
 1719                         KASSERT(result, ("vm_map_unwire: lookup failed"));
 1720         }
 1721         entry = first_entry;
 1722         while (entry != &map->header && entry->start < end) {
 1723                 if (rv == KERN_SUCCESS) {
 1724                         if (user_unwire)
 1725                                 entry->eflags &= ~MAP_ENTRY_USER_WIRED;
 1726                         entry->wired_count--;
 1727                         if (entry->wired_count == 0) {
 1728                                 /*
 1729                                  * Retain the map lock.
 1730                                  */
 1731                                 vm_fault_unwire(map, entry->start, entry->end);
 1732                         }
 1733                 }
 1734                 KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION,
 1735                         ("vm_map_unwire: in-transition flag missing"));
 1736                 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
 1737                 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
 1738                         entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
 1739                         need_wakeup = TRUE;
 1740                 }
 1741                 vm_map_simplify_entry(map, entry);
 1742                 entry = entry->next;
 1743         }
 1744         vm_map_unlock(map);
 1745         if (need_wakeup)
 1746                 vm_map_wakeup(map);
 1747         return (rv);
 1748 }
 1749 
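/*
 * Editor's note (hypothetical caller sketch, not part of vm_map.c): a
 * munlock(2)-style caller requests user accounting with
 * VM_MAP_WIRE_USER; VM_MAP_WIRE_HOLESOK may be added when unmapped gaps
 * in the range should be tolerated rather than failed:
 */
#if 0
        rv = vm_map_unwire(&p->p_vmspace->vm_map,
            trunc_page(addr), round_page(addr + len),
            VM_MAP_WIRE_USER | VM_MAP_WIRE_HOLESOK);
        if (rv != KERN_SUCCESS)
                return (ENOMEM);        /* typical errno mapping */
#endif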
 1750 /*
 1751  *      vm_map_wire:
 1752  *
 1753  *      Implements both kernel and user wiring.
 1754  */
 1755 int
 1756 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
 1757     int flags)
 1758 {
 1759         vm_map_entry_t entry, first_entry, tmp_entry;
 1760         vm_offset_t saved_end, saved_start;
 1761         unsigned int last_timestamp;
 1762         int rv;
 1763         boolean_t need_wakeup, result, user_wire;
 1764 
 1765         user_wire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
 1766         vm_map_lock(map);
 1767         VM_MAP_RANGE_CHECK(map, start, end);
 1768         if (!vm_map_lookup_entry(map, start, &first_entry)) {
 1769                 if (flags & VM_MAP_WIRE_HOLESOK)
 1770                         first_entry = first_entry->next;
 1771                 else {
 1772                         vm_map_unlock(map);
 1773                         return (KERN_INVALID_ADDRESS);
 1774                 }
 1775         }
 1776         last_timestamp = map->timestamp;
 1777         entry = first_entry;
 1778         while (entry != &map->header && entry->start < end) {
 1779                 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
 1780                         /*
 1781                          * We have not yet clipped the entry.
 1782                          */
 1783                         saved_start = (start >= entry->start) ? start :
 1784                             entry->start;
 1785                         entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
 1786                         if (vm_map_unlock_and_wait(map, user_wire)) {
 1787                                 /*
 1788                                  * Allow interruption of user wiring?
 1789                                  */
 1790                         }
 1791                         vm_map_lock(map);
 1792                         if (last_timestamp + 1 != map->timestamp) {
 1793                                 /*
 1794                                  * Look again for the entry because the map was
 1795                                  * modified while it was unlocked.
 1796                                  * Specifically, the entry may have been
 1797                                  * clipped, merged, or deleted.
 1798                                  */
 1799                                 if (!vm_map_lookup_entry(map, saved_start,
 1800                                     &tmp_entry)) {
 1801                                         if (flags & VM_MAP_WIRE_HOLESOK)
 1802                                                 tmp_entry = tmp_entry->next;
 1803                                         else {
 1804                                                 if (saved_start == start) {
 1805                                                         /*
 1806                                                          * first_entry has been deleted.
 1807                                                          */
 1808                                                         vm_map_unlock(map);
 1809                                                         return (KERN_INVALID_ADDRESS);
 1810                                                 }
 1811                                                 end = saved_start;
 1812                                                 rv = KERN_INVALID_ADDRESS;
 1813                                                 goto done;
 1814                                         }
 1815                                 }
 1816                                 if (entry == first_entry)
 1817                                         first_entry = tmp_entry;
 1818                                 else
 1819                                         first_entry = NULL;
 1820                                 entry = tmp_entry;
 1821                         }
 1822                         last_timestamp = map->timestamp;
 1823                         continue;
 1824                 }
 1825                 vm_map_clip_start(map, entry, start);
 1826                 vm_map_clip_end(map, entry, end);
 1827                 /*
 1828                  * Mark the entry in case the map lock is released.  (See
 1829                  * above.)
 1830                  */
 1831                 entry->eflags |= MAP_ENTRY_IN_TRANSITION;
 1832         /*
 1833          * Wire the entry if it is not already wired.
 1834          */
 1835                 if (entry->wired_count == 0) {
 1836                         entry->wired_count++;
 1837                         saved_start = entry->start;
 1838                         saved_end = entry->end;
 1839                         /*
 1840                          * Release the map lock, relying on the in-transition
 1841                          * mark.
 1842                          */
 1843                         vm_map_unlock(map);
 1844                         rv = vm_fault_wire(map, saved_start, saved_end,
 1845                             user_wire);
 1846                         vm_map_lock(map);
 1847                         if (last_timestamp + 1 != map->timestamp) {
 1848                                 /*
 1849                                  * Look again for the entry because the map was
 1850                                  * modified while it was unlocked.  The entry
 1851                                  * may have been clipped, but NOT merged or
 1852                                  * deleted.
 1853                                  */
 1854                                 result = vm_map_lookup_entry(map, saved_start,
 1855                                     &tmp_entry);
 1856                                 KASSERT(result, ("vm_map_wire: lookup failed"));
 1857                                 if (entry == first_entry)
 1858                                         first_entry = tmp_entry;
 1859                                 else
 1860                                         first_entry = NULL;
 1861                                 entry = tmp_entry;
 1862                                 while (entry->end < saved_end) {
 1863                                         if (rv != KERN_SUCCESS) {
 1864                                                 KASSERT(entry->wired_count == 1,
 1865                                                     ("vm_map_wire: bad count"));
 1866                                                 entry->wired_count = -1;
 1867                                         }
 1868                                         entry = entry->next;
 1869                                 }
 1870                         }
 1871                         last_timestamp = map->timestamp;
 1872                         if (rv != KERN_SUCCESS) {
 1873                                 KASSERT(entry->wired_count == 1,
 1874                                     ("vm_map_wire: bad count"));
 1875                                 /*
 1876                                  * Assign an out-of-range value to represent
 1877                                  * the failure to wire this entry.
 1878                                  */
 1879                                 entry->wired_count = -1;
 1880                                 end = entry->end;
 1881                                 goto done;
 1882                         }
 1883                 } else if (!user_wire ||
 1884                            (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
 1885                         entry->wired_count++;
 1886                 }
 1887                 /*
 1888                  * Check the map for holes in the specified region.
 1889                  * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
 1890                  */
 1891                 if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
 1892                     (entry->end < end && (entry->next == &map->header ||
 1893                     entry->next->start > entry->end))) {
 1894                         end = entry->end;
 1895                         rv = KERN_INVALID_ADDRESS;
 1896                         goto done;
 1897                 }
 1898                 entry = entry->next;
 1899         }
 1900         rv = KERN_SUCCESS;
 1901 done:
 1902         need_wakeup = FALSE;
 1903         if (first_entry == NULL) {
 1904                 result = vm_map_lookup_entry(map, start, &first_entry);
 1905                 if (!result && (flags & VM_MAP_WIRE_HOLESOK))
 1906                         first_entry = first_entry->next;
 1907                 else
 1908                         KASSERT(result, ("vm_map_wire: lookup failed"));
 1909         }
 1910         entry = first_entry;
 1911         while (entry != &map->header && entry->start < end) {
 1912                 if (rv == KERN_SUCCESS) {
 1913                         if (user_wire)
 1914                                 entry->eflags |= MAP_ENTRY_USER_WIRED;
 1915                 } else if (entry->wired_count == -1) {
 1916                         /*
 1917                          * Wiring failed on this entry.  Thus, unwiring is
 1918                          * unnecessary.
 1919                          */
 1920                         entry->wired_count = 0;
 1921                 } else {
 1922                         if (!user_wire ||
 1923                             (entry->eflags & MAP_ENTRY_USER_WIRED) == 0)
 1924                                 entry->wired_count--;
 1925                         if (entry->wired_count == 0) {
 1926                                 /*
 1927                                  * Retain the map lock.
 1928                                  */
 1929                                 vm_fault_unwire(map, entry->start, entry->end);
 1930                         }
 1931                 }
 1932                 KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION,
 1933                         ("vm_map_wire: in-transition flag missing"));
 1934                 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
 1935                 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
 1936                         entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
 1937                         need_wakeup = TRUE;
 1938                 }
 1939                 vm_map_simplify_entry(map, entry);
 1940                 entry = entry->next;
 1941         }
 1942         vm_map_unlock(map);
 1943         if (need_wakeup)
 1944                 vm_map_wakeup(map);
 1945         return (rv);
 1946 }
 1947 
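/*
 * Editor's note (distilled pattern, not literal vm_map.c code): both
 * vm_map_wire() and vm_map_unwire() use the map timestamp to detect
 * concurrent modification across a lock release.  The skeleton is:
 */
#if 0
        last_timestamp = map->timestamp;
        entry->eflags |= MAP_ENTRY_IN_TRANSITION;
        vm_map_unlock(map);
        /* ... sleep or fault in pages without the map lock ... */
        vm_map_lock(map);
        if (last_timestamp + 1 != map->timestamp) {
                /*
                 * Someone else changed the map: the cached entry pointer
                 * may be stale, so look it up again by address (hole
                 * handling elided here).
                 */
                if (!vm_map_lookup_entry(map, saved_start, &tmp_entry))
                        tmp_entry = tmp_entry->next;
                entry = tmp_entry;
        }
        last_timestamp = map->timestamp;
#endif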
 1948 /*
 1949  * vm_map_sync
 1950  *
 1951  * Push any dirty cached pages in the address range to their pager.
 1952  * If syncio is TRUE, dirty pages are written synchronously.
 1953  * If invalidate is TRUE, any cached pages are freed as well.
 1954  *
 1955  * If the size of the region from start to end is zero, we are
 1956  * supposed to flush all modified pages within the region containing
 1957  * start.  Unfortunately, a region can be split or coalesced with
 1958  * neighboring regions, making it difficult to determine what the
 1959  * original region was.  Therefore, we approximate this requirement by
 1960  * flushing the current region containing start.
 1961  *
 1962  * Returns an error if any part of the specified range is not mapped.
 1963  */
 1964 int
 1965 vm_map_sync(
 1966         vm_map_t map,
 1967         vm_offset_t start,
 1968         vm_offset_t end,
 1969         boolean_t syncio,
 1970         boolean_t invalidate)
 1971 {
 1972         vm_map_entry_t current;
 1973         vm_map_entry_t entry;
 1974         vm_size_t size;
 1975         vm_object_t object;
 1976         vm_ooffset_t offset;
 1977 
 1978         vm_map_lock_read(map);
 1979         VM_MAP_RANGE_CHECK(map, start, end);
 1980         if (!vm_map_lookup_entry(map, start, &entry)) {
 1981                 vm_map_unlock_read(map);
 1982                 return (KERN_INVALID_ADDRESS);
 1983         } else if (start == end) {
 1984                 start = entry->start;
 1985                 end = entry->end;
 1986         }
 1987         /*
 1988          * Make a first pass to check for user-wired memory and holes.
 1989          */
 1990         for (current = entry; current->start < end; current = current->next) {
 1991                 if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) {
 1992                         vm_map_unlock_read(map);
 1993                         return (KERN_INVALID_ARGUMENT);
 1994                 }
 1995                 if (end > current->end &&
 1996                     (current->next == &map->header ||
 1997                         current->end != current->next->start)) {
 1998                         vm_map_unlock_read(map);
 1999                         return (KERN_INVALID_ADDRESS);
 2000                 }
 2001         }
 2002 
 2003         if (invalidate) {
 2004                 mtx_lock(&Giant);
 2005                 vm_page_lock_queues();
 2006                 pmap_remove(map->pmap, start, end);
 2007                 vm_page_unlock_queues();
 2008                 mtx_unlock(&Giant);
 2009         }
 2010         /*
 2011          * Make a second pass, cleaning/uncaching pages from the indicated
 2012          * objects as we go.
 2013          */
 2014         for (current = entry; current->start < end; current = current->next) {
 2015                 offset = current->offset + (start - current->start);
 2016                 size = (end <= current->end ? end : current->end) - start;
 2017                 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
 2018                         vm_map_t smap;
 2019                         vm_map_entry_t tentry;
 2020                         vm_size_t tsize;
 2021 
 2022                         smap = current->object.sub_map;
 2023                         vm_map_lock_read(smap);
 2024                         (void) vm_map_lookup_entry(smap, offset, &tentry);
 2025                         tsize = tentry->end - offset;
 2026                         if (tsize < size)
 2027                                 size = tsize;
 2028                         object = tentry->object.vm_object;
 2029                         offset = tentry->offset + (offset - tentry->start);
 2030                         vm_map_unlock_read(smap);
 2031                 } else {
 2032                         object = current->object.vm_object;
 2033                 }
 2034                 vm_object_sync(object, offset, size, syncio, invalidate);
 2035                 start += size;
 2036         }
 2037 
 2038         vm_map_unlock_read(map);
 2039         return (KERN_SUCCESS);
 2040 }
 2041 
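/*
 * Editor's note (hypothetical caller sketch, not part of vm_map.c): an
 * msync(2)-style implementation maps its flags onto the syncio and
 * invalidate arguments, roughly:
 */
#if 0
        rv = vm_map_sync(&p->p_vmspace->vm_map,
            trunc_page(addr), round_page(addr + len),
            (flags & MS_SYNC) != 0,        /* write pages synchronously */
            (flags & MS_INVALIDATE) != 0); /* also free cached pages */
#endif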
 2042 /*
 2043  *      vm_map_entry_unwire:    [ internal use only ]
 2044  *
 2045  *      Make the region specified by this entry pageable.
 2046  *
 2047  *      The map in question should be locked.
 2048  *      [This is the reason for this routine's existence.]
 2049  */
 2050 static void
 2051 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
 2052 {
 2053         vm_fault_unwire(map, entry->start, entry->end);
 2054         entry->wired_count = 0;
 2055 }
 2056 
 2057 /*
 2058  *      vm_map_entry_delete:    [ internal use only ]
 2059  *
 2060  *      Deallocate the given entry from the target map.
 2061  */
 2062 static void
 2063 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
 2064 {
 2065         vm_object_t object;
 2066         vm_pindex_t offidxstart, offidxend, count;
 2067 
 2068         vm_map_entry_unlink(map, entry);
 2069         map->size -= entry->end - entry->start;
 2070 
 2071         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
 2072             (object = entry->object.vm_object) != NULL) {
 2073                 count = OFF_TO_IDX(entry->end - entry->start);
 2074                 offidxstart = OFF_TO_IDX(entry->offset);
 2075                 offidxend = offidxstart + count;
 2076                 VM_OBJECT_LOCK(object);
 2077                 if (object->ref_count != 1 &&
 2078                     ((object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING ||
 2079                      object == kernel_object || object == kmem_object) &&
 2080                     (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
 2081                         vm_object_collapse(object);
 2082                         vm_object_page_remove(object, offidxstart, offidxend, FALSE);
 2083                         if (object->type == OBJT_SWAP)
 2084                                 swap_pager_freespace(object, offidxstart, count);
 2085                         if (offidxend >= object->size &&
 2086                             offidxstart < object->size)
 2087                                 object->size = offidxstart;
 2088                 }
 2089                 VM_OBJECT_UNLOCK(object);
 2090                 vm_object_deallocate(object);
 2091         }
 2092 
 2093         vm_map_entry_dispose(map, entry);
 2094 }
 2095 
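/*
 * Editor's note (illustrative numbers, not part of vm_map.c): the size
 * adjustment above shrinks a swap/default object whose tail is being
 * removed.  For example, with an entry covering object pages [8, 16)
 * and object->size == 12, the live pages [8, 12) all go away:
 */
#if 0
        vm_pindex_t offidxstart = 8, offidxend = 16, size = 12;

        if (offidxend >= size && offidxstart < size)
                size = offidxstart;     /* object shrinks to 8 pages */
#endif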
 2096 /*
 2097  *      vm_map_delete:  [ internal use only ]
 2098  *
 2099  *      Deallocates the given address range from the target
 2100  *      map.
 2101  */
 2102 int
 2103 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
 2104 {
 2105         vm_map_entry_t entry;
 2106         vm_map_entry_t first_entry;
 2107 
 2108         /*
 2109          * Find the start of the region, and clip it
 2110          */
 2111         if (!vm_map_lookup_entry(map, start, &first_entry))
 2112                 entry = first_entry->next;
 2113         else {
 2114                 entry = first_entry;
 2115                 vm_map_clip_start(map, entry, start);
 2116         }
 2117 
 2118         /*
 2119          * Save the free space hint
 2120          */
 2121         if (entry == &map->header) {
 2122                 map->first_free = &map->header;
 2123         } else if (map->first_free->start >= start) {
 2124                 map->first_free = entry->prev;
 2125         }
 2126 
 2127         /*
 2128          * Step through all entries in this region
 2129          */
 2130         while ((entry != &map->header) && (entry->start < end)) {
 2131                 vm_map_entry_t next;
 2132 
 2133                 /*
 2134                  * Wait for wiring or unwiring of an entry to complete.
 2135                  */
 2136                 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0) {
 2137                         unsigned int last_timestamp;
 2138                         vm_offset_t saved_start;
 2139                         vm_map_entry_t tmp_entry;
 2140 
 2141                         saved_start = entry->start;
 2142                         entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
 2143                         last_timestamp = map->timestamp;
 2144                         (void) vm_map_unlock_and_wait(map, FALSE);
 2145                         vm_map_lock(map);
 2146                         if (last_timestamp + 1 != map->timestamp) {
 2147                                 /*
 2148                                  * Look again for the entry because the map was
 2149                                  * modified while it was unlocked.
 2150                                  * Specifically, the entry may have been
 2151                                  * clipped, merged, or deleted.
 2152                                  */
 2153                                 if (!vm_map_lookup_entry(map, saved_start,
 2154                                                          &tmp_entry))
 2155                                         entry = tmp_entry->next;
 2156                                 else {
 2157                                         entry = tmp_entry;
 2158                                         vm_map_clip_start(map, entry,
 2159                                                           saved_start);
 2160                                 }
 2161                         }
 2162                         continue;
 2163                 }
 2164                 vm_map_clip_end(map, entry, end);
 2165 
 2166                 next = entry->next;
 2167 
 2168                 /*
 2169                  * Unwire before removing addresses from the pmap; otherwise,
 2170                  * unwiring will put the entries back in the pmap.
 2171                  */
 2172                 if (entry->wired_count != 0) {
 2173                         vm_map_entry_unwire(map, entry);
 2174                 }
 2175 
 2176                 if (map != kmem_map)
 2177                         mtx_lock(&Giant);
 2178                 vm_page_lock_queues();
 2179                 pmap_remove(map->pmap, entry->start, entry->end);
 2180                 vm_page_unlock_queues();
 2181                 if (map != kmem_map)
 2182                         mtx_unlock(&Giant);
 2183 
 2184                 /*
 2185                  * Delete the entry (which may delete the object) only after
 2186                  * removing all pmap entries pointing to its pages.
 2187                  * (Otherwise, its page frames may be reallocated, and any
 2188                  * modify bits will be set in the wrong object!)
 2189                  */
 2190                 vm_map_entry_delete(map, entry);
 2191                 entry = next;
 2192         }
 2193         return (KERN_SUCCESS);
 2194 }
 2195 
 2196 /*
 2197  *      vm_map_remove:
 2198  *
 2199  *      Remove the given address range from the target map.
 2200  *      This is the exported form of vm_map_delete.
 2201  */
 2202 int
 2203 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
 2204 {
 2205         int result, s = 0;
 2206 
 2207         if (map == kmem_map)
 2208                 s = splvm();
 2209 
 2210         vm_map_lock(map);
 2211         VM_MAP_RANGE_CHECK(map, start, end);
 2212         result = vm_map_delete(map, start, end);
 2213         vm_map_unlock(map);
 2214 
 2215         if (map == kmem_map)
 2216                 splx(s);
 2217 
 2218         return (result);
 2219 }
 2220 
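/*
 * Editor's note (hypothetical caller sketch, not part of vm_map.c):
 * kernel code tearing down a user mapping calls the exported wrapper
 * with a page-aligned range, for example:
 */
#if 0
        (void) vm_map_remove(&p->p_vmspace->vm_map,
            trunc_page(addr), round_page(addr + len));
#endif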
 2221 /*
 2222  *      vm_map_check_protection:
 2223  *
 2224  *      Assert that the target map allows the specified privilege on the
 2225  *      entire address region given.  The entire region must be allocated.
 2226  *
 2227  *      WARNING!  This code does not and should not check whether the
 2228  *      contents of the region are accessible.  For example, a smaller file
 2229  *      might be mapped into a larger address space.
 2230  *
 2231  *      NOTE!  This code is also called by munmap().
 2232  *
 2233  *      The map must be locked.  A read lock is sufficient.
 2234  */
 2235 boolean_t
 2236 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
 2237                         vm_prot_t protection)
 2238 {
 2239         vm_map_entry_t entry;
 2240         vm_map_entry_t tmp_entry;
 2241 
 2242         if (!vm_map_lookup_entry(map, start, &tmp_entry))
 2243                 return (FALSE);
 2244         entry = tmp_entry;
 2245 
 2246         while (start < end) {
 2247                 if (entry == &map->header)
 2248                         return (FALSE);
 2249                 /*
 2250                  * No holes allowed!
 2251                  */
 2252                 if (start < entry->start)
 2253                         return (FALSE);
 2254                 /*
 2255                  * Check protection associated with entry.
 2256                  */
 2257                 if ((entry->protection & protection) != protection)
 2258                         return (FALSE);
 2259                 /* go to next entry */
 2260                 start = entry->end;
 2261                 entry = entry->next;
 2262         }
 2263         return (TRUE);
 2264 }
 2265 
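/*
 * Editor's note (hypothetical caller sketch, not part of vm_map.c): per
 * the WARNING above, the caller must hold the map lock; a read lock is
 * sufficient:
 */
#if 0
        vm_map_lock_read(map);
        ok = vm_map_check_protection(map, trunc_page(addr),
            round_page(addr + len), VM_PROT_READ | VM_PROT_WRITE);
        vm_map_unlock_read(map);
        if (!ok)
                return (EACCES);
#endif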
 2266 /*
 2267  *      vm_map_copy_entry:
 2268  *
 2269  *      Copies the contents of the source entry to the destination
 2270  *      entry.  The entries *must* be aligned properly.
 2271  */
 2272 static void
 2273 vm_map_copy_entry(
 2274         vm_map_t src_map,
 2275         vm_map_t dst_map,
 2276         vm_map_entry_t src_entry,
 2277         vm_map_entry_t dst_entry)
 2278 {
 2279         vm_object_t src_object;
 2280 
 2281         if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
 2282                 return;
 2283 
 2284         if (src_entry->wired_count == 0) {
 2285 
 2286                 /*
 2287                  * If the source entry is marked needs_copy, it is already
 2288                  * write-protected.
 2289                  */
 2290                 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
 2291                         vm_page_lock_queues();
 2292                         pmap_protect(src_map->pmap,
 2293                             src_entry->start,
 2294                             src_entry->end,
 2295                             src_entry->protection & ~VM_PROT_WRITE);
 2296                         vm_page_unlock_queues();
 2297                 }
 2298 
 2299                 /*
 2300                  * Make a copy of the object.
 2301                  */
 2302                 if ((src_object = src_entry->object.vm_object) != NULL) {
 2303                         VM_OBJECT_LOCK(src_object);
 2304                         if ((src_object->handle == NULL) &&
 2305                                 (src_object->type == OBJT_DEFAULT ||
 2306                                  src_object->type == OBJT_SWAP)) {
 2307                                 vm_object_collapse(src_object);
 2308                                 if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
 2309                                         VM_OBJECT_UNLOCK(src_object);
 2310                                         vm_object_split(src_entry);
 2311                                         src_object = src_entry->object.vm_object;
 2312                                         VM_OBJECT_LOCK(src_object);
 2313                                 }
 2314                         }
 2315                         vm_object_reference_locked(src_object);
 2316                         vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
 2317                         VM_OBJECT_UNLOCK(src_object);
 2318                         dst_entry->object.vm_object = src_object;
 2319                         src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
 2320                         dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
 2321                         dst_entry->offset = src_entry->offset;
 2322                 } else {
 2323                         dst_entry->object.vm_object = NULL;
 2324                         dst_entry->offset = 0;
 2325                 }
 2326 
 2327                 pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
 2328                     dst_entry->end - dst_entry->start, src_entry->start);
 2329         } else {
 2330                 /*
 2331                  * Of course, wired down pages can't be set copy-on-write.
 2332                  * Cause wired pages to be copied into the new map by
 2333                  * simulating faults (the new pages are pageable)
 2334                  */
 2335                 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
 2336         }
 2337 }
 2338 
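/*
 * Editor's note (sketch of the resulting invariants, not part of
 * vm_map.c): after vm_map_copy_entry() copies an unwired entry whose
 * source object is non-NULL, one could assert:
 */
#if 0
        KASSERT(dst_entry->object.vm_object == src_entry->object.vm_object,
            ("copy_entry: object not shared"));
        KASSERT((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0 &&
            (dst_entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0,
            ("copy_entry: COW flags not set"));
        /*
         * The source pmap range is now write-protected, so the first
         * write fault in either map shadows the shared object.
         */
#endif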
 2339 /*
 2340  * vmspace_fork:
 2341  * Create a new process vmspace structure and vm_map
 2342  * based on those of an existing process.  The new map
 2343  * is based on the old map, according to the inheritance
 2344  * values on the regions in that map.
 2345  *
 2346  * The source map must not be locked.
 2347  */
 2348 struct vmspace *
 2349 vmspace_fork(struct vmspace *vm1)
 2350 {
 2351         struct vmspace *vm2;
 2352         vm_map_t old_map = &vm1->vm_map;
 2353         vm_map_t new_map;
 2354         vm_map_entry_t old_entry;
 2355         vm_map_entry_t new_entry;
 2356         vm_object_t object;
 2357 
 2358         GIANT_REQUIRED;
 2359 
 2360         vm_map_lock(old_map);
 2361         old_map->infork = 1;
 2362 
 2363         vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
 2364         bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
 2365             (caddr_t) &vm1->vm_endcopy - (caddr_t) &vm1->vm_startcopy);
 2366         new_map = &vm2->vm_map; /* XXX */
 2367         new_map->timestamp = 1;
 2368 
 2369         /* Do not inherit the MAP_WIREFUTURE property. */
 2370         if ((new_map->flags & MAP_WIREFUTURE) == MAP_WIREFUTURE)
 2371                 new_map->flags &= ~MAP_WIREFUTURE;
 2372 
 2373         old_entry = old_map->header.next;
 2374 
 2375         while (old_entry != &old_map->header) {
 2376                 if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
 2377                         panic("vm_map_fork: encountered a submap");
 2378 
 2379                 switch (old_entry->inheritance) {
 2380                 case VM_INHERIT_NONE:
 2381                         break;
 2382 
 2383                 case VM_INHERIT_SHARE:
 2384                         /*
 2385                          * Clone the entry, creating the shared object if necessary.
 2386                          */
 2387                         object = old_entry->object.vm_object;
 2388                         if (object == NULL) {
 2389                                 object = vm_object_allocate(OBJT_DEFAULT,
 2390                                         atop(old_entry->end - old_entry->start));
 2391                                 old_entry->object.vm_object = object;
 2392                                 old_entry->offset = (vm_offset_t) 0;
 2393                         }
 2394 
 2395                         /*
 2396                          * Add the reference before calling vm_object_shadow
 2397                          * to ensure that a shadow object is created.
 2398                          */
 2399                         vm_object_reference(object);
 2400                         if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
 2401                                 vm_object_shadow(&old_entry->object.vm_object,
 2402                                         &old_entry->offset,
 2403                                         atop(old_entry->end - old_entry->start));
 2404                                 old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
 2405                                 /* Transfer the second reference too. */
 2406                                 vm_object_reference(
 2407                                     old_entry->object.vm_object);
 2408                                 vm_object_deallocate(object);
 2409                                 object = old_entry->object.vm_object;
 2410                         }
 2411                         VM_OBJECT_LOCK(object);
 2412                         vm_object_clear_flag(object, OBJ_ONEMAPPING);
 2413                         VM_OBJECT_UNLOCK(object);
 2414 
 2415                         /*
 2416                          * Clone the entry, referencing the shared object.
 2417                          */
 2418                         new_entry = vm_map_entry_create(new_map);
 2419                         *new_entry = *old_entry;
 2420                         new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
 2421                         new_entry->wired_count = 0;
 2422 
 2423                         /*
 2424                          * Insert the entry into the new map -- we know we're
 2425                          * inserting at the end of the new map.
 2426                          */
 2427                         vm_map_entry_link(new_map, new_map->header.prev,
 2428                             new_entry);
 2429 
 2430                         /*
 2431                          * Update the physical map
 2432                          */
 2433                         pmap_copy(new_map->pmap, old_map->pmap,
 2434                             new_entry->start,
 2435                             (old_entry->end - old_entry->start),
 2436                             old_entry->start);
 2437                         break;
 2438 
 2439                 case VM_INHERIT_COPY:
 2440                         /*
 2441                          * Clone the entry and link into the map.
 2442                          */
 2443                         new_entry = vm_map_entry_create(new_map);
 2444                         *new_entry = *old_entry;
 2445                         new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
 2446                         new_entry->wired_count = 0;
 2447                         new_entry->object.vm_object = NULL;
 2448                         vm_map_entry_link(new_map, new_map->header.prev,
 2449                             new_entry);
 2450                         vm_map_copy_entry(old_map, new_map, old_entry,
 2451                             new_entry);
 2452                         break;
 2453                 }
 2454                 old_entry = old_entry->next;
 2455         }
 2456 
 2457         new_map->size = old_map->size;
 2458         old_map->infork = 0;
 2459         vm_map_unlock(old_map);
 2460 
 2461         return (vm2);
 2462 }
 2463 
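/*
 * Editor's note (hypothetical caller sketch, not part of vm_map.c):
 * fork(2)-style code creates the child's address space from the
 * parent's while still under Giant in this era:
 */
#if 0
        GIANT_REQUIRED;
        vm2 = vmspace_fork(p1->p_vmspace);      /* parent map unlocked */
        p2->p_vmspace = vm2;                    /* install in the child */
#endif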
 2464 int
 2465 vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
 2466     vm_prot_t prot, vm_prot_t max, int cow)
 2467 {
 2468         vm_map_entry_t new_entry, prev_entry;
 2469         vm_offset_t bot, top;
 2470         vm_size_t init_ssize;
 2471         int orient, rv;
 2472 
 2473         /*
 2474          * The stack orientation is piggybacked with the cow argument.
 2475          * Extract it into orient and mask the cow argument so that we
 2476          * don't pass it around further.
 2477          * NOTE: We explicitly allow bi-directional stacks.
 2478          */
 2479         orient = cow & (MAP_STACK_GROWS_DOWN|MAP_STACK_GROWS_UP);
 2480         cow &= ~orient;
 2481         KASSERT(orient != 0, ("No stack grow direction"));
 2482 
 2483         if (addrbos < vm_map_min(map) || addrbos > map->max_offset)
 2484                 return (KERN_NO_SPACE);
 2485 
 2486         init_ssize = (max_ssize < sgrowsiz) ? max_ssize : sgrowsiz;
 2487 
 2488         vm_map_lock(map);
 2489 
 2490         /* If addr is already mapped, no go */
 2491         if (vm_map_lookup_entry(map, addrbos, &prev_entry)) {
 2492                 vm_map_unlock(map);
 2493                 return (KERN_NO_SPACE);
 2494         }
 2495 
 2496         /* If we would blow our VMEM resource limit, no go */
 2497         if (map->size + init_ssize >
 2498             curthread->td_proc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
 2499                 vm_map_unlock(map);
 2500                 return (KERN_NO_SPACE);
 2501         }
 2502 
 2503         /*
 2504          * If we can't accommodate max_ssize in the current mapping, no go.
 2505          * However, we need to be aware that subsequent user mappings might
 2506          * map into the space we have reserved for stack, and currently this
 2507          * space is not protected.
 2508          *
 2509          * Hopefully we will at least detect this condition when we try to
 2510          * grow the stack.
 2511          */
 2512         if ((prev_entry->next != &map->header) &&
 2513             (prev_entry->next->start < addrbos + max_ssize)) {
 2514                 vm_map_unlock(map);
 2515                 return (KERN_NO_SPACE);
 2516         }
 2517 
 2518         /*
 2519          * We initially map a stack of only init_ssize.  We will grow as
 2520          * needed later.  Depending on the orientation of the stack (i.e.
 2521          * the grow direction) we either map at the top of the range, the
 2522          * bottom of the range or in the middle.
 2523          *
 2524          * Note: we would normally expect prot and max to be VM_PROT_ALL,
 2525          * and cow to be 0.  Possibly we should eliminate these as input
 2526          * parameters, and just pass these values here in the insert call.
 2527          */
 2528         if (orient == MAP_STACK_GROWS_DOWN)
 2529                 bot = addrbos + max_ssize - init_ssize;
 2530         else if (orient == MAP_STACK_GROWS_UP)
 2531                 bot = addrbos;
 2532         else
 2533                 bot = round_page(addrbos + max_ssize/2 - init_ssize/2);
 2534         top = bot + init_ssize;
 2535         rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow);
 2536 
 2537         /* Now set the avail_ssize amount. */
 2538         if (rv == KERN_SUCCESS) {
 2539                 if (prev_entry != &map->header)
 2540                         vm_map_clip_end(map, prev_entry, bot);
 2541                 new_entry = prev_entry->next;
 2542                 if (new_entry->end != top || new_entry->start != bot)
 2543                         panic("Bad entry start/end for new stack entry");
 2544 
 2545                 new_entry->avail_ssize = max_ssize - init_ssize;
 2546                 if (orient & MAP_STACK_GROWS_DOWN)
 2547                         new_entry->eflags |= MAP_ENTRY_GROWS_DOWN;
 2548                 if (orient & MAP_STACK_GROWS_UP)
 2549                         new_entry->eflags |= MAP_ENTRY_GROWS_UP;
 2550         }
 2551 
 2552         vm_map_unlock(map);
 2553         return (rv);
 2554 }
 2555 
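/*
 * Editor's note (hypothetical caller sketch, not part of vm_map.c):
 * exec-time stack setup reserves max_ssize of address space but maps
 * only the initial portion, growing downward:
 */
#if 0
        rv = vm_map_stack(&p->p_vmspace->vm_map, addrbos, maxssiz,
            VM_PROT_ALL, VM_PROT_ALL, MAP_STACK_GROWS_DOWN);
        if (rv != KERN_SUCCESS)
                return (ENOMEM);
#endif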
 2556 /* Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
 2557  * desired address is already mapped, or if we successfully grow
 2558  * the stack.  Also returns KERN_SUCCESS if addr is outside the
 2559  * stack range (this is strange, but preserves compatibility with
 2560  * the grow function in vm_machdep.c).
 2561  */
 2562 int
 2563 vm_map_growstack(struct proc *p, vm_offset_t addr)
 2564 {
 2565         vm_map_entry_t next_entry, prev_entry;
 2566         vm_map_entry_t new_entry, stack_entry;
 2567         struct vmspace *vm = p->p_vmspace;
 2568         vm_map_t map = &vm->vm_map;
 2569         vm_offset_t end;
 2570         size_t grow_amount, max_grow;
 2571         int is_procstack, rv;
 2572 
 2573         GIANT_REQUIRED;
 2574 
 2575 Retry:
 2576         vm_map_lock_read(map);
 2577 
 2578         /* If addr is already in the entry range, no need to grow. */
 2579         if (vm_map_lookup_entry(map, addr, &prev_entry)) {
 2580                 vm_map_unlock_read(map);
 2581                 return (KERN_SUCCESS);
 2582         }
 2583 
 2584         next_entry = prev_entry->next;
 2585         if (!(prev_entry->eflags & MAP_ENTRY_GROWS_UP)) {
 2586                 /*
 2587                  * This entry does not grow upwards. Since the address lies
 2588                  * beyond this entry, the next entry (if one exists) has to
 2589                  * be a downward growable entry. The entry list header is
 2590                  * never a growable entry, so it suffices to check the flags.
 2591                  */
 2592                 if (!(next_entry->eflags & MAP_ENTRY_GROWS_DOWN)) {
 2593                         vm_map_unlock_read(map);
 2594                         return (KERN_SUCCESS);
 2595                 }
 2596                 stack_entry = next_entry;
 2597         } else {
 2598                 /*
 2599                  * This entry grows upward. If the next entry does not at
 2600          * least grow downwards, this is the entry we need to grow.
 2601          * Otherwise, we have two possible choices and we have to
 2602          * select one.
 2603                  */
 2604                 if (next_entry->eflags & MAP_ENTRY_GROWS_DOWN) {
 2605                         /*
 2606          * We have two choices: grow the entry closest to
 2607                          * the address to minimize the amount of growth.
 2608                          */
 2609                         if (addr - prev_entry->end <= next_entry->start - addr)
 2610                                 stack_entry = prev_entry;
 2611                         else
 2612                                 stack_entry = next_entry;
 2613                 } else
 2614                         stack_entry = prev_entry;
 2615         }
 2616 
 2617         if (stack_entry == next_entry) {
 2618                 KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_DOWN, ("foo"));
 2619                 KASSERT(addr < stack_entry->start, ("foo"));
 2620                 end = (prev_entry != &map->header) ? prev_entry->end :
 2621                     stack_entry->start - stack_entry->avail_ssize;
 2622                 grow_amount = roundup(stack_entry->start - addr, PAGE_SIZE);
 2623                 max_grow = stack_entry->start - end;
 2624         } else {
 2625                 KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_UP, ("foo"));
 2626                 KASSERT(addr >= stack_entry->end, ("foo"));
 2627                 end = (next_entry != &map->header) ? next_entry->start :
 2628                     stack_entry->end + stack_entry->avail_ssize;
 2629                 grow_amount = roundup(addr + 1 - stack_entry->end, PAGE_SIZE);
 2630                 max_grow = end - stack_entry->end;
 2631         }
 2632 
 2633         if (grow_amount > stack_entry->avail_ssize) {
 2634                 vm_map_unlock_read(map);
 2635                 return (KERN_NO_SPACE);
 2636         }
 2637 
 2638         /*
 2639          * If there is no longer enough space between the entries, do not
 2640          * grow, and adjust the available space.  Note: this should only
 2641          * happen if the user has mapped into the stack area after the
 2642          * stack was created, and is probably an error.
 2643          *
 2644          * This also effectively destroys any guard page the user might have
 2645          * intended by limiting the stack size.
 2646          */
 2647         if (grow_amount > max_grow) {
 2648                 if (vm_map_lock_upgrade(map))
 2649                         goto Retry;
 2650 
 2651                 stack_entry->avail_ssize = max_grow;
 2652 
 2653                 vm_map_unlock(map);
 2654                 return (KERN_NO_SPACE);
 2655         }
 2656 
 2657         is_procstack = (addr >= (vm_offset_t)vm->vm_maxsaddr) ? 1 : 0;
 2658 
 2659         /*
 2660          * If this is the main process stack, see if we're over the stack
 2661          * limit.
 2662          */
 2663         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
 2664                              p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
 2665                 vm_map_unlock_read(map);
 2666                 return (KERN_NO_SPACE);
 2667         }
 2668 
 2669         /* Round up the grow amount to a multiple of sgrowsiz. */
 2670         grow_amount = roundup(grow_amount, sgrowsiz);
 2671         if (grow_amount > stack_entry->avail_ssize)
 2672                 grow_amount = stack_entry->avail_ssize;
 2673         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
 2674                              p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
 2675                 grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur -
 2676                               ctob(vm->vm_ssize);
 2677         }
 2678 
 2679         /* If we would blow our VMEM resource limit, no go */
 2680         if (map->size + grow_amount >
 2681             curthread->td_proc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
 2682                 vm_map_unlock_read(map);
 2683                 return (KERN_NO_SPACE);
 2684         }
 2685 
 2686         if (vm_map_lock_upgrade(map))
 2687                 goto Retry;
 2688 
 2689         if (stack_entry == next_entry) {
 2690                 /*
 2691                  * Growing downward.
 2692                  */
 2693                 /* Get the preliminary new entry start value */
 2694                 addr = stack_entry->start - grow_amount;
 2695 
 2696                 /*
 2697                  * If this puts us into the previous entry, cut back our
 2698                  * growth to the available space. Also, see the note above.
 2699                  */
 2700                 if (addr < end) {
 2701                         stack_entry->avail_ssize = max_grow;
 2702                         addr = end;
 2703                 }
 2704 
 2705                 rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start,
 2706                     p->p_sysent->sv_stackprot, VM_PROT_ALL, 0);
 2707 
 2708                 /* Adjust the available stack space by the amount we grew. */
 2709                 if (rv == KERN_SUCCESS) {
 2710                         if (prev_entry != &map->header)
 2711                                 vm_map_clip_end(map, prev_entry, addr);
 2712                         new_entry = prev_entry->next;
 2713                         KASSERT(new_entry == stack_entry->prev, ("new entry not adjacent to stack"));
 2714                         KASSERT(new_entry->end == stack_entry->start, ("new entry end mismatch"));
 2715                         KASSERT(new_entry->start == addr, ("new entry start mismatch"));
 2716                         grow_amount = new_entry->end - new_entry->start;
 2717                         new_entry->avail_ssize = stack_entry->avail_ssize -
 2718                             grow_amount;
 2719                         stack_entry->eflags &= ~MAP_ENTRY_GROWS_DOWN;
 2720                         new_entry->eflags |= MAP_ENTRY_GROWS_DOWN;
 2721                 }
 2722         } else {
 2723                 /*
 2724                  * Growing upward.
 2725                  */
 2726                 addr = stack_entry->end + grow_amount;
 2727 
 2728                 /*
 2729                  * If this puts us into the next entry, cut back our growth
 2730                  * to the available space. Also, see the note above.
 2731                  */
 2732                 if (addr > end) {
 2733                         stack_entry->avail_ssize = end - stack_entry->end;
 2734                         addr = end;
 2735                 }
 2736 
 2737                 grow_amount = addr - stack_entry->end;
 2738 
 2739                 /* Grow the underlying object if applicable. */
 2740                 if (stack_entry->object.vm_object == NULL ||
 2741                     vm_object_coalesce(stack_entry->object.vm_object,
 2742                     OFF_TO_IDX(stack_entry->offset),
 2743                     (vm_size_t)(stack_entry->end - stack_entry->start),
 2744                     (vm_size_t)grow_amount)) {
 2745                         map->size += (addr - stack_entry->end);
 2746                         /* Update the current entry. */
 2747                         stack_entry->end = addr;
 2748                         stack_entry->avail_ssize -= grow_amount;
 2749                         rv = KERN_SUCCESS;
 2750 
 2751                         if (next_entry != &map->header)
 2752                                 vm_map_clip_start(map, next_entry, addr);
 2753                 } else
 2754                         rv = KERN_FAILURE;
 2755         }
 2756 
 2757         if (rv == KERN_SUCCESS && is_procstack)
 2758                 vm->vm_ssize += btoc(grow_amount);
 2759 
 2760         vm_map_unlock(map);
 2761 
 2762         /*
 2763          * Heed the MAP_WIREFUTURE flag if it was set for this process.
 2764          */
 2765         if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE)) {
 2766                 vm_map_wire(map,
 2767                     (stack_entry == next_entry) ? addr : addr - grow_amount,
 2768                     (stack_entry == next_entry) ? stack_entry->start : addr,
 2769                     (p->p_flag & P_SYSTEM)
 2770                     ? VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES
 2771                     : VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);
 2772         }
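              /*
               * Userland trigger (sketch): mlockall(2) with MCL_FUTURE is
               * what sets MAP_WIREFUTURE on a process map, e.g.:
               *
               *      if (mlockall(MCL_CURRENT | MCL_FUTURE) == -1)
               *              err(1, "mlockall");
               *
               * after which stack growth is wired here as it happens.
               */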
 2773 
 2774         return (rv);
 2775 }
 2776 
 2777 /*
 2778  * Unshare the specified VM space for exec.  If other processes are
 2779  * mapped to it, then create a new one.  The new vmspace starts empty.
 2780  */
 2781 void
 2782 vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
 2783 {
 2784         struct vmspace *oldvmspace = p->p_vmspace;
 2785         struct vmspace *newvmspace;
 2786 
 2787         GIANT_REQUIRED;
 2788         newvmspace = vmspace_alloc(minuser, maxuser);
 2789         bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy,
 2790             (caddr_t) (newvmspace + 1) - (caddr_t) &newvmspace->vm_startcopy);
 2791         /*
 2792          * This code is written this way for prototype purposes.  The
 2793          * goal is to avoid running down the vmspace here, but to let
 2794          * the other processes that are still using the vmspace finally
 2795          * run it down.  Even though there is little or no chance of blocking
 2796          * here, it is a good idea to keep this form for future mods.
 2797          */
 2798         p->p_vmspace = newvmspace;
 2799         pmap_pinit2(vmspace_pmap(newvmspace));
 2800         vmspace_free(oldvmspace);
 2801         if (p == curthread->td_proc)            /* XXXKSE ? */
 2802                 pmap_activate(curthread);
 2803 }
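      /*
       * The bcopy() above depends on the layout of struct vmspace: every
       * field from the one aliased by vm_startcopy through the end of the
       * structure is copied wholesale into the new vmspace, while the map
       * and pmap at the front are left alone.  A minimal sketch of the
       * idiom (the field names here are hypothetical, not the real
       * definition):
       *
       *      struct vmspace {
       *              struct vm_map vm_map;            -- not copied
       *      #define vm_startcopy vm_first_copied
       *              segsz_t vm_first_copied;         -- copied from here...
       *              caddr_t vm_last_field;           -- ...through the end
       *      };
       */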
 2804 
 2805 /*
 2806  * Unshare the specified VM space for forcing COW.  This
 2807  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
 2808  */
 2809 void
 2810 vmspace_unshare(struct proc *p)
 2811 {
 2812         struct vmspace *oldvmspace = p->p_vmspace;
 2813         struct vmspace *newvmspace;
 2814 
 2815         GIANT_REQUIRED;
 2816         if (oldvmspace->vm_refcnt == 1)
 2817                 return;
 2818         newvmspace = vmspace_fork(oldvmspace);
 2819         p->p_vmspace = newvmspace;
 2820         pmap_pinit2(vmspace_pmap(newvmspace));
 2821         vmspace_free(oldvmspace);
 2822         if (p == curthread->td_proc)            /* XXXKSE ? */
 2823                 pmap_activate(curthread);
 2824 }
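      /*
       * Example trigger (sketch): an rfork(2) call with neither RFPROC nor
       * RFMEM set, such as rfork(RFFDG), lands here and gives the calling
       * process a private, copy-on-write copy of its address space.
       */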
 2825 
 2826 /*
 2827  *      vm_map_lookup:
 2828  *
 2829  *      Finds the VM object, offset, and
 2830  *      protection for a given virtual address in the
 2831  *      specified map, assuming a page fault of the
 2832  *      type specified.
 2833  *
 2834  *      Leaves the map in question locked for read; return
 2835  *      values are guaranteed until a vm_map_lookup_done
 2836  *      call is performed.  Note that the map argument
 2837  *      is in/out; the returned map must be used in
 2838  *      the call to vm_map_lookup_done.
 2839  *
 2840  *      A handle (out_entry) is returned for use in
 2841  *      vm_map_lookup_done, to make that fast.
 2842  *
 2843  *      If a lookup is requested with "write protection"
 2844  *      specified, the map may be changed to perform virtual
 2845  *      copying operations, although the data referenced will
 2846  *      remain the same.
 2847  */
 2848 int
 2849 vm_map_lookup(vm_map_t *var_map,                /* IN/OUT */
 2850               vm_offset_t vaddr,
 2851               vm_prot_t fault_typea,
 2852               vm_map_entry_t *out_entry,        /* OUT */
 2853               vm_object_t *object,              /* OUT */
 2854               vm_pindex_t *pindex,              /* OUT */
 2855               vm_prot_t *out_prot,              /* OUT */
 2856               boolean_t *wired)                 /* OUT */
 2857 {
 2858         vm_map_entry_t entry;
 2859         vm_map_t map = *var_map;
 2860         vm_prot_t prot;
 2861         vm_prot_t fault_type = fault_typea;
 2862 
 2863 RetryLookup:;
 2864         /*
 2865          * Lookup the faulting address.
 2866          */
 2867 
 2868         vm_map_lock_read(map);
 2869 #define RETURN(why) \
 2870                 { \
 2871                 vm_map_unlock_read(map); \
 2872                 return (why); \
 2873                 }
 2874 
 2875         /*
 2876          * If the map has an interesting hint, try it before calling the
 2877          * full-blown lookup routine.
 2878          */
 2879         entry = map->root;
 2880         *out_entry = entry;
 2881         if (entry == NULL ||
 2882             (vaddr < entry->start) || (vaddr >= entry->end)) {
 2883                 /*
 2884                  * Entry was either not a valid hint, or the vaddr was not
 2885                  * contained in the entry, so do a full lookup.
 2886                  */
 2887                 if (!vm_map_lookup_entry(map, vaddr, out_entry))
 2888                         RETURN(KERN_INVALID_ADDRESS);
 2889 
 2890                 entry = *out_entry;
 2891         }
 2892 
 2893         /*
 2894          * Handle submaps.
 2895          */
 2896         if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
 2897                 vm_map_t old_map = map;
 2898 
 2899                 *var_map = map = entry->object.sub_map;
 2900                 vm_map_unlock_read(old_map);
 2901                 goto RetryLookup;
 2902         }
 2903 
 2904         /*
 2905          * Check whether this task is allowed to have this page.
 2906          * Note the special case for MAP_ENTRY_COW
 2907          * pages with an override.  This is to implement a forced
 2908          * COW for debuggers.
 2909          */
 2910         if (fault_type & VM_PROT_OVERRIDE_WRITE)
 2911                 prot = entry->max_protection;
 2912         else
 2913                 prot = entry->protection;
 2914         fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
 2915         if ((fault_type & prot) != fault_type) {
 2916                 RETURN(KERN_PROTECTION_FAILURE);
 2917         }
 2918         if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
 2919             (entry->eflags & MAP_ENTRY_COW) &&
 2920             (fault_type & VM_PROT_WRITE) &&
 2921             (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) {
 2922                 RETURN(KERN_PROTECTION_FAILURE);
 2923         }
 2924 
 2925         /*
 2926          * If this page is not pageable, we have to get it for all possible
 2927          * accesses.
 2928          */
 2929         *wired = (entry->wired_count != 0);
 2930         if (*wired)
 2931                 prot = fault_type = entry->protection;
 2932 
 2933         /*
 2934          * If the entry was copy-on-write, we either shadow it now (write fault) or drop VM_PROT_WRITE (read fault).
 2935          */
 2936         if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
 2937                 /*
 2938                  * If we want to write the page, we may as well handle that
 2939                  * now since we've got the map locked.
 2940                  *
 2941                  * If we don't need to write the page, we just demote the
 2942                  * permissions allowed.
 2943                  */
 2944                 if (fault_type & VM_PROT_WRITE) {
 2945                         /*
 2946                          * Make a new object, and place it in the object
 2947                          * chain.  Note that no new references have appeared
 2948                          * -- one just moved from the map to the new
 2949                          * object.
 2950                          */
 2951                         if (vm_map_lock_upgrade(map))
 2952                                 goto RetryLookup;
 2953 
 2954                         vm_object_shadow(
 2955                             &entry->object.vm_object,
 2956                             &entry->offset,
 2957                             atop(entry->end - entry->start));
 2958                         entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
 2959 
 2960                         vm_map_lock_downgrade(map);
 2961                 } else {
 2962                         /*
 2963                          * We're attempting to read a copy-on-write page --
 2964                          * don't allow writes.
 2965                          */
 2966                         prot &= ~VM_PROT_WRITE;
 2967                 }
 2968         }
 2969 
 2970         /*
 2971          * Create an object if necessary.
 2972          */
 2973         if (entry->object.vm_object == NULL &&
 2974             !map->system_map) {
 2975                 if (vm_map_lock_upgrade(map))
 2976                         goto RetryLookup;
 2977                 entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
 2978                     atop(entry->end - entry->start));
 2979                 entry->offset = 0;
 2980                 vm_map_lock_downgrade(map);
 2981         }
 2982 
 2983         /*
 2984          * Return the object/offset from this entry.  If the entry was
 2985          * copy-on-write or empty, it has been fixed up.
 2986          */
 2987         *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
 2988         *object = entry->object.vm_object;
 2989 
 2990         /*
 2991          * Return the protection to be applied to the faulting page.
 2992          */
 2993         *out_prot = prot;
 2994         return (KERN_SUCCESS);
 2995 
 2996 #undef  RETURN
 2997 }
 2998 
 2999 /*
 3000  *      vm_map_lookup_done:
 3001  *
 3002  *      Releases locks acquired by a vm_map_lookup
 3003  *      (according to the handle returned by that lookup).
 3004  */
 3005 void
 3006 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
 3007 {
 3008         /*
 3009          * Unlock the main-level map
 3010          */
 3011         vm_map_unlock_read(map);
 3012 }
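      /*
       * Usage sketch (illustrative only, not part of this file): a caller
       * pairs vm_map_lookup() with vm_map_lookup_done(), passing the map
       * by reference because the lookup may descend into a submap and
       * hand back a different map that must be used for the done call.
       */
      static int
      example_lookup(vm_map_t map, vm_offset_t va)
      {
              vm_map_entry_t entry;
              vm_object_t object;
              vm_pindex_t pindex;
              vm_prot_t prot;
              boolean_t wired;
              int rv;

              rv = vm_map_lookup(&map, va, VM_PROT_READ, &entry, &object,
                  &pindex, &prot, &wired);
              if (rv != KERN_SUCCESS)
                      return (rv);
              /* ... use (object, pindex); the map stays read-locked ... */
              vm_map_lookup_done(map, entry);
              return (KERN_SUCCESS);
      }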
 3013 
 3014 #include "opt_ddb.h"
 3015 #ifdef DDB
 3016 #include <sys/kernel.h>
 3017 
 3018 #include <ddb/ddb.h>
 3019 
 3020 /*
 3021  *      vm_map_print:   [ debug ]
 3022  */
 3023 DB_SHOW_COMMAND(map, vm_map_print)
 3024 {
 3025         static int nlines;
 3026         /* XXX convert args. */
 3027         vm_map_t map = (vm_map_t)addr;
 3028         boolean_t full = have_addr;
 3029 
 3030         vm_map_entry_t entry;
 3031 
 3032         db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
 3033             (void *)map,
 3034             (void *)map->pmap, map->nentries, map->timestamp);
 3035         nlines++;
 3036 
 3037         if (!full && db_indent)
 3038                 return;
 3039 
 3040         db_indent += 2;
 3041         for (entry = map->header.next; entry != &map->header;
 3042             entry = entry->next) {
 3043                 db_iprintf("map entry %p: start=%p, end=%p\n",
 3044                     (void *)entry, (void *)entry->start, (void *)entry->end);
 3045                 nlines++;
 3046                 {
 3047                         static char *inheritance_name[4] =
 3048                         {"share", "copy", "none", "donate_copy"};
 3049 
 3050                         db_iprintf(" prot=%x/%x/%s",
 3051                             entry->protection,
 3052                             entry->max_protection,
 3053                             inheritance_name[(int)(unsigned char)entry->inheritance]);
 3054                         if (entry->wired_count != 0)
 3055                                 db_printf(", wired");
 3056                 }
 3057                 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
 3058                         db_printf(", share=%p, offset=0x%jx\n",
 3059                             (void *)entry->object.sub_map,
 3060                             (uintmax_t)entry->offset);
 3061                         nlines++;
 3062                         if ((entry->prev == &map->header) ||
 3063                             (entry->prev->object.sub_map !=
 3064                                 entry->object.sub_map)) {
 3065                                 db_indent += 2;
 3066                                 vm_map_print((db_expr_t)(intptr_t)
 3067                                              entry->object.sub_map,
 3068                                              full, 0, (char *)0);
 3069                                 db_indent -= 2;
 3070                         }
 3071                 } else {
 3072                         db_printf(", object=%p, offset=0x%jx",
 3073                             (void *)entry->object.vm_object,
 3074                             (uintmax_t)entry->offset);
 3075                         if (entry->eflags & MAP_ENTRY_COW)
 3076                                 db_printf(", copy (%s)",
 3077                                     (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
 3078                         db_printf("\n");
 3079                         nlines++;
 3080 
 3081                         if ((entry->prev == &map->header) ||
 3082                             (entry->prev->object.vm_object !=
 3083                                 entry->object.vm_object)) {
 3084                                 db_indent += 2;
 3085                                 vm_object_print((db_expr_t)(intptr_t)
 3086                                                 entry->object.vm_object,
 3087                                                 full, 0, (char *)0);
 3088                                 nlines += 4;
 3089                                 db_indent -= 2;
 3090                         }
 3091                 }
 3092         }
 3093         db_indent -= 2;
 3094         if (db_indent == 0)
 3095                 nlines = 0;
 3096 }
 3097 
 3098 
 3099 DB_SHOW_COMMAND(procvm, procvm)
 3100 {
 3101         struct proc *p;
 3102 
 3103         if (have_addr) {
 3104                 p = (struct proc *) addr;
 3105         } else {
 3106                 p = curproc;
 3107         }
 3108 
 3109         db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
 3110             (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
 3111             (void *)vmspace_pmap(p->p_vmspace));
 3112 
 3113         vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL);
 3114 }
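      /*
       * Example ddb(4) usage (sketch; the address below is made up): from
       * the kernel debugger prompt,
       *
       *      db> show procvm
       *      db> show map 0xc1234567
       *
       * "show procvm" with no address dumps the current process's map.
       */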
 3115 
 3116 #endif /* DDB */
