FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_map.c


    1 /*
    2  * Copyright (c) 1991, 1993
    3  *      The Regents of the University of California.  All rights reserved.
    4  *
    5  * This code is derived from software contributed to Berkeley by
    6  * The Mach Operating System project at Carnegie-Mellon University.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  * 3. All advertising materials mentioning features or use of this software
   17  *    must display the following acknowledgement:
   18  *      This product includes software developed by the University of
   19  *      California, Berkeley and its contributors.
   20  * 4. Neither the name of the University nor the names of its contributors
   21  *    may be used to endorse or promote products derived from this software
   22  *    without specific prior written permission.
   23  *
   24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   34  * SUCH DAMAGE.
   35  *
   36  *      from: @(#)vm_map.c      8.3 (Berkeley) 1/12/94
   37  *
   38  *
   39  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
   40  * All rights reserved.
   41  *
   42  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
   43  *
   44  * Permission to use, copy, modify and distribute this software and
   45  * its documentation is hereby granted, provided that both the copyright
   46  * notice and this permission notice appear in all copies of the
   47  * software, derivative works or modified versions, and any portions
   48  * thereof, and that both notices appear in supporting documentation.
   49  *
   50  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
   51  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
   52  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
   53  *
   54  * Carnegie Mellon requests users of this software to return to
   55  *
   56  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
   57  *  School of Computer Science
   58  *  Carnegie Mellon University
   59  *  Pittsburgh PA 15213-3890
   60  *
   61  * any improvements or extensions that they make and grant Carnegie the
   62  * rights to redistribute these changes.
   63  *
   64  * $FreeBSD: releng/5.0/sys/vm/vm_map.c 108169 2002-12-22 03:30:34Z dillon $
   65  */
   66 
   67 /*
   68  *      Virtual memory mapping module.
   69  */
   70 
   71 #include <sys/param.h>
   72 #include <sys/systm.h>
   73 #include <sys/ktr.h>
   74 #include <sys/lock.h>
   75 #include <sys/mutex.h>
   76 #include <sys/proc.h>
   77 #include <sys/vmmeter.h>
   78 #include <sys/mman.h>
   79 #include <sys/vnode.h>
   80 #include <sys/resourcevar.h>
   81 #include <sys/sysent.h>
   82 #include <sys/stdint.h>
   83 
   84 #include <vm/vm.h>
   85 #include <vm/vm_param.h>
   86 #include <vm/pmap.h>
   87 #include <vm/vm_map.h>
   88 #include <vm/vm_page.h>
   89 #include <vm/vm_object.h>
   90 #include <vm/vm_pager.h>
   91 #include <vm/vm_kern.h>
   92 #include <vm/vm_extern.h>
   93 #include <vm/swap_pager.h>
   94 #include <vm/uma.h>
   95 
   96 /*
   97  *      Virtual memory maps provide for the mapping, protection,
   98  *      and sharing of virtual memory objects.  In addition,
   99  *      this module provides for an efficient virtual copy of
  100  *      memory from one map to another.
  101  *
  102  *      Synchronization is required prior to most operations.
  103  *
  104  *      Maps consist of an ordered doubly-linked list of simple
  105  *      entries; a single hint is used to speed up lookups.
  106  *
  107  *      Since portions of maps are specified by start/end addresses,
  108  *      which may not align with existing map entries, all
  109  *      routines merely "clip" entries to these start/end values.
  110  *      [That is, an entry is split into two, bordering at a
  111  *      start or end value.]  Note that these clippings may not
  112  *      always be necessary (as the two resulting entries are then
  113  *      not changed); however, the clipping is done for convenience.
  114  *
  115  *      As mentioned above, virtual copy operations are performed
  116  *      by copying VM object references from one map to
  117  *      another, and then marking both regions as copy-on-write.
  118  */
  119 
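/*
 * A minimal traversal sketch (supplementary, not part of the original
 * file): every map embeds the sentinel entry &map->header, so a walk of
 * the ordered entry list follows the next pointers until the sentinel
 * comes back around, e.g.:
 *
 *	vm_map_entry_t cur;
 *
 *	vm_map_lock_read(map);
 *	for (cur = map->header.next; cur != &map->header; cur = cur->next)
 *		... examine the range [cur->start, cur->end) ...
 *	vm_map_unlock_read(map);
 *
 * vmspace_swap_count() below uses exactly this pattern.
 */
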
  120 /*
  121  *      vm_map_startup:
  122  *
  123  *      Initialize the vm_map module.  Must be called before
  124  *      any other vm_map routines.
  125  *
  126  *      Map and entry structures are allocated from the general
  127  *      purpose memory pool with some exceptions:
  128  *
  129  *      - The kernel map and kmem submap are allocated statically.
  130  *      - Kernel map entries are allocated out of a static pool.
  131  *
  132  *      These restrictions are necessary since malloc() uses the
  133  *      maps and requires map entries.
  134  */
  135 
  136 static uma_zone_t mapentzone;
  137 static uma_zone_t kmapentzone;
  138 static uma_zone_t mapzone;
  139 static uma_zone_t vmspace_zone;
  140 static struct vm_object kmapentobj;
  141 static void vmspace_zinit(void *mem, int size);
  142 static void vmspace_zfini(void *mem, int size);
  143 static void vm_map_zinit(void *mem, int size);
  144 static void vm_map_zfini(void *mem, int size);
  145 static void _vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max);
  146 
  147 #ifdef INVARIANTS
  148 static void vm_map_zdtor(void *mem, int size, void *arg);
  149 static void vmspace_zdtor(void *mem, int size, void *arg);
  150 #endif
  151 
  152 void
  153 vm_map_startup(void)
  154 {
  155         mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
  156 #ifdef INVARIANTS
  157             vm_map_zdtor,
  158 #else
  159             NULL,
  160 #endif
  161             vm_map_zinit, vm_map_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
  162         uma_prealloc(mapzone, MAX_KMAP);
  163         kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry), 
  164             NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
  165             UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
  166         uma_prealloc(kmapentzone, MAX_KMAPENT);
  167         mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry), 
  168             NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
  169         uma_prealloc(mapentzone, MAX_MAPENT);
  170 }
  171 
  172 static void
  173 vmspace_zfini(void *mem, int size)
  174 {
  175         struct vmspace *vm;
  176 
  177         vm = (struct vmspace *)mem;
  178 
  179         vm_map_zfini(&vm->vm_map, sizeof(vm->vm_map));
  180 }
  181 
  182 static void
  183 vmspace_zinit(void *mem, int size)
  184 {
  185         struct vmspace *vm;
  186 
  187         vm = (struct vmspace *)mem;
  188 
  189         vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map));
  190 }
  191 
  192 static void
  193 vm_map_zfini(void *mem, int size)
  194 {
  195         vm_map_t map;
  196 
  197         map = (vm_map_t)mem;
  198 
  199         lockdestroy(&map->lock);
  200 }
  201 
  202 static void
  203 vm_map_zinit(void *mem, int size)
  204 {
  205         vm_map_t map;
  206 
  207         map = (vm_map_t)mem;
  208         map->nentries = 0;
  209         map->size = 0;
  210         map->infork = 0;
  211         lockinit(&map->lock, PVM, "thrd_sleep", 0, LK_NOPAUSE);
  212 }
  213 
  214 #ifdef INVARIANTS
  215 static void
  216 vmspace_zdtor(void *mem, int size, void *arg)
  217 {
  218         struct vmspace *vm;
  219 
  220         vm = (struct vmspace *)mem;
  221 
  222         vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
  223 }
  224 static void
  225 vm_map_zdtor(void *mem, int size, void *arg)
  226 {
  227         vm_map_t map;
  228 
  229         map = (vm_map_t)mem;
  230         KASSERT(map->nentries == 0,
  231             ("map %p nentries == %d on free.", 
  232             map, map->nentries));
  233         KASSERT(map->size == 0,
  234             ("map %p size == %lu on free.",
  235             map, (unsigned long)map->size));
  236         KASSERT(map->infork == 0,
  237             ("map %p infork == %d on free.",
  238             map, map->infork));
  239 }
  240 #endif  /* INVARIANTS */
  241 
  242 /*
  243  * Allocate a vmspace structure, including a vm_map and pmap,
  244  * and initialize those structures.  The refcnt is set to 1.
  245  * The remaining fields must be initialized by the caller.
  246  */
  247 struct vmspace *
  248 vmspace_alloc(min, max)
  249         vm_offset_t min, max;
  250 {
  251         struct vmspace *vm;
  252 
  253         GIANT_REQUIRED;
  254         vm = uma_zalloc(vmspace_zone, M_WAITOK);
  255         CTR1(KTR_VM, "vmspace_alloc: %p", vm);
  256         _vm_map_init(&vm->vm_map, min, max);
  257         pmap_pinit(vmspace_pmap(vm));
  258         vm->vm_map.pmap = vmspace_pmap(vm);             /* XXX */
  259         vm->vm_refcnt = 1;
  260         vm->vm_shm = NULL;
  261         vm->vm_exitingcnt = 0;
  262         return (vm);
  263 }
  264 
  265 void
  266 vm_init2(void) 
  267 {
  268         uma_zone_set_obj(kmapentzone, &kmapentobj, lmin(cnt.v_page_count,
  269             (VM_MAX_KERNEL_ADDRESS - KERNBASE) / PAGE_SIZE) / 8);
  270         vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
  271 #ifdef INVARIANTS
  272             vmspace_zdtor,
  273 #else
  274             NULL,
  275 #endif
  276             vmspace_zinit, vmspace_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
  277         pmap_init2();
  278         vm_object_init2();
  279 }
  280 
  281 static __inline void
  282 vmspace_dofree(struct vmspace *vm)
  283 {
  284         CTR1(KTR_VM, "vmspace_free: %p", vm);
  285         /*
  286          * Lock the map, to wait out all other references to it.
  287          * Delete all of the mappings and pages they hold, then call
  288          * the pmap module to reclaim anything left.
  289          */
  290         vm_map_lock(&vm->vm_map);
  291         (void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
  292             vm->vm_map.max_offset);
  293         vm_map_unlock(&vm->vm_map);
  294 
  295         pmap_release(vmspace_pmap(vm));
  296         uma_zfree(vmspace_zone, vm);
  297 }
  298 
  299 void
  300 vmspace_free(struct vmspace *vm)
  301 {
  302         GIANT_REQUIRED;
  303 
  304         if (vm->vm_refcnt == 0)
  305                 panic("vmspace_free: attempt to free already freed vmspace");
  306 
  307         if (--vm->vm_refcnt == 0 && vm->vm_exitingcnt == 0)
  308                 vmspace_dofree(vm);
  309 }
  310 
  311 void
  312 vmspace_exitfree(struct proc *p)
  313 {
  314         struct vmspace *vm;
  315 
  316         GIANT_REQUIRED;
  317         vm = p->p_vmspace;
  318         p->p_vmspace = NULL;
  319 
  320         /*
  321          * cleanup by parent process wait()ing on exiting child.  vm_refcnt
  322          * may not be 0 (e.g. fork() and child exits without exec()ing).
  323          * exitingcnt may increment above 0 and drop back down to zero
  324          * several times while vm_refcnt is held non-zero.  vm_refcnt
  325          * may also increment above 0 and drop back down to zero several 
  326          * times while vm_exitingcnt is held non-zero.
  327          * 
  328          * The last wait on the exiting child's vmspace will clean up 
  329          * the remainder of the vmspace.
  330          */
  331         if (--vm->vm_exitingcnt == 0 && vm->vm_refcnt == 0)
  332                 vmspace_dofree(vm);
  333 }
  334 
  335 /*
   336  * vmspace_swap_count() - count the approximate swap usage in pages for a
  337  *                        vmspace.
  338  *
   339  *      Swap usage is determined by taking the proportional swap used by
  340  *      VM objects backing the VM map.  To make up for fractional losses,
  341  *      if the VM object has any swap use at all the associated map entries
  342  *      count for at least 1 swap page.
  343  */
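/*
 * Worked example of the proportional charge computed below (illustrative
 * numbers only): an entry mapping n = 64 pages of a 256-page OBJT_SWAP
 * object whose pager records swp_bcount = 4 blocks of SWAP_META_PAGES
 * pages each is charged
 *
 *	4 * SWAP_META_PAGES * 64 / 256 + 1  =  SWAP_META_PAGES + 1
 *
 * swap pages; the trailing "+ 1" implements the at-least-one-page rule
 * described above.
 */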
  344 int
  345 vmspace_swap_count(struct vmspace *vmspace)
  346 {
  347         vm_map_t map = &vmspace->vm_map;
  348         vm_map_entry_t cur;
  349         int count = 0;
  350 
  351         vm_map_lock_read(map);
  352         for (cur = map->header.next; cur != &map->header; cur = cur->next) {
  353                 vm_object_t object;
  354 
  355                 if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
  356                     (object = cur->object.vm_object) != NULL &&
  357                     object->type == OBJT_SWAP
  358                 ) {
  359                         int n = (cur->end - cur->start) / PAGE_SIZE;
  360 
  361                         if (object->un_pager.swp.swp_bcount) {
  362                                 count += object->un_pager.swp.swp_bcount *
  363                                     SWAP_META_PAGES * n / object->size + 1;
  364                         }
  365                 }
  366         }
  367         vm_map_unlock_read(map);
  368         return (count);
  369 }
  370 
  371 void
  372 _vm_map_lock(vm_map_t map, const char *file, int line)
  373 {
  374         int error;
  375 
  376         if (map->system_map)
  377                 GIANT_REQUIRED;
  378         error = lockmgr(&map->lock, LK_EXCLUSIVE, NULL, curthread);
  379         KASSERT(error == 0, ("%s: failed to get lock", __func__));
  380         map->timestamp++;
  381 }
  382 
  383 void
  384 _vm_map_unlock(vm_map_t map, const char *file, int line)
  385 {
  386 
  387         lockmgr(&map->lock, LK_RELEASE, NULL, curthread);
  388 }
  389 
  390 void
  391 _vm_map_lock_read(vm_map_t map, const char *file, int line)
  392 {
  393         int error;
  394 
  395         if (map->system_map)
  396                 GIANT_REQUIRED;
  397         error = lockmgr(&map->lock, LK_EXCLUSIVE, NULL, curthread);
  398         KASSERT(error == 0, ("%s: failed to get lock", __func__));
  399 }
  400 
  401 void
  402 _vm_map_unlock_read(vm_map_t map, const char *file, int line)
  403 {
  404 
  405         lockmgr(&map->lock, LK_RELEASE, NULL, curthread);
  406 }
  407 
  408 int
  409 _vm_map_trylock(vm_map_t map, const char *file, int line)
  410 {
  411         int error;
  412 
  413         if (map->system_map)
  414                 GIANT_REQUIRED;
  415         error = lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, NULL, curthread);
  416         return (error == 0);
  417 }
  418 
  419 int
  420 _vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
  421 {
  422 
  423         KASSERT(lockstatus(&map->lock, curthread) == LK_EXCLUSIVE,
  424                 ("%s: lock not held", __func__));
  425         map->timestamp++;
  426         return (0);
  427 }
  428 
  429 void
  430 _vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
  431 {
  432 
  433         KASSERT(lockstatus(&map->lock, curthread) == LK_EXCLUSIVE,
  434                 ("%s: lock not held", __func__));
  435 }
  436 
  437 /*
  438  *      vm_map_unlock_and_wait:
  439  */
  440 int
  441 vm_map_unlock_and_wait(vm_map_t map, boolean_t user_wait)
  442 {
  443         int retval;
  444 
  445         mtx_lock(&Giant);
  446         vm_map_unlock(map);
  447         retval = tsleep(&map->root, PVM, "vmmapw", 0);
  448         mtx_unlock(&Giant);
  449         return (retval);
  450 }
  451 
  452 /*
  453  *      vm_map_wakeup:
  454  */
  455 void
  456 vm_map_wakeup(vm_map_t map)
  457 {
  458 
  459         /*
  460          * Acquire and release Giant to prevent a wakeup() from being
  461          * performed (and lost) between the vm_map_unlock() and the
  462          * tsleep() in vm_map_unlock_and_wait().
  463          */
  464         mtx_lock(&Giant);
  465         mtx_unlock(&Giant);
  466         wakeup(&map->root);
  467 }
  468 
  469 long
  470 vmspace_resident_count(struct vmspace *vmspace)
  471 {
  472         return pmap_resident_count(vmspace_pmap(vmspace));
  473 }
  474 
  475 /*
  476  *      vm_map_create:
  477  *
  478  *      Creates and returns a new empty VM map with
  479  *      the given physical map structure, and having
  480  *      the given lower and upper address bounds.
  481  */
  482 vm_map_t
  483 vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
  484 {
  485         vm_map_t result;
  486 
  487         result = uma_zalloc(mapzone, M_WAITOK);
  488         CTR1(KTR_VM, "vm_map_create: %p", result);
  489         _vm_map_init(result, min, max);
  490         result->pmap = pmap;
  491         return (result);
  492 }
  493 
  494 /*
  495  * Initialize an existing vm_map structure
  496  * such as that in the vmspace structure.
  497  * The pmap is set elsewhere.
  498  */
  499 static void
  500 _vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max)
  501 {
  502 
  503         map->header.next = map->header.prev = &map->header;
  504         map->needs_wakeup = FALSE;
  505         map->system_map = 0;
  506         map->min_offset = min;
  507         map->max_offset = max;
  508         map->first_free = &map->header;
  509         map->root = NULL;
  510         map->timestamp = 0;
  511 }
  512 
  513 void
  514 vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max)
  515 {
  516         _vm_map_init(map, min, max);
  517         lockinit(&map->lock, PVM, "thrd_sleep", 0, LK_NOPAUSE);
  518 }
  519 
  520 /*
  521  *      vm_map_entry_dispose:   [ internal use only ]
  522  *
  523  *      Inverse of vm_map_entry_create.
  524  */
  525 static void
  526 vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
  527 {
  528         uma_zfree(map->system_map ? kmapentzone : mapentzone, entry);
  529 }
  530 
  531 /*
  532  *      vm_map_entry_create:    [ internal use only ]
  533  *
  534  *      Allocates a VM map entry for insertion.
  535  *      No entry fields are filled in.
  536  */
  537 static vm_map_entry_t
  538 vm_map_entry_create(vm_map_t map)
  539 {
  540         vm_map_entry_t new_entry;
  541 
  542         if (map->system_map)
  543                 new_entry = uma_zalloc(kmapentzone, M_NOWAIT);
  544         else
  545                 new_entry = uma_zalloc(mapentzone, M_WAITOK);
  546         if (new_entry == NULL)
  547                 panic("vm_map_entry_create: kernel resources exhausted");
  548         return (new_entry);
  549 }
  550 
  551 /*
  552  *      vm_map_entry_set_behavior:
  553  *
  554  *      Set the expected access behavior, either normal, random, or
  555  *      sequential.
  556  */
  557 static __inline void
  558 vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior)
  559 {
  560         entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
  561             (behavior & MAP_ENTRY_BEHAV_MASK);
  562 }
  563 
  564 /*
  565  *      vm_map_entry_splay:
  566  *
  567  *      Implements Sleator and Tarjan's top-down splay algorithm.  Returns
  568  *      the vm_map_entry containing the given address.  If, however, that
  569  *      address is not found in the vm_map, returns a vm_map_entry that is
  570  *      adjacent to the address, coming before or after it.
  571  */
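/*
 * Supplementary note on the implementation below: "dummy" anchors two
 * temporary trees while the search descends; lefttreemax collects
 * entries that lie entirely below the address, righttreemin collects
 * entries that lie entirely above it, and the final assembly step hangs
 * those two trees off the new root.  Callers are expected to store the
 * returned root back into map->root, as vm_map_lookup_entry() and
 * vm_map_entry_unlink() do.
 */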
  572 static vm_map_entry_t
  573 vm_map_entry_splay(vm_offset_t address, vm_map_entry_t root)
  574 {
  575         struct vm_map_entry dummy;
  576         vm_map_entry_t lefttreemax, righttreemin, y;
  577 
  578         if (root == NULL)
  579                 return (root);
  580         lefttreemax = righttreemin = &dummy;
  581         for (;; root = y) {
  582                 if (address < root->start) {
  583                         if ((y = root->left) == NULL)
  584                                 break;
  585                         if (address < y->start) {
  586                                 /* Rotate right. */
  587                                 root->left = y->right;
  588                                 y->right = root;
  589                                 root = y;
  590                                 if ((y = root->left) == NULL)
  591                                         break;
  592                         }
  593                         /* Link into the new root's right tree. */
  594                         righttreemin->left = root;
  595                         righttreemin = root;
  596                 } else if (address >= root->end) {
  597                         if ((y = root->right) == NULL)
  598                                 break;
  599                         if (address >= y->end) {
  600                                 /* Rotate left. */
  601                                 root->right = y->left;
  602                                 y->left = root;
  603                                 root = y;
  604                                 if ((y = root->right) == NULL)
  605                                         break;
  606                         }
  607                         /* Link into the new root's left tree. */
  608                         lefttreemax->right = root;
  609                         lefttreemax = root;
  610                 } else
  611                         break;
  612         }
  613         /* Assemble the new root. */
  614         lefttreemax->right = root->left;
  615         righttreemin->left = root->right;
  616         root->left = dummy.right;
  617         root->right = dummy.left;
  618         return (root);
  619 }
  620 
  621 /*
  622  *      vm_map_entry_{un,}link:
  623  *
  624  *      Insert/remove entries from maps.
  625  */
  626 static void
  627 vm_map_entry_link(vm_map_t map,
  628                   vm_map_entry_t after_where,
  629                   vm_map_entry_t entry)
  630 {
  631 
  632         CTR4(KTR_VM,
  633             "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map,
  634             map->nentries, entry, after_where);
  635         map->nentries++;
  636         entry->prev = after_where;
  637         entry->next = after_where->next;
  638         entry->next->prev = entry;
  639         after_where->next = entry;
  640 
  641         if (after_where != &map->header) {
  642                 if (after_where != map->root)
  643                         vm_map_entry_splay(after_where->start, map->root);
  644                 entry->right = after_where->right;
  645                 entry->left = after_where;
  646                 after_where->right = NULL;
  647         } else {
  648                 entry->right = map->root;
  649                 entry->left = NULL;
  650         }
  651         map->root = entry;
  652 }
  653 
  654 static void
  655 vm_map_entry_unlink(vm_map_t map,
  656                     vm_map_entry_t entry)
  657 {
  658         vm_map_entry_t next, prev, root;
  659 
  660         if (entry != map->root)
  661                 vm_map_entry_splay(entry->start, map->root);
  662         if (entry->left == NULL)
  663                 root = entry->right;
  664         else {
  665                 root = vm_map_entry_splay(entry->start, entry->left);
  666                 root->right = entry->right;
  667         }
  668         map->root = root;
  669 
  670         prev = entry->prev;
  671         next = entry->next;
  672         next->prev = prev;
  673         prev->next = next;
  674         map->nentries--;
  675         CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
  676             map->nentries, entry);
  677 }
  678 
  679 /*
  680  *      vm_map_lookup_entry:    [ internal use only ]
  681  *
  682  *      Finds the map entry containing (or
  683  *      immediately preceding) the specified address
  684  *      in the given map; the entry is returned
  685  *      in the "entry" parameter.  The boolean
  686  *      result indicates whether the address is
  687  *      actually contained in the map.
  688  */
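/*
 * A minimal usage sketch of this contract (the same pattern appears in
 * vm_map_insert() and vm_map_protect() below):
 *
 *	vm_map_entry_t entry;
 *
 *	if (vm_map_lookup_entry(map, addr, &entry)) {
 *		... addr lies inside [entry->start, entry->end) ...
 *	} else {
 *		... entry is the closest entry preceding addr, or
 *		    &map->header if there is none; no entry contains addr ...
 *	}
 */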
  689 boolean_t
  690 vm_map_lookup_entry(
  691         vm_map_t map,
  692         vm_offset_t address,
  693         vm_map_entry_t *entry)  /* OUT */
  694 {
  695         vm_map_entry_t cur;
  696 
  697         cur = vm_map_entry_splay(address, map->root);
  698         if (cur == NULL)
  699                 *entry = &map->header;
  700         else {
  701                 map->root = cur;
  702 
  703                 if (address >= cur->start) {
  704                         *entry = cur;
  705                         if (cur->end > address)
  706                                 return (TRUE);
  707                 } else
  708                         *entry = cur->prev;
  709         }
  710         return (FALSE);
  711 }
  712 
  713 /*
  714  *      vm_map_insert:
  715  *
  716  *      Inserts the given whole VM object into the target
  717  *      map at the specified address range.  The object's
  718  *      size should match that of the address range.
  719  *
  720  *      Requires that the map be locked, and leaves it so.
  721  *
  722  *      If object is non-NULL, ref count must be bumped by caller
  723  *      prior to making call to account for the new entry.
  724  */
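/*
 * A minimal call sketch for the reference-count rule above (hypothetical
 * caller; other error handling elided):
 *
 *	vm_object_reference(object);
 *	rv = vm_map_insert(map, object, offset, start, end,
 *	    prot, max, MAP_COPY_ON_WRITE);
 *	if (rv != KERN_SUCCESS)
 *		vm_object_deallocate(object);
 *
 * With a NULL object and cow == 0 the call simply reserves the address
 * range, as the buffer map does for kva that has no backing object.
 */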
  725 int
  726 vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
  727               vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max,
  728               int cow)
  729 {
  730         vm_map_entry_t new_entry;
  731         vm_map_entry_t prev_entry;
  732         vm_map_entry_t temp_entry;
  733         vm_eflags_t protoeflags;
  734 
  735         /*
  736          * Check that the start and end points are not bogus.
  737          */
  738         if ((start < map->min_offset) || (end > map->max_offset) ||
  739             (start >= end))
  740                 return (KERN_INVALID_ADDRESS);
  741 
  742         /*
  743          * Find the entry prior to the proposed starting address; if it's part
  744          * of an existing entry, this range is bogus.
  745          */
  746         if (vm_map_lookup_entry(map, start, &temp_entry))
  747                 return (KERN_NO_SPACE);
  748 
  749         prev_entry = temp_entry;
  750 
  751         /*
  752          * Assert that the next entry doesn't overlap the end point.
  753          */
  754         if ((prev_entry->next != &map->header) &&
  755             (prev_entry->next->start < end))
  756                 return (KERN_NO_SPACE);
  757 
  758         protoeflags = 0;
  759 
  760         if (cow & MAP_COPY_ON_WRITE)
  761                 protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;
  762 
  763         if (cow & MAP_NOFAULT) {
  764                 protoeflags |= MAP_ENTRY_NOFAULT;
  765 
  766                 KASSERT(object == NULL,
  767                         ("vm_map_insert: paradoxical MAP_NOFAULT request"));
  768         }
  769         if (cow & MAP_DISABLE_SYNCER)
  770                 protoeflags |= MAP_ENTRY_NOSYNC;
  771         if (cow & MAP_DISABLE_COREDUMP)
  772                 protoeflags |= MAP_ENTRY_NOCOREDUMP;
  773 
  774         if (object) {
  775                 /*
  776                  * When object is non-NULL, it could be shared with another
  777                  * process.  We have to set or clear OBJ_ONEMAPPING 
  778                  * appropriately.
  779                  */
  780                 vm_object_lock(object);
  781                 if ((object->ref_count > 1) || (object->shadow_count != 0)) {
  782                         vm_object_clear_flag(object, OBJ_ONEMAPPING);
  783                 }
  784                 vm_object_unlock(object);
  785         }
  786         else if ((prev_entry != &map->header) &&
  787                  (prev_entry->eflags == protoeflags) &&
  788                  (prev_entry->end == start) &&
  789                  (prev_entry->wired_count == 0) &&
  790                  ((prev_entry->object.vm_object == NULL) ||
  791                   vm_object_coalesce(prev_entry->object.vm_object,
  792                                      OFF_TO_IDX(prev_entry->offset),
  793                                      (vm_size_t)(prev_entry->end - prev_entry->start),
  794                                      (vm_size_t)(end - prev_entry->end)))) {
  795                 /*
  796                  * We were able to extend the object.  Determine if we
  797                  * can extend the previous map entry to include the 
  798                  * new range as well.
  799                  */
  800                 if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
  801                     (prev_entry->protection == prot) &&
  802                     (prev_entry->max_protection == max)) {
  803                         map->size += (end - prev_entry->end);
  804                         prev_entry->end = end;
  805                         vm_map_simplify_entry(map, prev_entry);
  806                         return (KERN_SUCCESS);
  807                 }
  808 
  809                 /*
  810                  * If we can extend the object but cannot extend the
  811                  * map entry, we have to create a new map entry.  We
  812                  * must bump the ref count on the extended object to
  813                  * account for it.  object may be NULL.
  814                  */
  815                 object = prev_entry->object.vm_object;
  816                 offset = prev_entry->offset +
  817                         (prev_entry->end - prev_entry->start);
  818                 vm_object_reference(object);
  819         }
  820 
  821         /*
  822          * NOTE: if conditionals fail, object can be NULL here.  This occurs
  823          * in things like the buffer map where we manage kva but do not manage
  824          * backing objects.
  825          */
  826 
  827         /*
  828          * Create a new entry
  829          */
  830         new_entry = vm_map_entry_create(map);
  831         new_entry->start = start;
  832         new_entry->end = end;
  833 
  834         new_entry->eflags = protoeflags;
  835         new_entry->object.vm_object = object;
  836         new_entry->offset = offset;
  837         new_entry->avail_ssize = 0;
  838 
  839         new_entry->inheritance = VM_INHERIT_DEFAULT;
  840         new_entry->protection = prot;
  841         new_entry->max_protection = max;
  842         new_entry->wired_count = 0;
  843 
  844         /*
  845          * Insert the new entry into the list
  846          */
  847         vm_map_entry_link(map, prev_entry, new_entry);
  848         map->size += new_entry->end - new_entry->start;
  849 
  850         /*
  851          * Update the free space hint
  852          */
  853         if ((map->first_free == prev_entry) &&
  854             (prev_entry->end >= new_entry->start)) {
  855                 map->first_free = new_entry;
  856         }
  857 
  858 #if 0
  859         /*
  860          * Temporarily removed to avoid MAP_STACK panic, due to
  861          * MAP_STACK being a huge hack.  Will be added back in
  862          * when MAP_STACK (and the user stack mapping) is fixed.
  863          */
  864         /*
  865          * It may be possible to simplify the entry
  866          */
  867         vm_map_simplify_entry(map, new_entry);
  868 #endif
  869 
  870         if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) {
  871                 mtx_lock(&Giant);
  872                 pmap_object_init_pt(map->pmap, start,
  873                                     object, OFF_TO_IDX(offset), end - start,
  874                                     cow & MAP_PREFAULT_PARTIAL);
  875                 mtx_unlock(&Giant);
  876         }
  877 
  878         return (KERN_SUCCESS);
  879 }
  880 
  881 /*
  882  * Find sufficient space for `length' bytes in the given map, starting at
  883  * `start'.  The map must be locked.  Returns 0 on success, 1 on no space.
  884  */
  885 int
  886 vm_map_findspace(
  887         vm_map_t map,
  888         vm_offset_t start,
  889         vm_size_t length,
  890         vm_offset_t *addr)
  891 {
  892         vm_map_entry_t entry, next;
  893         vm_offset_t end;
  894 
  895         if (start < map->min_offset)
  896                 start = map->min_offset;
  897         if (start > map->max_offset)
  898                 return (1);
  899 
  900         /*
  901          * Look for the first possible address; if there's already something
  902          * at this address, we have to start after it.
  903          */
  904         if (start == map->min_offset) {
  905                 if ((entry = map->first_free) != &map->header)
  906                         start = entry->end;
  907         } else {
  908                 vm_map_entry_t tmp;
  909 
  910                 if (vm_map_lookup_entry(map, start, &tmp))
  911                         start = tmp->end;
  912                 entry = tmp;
  913         }
  914 
  915         /*
  916          * Look through the rest of the map, trying to fit a new region in the
  917          * gap between existing regions, or after the very last region.
  918          */
  919         for (;; start = (entry = next)->end) {
  920                 /*
  921                  * Find the end of the proposed new region.  Be sure we didn't
  922                  * go beyond the end of the map, or wrap around the address;
  923                  * if so, we lose.  Otherwise, if this is the last entry, or
  924                  * if the proposed new region fits before the next entry, we
  925                  * win.
  926                  */
  927                 end = start + length;
  928                 if (end > map->max_offset || end < start)
  929                         return (1);
  930                 next = entry->next;
  931                 if (next == &map->header || next->start >= end)
  932                         break;
  933         }
  934         *addr = start;
  935         if (map == kernel_map) {
  936                 vm_offset_t ksize;
  937                 if ((ksize = round_page(start + length)) > kernel_vm_end) {
  938                         mtx_lock(&Giant);
  939                         pmap_growkernel(ksize);
  940                         mtx_unlock(&Giant);
  941                 }
  942         }
  943         return (0);
  944 }
  945 
  946 /*
  947  *      vm_map_find finds an unallocated region in the target address
  948  *      map with the given length.  The search is defined to be
  949  *      first-fit from the specified address; the region found is
  950  *      returned in the same parameter.
  951  *
  952  *      If object is non-NULL, ref count must be bumped by caller
  953  *      prior to making call to account for the new entry.
  954  */
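/*
 * A minimal usage sketch (hypothetical caller): reserve `length' bytes of
 * anonymous space at the first fit at or above a hint address:
 *
 *	vm_offset_t addr;
 *
 *	addr = hint;
 *	if (vm_map_find(map, NULL, 0, &addr, length, TRUE,
 *	    VM_PROT_ALL, VM_PROT_ALL, 0) == KERN_SUCCESS)
 *		... [addr, addr + length) is now backed by a map entry ...
 */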
  955 int
  956 vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
  957             vm_offset_t *addr,  /* IN/OUT */
  958             vm_size_t length, boolean_t find_space, vm_prot_t prot,
  959             vm_prot_t max, int cow)
  960 {
  961         vm_offset_t start;
  962         int result, s = 0;
  963 
  964         start = *addr;
  965 
  966         if (map == kmem_map)
  967                 s = splvm();
  968 
  969         vm_map_lock(map);
  970         if (find_space) {
  971                 if (vm_map_findspace(map, start, length, addr)) {
  972                         vm_map_unlock(map);
  973                         if (map == kmem_map)
  974                                 splx(s);
  975                         return (KERN_NO_SPACE);
  976                 }
  977                 start = *addr;
  978         }
  979         result = vm_map_insert(map, object, offset,
  980                 start, start + length, prot, max, cow);
  981         vm_map_unlock(map);
  982 
  983         if (map == kmem_map)
  984                 splx(s);
  985 
  986         return (result);
  987 }
  988 
  989 /*
  990  *      vm_map_simplify_entry:
  991  *
  992  *      Simplify the given map entry by merging with either neighbor.  This
  993  *      routine also has the ability to merge with both neighbors.
  994  *
  995  *      The map must be locked.
  996  *
   997  *      This routine guarantees that the passed entry remains valid (though
  998  *      possibly extended).  When merging, this routine may delete one or
  999  *      both neighbors.
 1000  */
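/*
 * For example (a sketch of the checks below): the neighbors
 *
 *	[0x1000, 0x3000) at offset 0x0  and  [0x3000, 0x5000) at offset 0x2000
 *
 * backed by the same object, with identical eflags, protection,
 * max_protection, inheritance, and wired_count, collapse into a single
 * entry [0x1000, 0x5000) at offset 0x0; the discarded entry's object
 * reference is dropped via vm_object_deallocate().
 */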
 1001 void
 1002 vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
 1003 {
 1004         vm_map_entry_t next, prev;
 1005         vm_size_t prevsize, esize;
 1006 
 1007         if (entry->eflags & (MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP))
 1008                 return;
 1009 
 1010         prev = entry->prev;
 1011         if (prev != &map->header) {
 1012                 prevsize = prev->end - prev->start;
 1013                 if ( (prev->end == entry->start) &&
 1014                      (prev->object.vm_object == entry->object.vm_object) &&
 1015                      (!prev->object.vm_object ||
 1016                         (prev->offset + prevsize == entry->offset)) &&
 1017                      (prev->eflags == entry->eflags) &&
 1018                      (prev->protection == entry->protection) &&
 1019                      (prev->max_protection == entry->max_protection) &&
 1020                      (prev->inheritance == entry->inheritance) &&
 1021                      (prev->wired_count == entry->wired_count)) {
 1022                         if (map->first_free == prev)
 1023                                 map->first_free = entry;
 1024                         vm_map_entry_unlink(map, prev);
 1025                         entry->start = prev->start;
 1026                         entry->offset = prev->offset;
 1027                         if (prev->object.vm_object)
 1028                                 vm_object_deallocate(prev->object.vm_object);
 1029                         vm_map_entry_dispose(map, prev);
 1030                 }
 1031         }
 1032 
 1033         next = entry->next;
 1034         if (next != &map->header) {
 1035                 esize = entry->end - entry->start;
 1036                 if ((entry->end == next->start) &&
 1037                     (next->object.vm_object == entry->object.vm_object) &&
 1038                      (!entry->object.vm_object ||
 1039                         (entry->offset + esize == next->offset)) &&
 1040                     (next->eflags == entry->eflags) &&
 1041                     (next->protection == entry->protection) &&
 1042                     (next->max_protection == entry->max_protection) &&
 1043                     (next->inheritance == entry->inheritance) &&
 1044                     (next->wired_count == entry->wired_count)) {
 1045                         if (map->first_free == next)
 1046                                 map->first_free = entry;
 1047                         vm_map_entry_unlink(map, next);
 1048                         entry->end = next->end;
 1049                         if (next->object.vm_object)
 1050                                 vm_object_deallocate(next->object.vm_object);
 1051                         vm_map_entry_dispose(map, next);
 1052                 }
 1053         }
 1054 }
 1055 /*
 1056  *      vm_map_clip_start:      [ internal use only ]
 1057  *
 1058  *      Asserts that the given entry begins at or after
 1059  *      the specified address; if necessary,
 1060  *      it splits the entry into two.
 1061  */
 1062 #define vm_map_clip_start(map, entry, startaddr) \
 1063 { \
 1064         if (startaddr > entry->start) \
 1065                 _vm_map_clip_start(map, entry, startaddr); \
 1066 }
 1067 
 1068 /*
 1069  *      This routine is called only when it is known that
 1070  *      the entry must be split.
 1071  */
 1072 static void
 1073 _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
 1074 {
 1075         vm_map_entry_t new_entry;
 1076 
 1077         /*
 1078          * Split off the front portion -- note that we must insert the new
 1079          * entry BEFORE this one, so that this entry has the specified
 1080          * starting address.
 1081          */
 1082         vm_map_simplify_entry(map, entry);
 1083 
 1084         /*
 1085          * If there is no object backing this entry, we might as well create
 1086          * one now.  If we defer it, an object can get created after the map
 1087          * is clipped, and individual objects will be created for the split-up
 1088          * map.  This is a bit of a hack, but is also about the best place to
 1089          * put this improvement.
 1090          */
 1091         if (entry->object.vm_object == NULL && !map->system_map) {
 1092                 vm_object_t object;
 1093                 object = vm_object_allocate(OBJT_DEFAULT,
 1094                                 atop(entry->end - entry->start));
 1095                 entry->object.vm_object = object;
 1096                 entry->offset = 0;
 1097         }
 1098 
 1099         new_entry = vm_map_entry_create(map);
 1100         *new_entry = *entry;
 1101 
 1102         new_entry->end = start;
 1103         entry->offset += (start - entry->start);
 1104         entry->start = start;
 1105 
 1106         vm_map_entry_link(map, entry->prev, new_entry);
 1107 
 1108         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
 1109                 vm_object_reference(new_entry->object.vm_object);
 1110         }
 1111 }
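
/*
 * For example (a sketch of the arithmetic above): clipping the entry
 * [0x1000, 0x5000) at offset 0x0 with start = 0x3000 leaves
 *
 *	new_entry: [0x1000, 0x3000) at offset 0x0     (linked in before)
 *	entry:     [0x3000, 0x5000) at offset 0x2000
 *
 * with both halves referencing the same backing object, which is why a
 * reference is added for non-submap entries.
 */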
 1112 
 1113 /*
 1114  *      vm_map_clip_end:        [ internal use only ]
 1115  *
 1116  *      Asserts that the given entry ends at or before
 1117  *      the specified address; if necessary,
 1118  *      it splits the entry into two.
 1119  */
 1120 #define vm_map_clip_end(map, entry, endaddr) \
 1121 { \
 1122         if ((endaddr) < (entry->end)) \
 1123                 _vm_map_clip_end((map), (entry), (endaddr)); \
 1124 }
 1125 
 1126 /*
 1127  *      This routine is called only when it is known that
 1128  *      the entry must be split.
 1129  */
 1130 static void
 1131 _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
 1132 {
 1133         vm_map_entry_t new_entry;
 1134 
 1135         /*
 1136          * If there is no object backing this entry, we might as well create
 1137          * one now.  If we defer it, an object can get created after the map
 1138          * is clipped, and individual objects will be created for the split-up
 1139          * map.  This is a bit of a hack, but is also about the best place to
 1140          * put this improvement.
 1141          */
 1142         if (entry->object.vm_object == NULL && !map->system_map) {
 1143                 vm_object_t object;
 1144                 object = vm_object_allocate(OBJT_DEFAULT,
 1145                                 atop(entry->end - entry->start));
 1146                 entry->object.vm_object = object;
 1147                 entry->offset = 0;
 1148         }
 1149 
 1150         /*
 1151          * Create a new entry and insert it AFTER the specified entry
 1152          */
 1153         new_entry = vm_map_entry_create(map);
 1154         *new_entry = *entry;
 1155 
 1156         new_entry->start = entry->end = end;
 1157         new_entry->offset += (end - entry->start);
 1158 
 1159         vm_map_entry_link(map, entry, new_entry);
 1160 
 1161         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
 1162                 vm_object_reference(new_entry->object.vm_object);
 1163         }
 1164 }
 1165 
 1166 /*
 1167  *      VM_MAP_RANGE_CHECK:     [ internal use only ]
 1168  *
 1169  *      Asserts that the starting and ending region
 1170  *      addresses fall within the valid range of the map.
 1171  */
 1172 #define VM_MAP_RANGE_CHECK(map, start, end)             \
 1173                 {                                       \
 1174                 if (start < vm_map_min(map))            \
 1175                         start = vm_map_min(map);        \
 1176                 if (end > vm_map_max(map))              \
 1177                         end = vm_map_max(map);          \
 1178                 if (start > end)                        \
 1179                         start = end;                    \
 1180                 }
 1181 
 1182 /*
 1183  *      vm_map_submap:          [ kernel use only ]
 1184  *
 1185  *      Mark the given range as handled by a subordinate map.
 1186  *
 1187  *      This range must have been created with vm_map_find,
 1188  *      and no other operations may have been performed on this
 1189  *      range prior to calling vm_map_submap.
 1190  *
 1191  *      Only a limited number of operations can be performed
  1192  *      within this range after calling vm_map_submap:
 1193  *              vm_fault
 1194  *      [Don't try vm_map_copy!]
 1195  *
 1196  *      To remove a submapping, one must first remove the
 1197  *      range from the superior map, and then destroy the
 1198  *      submap (if desired).  [Better yet, don't try it.]
 1199  */
 1200 int
 1201 vm_map_submap(
 1202         vm_map_t map,
 1203         vm_offset_t start,
 1204         vm_offset_t end,
 1205         vm_map_t submap)
 1206 {
 1207         vm_map_entry_t entry;
 1208         int result = KERN_INVALID_ARGUMENT;
 1209 
 1210         vm_map_lock(map);
 1211 
 1212         VM_MAP_RANGE_CHECK(map, start, end);
 1213 
 1214         if (vm_map_lookup_entry(map, start, &entry)) {
 1215                 vm_map_clip_start(map, entry, start);
 1216         } else
 1217                 entry = entry->next;
 1218 
 1219         vm_map_clip_end(map, entry, end);
 1220 
 1221         if ((entry->start == start) && (entry->end == end) &&
 1222             ((entry->eflags & MAP_ENTRY_COW) == 0) &&
 1223             (entry->object.vm_object == NULL)) {
 1224                 entry->object.sub_map = submap;
 1225                 entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
 1226                 result = KERN_SUCCESS;
 1227         }
 1228         vm_map_unlock(map);
 1229 
 1230         return (result);
 1231 }
 1232 
 1233 /*
 1234  *      vm_map_protect:
 1235  *
 1236  *      Sets the protection of the specified address
 1237  *      region in the target map.  If "set_max" is
 1238  *      specified, the maximum protection is to be set;
 1239  *      otherwise, only the current protection is affected.
 1240  */
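/*
 * A minimal usage sketch (hypothetical caller): revoke write access on a
 * range while leaving the maximum protection alone:
 *
 *	rv = vm_map_protect(map, start, end, VM_PROT_READ, FALSE);
 *
 * With set_max == TRUE the new value becomes the maximum protection and
 * the current protection is clipped to it, as the second pass below shows.
 */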
 1241 int
 1242 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
 1243                vm_prot_t new_prot, boolean_t set_max)
 1244 {
 1245         vm_map_entry_t current;
 1246         vm_map_entry_t entry;
 1247 
 1248         vm_map_lock(map);
 1249 
 1250         VM_MAP_RANGE_CHECK(map, start, end);
 1251 
 1252         if (vm_map_lookup_entry(map, start, &entry)) {
 1253                 vm_map_clip_start(map, entry, start);
 1254         } else {
 1255                 entry = entry->next;
 1256         }
 1257 
 1258         /*
 1259          * Make a first pass to check for protection violations.
 1260          */
 1261         current = entry;
 1262         while ((current != &map->header) && (current->start < end)) {
 1263                 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
 1264                         vm_map_unlock(map);
 1265                         return (KERN_INVALID_ARGUMENT);
 1266                 }
 1267                 if ((new_prot & current->max_protection) != new_prot) {
 1268                         vm_map_unlock(map);
 1269                         return (KERN_PROTECTION_FAILURE);
 1270                 }
 1271                 current = current->next;
 1272         }
 1273 
 1274         /*
 1275          * Go back and fix up protections. [Note that clipping is not
 1276          * necessary the second time.]
 1277          */
 1278         current = entry;
 1279         while ((current != &map->header) && (current->start < end)) {
 1280                 vm_prot_t old_prot;
 1281 
 1282                 vm_map_clip_end(map, current, end);
 1283 
 1284                 old_prot = current->protection;
 1285                 if (set_max)
 1286                         current->protection =
 1287                             (current->max_protection = new_prot) &
 1288                             old_prot;
 1289                 else
 1290                         current->protection = new_prot;
 1291 
 1292                 /*
 1293                  * Update physical map if necessary. Worry about copy-on-write
 1294                  * here -- CHECK THIS XXX
 1295                  */
 1296                 if (current->protection != old_prot) {
 1297                         mtx_lock(&Giant);
 1298                         vm_page_lock_queues();
 1299 #define MASK(entry)     (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
 1300                                                         VM_PROT_ALL)
 1301                         pmap_protect(map->pmap, current->start,
 1302                             current->end,
 1303                             current->protection & MASK(current));
 1304 #undef  MASK
 1305                         vm_page_unlock_queues();
 1306                         mtx_unlock(&Giant);
 1307                 }
 1308                 vm_map_simplify_entry(map, current);
 1309                 current = current->next;
 1310         }
 1311         vm_map_unlock(map);
 1312         return (KERN_SUCCESS);
 1313 }
 1314 
 1315 /*
 1316  *      vm_map_madvise:
 1317  *
  1318  *      This routine traverses a process's map handling the madvise
  1319  *      system call.  Advisories are classified as either those affecting
  1320  *      the vm_map_entry structure, or those affecting the underlying
 1321  *      objects.
 1322  */
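/*
 * Supplementary note: in the switch below, MADV_NORMAL, MADV_SEQUENTIAL,
 * MADV_RANDOM, MADV_NOSYNC, MADV_AUTOSYNC, MADV_NOCORE, and MADV_CORE
 * update the vm_map_entry itself under an exclusive map lock, while
 * MADV_WILLNEED, MADV_DONTNEED, and MADV_FREE are forwarded to
 * vm_object_madvise() under a read lock (MADV_WILLNEED additionally
 * prefaults pages via pmap_object_init_pt()).
 */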
 1323 int
 1324 vm_map_madvise(
 1325         vm_map_t map,
 1326         vm_offset_t start, 
 1327         vm_offset_t end,
 1328         int behav)
 1329 {
 1330         vm_map_entry_t current, entry;
 1331         int modify_map = 0;
 1332 
 1333         /*
 1334          * Some madvise calls directly modify the vm_map_entry, in which case
 1335          * we need to use an exclusive lock on the map and we need to perform 
 1336          * various clipping operations.  Otherwise we only need a read-lock
 1337          * on the map.
 1338          */
 1339         switch(behav) {
 1340         case MADV_NORMAL:
 1341         case MADV_SEQUENTIAL:
 1342         case MADV_RANDOM:
 1343         case MADV_NOSYNC:
 1344         case MADV_AUTOSYNC:
 1345         case MADV_NOCORE:
 1346         case MADV_CORE:
 1347                 modify_map = 1;
 1348                 vm_map_lock(map);
 1349                 break;
 1350         case MADV_WILLNEED:
 1351         case MADV_DONTNEED:
 1352         case MADV_FREE:
 1353                 vm_map_lock_read(map);
 1354                 break;
 1355         default:
 1356                 return (KERN_INVALID_ARGUMENT);
 1357         }
 1358 
 1359         /*
 1360          * Locate starting entry and clip if necessary.
 1361          */
 1362         VM_MAP_RANGE_CHECK(map, start, end);
 1363 
 1364         if (vm_map_lookup_entry(map, start, &entry)) {
 1365                 if (modify_map)
 1366                         vm_map_clip_start(map, entry, start);
 1367         } else {
 1368                 entry = entry->next;
 1369         }
 1370 
 1371         if (modify_map) {
 1372                 /*
 1373                  * madvise behaviors that are implemented in the vm_map_entry.
 1374                  *
 1375                  * We clip the vm_map_entry so that behavioral changes are
 1376                  * limited to the specified address range.
 1377                  */
 1378                 for (current = entry;
 1379                      (current != &map->header) && (current->start < end);
 1380                      current = current->next
 1381                 ) {
 1382                         if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
 1383                                 continue;
 1384 
 1385                         vm_map_clip_end(map, current, end);
 1386 
 1387                         switch (behav) {
 1388                         case MADV_NORMAL:
 1389                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
 1390                                 break;
 1391                         case MADV_SEQUENTIAL:
 1392                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
 1393                                 break;
 1394                         case MADV_RANDOM:
 1395                                 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
 1396                                 break;
 1397                         case MADV_NOSYNC:
 1398                                 current->eflags |= MAP_ENTRY_NOSYNC;
 1399                                 break;
 1400                         case MADV_AUTOSYNC:
 1401                                 current->eflags &= ~MAP_ENTRY_NOSYNC;
 1402                                 break;
 1403                         case MADV_NOCORE:
 1404                                 current->eflags |= MAP_ENTRY_NOCOREDUMP;
 1405                                 break;
 1406                         case MADV_CORE:
 1407                                 current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
 1408                                 break;
 1409                         default:
 1410                                 break;
 1411                         }
 1412                         vm_map_simplify_entry(map, current);
 1413                 }
 1414                 vm_map_unlock(map);
 1415         } else {
 1416                 vm_pindex_t pindex;
 1417                 int count;
 1418 
 1419                 /*
 1420                  * madvise behaviors that are implemented in the underlying
 1421                  * vm_object.
 1422                  *
 1423                  * Since we don't clip the vm_map_entry, we have to clip
 1424                  * the vm_object pindex and count.
 1425                  */
 1426                 for (current = entry;
 1427                      (current != &map->header) && (current->start < end);
 1428                      current = current->next
 1429                 ) {
 1430                         vm_offset_t useStart;
 1431 
 1432                         if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
 1433                                 continue;
 1434 
 1435                         pindex = OFF_TO_IDX(current->offset);
 1436                         count = atop(current->end - current->start);
 1437                         useStart = current->start;
 1438 
 1439                         if (current->start < start) {
 1440                                 pindex += atop(start - current->start);
 1441                                 count -= atop(start - current->start);
 1442                                 useStart = start;
 1443                         }
 1444                         if (current->end > end)
 1445                                 count -= atop(current->end - end);
 1446 
 1447                         if (count <= 0)
 1448                                 continue;
 1449 
 1450                         vm_object_madvise(current->object.vm_object,
 1451                                           pindex, count, behav);
 1452                         if (behav == MADV_WILLNEED) {
 1453                                 mtx_lock(&Giant);
 1454                                 pmap_object_init_pt(
 1455                                     map->pmap, 
 1456                                     useStart,
 1457                                     current->object.vm_object,
 1458                                     pindex, 
 1459                                     (count << PAGE_SHIFT),
 1460                                     MAP_PREFAULT_MADVISE
 1461                                 );
 1462                                 mtx_unlock(&Giant);
 1463                         }
 1464                 }
 1465                 vm_map_unlock_read(map);
 1466         }
 1467         return (0);
 1468 }       
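
/*
 * A minimal illustration (not from the original file) of the object-level
 * clipping performed above: given one map entry and an madvise range
 * [start, end), compute which pages of the backing object are affected.
 * With 4K pages, an entry spanning [0x2000, 0x6000) at object offset 0 and
 * a range of [0x3000, 0x5000) yields pindex 1 and count 2.
 */
static void
example_clip_object_range(vm_map_entry_t entry, vm_offset_t start,
    vm_offset_t end, vm_pindex_t *pindex, int *count)
{

        *pindex = OFF_TO_IDX(entry->offset);
        *count = atop(entry->end - entry->start);
        if (entry->start < start) {
                *pindex += atop(start - entry->start);
                *count -= atop(start - entry->start);
        }
        if (entry->end > end)
                *count -= atop(entry->end - end);
}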
 1469 
 1470 
 1471 /*
 1472  *      vm_map_inherit:
 1473  *
 1474  *      Sets the inheritance of the specified address
 1475  *      range in the target map.  Inheritance
 1476  *      affects how the map will be shared with
 1477  *      child maps at the time of vm_map_fork.
 1478  */
 1479 int
 1480 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
 1481                vm_inherit_t new_inheritance)
 1482 {
 1483         vm_map_entry_t entry;
 1484         vm_map_entry_t temp_entry;
 1485 
 1486         switch (new_inheritance) {
 1487         case VM_INHERIT_NONE:
 1488         case VM_INHERIT_COPY:
 1489         case VM_INHERIT_SHARE:
 1490                 break;
 1491         default:
 1492                 return (KERN_INVALID_ARGUMENT);
 1493         }
 1494         vm_map_lock(map);
 1495         VM_MAP_RANGE_CHECK(map, start, end);
 1496         if (vm_map_lookup_entry(map, start, &temp_entry)) {
 1497                 entry = temp_entry;
 1498                 vm_map_clip_start(map, entry, start);
 1499         } else
 1500                 entry = temp_entry->next;
 1501         while ((entry != &map->header) && (entry->start < end)) {
 1502                 vm_map_clip_end(map, entry, end);
 1503                 entry->inheritance = new_inheritance;
 1504                 vm_map_simplify_entry(map, entry);
 1505                 entry = entry->next;
 1506         }
 1507         vm_map_unlock(map);
 1508         return (KERN_SUCCESS);
 1509 }
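
/*
 * A minimal usage sketch (hypothetical, not from the original file): mark a
 * page-aligned range so that it will be shared with child processes across
 * vm_map_fork.  "map", "start", and "len" are assumed to be supplied by the
 * caller; vm_map_inherit() takes and drops the map lock itself.
 */
static int
example_share_with_children(vm_map_t map, vm_offset_t start, vm_size_t len)
{

        return (vm_map_inherit(map, start, start + len, VM_INHERIT_SHARE));
}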
 1510 
 1511 /*
 1512  *      vm_map_unwire:
 1513  *
 1514  *      Implements both kernel and user unwiring.
 1515  */
 1516 int
 1517 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
 1518         boolean_t user_unwire)
 1519 {
 1520         vm_map_entry_t entry, first_entry, tmp_entry;
 1521         vm_offset_t saved_start;
 1522         unsigned int last_timestamp;
 1523         int rv;
 1524         boolean_t need_wakeup, result;
 1525 
 1526         vm_map_lock(map);
 1527         VM_MAP_RANGE_CHECK(map, start, end);
 1528         if (!vm_map_lookup_entry(map, start, &first_entry)) {
 1529                 vm_map_unlock(map);
 1530                 return (KERN_INVALID_ADDRESS);
 1531         }
 1532         last_timestamp = map->timestamp;
 1533         entry = first_entry;
 1534         while (entry != &map->header && entry->start < end) {
 1535                 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
 1536                         /*
 1537                          * We have not yet clipped the entry.
 1538                          */
 1539                         saved_start = (start >= entry->start) ? start :
 1540                             entry->start;
 1541                         entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
 1542                         if (vm_map_unlock_and_wait(map, user_unwire)) {
 1543                                 /*
 1544                                  * Allow interruption of user unwiring?
 1545                                  */
 1546                         }
 1547                         vm_map_lock(map);
 1548                         if (last_timestamp+1 != map->timestamp) {
 1549                                 /*
 1550                                  * Look again for the entry because the map was
 1551                                  * modified while it was unlocked.
 1552                                  * Specifically, the entry may have been
 1553                                  * clipped, merged, or deleted.
 1554                                  */
 1555                                 if (!vm_map_lookup_entry(map, saved_start,
 1556                                     &tmp_entry)) {
 1557                                         if (saved_start == start) {
 1558                                                 /*
 1559                                                  * first_entry has been deleted.
 1560                                                  */
 1561                                                 vm_map_unlock(map);
 1562                                                 return (KERN_INVALID_ADDRESS);
 1563                                         }
 1564                                         end = saved_start;
 1565                                         rv = KERN_INVALID_ADDRESS;
 1566                                         goto done;
 1567                                 }
 1568                                 if (entry == first_entry)
 1569                                         first_entry = tmp_entry;
 1570                                 else
 1571                                         first_entry = NULL;
 1572                                 entry = tmp_entry;
 1573                         }
 1574                         last_timestamp = map->timestamp;
 1575                         continue;
 1576                 }
 1577                 vm_map_clip_start(map, entry, start);
 1578                 vm_map_clip_end(map, entry, end);
 1579                 /*
 1580                  * Mark the entry in case the map lock is released.  (See
 1581                  * above.)
 1582                  */
 1583                 entry->eflags |= MAP_ENTRY_IN_TRANSITION;
 1584                 /*
 1585                  * Check the map for holes in the specified region.
 1586                  */
 1587                 if (entry->end < end && (entry->next == &map->header ||
 1588                     entry->next->start > entry->end)) {
 1589                         end = entry->end;
 1590                         rv = KERN_INVALID_ADDRESS;
 1591                         goto done;
 1592                 }
 1593                 /*
 1594                  * Require that the entry is wired.
 1595                  */
 1596                 if (entry->wired_count == 0 || (user_unwire &&
 1597                     (entry->eflags & MAP_ENTRY_USER_WIRED) == 0)) {
 1598                         end = entry->end;
 1599                         rv = KERN_INVALID_ARGUMENT;
 1600                         goto done;
 1601                 }
 1602                 entry = entry->next;
 1603         }
 1604         rv = KERN_SUCCESS;
 1605 done:
 1606         need_wakeup = FALSE;
 1607         if (first_entry == NULL) {
 1608                 result = vm_map_lookup_entry(map, start, &first_entry);
 1609                 KASSERT(result, ("vm_map_unwire: lookup failed"));
 1610         }
 1611         entry = first_entry;
 1612         while (entry != &map->header && entry->start < end) {
 1613                 if (rv == KERN_SUCCESS) {
 1614                         if (user_unwire)
 1615                                 entry->eflags &= ~MAP_ENTRY_USER_WIRED;
 1616                         entry->wired_count--;
 1617                         if (entry->wired_count == 0) {
 1618                                 /*
 1619                                  * Retain the map lock.
 1620                                  */
 1621                                 vm_fault_unwire(map, entry->start, entry->end);
 1622                         }
 1623                 }
 1624                 KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION,
 1625                         ("vm_map_unwire: in-transition flag missing"));
 1626                 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
 1627                 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
 1628                         entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
 1629                         need_wakeup = TRUE;
 1630                 }
 1631                 vm_map_simplify_entry(map, entry);
 1632                 entry = entry->next;
 1633         }
 1634         vm_map_unlock(map);
 1635         if (need_wakeup)
 1636                 vm_map_wakeup(map);
 1637         return (rv);
 1638 }
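
/*
 * A minimal usage sketch (hypothetical, not from the original file): release
 * a user wiring, as an munlock(2)-style caller would.  Passing TRUE selects
 * user unwiring, so the call fails with KERN_INVALID_ARGUMENT on any entry
 * that is not marked MAP_ENTRY_USER_WIRED.
 */
static int
example_user_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end)
{

        return (vm_map_unwire(map, start, end, TRUE));
}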
 1639 
 1640 /*
 1641  *      vm_map_wire:
 1642  *
 1643  *      Implements both kernel and user wiring.
 1644  */
 1645 int
 1646 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
 1647         boolean_t user_wire)
 1648 {
 1649         vm_map_entry_t entry, first_entry, tmp_entry;
 1650         vm_offset_t saved_end, saved_start;
 1651         unsigned int last_timestamp;
 1652         int rv;
 1653         boolean_t need_wakeup, result;
 1654 
 1655         vm_map_lock(map);
 1656         VM_MAP_RANGE_CHECK(map, start, end);
 1657         if (!vm_map_lookup_entry(map, start, &first_entry)) {
 1658                 vm_map_unlock(map);
 1659                 return (KERN_INVALID_ADDRESS);
 1660         }
 1661         last_timestamp = map->timestamp;
 1662         entry = first_entry;
 1663         while (entry != &map->header && entry->start < end) {
 1664                 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
 1665                         /*
 1666                          * We have not yet clipped the entry.
 1667                          */
 1668                         saved_start = (start >= entry->start) ? start :
 1669                             entry->start;
 1670                         entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
 1671                         if (vm_map_unlock_and_wait(map, user_wire)) {
 1672                                 /*
 1673                                  * Allow interruption of user wiring?
 1674                                  */
 1675                         }
 1676                         vm_map_lock(map);
 1677                         if (last_timestamp + 1 != map->timestamp) {
 1678                                 /*
 1679                                  * Look again for the entry because the map was
 1680                                  * modified while it was unlocked.
 1681                                  * Specifically, the entry may have been
 1682                                  * clipped, merged, or deleted.
 1683                                  */
 1684                                 if (!vm_map_lookup_entry(map, saved_start,
 1685                                     &tmp_entry)) {
 1686                                         if (saved_start == start) {
 1687                                                 /*
 1688                                                  * first_entry has been deleted.
 1689                                                  */
 1690                                                 vm_map_unlock(map);
 1691                                                 return (KERN_INVALID_ADDRESS);
 1692                                         }
 1693                                         end = saved_start;
 1694                                         rv = KERN_INVALID_ADDRESS;
 1695                                         goto done;
 1696                                 }
 1697                                 if (entry == first_entry)
 1698                                         first_entry = tmp_entry;
 1699                                 else
 1700                                         first_entry = NULL;
 1701                                 entry = tmp_entry;
 1702                         }
 1703                         last_timestamp = map->timestamp;
 1704                         continue;
 1705                 }
 1706                 vm_map_clip_start(map, entry, start);
 1707                 vm_map_clip_end(map, entry, end);
 1708                 /*
 1709                  * Mark the entry in case the map lock is released.  (See
 1710                  * above.)
 1711                  */
 1712                 entry->eflags |= MAP_ENTRY_IN_TRANSITION;
 1713                 /*
 1714                  * Wire the entry's pages now if it is not already wired.
 1715                  */
 1716                 if (entry->wired_count == 0) {
 1717                         entry->wired_count++;
 1718                         saved_start = entry->start;
 1719                         saved_end = entry->end;
 1720                         /*
 1721                          * Release the map lock, relying on the in-transition
 1722                          * mark.
 1723                          */
 1724                         vm_map_unlock(map);
 1725                         rv = vm_fault_wire(map, saved_start, saved_end,
 1726                             user_wire);
 1727                         vm_map_lock(map);
 1728                         if (last_timestamp + 1 != map->timestamp) {
 1729                                 /*
 1730                                  * Look again for the entry because the map was
 1731                                  * modified while it was unlocked.  The entry
 1732                                  * may have been clipped, but NOT merged or
 1733                                  * deleted.
 1734                                  */
 1735                                 result = vm_map_lookup_entry(map, saved_start,
 1736                                     &tmp_entry);
 1737                                 KASSERT(result, ("vm_map_wire: lookup failed"));
 1738                                 if (entry == first_entry)
 1739                                         first_entry = tmp_entry;
 1740                                 else
 1741                                         first_entry = NULL;
 1742                                 entry = tmp_entry;
 1743                                 while (entry->end < saved_end) {
 1744                                         if (rv != KERN_SUCCESS) {
 1745                                                 KASSERT(entry->wired_count == 1,
 1746                                                     ("vm_map_wire: bad count"));
 1747                                                 entry->wired_count = -1;
 1748                                         }
 1749                                         entry = entry->next;
 1750                                 }
 1751                         }
 1752                         last_timestamp = map->timestamp;
 1753                         if (rv != KERN_SUCCESS) {
 1754                                 KASSERT(entry->wired_count == 1,
 1755                                     ("vm_map_wire: bad count"));
 1756                                 /*
 1757                                  * Assign an out-of-range value to represent
 1758                                  * the failure to wire this entry.
 1759                                  */
 1760                                 entry->wired_count = -1;
 1761                                 end = entry->end;
 1762                                 goto done;
 1763                         }
 1764                 } else if (!user_wire ||
 1765                            (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
 1766                         entry->wired_count++;
 1767                 }
 1768                 /*
 1769                  * Check the map for holes in the specified region.
 1770                  */
 1771                 if (entry->end < end && (entry->next == &map->header ||
 1772                     entry->next->start > entry->end)) {
 1773                         end = entry->end;
 1774                         rv = KERN_INVALID_ADDRESS;
 1775                         goto done;
 1776                 }
 1777                 entry = entry->next;
 1778         }
 1779         rv = KERN_SUCCESS;
 1780 done:
 1781         need_wakeup = FALSE;
 1782         if (first_entry == NULL) {
 1783                 result = vm_map_lookup_entry(map, start, &first_entry);
 1784                 KASSERT(result, ("vm_map_wire: lookup failed"));
 1785         }
 1786         entry = first_entry;
 1787         while (entry != &map->header && entry->start < end) {
 1788                 if (rv == KERN_SUCCESS) {
 1789                         if (user_wire)
 1790                                 entry->eflags |= MAP_ENTRY_USER_WIRED;
 1791                 } else if (entry->wired_count == -1) {
 1792                         /*
 1793                          * Wiring failed on this entry.  Thus, unwiring is
 1794                          * unnecessary.
 1795                          */
 1796                         entry->wired_count = 0;
 1797                 } else {
 1798                         if (!user_wire ||
 1799                             (entry->eflags & MAP_ENTRY_USER_WIRED) == 0)
 1800                                 entry->wired_count--;
 1801                         if (entry->wired_count == 0) {
 1802                                 /*
 1803                                  * Retain the map lock.
 1804                                  */
 1805                                 vm_fault_unwire(map, entry->start, entry->end);
 1806                         }
 1807                 }
 1808                 KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION,
 1809                         ("vm_map_wire: in-transition flag missing"));
 1810                 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
 1811                 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
 1812                         entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
 1813                         need_wakeup = TRUE;
 1814                 }
 1815                 vm_map_simplify_entry(map, entry);
 1816                 entry = entry->next;
 1817         }
 1818         vm_map_unlock(map);
 1819         if (need_wakeup)
 1820                 vm_map_wakeup(map);
 1821         return (rv);
 1822 }
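
/*
 * A minimal usage sketch (hypothetical, not from the original file): wire a
 * range on behalf of the kernel and release it again later.  With user_wire
 * FALSE only wired_count is adjusted; MAP_ENTRY_USER_WIRED is left alone.
 */
static int
example_kernel_wire(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
        int rv;

        rv = vm_map_wire(map, start, end, FALSE);
        if (rv != KERN_SUCCESS)
                return (rv);
        /* ... access the wired range ... */
        return (vm_map_unwire(map, start, end, FALSE));
}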
 1823 
 1824 /*
 1825  * vm_map_clean
 1826  *
 1827  * Push any dirty cached pages in the address range to their pager.
 1828  * If syncio is TRUE, dirty pages are written synchronously.
 1829  * If invalidate is TRUE, any cached pages are freed as well.
 1830  *
 1831  * Returns an error if any part of the specified range is not mapped.
 1832  */
 1833 int
 1834 vm_map_clean(
 1835         vm_map_t map,
 1836         vm_offset_t start,
 1837         vm_offset_t end,
 1838         boolean_t syncio,
 1839         boolean_t invalidate)
 1840 {
 1841         vm_map_entry_t current;
 1842         vm_map_entry_t entry;
 1843         vm_size_t size;
 1844         vm_object_t object;
 1845         vm_ooffset_t offset;
 1846 
 1847         GIANT_REQUIRED;
 1848 
 1849         vm_map_lock_read(map);
 1850         VM_MAP_RANGE_CHECK(map, start, end);
 1851         if (!vm_map_lookup_entry(map, start, &entry)) {
 1852                 vm_map_unlock_read(map);
 1853                 return (KERN_INVALID_ADDRESS);
 1854         }
 1855         /*
 1856          * Make a first pass to check for holes.
 1857          */
 1858         for (current = entry; current->start < end; current = current->next) {
 1859                 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
 1860                         vm_map_unlock_read(map);
 1861                         return (KERN_INVALID_ARGUMENT);
 1862                 }
 1863                 if (end > current->end &&
 1864                     (current->next == &map->header ||
 1865                         current->end != current->next->start)) {
 1866                         vm_map_unlock_read(map);
 1867                         return (KERN_INVALID_ADDRESS);
 1868                 }
 1869         }
 1870 
 1871         if (invalidate) {
 1872                 vm_page_lock_queues();
 1873                 pmap_remove(map->pmap, start, end);
 1874                 vm_page_unlock_queues();
 1875         }
 1876         /*
 1877          * Make a second pass, cleaning/uncaching pages from the indicated
 1878          * objects as we go.
 1879          */
 1880         for (current = entry; current->start < end; current = current->next) {
 1881                 offset = current->offset + (start - current->start);
 1882                 size = (end <= current->end ? end : current->end) - start;
 1883                 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
 1884                         vm_map_t smap;
 1885                         vm_map_entry_t tentry;
 1886                         vm_size_t tsize;
 1887 
 1888                         smap = current->object.sub_map;
 1889                         vm_map_lock_read(smap);
 1890                         (void) vm_map_lookup_entry(smap, offset, &tentry);
 1891                         tsize = tentry->end - offset;
 1892                         if (tsize < size)
 1893                                 size = tsize;
 1894                         object = tentry->object.vm_object;
 1895                         offset = tentry->offset + (offset - tentry->start);
 1896                         vm_map_unlock_read(smap);
 1897                 } else {
 1898                         object = current->object.vm_object;
 1899                 }
 1900                 /*
 1901                  * Note that there is absolutely no sense in writing out
 1902                  * anonymous objects, so we track down the vnode object
 1903                  * to write out.
 1904                  * We invalidate (remove) all pages from the address space
 1905                  * anyway, for semantic correctness.
 1906                  *
 1907                  * note: certain anonymous maps, such as MAP_NOSYNC maps,
 1908                  * may start out with a NULL object.
 1909                  */
 1910                 while (object && object->backing_object) {
 1911                         offset += object->backing_object_offset;
 1912                         object = object->backing_object;
 1913                         if (object->size < OFF_TO_IDX(offset + size))
 1914                                 size = IDX_TO_OFF(object->size) - offset;
 1915                 }
 1916                 if (object && (object->type == OBJT_VNODE) && 
 1917                     (current->protection & VM_PROT_WRITE)) {
 1918                         /*
 1919                          * Flush pages if writing is allowed, invalidate them
 1920                          * if invalidation requested.  Pages undergoing I/O
 1921                          * will be ignored by vm_object_page_remove().
 1922                          *
 1923                          * We cannot lock the vnode and then wait for paging
 1924                          * to complete without deadlocking against vm_fault.
 1925                          * Instead we simply call vm_object_page_remove() and
 1926                          * allow it to block internally on a page-by-page 
 1927                          * basis when it encounters pages undergoing async 
 1928                          * I/O.
 1929                          */
 1930                         int flags;
 1931 
 1932                         vm_object_reference(object);
 1933                         vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY, curthread);
 1934                         flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
 1935                         flags |= invalidate ? OBJPC_INVAL : 0;
 1936                         vm_object_page_clean(object,
 1937                             OFF_TO_IDX(offset),
 1938                             OFF_TO_IDX(offset + size + PAGE_MASK),
 1939                             flags);
 1940                         VOP_UNLOCK(object->handle, 0, curthread);
 1941                         vm_object_deallocate(object);
 1942                 }
 1943                 if (object && invalidate &&
 1944                     ((object->type == OBJT_VNODE) ||
 1945                      (object->type == OBJT_DEVICE))) {
 1946                         vm_object_reference(object);
 1947                         vm_object_page_remove(object,
 1948                             OFF_TO_IDX(offset),
 1949                             OFF_TO_IDX(offset + size + PAGE_MASK),
 1950                             FALSE);
 1951                         vm_object_deallocate(object);
 1952                 }
 1953                 start += size;
 1954         }
 1955 
 1956         vm_map_unlock_read(map);
 1957         return (KERN_SUCCESS);
 1958 }
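
/*
 * A minimal usage sketch (hypothetical, not from the original file): an
 * msync(2)-style flush that writes dirty pages synchronously and then drops
 * the cached pages.  Giant is assumed to be held, as vm_map_clean() asserts
 * with GIANT_REQUIRED.
 */
static int
example_sync_and_invalidate(vm_map_t map, vm_offset_t start, vm_offset_t end)
{

        return (vm_map_clean(map, start, end, TRUE, TRUE));
}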
 1959 
 1960 /*
 1961  *      vm_map_entry_unwire:    [ internal use only ]
 1962  *
 1963  *      Make the region specified by this entry pageable.
 1964  *
 1965  *      The map in question should be locked.
 1966  *      [This is the reason for this routine's existence.]
 1967  */
 1968 static void 
 1969 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
 1970 {
 1971         vm_fault_unwire(map, entry->start, entry->end);
 1972         entry->wired_count = 0;
 1973 }
 1974 
 1975 /*
 1976  *      vm_map_entry_delete:    [ internal use only ]
 1977  *
 1978  *      Deallocate the given entry from the target map.
 1979  */
 1980 static void
 1981 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
 1982 {
 1983         vm_map_entry_unlink(map, entry);
 1984         map->size -= entry->end - entry->start;
 1985 
 1986         if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
 1987                 vm_object_deallocate(entry->object.vm_object);
 1988         }
 1989 
 1990         vm_map_entry_dispose(map, entry);
 1991 }
 1992 
 1993 /*
 1994  *      vm_map_delete:  [ internal use only ]
 1995  *
 1996  *      Deallocates the given address range from the target
 1997  *      map.
 1998  */
 1999 int
 2000 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
 2001 {
 2002         vm_object_t object;
 2003         vm_map_entry_t entry;
 2004         vm_map_entry_t first_entry;
 2005 
 2006         /*
 2007          * Find the start of the region, and clip it
 2008          */
 2009         if (!vm_map_lookup_entry(map, start, &first_entry))
 2010                 entry = first_entry->next;
 2011         else {
 2012                 entry = first_entry;
 2013                 vm_map_clip_start(map, entry, start);
 2014         }
 2015 
 2016         /*
 2017          * Save the free space hint
 2018          */
 2019         if (entry == &map->header) {
 2020                 map->first_free = &map->header;
 2021         } else if (map->first_free->start >= start) {
 2022                 map->first_free = entry->prev;
 2023         }
 2024 
 2025         /*
 2026          * Step through all entries in this region
 2027          */
 2028         while ((entry != &map->header) && (entry->start < end)) {
 2029                 vm_map_entry_t next;
 2030                 vm_offset_t s, e;
 2031                 vm_pindex_t offidxstart, offidxend, count;
 2032 
 2033                 /*
 2034                  * Wait for wiring or unwiring of an entry to complete.
 2035                  */
 2036                 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0) {
 2037                         unsigned int last_timestamp;
 2038                         vm_offset_t saved_start;
 2039                         vm_map_entry_t tmp_entry;
 2040 
 2041                         saved_start = entry->start;
 2042                         entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
 2043                         last_timestamp = map->timestamp;
 2044                         (void) vm_map_unlock_and_wait(map, FALSE);
 2045                         vm_map_lock(map);
 2046                         if (last_timestamp + 1 != map->timestamp) {
 2047                                 /*
 2048                                  * Look again for the entry because the map was
 2049                                  * modified while it was unlocked.
 2050                                  * Specifically, the entry may have been
 2051                                  * clipped, merged, or deleted.
 2052                                  */
 2053                                 if (!vm_map_lookup_entry(map, saved_start,
 2054                                                          &tmp_entry))
 2055                                         entry = tmp_entry->next;
 2056                                 else {
 2057                                         entry = tmp_entry;
 2058                                         vm_map_clip_start(map, entry,
 2059                                                           saved_start);
 2060                                 }
 2061                         }
 2062                         continue;
 2063                 }
 2064                 vm_map_clip_end(map, entry, end);
 2065 
 2066                 s = entry->start;
 2067                 e = entry->end;
 2068                 next = entry->next;
 2069 
 2070                 offidxstart = OFF_TO_IDX(entry->offset);
 2071                 count = OFF_TO_IDX(e - s);
 2072                 object = entry->object.vm_object;
 2073 
 2074                 /*
 2075                  * Unwire before removing addresses from the pmap; otherwise,
 2076                  * unwiring will put the entries back in the pmap.
 2077                  */
 2078                 if (entry->wired_count != 0) {
 2079                         vm_map_entry_unwire(map, entry);
 2080                 }
 2081 
 2082                 offidxend = offidxstart + count;
 2083 
 2084                 if ((object == kernel_object) || (object == kmem_object)) {
 2085                         vm_object_page_remove(object, offidxstart, offidxend, FALSE);
 2086                 } else {
 2087                         mtx_lock(&Giant);
 2088                         vm_page_lock_queues();
 2089                         pmap_remove(map->pmap, s, e);
 2090                         vm_page_unlock_queues();
 2091                         if (object != NULL &&
 2092                             object->ref_count != 1 &&
 2093                             (object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING &&
 2094                             (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
 2095                                 vm_object_collapse(object);
 2096                                 vm_object_page_remove(object, offidxstart, offidxend, FALSE);
 2097                                 if (object->type == OBJT_SWAP) {
 2098                                         swap_pager_freespace(object, offidxstart, count);
 2099                                 }
 2100                                 if (offidxend >= object->size &&
 2101                                     offidxstart < object->size) {
 2102                                         object->size = offidxstart;
 2103                                 }
 2104                         }
 2105                         mtx_unlock(&Giant);
 2106                 }
 2107 
 2108                 /*
 2109                  * Delete the entry (which may delete the object) only after
 2110                  * removing all pmap entries pointing to its pages.
 2111                  * (Otherwise, its page frames may be reallocated, and any
 2112                  * modify bits will be set in the wrong object!)
 2113                  */
 2114                 vm_map_entry_delete(map, entry);
 2115                 entry = next;
 2116         }
 2117         return (KERN_SUCCESS);
 2118 }
 2119 
 2120 /*
 2121  *      vm_map_remove:
 2122  *
 2123  *      Remove the given address range from the target map.
 2124  *      This is the exported form of vm_map_delete.
 2125  */
 2126 int
 2127 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
 2128 {
 2129         int result, s = 0;
 2130 
 2131         if (map == kmem_map)
 2132                 s = splvm();
 2133 
 2134         vm_map_lock(map);
 2135         VM_MAP_RANGE_CHECK(map, start, end);
 2136         result = vm_map_delete(map, start, end);
 2137         vm_map_unlock(map);
 2138 
 2139         if (map == kmem_map)
 2140                 splx(s);
 2141 
 2142         return (result);
 2143 }
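
/*
 * A minimal usage sketch (hypothetical, not from the original file): tear
 * down an entire page-aligned mapping, as an munmap(2)-style caller would.
 * vm_map_remove() takes and drops the map lock itself and handles the
 * kmem_map spl protection.
 */
static int
example_unmap(vm_map_t map, vm_offset_t addr, vm_size_t len)
{

        return (vm_map_remove(map, addr, addr + len));
}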
 2144 
 2145 /*
 2146  *      vm_map_check_protection:
 2147  *
 2148  *      Assert that the target map allows the specified
 2149  *      privilege on the entire address region given.
 2150  *      The entire region must be allocated.
 2151  */
 2152 boolean_t
 2153 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
 2154                         vm_prot_t protection)
 2155 {
 2156         vm_map_entry_t entry;
 2157         vm_map_entry_t tmp_entry;
 2158 
 2159         vm_map_lock_read(map);
 2160         if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
 2161                 vm_map_unlock_read(map);
 2162                 return (FALSE);
 2163         }
 2164         entry = tmp_entry;
 2165 
 2166         while (start < end) {
 2167                 if (entry == &map->header) {
 2168                         vm_map_unlock_read(map);
 2169                         return (FALSE);
 2170                 }
 2171                 /*
 2172                  * No holes allowed!
 2173                  */
 2174                 if (start < entry->start) {
 2175                         vm_map_unlock_read(map);
 2176                         return (FALSE);
 2177                 }
 2178                 /*
 2179                  * Check protection associated with entry.
 2180                  */
 2181                 if ((entry->protection & protection) != protection) {
 2182                         vm_map_unlock_read(map);
 2183                         return (FALSE);
 2184                 }
 2185                 /* go to next entry */
 2186                 start = entry->end;
 2187                 entry = entry->next;
 2188         }
 2189         vm_map_unlock_read(map);
 2190         return (TRUE);
 2191 }
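
/*
 * A minimal usage sketch (hypothetical, not from the original file): verify
 * that an entire range is mapped with read/write permission before copying
 * into it.  The answer is only advisory; the map may change as soon as the
 * read lock is dropped inside vm_map_check_protection().
 */
static boolean_t
example_range_is_writable(vm_map_t map, vm_offset_t start, vm_offset_t end)
{

        return (vm_map_check_protection(map, start, end,
            VM_PROT_READ | VM_PROT_WRITE));
}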
 2192 
 2193 /*
 2194  *      vm_map_copy_entry:
 2195  *
 2196  *      Copies the contents of the source entry to the destination
 2197  *      entry.  The entries *must* be aligned properly.
 2198  */
 2199 static void
 2200 vm_map_copy_entry(
 2201         vm_map_t src_map,
 2202         vm_map_t dst_map,
 2203         vm_map_entry_t src_entry, 
 2204         vm_map_entry_t dst_entry)
 2205 {
 2206         vm_object_t src_object;
 2207 
 2208         if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
 2209                 return;
 2210 
 2211         if (src_entry->wired_count == 0) {
 2212 
 2213                 /*
 2214                  * If the source entry is marked needs_copy, it is already
 2215                  * write-protected.
 2216                  */
 2217                 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
 2218                         vm_page_lock_queues();
 2219                         pmap_protect(src_map->pmap,
 2220                             src_entry->start,
 2221                             src_entry->end,
 2222                             src_entry->protection & ~VM_PROT_WRITE);
 2223                         vm_page_unlock_queues();
 2224                 }
 2225 
 2226                 /*
 2227                  * Make a copy of the object.
 2228                  */
 2229                 if ((src_object = src_entry->object.vm_object) != NULL) {
 2230 
 2231                         if ((src_object->handle == NULL) &&
 2232                                 (src_object->type == OBJT_DEFAULT ||
 2233                                  src_object->type == OBJT_SWAP)) {
 2234                                 vm_object_collapse(src_object);
 2235                                 if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
 2236                                         vm_object_split(src_entry);
 2237                                         src_object = src_entry->object.vm_object;
 2238                                 }
 2239                         }
 2240 
 2241                         vm_object_reference(src_object);
 2242                         vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
 2243                         dst_entry->object.vm_object = src_object;
 2244                         src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
 2245                         dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
 2246                         dst_entry->offset = src_entry->offset;
 2247                 } else {
 2248                         dst_entry->object.vm_object = NULL;
 2249                         dst_entry->offset = 0;
 2250                 }
 2251 
 2252                 pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
 2253                     dst_entry->end - dst_entry->start, src_entry->start);
 2254         } else {
 2255                 /*
 2256                  * Of course, wired down pages can't be set copy-on-write.
 2257                  * Cause wired pages to be copied into the new map by
 2258                  * simulating faults (the new pages are pageable)
 2259                  */
 2260                 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
 2261         }
 2262 }
 2263 
 2264 /*
 2265  * vmspace_fork:
 2266  * Create a new process vmspace structure and vm_map
 2267  * based on those of an existing process.  The new map
 2268  * is based on the old map, according to the inheritance
 2269  * values on the regions in that map.
 2270  *
 2271  * The source map must not be locked.
 2272  */
 2273 struct vmspace *
 2274 vmspace_fork(struct vmspace *vm1)
 2275 {
 2276         struct vmspace *vm2;
 2277         vm_map_t old_map = &vm1->vm_map;
 2278         vm_map_t new_map;
 2279         vm_map_entry_t old_entry;
 2280         vm_map_entry_t new_entry;
 2281         vm_object_t object;
 2282 
 2283         GIANT_REQUIRED;
 2284 
 2285         vm_map_lock(old_map);
 2286         old_map->infork = 1;
 2287 
 2288         vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
 2289         bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
 2290             (caddr_t) &vm1->vm_endcopy - (caddr_t) &vm1->vm_startcopy);
 2291         new_map = &vm2->vm_map; /* XXX */
 2292         new_map->timestamp = 1;
 2293 
 2294         old_entry = old_map->header.next;
 2295 
 2296         while (old_entry != &old_map->header) {
 2297                 if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
 2298                         panic("vm_map_fork: encountered a submap");
 2299 
 2300                 switch (old_entry->inheritance) {
 2301                 case VM_INHERIT_NONE:
 2302                         break;
 2303 
 2304                 case VM_INHERIT_SHARE:
 2305                         /*
 2306                          * Clone the entry, creating the shared object if necessary.
 2307                          */
 2308                         object = old_entry->object.vm_object;
 2309                         if (object == NULL) {
 2310                                 object = vm_object_allocate(OBJT_DEFAULT,
 2311                                         atop(old_entry->end - old_entry->start));
 2312                                 old_entry->object.vm_object = object;
 2313                                 old_entry->offset = (vm_offset_t) 0;
 2314                         }
 2315 
 2316                         /*
 2317                          * Add the reference before calling vm_object_shadow
 2318                          * to ensure that a shadow object is created.
 2319                          */
 2320                         vm_object_reference(object);
 2321                         if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
 2322                                 vm_object_shadow(&old_entry->object.vm_object,
 2323                                         &old_entry->offset,
 2324                                         atop(old_entry->end - old_entry->start));
 2325                                 old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
 2326                                 /* Transfer the second reference too. */
 2327                                 vm_object_reference(
 2328                                     old_entry->object.vm_object);
 2329                                 vm_object_deallocate(object);
 2330                                 object = old_entry->object.vm_object;
 2331                         }
 2332                         vm_object_clear_flag(object, OBJ_ONEMAPPING);
 2333 
 2334                         /*
 2335                          * Clone the entry, referencing the shared object.
 2336                          */
 2337                         new_entry = vm_map_entry_create(new_map);
 2338                         *new_entry = *old_entry;
 2339                         new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
 2340                         new_entry->wired_count = 0;
 2341 
 2342                         /*
 2343                          * Insert the entry into the new map -- we know we're
 2344                          * inserting at the end of the new map.
 2345                          */
 2346                         vm_map_entry_link(new_map, new_map->header.prev,
 2347                             new_entry);
 2348 
 2349                         /*
 2350                          * Update the physical map
 2351                          */
 2352                         pmap_copy(new_map->pmap, old_map->pmap,
 2353                             new_entry->start,
 2354                             (old_entry->end - old_entry->start),
 2355                             old_entry->start);
 2356                         break;
 2357 
 2358                 case VM_INHERIT_COPY:
 2359                         /*
 2360                          * Clone the entry and link into the map.
 2361                          */
 2362                         new_entry = vm_map_entry_create(new_map);
 2363                         *new_entry = *old_entry;
 2364                         new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
 2365                         new_entry->wired_count = 0;
 2366                         new_entry->object.vm_object = NULL;
 2367                         vm_map_entry_link(new_map, new_map->header.prev,
 2368                             new_entry);
 2369                         vm_map_copy_entry(old_map, new_map, old_entry,
 2370                             new_entry);
 2371                         break;
 2372                 }
 2373                 old_entry = old_entry->next;
 2374         }
 2375 
 2376         new_map->size = old_map->size;
 2377         old_map->infork = 0;
 2378         vm_map_unlock(old_map);
 2379 
 2380         return (vm2);
 2381 }
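
/*
 * A minimal usage sketch (hypothetical, not from the original file): the
 * fork path would hand the parent's vmspace to vmspace_fork() and install
 * the returned copy in the child.  Giant is assumed to be held, as
 * vmspace_fork() asserts with GIANT_REQUIRED.
 */
static void
example_fork_vmspace(struct proc *parent, struct proc *child)
{

        child->p_vmspace = vmspace_fork(parent->p_vmspace);
}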
 2382 
 2383 int
 2384 vm_map_stack (vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
 2385               vm_prot_t prot, vm_prot_t max, int cow)
 2386 {
 2387         vm_map_entry_t prev_entry;
 2388         vm_map_entry_t new_stack_entry;
 2389         vm_size_t      init_ssize;
 2390         int            rv;
 2391 
 2392         if (addrbos < vm_map_min(map))
 2393                 return (KERN_NO_SPACE);
 2394 
 2395         if (max_ssize < sgrowsiz)
 2396                 init_ssize = max_ssize;
 2397         else
 2398                 init_ssize = sgrowsiz;
 2399 
 2400         vm_map_lock(map);
 2401 
 2402         /* If addr is already mapped, no go */
 2403         if (vm_map_lookup_entry(map, addrbos, &prev_entry)) {
 2404                 vm_map_unlock(map);
 2405                 return (KERN_NO_SPACE);
 2406         }
 2407 
 2408         /* If we would blow our VMEM resource limit, no go */
 2409         if (map->size + init_ssize >
 2410             curthread->td_proc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
 2411                 vm_map_unlock(map);
 2412                 return (KERN_NO_SPACE);
 2413         }
 2414 
 2415         /* If we can't accommodate max_ssize in the current mapping,
 2416          * no go.  However, we need to be aware that subsequent user
 2417          * mappings might map into the space we have reserved for
 2418          * stack, and currently this space is not protected.  
 2419          * 
 2420          * Hopefully we will at least detect this condition 
 2421          * when we try to grow the stack.
 2422          */
 2423         if ((prev_entry->next != &map->header) &&
 2424             (prev_entry->next->start < addrbos + max_ssize)) {
 2425                 vm_map_unlock(map);
 2426                 return (KERN_NO_SPACE);
 2427         }
 2428 
 2429         /* We initially map a stack of only init_ssize.  We will
 2430          * grow as needed later.  Since this is to be a grow 
 2431          * down stack, we map at the top of the range.
 2432          *
 2433          * Note: we would normally expect prot and max to be
 2434          * VM_PROT_ALL, and cow to be 0.  Possibly we should
 2435          * eliminate these as input parameters, and just
 2436          * pass these values here in the insert call.
 2437          */
 2438         rv = vm_map_insert(map, NULL, 0, addrbos + max_ssize - init_ssize,
 2439                            addrbos + max_ssize, prot, max, cow);
 2440 
 2441         /* Now set the avail_ssize amount */
 2442         if (rv == KERN_SUCCESS){
 2443                 if (prev_entry != &map->header)
 2444                         vm_map_clip_end(map, prev_entry, addrbos + max_ssize - init_ssize);
 2445                 new_stack_entry = prev_entry->next;
 2446                 if (new_stack_entry->end   != addrbos + max_ssize ||
 2447                     new_stack_entry->start != addrbos + max_ssize - init_ssize)
 2448                         panic ("Bad entry start/end for new stack entry");
 2449                 else 
 2450                         new_stack_entry->avail_ssize = max_ssize - init_ssize;
 2451         }
 2452 
 2453         vm_map_unlock(map);
 2454         return (rv);
 2455 }
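
/*
 * A minimal usage sketch (hypothetical, not from the original file): reserve
 * a grow-down user stack of at most "maxsize" bytes ending at "top".  Only
 * the initial sgrowsiz portion is mapped up front; later faults below it are
 * handled by vm_map_growstack().
 */
static int
example_create_stack(vm_map_t map, vm_offset_t top, vm_size_t maxsize)
{

        return (vm_map_stack(map, top - maxsize, maxsize,
            VM_PROT_ALL, VM_PROT_ALL, 0));
}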
 2456 
 2457 /* Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
 2458  * desired address is already mapped, or if we successfully grow
 2459  * the stack.  Also returns KERN_SUCCESS if addr is outside the
 2460  * stack range (this is strange, but preserves compatibility with
 2461  * the grow function in vm_machdep.c).
 2462  */
 2463 int
 2464 vm_map_growstack (struct proc *p, vm_offset_t addr)
 2465 {
 2466         vm_map_entry_t prev_entry;
 2467         vm_map_entry_t stack_entry;
 2468         vm_map_entry_t new_stack_entry;
 2469         struct vmspace *vm = p->p_vmspace;
 2470         vm_map_t map = &vm->vm_map;
 2471         vm_offset_t    end;
 2472         int      grow_amount;
 2473         int      rv;
 2474         int      is_procstack;
 2475 
 2476         GIANT_REQUIRED;
 2477         
 2478 Retry:
 2479         vm_map_lock_read(map);
 2480 
 2481         /* If addr is already in the entry range, no need to grow.*/
 2482         if (vm_map_lookup_entry(map, addr, &prev_entry)) {
 2483                 vm_map_unlock_read(map);
 2484                 return (KERN_SUCCESS);
 2485         }
 2486 
 2487         if ((stack_entry = prev_entry->next) == &map->header) {
 2488                 vm_map_unlock_read(map);
 2489                 return (KERN_SUCCESS);
 2490         } 
 2491         if (prev_entry == &map->header) 
 2492                 end = stack_entry->start - stack_entry->avail_ssize;
 2493         else
 2494                 end = prev_entry->end;
 2495 
 2496         /* This next test mimics the old grow function in vm_machdep.c.
 2497          * It really doesn't quite make sense, but we do it anyway
 2498          * for compatibility.
 2499          *
 2500          * If the stack is not growable, return success.  This signals the
 2501          * caller to proceed as it normally would with ordinary VM.
 2502          */
 2503         if (stack_entry->avail_ssize < 1 ||
 2504             addr >= stack_entry->start ||
 2505             addr <  stack_entry->start - stack_entry->avail_ssize) {
 2506                 vm_map_unlock_read(map);
 2507                 return (KERN_SUCCESS);
 2508         } 
 2509         
 2510         /* Find the minimum grow amount */
 2511         grow_amount = roundup (stack_entry->start - addr, PAGE_SIZE);
 2512         if (grow_amount > stack_entry->avail_ssize) {
 2513                 vm_map_unlock_read(map);
 2514                 return (KERN_NO_SPACE);
 2515         }
 2516 
 2517         /* If there is no longer enough space between the entries, fail
 2518          * after adjusting the available space.  Note: this
 2519          * should only happen if the user has mapped into the
 2520          * stack area after the stack was created, and is
 2521          * probably an error.
 2522          *
 2523          * This also effectively destroys any guard page the user
 2524          * might have intended by limiting the stack size.
 2525          */
 2526         if (grow_amount > stack_entry->start - end) {
 2527                 if (vm_map_lock_upgrade(map))
 2528                         goto Retry;
 2529 
 2530                 stack_entry->avail_ssize = stack_entry->start - end;
 2531 
 2532                 vm_map_unlock(map);
 2533                 return (KERN_NO_SPACE);
 2534         }
 2535 
 2536         is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr;
 2537 
 2538         /* If this is the main process stack, see if we're over the 
 2539          * stack limit.
 2540          */
 2541         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
 2542                              p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
 2543                 vm_map_unlock_read(map);
 2544                 return (KERN_NO_SPACE);
 2545         }
 2546 
 2547         /* Round up the grow amount to a multiple of sgrowsiz */
 2548         grow_amount = roundup (grow_amount, sgrowsiz);
 2549         if (grow_amount > stack_entry->avail_ssize) {
 2550                 grow_amount = stack_entry->avail_ssize;
 2551         }
 2552         if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
 2553                              p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
 2554                 grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur -
 2555                               ctob(vm->vm_ssize);
 2556         }
 2557 
 2558         /* If we would blow our VMEM resource limit, no go */
 2559         if (map->size + grow_amount >
 2560             curthread->td_proc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
 2561                 vm_map_unlock_read(map);
 2562                 return (KERN_NO_SPACE);
 2563         }
 2564 
 2565         if (vm_map_lock_upgrade(map))
 2566                 goto Retry;
 2567 
 2568         /* Get the preliminary new entry start value */
 2569         addr = stack_entry->start - grow_amount;
 2570 
 2571         /* If this puts us into the previous entry, cut back our growth
 2572          * to the available space.  Also, see the note above.
 2573          */
 2574         if (addr < end) {
 2575                 stack_entry->avail_ssize = stack_entry->start - end;
 2576                 addr = end;
 2577         }
 2578 
 2579         rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start,
 2580             p->p_sysent->sv_stackprot, VM_PROT_ALL, 0);
 2581 
 2582         /* Adjust the available stack space by the amount we grew. */
 2583         if (rv == KERN_SUCCESS) {
 2584                 if (prev_entry != &map->header)
 2585                         vm_map_clip_end(map, prev_entry, addr);
 2586                 new_stack_entry = prev_entry->next;
 2587                 if (new_stack_entry->end   != stack_entry->start  ||
 2588                     new_stack_entry->start != addr)
 2589                         panic ("Bad stack grow start/end in new stack entry");
 2590                 else {
 2591                         new_stack_entry->avail_ssize = stack_entry->avail_ssize -
 2592                                                         (new_stack_entry->end -
 2593                                                          new_stack_entry->start);
 2594                         if (is_procstack)
 2595                                 vm->vm_ssize += btoc(new_stack_entry->end -
 2596                                                      new_stack_entry->start);
 2597                 }
 2598         }
 2599 
 2600         vm_map_unlock(map);
 2601         return (rv);
 2602 }
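
/*
 * A minimal usage sketch (hypothetical, not from the original file): a fault
 * handler would try to grow the stack toward the faulting address before
 * falling back to normal fault processing.  KERN_SUCCESS is returned even
 * when the address is outside any growable stack range, per the comment
 * above, so the caller simply proceeds either way.
 */
static int
example_try_growstack(struct proc *p, vm_offset_t fault_addr)
{

        return (vm_map_growstack(p, fault_addr));
}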
 2603 
 2604 /*
 2605  * Unshare the specified VM space for exec.  If it is still shared with
 2606  * other processes, then create a new one.  The new vmspace is empty.
 2607  */
 2608 void
 2609 vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
 2610 {
 2611         struct vmspace *oldvmspace = p->p_vmspace;
 2612         struct vmspace *newvmspace;
 2613 
 2614         GIANT_REQUIRED;
 2615         newvmspace = vmspace_alloc(minuser, maxuser);
 2616         bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy,
 2617             (caddr_t) (newvmspace + 1) - (caddr_t) &newvmspace->vm_startcopy);
 2618         /*
 2619          * This code is written like this for prototype purposes.  The
 2620          * goal is to avoid running down the vmspace here, but to let the
 2621          * other processes that are still using the vmspace finally
 2622          * run it down.  Even though there is little or no chance of blocking
 2623          * here, it is a good idea to keep this form for future mods.
 2624          */
 2625         p->p_vmspace = newvmspace;
 2626         pmap_pinit2(vmspace_pmap(newvmspace));
 2627         vmspace_free(oldvmspace);
 2628         if (p == curthread->td_proc)            /* XXXKSE ? */
 2629                 pmap_activate(curthread);
 2630 }
 2631 
 2632 /*
 2633  * Unshare the specified VM space for forcing COW.  This
 2634  * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
 2635  */
 2636 void
 2637 vmspace_unshare(struct proc *p)
 2638 {
 2639         struct vmspace *oldvmspace = p->p_vmspace;
 2640         struct vmspace *newvmspace;
 2641 
 2642         GIANT_REQUIRED;
 2643         if (oldvmspace->vm_refcnt == 1)
 2644                 return;
 2645         newvmspace = vmspace_fork(oldvmspace);
 2646         p->p_vmspace = newvmspace;
 2647         pmap_pinit2(vmspace_pmap(newvmspace));
 2648         vmspace_free(oldvmspace);
 2649         if (p == curthread->td_proc)            /* XXXKSE ? */
 2650                 pmap_activate(curthread);
 2651 }
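
/*
 * A minimal usage sketch (hypothetical, not from the original file): force a
 * copy-on-write split of a shared address space, as the rfork() case
 * described above would.  The call is a no-op when the vmspace is not
 * shared, since vmspace_unshare() returns early at vm_refcnt == 1.
 */
static void
example_force_cow_split(struct proc *p)
{

        vmspace_unshare(p);
}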
 2652 
 2653 /*
 2654  *      vm_map_lookup:
 2655  *
 2656  *      Finds the VM object, offset, and
 2657  *      protection for a given virtual address in the
 2658  *      specified map, assuming a page fault of the
 2659  *      type specified.
 2660  *
 2661  *      Leaves the map in question locked for read; return
 2662  *      values are guaranteed until a vm_map_lookup_done
 2663  *      call is performed.  Note that the map argument
 2664  *      is in/out; the returned map must be used in
 2665  *      the call to vm_map_lookup_done.
 2666  *
 2667  *      A handle (out_entry) is returned for use in
 2668  *      vm_map_lookup_done, to make that fast.
 2669  *
 2670  *      If a lookup is requested with "write protection"
 2671  *      specified, the map may be changed to perform virtual
 2672  *      copying operations, although the data referenced will
 2673  *      remain the same.
 2674  */
 2675 int
 2676 vm_map_lookup(vm_map_t *var_map,                /* IN/OUT */
 2677               vm_offset_t vaddr,
 2678               vm_prot_t fault_typea,
 2679               vm_map_entry_t *out_entry,        /* OUT */
 2680               vm_object_t *object,              /* OUT */
 2681               vm_pindex_t *pindex,              /* OUT */
 2682               vm_prot_t *out_prot,              /* OUT */
 2683               boolean_t *wired)                 /* OUT */
 2684 {
 2685         vm_map_entry_t entry;
 2686         vm_map_t map = *var_map;
 2687         vm_prot_t prot;
 2688         vm_prot_t fault_type = fault_typea;
 2689 
 2690 RetryLookup:;
 2691         /*
 2692          * Lookup the faulting address.
 2693          */
 2694 
 2695         vm_map_lock_read(map);
 2696 #define RETURN(why) \
 2697                 { \
 2698                 vm_map_unlock_read(map); \
 2699                 return (why); \
 2700                 }
 2701 
 2702         /*
 2703          * If the map has an interesting hint, try it before calling the
 2704          * full-blown lookup routine.
 2705          */
 2706         entry = map->root;
 2707         *out_entry = entry;
 2708         if (entry == NULL ||
 2709             (vaddr < entry->start) || (vaddr >= entry->end)) {
 2710                 /*
 2711                  * Entry was either not a valid hint, or the vaddr was not
 2712                  * contained in the entry, so do a full lookup.
 2713                  */
 2714                 if (!vm_map_lookup_entry(map, vaddr, out_entry))
 2715                         RETURN(KERN_INVALID_ADDRESS);
 2716 
 2717                 entry = *out_entry;
 2718         }
 2719         
 2720         /*
 2721          * Handle submaps.
 2722          */
 2723         if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
 2724                 vm_map_t old_map = map;
 2725 
 2726                 *var_map = map = entry->object.sub_map;
 2727                 vm_map_unlock_read(old_map);
 2728                 goto RetryLookup;
 2729         }
 2730 
 2731         /*
 2732          * Check whether this task is allowed to have this page.
 2733          * Note the special case for MAP_ENTRY_COW
 2734          * pages with an override.  This is to implement a forced
 2735          * COW for debuggers.
 2736          */
 2737         if (fault_type & VM_PROT_OVERRIDE_WRITE)
 2738                 prot = entry->max_protection;
 2739         else
 2740                 prot = entry->protection;
 2741         fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
 2742         if ((fault_type & prot) != fault_type) {
 2743                 RETURN(KERN_PROTECTION_FAILURE);
 2744         }
 2745         if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
 2746             (entry->eflags & MAP_ENTRY_COW) &&
 2747             (fault_type & VM_PROT_WRITE) &&
 2748             (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) {
 2749                 RETURN(KERN_PROTECTION_FAILURE);
 2750         }
 2751 
 2752         /*
 2753          * If this page is not pageable, we have to get it for all possible
 2754          * accesses.
 2755          */
 2756         *wired = (entry->wired_count != 0);
 2757         if (*wired)
 2758                 prot = fault_type = entry->protection;
 2759 
 2760         /*
 2761          * If the entry was copy-on-write, shadow it (write fault) or demote permissions (read fault).
 2762          */
 2763         if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
 2764                 /*
 2765                  * If we want to write the page, we may as well handle that
 2766                  * now since we've got the map locked.
 2767                  *
 2768                  * If we don't need to write the page, we just demote the
 2769                  * permissions allowed.
 2770                  */
 2771                 if (fault_type & VM_PROT_WRITE) {
 2772                         /*
 2773                          * Make a new object, and place it in the object
 2774                          * chain.  Note that no new references have appeared
 2775                          * -- one just moved from the map to the new
 2776                          * object.
 2777                          */
 2778                         if (vm_map_lock_upgrade(map))
 2779                                 goto RetryLookup;
 2780 
 2781                         vm_object_shadow(
 2782                             &entry->object.vm_object,
 2783                             &entry->offset,
 2784                             atop(entry->end - entry->start));
 2785                         entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
 2786 
 2787                         vm_map_lock_downgrade(map);
 2788                 } else {
 2789                         /*
 2790                          * We're attempting to read a copy-on-write page --
 2791                          * don't allow writes.
 2792                          */
 2793                         prot &= ~VM_PROT_WRITE;
 2794                 }
 2795         }
 2796 
 2797         /*
 2798          * Create an object if necessary.
 2799          */
 2800         if (entry->object.vm_object == NULL &&
 2801             !map->system_map) {
 2802                 if (vm_map_lock_upgrade(map)) 
 2803                         goto RetryLookup;
 2804                 entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
 2805                     atop(entry->end - entry->start));
 2806                 entry->offset = 0;
 2807                 vm_map_lock_downgrade(map);
 2808         }
 2809 
 2810         /*
 2811          * Return the object/offset from this entry.  If the entry was
 2812          * copy-on-write or empty, it has been fixed up.
 2813          */
 2814         *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
 2815         *object = entry->object.vm_object;
 2816 
 2817         /*
 2818          * Return the protection actually granted for this lookup.
 2819          */
 2820         *out_prot = prot;
 2821         return (KERN_SUCCESS);
 2822 
 2823 #undef  RETURN
 2824 }
 2825 
 2826 /*
 2827  *      vm_map_lookup_done:
 2828  *
 2829  *      Releases locks acquired by a vm_map_lookup
 2830  *      (according to the handle returned by that lookup).
 2831  */
 2832 void
 2833 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
 2834 {
 2835         /*
 2836          * Unlock the main-level map
 2837          */
 2838         vm_map_unlock_read(map);
 2839 }
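
/*
 * Illustrative sketch, not part of this file: the expected pairing of
 * vm_map_lookup() and vm_map_lookup_done().  Because the map argument is
 * in/out (a submap may be substituted during the lookup), the map value
 * returned by vm_map_lookup() is the one that must be handed back to
 * vm_map_lookup_done().  The function name is hypothetical; real callers
 * such as the page-fault handler do substantially more work between the
 * two calls.
 */
#if 0	/* example only */
static int
example_lookup_and_release(vm_map_t map, vm_offset_t va, vm_prot_t fault_type)
{
	vm_map_entry_t entry;
	vm_object_t object;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;
	int rv;

	rv = vm_map_lookup(&map, va, fault_type, &entry, &object,
	    &pindex, &prot, &wired);
	if (rv != KERN_SUCCESS)
		return (rv);

	/* The map remains read-locked; (object, pindex, prot) are valid. */

	vm_map_lookup_done(map, entry);
	return (KERN_SUCCESS);
}
#endif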
 2840 
 2841 #ifdef ENABLE_VFS_IOOPT
 2842 /*
 2843  * Experimental support for zero-copy I/O
 2844  *
 2845  * Implement uiomove with VM operations.  This routine (and its collateral
 2846  * changes) supports every combination of source-object modification and
 2847  * COW-type operations.
 2848  */
 2849 int
 2850 vm_uiomove(
 2851         vm_map_t mapa,
 2852         vm_object_t srcobject,
 2853         off_t cp,
 2854         int cnta,
 2855         vm_offset_t uaddra,
 2856         int *npages)
 2857 {
 2858         vm_map_t map;
 2859         vm_object_t first_object, oldobject, object;
 2860         vm_map_entry_t entry;
 2861         vm_prot_t prot;
 2862         boolean_t wired;
 2863         int tcnt, rv;
 2864         vm_offset_t uaddr, start, end, tend;
 2865         vm_pindex_t first_pindex, oindex;
 2866         vm_size_t osize;
 2867         off_t ooffset;
 2868         int cnt;
 2869 
 2870         GIANT_REQUIRED;
 2871 
 2872         if (npages)
 2873                 *npages = 0;
 2874 
 2875         cnt = cnta;
 2876         uaddr = uaddra;
 2877 
 2878         while (cnt > 0) {
 2879                 map = mapa;
 2880 
 2881                 if ((vm_map_lookup(&map, uaddr,
 2882                         VM_PROT_READ, &entry, &first_object,
 2883                         &first_pindex, &prot, &wired)) != KERN_SUCCESS) {
 2884                         return EFAULT;
 2885                 }
 2886 
 2887                 vm_map_clip_start(map, entry, uaddr);
 2888 
 2889                 tcnt = cnt;
 2890                 tend = uaddr + tcnt;
 2891                 if (tend > entry->end) {
 2892                         tcnt = entry->end - uaddr;
 2893                         tend = entry->end;
 2894                 }
 2895 
 2896                 vm_map_clip_end(map, entry, tend);
 2897 
 2898                 start = entry->start;
 2899                 end = entry->end;
 2900 
 2901                 osize = atop(tcnt);
 2902 
 2903                 oindex = OFF_TO_IDX(cp);
 2904                 if (npages) {
 2905                         vm_size_t idx;
 2906                         for (idx = 0; idx < osize; idx++) {
 2907                                 vm_page_t m;
 2908                                 if ((m = vm_page_lookup(srcobject, oindex + idx)) == NULL) {
 2909                                         vm_map_lookup_done(map, entry);
 2910                                         return 0;
 2911                                 }
 2912                                 /*
 2913                                  * disallow busy or invalid pages, but allow
 2914                                  * m->busy pages if they are entirely valid.
 2915                                  */
 2916                                 if ((m->flags & PG_BUSY) ||
 2917                                         ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL)) {
 2918                                         vm_map_lookup_done(map, entry);
 2919                                         return 0;
 2920                                 }
 2921                         }
 2922                 }
 2923 
 2924 /*
 2925  * If we are changing an existing map entry, just redirect
 2926  * the object, and change mappings.
 2927  */
 2928                 if ((first_object->type == OBJT_VNODE) &&
 2929                         ((oldobject = entry->object.vm_object) == first_object)) {
 2930 
 2931                         if ((entry->offset != cp) || (oldobject != srcobject)) {
 2932                                 /*
 2933                                  * Remove old window into the file
 2934                                  */
 2935                                 vm_page_lock_queues();
 2936                                 pmap_remove(map->pmap, uaddr, tend);
 2937                                 vm_page_unlock_queues();
 2938 
 2939                                 /*
 2940                                  * Force copy on write for mmapped regions
 2941                                  */
 2942                                 vm_object_pmap_copy_1(srcobject, oindex, oindex + osize);
 2943 
 2944                                 /*
 2945                                  * Point the object appropriately
 2946                                  */
 2947                                 if (oldobject != srcobject) {
 2948 
 2949                                         /*
 2950                                          * Set the object optimization hint flag
 2951                                          */
 2952                                         vm_object_set_flag(srcobject, OBJ_OPT);
 2953                                         vm_object_reference(srcobject);
 2954                                         entry->object.vm_object = srcobject;
 2955 
 2956                                         if (oldobject) {
 2957                                                 vm_object_deallocate(oldobject);
 2958                                         }
 2959                                 }
 2960 
 2961                                 entry->offset = cp;
 2962                                 map->timestamp++;
 2963                         } else {
 2964                                 vm_page_lock_queues();
 2965                                 pmap_remove(map->pmap, uaddr, tend);
 2966                                 vm_page_unlock_queues();
 2967                         }
 2968 
 2969                 } else if ((first_object->ref_count == 1) &&
 2970                         (first_object->size == osize) &&
 2971                         ((first_object->type == OBJT_DEFAULT) ||
 2972                                 (first_object->type == OBJT_SWAP)) ) {
 2973 
 2974                         oldobject = first_object->backing_object;
 2975 
 2976                         if ((first_object->backing_object_offset != cp) ||
 2977                                 (oldobject != srcobject)) {
 2978                                 /*
 2979                                  * Remove old window into the file
 2980                                  */
 2981                                 vm_page_lock_queues();
 2982                                 pmap_remove(map->pmap, uaddr, tend);
 2983                                 vm_page_unlock_queues();
 2984 
 2985                                 /*
 2986                                  * Remove unneeded old pages
 2987                                  */
 2988                                 vm_object_page_remove(first_object, 0, 0, 0);
 2989 
 2990                                 /*
 2991                                  * Invalidate swap space
 2992                                  */
 2993                                 if (first_object->type == OBJT_SWAP) {
 2994                                         swap_pager_freespace(first_object,
 2995                                                 0,
 2996                                                 first_object->size);
 2997                                 }
 2998 
 2999                                 /*
 3000                                  * Force copy on write for mmapped regions
 3001                                  */
 3002                                 vm_object_pmap_copy_1(srcobject, oindex, oindex + osize);
 3003 
 3004                                 /*
 3005                                  * Point the object appropriately
 3006                                  */
 3007                                 if (oldobject != srcobject) {
 3008                                         /*
 3009                                          * Set the object optimization hint flag
 3010                                          */
 3011                                         vm_object_set_flag(srcobject, OBJ_OPT);
 3012                                         vm_object_reference(srcobject);
 3013 
 3014                                         if (oldobject) {
 3015                                                 TAILQ_REMOVE(&oldobject->shadow_head,
 3016                                                         first_object, shadow_list);
 3017                                                 oldobject->shadow_count--;
 3018                                                 /* XXX bump generation? */
 3019                                                 vm_object_deallocate(oldobject);
 3020                                         }
 3021 
 3022                                         TAILQ_INSERT_TAIL(&srcobject->shadow_head,
 3023                                                 first_object, shadow_list);
 3024                                         srcobject->shadow_count++;
 3025                                         /* XXX bump generation? */
 3026 
 3027                                         first_object->backing_object = srcobject;
 3028                                 }
 3029                                 first_object->backing_object_offset = cp;
 3030                                 map->timestamp++;
 3031                         } else {
 3032                                 vm_page_lock_queues();
 3033                                 pmap_remove(map->pmap, uaddr, tend);
 3034                                 vm_page_unlock_queues();
 3035                         }
 3036 /*
 3037  * Otherwise, we have to do a logical mmap.
 3038  */
 3039                 } else {
 3040 
 3041                         vm_object_set_flag(srcobject, OBJ_OPT);
 3042                         vm_object_reference(srcobject);
 3043 
 3044                         vm_page_lock_queues();
 3045                         pmap_remove(map->pmap, uaddr, tend);
 3046                         vm_page_unlock_queues();
 3047 
 3048                         vm_object_pmap_copy_1(srcobject, oindex, oindex + osize);
 3049                         vm_map_lock_upgrade(map);
 3050 
 3051                         if (entry == &map->header) {
 3052                                 map->first_free = &map->header;
 3053                         } else if (map->first_free->start >= start) {
 3054                                 map->first_free = entry->prev;
 3055                         }
 3056 
 3057                         vm_map_entry_delete(map, entry);
 3058 
 3059                         object = srcobject;
 3060                         ooffset = cp;
 3061 
 3062                         rv = vm_map_insert(map, object, ooffset, start, tend,
 3063                                 VM_PROT_ALL, VM_PROT_ALL, MAP_COPY_ON_WRITE);
 3064 
 3065                         if (rv != KERN_SUCCESS)
 3066                                 panic("vm_uiomove: could not insert new entry: %d", rv);
 3067                 }
 3068 
 3069 /*
 3070  * Map the window directly, if it is already in memory
 3071  */
 3072                 pmap_object_init_pt(map->pmap, uaddr,
 3073                         srcobject, oindex, tcnt, 0);
 3074 
 3075                 map->timestamp++;
 3076                 vm_map_unlock(map);
 3077 
 3078                 cnt -= tcnt;
 3079                 uaddr += tcnt;
 3080                 cp += tcnt;
 3081                 if (npages)
 3082                         *npages += osize;
 3083         }
 3084         return 0;
 3085 }
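
/*
 * Illustrative sketch, hypothetical and not taken from the VFS code: a
 * zero-copy read path could remap a window of a file-backed object into
 * the user address space with vm_uiomove() instead of copying the data
 * through a kernel buffer.  The function name and the way the object,
 * offset, and length are obtained are assumptions for illustration only.
 */
#if 0	/* example only */
static int
example_zero_copy_window(vm_map_t usermap, vm_object_t fileobj,
    off_t fileoff, int len, vm_offset_t useraddr)
{
	int npages;

	/* Redirect the user window onto fileobj with forced COW semantics. */
	return (vm_uiomove(usermap, fileobj, fileoff, len, useraddr,
	    &npages));
}
#endif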
 3086 #endif
 3087 
 3088 #include "opt_ddb.h"
 3089 #ifdef DDB
 3090 #include <sys/kernel.h>
 3091 
 3092 #include <ddb/ddb.h>
 3093 
 3094 /*
 3095  *      vm_map_print:   [ debug ]
 3096  */
 3097 DB_SHOW_COMMAND(map, vm_map_print)
 3098 {
 3099         static int nlines;
 3100         /* XXX convert args. */
 3101         vm_map_t map = (vm_map_t)addr;
 3102         boolean_t full = have_addr;
 3103 
 3104         vm_map_entry_t entry;
 3105 
 3106         db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
 3107             (void *)map,
 3108             (void *)map->pmap, map->nentries, map->timestamp);
 3109         nlines++;
 3110 
 3111         if (!full && db_indent)
 3112                 return;
 3113 
 3114         db_indent += 2;
 3115         for (entry = map->header.next; entry != &map->header;
 3116             entry = entry->next) {
 3117                 db_iprintf("map entry %p: start=%p, end=%p\n",
 3118                     (void *)entry, (void *)entry->start, (void *)entry->end);
 3119                 nlines++;
 3120                 {
 3121                         static char *inheritance_name[4] =
 3122                         {"share", "copy", "none", "donate_copy"};
 3123 
 3124                         db_iprintf(" prot=%x/%x/%s",
 3125                             entry->protection,
 3126                             entry->max_protection,
 3127                             inheritance_name[(int)(unsigned char)entry->inheritance]);
 3128                         if (entry->wired_count != 0)
 3129                                 db_printf(", wired");
 3130                 }
 3131                 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
 3132                         db_printf(", share=%p, offset=0x%jx\n",
 3133                             (void *)entry->object.sub_map,
 3134                             (uintmax_t)entry->offset);
 3135                         nlines++;
 3136                         if ((entry->prev == &map->header) ||
 3137                             (entry->prev->object.sub_map !=
 3138                                 entry->object.sub_map)) {
 3139                                 db_indent += 2;
 3140                                 vm_map_print((db_expr_t)(intptr_t)
 3141                                              entry->object.sub_map,
 3142                                              full, 0, (char *)0);
 3143                                 db_indent -= 2;
 3144                         }
 3145                 } else {
 3146                         db_printf(", object=%p, offset=0x%jx",
 3147                             (void *)entry->object.vm_object,
 3148                             (uintmax_t)entry->offset);
 3149                         if (entry->eflags & MAP_ENTRY_COW)
 3150                                 db_printf(", copy (%s)",
 3151                                     (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
 3152                         db_printf("\n");
 3153                         nlines++;
 3154 
 3155                         if ((entry->prev == &map->header) ||
 3156                             (entry->prev->object.vm_object !=
 3157                                 entry->object.vm_object)) {
 3158                                 db_indent += 2;
 3159                                 vm_object_print((db_expr_t)(intptr_t)
 3160                                                 entry->object.vm_object,
 3161                                                 full, 0, (char *)0);
 3162                                 nlines += 4;
 3163                                 db_indent -= 2;
 3164                         }
 3165                 }
 3166         }
 3167         db_indent -= 2;
 3168         if (db_indent == 0)
 3169                 nlines = 0;
 3170 }
 3171 
 3172 
 3173 DB_SHOW_COMMAND(procvm, procvm)
 3174 {
 3175         struct proc *p;
 3176 
 3177         if (have_addr) {
 3178                 p = (struct proc *) addr;
 3179         } else {
 3180                 p = curproc;
 3181         }
 3182 
 3183         db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
 3184             (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
 3185             (void *)vmspace_pmap(p->p_vmspace));
 3186 
 3187         vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL);
 3188 }
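
/*
 * Usage note (typical DDB interaction, stated as an assumption): the two
 * commands defined above are invoked from the in-kernel debugger prompt as
 *
 *	db> show map <map address>
 *	db> show procvm [<proc address>]
 *
 * For "show map", the address names the map to dump and also selects full
 * output; for "show procvm", omitting the address reports on curproc.
 */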
 3189 
 3190 #endif /* DDB */
