FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_object.c


    1 /*-
    2  * Copyright (c) 1991, 1993
    3  *      The Regents of the University of California.  All rights reserved.
    4  *
    5  * This code is derived from software contributed to Berkeley by
    6  * The Mach Operating System project at Carnegie-Mellon University.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  * 4. Neither the name of the University nor the names of its contributors
   17  *    may be used to endorse or promote products derived from this software
   18  *    without specific prior written permission.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   30  * SUCH DAMAGE.
   31  *
   32  *      from: @(#)vm_object.c   8.5 (Berkeley) 3/22/94
   33  *
   34  *
   35  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
   36  * All rights reserved.
   37  *
   38  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
   39  *
   40  * Permission to use, copy, modify and distribute this software and
   41  * its documentation is hereby granted, provided that both the copyright
   42  * notice and this permission notice appear in all copies of the
   43  * software, derivative works or modified versions, and any portions
   44  * thereof, and that both notices appear in supporting documentation.
   45  *
   46  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
   47  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
   48  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
   49  *
   50  * Carnegie Mellon requests users of this software to return to
   51  *
   52  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
   53  *  School of Computer Science
   54  *  Carnegie Mellon University
   55  *  Pittsburgh PA 15213-3890
   56  *
   57  * any improvements or extensions that they make and grant Carnegie the
   58  * rights to redistribute these changes.
   59  */
   60 
   61 /*
   62  *      Virtual memory object module.
   63  */
   64 
   65 #include <sys/cdefs.h>
   66 __FBSDID("$FreeBSD: releng/6.0/sys/vm/vm_object.c 151710 2005-10-26 20:22:00Z delphij $");
   67 
   68 #include <sys/param.h>
   69 #include <sys/systm.h>
   70 #include <sys/lock.h>
   71 #include <sys/mman.h>
   72 #include <sys/mount.h>
   73 #include <sys/kernel.h>
   74 #include <sys/sysctl.h>
   75 #include <sys/mutex.h>
   76 #include <sys/proc.h>           /* for curproc, pageproc */
   77 #include <sys/socket.h>
   78 #include <sys/vnode.h>
   79 #include <sys/vmmeter.h>
   80 #include <sys/sx.h>
   81 
   82 #include <vm/vm.h>
   83 #include <vm/vm_param.h>
   84 #include <vm/pmap.h>
   85 #include <vm/vm_map.h>
   86 #include <vm/vm_object.h>
   87 #include <vm/vm_page.h>
   88 #include <vm/vm_pageout.h>
   89 #include <vm/vm_pager.h>
   90 #include <vm/swap_pager.h>
   91 #include <vm/vm_kern.h>
   92 #include <vm/vm_extern.h>
   93 #include <vm/uma.h>
   94 
   95 #define EASY_SCAN_FACTOR       8
   96 
   97 #define MSYNC_FLUSH_HARDSEQ     0x01
   98 #define MSYNC_FLUSH_SOFTSEQ     0x02
   99 
  100 /*
  101  * msync / VM object flushing optimizations
  102  */
  103 static int msync_flush_flags = MSYNC_FLUSH_HARDSEQ | MSYNC_FLUSH_SOFTSEQ;
  104 SYSCTL_INT(_vm, OID_AUTO, msync_flush_flags,
  105         CTLFLAG_RW, &msync_flush_flags, 0, "");
  106 
  107 static int old_msync;
  108 SYSCTL_INT(_vm, OID_AUTO, old_msync, CTLFLAG_RW, &old_msync, 0,
  109     "Use old (insecure) msync behavior");
  110 
  111 static void     vm_object_qcollapse(vm_object_t object);
  112 static int      vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration, int pagerflags);
  113 
  114 /*
  115  *      Virtual memory objects maintain the actual data
  116  *      associated with allocated virtual memory.  A given
  117  *      page of memory exists within exactly one object.
  118  *
  119  *      An object is only deallocated when all "references"
  120  *      are given up.  Only one "reference" to a given
  121  *      region of an object should be writeable.
  122  *
  123  *      Associated with each object is a list of all resident
  124  *      memory pages belonging to that object; this list is
  125  *      maintained by the "vm_page" module, and locked by the object's
  126  *      lock.
  127  *
  128  *      Each object also records a "pager" routine which is
  129  *      used to retrieve (and store) pages to the proper backing
  130  *      storage.  In addition, objects may be backed by other
  131  *      objects from which they were virtual-copied.
  132  *
  133  *      The only items within the object structure which are
  134  *      modified after time of creation are:
  135  *              reference count         locked by object's lock
  136  *              pager routine           locked by object's lock
  137  *
  138  */
  139 
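/*
 * Editor's illustrative sketch (not part of the original source): a
 * minimal model of the fields the overview above describes.  The real
 * definition lives in vm/vm_object.h; names prefixed "toy_" are
 * hypothetical.
 */
#if 0
struct toy_vm_object {
        TAILQ_HEAD(, vm_page) memq;     /* resident pages; object-locked */
        int ref_count;                  /* object freed when this hits 0 */
        int paging_in_progress;         /* outstanding pager operations */
        objtype_t type;                 /* OBJT_DEFAULT, OBJT_VNODE, ... */
        void *handle;                   /* e.g. the vnode for OBJT_VNODE */
        struct toy_vm_object *backing_object;   /* virtual-copy source */
        vm_ooffset_t backing_object_offset;
};
#endif
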
  140 struct object_q vm_object_list;
  141 struct mtx vm_object_list_mtx;  /* lock for object list and count */
  142 
  143 struct vm_object kernel_object_store;
  144 struct vm_object kmem_object_store;
  145 
  146 static long object_collapses;
  147 static long object_bypasses;
  148 
  149 /*
  150  * next_index determines the page color that is assigned to the next
  151  * allocated object.  Accesses to next_index are not synchronized
  152  * because the effects of two or more object allocations using
  153  * next_index simultaneously are inconsequential.  At any given time,
  154  * numerous objects have the same page color.
  155  */
  156 static int next_index;
  157 
  158 static uma_zone_t obj_zone;
  159 #define VM_OBJECTS_INIT 256
  160 
  161 static int vm_object_zinit(void *mem, int size, int flags);
  162 
  163 #ifdef INVARIANTS
  164 static void vm_object_zdtor(void *mem, int size, void *arg);
  165 
  166 static void
  167 vm_object_zdtor(void *mem, int size, void *arg)
  168 {
  169         vm_object_t object;
  170 
  171         object = (vm_object_t)mem;
  172         KASSERT(TAILQ_EMPTY(&object->memq),
  173             ("object %p has resident pages",
  174             object));
  175         KASSERT(object->paging_in_progress == 0,
  176             ("object %p paging_in_progress = %d",
  177             object, object->paging_in_progress));
  178         KASSERT(object->resident_page_count == 0,
  179             ("object %p resident_page_count = %d",
  180             object, object->resident_page_count));
  181         KASSERT(object->shadow_count == 0,
  182             ("object %p shadow_count = %d",
  183             object, object->shadow_count));
  184 }
  185 #endif
  186 
  187 static int
  188 vm_object_zinit(void *mem, int size, int flags)
  189 {
  190         vm_object_t object;
  191 
  192         object = (vm_object_t)mem;
  193         bzero(&object->mtx, sizeof(object->mtx));
  194         VM_OBJECT_LOCK_INIT(object, "standard object");
  195 
  196         /* These are true for any object that has been freed */
  197         object->paging_in_progress = 0;
  198         object->resident_page_count = 0;
  199         object->shadow_count = 0;
  200         return (0);
  201 }
  202 
  203 void
  204 _vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
  205 {
  206         int incr;
  207 
  208         TAILQ_INIT(&object->memq);
  209         LIST_INIT(&object->shadow_head);
  210 
  211         object->root = NULL;
  212         object->type = type;
  213         object->size = size;
  214         object->generation = 1;
  215         object->ref_count = 1;
  216         object->flags = 0;
  217         if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP))
  218                 object->flags = OBJ_ONEMAPPING;
  219         if (size > (PQ_L2_SIZE / 3 + PQ_PRIME1))
  220                 incr = PQ_L2_SIZE / 3 + PQ_PRIME1;
  221         else
  222                 incr = size;
  223         object->pg_color = next_index;
  224         next_index = (object->pg_color + incr) & PQ_L2_MASK;
  225         object->handle = NULL;
  226         object->backing_object = NULL;
  227         object->backing_object_offset = (vm_ooffset_t) 0;
  228 
  229         mtx_lock(&vm_object_list_mtx);
  230         TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
  231         mtx_unlock(&vm_object_list_mtx);
  232 }
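
/*
 * Editor's worked example of the page-color arithmetic above.  The
 * numbers are illustrative; the real PQ_* constants are configuration-
 * dependent.
 */
#if 0
/*
 * Assume PQ_L2_SIZE = 256, PQ_L2_MASK = 255, and PQ_PRIME1 = 31.  A
 * 4-page object gets incr = 4, so successive small objects take colors
 * 0, 4, 8, ..., spreading their pages across cache bins.  An object
 * larger than 256/3 + 31 = 116 pages instead advances next_index by
 * 116, and the final "& PQ_L2_MASK" wraps the color into [0, 255].
 */
#endif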
  233 
  234 /*
  235  *      vm_object_init:
  236  *
  237  *      Initialize the VM objects module.
  238  */
  239 void
  240 vm_object_init(void)
  241 {
  242         TAILQ_INIT(&vm_object_list);
  243         mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF);
  244         
  245         VM_OBJECT_LOCK_INIT(&kernel_object_store, "kernel object");
  246         _vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
  247             kernel_object);
  248 
  249         VM_OBJECT_LOCK_INIT(&kmem_object_store, "kmem object");
  250         _vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
  251             kmem_object);
  252 
  253         /*
  254          * The lock portion of struct vm_object must be type stable due
  255          * to vm_pageout_fallback_object_lock locking a vm object
  256          * without holding any references to it.
  257          */
  258         obj_zone = uma_zcreate("VM OBJECT", sizeof (struct vm_object), NULL,
  259 #ifdef INVARIANTS
  260             vm_object_zdtor,
  261 #else
  262             NULL,
  263 #endif
  264             vm_object_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM|UMA_ZONE_NOFREE);
  265         uma_prealloc(obj_zone, VM_OBJECTS_INIT);
  266 }
  267 
  268 void
  269 vm_object_clear_flag(vm_object_t object, u_short bits)
  270 {
  271 
  272         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
  273         object->flags &= ~bits;
  274 }
  275 
  276 void
  277 vm_object_pip_add(vm_object_t object, short i)
  278 {
  279 
  280         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
  281         object->paging_in_progress += i;
  282 }
  283 
  284 void
  285 vm_object_pip_subtract(vm_object_t object, short i)
  286 {
  287 
  288         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
  289         object->paging_in_progress -= i;
  290 }
  291 
  292 void
  293 vm_object_pip_wakeup(vm_object_t object)
  294 {
  295 
  296         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
  297         object->paging_in_progress--;
  298         if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
  299                 vm_object_clear_flag(object, OBJ_PIPWNT);
  300                 wakeup(object);
  301         }
  302 }
  303 
  304 void
  305 vm_object_pip_wakeupn(vm_object_t object, short i)
  306 {
  307 
  308         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
  309         if (i)
  310                 object->paging_in_progress -= i;
  311         if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
  312                 vm_object_clear_flag(object, OBJ_PIPWNT);
  313                 wakeup(object);
  314         }
  315 }
  316 
  317 void
  318 vm_object_pip_wait(vm_object_t object, char *waitid)
  319 {
  320 
  321         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
  322         while (object->paging_in_progress) {
  323                 object->flags |= OBJ_PIPWNT;
  324                 msleep(object, VM_OBJECT_MTX(object), PVM, waitid, 0);
  325         }
  326 }
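
/*
 * Editor's sketch of the typical (hypothetical) caller pattern for the
 * paging-in-progress counter: bracket page I/O so the object cannot be
 * terminated while pages are in flight.
 */
#if 0
VM_OBJECT_LOCK(object);
vm_object_pip_add(object, 1);           /* pin the object for I/O */
VM_OBJECT_UNLOCK(object);
/* ... issue and complete the page I/O ... */
VM_OBJECT_LOCK(object);
vm_object_pip_wakeup(object);           /* drop count, wake OBJ_PIPWNT waiters */
VM_OBJECT_UNLOCK(object);
#endif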
  327 
  328 /*
  329  *      vm_object_allocate:
  330  *
  331  *      Returns a new object with the given size.
  332  */
  333 vm_object_t
  334 vm_object_allocate(objtype_t type, vm_pindex_t size)
  335 {
  336         vm_object_t object;
  337 
  338         object = (vm_object_t)uma_zalloc(obj_zone, M_WAITOK);
  339         _vm_object_allocate(type, size, object);
  340         return (object);
  341 }
  342 
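/*
 * Editor's sketch (hypothetical caller, "nbytes" assumed): pair
 * vm_object_allocate() with vm_object_deallocate(), which releases the
 * initial reference and, at a count of zero, terminates the object.
 */
#if 0
vm_object_t obj;

obj = vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(nbytes));
/* ... insert the object into a map entry, fault pages in ... */
vm_object_deallocate(obj);
#endif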
  343 
  344 /*
  345  *      vm_object_reference:
  346  *
  347  *      Gets another reference to the given object.  Note: OBJ_DEAD
  348  *      objects can be referenced during final cleaning.
  349  */
  350 void
  351 vm_object_reference(vm_object_t object)
  352 {
  353         struct vnode *vp;
  354         int flags;
  355 
  356         if (object == NULL)
  357                 return;
  358         VM_OBJECT_LOCK(object);
  359         object->ref_count++;
  360         if (object->type == OBJT_VNODE) {
  361                 vp = object->handle;
  362                 VI_LOCK(vp);
  363                 VM_OBJECT_UNLOCK(object);
  364                 for (flags = LK_INTERLOCK; vget(vp, flags, curthread);
  365                      flags = 0)
  366                         printf("vm_object_reference: delay in vget\n");
  367         } else
  368                 VM_OBJECT_UNLOCK(object);
  369 }
  370 
  371 /*
  372  *      vm_object_reference_locked:
  373  *
  374  *      Gets another reference to the given object.
  375  *
  376  *      The object must be locked.
  377  */
  378 void
  379 vm_object_reference_locked(vm_object_t object)
  380 {
  381         struct vnode *vp;
  382 
  383         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
  384         KASSERT((object->flags & OBJ_DEAD) == 0,
  385             ("vm_object_reference_locked: dead object referenced"));
  386         object->ref_count++;
  387         if (object->type == OBJT_VNODE) {
  388                 vp = object->handle;
  389                 vref(vp);
  390         }
  391 }
  392 
  393 /*
  394  * Handle deallocating an object of type OBJT_VNODE.
  395  */
  396 void
  397 vm_object_vndeallocate(vm_object_t object)
  398 {
  399         struct vnode *vp = (struct vnode *) object->handle;
  400 
  401         VFS_ASSERT_GIANT(vp->v_mount);
  402         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
  403         KASSERT(object->type == OBJT_VNODE,
  404             ("vm_object_vndeallocate: not a vnode object"));
  405         KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
  406 #ifdef INVARIANTS
  407         if (object->ref_count == 0) {
  408                 vprint("vm_object_vndeallocate", vp);
  409                 panic("vm_object_vndeallocate: bad object reference count");
  410         }
  411 #endif
  412 
  413         object->ref_count--;
  414         if (object->ref_count == 0) {
  415                 mp_fixme("Unlocked vflag access.");
  416                 vp->v_vflag &= ~VV_TEXT;
  417         }
  418         VM_OBJECT_UNLOCK(object);
  419         /*
  420          * vrele may need a vop lock
  421          */
  422         vrele(vp);
  423 }
  424 
  425 /*
  426  *      vm_object_deallocate:
  427  *
  428  *      Release a reference to the specified object,
  429  *      gained either through a vm_object_allocate
  430  *      or a vm_object_reference call.  When all references
  431  *      are gone, storage associated with this object
  432  *      may be relinquished.
  433  *
  434  *      No object may be locked.
  435  */
  436 void
  437 vm_object_deallocate(vm_object_t object)
  438 {
  439         vm_object_t temp;
  440 
  441         while (object != NULL) {
  442                 int vfslocked;
  443                 /*
  444                  * In general, the object should be locked when working with
  445                  * its type.  In this case, in order to maintain proper lock
  446                  * ordering, an exception is possible because a vnode-backed
  447                  * object never changes its type.
  448                  */
  449                 vfslocked = 0;
  450                 if (object->type == OBJT_VNODE) {
  451                         struct vnode *vp = (struct vnode *) object->handle;
  452                         vfslocked = VFS_LOCK_GIANT(vp->v_mount);
  453                 }
  454                 VM_OBJECT_LOCK(object);
  455                 if (object->type == OBJT_VNODE) {
  456                         vm_object_vndeallocate(object);
  457                         VFS_UNLOCK_GIANT(vfslocked);
  458                         return;
  459                 }
  460 
  461                 KASSERT(object->ref_count != 0,
  462                         ("vm_object_deallocate: object deallocated too many times: %d", object->type));
  463 
  464                 /*
  465                  * If the reference count goes to 0 we start calling
  466                  * vm_object_terminate() on the object chain.
  467                  * A ref count of 1 may be a special case depending on the
  468                  * shadow count being 0 or 1.
  469                  */
  470                 object->ref_count--;
  471                 if (object->ref_count > 1) {
  472                         VM_OBJECT_UNLOCK(object);
  473                         return;
  474                 } else if (object->ref_count == 1) {
  475                         if (object->shadow_count == 0) {
  476                                 vm_object_set_flag(object, OBJ_ONEMAPPING);
  477                         } else if ((object->shadow_count == 1) &&
  478                             (object->handle == NULL) &&
  479                             (object->type == OBJT_DEFAULT ||
  480                              object->type == OBJT_SWAP)) {
  481                                 vm_object_t robject;
  482 
  483                                 robject = LIST_FIRST(&object->shadow_head);
  484                                 KASSERT(robject != NULL,
  485                                     ("vm_object_deallocate: ref_count: %d, shadow_count: %d",
  486                                          object->ref_count,
  487                                          object->shadow_count));
  488                                 if (!VM_OBJECT_TRYLOCK(robject)) {
  489                                         /*
  490                                          * Avoid a potential deadlock.
  491                                          */
  492                                         object->ref_count++;
  493                                         VM_OBJECT_UNLOCK(object);
  494                                         /*
  495                                          * More likely than not the thread
  496                                          * holding robject's lock has lower
  497                                          * priority than the current thread.
  498                                          * Let the lower priority thread run.
  499                                          */
  500                                         tsleep(&proc0, PVM, "vmo_de", 1);
  501                                         continue;
  502                                 }
  503                                 /*
  504                                  * Collapse object into its shadow unless its
  505                                  * shadow is dead.  In that case, object will
  506                                  * be deallocated by the thread that is
  507                                  * deallocating its shadow.
  508                                  */
  509                                 if ((robject->flags & OBJ_DEAD) == 0 &&
  510                                     (robject->handle == NULL) &&
  511                                     (robject->type == OBJT_DEFAULT ||
  512                                      robject->type == OBJT_SWAP)) {
  513 
  514                                         robject->ref_count++;
  515 retry:
  516                                         if (robject->paging_in_progress) {
  517                                                 VM_OBJECT_UNLOCK(object);
  518                                                 vm_object_pip_wait(robject,
  519                                                     "objde1");
  520                                                 VM_OBJECT_LOCK(object);
  521                                                 goto retry;
  522                                         } else if (object->paging_in_progress) {
  523                                                 VM_OBJECT_UNLOCK(robject);
  524                                                 object->flags |= OBJ_PIPWNT;
  525                                                 msleep(object,
  526                                                     VM_OBJECT_MTX(object),
  527                                                     PDROP | PVM, "objde2", 0);
  528                                                 VM_OBJECT_LOCK(robject);
  529                                                 VM_OBJECT_LOCK(object);
  530                                                 goto retry;
  531                                         }
  532                                         VM_OBJECT_UNLOCK(object);
  533                                         if (robject->ref_count == 1) {
  534                                                 robject->ref_count--;
  535                                                 object = robject;
  536                                                 goto doterm;
  537                                         }
  538                                         object = robject;
  539                                         vm_object_collapse(object);
  540                                         VM_OBJECT_UNLOCK(object);
  541                                         continue;
  542                                 }
  543                                 VM_OBJECT_UNLOCK(robject);
  544                         }
  545                         VM_OBJECT_UNLOCK(object);
  546                         return;
  547                 }
  548 doterm:
  549                 temp = object->backing_object;
  550                 if (temp != NULL) {
  551                         VM_OBJECT_LOCK(temp);
  552                         LIST_REMOVE(object, shadow_list);
  553                         temp->shadow_count--;
  554                         temp->generation++;
  555                         VM_OBJECT_UNLOCK(temp);
  556                         object->backing_object = NULL;
  557                 }
  558                 /*
  559                  * Don't double-terminate, we could be in a termination
  560                  * recursion due to the terminate having to sync data
  561                  * to disk.
  562                  */
  563                 if ((object->flags & OBJ_DEAD) == 0)
  564                         vm_object_terminate(object);
  565                 else
  566                         VM_OBJECT_UNLOCK(object);
  567                 object = temp;
  568         }
  569 }
  570 
  571 /*
  572  *      vm_object_terminate actually destroys the specified object, freeing
  573  *      up all previously used resources.
  574  *
  575  *      The object must be locked.
  576  *      This routine may block.
  577  */
  578 void
  579 vm_object_terminate(vm_object_t object)
  580 {
  581         vm_page_t p;
  582 
  583         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
  584 
  585         /*
  586          * Make sure no one uses us.
  587          */
  588         vm_object_set_flag(object, OBJ_DEAD);
  589 
  590         /*
  591          * wait for the pageout daemon to be done with the object
  592          */
  593         vm_object_pip_wait(object, "objtrm");
  594 
  595         KASSERT(!object->paging_in_progress,
  596                 ("vm_object_terminate: pageout in progress"));
  597 
  598         /*
  599          * Clean and free the pages, as appropriate. All references to the
  600          * object are gone, so we don't need to lock it.
  601          */
  602         if (object->type == OBJT_VNODE) {
  603                 struct vnode *vp = (struct vnode *)object->handle;
  604 
  605                 /*
  606                  * Clean pages and flush buffers.
  607                  */
  608                 vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
  609                 VM_OBJECT_UNLOCK(object);
  610 
  611                 vinvalbuf(vp, V_SAVE, NULL, 0, 0);
  612 
  613                 VM_OBJECT_LOCK(object);
  614         }
  615 
  616         KASSERT(object->ref_count == 0, 
  617                 ("vm_object_terminate: object with references, ref_count=%d",
  618                 object->ref_count));
  619 
  620         /*
  621          * Now free any remaining pages. For internal objects, this also
  622          * removes them from paging queues. Don't free wired pages, just
  623          * remove them from the object. 
  624          */
  625         vm_page_lock_queues();
  626         while ((p = TAILQ_FIRST(&object->memq)) != NULL) {
  627                 KASSERT(!p->busy && (p->flags & PG_BUSY) == 0,
  628                         ("vm_object_terminate: freeing busy page %p "
  629                         "p->busy = %d, p->flags %x\n", p, p->busy, p->flags));
  630                 if (p->wire_count == 0) {
  631                         vm_page_free(p);
  632                         cnt.v_pfree++;
  633                 } else {
  634                         vm_page_remove(p);
  635                 }
  636         }
  637         vm_page_unlock_queues();
  638 
  639         /*
  640          * Let the pager know object is dead.
  641          */
  642         vm_pager_deallocate(object);
  643         VM_OBJECT_UNLOCK(object);
  644 
  645         /*
  646          * Remove the object from the global object list.
  647          */
  648         mtx_lock(&vm_object_list_mtx);
  649         TAILQ_REMOVE(&vm_object_list, object, object_list);
  650         mtx_unlock(&vm_object_list_mtx);
  651 
  652         /*
  653          * Free the space for the object.
  654          */
  655         uma_zfree(obj_zone, object);
  656 }
  657 
  658 /*
  659  *      vm_object_page_clean
  660  *
  661  *      Clean all dirty pages in the specified range of object.  Leaves page 
  662  *      on whatever queue it is currently on.   If NOSYNC is set then do not
  663  *      write out pages with PG_NOSYNC set (originally comes from MAP_NOSYNC),
  664  *      leaving the object dirty.
  665  *
  666  *      When stuffing pages asynchronously, allow clustering.  XXX we need a
  667  *      synchronous clustering mode implementation.
  668  *
  669  *      Odd semantics: if start == end, we clean everything.
  670  *
  671  *      The object must be locked.
  672  */
  673 void
  674 vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int flags)
  675 {
  676         vm_page_t p, np;
  677         vm_pindex_t tstart, tend;
  678         vm_pindex_t pi;
  679         int clearobjflags;
  680         int pagerflags;
  681         int curgeneration;
  682 
  683         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
  684         if (object->type != OBJT_VNODE ||
  685                 (object->flags & OBJ_MIGHTBEDIRTY) == 0)
  686                 return;
  687 
  688         pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) ? VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK;
  689         pagerflags |= (flags & OBJPC_INVAL) ? VM_PAGER_PUT_INVAL : 0;
  690 
  691         vm_object_set_flag(object, OBJ_CLEANING);
  692 
  693         tstart = start;
  694         if (end == 0) {
  695                 tend = object->size;
  696         } else {
  697                 tend = end;
  698         }
  699 
  700         vm_page_lock_queues();
  701         /*
  702          * If the caller is smart and only msync()s a range he knows is
  703          * dirty, we may be able to avoid an object scan.  This results in
  704  * a phenomenal improvement in performance.  We cannot do this
  705  * as a matter of course because the object may be huge - e.g.
  706  * the size might be in the gigabytes or terabytes.
  707          */
  708         if (msync_flush_flags & MSYNC_FLUSH_HARDSEQ) {
  709                 vm_pindex_t tscan;
  710                 int scanlimit;
  711                 int scanreset;
  712 
  713                 scanreset = object->resident_page_count / EASY_SCAN_FACTOR;
  714                 if (scanreset < 16)
  715                         scanreset = 16;
  716                 pagerflags |= VM_PAGER_IGNORE_CLEANCHK;
  717 
  718                 scanlimit = scanreset;
  719                 tscan = tstart;
  720                 while (tscan < tend) {
  721                         curgeneration = object->generation;
  722                         p = vm_page_lookup(object, tscan);
  723                         if (p == NULL || p->valid == 0 ||
  724                             (p->queue - p->pc) == PQ_CACHE) {
  725                                 if (--scanlimit == 0)
  726                                         break;
  727                                 ++tscan;
  728                                 continue;
  729                         }
  730                         vm_page_test_dirty(p);
  731                         if ((p->dirty & p->valid) == 0) {
  732                                 if (--scanlimit == 0)
  733                                         break;
  734                                 ++tscan;
  735                                 continue;
  736                         }
  737                         /*
  738                          * If we have been asked to skip nosync pages and 
  739                          * this is a nosync page, we can't continue.
  740                          */
  741                         if ((flags & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) {
  742                                 if (--scanlimit == 0)
  743                                         break;
  744                                 ++tscan;
  745                                 continue;
  746                         }
  747                         scanlimit = scanreset;
  748 
  749                         /*
  750                          * This returns 0 if it was unable to busy the first
  751                          * page (i.e. had to sleep).
  752                          */
  753                         tscan += vm_object_page_collect_flush(object, p, curgeneration, pagerflags);
  754                 }
  755 
  756                 /*
  757                  * If everything was dirty and we flushed it successfully,
  758                  * and the requested range is not the entire object, we
  759                  * don't have to mess with CLEANCHK or MIGHTBEDIRTY and can
  760                  * return immediately.
  761                  */
  762                 if (tscan >= tend && (tstart || tend < object->size)) {
  763                         vm_page_unlock_queues();
  764                         vm_object_clear_flag(object, OBJ_CLEANING);
  765                         return;
  766                 }
  767                 pagerflags &= ~VM_PAGER_IGNORE_CLEANCHK;
  768         }
  769 
  770         /*
  771          * Generally set CLEANCHK interlock and make the page read-only so
  772          * we can then clear the object flags.
  773          *
  774          * However, if this is a nosync mmap then the object is likely to 
  775          * stay dirty so do not mess with the page and do not clear the
  776          * object flags.
  777          */
  778         clearobjflags = 1;
  779         TAILQ_FOREACH(p, &object->memq, listq) {
  780                 vm_page_flag_set(p, PG_CLEANCHK);
  781                 if ((flags & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC))
  782                         clearobjflags = 0;
  783                 else
  784                         pmap_page_protect(p, VM_PROT_READ);
  785         }
  786 
  787         if (clearobjflags && (tstart == 0) && (tend == object->size)) {
  788                 struct vnode *vp;
  789 
  790                 vm_object_clear_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
  791                 if (object->type == OBJT_VNODE &&
  792                     (vp = (struct vnode *)object->handle) != NULL) {
  793                         VI_LOCK(vp);
  794                         if (vp->v_iflag & VI_OBJDIRTY)
  795                                 vp->v_iflag &= ~VI_OBJDIRTY;
  796                         VI_UNLOCK(vp);
  797                 }
  798         }
  799 
  800 rescan:
  801         curgeneration = object->generation;
  802 
  803         for (p = TAILQ_FIRST(&object->memq); p; p = np) {
  804                 int n;
  805 
  806                 np = TAILQ_NEXT(p, listq);
  807 
  808 again:
  809                 pi = p->pindex;
  810                 if (((p->flags & PG_CLEANCHK) == 0) ||
  811                         (pi < tstart) || (pi >= tend) ||
  812                         (p->valid == 0) ||
  813                         ((p->queue - p->pc) == PQ_CACHE)) {
  814                         vm_page_flag_clear(p, PG_CLEANCHK);
  815                         continue;
  816                 }
  817 
  818                 vm_page_test_dirty(p);
  819                 if ((p->dirty & p->valid) == 0) {
  820                         vm_page_flag_clear(p, PG_CLEANCHK);
  821                         continue;
  822                 }
  823 
  824                 /*
  825                  * If we have been asked to skip nosync pages and this is a
  826                  * nosync page, skip it.  Note that the object flags were
  827                  * not cleared in this case so we do not have to set them.
  828                  */
  829                 if ((flags & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) {
  830                         vm_page_flag_clear(p, PG_CLEANCHK);
  831                         continue;
  832                 }
  833 
  834                 n = vm_object_page_collect_flush(object, p,
  835                         curgeneration, pagerflags);
  836                 if (n == 0)
  837                         goto rescan;
  838 
  839                 if (object->generation != curgeneration)
  840                         goto rescan;
  841 
  842                 /*
  843                  * Try to optimize the next page.  If we can't we pick up
  844                  * our (random) scan where we left off.
  845                  */
  846                 if (msync_flush_flags & MSYNC_FLUSH_SOFTSEQ) {
  847                         if ((p = vm_page_lookup(object, pi + n)) != NULL)
  848                                 goto again;
  849                 }
  850         }
  851         vm_page_unlock_queues();
  852 #if 0
  853         VOP_FSYNC(vp, (pagerflags & VM_PAGER_PUT_SYNC)?MNT_WAIT:0, curproc);
  854 #endif
  855 
  856         vm_object_clear_flag(object, OBJ_CLEANING);
  857         return;
  858 }
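
/*
 * Editor's illustrative calls, showing the odd start == end semantics
 * documented above (the index values are hypothetical).
 */
#if 0
/* Clean the entire object synchronously, as vm_object_terminate() does: */
vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
/* Clean only pages [16, 32), asynchronously, allowing clustering: */
vm_object_page_clean(object, 16, 32, 0);
#endif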
  859 
  860 static int
  861 vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration, int pagerflags)
  862 {
  863         int runlen;
  864         int maxf;
  865         int chkb;
  866         int maxb;
  867         int i;
  868         vm_pindex_t pi;
  869         vm_page_t maf[vm_pageout_page_count];
  870         vm_page_t mab[vm_pageout_page_count];
  871         vm_page_t ma[vm_pageout_page_count];
  872 
  873         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
  874         pi = p->pindex;
  875         while (vm_page_sleep_if_busy(p, TRUE, "vpcwai")) {
  876                 vm_page_lock_queues();
  877                 if (object->generation != curgeneration) {
  878                         return(0);
  879                 }
  880         }
  881         maxf = 0;
  882         for(i = 1; i < vm_pageout_page_count; i++) {
  883                 vm_page_t tp;
  884 
  885                 if ((tp = vm_page_lookup(object, pi + i)) != NULL) {
  886                         if ((tp->flags & PG_BUSY) ||
  887                                 ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
  888                                  (tp->flags & PG_CLEANCHK) == 0) ||
  889                                 (tp->busy != 0))
  890                                 break;
  891                         if((tp->queue - tp->pc) == PQ_CACHE) {
  892                                 vm_page_flag_clear(tp, PG_CLEANCHK);
  893                                 break;
  894                         }
  895                         vm_page_test_dirty(tp);
  896                         if ((tp->dirty & tp->valid) == 0) {
  897                                 vm_page_flag_clear(tp, PG_CLEANCHK);
  898                                 break;
  899                         }
  900                         maf[ i - 1 ] = tp;
  901                         maxf++;
  902                         continue;
  903                 }
  904                 break;
  905         }
  906 
  907         maxb = 0;
  908         chkb = vm_pageout_page_count -  maxf;
  909         if (chkb) {
  910                 for(i = 1; i < chkb;i++) {
  911                         vm_page_t tp;
  912 
  913                         if ((tp = vm_page_lookup(object, pi - i)) != NULL) {
  914                                 if ((tp->flags & PG_BUSY) ||
  915                                         ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
  916                                          (tp->flags & PG_CLEANCHK) == 0) ||
  917                                         (tp->busy != 0))
  918                                         break;
  919                                 if ((tp->queue - tp->pc) == PQ_CACHE) {
  920                                         vm_page_flag_clear(tp, PG_CLEANCHK);
  921                                         break;
  922                                 }
  923                                 vm_page_test_dirty(tp);
  924                                 if ((tp->dirty & tp->valid) == 0) {
  925                                         vm_page_flag_clear(tp, PG_CLEANCHK);
  926                                         break;
  927                                 }
  928                                 mab[ i - 1 ] = tp;
  929                                 maxb++;
  930                                 continue;
  931                         }
  932                         break;
  933                 }
  934         }
  935 
  936         for(i = 0; i < maxb; i++) {
  937                 int index = (maxb - i) - 1;
  938                 ma[index] = mab[i];
  939                 vm_page_flag_clear(ma[index], PG_CLEANCHK);
  940         }
  941         vm_page_flag_clear(p, PG_CLEANCHK);
  942         ma[maxb] = p;
  943         for(i = 0; i < maxf; i++) {
  944                 int index = (maxb + i) + 1;
  945                 ma[index] = maf[i];
  946                 vm_page_flag_clear(ma[index], PG_CLEANCHK);
  947         }
  948         runlen = maxb + maxf + 1;
  949 
  950         vm_pageout_flush(ma, runlen, pagerflags);
  951         for (i = 0; i < runlen; i++) {
  952                 if (ma[i]->valid & ma[i]->dirty) {
  953                         pmap_page_protect(ma[i], VM_PROT_READ);
  954                         vm_page_flag_set(ma[i], PG_CLEANCHK);
  955 
  956                         /*
  957                          * maxf will end up being the actual number of pages
  958                          * we wrote out contiguously, non-inclusive of the
  959                          * first page.  We do not count look-behind pages.
  960                          */
  961                         if (i >= maxb + 1 && (maxf > i - maxb - 1))
  962                                 maxf = i - maxb - 1;
  963                 }
  964         }
  965         return(maxf + 1);
  966 }
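
/*
 * Editor's worked example of the run assembly above (counts are
 * illustrative): with maxb = 2 look-behind pages and maxf = 3
 * look-ahead pages around p, the array handed to vm_pageout_flush()
 * is ordered by pindex:
 *
 *      ma[0] = mab[1]  (pi - 2)
 *      ma[1] = mab[0]  (pi - 1)
 *      ma[2] = p       (pi)
 *      ma[3] = maf[0]  (pi + 1)
 *      ma[4] = maf[1]  (pi + 2)
 *      ma[5] = maf[2]  (pi + 3)
 *
 * and runlen = maxb + maxf + 1 = 6.
 */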
  967 
  968 /*
  969  * Note that there is absolutely no sense in writing out
  970  * anonymous objects, so we track down the vnode object
  971  * to write out.
  972  * We invalidate (remove) all pages from the address space
  973  * for semantic correctness.
  974  *
  975  * Note: certain anonymous maps, such as MAP_NOSYNC maps,
  976  * may start out with a NULL object.
  977  */
  978 void
  979 vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
  980     boolean_t syncio, boolean_t invalidate)
  981 {
  982         vm_object_t backing_object;
  983         struct vnode *vp;
  984         int flags;
  985 
  986         if (object == NULL)
  987                 return;
  988         VM_OBJECT_LOCK(object);
  989         while ((backing_object = object->backing_object) != NULL) {
  990                 VM_OBJECT_LOCK(backing_object);
  991                 offset += object->backing_object_offset;
  992                 VM_OBJECT_UNLOCK(object);
  993                 object = backing_object;
  994                 if (object->size < OFF_TO_IDX(offset + size))
  995                         size = IDX_TO_OFF(object->size) - offset;
  996         }
  997         /*
  998          * Flush pages if writing is allowed, invalidate them
  999          * if invalidation requested.  Pages undergoing I/O
 1000          * will be ignored by vm_object_page_remove().
 1001          *
 1002          * We cannot lock the vnode and then wait for paging
 1003          * to complete without deadlocking against vm_fault.
 1004          * Instead we simply call vm_object_page_remove() and
 1005          * allow it to block internally on a page-by-page
 1006          * basis when it encounters pages undergoing async
 1007          * I/O.
 1008          */
 1009         if (object->type == OBJT_VNODE &&
 1010             (object->flags & OBJ_MIGHTBEDIRTY) != 0) {
 1011                 int vfslocked;
 1012                 vp = object->handle;
 1013                 VM_OBJECT_UNLOCK(object);
 1014                 vfslocked = VFS_LOCK_GIANT(vp->v_mount);
 1015                 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
 1016                 flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
 1017                 flags |= invalidate ? OBJPC_INVAL : 0;
 1018                 VM_OBJECT_LOCK(object);
 1019                 vm_object_page_clean(object,
 1020                     OFF_TO_IDX(offset),
 1021                     OFF_TO_IDX(offset + size + PAGE_MASK),
 1022                     flags);
 1023                 VM_OBJECT_UNLOCK(object);
 1024                 VOP_UNLOCK(vp, 0, curthread);
 1025                 VFS_UNLOCK_GIANT(vfslocked);
 1026                 VM_OBJECT_LOCK(object);
 1027         }
 1028         if ((object->type == OBJT_VNODE ||
 1029              object->type == OBJT_DEVICE) && invalidate) {
 1030                 boolean_t purge;
 1031                 purge = old_msync || (object->type == OBJT_DEVICE);
 1032                 vm_object_page_remove(object,
 1033                     OFF_TO_IDX(offset),
 1034                     OFF_TO_IDX(offset + size + PAGE_MASK),
 1035                     purge ? FALSE : TRUE);
 1036         }
 1037         VM_OBJECT_UNLOCK(object);
 1038 }
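
/*
 * Editor's userland view (illustrative; file name hypothetical, error
 * handling omitted): msync(MS_SYNC | MS_INVALIDATE) on a shared file
 * mapping reaches vm_object_sync() with syncio and invalidate true,
 * flushing dirty pages via vm_object_page_clean() and then removing
 * them.
 */
#if 0
#include <sys/types.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
        int fd = open("/tmp/example", O_RDWR | O_CREAT, 0644);
        char *p;

        ftruncate(fd, 4096);
        p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        p[0] = 'x';                             /* dirty the page */
        msync(p, 4096, MS_SYNC | MS_INVALIDATE); /* flush + invalidate */
        munmap(p, 4096);
        close(fd);
        return (0);
}
#endif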
 1039 
 1040 /*
 1041  *      vm_object_madvise:
 1042  *
 1043  *      Implements the madvise function at the object/page level.
 1044  *
 1045  *      MADV_WILLNEED   (any object)
 1046  *
 1047  *          Activate the specified pages if they are resident.
 1048  *
 1049  *      MADV_DONTNEED   (any object)
 1050  *
 1051  *          Deactivate the specified pages if they are resident.
 1052  *
 1053  *      MADV_FREE       (OBJT_DEFAULT/OBJT_SWAP objects,
 1054  *                       OBJ_ONEMAPPING only)
 1055  *
 1056  *          Deactivate and clean the specified pages if they are
 1057  *          resident.  This permits the process to reuse the pages
 1058  *          without faulting or the kernel to reclaim the pages
 1059  *          without I/O.
 1060  */
 1061 void
 1062 vm_object_madvise(vm_object_t object, vm_pindex_t pindex, int count, int advise)
 1063 {
 1064         vm_pindex_t end, tpindex;
 1065         vm_object_t backing_object, tobject;
 1066         vm_page_t m;
 1067 
 1068         if (object == NULL)
 1069                 return;
 1070         VM_OBJECT_LOCK(object);
 1071         end = pindex + count;
 1072         /*
 1073          * Locate and adjust resident pages
 1074          */
 1075         for (; pindex < end; pindex += 1) {
 1076 relookup:
 1077                 tobject = object;
 1078                 tpindex = pindex;
 1079 shadowlookup:
 1080                 /*
 1081                  * MADV_FREE only operates on OBJT_DEFAULT or OBJT_SWAP pages
 1082                  * and those pages must be OBJ_ONEMAPPING.
 1083                  */
 1084                 if (advise == MADV_FREE) {
 1085                         if ((tobject->type != OBJT_DEFAULT &&
 1086                              tobject->type != OBJT_SWAP) ||
 1087                             (tobject->flags & OBJ_ONEMAPPING) == 0) {
 1088                                 goto unlock_tobject;
 1089                         }
 1090                 }
 1091                 m = vm_page_lookup(tobject, tpindex);
 1092                 if (m == NULL) {
 1093                         /*
 1094                          * There may be swap even if there is no backing page
 1095                          */
 1096                         if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
 1097                                 swap_pager_freespace(tobject, tpindex, 1);
 1098                         /*
 1099                          * next object
 1100                          */
 1101                         backing_object = tobject->backing_object;
 1102                         if (backing_object == NULL)
 1103                                 goto unlock_tobject;
 1104                         VM_OBJECT_LOCK(backing_object);
 1105                         tpindex += OFF_TO_IDX(tobject->backing_object_offset);
 1106                         if (tobject != object)
 1107                                 VM_OBJECT_UNLOCK(tobject);
 1108                         tobject = backing_object;
 1109                         goto shadowlookup;
 1110                 }
 1111                 /*
 1112                  * If the page is busy or not in a normal active state,
 1113                  * we skip it.  If the page is not managed there are no
 1114                  * page queues to mess with.  Things can break if we mess
 1115                  * with pages in any of the below states.
 1116                  */
 1117                 vm_page_lock_queues();
 1118                 if (m->hold_count ||
 1119                     m->wire_count ||
 1120                     (m->flags & PG_UNMANAGED) ||
 1121                     m->valid != VM_PAGE_BITS_ALL) {
 1122                         vm_page_unlock_queues();
 1123                         goto unlock_tobject;
 1124                 }
 1125                 if ((m->flags & PG_BUSY) || m->busy) {
 1126                         vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
 1127                         if (object != tobject)
 1128                                 VM_OBJECT_UNLOCK(object);
 1129                         VM_OBJECT_UNLOCK(tobject);
 1130                         msleep(m, &vm_page_queue_mtx, PDROP | PVM, "madvpo", 0);
 1131                         VM_OBJECT_LOCK(object);
 1132                         goto relookup;
 1133                 }
 1134                 if (advise == MADV_WILLNEED) {
 1135                         vm_page_activate(m);
 1136                 } else if (advise == MADV_DONTNEED) {
 1137                         vm_page_dontneed(m);
 1138                 } else if (advise == MADV_FREE) {
 1139                         /*
 1140                          * Mark the page clean.  This will allow the page
 1141                          * to be freed up by the system.  However, such pages
 1142                          * are often reused quickly by malloc()/free()
 1143                          * so we do not do anything that would cause
 1144                          * a page fault if we can help it.
 1145                          *
 1146                          * Specifically, we do not try to actually free
 1147                          * the page now nor do we try to put it in the
 1148                          * cache (which would cause a page fault on reuse).
 1149                          *
 1150                          * But we do make the page as freeable as we
 1151                          * can without actually taking the step of unmapping
 1152                          * it.
 1153                          */
 1154                         pmap_clear_modify(m);
 1155                         m->dirty = 0;
 1156                         m->act_count = 0;
 1157                         vm_page_dontneed(m);
 1158                 }
 1159                 vm_page_unlock_queues();
 1160                 if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
 1161                         swap_pager_freespace(tobject, tpindex, 1);
 1162 unlock_tobject:
 1163                 if (tobject != object)
 1164                         VM_OBJECT_UNLOCK(tobject);
 1165         }       
 1166         VM_OBJECT_UNLOCK(object);
 1167 }
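
/*
 * Editor's userland view (illustrative): the advice values map onto
 * the object/page-level behavior documented above.
 */
#if 0
#include <sys/types.h>
#include <sys/mman.h>

void
example(void *addr, size_t len)
{
        madvise(addr, len, MADV_WILLNEED);      /* activate resident pages */
        madvise(addr, len, MADV_DONTNEED);      /* deactivate them */
        madvise(addr, len, MADV_FREE);          /* anonymous memory: mark
                                                   clean, reclaimable
                                                   without I/O */
}
#endif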
 1168 
 1169 /*
 1170  *      vm_object_shadow:
 1171  *
 1172  *      Create a new object which is backed by the
 1173  *      specified existing object range.  The source
 1174  *      object reference is deallocated.
 1175  *
 1176  *      The new object and offset into that object
 1177  *      are returned in the source parameters.
 1178  */
 1179 void
 1180 vm_object_shadow(
 1181         vm_object_t *object,    /* IN/OUT */
 1182         vm_ooffset_t *offset,   /* IN/OUT */
 1183         vm_size_t length)
 1184 {
 1185         vm_object_t source;
 1186         vm_object_t result;
 1187 
 1188         source = *object;
 1189 
 1190         /*
 1191          * Don't create the new object if the old object isn't shared.
 1192          */
 1193         if (source != NULL) {
 1194                 VM_OBJECT_LOCK(source);
 1195                 if (source->ref_count == 1 &&
 1196                     source->handle == NULL &&
 1197                     (source->type == OBJT_DEFAULT ||
 1198                      source->type == OBJT_SWAP)) {
 1199                         VM_OBJECT_UNLOCK(source);
 1200                         return;
 1201                 }
 1202                 VM_OBJECT_UNLOCK(source);
 1203         }
 1204 
 1205         /*
 1206          * Allocate a new object with the given length.
 1207          */
 1208         result = vm_object_allocate(OBJT_DEFAULT, length);
 1209 
 1210         /*
 1211          * The new object shadows the source object, adding a reference to it.
 1212          * Our caller changes his reference to point to the new object,
 1213          * removing a reference to the source object.  Net result: no change
 1214          * of reference count.
 1215          *
 1216          * Try to optimize the result object's page color when shadowing
 1217          * in order to maintain page coloring consistency in the combined 
 1218          * shadowed object.
 1219          */
 1220         result->backing_object = source;
 1221         /*
 1222          * Store the offset into the source object, and fix up the offset into
 1223          * the new object.
 1224          */
 1225         result->backing_object_offset = *offset;
 1226         if (source != NULL) {
 1227                 VM_OBJECT_LOCK(source);
 1228                 LIST_INSERT_HEAD(&source->shadow_head, result, shadow_list);
 1229                 source->shadow_count++;
 1230                 source->generation++;
 1231                 if (length < source->size)
 1232                         length = source->size;
 1233                 if (length > PQ_L2_SIZE / 3 + PQ_PRIME1 ||
 1234                     source->generation > 1)
 1235                         length = PQ_L2_SIZE / 3 + PQ_PRIME1;
 1236                 result->pg_color = (source->pg_color +
 1237                     length * source->generation) & PQ_L2_MASK;
 1238                 result->flags |= source->flags & OBJ_NEEDGIANT;
 1239                 VM_OBJECT_UNLOCK(source);
 1240                 next_index = (result->pg_color + PQ_L2_SIZE / 3 + PQ_PRIME1) &
 1241                     PQ_L2_MASK;
 1242         }
 1243 
 1244 
 1245         /*
 1246          * Return the new things
 1247          */
 1248         *offset = 0;
 1249         *object = result;
 1250 }
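
/*
 * Editor's sketch of the copy-on-write caller pattern (hypothetical,
 * but the shape of how vm_map uses this routine): replace a map
 * entry's object with a fresh shadow of it.
 */
#if 0
vm_object_t obj = entry->object.vm_object;
vm_ooffset_t off = entry->offset;

vm_object_shadow(&obj, &off, entry->end - entry->start);
entry->object.vm_object = obj;  /* now the shadow (or the old object,
                                   if it was not shared) */
entry->offset = off;            /* reset to 0 when a shadow was made */
#endif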
 1251 
 1252 /*
 1253  *      vm_object_split:
 1254  *
 1255  * Split the pages in a map entry into a new object.  This affords
 1256  * easier removal of unused pages, and keeps object inheritance from
 1257  * being a negative impact on memory usage.
 1258  */
 1259 void
 1260 vm_object_split(vm_map_entry_t entry)
 1261 {
 1262         vm_page_t m;
 1263         vm_object_t orig_object, new_object, source;
 1264         vm_pindex_t offidxstart, offidxend;
 1265         vm_size_t idx, size;
 1266 
 1267         orig_object = entry->object.vm_object;
 1268         if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP)
 1269                 return;
 1270         if (orig_object->ref_count <= 1)
 1271                 return;
 1272         VM_OBJECT_UNLOCK(orig_object);
 1273 
 1274         offidxstart = OFF_TO_IDX(entry->offset);
 1275         offidxend = offidxstart + OFF_TO_IDX(entry->end - entry->start);
 1276         size = offidxend - offidxstart;
 1277 
 1278         /*
 1279          * If swap_pager_copy() is later called, it will convert new_object
 1280          * into a swap object.
 1281          */
 1282         new_object = vm_object_allocate(OBJT_DEFAULT, size);
 1283 
 1284         VM_OBJECT_LOCK(new_object);
 1285         VM_OBJECT_LOCK(orig_object);
 1286         source = orig_object->backing_object;
 1287         if (source != NULL) {
 1288                 VM_OBJECT_LOCK(source);
 1289                 LIST_INSERT_HEAD(&source->shadow_head,
 1290                                   new_object, shadow_list);
 1291                 source->shadow_count++;
 1292                 source->generation++;
 1293                 vm_object_reference_locked(source);     /* for new_object */
 1294                 vm_object_clear_flag(source, OBJ_ONEMAPPING);
 1295                 VM_OBJECT_UNLOCK(source);
 1296                 new_object->backing_object_offset = 
 1297                         orig_object->backing_object_offset + entry->offset;
 1298                 new_object->backing_object = source;
 1299         }
 1300         new_object->flags |= orig_object->flags & OBJ_NEEDGIANT;
 1301         vm_page_lock_queues();
 1302         for (idx = 0; idx < size; idx++) {
 1303         retry:
 1304                 m = vm_page_lookup(orig_object, offidxstart + idx);
 1305                 if (m == NULL)
 1306                         continue;
 1307 
 1308                 /*
 1309                  * We must wait for pending I/O to complete before we can
 1310                  * rename the page.
 1311                  *
 1312                  * We do not have to VM_PROT_NONE the page as mappings should
 1313                  * not be changed by this operation.
 1314                  */
 1315                 if ((m->flags & PG_BUSY) || m->busy) {
 1316                         vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
 1317                         VM_OBJECT_UNLOCK(orig_object);
 1318                         VM_OBJECT_UNLOCK(new_object);
 1319                         msleep(m, &vm_page_queue_mtx, PDROP | PVM, "spltwt", 0);
 1320                         VM_OBJECT_LOCK(new_object);
 1321                         VM_OBJECT_LOCK(orig_object);
 1322                         vm_page_lock_queues();
 1323                         goto retry;
 1324                 }
 1325                 vm_page_rename(m, new_object, idx);
 1326                 /* page automatically made dirty by rename and cache handled */
 1327                 vm_page_busy(m);
 1328         }
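              /*
               * Each renamed page was left busy above and stays busy
               * across the swap_pager_copy() below (which can sleep), so
               * nothing else can touch it until new_object is fully set
               * up; the pages are woken in the loop that follows.
               */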
 1329         vm_page_unlock_queues();
 1330         if (orig_object->type == OBJT_SWAP) {
 1331                 /*
 1332                  * swap_pager_copy() can sleep, in which case the orig_object's
 1333                  * and new_object's locks are released and reacquired. 
 1334                  */
 1335                 swap_pager_copy(orig_object, new_object, offidxstart, 0);
 1336         }
 1337         VM_OBJECT_UNLOCK(orig_object);
 1338         vm_page_lock_queues();
 1339         TAILQ_FOREACH(m, &new_object->memq, listq)
 1340                 vm_page_wakeup(m);
 1341         vm_page_unlock_queues();
 1342         VM_OBJECT_UNLOCK(new_object);
 1343         entry->object.vm_object = new_object;
 1344         entry->offset = 0LL;
 1345         vm_object_deallocate(orig_object);
 1346         VM_OBJECT_LOCK(new_object);
 1347 }
 1348 
 1349 #define OBSC_TEST_ALL_SHADOWED  0x0001
 1350 #define OBSC_COLLAPSE_NOWAIT    0x0002
 1351 #define OBSC_COLLAPSE_WAIT      0x0004
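      /*
       * Modes for vm_object_backing_scan():
       *
       * OBSC_TEST_ALL_SHADOWED: check, without modifying anything, whether
       * the parent shadows every page of the backing object that is
       * visible through it.
       *
       * OBSC_COLLAPSE_NOWAIT: migrate whatever pages can be migrated
       * without sleeping, skipping busy or invalid ones (used by
       * vm_object_qcollapse()).
       *
       * OBSC_COLLAPSE_WAIT: a full collapse; the backing object is marked
       * OBJ_DEAD and the scan sleeps on busy pages.
       */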
 1352 
 1353 static int
 1354 vm_object_backing_scan(vm_object_t object, int op)
 1355 {
 1356         int r = 1;
 1357         vm_page_t p;
 1358         vm_object_t backing_object;
 1359         vm_pindex_t backing_offset_index;
 1360 
 1361         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
 1362         VM_OBJECT_LOCK_ASSERT(object->backing_object, MA_OWNED);
 1363 
 1364         backing_object = object->backing_object;
 1365         backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
 1366 
 1367         /*
 1368          * Check the preconditions for the requested operation.
 1369          */
 1370         if (op & OBSC_TEST_ALL_SHADOWED) {
 1371                 /*
 1372                  * We do not want to have to test for the existence of
 1373                  * swap pages in the backing object.  XXX but with the
 1374                  * new swapper this would be pretty easy to do.
 1375                  *
 1376                  * XXX what about anonymous MAP_SHARED memory that hasn't
 1377                  * been ZFOD faulted yet?  If we do not test for this, the
 1378                  * shadow test may succeed! XXX
 1379                  */
 1380                 if (backing_object->type != OBJT_DEFAULT) {
 1381                         return (0);
 1382                 }
 1383         }
 1384         if (op & OBSC_COLLAPSE_WAIT) {
 1385                 vm_object_set_flag(backing_object, OBJ_DEAD);
 1386         }
 1387 
 1388         /*
 1389          * Scan the backing object's resident pages.
 1390          */
 1391         p = TAILQ_FIRST(&backing_object->memq);
 1392         while (p) {
 1393                 vm_page_t next = TAILQ_NEXT(p, listq);
 1394                 vm_pindex_t new_pindex = p->pindex - backing_offset_index;
 1395 
 1396                 if (op & OBSC_TEST_ALL_SHADOWED) {
 1397                         vm_page_t pp;
 1398 
 1399                         /*
 1400                          * Ignore pages outside the parent object's range
 1401                          * and outside the parent object's mapping of the 
 1402                          * backing object.
 1403                          *
 1404          * Note that we do not busy the backing object's
 1405          * page.
 1406                          */
 1407                         if (
 1408                             p->pindex < backing_offset_index ||
 1409                             new_pindex >= object->size
 1410                         ) {
 1411                                 p = next;
 1412                                 continue;
 1413                         }
 1414 
 1415                         /*
 1416                          * See if the parent has the page or if the parent's
 1417                          * object pager has the page.  If the parent has the
 1418                          * page but the page is not valid, the parent's
 1419                          * object pager must have the page.
 1420                          *
 1421                          * If this fails, the parent does not completely shadow
 1422                          * the object and we might as well give up now.
 1423                          */
 1424 
 1425                         pp = vm_page_lookup(object, new_pindex);
 1426                         if (
 1427                             (pp == NULL || pp->valid == 0) &&
 1428                             !vm_pager_has_page(object, new_pindex, NULL, NULL)
 1429                         ) {
 1430                                 r = 0;
 1431                                 break;
 1432                         }
 1433                 }
 1434 
 1435                 /*
 1436                  * Check for busy page
 1437                  */
 1438                 if (op & (OBSC_COLLAPSE_WAIT | OBSC_COLLAPSE_NOWAIT)) {
 1439                         vm_page_t pp;
 1440 
 1441                         if (op & OBSC_COLLAPSE_NOWAIT) {
 1442                                 if ((p->flags & PG_BUSY) ||
 1443                                     !p->valid || 
 1444                                     p->busy) {
 1445                                         p = next;
 1446                                         continue;
 1447                                 }
 1448                         } else if (op & OBSC_COLLAPSE_WAIT) {
 1449                                 if ((p->flags & PG_BUSY) || p->busy) {
 1450                                         vm_page_lock_queues();
 1451                                         vm_page_flag_set(p,
 1452                                             PG_WANTED | PG_REFERENCED);
 1453                                         VM_OBJECT_UNLOCK(backing_object);
 1454                                         VM_OBJECT_UNLOCK(object);
 1455                                         msleep(p, &vm_page_queue_mtx,
 1456                                             PDROP | PVM, "vmocol", 0);
 1457                                         VM_OBJECT_LOCK(object);
 1458                                         VM_OBJECT_LOCK(backing_object);
 1459                                         /*
 1460                                          * If we slept, anything could have
 1461                                          * happened.  Since the object is
 1462                                          * marked dead, the backing offset
 1463                                          * should not have changed so we
 1464                                          * just restart our scan.
 1465                                          */
 1466                                         p = TAILQ_FIRST(&backing_object->memq);
 1467                                         continue;
 1468                                 }
 1469                         }
 1470 
 1471                         KASSERT(
 1472                             p->object == backing_object,
 1473                             ("vm_object_backing_scan: object mismatch")
 1474                         );
 1475 
 1476                         /*
 1477                          * Destroy any associated swap
 1478                          */
 1479                         if (backing_object->type == OBJT_SWAP) {
 1480                                 swap_pager_freespace(
 1481                                     backing_object, 
 1482                                     p->pindex,
 1483                                     1
 1484                                 );
 1485                         }
 1486 
 1487                         if (
 1488                             p->pindex < backing_offset_index ||
 1489                             new_pindex >= object->size
 1490                         ) {
 1491                                 /*
 1492                                  * The page is outside the parent object's
 1493                                  * range, so we can simply destroy it.
 1494                                  */
 1495                                 vm_page_lock_queues();
 1496                                 KASSERT(!pmap_page_is_mapped(p),
 1497                                     ("freeing mapped page %p", p));
 1498                                 if (p->wire_count == 0)
 1499                                         vm_page_free(p);
 1500                                 else
 1501                                         vm_page_remove(p);
 1502                                 vm_page_unlock_queues();
 1503                                 p = next;
 1504                                 continue;
 1505                         }
 1506 
 1507                         pp = vm_page_lookup(object, new_pindex);
 1508                         if (
 1509                             pp != NULL ||
 1510                             vm_pager_has_page(object, new_pindex, NULL, NULL)
 1511                         ) {
 1512                                 /*
 1513                                  * The page already exists in the parent, or
 1514                                  * swap exists for this location in the parent.
 1515                                  * Destroy the original page in the backing
 1516                                  * object.
 1517                                  * Leave the parent's page alone.
 1518                                  */
 1519                                 vm_page_lock_queues();
 1520                                 KASSERT(!pmap_page_is_mapped(p),
 1521                                     ("freeing mapped page %p", p));
 1522                                 if (p->wire_count == 0)
 1523                                         vm_page_free(p);
 1524                                 else
 1525                                         vm_page_remove(p);
 1526                                 vm_page_unlock_queues();
 1527                                 p = next;
 1528                                 continue;
 1529                         }
 1530 
 1531                         /*
 1532                          * Page does not exist in parent, rename the
 1533                          * page from the backing object to the main object. 
 1534                          *
 1535                          * If the page was mapped to a process, it can remain 
 1536                          * mapped through the rename.
 1537                          */
 1538                         vm_page_lock_queues();
 1539                         vm_page_rename(p, object, new_pindex);
 1540                         vm_page_unlock_queues();
 1541                         /* page automatically made dirty by rename */
 1542                 }
 1543                 p = next;
 1544         }
 1545         return (r);
 1546 }
 1547 
 1548 
 1549 /*
 1550  * This version of collapse allows the operation to occur earlier, even
 1551  * while paging_in_progress is true for the object.  It is not a complete
 1552  * operation, but it should plug 99.9% of the remaining leaks.
 1553  */
 1554 static void
 1555 vm_object_qcollapse(vm_object_t object)
 1556 {
 1557         vm_object_t backing_object = object->backing_object;
 1558 
 1559         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
 1560         VM_OBJECT_LOCK_ASSERT(backing_object, MA_OWNED);
 1561 
 1562         if (backing_object->ref_count != 1)
 1563                 return;
 1564 
 1565         vm_object_backing_scan(object, OBSC_COLLAPSE_NOWAIT);
 1566 }
 1567 
 1568 /*
 1569  *      vm_object_collapse:
 1570  *
 1571  *      Collapse an object with the object backing it.
 1572  *      Pages in the backing object are moved into the
 1573  *      parent, and the backing object is deallocated.
 1574  */
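      /*
       * A sketch of the two outcomes handled below, for the chain
       *
       *   object -> backing_object -> X
       *
       * If backing_object has only one reference, its pages and pager
       * are migrated into object and backing_object is freed:
       *
       *   object -> X              (collapse)
       *
       * Otherwise, if object already shadows every visible page of
       * backing_object, object is simply re-pointed past it:
       *
       *   object -> X              (bypass; backing_object survives)
       */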
 1575 void
 1576 vm_object_collapse(vm_object_t object)
 1577 {
 1578         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
 1579         
 1580         while (TRUE) {
 1581                 vm_object_t backing_object;
 1582 
 1583                 /*
 1584                  * Verify that the conditions are right for collapse:
 1585                  *
 1586                  * The object exists and the backing object exists.
 1587                  */
 1588                 if ((backing_object = object->backing_object) == NULL)
 1589                         break;
 1590 
 1591                 /*
 1592                  * We check the backing object first, because it is most
 1593                  * likely not collapsible.
 1594                  */
 1595                 VM_OBJECT_LOCK(backing_object);
 1596                 if (backing_object->handle != NULL ||
 1597                     (backing_object->type != OBJT_DEFAULT &&
 1598                      backing_object->type != OBJT_SWAP) ||
 1599                     (backing_object->flags & OBJ_DEAD) ||
 1600                     object->handle != NULL ||
 1601                     (object->type != OBJT_DEFAULT &&
 1602                      object->type != OBJT_SWAP) ||
 1603                     (object->flags & OBJ_DEAD)) {
 1604                         VM_OBJECT_UNLOCK(backing_object);
 1605                         break;
 1606                 }
 1607 
 1608                 if (
 1609                     object->paging_in_progress != 0 ||
 1610                     backing_object->paging_in_progress != 0
 1611                 ) {
 1612                         vm_object_qcollapse(object);
 1613                         VM_OBJECT_UNLOCK(backing_object);
 1614                         break;
 1615                 }
 1616                 /*
 1617                  * We know that we can either collapse the backing object (if
 1618                  * the parent is the only reference to it) or (perhaps) have
 1619                  * the parent bypass the object if the parent happens to shadow
 1620                  * all the resident pages in the entire backing object.
 1621                  *
 1622                  * This is ignoring pager-backed pages such as swap pages.
 1623                  * vm_object_backing_scan fails the shadowing test in this
 1624                  * case.
 1625                  */
 1626                 if (backing_object->ref_count == 1) {
 1627                         /*
 1628                          * If there is exactly one reference to the backing
 1629                          * object, we can collapse it into the parent.  
 1630                          */
 1631                         vm_object_backing_scan(object, OBSC_COLLAPSE_WAIT);
 1632 
 1633                         /*
 1634                          * Move the pager from backing_object to object.
 1635                          */
 1636                         if (backing_object->type == OBJT_SWAP) {
 1637                                 /*
 1638                                  * swap_pager_copy() can sleep, in which case
 1639                                  * the backing_object's and object's locks are
 1640                                  * released and reacquired.
 1641                                  */
 1642                                 swap_pager_copy(
 1643                                     backing_object,
 1644                                     object,
 1645                                     OFF_TO_IDX(object->backing_object_offset), TRUE);
 1646                         }
 1647                         /*
 1648                          * Object now shadows whatever backing_object did.
 1649                          * Note that the reference to 
 1650                          * backing_object->backing_object moves from within 
 1651                          * backing_object to within object.
 1652                          */
 1653                         LIST_REMOVE(object, shadow_list);
 1654                         backing_object->shadow_count--;
 1655                         backing_object->generation++;
 1656                         if (backing_object->backing_object) {
 1657                                 VM_OBJECT_LOCK(backing_object->backing_object);
 1658                                 LIST_REMOVE(backing_object, shadow_list);
 1659                                 LIST_INSERT_HEAD(
 1660                                     &backing_object->backing_object->shadow_head,
 1661                                     object, shadow_list);
 1662                                 /*
 1663                                  * The shadow_count has not changed.
 1664                                  */
 1665                                 backing_object->backing_object->generation++;
 1666                                 VM_OBJECT_UNLOCK(backing_object->backing_object);
 1667                         }
 1668                         object->backing_object = backing_object->backing_object;
 1669                         object->backing_object_offset +=
 1670                             backing_object->backing_object_offset;
 1671 
 1672                         /*
 1673                          * Discard backing_object.
 1674                          *
 1675                          * Since the backing object has no pages, no pager left,
 1676                          * and no object references within it, all that is
 1677                          * necessary is to dispose of it.
 1678                          */
 1679                         KASSERT(backing_object->ref_count == 1, ("backing_object %p was somehow re-referenced during collapse!", backing_object));
 1680                         VM_OBJECT_UNLOCK(backing_object);
 1681 
 1682                         mtx_lock(&vm_object_list_mtx);
 1683                         TAILQ_REMOVE(
 1684                             &vm_object_list, 
 1685                             backing_object,
 1686                             object_list
 1687                         );
 1688                         mtx_unlock(&vm_object_list_mtx);
 1689 
 1690                         uma_zfree(obj_zone, backing_object);
 1691 
 1692                         object_collapses++;
 1693                 } else {
 1694                         vm_object_t new_backing_object;
 1695 
 1696                         /*
 1697                          * If we do not entirely shadow the backing object,
 1698                          * there is nothing we can do so we give up.
 1699                          */
 1700                         if (vm_object_backing_scan(object, OBSC_TEST_ALL_SHADOWED) == 0) {
 1701                                 VM_OBJECT_UNLOCK(backing_object);
 1702                                 break;
 1703                         }
 1704 
 1705                         /*
 1706                          * Make the parent shadow the next object in the
 1707                          * chain.  Deallocating backing_object will not remove
 1708                          * it, since its reference count is at least 2.
 1709                          */
 1710                         LIST_REMOVE(object, shadow_list);
 1711                         backing_object->shadow_count--;
 1712                         backing_object->generation++;
 1713 
 1714                         new_backing_object = backing_object->backing_object;
 1715                         if ((object->backing_object = new_backing_object) != NULL) {
 1716                                 VM_OBJECT_LOCK(new_backing_object);
 1717                                 LIST_INSERT_HEAD(
 1718                                     &new_backing_object->shadow_head,
 1719                                     object,
 1720                                     shadow_list
 1721                                 );
 1722                                 new_backing_object->shadow_count++;
 1723                                 new_backing_object->generation++;
 1724                                 vm_object_reference_locked(new_backing_object);
 1725                                 VM_OBJECT_UNLOCK(new_backing_object);
 1726                                 object->backing_object_offset +=
 1727                                         backing_object->backing_object_offset;
 1728                         }
 1729 
 1730                         /*
 1731                          * Drop the reference count on backing_object. Since
 1732                          * its ref_count was at least 2, it will not vanish.
 1733                          */
 1734                         backing_object->ref_count--;
 1735                         VM_OBJECT_UNLOCK(backing_object);
 1736                         object_bypasses++;
 1737                 }
 1738 
 1739                 /*
 1740                  * Try again with this object's new backing object.
 1741                  */
 1742         }
 1743 }
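      /*
       * vm_object_collapse() is typically reached when a reference is
       * dropped (for instance from vm_object_deallocate()), since that is
       * when backing_object->ref_count may have fallen to 1 and a full
       * collapse first becomes possible.
       */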
 1744 
 1745 /*
 1746  *      vm_object_page_remove:
 1747  *
 1748  *      Removes all physical pages in the given range from the
 1749  *      object's list of pages.  If the range's end is zero, all
 1750  *      physical pages from the range's start to the end of the object
 1751  *      are deleted.
 1752  *
 1753  *      The object must be locked.
 1754  */
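      /*
       * When clean_only is TRUE, dirty pages are spared: they are merely
       * write-protected (see the pmap_page_protect() call below) so that
       * future modifications fault.  Wired pages are never freed here in
       * either mode; they are unmapped and, when clean_only is FALSE,
       * marked invalid.
       */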
 1755 void
 1756 vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
 1757     boolean_t clean_only)
 1758 {
 1759         vm_page_t p, next;
 1760 
 1761         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
 1762         if (object->resident_page_count == 0)
 1763                 return;
 1764 
 1765         /*
 1766          * Since physically-backed objects do not use managed pages, we can't
 1767          * remove pages from the object (we must instead remove the page
 1768          * references, and then destroy the object).
 1769          */
 1770         KASSERT(object->type != OBJT_PHYS,
 1771             ("attempt to remove pages from a physical object"));
 1772 
 1773         vm_object_pip_add(object, 1);
 1774 again:
 1775         vm_page_lock_queues();
 1776         if ((p = TAILQ_FIRST(&object->memq)) != NULL) {
 1777                 if (p->pindex < start) {
 1778                         p = vm_page_splay(start, object->root);
 1779                         if ((object->root = p)->pindex < start)
 1780                                 p = TAILQ_NEXT(p, listq);
 1781                 }
 1782         }
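              /*
               * vm_page_splay() rotates the page nearest "start" to the
               * root of the object's splay tree; because the memq list is
               * kept in ascending pindex order, stepping to TAILQ_NEXT()
               * when the new root precedes "start" leaves p on the first
               * page at or beyond "start", as the assertion below states.
               */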
 1783         /*
 1784          * Assert: the variable p is either (1) the page with the
 1785          * least pindex greater than or equal to the parameter pindex
 1786          * or (2) NULL.
 1787          */
 1788         for (;
 1789              p != NULL && (p->pindex < end || end == 0);
 1790              p = next) {
 1791                 next = TAILQ_NEXT(p, listq);
 1792 
 1793                 if (p->wire_count != 0) {
 1794                         pmap_remove_all(p);
 1795                         if (!clean_only)
 1796                                 p->valid = 0;
 1797                         continue;
 1798                 }
 1799                 if (vm_page_sleep_if_busy(p, TRUE, "vmopar"))
 1800                         goto again;
 1801                 if (clean_only && p->valid) {
 1802                         pmap_page_protect(p, VM_PROT_READ | VM_PROT_EXECUTE);
 1803                         if (p->valid & p->dirty)
 1804                                 continue;
 1805                 }
 1806                 pmap_remove_all(p);
 1807                 vm_page_free(p);
 1808         }
 1809         vm_page_unlock_queues();
 1810         vm_object_pip_wakeup(object);
 1811 }
 1812 
 1813 /*
 1814  *      Routine:        vm_object_coalesce
 1815  *      Function:       Coalesces two objects backing up adjoining
 1816  *                      regions of memory into a single object.
 1817  *
 1818  *      returns TRUE if objects were combined.
 1819  *
 1820  *      NOTE:   Only works at the moment if the second object is NULL -
 1821  *              if it's not, which object do we lock first?
 1822  *
 1823  *      Parameters:
 1824  *              prev_object     First object to coalesce
 1825  *              prev_offset     Offset into prev_object
 1826  *              prev_size       Size of reference to prev_object
 1827  *              next_size       Size of reference to the second object
 1828  *
 1829  *      Conditions:
 1830  *      The object must *not* be locked.
 1831  */
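      /*
       * A sketch of a typical caller (assumed here, not defined in this
       * file): when vm_map_insert() finds that a new anonymous range
       * directly follows an existing entry, it may attempt something like
       *
       *   if (vm_object_coalesce(prev_entry->object.vm_object,
       *       prev_entry->offset,
       *       (vm_size_t)(prev_entry->end - prev_entry->start),
       *       (vm_size_t)(end - prev_entry->end)))
       *           ... extend prev_entry instead of allocating ...
       *
       * so that one growing mapping keeps a single backing object.
       */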
 1832 boolean_t
 1833 vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
 1834         vm_size_t prev_size, vm_size_t next_size)
 1835 {
 1836         vm_pindex_t next_pindex;
 1837 
 1838         if (prev_object == NULL)
 1839                 return (TRUE);
 1840         VM_OBJECT_LOCK(prev_object);
 1841         if (prev_object->type != OBJT_DEFAULT &&
 1842             prev_object->type != OBJT_SWAP) {
 1843                 VM_OBJECT_UNLOCK(prev_object);
 1844                 return (FALSE);
 1845         }
 1846 
 1847         /*
 1848          * Try to collapse the object first
 1849          */
 1850         vm_object_collapse(prev_object);
 1851 
 1852         /*
 1853          * Can't coalesce if the object has more than one reference, is paged
 1854          * out, shadows another object, or has a copy elsewhere; any of these
 1855          * mean that pages not mapped to prev_entry may be in use anyway.
 1856          */
 1857         if (prev_object->backing_object != NULL) {
 1858                 VM_OBJECT_UNLOCK(prev_object);
 1859                 return (FALSE);
 1860         }
 1861 
 1862         prev_size >>= PAGE_SHIFT;
 1863         next_size >>= PAGE_SHIFT;
 1864         next_pindex = OFF_TO_IDX(prev_offset) + prev_size;
 1865 
 1866         if ((prev_object->ref_count > 1) &&
 1867             (prev_object->size != next_pindex)) {
 1868                 VM_OBJECT_UNLOCK(prev_object);
 1869                 return (FALSE);
 1870         }
 1871 
 1872         /*
 1873          * Remove any pages that may still be in the object from a previous
 1874          * deallocation.
 1875          */
 1876         if (next_pindex < prev_object->size) {
 1877                 vm_object_page_remove(prev_object,
 1878                                       next_pindex,
 1879                                       next_pindex + next_size, FALSE);
 1880                 if (prev_object->type == OBJT_SWAP)
 1881                         swap_pager_freespace(prev_object,
 1882                                              next_pindex, next_size);
 1883         }
 1884 
 1885         /*
 1886          * Extend the object if necessary.
 1887          */
 1888         if (next_pindex + next_size > prev_object->size)
 1889                 prev_object->size = next_pindex + next_size;
 1890 
 1891         VM_OBJECT_UNLOCK(prev_object);
 1892         return (TRUE);
 1893 }
 1894 
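      /*
       *      vm_object_set_writeable_dirty:
       *
       *      Mark the object as writeable and possibly containing dirty
       *      pages.  For vnode-backed objects the vnode is additionally
       *      flagged VI_OBJDIRTY so that paths such as vfs_msync() know
       *      to examine the object's pages when flushing the vnode.
       */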
 1895 void
 1896 vm_object_set_writeable_dirty(vm_object_t object)
 1897 {
 1898         struct vnode *vp;
 1899 
 1900         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
 1901         if ((object->flags & (OBJ_MIGHTBEDIRTY|OBJ_WRITEABLE)) ==
 1902             (OBJ_MIGHTBEDIRTY|OBJ_WRITEABLE))
 1903                 return;
 1904         vm_object_set_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
 1905         if (object->type == OBJT_VNODE &&
 1906             (vp = (struct vnode *)object->handle) != NULL) {
 1907                 VI_LOCK(vp);
 1908                 vp->v_iflag |= VI_OBJDIRTY;
 1909                 VI_UNLOCK(vp);
 1910         }
 1911 }
 1912 
 1913 #include "opt_ddb.h"
 1914 #ifdef DDB
 1915 #include <sys/kernel.h>
 1916 
 1917 #include <sys/cons.h>
 1918 
 1919 #include <ddb/ddb.h>
 1920 
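      /*
       * Helper for the DDB commands below: recursively determine whether
       * the given object is reachable from the given map.  With entry ==
       * NULL every entry of the map is examined; submaps are descended
       * into, and for ordinary entries the whole backing-object chain is
       * walked.
       */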
 1921 static int
 1922 _vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
 1923 {
 1924         vm_map_t tmpm;
 1925         vm_map_entry_t tmpe;
 1926         vm_object_t obj;
 1927         int entcount;
 1928 
 1929         if (map == 0)
 1930                 return 0;
 1931 
 1932         if (entry == 0) {
 1933                 tmpe = map->header.next;
 1934                 entcount = map->nentries;
 1935                 while (entcount-- && (tmpe != &map->header)) {
 1936                         if (_vm_object_in_map(map, object, tmpe)) {
 1937                                 return 1;
 1938                         }
 1939                         tmpe = tmpe->next;
 1940                 }
 1941         } else if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
 1942                 tmpm = entry->object.sub_map;
 1943                 tmpe = tmpm->header.next;
 1944                 entcount = tmpm->nentries;
 1945                 while (entcount-- && tmpe != &tmpm->header) {
 1946                         if (_vm_object_in_map(tmpm, object, tmpe)) {
 1947                                 return 1;
 1948                         }
 1949                         tmpe = tmpe->next;
 1950                 }
 1951         } else if ((obj = entry->object.vm_object) != NULL) {
 1952                 for (; obj; obj = obj->backing_object)
 1953                         if (obj == object) {
 1954                                 return 1;
 1955                         }
 1956         }
 1957         return 0;
 1958 }
 1959 
 1960 static int
 1961 vm_object_in_map(vm_object_t object)
 1962 {
 1963         struct proc *p;
 1964 
 1965         /* sx_slock(&allproc_lock); */
 1966         LIST_FOREACH(p, &allproc, p_list) {
 1967                 if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */)
 1968                         continue;
 1969                 if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) {
 1970                         /* sx_sunlock(&allproc_lock); */
 1971                         return 1;
 1972                 }
 1973         }
 1974         /* sx_sunlock(&allproc_lock); */
 1975         if (_vm_object_in_map(kernel_map, object, 0))
 1976                 return 1;
 1977         if (_vm_object_in_map(kmem_map, object, 0))
 1978                 return 1;
 1979         if (_vm_object_in_map(pager_map, object, 0))
 1980                 return 1;
 1981         if (_vm_object_in_map(buffer_map, object, 0))
 1982                 return 1;
 1983         return 0;
 1984 }
 1985 
 1986 DB_SHOW_COMMAND(vmochk, vm_object_check)
 1987 {
 1988         vm_object_t object;
 1989 
 1990         /*
 1991          * Make sure that internal objects are in a map somewhere
 1992          * and that none have a zero reference count.
 1993          */
 1994         TAILQ_FOREACH(object, &vm_object_list, object_list) {
 1995                 if (object->handle == NULL &&
 1996                     (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
 1997                         if (object->ref_count == 0) {
 1998                                 db_printf("vmochk: internal obj has zero ref count: %ld\n",
 1999                                         (long)object->size);
 2000                         }
 2001                         if (!vm_object_in_map(object)) {
 2002                                 db_printf(
 2003                         "vmochk: internal obj is not in a map: "
 2004                         "ref: %d, size: %lu: 0x%lx, backing_object: %p\n",
 2005                                     object->ref_count, (u_long)object->size, 
 2006                                     (u_long)object->size,
 2007                                     (void *)object->backing_object);
 2008                         }
 2009                 }
 2010         }
 2011 }
 2012 
 2013 /*
 2014  *      vm_object_print:        [ debug ]
 2015  */
 2016 DB_SHOW_COMMAND(object, vm_object_print_static)
 2017 {
 2018         /* XXX convert args. */
 2019         vm_object_t object = (vm_object_t)addr;
 2020         boolean_t full = have_addr;
 2021 
 2022         vm_page_t p;
 2023 
 2024         /* XXX count is an (unused) arg.  Avoid shadowing it. */
 2025 #define count   was_count
 2026 
 2027         int count;
 2028 
 2029         if (object == NULL)
 2030                 return;
 2031 
 2032         db_iprintf(
 2033             "Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x\n",
 2034             object, (int)object->type, (uintmax_t)object->size,
 2035             object->resident_page_count, object->ref_count, object->flags);
 2036         db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%jx\n",
 2037             object->shadow_count, 
 2038             object->backing_object ? object->backing_object->ref_count : 0,
 2039             object->backing_object, (uintmax_t)object->backing_object_offset);
 2040 
 2041         if (!full)
 2042                 return;
 2043 
 2044         db_indent += 2;
 2045         count = 0;
 2046         TAILQ_FOREACH(p, &object->memq, listq) {
 2047                 if (count == 0)
 2048                         db_iprintf("memory:=");
 2049                 else if (count == 6) {
 2050                         db_printf("\n");
 2051                         db_iprintf(" ...");
 2052                         count = 0;
 2053                 } else
 2054                         db_printf(",");
 2055                 count++;
 2056 
 2057                 db_printf("(off=0x%jx,page=0x%jx)",
 2058                     (uintmax_t)p->pindex, (uintmax_t)VM_PAGE_TO_PHYS(p));
 2059         }
 2060         if (count != 0)
 2061                 db_printf("\n");
 2062         db_indent -= 2;
 2063 }
 2064 
 2065 /* XXX. */
 2066 #undef count
 2067 
 2068 /* XXX need this non-static entry for calling from vm_map_print. */
 2069 void
 2070 vm_object_print(
 2071         /* db_expr_t */ long addr,
 2072         boolean_t have_addr,
 2073         /* db_expr_t */ long count,
 2074         char *modif)
 2075 {
 2076         vm_object_print_static(addr, have_addr, count, modif);
 2077 }
 2078 
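      /*
       * "show vmopag" prints, for every object (first 128 pages only),
       * runs of physically contiguous resident pages in the form
       * index(start)run(length)pa(first physical address), pausing for a
       * keypress roughly every 18 lines of output.
       */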
 2079 DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
 2080 {
 2081         vm_object_t object;
 2082         int nl = 0;
 2083         int c;
 2084 
 2085         TAILQ_FOREACH(object, &vm_object_list, object_list) {
 2086                 vm_pindex_t idx, fidx;
 2087                 vm_pindex_t osize;
 2088                 vm_paddr_t pa = -1, padiff;
 2089                 int rcount;
 2090                 vm_page_t m;
 2091 
 2092                 db_printf("new object: %p\n", (void *)object);
 2093                 if (nl > 18) {
 2094                         c = cngetc();
 2095                         if (c != ' ')
 2096                                 return;
 2097                         nl = 0;
 2098                 }
 2099                 nl++;
 2100                 rcount = 0;
 2101                 fidx = 0;
 2102                 osize = object->size;
 2103                 if (osize > 128)
 2104                         osize = 128;
 2105                 for (idx = 0; idx < osize; idx++) {
 2106                         m = vm_page_lookup(object, idx);
 2107                         if (m == NULL) {
 2108                                 if (rcount) {
 2109                                         db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
 2110                                                 (long)fidx, rcount, (long)pa);
 2111                                         if (nl > 18) {
 2112                                                 c = cngetc();
 2113                                                 if (c != ' ')
 2114                                                         return;
 2115                                                 nl = 0;
 2116                                         }
 2117                                         nl++;
 2118                                         rcount = 0;
 2119                                 }
 2120                                 continue;
 2121                         }
 2122 
 2123 
 2124                         if (rcount &&
 2125                                 (VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
 2126                                 ++rcount;
 2127                                 continue;
 2128                         }
 2129                         if (rcount) {
 2130                                 padiff = pa + rcount * PAGE_SIZE - VM_PAGE_TO_PHYS(m);
 2131                                 padiff >>= PAGE_SHIFT;
 2132                                 padiff &= PQ_L2_MASK;
 2133                                 if (padiff == 0) {
 2134                                         pa = VM_PAGE_TO_PHYS(m) - rcount * PAGE_SIZE;
 2135                                         ++rcount;
 2136                                         continue;
 2137                                 }
 2138                                 db_printf(" index(%ld)run(%d)pa(0x%lx)",
 2139                                         (long)fidx, rcount, (long)pa);
 2140                                 db_printf("pd(%ld)\n", (long)padiff);
 2141                                 if (nl > 18) {
 2142                                         c = cngetc();
 2143                                         if (c != ' ')
 2144                                                 return;
 2145                                         nl = 0;
 2146                                 }
 2147                                 nl++;
 2148                         }
 2149                         fidx = idx;
 2150                         pa = VM_PAGE_TO_PHYS(m);
 2151                         rcount = 1;
 2152                 }
 2153                 if (rcount) {
 2154                         db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
 2155                                 (long)fidx, rcount, (long)pa);
 2156                         if (nl > 18) {
 2157                                 c = cngetc();
 2158                                 if (c != ' ')
 2159                                         return;
 2160                                 nl = 0;
 2161                         }
 2162                         nl++;
 2163                 }
 2164         }
 2165 }
 2166 #endif /* DDB */
