FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_fault.c


    1 /*-
    2  * Copyright (c) 1991, 1993
    3  *      The Regents of the University of California.  All rights reserved.
    4  * Copyright (c) 1994 John S. Dyson
    5  * All rights reserved.
    6  * Copyright (c) 1994 David Greenman
    7  * All rights reserved.
    8  *
    9  *
   10  * This code is derived from software contributed to Berkeley by
   11  * The Mach Operating System project at Carnegie-Mellon University.
   12  *
   13  * Redistribution and use in source and binary forms, with or without
   14  * modification, are permitted provided that the following conditions
   15  * are met:
   16  * 1. Redistributions of source code must retain the above copyright
   17  *    notice, this list of conditions and the following disclaimer.
   18  * 2. Redistributions in binary form must reproduce the above copyright
   19  *    notice, this list of conditions and the following disclaimer in the
   20  *    documentation and/or other materials provided with the distribution.
   21  * 3. All advertising materials mentioning features or use of this software
   22  *    must display the following acknowledgement:
   23  *      This product includes software developed by the University of
   24  *      California, Berkeley and its contributors.
   25  * 4. Neither the name of the University nor the names of its contributors
   26  *    may be used to endorse or promote products derived from this software
   27  *    without specific prior written permission.
   28  *
   29  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   30  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   31  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   32  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   33  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   34  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   35  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   36  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   37  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   38  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   39  * SUCH DAMAGE.
   40  *
   41  *      from: @(#)vm_fault.c    8.4 (Berkeley) 1/12/94
   42  *
   43  *
   44  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
   45  * All rights reserved.
   46  *
   47  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
   48  *
   49  * Permission to use, copy, modify and distribute this software and
   50  * its documentation is hereby granted, provided that both the copyright
   51  * notice and this permission notice appear in all copies of the
   52  * software, derivative works or modified versions, and any portions
   53  * thereof, and that both notices appear in supporting documentation.
   54  *
   55  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
   56  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
   57  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
   58  *
   59  * Carnegie Mellon requests users of this software to return to
   60  *
   61  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
   62  *  School of Computer Science
   63  *  Carnegie Mellon University
   64  *  Pittsburgh PA 15213-3890
   65  *
   66  * any improvements or extensions that they make and grant Carnegie the
   67  * rights to redistribute these changes.
   68  */
   69 
   70 /*
   71  *      Page fault handling module.
   72  */
   73 
   74 #include <sys/cdefs.h>
   75 __FBSDID("$FreeBSD: releng/8.0/sys/vm/vm_fault.c 195840 2009-07-24 13:50:29Z jhb $");
   76 
   77 #include "opt_vm.h"
   78 
   79 #include <sys/param.h>
   80 #include <sys/systm.h>
   81 #include <sys/kernel.h>
   82 #include <sys/lock.h>
   83 #include <sys/mutex.h>
   84 #include <sys/proc.h>
   85 #include <sys/resourcevar.h>
   86 #include <sys/sysctl.h>
   87 #include <sys/vmmeter.h>
   88 #include <sys/vnode.h>
   89 
   90 #include <vm/vm.h>
   91 #include <vm/vm_param.h>
   92 #include <vm/pmap.h>
   93 #include <vm/vm_map.h>
   94 #include <vm/vm_object.h>
   95 #include <vm/vm_page.h>
   96 #include <vm/vm_pageout.h>
   97 #include <vm/vm_kern.h>
   98 #include <vm/vm_pager.h>
   99 #include <vm/vnode_pager.h>
  100 #include <vm/vm_extern.h>
  101 
  102 #include <sys/mount.h>  /* XXX Temporary for VFS_LOCK_GIANT() */
  103 
  104 #define PFBAK 4
  105 #define PFFOR 4
  106 #define PAGEORDER_SIZE (PFBAK+PFFOR)
  107 
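      /*
       * Byte offsets, relative to the faulting address, of the pages that
       * vm_fault_prefault() considers mapping in: nearest neighbors first,
       * alternating behind and ahead of the fault, out to PFBAK/PFFOR
       * pages in each direction.
       */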
  108 static int prefault_pageorder[] = {
  109         -1 * PAGE_SIZE, 1 * PAGE_SIZE,
  110         -2 * PAGE_SIZE, 2 * PAGE_SIZE,
  111         -3 * PAGE_SIZE, 3 * PAGE_SIZE,
  112         -4 * PAGE_SIZE, 4 * PAGE_SIZE
  113 };
  114 
  115 static int vm_fault_additional_pages(vm_page_t, int, int, vm_page_t *, int *);
  116 static void vm_fault_prefault(pmap_t, vm_offset_t, vm_map_entry_t);
  117 
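      /*
       * Size of the cluster requested from the pager on a hard fault:
       * up to VM_FAULT_READ_BEHIND pages before and VM_FAULT_READ_AHEAD
       * pages after the faulting page, plus the faulting page itself.
       */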
  118 #define VM_FAULT_READ_AHEAD 8
  119 #define VM_FAULT_READ_BEHIND 7
  120 #define VM_FAULT_READ (VM_FAULT_READ_AHEAD+VM_FAULT_READ_BEHIND+1)
  121 
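      /*
       * State shared between vm_fault() and its helper routines: the page
       * and object currently being examined, the top-level ("first")
       * object and page, the results of the map lookup, and any vnode
       * (and Giant reference) held for pager I/O.
       */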
  122 struct faultstate {
  123         vm_page_t m;
  124         vm_object_t object;
  125         vm_pindex_t pindex;
  126         vm_page_t first_m;
  127         vm_object_t     first_object;
  128         vm_pindex_t first_pindex;
  129         vm_map_t map;
  130         vm_map_entry_t entry;
  131         int lookup_still_valid;
  132         struct vnode *vp;
  133         int vfslocked;
  134 };
  135 
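      /*
       * Wake up any thread sleeping on the current page, move the page to
       * the inactive queue, and forget it.
       */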
  136 static inline void
  137 release_page(struct faultstate *fs)
  138 {
  139 
  140         vm_page_wakeup(fs->m);
  141         vm_page_lock_queues();
  142         vm_page_deactivate(fs->m);
  143         vm_page_unlock_queues();
  144         fs->m = NULL;
  145 }
  146 
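      /*
       * Release the map lookup performed by vm_map_lookup(), if it is
       * still held.
       */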
  147 static inline void
  148 unlock_map(struct faultstate *fs)
  149 {
  150 
  151         if (fs->lookup_still_valid) {
  152                 vm_map_lookup_done(fs->map, fs->entry);
  153                 fs->lookup_still_valid = FALSE;
  154         }
  155 }
  156 
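      /*
       * Back out the state accumulated during the fault: free any page
       * allocated in the top-level object, drop the paging-in-progress
       * counts and the object reference, and release the map lookup and
       * any vnode and Giant held for pager I/O.
       */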
  157 static void
  158 unlock_and_deallocate(struct faultstate *fs)
  159 {
  160 
  161         vm_object_pip_wakeup(fs->object);
  162         VM_OBJECT_UNLOCK(fs->object);
  163         if (fs->object != fs->first_object) {
  164                 VM_OBJECT_LOCK(fs->first_object);
  165                 vm_page_lock_queues();
  166                 vm_page_free(fs->first_m);
  167                 vm_page_unlock_queues();
  168                 vm_object_pip_wakeup(fs->first_object);
  169                 VM_OBJECT_UNLOCK(fs->first_object);
  170                 fs->first_m = NULL;
  171         }
  172         vm_object_deallocate(fs->first_object);
  173         unlock_map(fs); 
  174         if (fs->vp != NULL) { 
  175                 vput(fs->vp);
  176                 fs->vp = NULL;
  177         }
  178         VFS_UNLOCK_GIANT(fs->vfslocked);
  179         fs->vfslocked = 0;
  180 }
  181 
  182 /*
  183  * TRYPAGER - used by vm_fault to calculate whether the pager for the
  184  *            current object *might* contain the page.
  185  *
  186  *            Default objects are zero-fill; there is no real pager.
  187  */
  188 #define TRYPAGER        (fs.object->type != OBJT_DEFAULT && \
  189                         (((fault_flags & VM_FAULT_WIRE_MASK) == 0) || wired))
  190 
  191 /*
  192  *      vm_fault:
  193  *
  194  *      Handle a page fault occurring at the given address,
  195  *      requiring the given permissions, in the map specified.
  196  *      If successful, the page is inserted into the
  197  *      associated physical map.
  198  *
  199  *      NOTE: the given address should be truncated to the
  200  *      proper page address.
  201  *
  202  *      KERN_SUCCESS is returned if the page fault is handled; otherwise,
  203  *      a standard error specifying why the fault is fatal is returned.
  204  *
  205  *
  206  *      The map in question must be referenced, and remains so.
  207  *      Caller may hold no locks.
  208  */
  209 int
  210 vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
  211          int fault_flags)
  212 {
  213         vm_prot_t prot;
  214         int is_first_object_locked, result;
  215         boolean_t are_queues_locked, growstack, wired;
  216         int map_generation;
  217         vm_object_t next_object;
  218         vm_page_t marray[VM_FAULT_READ];
  219         int hardfault;
  220         int faultcount, ahead, behind;
  221         struct faultstate fs;
  222         struct vnode *vp;
  223         int locked, error;
  224 
  225         hardfault = 0;
  226         growstack = TRUE;
  227         PCPU_INC(cnt.v_vm_faults);
  228         fs.vp = NULL;
  229         fs.vfslocked = 0;
  230         faultcount = behind = 0;
  231 
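              /*
               * The fault is retried from here whenever the state built up
               * so far had to be torn down, for example to wait for a busy
               * page, for free memory, or for a vnode lock.
               */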
  232 RetryFault:;
  233 
  234         /*
  235          * Find the backing store object and offset into it to begin the
  236          * search.
  237          */
  238         fs.map = map;
  239         result = vm_map_lookup(&fs.map, vaddr, fault_type, &fs.entry,
  240             &fs.first_object, &fs.first_pindex, &prot, &wired);
  241         if (result != KERN_SUCCESS) {
  242                 if (result != KERN_PROTECTION_FAILURE ||
  243                     (fault_flags & VM_FAULT_WIRE_MASK) != VM_FAULT_USER_WIRE) {
  244                         if (growstack && result == KERN_INVALID_ADDRESS &&
  245                             map != kernel_map && curproc != NULL) {
  246                                 result = vm_map_growstack(curproc, vaddr);
  247                                 if (result != KERN_SUCCESS)
  248                                         return (KERN_FAILURE);
  249                                 growstack = FALSE;
  250                                 goto RetryFault;
  251                         }
  252                         return (result);
  253                 }
  254 
  255                 /*
  256                  * If we are user-wiring a r/w segment, and it is COW, then
  257                  * we need to do the COW operation.  Note that we don't
  258                  * currently COW RO sections, because it is NOT desirable
  259                  * to COW .text.  We simply keep .text from ever being COW'ed
  260                  * and take the heat that one cannot debug wired .text sections.
  261                  */
  262                 result = vm_map_lookup(&fs.map, vaddr,
  263                         VM_PROT_READ|VM_PROT_WRITE|VM_PROT_OVERRIDE_WRITE,
  264                         &fs.entry, &fs.first_object, &fs.first_pindex, &prot, &wired);
  265                 if (result != KERN_SUCCESS)
  266                         return (result);
  267 
  268                 /*
  269                  * If we don't COW now, on a user wire, the user will never
  270                  * be able to write to the mapping.  If we don't make this
  271                  * restriction, the bookkeeping would be nearly impossible.
  272                  *
  273                  * XXX The following assignment modifies the map without
  274                  * holding a write lock on it.
  275                  */
  276                 if ((fs.entry->protection & VM_PROT_WRITE) == 0)
  277                         fs.entry->max_protection &= ~VM_PROT_WRITE;
  278         }
  279 
  280         map_generation = fs.map->timestamp;
  281 
  282         if (fs.entry->eflags & MAP_ENTRY_NOFAULT) {
  283                 panic("vm_fault: fault on nofault entry, addr: %lx",
  284                     (u_long)vaddr);
  285         }
  286 
  287         /*
  288          * Make a reference to this object to prevent its disposal while we
  289          * are messing with it.  Once we have the reference, the map is free
  290          * to be diddled.  Since objects reference their shadows (and copies),
  291          * they will stay around as well.
  292          *
  293          * Bump the paging-in-progress count to prevent size changes (e.g. 
  294          * truncation operations) during I/O.  This must be done after
  295          * obtaining the vnode lock in order to avoid possible deadlocks.
  296          */
  297         VM_OBJECT_LOCK(fs.first_object);
  298         vm_object_reference_locked(fs.first_object);
  299         vm_object_pip_add(fs.first_object, 1);
  300 
  301         fs.lookup_still_valid = TRUE;
  302 
  303         if (wired)
  304                 fault_type = prot;
  305 
  306         fs.first_m = NULL;
  307 
  308         /*
  309          * Search for the page at object/offset.
  310          */
  311         fs.object = fs.first_object;
  312         fs.pindex = fs.first_pindex;
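              /*
               * Walk the object shadow chain, starting at the top-level
               * object.  At each level the page is either found resident,
               * paged in from the pager, or the search moves on to the
               * backing object; if the chain ends without finding the page,
               * it is zero-filled in the top-level object.
               */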
  313         while (TRUE) {
  314                 /*
  315                  * If the object is dead, we stop here
  316                  */
  317                 if (fs.object->flags & OBJ_DEAD) {
  318                         unlock_and_deallocate(&fs);
  319                         return (KERN_PROTECTION_FAILURE);
  320                 }
  321 
  322                 /*
  323                  * See if page is resident
  324                  */
  325                 fs.m = vm_page_lookup(fs.object, fs.pindex);
  326                 if (fs.m != NULL) {
  327                         /* 
  328                          * check for page-based copy on write.
  329                          * We check fs.object == fs.first_object so
  330                          * as to ensure the legacy COW mechanism is
  331                          * used when the page in question is part of
  332                          * a shadow object.  Otherwise, vm_page_cowfault()
  333                          * removes the page from the backing object, 
  334                          * which is not what we want.
  335                          */
  336                         vm_page_lock_queues();
  337                         if ((fs.m->cow) && 
  338                             (fault_type & VM_PROT_WRITE) &&
  339                             (fs.object == fs.first_object)) {
  340                                 vm_page_cowfault(fs.m);
  341                                 vm_page_unlock_queues();
  342                                 unlock_and_deallocate(&fs);
  343                                 goto RetryFault;
  344                         }
  345 
  346                         /*
  347                          * Wait/Retry if the page is busy.  We have to do this
  348                          * if the page is busy via either VPO_BUSY or 
  349                          * vm_page_t->busy because the vm_pager may be using
  350                          * vm_page_t->busy for pageouts ( and even pageins if
  351                          * it is the vnode pager ), and we could end up trying
  352                          * to pagein and pageout the same page simultaneously.
  353                          *
  354                          * We can theoretically allow the busy case on a read
  355                          * fault if the page is marked valid, but since such
  356                          * pages are typically already pmap'd, putting that
  357                          * special case in might be more effort than it is
  358                          * worth.  We cannot under any circumstances mess
  359                          * around with a vm_page_t->busy page except, perhaps,
  360                          * to pmap it.
  361                          */
  362                         if ((fs.m->oflags & VPO_BUSY) || fs.m->busy) {
  363                                 vm_page_unlock_queues();
  364                                 VM_OBJECT_UNLOCK(fs.object);
  365                                 if (fs.object != fs.first_object) {
  366                                         VM_OBJECT_LOCK(fs.first_object);
  367                                         vm_page_lock_queues();
  368                                         vm_page_free(fs.first_m);
  369                                         vm_page_unlock_queues();
  370                                         vm_object_pip_wakeup(fs.first_object);
  371                                         VM_OBJECT_UNLOCK(fs.first_object);
  372                                         fs.first_m = NULL;
  373                                 }
  374                                 unlock_map(&fs);
  375                                 VM_OBJECT_LOCK(fs.object);
  376                                 if (fs.m == vm_page_lookup(fs.object,
  377                                     fs.pindex)) {
  378                                         vm_page_sleep_if_busy(fs.m, TRUE,
  379                                             "vmpfw");
  380                                 }
  381                                 vm_object_pip_wakeup(fs.object);
  382                                 VM_OBJECT_UNLOCK(fs.object);
  383                                 PCPU_INC(cnt.v_intrans);
  384                                 vm_object_deallocate(fs.first_object);
  385                                 goto RetryFault;
  386                         }
  387                         vm_pageq_remove(fs.m);
  388                         vm_page_unlock_queues();
  389 
  390                         /*
  391                          * Mark page busy for other processes, and the 
  392                          * pagedaemon.  If it still isn't completely valid
  393                          * (readable), jump to readrest, else break-out ( we
  394                          * found the page ).
  395                          */
  396                         vm_page_busy(fs.m);
  397                         if (fs.m->valid != VM_PAGE_BITS_ALL &&
  398                                 fs.m->object != kernel_object && fs.m->object != kmem_object) {
  399                                 goto readrest;
  400                         }
  401 
  402                         break;
  403                 }
  404 
  405                 /*
  406                  * Page is not resident.  If this is the search termination
  407                  * point or the pager might contain the page, allocate a new page.
  408                  */
  409                 if (TRYPAGER || fs.object == fs.first_object) {
  410                         if (fs.pindex >= fs.object->size) {
  411                                 unlock_and_deallocate(&fs);
  412                                 return (KERN_PROTECTION_FAILURE);
  413                         }
  414 
  415                         /*
  416                          * Allocate a new page for this object/offset pair.
  417                          */
  418                         fs.m = NULL;
  419                         if (!vm_page_count_severe()) {
  420 #if VM_NRESERVLEVEL > 0
  421                                 if ((fs.object->flags & OBJ_COLORED) == 0) {
  422                                         fs.object->flags |= OBJ_COLORED;
  423                                         fs.object->pg_color = atop(vaddr) -
  424                                             fs.pindex;
  425                                 }
  426 #endif
  427                                 fs.m = vm_page_alloc(fs.object, fs.pindex,
  428                                     (fs.object->type == OBJT_VNODE ||
  429                                      fs.object->backing_object != NULL) ?
  430                                     VM_ALLOC_NORMAL : VM_ALLOC_ZERO);
  431                         }
  432                         if (fs.m == NULL) {
  433                                 unlock_and_deallocate(&fs);
  434                                 VM_WAITPFAULT;
  435                                 goto RetryFault;
  436                         } else if (fs.m->valid == VM_PAGE_BITS_ALL)
  437                                 break;
  438                 }
  439 
  440 readrest:
  441                 /*
  442                  * We have either found an existing page or allocated a new
  443                  * page.  In either case the page may not be valid, or may be
  444                  * only partially valid.
  445                  *
  446                  * Attempt to fault-in the page if there is a chance that the
  447                  * pager has it, and potentially fault in additional pages
  448                  * at the same time.
  449                  */
  450                 if (TRYPAGER) {
  451                         int rv;
  452                         int reqpage = 0;
  453                         u_char behavior = vm_map_entry_behavior(fs.entry);
  454 
  455                         if (behavior == MAP_ENTRY_BEHAV_RANDOM) {
  456                                 ahead = 0;
  457                                 behind = 0;
  458                         } else {
  459                                 behind = (vaddr - fs.entry->start) >> PAGE_SHIFT;
  460                                 if (behind > VM_FAULT_READ_BEHIND)
  461                                         behind = VM_FAULT_READ_BEHIND;
  462 
  463                                 ahead = ((fs.entry->end - vaddr) >> PAGE_SHIFT) - 1;
  464                                 if (ahead > VM_FAULT_READ_AHEAD)
  465                                         ahead = VM_FAULT_READ_AHEAD;
  466                         }
  467                         is_first_object_locked = FALSE;
  468                         if ((behavior == MAP_ENTRY_BEHAV_SEQUENTIAL ||
  469                              (behavior != MAP_ENTRY_BEHAV_RANDOM &&
  470                               fs.pindex >= fs.entry->lastr &&
  471                               fs.pindex < fs.entry->lastr + VM_FAULT_READ)) &&
  472                             (fs.first_object == fs.object ||
  473                              (is_first_object_locked = VM_OBJECT_TRYLOCK(fs.first_object))) &&
  474                             fs.first_object->type != OBJT_DEVICE &&
  475                             fs.first_object->type != OBJT_PHYS &&
  476                             fs.first_object->type != OBJT_SG) {
  477                                 vm_pindex_t firstpindex, tmppindex;
  478 
  479                                 if (fs.first_pindex < 2 * VM_FAULT_READ)
  480                                         firstpindex = 0;
  481                                 else
  482                                         firstpindex = fs.first_pindex - 2 * VM_FAULT_READ;
  483 
  484                                 are_queues_locked = FALSE;
  485                                 /*
  486                                  * note: partially valid pages cannot be 
  487                                  * included in the lookahead - NFS piecemeal
  488                                  * writes will barf on it badly.
  489                                  */
  490                                 for (tmppindex = fs.first_pindex - 1;
  491                                         tmppindex >= firstpindex;
  492                                         --tmppindex) {
  493                                         vm_page_t mt;
  494 
  495                                         mt = vm_page_lookup(fs.first_object, tmppindex);
  496                                         if (mt == NULL || (mt->valid != VM_PAGE_BITS_ALL))
  497                                                 break;
  498                                         if (mt->busy ||
  499                                             (mt->oflags & VPO_BUSY))
  500                                                 continue;
  501                                         if (!are_queues_locked) {
  502                                                 are_queues_locked = TRUE;
  503                                                 vm_page_lock_queues();
  504                                         }
  505                                         if (mt->hold_count ||
  506                                                 mt->wire_count) 
  507                                                 continue;
  508                                         pmap_remove_all(mt);
  509                                         if (mt->dirty) {
  510                                                 vm_page_deactivate(mt);
  511                                         } else {
  512                                                 vm_page_cache(mt);
  513                                         }
  514                                 }
  515                                 if (are_queues_locked)
  516                                         vm_page_unlock_queues();
  517                                 ahead += behind;
  518                                 behind = 0;
  519                         }
  520                         if (is_first_object_locked)
  521                                 VM_OBJECT_UNLOCK(fs.first_object);
  522 
  523                         /*
  524                          * Call the pager to retrieve the data, if any, after
  525                          * releasing the lock on the map.  We hold a ref on
  526                          * fs.object and the pages are VPO_BUSY'd.
  527                          */
  528                         unlock_map(&fs);
  529 
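                              /*
                               * A vnode-backed object requires the vnode lock (and
                               * possibly Giant) before pager I/O.  The lock is taken
                               * without sleeping while fs.m is busy; if that fails,
                               * the fault state is torn down, the lock is waited for,
                               * and the fault is retried.
                               */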
  530 vnode_lock:
  531                         if (fs.object->type == OBJT_VNODE) {
  532                                 vp = fs.object->handle;
  533                                 if (vp == fs.vp)
  534                                         goto vnode_locked;
  535                                 else if (fs.vp != NULL) {
  536                                         vput(fs.vp);
  537                                         fs.vp = NULL;
  538                                 }
  539                                 locked = VOP_ISLOCKED(vp);
  540 
  541                                 if (VFS_NEEDSGIANT(vp->v_mount) && !fs.vfslocked) {
  542                                         fs.vfslocked = 1;
  543                                         if (!mtx_trylock(&Giant)) {
  544                                                 VM_OBJECT_UNLOCK(fs.object);
  545                                                 mtx_lock(&Giant);
  546                                                 VM_OBJECT_LOCK(fs.object);
  547                                                 goto vnode_lock;
  548                                         }
  549                                 }
  550                                 if (locked != LK_EXCLUSIVE)
  551                                         locked = LK_SHARED;
  552                                 /* Do not sleep for vnode lock while fs.m is busy */
  553                                 error = vget(vp, locked | LK_CANRECURSE |
  554                                     LK_NOWAIT, curthread);
  555                                 if (error != 0) {
  556                                         int vfslocked;
  557 
  558                                         vfslocked = fs.vfslocked;
  559                                         fs.vfslocked = 0; /* Keep Giant */
  560                                         vhold(vp);
  561                                         release_page(&fs);
  562                                         unlock_and_deallocate(&fs);
  563                                         error = vget(vp, locked | LK_RETRY |
  564                                             LK_CANRECURSE, curthread);
  565                                         vdrop(vp);
  566                                         fs.vp = vp;
  567                                         fs.vfslocked = vfslocked;
  568                                         KASSERT(error == 0,
  569                                             ("vm_fault: vget failed"));
  570                                         goto RetryFault;
  571                                 }
  572                                 fs.vp = vp;
  573                         }
  574 vnode_locked:
  575                         KASSERT(fs.vp == NULL || !fs.map->system_map,
  576                             ("vm_fault: vnode-backed object mapped by system map"));
  577 
  578                         /*
  579                          * Now we find out if any other pages should be paged
  580                          * in at this time.  This routine checks to see if the
  581                          * pages surrounding this fault reside in the same
  582                          * object as the page for this fault.  If they do,
  583                          * then they are also faulted into the object.  The
  584                          * array "marray" returned contains an array of
  585                          * vm_page_t structs where one of them is the
  586                          * vm_page_t passed to the routine.  The reqpage
  587                          * return value is the index into the marray for the
  588                          * vm_page_t passed to the routine.
  589                          *
  590                          * fs.m plus the additional pages are VPO_BUSY'd.
  591                          */
  592                         faultcount = vm_fault_additional_pages(
  593                             fs.m, behind, ahead, marray, &reqpage);
  594 
  595                         rv = faultcount ?
  596                             vm_pager_get_pages(fs.object, marray, faultcount,
  597                                 reqpage) : VM_PAGER_FAIL;
  598 
  599                         if (rv == VM_PAGER_OK) {
  600                                 /*
  601                                  * Found the page. Leave it busy while we play
  602                                  * with it.
  603                                  */
  604 
  605                                 /*
  606                                  * Relookup in case pager changed page. Pager
  607                                  * is responsible for disposition of old page
  608                                  * if moved.
  609                                  */
  610                                 fs.m = vm_page_lookup(fs.object, fs.pindex);
  611                                 if (!fs.m) {
  612                                         unlock_and_deallocate(&fs);
  613                                         goto RetryFault;
  614                                 }
  615 
  616                                 hardfault++;
  617                                 break; /* break to PAGE HAS BEEN FOUND */
  618                         }
  619                         /*
  620                          * Remove the bogus page (which does not exist at this
  621                          * object/offset); before doing so, we must get back
  622                          * our object lock to preserve our invariant.
  623                          *
  624                          * Also wake up any other process that may want to bring
  625                          * in this page.
  626                          *
  627                          * If this is the top-level object, we must leave the
  628                          * busy page to prevent another process from rushing
  629                          * past us, and inserting the page in that object at
  630                          * the same time that we are.
  631                          */
  632                         if (rv == VM_PAGER_ERROR)
  633                                 printf("vm_fault: pager read error, pid %d (%s)\n",
  634                                     curproc->p_pid, curproc->p_comm);
  635                         /*
  636                          * Data outside the range of the pager or an I/O error
  637                          */
  638                         /*
  639                          * XXX - the check for kernel_map is a kludge to work
  640                          * around having the machine panic on a kernel space
  641                          * fault w/ I/O error.
  642                          */
  643                         if (((fs.map != kernel_map) && (rv == VM_PAGER_ERROR)) ||
  644                                 (rv == VM_PAGER_BAD)) {
  645                                 vm_page_lock_queues();
  646                                 vm_page_free(fs.m);
  647                                 vm_page_unlock_queues();
  648                                 fs.m = NULL;
  649                                 unlock_and_deallocate(&fs);
  650                                 return ((rv == VM_PAGER_ERROR) ? KERN_FAILURE : KERN_PROTECTION_FAILURE);
  651                         }
  652                         if (fs.object != fs.first_object) {
  653                                 vm_page_lock_queues();
  654                                 vm_page_free(fs.m);
  655                                 vm_page_unlock_queues();
  656                                 fs.m = NULL;
  657                                 /*
  658                                  * XXX - we cannot just fall out at this
  659                                  * point, m has been freed and is invalid!
  660                                  */
  661                         }
  662                 }
  663 
  664                 /*
  665                  * We get here if the object has a default pager (or we are
  666                  * unwiring) or the pager doesn't have the page.
  667                  */
  668                 if (fs.object == fs.first_object)
  669                         fs.first_m = fs.m;
  670 
  671                 /*
  672                  * Move on to the next object.  Lock the next object before
  673                  * unlocking the current one.
  674                  */
  675                 fs.pindex += OFF_TO_IDX(fs.object->backing_object_offset);
  676                 next_object = fs.object->backing_object;
  677                 if (next_object == NULL) {
  678                         /*
  679                          * If there's no object left, fill the page in the top
  680                          * object with zeros.
  681                          */
  682                         if (fs.object != fs.first_object) {
  683                                 vm_object_pip_wakeup(fs.object);
  684                                 VM_OBJECT_UNLOCK(fs.object);
  685 
  686                                 fs.object = fs.first_object;
  687                                 fs.pindex = fs.first_pindex;
  688                                 fs.m = fs.first_m;
  689                                 VM_OBJECT_LOCK(fs.object);
  690                         }
  691                         fs.first_m = NULL;
  692 
  693                         /*
  694                          * Zero the page if necessary and mark it valid.
  695                          */
  696                         if ((fs.m->flags & PG_ZERO) == 0) {
  697                                 pmap_zero_page(fs.m);
  698                         } else {
  699                                 PCPU_INC(cnt.v_ozfod);
  700                         }
  701                         PCPU_INC(cnt.v_zfod);
  702                         fs.m->valid = VM_PAGE_BITS_ALL;
  703                         break;  /* break to PAGE HAS BEEN FOUND */
  704                 } else {
  705                         KASSERT(fs.object != next_object,
  706                             ("object loop %p", next_object));
  707                         VM_OBJECT_LOCK(next_object);
  708                         vm_object_pip_add(next_object, 1);
  709                         if (fs.object != fs.first_object)
  710                                 vm_object_pip_wakeup(fs.object);
  711                         VM_OBJECT_UNLOCK(fs.object);
  712                         fs.object = next_object;
  713                 }
  714         }
  715 
  716         KASSERT((fs.m->oflags & VPO_BUSY) != 0,
  717             ("vm_fault: not busy after main loop"));
  718 
  719         /*
  720          * PAGE HAS BEEN FOUND. [Loop invariant still holds -- the object lock
  721          * is held.]
  722          */
  723 
  724         /*
  725          * If the page is being written, but isn't already owned by the
  726          * top-level object, we have to copy it into a new page owned by the
  727          * top-level object.
  728          */
  729         if (fs.object != fs.first_object) {
  730                 /*
  731                  * We only really need to copy if we want to write it.
  732                  */
  733                 if (fault_type & VM_PROT_WRITE) {
  734                         /*
  735                          * This allows pages to be virtually copied from a 
  736                          * backing_object into the first_object, where the 
  737                          * backing object has no other refs to it, and cannot
  738                          * gain any more refs.  Instead of a bcopy, we just 
  739                          * move the page from the backing object to the 
  740                          * first object.  Note that we must mark the page 
  741                          * dirty in the first object so that it will go out 
  742                          * to swap when needed.
  743                          */
  744                         is_first_object_locked = FALSE;
  745                         if (
  746                                 /*
  747                                  * Only one shadow object
  748                                  */
  749                                 (fs.object->shadow_count == 1) &&
  750                                 /*
  751                                  * No COW refs, except us
  752                                  */
  753                                 (fs.object->ref_count == 1) &&
  754                                 /*
  755                                  * No one else can look this object up
  756                                  */
  757                                 (fs.object->handle == NULL) &&
  758                                 /*
  759                                  * No other ways to look the object up
  760                                  */
  761                                 ((fs.object->type == OBJT_DEFAULT) ||
  762                                  (fs.object->type == OBJT_SWAP)) &&
  763                             (is_first_object_locked = VM_OBJECT_TRYLOCK(fs.first_object)) &&
  764                                 /*
  765                                  * We don't chase down the shadow chain
  766                                  */
  767                             fs.object == fs.first_object->backing_object) {
  768                                 vm_page_lock_queues();
  769                                 /*
  770                                  * get rid of the unnecessary page
  771                                  */
  772                                 vm_page_free(fs.first_m);
  773                                 /*
  774                                  * grab the page and put it into the 
  775                                  * process's object.  The page is
  776                                  * automatically made dirty.
  777                                  */
  778                                 vm_page_rename(fs.m, fs.first_object, fs.first_pindex);
  779                                 vm_page_unlock_queues();
  780                                 vm_page_busy(fs.m);
  781                                 fs.first_m = fs.m;
  782                                 fs.m = NULL;
  783                                 PCPU_INC(cnt.v_cow_optim);
  784                         } else {
  785                                 /*
  786                                  * Oh, well, let's copy it.
  787                                  */
  788                                 pmap_copy_page(fs.m, fs.first_m);
  789                                 fs.first_m->valid = VM_PAGE_BITS_ALL;
  790                         }
  791                         if (fs.m) {
  792                                 /*
  793                                  * We no longer need the old page or object.
  794                                  */
  795                                 release_page(&fs);
  796                         }
  797                         /*
  798                          * fs.object != fs.first_object due to above 
  799                          * conditional
  800                          */
  801                         vm_object_pip_wakeup(fs.object);
  802                         VM_OBJECT_UNLOCK(fs.object);
  803                         /*
  804                          * Only use the new page below...
  805                          */
  806                         fs.object = fs.first_object;
  807                         fs.pindex = fs.first_pindex;
  808                         fs.m = fs.first_m;
  809                         if (!is_first_object_locked)
  810                                 VM_OBJECT_LOCK(fs.object);
  811                         PCPU_INC(cnt.v_cow_faults);
  812                 } else {
  813                         prot &= ~VM_PROT_WRITE;
  814                 }
  815         }
  816 
  817         /*
  818          * We must verify that the maps have not changed since our last
  819          * lookup.
  820          */
  821         if (!fs.lookup_still_valid) {
  822                 vm_object_t retry_object;
  823                 vm_pindex_t retry_pindex;
  824                 vm_prot_t retry_prot;
  825 
  826                 if (!vm_map_trylock_read(fs.map)) {
  827                         release_page(&fs);
  828                         unlock_and_deallocate(&fs);
  829                         goto RetryFault;
  830                 }
  831                 fs.lookup_still_valid = TRUE;
  832                 if (fs.map->timestamp != map_generation) {
  833                         result = vm_map_lookup_locked(&fs.map, vaddr, fault_type,
  834                             &fs.entry, &retry_object, &retry_pindex, &retry_prot, &wired);
  835 
  836                         /*
  837                          * If we don't need the page any longer, put it on the inactive
  838                          * list (the easiest thing to do here).  If no one needs it,
  839                          * pageout will grab it eventually.
  840                          */
  841                         if (result != KERN_SUCCESS) {
  842                                 release_page(&fs);
  843                                 unlock_and_deallocate(&fs);
  844 
  845                                 /*
  846                                  * If retry of map lookup would have blocked then
  847                                  * retry fault from start.
  848                                  */
  849                                 if (result == KERN_FAILURE)
  850                                         goto RetryFault;
  851                                 return (result);
  852                         }
  853                         if ((retry_object != fs.first_object) ||
  854                             (retry_pindex != fs.first_pindex)) {
  855                                 release_page(&fs);
  856                                 unlock_and_deallocate(&fs);
  857                                 goto RetryFault;
  858                         }
  859 
  860                         /*
  861                          * Check whether the protection has changed or the object has
  862                          * been copied while we left the map unlocked. Changing from
  863                          * read to write permission is OK - we leave the page
  864                          * write-protected, and catch the write fault. Changing from
  865                          * write to read permission means that we can't mark the page
  866                          * write-enabled after all.
  867                          */
  868                         prot &= retry_prot;
  869                 }
  870         }
  871         /*
  872          * If the page was filled by a pager, update the map entry's
  873          * last read offset.  Since the pager does not return the
  874          * actual set of pages that it read, this update is based on
  875          * the requested set.  Typically, the requested and actual
  876          * sets are the same.
  877          *
  878          * XXX The following assignment modifies the map
  879          * without holding a write lock on it.
  880          */
  881         if (hardfault)
  882                 fs.entry->lastr = fs.pindex + faultcount - behind;
  883 
  884         if (prot & VM_PROT_WRITE) {
  885                 vm_object_set_writeable_dirty(fs.object);
  886 
  887                 /*
  888                  * If the fault is a write, we know that this page is being
  889                  * written NOW so dirty it explicitly to save on 
  890                  * pmap_is_modified() calls later.
  891                  *
  892                  * If this is a NOSYNC mmap we do not want to set VPO_NOSYNC
  893                  * if the page is already dirty to prevent data written with
  894                  * the expectation of being synced from not being synced.
  895                  * Likewise if this entry does not request NOSYNC then make
  896                  * sure the page isn't marked NOSYNC.  Applications sharing
  897                  * data should use the same flags to avoid ping ponging.
  898                  *
  899                  * Also tell the backing pager, if any, that it should remove
  900                  * any swap backing since the page is now dirty.
  901                  */
  902                 if (fs.entry->eflags & MAP_ENTRY_NOSYNC) {
  903                         if (fs.m->dirty == 0)
  904                                 fs.m->oflags |= VPO_NOSYNC;
  905                 } else {
  906                         fs.m->oflags &= ~VPO_NOSYNC;
  907                 }
  908                 if (fault_flags & VM_FAULT_DIRTY) {
  909                         vm_page_dirty(fs.m);
  910                         vm_pager_page_unswapped(fs.m);
  911                 }
  912         }
  913 
  914         /*
  915          * Page had better still be busy
  916          */
  917         KASSERT(fs.m->oflags & VPO_BUSY,
  918                 ("vm_fault: page %p not busy!", fs.m));
  919         /*
  920          * Page must be completely valid or it is not fit to
  921          * map into user space.  vm_pager_get_pages() ensures this.
  922          */
  923         KASSERT(fs.m->valid == VM_PAGE_BITS_ALL,
  924             ("vm_fault: page %p partially invalid", fs.m));
  925         VM_OBJECT_UNLOCK(fs.object);
  926 
  927         /*
  928          * Put this page into the physical map.  We had to do the unlock above
  929          * because pmap_enter() may sleep.  We don't put the page
  930          * back on the active queue until later so that the pageout daemon
  931          * won't find it (yet).
  932          */
  933         pmap_enter(fs.map->pmap, vaddr, fault_type, fs.m, prot, wired);
  934         if (((fault_flags & VM_FAULT_WIRE_MASK) == 0) && (wired == 0)) {
  935                 vm_fault_prefault(fs.map->pmap, vaddr, fs.entry);
  936         }
  937         VM_OBJECT_LOCK(fs.object);
  938         vm_page_lock_queues();
  939         vm_page_flag_set(fs.m, PG_REFERENCED);
  940 
  941         /*
  942          * If the page is not wired down, then put it where the pageout daemon
  943          * can find it.
  944          */
  945         if (fault_flags & VM_FAULT_WIRE_MASK) {
  946                 if (wired)
  947                         vm_page_wire(fs.m);
  948                 else
  949                         vm_page_unwire(fs.m, 1);
  950         } else {
  951                 vm_page_activate(fs.m);
  952         }
  953         vm_page_unlock_queues();
  954         vm_page_wakeup(fs.m);
  955 
  956         /*
  957          * Unlock everything, and return
  958          */
  959         unlock_and_deallocate(&fs);
  960         if (hardfault)
  961                 curthread->td_ru.ru_majflt++;
  962         else
  963                 curthread->td_ru.ru_minflt++;
  964 
  965         return (KERN_SUCCESS);
  966 }
  967 
  968 /*
  969  * vm_fault_prefault provides a quick way of clustering
  970  * pagefaults into a processes address space.  It is a "cousin"
  971  * of vm_map_pmap_enter, except it runs at page fault time instead
  972  * of mmap time.
  973  */
  974 static void
  975 vm_fault_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
  976 {
  977         int i;
  978         vm_offset_t addr, starta;
  979         vm_pindex_t pindex;
  980         vm_page_t m;
  981         vm_object_t object;
  982 
  983         if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace))
  984                 return;
  985 
  986         object = entry->object.vm_object;
  987 
  988         starta = addra - PFBAK * PAGE_SIZE;
  989         if (starta < entry->start) {
  990                 starta = entry->start;
  991         } else if (starta > addra) {
  992                 starta = 0;
  993         }
  994 
  995         for (i = 0; i < PAGEORDER_SIZE; i++) {
  996                 vm_object_t backing_object, lobject;
  997 
  998                 addr = addra + prefault_pageorder[i];
  999                 if (addr > addra + (PFFOR * PAGE_SIZE))
 1000                         addr = 0;
 1001 
 1002                 if (addr < starta || addr >= entry->end)
 1003                         continue;
 1004 
 1005                 if (!pmap_is_prefaultable(pmap, addr))
 1006                         continue;
 1007 
 1008                 pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
 1009                 lobject = object;
 1010                 VM_OBJECT_LOCK(lobject);
 1011                 while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
 1012                     lobject->type == OBJT_DEFAULT &&
 1013                     (backing_object = lobject->backing_object) != NULL) {
 1014                         if (lobject->backing_object_offset & PAGE_MASK)
 1015                                 break;
 1016                         pindex += lobject->backing_object_offset >> PAGE_SHIFT;
 1017                         VM_OBJECT_LOCK(backing_object);
 1018                         VM_OBJECT_UNLOCK(lobject);
 1019                         lobject = backing_object;
 1020                 }
 1021                 /*
 1022                  * give up when a page is not in memory
 1023                  */
 1024                 if (m == NULL) {
 1025                         VM_OBJECT_UNLOCK(lobject);
 1026                         break;
 1027                 }
 1028                 if (m->valid == VM_PAGE_BITS_ALL &&
 1029                     (m->flags & PG_FICTITIOUS) == 0) {
 1030                         vm_page_lock_queues();
 1031                         pmap_enter_quick(pmap, addr, m, entry->protection);
 1032                         vm_page_unlock_queues();
 1033                 }
 1034                 VM_OBJECT_UNLOCK(lobject);
 1035         }
 1036 }
 1037 
 1038 /*
 1039  *      vm_fault_quick:
 1040  *
 1041  *      Ensure that the requested virtual address, which may be in userland,
 1042  *      is valid.  Fault-in the page if necessary.  Return -1 on failure.
 1043  */
 1044 int
 1045 vm_fault_quick(caddr_t v, int prot)
 1046 {
 1047         int r;
 1048 
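              /*
               * Touching the byte at the given address forces the page to be
               * faulted in: fubyte() performs the read access, and for write
               * permission the byte is written back with subyte(), forcing a
               * write fault as well.
               */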
 1049         if (prot & VM_PROT_WRITE)
 1050                 r = subyte(v, fubyte(v));
 1051         else
 1052                 r = fubyte(v);
 1053         return(r);
 1054 }
 1055 
 1056 /*
 1057  *      vm_fault_wire:
 1058  *
 1059  *      Wire down a range of virtual addresses in a map.
 1060  */
 1061 int
 1062 vm_fault_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
 1063     boolean_t user_wire, boolean_t fictitious)
 1064 {
 1065         vm_offset_t va;
 1066         int rv;
 1067 
 1068         /*
 1069          * We simulate a fault to get the page and enter it in the physical
 1070          * map.  For user wiring, we only ask for read access on currently
 1071          * read-only sections.
 1072          */
 1073         for (va = start; va < end; va += PAGE_SIZE) {
 1074                 rv = vm_fault(map, va,
 1075                     user_wire ? VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE,
 1076                     user_wire ? VM_FAULT_USER_WIRE : VM_FAULT_CHANGE_WIRING);
 1077                 if (rv) {
 1078                         if (va != start)
 1079                                 vm_fault_unwire(map, start, va, fictitious);
 1080                         return (rv);
 1081                 }
 1082         }
 1083         return (KERN_SUCCESS);
 1084 }
 1085 
 1086 /*
 1087  *      vm_fault_unwire:
 1088  *
 1089  *      Unwire a range of virtual addresses in a map.
 1090  */
 1091 void
 1092 vm_fault_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
 1093     boolean_t fictitious)
 1094 {
 1095         vm_paddr_t pa;
 1096         vm_offset_t va;
 1097         pmap_t pmap;
 1098 
 1099         pmap = vm_map_pmap(map);
 1100 
 1101         /*
 1102          * Since the pages are wired down, we must be able to get their
 1103          * mappings from the physical map system.
 1104          */
 1105         for (va = start; va < end; va += PAGE_SIZE) {
 1106                 pa = pmap_extract(pmap, va);
 1107                 if (pa != 0) {
 1108                         pmap_change_wiring(pmap, va, FALSE);
 1109                         if (!fictitious) {
 1110                                 vm_page_lock_queues();
 1111                                 vm_page_unwire(PHYS_TO_VM_PAGE(pa), 1);
 1112                                 vm_page_unlock_queues();
 1113                         }
 1114                 }
 1115         }
 1116 }
 1117 
 1118 /*
 1119  *      Routine:
 1120  *              vm_fault_copy_entry
 1121  *      Function:
 1122  *              Copy all of the pages from a wired-down map entry to another.
 1123  *
 1124  *      In/out conditions:
 1125  *              The source and destination maps must be locked for write.
 1126  *              The source map entry must be wired down (or be a sharing map
 1127  *              entry corresponding to a main map entry that is wired down).
 1128  */
 1129 void
 1130 vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
 1131     vm_map_entry_t dst_entry, vm_map_entry_t src_entry,
 1132     vm_ooffset_t *fork_charge)
 1133 {
 1134         vm_object_t backing_object, dst_object, object;
 1135         vm_object_t src_object;
 1136         vm_ooffset_t dst_offset;
 1137         vm_ooffset_t src_offset;
 1138         vm_pindex_t pindex;
 1139         vm_prot_t prot;
 1140         vm_offset_t vaddr;
 1141         vm_page_t dst_m;
 1142         vm_page_t src_m;
 1143 
 1144 #ifdef  lint
 1145         src_map++;
 1146 #endif  /* lint */
 1147 
 1148         src_object = src_entry->object.vm_object;
 1149         src_offset = src_entry->offset;
 1150 
 1151         /*
 1152          * Create the top-level object for the destination entry. (Doesn't
 1153          * actually shadow anything - we copy the pages directly.)
 1154          */
 1155         dst_object = vm_object_allocate(OBJT_DEFAULT,
 1156             OFF_TO_IDX(dst_entry->end - dst_entry->start));
 1157 #if VM_NRESERVLEVEL > 0
 1158         dst_object->flags |= OBJ_COLORED;
 1159         dst_object->pg_color = atop(dst_entry->start);
 1160 #endif
 1161 
 1162         VM_OBJECT_LOCK(dst_object);
 1163         KASSERT(dst_entry->object.vm_object == NULL,
 1164             ("vm_fault_copy_entry: vm_object not NULL"));
 1165         dst_entry->object.vm_object = dst_object;
 1166         dst_entry->offset = 0;
 1167         dst_object->uip = curthread->td_ucred->cr_ruidinfo;
 1168         uihold(dst_object->uip);
 1169         dst_object->charge = dst_entry->end - dst_entry->start;
 1170         KASSERT(dst_entry->uip == NULL,
 1171             ("vm_fault_copy_entry: leaked swp charge"));
 1172         *fork_charge += dst_object->charge;
 1173         prot = dst_entry->max_protection;
 1174 
 1175         /*
 1176          * Loop through all of the pages in the entry's range, copying each
 1177          * one from the source object (it should be there) to the destination
 1178          * object.
 1179          */
 1180         for (vaddr = dst_entry->start, dst_offset = 0;
 1181             vaddr < dst_entry->end;
 1182             vaddr += PAGE_SIZE, dst_offset += PAGE_SIZE) {
 1183 
 1184                 /*
 1185                  * Allocate a page in the destination object
 1186                  */
 1187                 do {
 1188                         dst_m = vm_page_alloc(dst_object,
 1189                                 OFF_TO_IDX(dst_offset), VM_ALLOC_NORMAL);
 1190                         if (dst_m == NULL) {
 1191                                 VM_OBJECT_UNLOCK(dst_object);
 1192                                 VM_WAIT;
 1193                                 VM_OBJECT_LOCK(dst_object);
 1194                         }
 1195                 } while (dst_m == NULL);
 1196 
 1197                 /*
 1198                  * Find the page in the source object, and copy it in.
 1199                  * (Because the source is wired down, the page will be in
 1200                  * memory.)
 1201                  */
 1202                 VM_OBJECT_LOCK(src_object);
 1203                 object = src_object;
 1204                 pindex = 0;
 1205                 while ((src_m = vm_page_lookup(object, pindex +
 1206                     OFF_TO_IDX(dst_offset + src_offset))) == NULL &&
 1207                     (src_entry->protection & VM_PROT_WRITE) == 0 &&
 1208                     (backing_object = object->backing_object) != NULL) {
 1209                         /*
 1210                          * Allow fallback to backing objects if we are reading.
 1211                          */
 1212                         VM_OBJECT_LOCK(backing_object);
 1213                         pindex += OFF_TO_IDX(object->backing_object_offset);
 1214                         VM_OBJECT_UNLOCK(object);
 1215                         object = backing_object;
 1216                 }
 1217                 if (src_m == NULL)
 1218                         panic("vm_fault_copy_entry: page missing");
 1219                 pmap_copy_page(src_m, dst_m);
 1220                 VM_OBJECT_UNLOCK(object);
 1221                 dst_m->valid = VM_PAGE_BITS_ALL;
 1222                 VM_OBJECT_UNLOCK(dst_object);
 1223 
 1224                 /*
 1225                  * Enter it in the pmap as a read and/or execute access.
 1226                  */
 1227                 pmap_enter(dst_map->pmap, vaddr, prot & ~VM_PROT_WRITE, dst_m,
 1228                     prot, FALSE);
 1229 
 1230                 /*
 1231                  * Mark it no longer busy, and put it on the active list.
 1232                  */
 1233                 VM_OBJECT_LOCK(dst_object);
 1234                 vm_page_lock_queues();
 1235                 vm_page_activate(dst_m);
 1236                 vm_page_unlock_queues();
 1237                 vm_page_wakeup(dst_m);
 1238         }
 1239         VM_OBJECT_UNLOCK(dst_object);
 1240 }
 1241 
 1242 
 1243 /*
 1244  * This routine checks around the requested page for other pages that
 1245  * might be faulted in at the same time.  It brackets the viable range
 1246  * of pages to be paged in.
 1247  *
 1248  * Inputs:
 1249  *      m, rbehind, rahead
 1250  *
 1251  * Outputs:
 1252  *  marray (array of vm_page_t), reqpage (index of requested page)
 1253  *
 1254  * Return value:
 1255  *  number of pages in marray
 1256  */
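      /*
       * Worked example (illustrative): with pindex 10, rbehind 3 and
       * rahead 4, and assuming the pager reports sufficient backing store,
       * no neighbouring pages are already resident, the object is large
       * enough, and every allocation succeeds, the routine fills
       * marray[0..7] with pages at indices 7 through 14, sets *reqpage to
       * 3 (the slot holding the originally requested page m), and
       * returns 8.
       */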
 1257 static int
 1258 vm_fault_additional_pages(m, rbehind, rahead, marray, reqpage)
 1259         vm_page_t m;
 1260         int rbehind;
 1261         int rahead;
 1262         vm_page_t *marray;
 1263         int *reqpage;
 1264 {
 1265         int i,j;
 1266         vm_object_t object;
 1267         vm_pindex_t pindex, startpindex, endpindex, tpindex;
 1268         vm_page_t rtm;
 1269         int cbehind, cahead;
 1270 
 1271         VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 1272 
 1273         object = m->object;
 1274         pindex = m->pindex;
 1275         cbehind = cahead = 0;
 1276 
 1277         /*
 1278          * if the requested page is not available, then give up now
 1279          */
 1280         if (!vm_pager_has_page(object, pindex, &cbehind, &cahead)) {
 1281                 return 0;
 1282         }
 1283 
 1284         if ((cbehind == 0) && (cahead == 0)) {
 1285                 *reqpage = 0;
 1286                 marray[0] = m;
 1287                 return 1;
 1288         }
 1289 
 1290         if (rahead > cahead) {
 1291                 rahead = cahead;
 1292         }
 1293 
 1294         if (rbehind > cbehind) {
 1295                 rbehind = cbehind;
 1296         }
 1297 
 1298         /*
 1299          * scan backward for the read-behind pages
 1300          */
 1301         if (pindex > 0) {
 1302                 if (rbehind > pindex) {
 1303                         rbehind = pindex;
 1304                         startpindex = 0;
 1305                 } else {
 1306                         startpindex = pindex - rbehind;
 1307                 }
 1308 
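                      /*
                       * Do not extend the read-behind window past the
                       * nearest page that is already resident before the
                       * requested page.
                       */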
 1309                 if ((rtm = TAILQ_PREV(m, pglist, listq)) != NULL &&
 1310                     rtm->pindex >= startpindex)
 1311                         startpindex = rtm->pindex + 1;
 1312 
 1313                 /* tpindex is unsigned; beware of numeric underflow. */
 1314                 for (i = 0, tpindex = pindex - 1; tpindex >= startpindex &&
 1315                     tpindex < pindex; i++, tpindex--) {
 1316 
 1317                         rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL |
 1318                             VM_ALLOC_IFNOTCACHED);
 1319                         if (rtm == NULL) {
 1320                                 /*
 1321                                  * Shift the allocated pages to the
 1322                                  * beginning of the array.
 1323                                  */
 1324                                 for (j = 0; j < i; j++) {
 1325                                         marray[j] = marray[j + tpindex + 1 -
 1326                                             startpindex];
 1327                                 }
 1328                                 break;
 1329                         }
 1330 
 1331                         marray[tpindex - startpindex] = rtm;
 1332                 }
 1333         } else {
 1334                 startpindex = 0;
 1335                 i = 0;
 1336         }
 1337 
 1338         marray[i] = m;
 1339         /* index of the requested page within marray */
 1340         *reqpage = i;
 1341 
 1342         tpindex = pindex + 1;
 1343         i++;
 1344 
 1345         /*
 1346          * scan forward for the read ahead pages
 1347          */
 1348         endpindex = tpindex + rahead;
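              /*
               * Do not read ahead past the next resident page or the end
               * of the object.
               */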
 1349         if ((rtm = TAILQ_NEXT(m, listq)) != NULL && rtm->pindex < endpindex)
 1350                 endpindex = rtm->pindex;
 1351         if (endpindex > object->size)
 1352                 endpindex = object->size;
 1353 
 1354         for (; tpindex < endpindex; i++, tpindex++) {
 1355 
 1356                 rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL |
 1357                     VM_ALLOC_IFNOTCACHED);
 1358                 if (rtm == NULL) {
 1359                         break;
 1360                 }
 1361 
 1362                 marray[i] = rtm;
 1363         }
 1364 
 1365         /* return number of pages */
 1366         return i;
 1367 }
