FreeBSD/Linux Kernel Cross Reference
sys/uvm/uvm_fault.c


    1 /*      $OpenBSD: uvm_fault.c,v 1.133 2022/11/04 09:36:44 mpi Exp $     */
    2 /*      $NetBSD: uvm_fault.c,v 1.51 2000/08/06 00:22:53 thorpej Exp $   */
    3 
    4 /*
    5  * Copyright (c) 1997 Charles D. Cranor and Washington University.
    6  * All rights reserved.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   27  *
   28  * from: Id: uvm_fault.c,v 1.1.2.23 1998/02/06 05:29:05 chs Exp
   29  */
   30 
   31 /*
   32  * uvm_fault.c: fault handler
   33  */
   34 
   35 #include <sys/param.h>
   36 #include <sys/systm.h>
   37 #include <sys/kernel.h>
   38 #include <sys/percpu.h>
   39 #include <sys/proc.h>
   40 #include <sys/malloc.h>
   41 #include <sys/mman.h>
   42 #include <sys/tracepoint.h>
   43 
   44 #include <uvm/uvm.h>
   45 
   46 /*
   47  *
   48  * a word on page faults:
   49  *
   50  * types of page faults we handle:
   51  *
   52  * CASE 1: upper layer faults                   CASE 2: lower layer faults
   53  *
   54  *    CASE 1A         CASE 1B                  CASE 2A        CASE 2B
   55  *    read/write1     write>1                  read/write   +-cow_write/zero
   56  *         |             |                         |        |
   57  *      +--|--+       +--|--+     +-----+       +  |  +     | +-----+
   58  * amap |  V  |       |  ---------> new |          |        | |  ^  |
   59  *      +-----+       +-----+     +-----+       +  |  +     | +--|--+
   60  *                                                 |        |    |
   61  *      +-----+       +-----+                   +--|--+     | +--|--+
   62  * uobj | d/c |       | d/c |                   |  V  |     +----+  |
   63  *      +-----+       +-----+                   +-----+       +-----+
   64  *
   65  * d/c = don't care
   66  *
   67  *   case [0]: layerless fault
   68  *      no amap or uobj is present.   this is an error.
   69  *
   70  *   case [1]: upper layer fault [anon active]
   71  *     1A: [read] or [write with anon->an_ref == 1]
   72  *              I/O takes place in upper level anon and uobj is not touched.
   73  *     1B: [write with anon->an_ref > 1]
   74  *              new anon is alloc'd and data is copied off ["COW"]
   75  *
   76  *   case [2]: lower layer fault [uobj]
   77  *     2A: [read on non-NULL uobj] or [write to non-copy_on_write area]
   78  *              I/O takes place directly in object.
   79  *     2B: [write to copy_on_write] or [read on NULL uobj]
   80  *              data is "promoted" from uobj to a new anon.
   81  *              if uobj is null, then we zero fill.
   82  *
   83  * we follow the standard UVM locking protocol ordering:
   84  *
   85  * MAPS => AMAP => UOBJ => ANON => PAGE QUEUES (PQ)
   86  * we hold a PG_BUSY page if we unlock for I/O
   87  *
   88  *
   89  * the code is structured as follows:
   90  *
   91  *     - init the "IN" params in the ufi structure
   92  *   ReFault: (ERESTART returned to the loop in uvm_fault)
   93  *     - do lookups [locks maps], check protection, handle needs_copy
   94  *     - check for case 0 fault (error)
   95  *     - establish "range" of fault
   96  *     - if we have an amap lock it and extract the anons
   97  *     - if sequential advice deactivate pages behind us
   98  *     - at the same time check pmap for unmapped areas and anon for pages
   99  *       that we could map in (and do map it if found)
  100  *     - check object for resident pages that we could map in
  101  *     - if (case 2) goto Case2
  102  *     - >>> handle case 1
  103  *           - ensure source anon is resident in RAM
  104  *           - if case 1B alloc new anon and copy from source
  105  *           - map the correct page in
  106  *   Case2:
  107  *     - >>> handle case 2
  108  *           - ensure source page is resident (if uobj)
  109  *           - if case 2B alloc new anon and copy from source (could be zero
  110  *              fill if uobj == NULL)
  111  *           - map the correct page in
  112  *     - done!
  113  *
  114  * note on paging:
  115  *   if we have to do I/O we place a PG_BUSY page in the correct object,
  116  * unlock everything, and do the I/O.   when I/O is done we must reverify
  117  * the state of the world before assuming that our data structures are
  118  * valid.   [because mappings could change while the map is unlocked]
  119  *
  120  *  alternative 1: unbusy the page in question and restart the page fault
  121  *    from the top (ReFault).   this is easy but does not take advantage
  122  *    of the information that we already have from our previous lookup,
  123  *    although it is possible that the "hints" in the vm_map will help here.
  124  *
  125  * alternative 2: the system already keeps track of a "version" number of
  126  *    a map.   [i.e. every time you write-lock a map (e.g. to change a
  127  *    mapping) you bump the version number up by one...]   so, we can save
  128  *    the version number of the map before we release the lock and start I/O.
  129  *    then when I/O is done we can relock and check the version numbers
   130  *    to see if anything changed.    this might save us some work over
   131  *    alternative 1 because we don't have to unbusy the page and may need fewer compares(?).
  132  *
  133  * alternative 3: put in backpointers or a way to "hold" part of a map
  134  *    in place while I/O is in progress.   this could be complex to
  135  *    implement (especially with structures like amap that can be referenced
  136  *    by multiple map entries, and figuring out what should wait could be
  137  *    complex as well...).
  138  *
  139  * we use alternative 2.  given that we are multi-threaded now we may want
  140  * to reconsider the choice.
  141  */
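
The lower-layer cases above can be observed from userland.  The following is a minimal illustrative sketch (not part of this file); it assumes a POSIX environment, uses "/etc/hosts" only as an example of some readable file, and omits most error checking for brevity.

/* cases 2A and 2B as seen from a user process (illustrative sketch) */
#include <sys/mman.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	/* case 2B with uobj == NULL: first touch of anonymous memory zero fills */
	char *anonp = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);
	anonp[0] = 1;

	/* case 2A: first read of a file-backed page faults on the uobj */
	int fd = open("/etc/hosts", O_RDONLY);
	char *filep = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 0);
	printf("first byte: %c\n", filep[0]);

	/*
	 * case 2B (cow_write): first write to a private file mapping promotes
	 * the uobj page into a new anon
	 */
	char *cowp = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	cowp[0] = 'x';

	munmap(anonp, 4096);
	munmap(filep, 4096);
	munmap(cowp, 4096);
	close(fd);
	return 0;
}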
  142 
  143 /*
  144  * local data structures
  145  */
  146 struct uvm_advice {
  147         int nback;
  148         int nforw;
  149 };
  150 
  151 /*
  152  * page range array: set up in uvmfault_init().
  153  */
  154 static struct uvm_advice uvmadvice[MADV_MASK + 1];
  155 
  156 #define UVM_MAXRANGE 16 /* must be max() of nback+nforw+1 */
  157 
  158 /*
  159  * private prototypes
  160  */
  161 static void uvmfault_amapcopy(struct uvm_faultinfo *);
  162 static inline void uvmfault_anonflush(struct vm_anon **, int);
  163 void    uvmfault_unlockmaps(struct uvm_faultinfo *, boolean_t);
  164 void    uvmfault_update_stats(struct uvm_faultinfo *);
  165 
  166 /*
  167  * inline functions
  168  */
  169 /*
  170  * uvmfault_anonflush: try and deactivate pages in specified anons
  171  *
  172  * => does not have to deactivate page if it is busy
  173  */
  174 static inline void
  175 uvmfault_anonflush(struct vm_anon **anons, int n)
  176 {
  177         int lcv;
  178         struct vm_page *pg;
  179 
  180         for (lcv = 0; lcv < n; lcv++) {
  181                 if (anons[lcv] == NULL)
  182                         continue;
  183                 KASSERT(rw_lock_held(anons[lcv]->an_lock));
  184                 pg = anons[lcv]->an_page;
  185                 if (pg && (pg->pg_flags & PG_BUSY) == 0) {
  186                         uvm_lock_pageq();
  187                         if (pg->wire_count == 0) {
  188                                 pmap_page_protect(pg, PROT_NONE);
  189                                 uvm_pagedeactivate(pg);
  190                         }
  191                         uvm_unlock_pageq();
  192                 }
  193         }
  194 }
  195 
  196 /*
  197  * normal functions
  198  */
  199 /*
  200  * uvmfault_init: compute proper values for the uvmadvice[] array.
  201  */
  202 void
  203 uvmfault_init(void)
  204 {
  205         int npages;
  206 
  207         npages = atop(16384);
  208         if (npages > 0) {
  209                 KASSERT(npages <= UVM_MAXRANGE / 2);
  210                 uvmadvice[MADV_NORMAL].nforw = npages;
  211                 uvmadvice[MADV_NORMAL].nback = npages - 1;
  212         }
  213 
  214         npages = atop(32768);
  215         if (npages > 0) {
  216                 KASSERT(npages <= UVM_MAXRANGE / 2);
  217                 uvmadvice[MADV_SEQUENTIAL].nforw = npages - 1;
  218                 uvmadvice[MADV_SEQUENTIAL].nback = npages;
  219         }
  220 }
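
With the common 4 KB page size (an assumption; the real value is machine dependent), atop(16384) is 4 and atop(32768) is 8, so MADV_NORMAL faults cover 3 pages back, the faulting page and 4 pages forward (8 pages), while MADV_SEQUENTIAL covers 8 back and 7 forward (16 pages, exactly UVM_MAXRANGE).  A standalone sketch of the same arithmetic:

/* standalone illustration of the uvmadvice numbers, assuming 4 KB pages */
#include <stdio.h>

#define SKETCH_PAGE_SHIFT	12
#define sketch_atop(x)		((x) >> SKETCH_PAGE_SHIFT)

int
main(void)
{
	int normal = sketch_atop(16384);	/* 4 pages */
	int seq = sketch_atop(32768);		/* 8 pages */

	/* MADV_NORMAL: 3 back + faulting page + 4 forward = 8 pages */
	printf("MADV_NORMAL:     nback=%d nforw=%d total=%d\n",
	    normal - 1, normal, (normal - 1) + normal + 1);
	/* MADV_SEQUENTIAL: 8 back + faulting page + 7 forward = 16 pages */
	printf("MADV_SEQUENTIAL: nback=%d nforw=%d total=%d\n",
	    seq, seq - 1, seq + (seq - 1) + 1);
	return 0;
}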
  221 
  222 /*
  223  * uvmfault_amapcopy: clear "needs_copy" in a map.
  224  *
  225  * => called with VM data structures unlocked (usually, see below)
  226  * => we get a write lock on the maps and clear needs_copy for a VA
  227  * => if we are out of RAM we sleep (waiting for more)
  228  */
  229 static void
  230 uvmfault_amapcopy(struct uvm_faultinfo *ufi)
  231 {
  232         for (;;) {
  233                 /*
  234                  * no mapping?  give up.
  235                  */
  236                 if (uvmfault_lookup(ufi, TRUE) == FALSE)
  237                         return;
  238 
  239                 /*
  240                  * copy if needed.
  241                  */
  242                 if (UVM_ET_ISNEEDSCOPY(ufi->entry))
  243                         amap_copy(ufi->map, ufi->entry, M_NOWAIT,
  244                                 UVM_ET_ISSTACK(ufi->entry) ? FALSE : TRUE,
  245                                 ufi->orig_rvaddr, ufi->orig_rvaddr + 1);
  246 
  247                 /*
  248                  * didn't work?  must be out of RAM.   unlock and sleep.
  249                  */
  250                 if (UVM_ET_ISNEEDSCOPY(ufi->entry)) {
  251                         uvmfault_unlockmaps(ufi, TRUE);
  252                         uvm_wait("fltamapcopy");
  253                         continue;
  254                 }
  255 
  256                 /*
  257                  * got it!   unlock and return.
  258                  */
  259                 uvmfault_unlockmaps(ufi, TRUE);
  260                 return;
  261         }
  262         /*NOTREACHED*/
  263 }
  264 
  265 /*
  266  * uvmfault_anonget: get data in an anon into a non-busy, non-released
  267  * page in that anon.
  268  *
  269  * => Map, amap and thus anon should be locked by caller.
  270  * => If we fail, we unlock everything and error is returned.
  271  * => If we are successful, return with everything still locked.
  272  * => We do not move the page on the queues [gets moved later].  If we
  273  *    allocate a new page [we_own], it gets put on the queues.  Either way,
  274  *    the result is that the page is on the queues at return time
  275  */
  276 int
  277 uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap,
  278     struct vm_anon *anon)
  279 {
  280         struct vm_page *pg;
  281         int error;
  282 
  283         KASSERT(rw_lock_held(anon->an_lock));
  284         KASSERT(anon->an_lock == amap->am_lock);
  285 
   286         /* Increment the counters. */
  287         counters_inc(uvmexp_counters, flt_anget);
  288         if (anon->an_page) {
  289                 curproc->p_ru.ru_minflt++;
  290         } else {
  291                 curproc->p_ru.ru_majflt++;
  292         }
  293         error = 0;
  294 
  295         /*
  296          * Loop until we get the anon data, or fail.
  297          */
  298         for (;;) {
  299                 boolean_t we_own, locked;
  300                 /*
  301                  * Note: 'we_own' will become true if we set PG_BUSY on a page.
  302                  */
  303                 we_own = FALSE;
  304                 pg = anon->an_page;
  305 
  306                 /*
  307                  * Is page resident?  Make sure it is not busy/released.
  308                  */
  309                 if (pg) {
  310                         KASSERT(pg->pg_flags & PQ_ANON);
  311                         KASSERT(pg->uanon == anon);
  312 
  313                         /*
  314                          * if the page is busy, we drop all the locks and
  315                          * try again.
  316                          */
  317                         if ((pg->pg_flags & (PG_BUSY|PG_RELEASED)) == 0)
  318                                 return (VM_PAGER_OK);
  319                         atomic_setbits_int(&pg->pg_flags, PG_WANTED);
  320                         counters_inc(uvmexp_counters, flt_pgwait);
  321 
  322                         /*
  323                          * The last unlock must be an atomic unlock and wait
  324                          * on the owner of page.
  325                          */
  326                         if (pg->uobject) {
  327                                 /* Owner of page is UVM object. */
  328                                 uvmfault_unlockall(ufi, amap, NULL);
  329                                 rwsleep_nsec(pg, pg->uobject->vmobjlock,
  330                                     PVM | PNORELOCK, "anonget1", INFSLP);
  331                         } else {
  332                                 /* Owner of page is anon. */
  333                                 uvmfault_unlockall(ufi, NULL, NULL);
  334                                 rwsleep_nsec(pg, anon->an_lock, PVM | PNORELOCK,
  335                                     "anonget2", INFSLP);
  336                         }
  337                 } else {
  338                         /*
  339                          * No page, therefore allocate one.
  340                          */
  341                         pg = uvm_pagealloc(NULL, 0, anon, 0);
  342                         if (pg == NULL) {
  343                                 /* Out of memory.  Wait a little. */
  344                                 uvmfault_unlockall(ufi, amap, NULL);
  345                                 counters_inc(uvmexp_counters, flt_noram);
  346                                 uvm_wait("flt_noram1");
  347                         } else {
  348                                 /* PG_BUSY bit is set. */
  349                                 we_own = TRUE;
  350                                 uvmfault_unlockall(ufi, amap, NULL);
  351 
  352                                 /*
  353                                  * Pass a PG_BUSY+PG_FAKE+PG_CLEAN page into
  354                                  * the uvm_swap_get() function with all data
  355                                  * structures unlocked.  Note that it is OK
  356                                  * to read an_swslot here, because we hold
  357                                  * PG_BUSY on the page.
  358                                  */
  359                                 counters_inc(uvmexp_counters, pageins);
  360                                 error = uvm_swap_get(pg, anon->an_swslot,
  361                                     PGO_SYNCIO);
  362 
  363                                 /*
  364                                  * We clean up after the I/O below in the
  365                                  * 'we_own' case.
  366                                  */
  367                         }
  368                 }
  369 
  370                 /*
  371                  * Re-lock the map and anon.
  372                  */
  373                 locked = uvmfault_relock(ufi);
  374                 if (locked || we_own) {
  375                         rw_enter(anon->an_lock, RW_WRITE);
  376                 }
  377 
  378                 /*
  379                  * If we own the page (i.e. we set PG_BUSY), then we need
  380                  * to clean up after the I/O.  There are three cases to
  381                  * consider:
  382                  *
  383                  * 1) Page was released during I/O: free anon and ReFault.
  384                  * 2) I/O not OK.  Free the page and cause the fault to fail.
  385                  * 3) I/O OK!  Activate the page and sync with the non-we_own
  386                  *    case (i.e. drop anon lock if not locked).
  387                  */
  388                 if (we_own) {
  389                         if (pg->pg_flags & PG_WANTED) {
  390                                 wakeup(pg);
  391                         }
  392 
  393                         /*
  394                          * if we were RELEASED during I/O, then our anon is
  395                          * no longer part of an amap.   we need to free the
  396                          * anon and try again.
  397                          */
  398                         if (pg->pg_flags & PG_RELEASED) {
  399                                 pmap_page_protect(pg, PROT_NONE);
  400                                 KASSERT(anon->an_ref == 0);
  401                                 /*
  402                                  * Released while we had unlocked amap.
  403                                  */
  404                                 if (locked)
  405                                         uvmfault_unlockall(ufi, NULL, NULL);
  406                                 uvm_anon_release(anon); /* frees page for us */
  407                                 counters_inc(uvmexp_counters, flt_pgrele);
  408                                 return (VM_PAGER_REFAULT);      /* refault! */
  409                         }
  410 
  411                         if (error != VM_PAGER_OK) {
  412                                 KASSERT(error != VM_PAGER_PEND);
  413 
  414                                 /* remove page from anon */
  415                                 anon->an_page = NULL;
  416 
  417                                 /*
  418                                  * Remove the swap slot from the anon and
  419                                  * mark the anon as having no real slot.
  420                                  * Do not free the swap slot, thus preventing
  421                                  * it from being used again.
  422                                  */
  423                                 uvm_swap_markbad(anon->an_swslot, 1);
  424                                 anon->an_swslot = SWSLOT_BAD;
  425 
  426                                 /*
  427                                  * Note: page was never !PG_BUSY, so it
  428                                  * cannot be mapped and thus no need to
  429                                  * pmap_page_protect() it.
  430                                  */
  431                                 uvm_lock_pageq();
  432                                 uvm_pagefree(pg);
  433                                 uvm_unlock_pageq();
  434 
  435                                 if (locked) {
  436                                         uvmfault_unlockall(ufi, NULL, NULL);
  437                                 }
  438                                 rw_exit(anon->an_lock);
  439                                 return (VM_PAGER_ERROR);
  440                         }
  441 
  442                         /*
  443                          * We have successfully read the page, activate it.
  444                          */
  445                         pmap_clear_modify(pg);
  446                         uvm_lock_pageq();
  447                         uvm_pageactivate(pg);
  448                         uvm_unlock_pageq();
  449                         atomic_clearbits_int(&pg->pg_flags,
  450                             PG_WANTED|PG_BUSY|PG_FAKE);
  451                         UVM_PAGE_OWN(pg, NULL);
  452                 }
  453 
  454                 /*
  455                  * We were not able to re-lock the map - restart the fault.
  456                  */
  457                 if (!locked) {
  458                         if (we_own) {
  459                                 rw_exit(anon->an_lock);
  460                         }
  461                         return (VM_PAGER_REFAULT);
  462                 }
  463 
  464                 /*
  465                  * Verify that no one has touched the amap and moved
  466                  * the anon on us.
  467                  */
  468                 if (ufi != NULL && amap_lookup(&ufi->entry->aref,
  469                                 ufi->orig_rvaddr - ufi->entry->start) != anon) {
  470 
  471                         uvmfault_unlockall(ufi, amap, NULL);
  472                         return (VM_PAGER_REFAULT);
  473                 }
  474 
  475                 /*
  476                  * Retry..
  477                  */
  478                 counters_inc(uvmexp_counters, flt_anretry);
  479                 continue;
  480 
  481         }
  482         /*NOTREACHED*/
  483 }
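
The drop-locks / do I/O / relock / reverify discipline that uvmfault_anonget() follows can be sketched generically.  The structure and names below are hypothetical (this is not UVM code); the version counter stands in for the re-checks the real code performs after relocking.

/* generic, compile-only sketch of the unlock / I/O / relock / reverify pattern */
#include <pthread.h>
#include <stdbool.h>

struct sketch_obj {
	pthread_mutex_t	lock;
	unsigned long	version;	/* bumped whenever the object changes */
	bool		busy;		/* stands in for PG_BUSY */
};

/* pretend to do slow I/O with all locks dropped */
static void
sketch_slow_io(struct sketch_obj *o)
{
	(void)o;
}

/* returns true on success, false if the caller must restart (refault) */
bool
sketch_fill(struct sketch_obj *o)
{
	pthread_mutex_lock(&o->lock);
	o->busy = true;				/* keep others away while unlocked */
	unsigned long saved = o->version;
	pthread_mutex_unlock(&o->lock);		/* drop the lock for the I/O */

	sketch_slow_io(o);

	pthread_mutex_lock(&o->lock);
	o->busy = false;
	if (o->version != saved) {		/* the world changed underneath us */
		pthread_mutex_unlock(&o->lock);
		return false;
	}
	pthread_mutex_unlock(&o->lock);
	return true;
}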
  484 
  485 /*
  486  * Update statistics after fault resolution.
  487  * - maxrss
  488  */
  489 void
  490 uvmfault_update_stats(struct uvm_faultinfo *ufi)
  491 {
  492         struct vm_map           *map;
  493         struct proc             *p;
  494         vsize_t                  res;
  495 
  496         map = ufi->orig_map;
  497 
  498         /*
  499          * If this is a nested pmap (eg, a virtual machine pmap managed
  500          * by vmm(4) on amd64/i386), don't do any updating, just return.
  501          *
  502          * pmap_nested() on other archs is #defined to 0, so this is a
  503          * no-op.
  504          */
  505         if (pmap_nested(map->pmap))
  506                 return;
  507 
  508         /* Update the maxrss for the process. */
  509         if (map->flags & VM_MAP_ISVMSPACE) {
  510                 p = curproc;
  511                 KASSERT(p != NULL && &p->p_vmspace->vm_map == map);
  512 
  513                 res = pmap_resident_count(map->pmap);
  514                 /* Convert res from pages to kilobytes. */
  515                 res <<= (PAGE_SHIFT - 10);
  516 
  517                 if (p->p_ru.ru_maxrss < res)
  518                         p->p_ru.ru_maxrss = res;
  519         }
  520 }
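
The statistics maintained here and in uvmfault_anonget() are the ones a process sees through getrusage(2); res <<= (PAGE_SHIFT - 10) converts resident pages to kilobytes (a factor of 4 with 4 KB pages).  A small userland sketch (illustrative only):

#include <sys/resource.h>
#include <stdio.h>

int
main(void)
{
	struct rusage ru;

	if (getrusage(RUSAGE_SELF, &ru) == -1)
		return 1;
	printf("minor faults: %ld\n", ru.ru_minflt);
	printf("major faults: %ld\n", ru.ru_majflt);
	printf("max RSS (KB): %ld\n", ru.ru_maxrss);
	return 0;
}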
  521 
  522 /*
  523  *   F A U L T   -   m a i n   e n t r y   p o i n t
  524  */
  525 
  526 /*
  527  * uvm_fault: page fault handler
  528  *
  529  * => called from MD code to resolve a page fault
  530  * => VM data structures usually should be unlocked.   however, it is
  531  *      possible to call here with the main map locked if the caller
  532  *      gets a write lock, sets it recursive, and then calls us (c.f.
  533  *      uvm_map_pageable).   this should be avoided because it keeps
  534  *      the map locked off during I/O.
  535  * => MUST NEVER BE CALLED IN INTERRUPT CONTEXT
  536  */
  537 #define MASK(entry)     (UVM_ET_ISCOPYONWRITE(entry) ? \
  538                          ~PROT_WRITE : PROT_MASK)
  539 struct uvm_faultctx {
  540         /*
  541          * the following members are set up by uvm_fault_check() and
  542          * read-only after that.
  543          */
  544         vm_prot_t enter_prot;
  545         vm_prot_t access_type;
  546         vaddr_t startva;
  547         int npages;
  548         int centeridx;
  549         boolean_t narrow;
  550         boolean_t wired;
  551         paddr_t pa_flags;
  552 };
  553 
  554 int             uvm_fault_check(
  555                     struct uvm_faultinfo *, struct uvm_faultctx *,
  556                     struct vm_anon ***);
  557 
  558 int             uvm_fault_upper(
  559                     struct uvm_faultinfo *, struct uvm_faultctx *,
  560                     struct vm_anon **, vm_fault_t);
  561 boolean_t       uvm_fault_upper_lookup(
  562                     struct uvm_faultinfo *, const struct uvm_faultctx *,
  563                     struct vm_anon **, struct vm_page **);
  564 
  565 int             uvm_fault_lower(
  566                     struct uvm_faultinfo *, struct uvm_faultctx *,
  567                     struct vm_page **, vm_fault_t);
  568 
  569 int
  570 uvm_fault(vm_map_t orig_map, vaddr_t vaddr, vm_fault_t fault_type,
  571     vm_prot_t access_type)
  572 {
  573         struct uvm_faultinfo ufi;
  574         struct uvm_faultctx flt;
  575         boolean_t shadowed;
  576         struct vm_anon *anons_store[UVM_MAXRANGE], **anons;
  577         struct vm_page *pages[UVM_MAXRANGE];
  578         int error;
  579 
  580         counters_inc(uvmexp_counters, faults);
  581         TRACEPOINT(uvm, fault, vaddr, fault_type, access_type, NULL);
  582 
  583         /*
  584          * init the IN parameters in the ufi
  585          */
  586         ufi.orig_map = orig_map;
  587         ufi.orig_rvaddr = trunc_page(vaddr);
  588         ufi.orig_size = PAGE_SIZE;      /* can't get any smaller than this */
  589         if (fault_type == VM_FAULT_WIRE)
  590                 flt.narrow = TRUE;      /* don't look for neighborhood
  591                                          * pages on wire */
  592         else
  593                 flt.narrow = FALSE;     /* normal fault */
  594         flt.access_type = access_type;
  595 
  596 
  597         error = ERESTART;
  598         while (error == ERESTART) { /* ReFault: */
  599                 anons = anons_store;
  600 
  601                 error = uvm_fault_check(&ufi, &flt, &anons);
  602                 if (error != 0)
  603                         continue;
  604 
  605                 /* True if there is an anon at the faulting address */
  606                 shadowed = uvm_fault_upper_lookup(&ufi, &flt, anons, pages);
  607                 if (shadowed == TRUE) {
  608                         /* case 1: fault on an anon in our amap */
  609                         error = uvm_fault_upper(&ufi, &flt, anons, fault_type);
  610                 } else {
  611                         struct uvm_object *uobj = ufi.entry->object.uvm_obj;
  612 
  613                         /*
  614                          * if the desired page is not shadowed by the amap and
  615                          * we have a backing object, then we check to see if
  616                          * the backing object would prefer to handle the fault
  617                          * itself (rather than letting us do it with the usual
  618                          * pgo_get hook).  the backing object signals this by
  619                          * providing a pgo_fault routine.
  620                          */
  621                         if (uobj != NULL && uobj->pgops->pgo_fault != NULL) {
  622                                 KERNEL_LOCK();
  623                                 rw_enter(uobj->vmobjlock, RW_WRITE);
  624                                 error = uobj->pgops->pgo_fault(&ufi,
  625                                     flt.startva, pages, flt.npages,
  626                                     flt.centeridx, fault_type, flt.access_type,
  627                                     PGO_LOCKED);
  628                                 KERNEL_UNLOCK();
  629 
  630                                 if (error == VM_PAGER_OK)
  631                                         error = 0;
  632                                 else if (error == VM_PAGER_REFAULT)
  633                                         error = ERESTART;
  634                                 else
  635                                         error = EACCES;
  636                         } else {
  637                                 /* case 2: fault on backing obj or zero fill */
  638                                 error = uvm_fault_lower(&ufi, &flt, pages,
  639                                     fault_type);
  640                         }
  641                 }
  642         }
  643 
  644         return error;
  645 }
  646 
  647 /*
  648  * uvm_fault_check: check prot, handle needs-copy, etc.
  649  *
  650  *      1. lookup entry.
  651  *      2. check protection.
  652  *      3. adjust fault condition (mainly for simulated fault).
  653  *      4. handle needs-copy (lazy amap copy).
  654  *      5. establish range of interest for neighbor fault (aka pre-fault).
  655  *      6. look up anons (if amap exists).
  656  *      7. flush pages (if MADV_SEQUENTIAL)
  657  *
  658  * => called with nothing locked.
  659  * => if we fail (result != 0) we unlock everything.
  660  * => initialize/adjust many members of flt.
  661  */
  662 int
  663 uvm_fault_check(struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
  664     struct vm_anon ***ranons)
  665 {
  666         struct vm_amap *amap;
  667         struct uvm_object *uobj;
  668         int nback, nforw;
  669 
  670         /*
  671          * lookup and lock the maps
  672          */
  673         if (uvmfault_lookup(ufi, FALSE) == FALSE) {
  674                 return EFAULT;
  675         }
  676         /* locked: maps(read) */
  677 
  678 #ifdef DIAGNOSTIC
  679         if ((ufi->map->flags & VM_MAP_PAGEABLE) == 0)
  680                 panic("uvm_fault: fault on non-pageable map (%p, 0x%lx)",
  681                     ufi->map, ufi->orig_rvaddr);
  682 #endif
  683 
  684         /*
  685          * check protection
  686          */
  687         if ((ufi->entry->protection & flt->access_type) != flt->access_type) {
  688                 uvmfault_unlockmaps(ufi, FALSE);
  689                 return EACCES;
  690         }
  691 
  692         /*
  693          * "enter_prot" is the protection we want to enter the page in at.
  694          * for certain pages (e.g. copy-on-write pages) this protection can
  695          * be more strict than ufi->entry->protection.  "wired" means either
  696          * the entry is wired or we are fault-wiring the pg.
  697          */
  698 
  699         flt->enter_prot = ufi->entry->protection;
  700         flt->pa_flags = UVM_ET_ISWC(ufi->entry) ? PMAP_WC : 0;
  701         flt->wired = VM_MAPENT_ISWIRED(ufi->entry) || (flt->narrow == TRUE);
  702         if (flt->wired)
  703                 flt->access_type = flt->enter_prot; /* full access for wired */
  704 
  705         /* handle "needs_copy" case. */
  706         if (UVM_ET_ISNEEDSCOPY(ufi->entry)) {
  707                 if ((flt->access_type & PROT_WRITE) ||
  708                     (ufi->entry->object.uvm_obj == NULL)) {
  709                         /* need to clear */
  710                         uvmfault_unlockmaps(ufi, FALSE);
  711                         uvmfault_amapcopy(ufi);
  712                         counters_inc(uvmexp_counters, flt_amcopy);
  713                         return ERESTART;
  714                 } else {
  715                         /*
  716                          * ensure that we pmap_enter page R/O since
  717                          * needs_copy is still true
  718                          */
  719                         flt->enter_prot &= ~PROT_WRITE;
  720                 }
  721         }
  722 
  723         /*
  724          * identify the players
  725          */
  726         amap = ufi->entry->aref.ar_amap;        /* upper layer */
  727         uobj = ufi->entry->object.uvm_obj;      /* lower layer */
  728 
  729         /*
  730          * check for a case 0 fault.  if nothing backing the entry then
  731          * error now.
  732          */
  733         if (amap == NULL && uobj == NULL) {
  734                 uvmfault_unlockmaps(ufi, FALSE);
  735                 return EFAULT;
  736         }
  737 
  738         /*
  739          * for a case 2B fault waste no time on adjacent pages because
  740          * they are likely already entered.
  741          */
  742         if (uobj != NULL && amap != NULL &&
  743             (flt->access_type & PROT_WRITE) != 0) {
  744                 /* wide fault (!narrow) */
  745                 flt->narrow = TRUE;
  746         }
  747 
  748         /*
  749          * establish range of interest based on advice from mapper
  750          * and then clip to fit map entry.   note that we only want
  751          * to do this the first time through the fault.   if we
  752          * ReFault we will disable this by setting "narrow" to true.
  753          */
  754         if (flt->narrow == FALSE) {
  755 
  756                 /* wide fault (!narrow) */
  757                 nback = min(uvmadvice[ufi->entry->advice].nback,
  758                     (ufi->orig_rvaddr - ufi->entry->start) >> PAGE_SHIFT);
  759                 flt->startva = ufi->orig_rvaddr - ((vsize_t)nback << PAGE_SHIFT);
  760                 nforw = min(uvmadvice[ufi->entry->advice].nforw,
  761                     ((ufi->entry->end - ufi->orig_rvaddr) >> PAGE_SHIFT) - 1);
  762                 /*
  763                  * note: "-1" because we don't want to count the
  764                  * faulting page as forw
  765                  */
  766                 flt->npages = nback + nforw + 1;
  767                 flt->centeridx = nback;
  768 
  769                 flt->narrow = TRUE;     /* ensure only once per-fault */
  770         } else {
  771                 /* narrow fault! */
  772                 nback = nforw = 0;
  773                 flt->startva = ufi->orig_rvaddr;
  774                 flt->npages = 1;
  775                 flt->centeridx = 0;
  776         }
  777 
  778         /*
  779          * if we've got an amap then lock it and extract current anons.
  780          */
  781         if (amap) {
  782                 amap_lock(amap);
  783                 amap_lookups(&ufi->entry->aref,
  784                     flt->startva - ufi->entry->start, *ranons, flt->npages);
  785         } else {
  786                 *ranons = NULL; /* to be safe */
  787         }
  788 
  789         /*
  790          * for MADV_SEQUENTIAL mappings we want to deactivate the back pages
  791          * now and then forget about them (for the rest of the fault).
  792          */
  793         if (ufi->entry->advice == MADV_SEQUENTIAL && nback != 0) {
  794                 /* flush back-page anons? */
  795                 if (amap)
  796                         uvmfault_anonflush(*ranons, nback);
  797 
  798                 /*
  799                  * flush object?
  800                  */
  801                 if (uobj) {
  802                         voff_t uoff;
  803 
  804                         uoff = (flt->startva - ufi->entry->start) + ufi->entry->offset;
  805                         rw_enter(uobj->vmobjlock, RW_WRITE);
  806                         (void) uobj->pgops->pgo_flush(uobj, uoff, uoff +
  807                             ((vsize_t)nback << PAGE_SHIFT), PGO_DEACTIVATE);
  808                         rw_exit(uobj->vmobjlock);
  809                 }
  810 
  811                 /* now forget about the backpages */
  812                 if (amap)
  813                         *ranons += nback;
  814                 flt->startva += ((vsize_t)nback << PAGE_SHIFT);
  815                 flt->npages -= nback;
  816                 flt->centeridx = 0;
  817         }
  818 
  819         return 0;
  820 }
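
The range-of-interest arithmetic above (step 5) can be reproduced in isolation.  The sketch below mirrors the clipping done for a wide fault; the page size and example addresses are assumptions chosen only for illustration.

/* standalone sketch of the neighborhood computation in uvm_fault_check() */
#include <stdio.h>

#define PGSHIFT	12			/* assumed 4 KB pages */

static unsigned long
minul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int
main(void)
{
	unsigned long entry_start = 0x1000000, entry_end = 0x1010000; /* 16 pages */
	unsigned long faultva = 0x1004000;		/* 4 pages into the entry */
	unsigned long adv_nback = 3, adv_nforw = 4;	/* MADV_NORMAL, 4 KB pages */

	/* clip the advice so the range stays inside the map entry */
	unsigned long nback = minul(adv_nback, (faultva - entry_start) >> PGSHIFT);
	unsigned long nforw = minul(adv_nforw,
	    ((entry_end - faultva) >> PGSHIFT) - 1);

	unsigned long startva = faultva - (nback << PGSHIFT);
	unsigned long npages = nback + nforw + 1;
	unsigned long centeridx = nback;

	/* prints startva=0x1001000 npages=8 centeridx=3 */
	printf("startva=0x%lx npages=%lu centeridx=%lu\n",
	    startva, npages, centeridx);
	return 0;
}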
  821 
  822 /*
  823  * uvm_fault_upper_lookup: look up existing h/w mapping and amap.
  824  *
  825  * iterate range of interest:
  826  *      1. check if h/w mapping exists.  if yes, we don't care
  827  *      2. check if anon exists.  if not, page is lower.
  828  *      3. if anon exists, enter h/w mapping for neighbors.
  829  *
  830  * => called with amap locked (if exists).
  831  */
  832 boolean_t
  833 uvm_fault_upper_lookup(struct uvm_faultinfo *ufi,
  834     const struct uvm_faultctx *flt, struct vm_anon **anons,
  835     struct vm_page **pages)
  836 {
  837         struct vm_amap *amap = ufi->entry->aref.ar_amap;
  838         struct vm_anon *anon;
  839         boolean_t shadowed;
  840         vaddr_t currva;
  841         paddr_t pa;
  842         int lcv;
  843 
  844         /* locked: maps(read), amap(if there) */
  845         KASSERT(amap == NULL ||
  846             rw_write_held(amap->am_lock));
  847 
  848         /*
  849          * map in the backpages and frontpages we found in the amap in hopes
  850          * of preventing future faults.    we also init the pages[] array as
  851          * we go.
  852          */
  853         currva = flt->startva;
  854         shadowed = FALSE;
  855         for (lcv = 0; lcv < flt->npages; lcv++, currva += PAGE_SIZE) {
  856                 /*
   857                  * don't play with VAs that are already mapped
   858                  * (except for the center page)
  859                  */
  860                 if (lcv != flt->centeridx &&
  861                     pmap_extract(ufi->orig_map->pmap, currva, &pa)) {
  862                         pages[lcv] = PGO_DONTCARE;
  863                         continue;
  864                 }
  865 
  866                 /*
  867                  * unmapped or center page.   check if any anon at this level.
  868                  */
  869                 if (amap == NULL || anons[lcv] == NULL) {
  870                         pages[lcv] = NULL;
  871                         continue;
  872                 }
  873 
  874                 /*
  875                  * check for present page and map if possible.
  876                  */
  877                 pages[lcv] = PGO_DONTCARE;
  878                 if (lcv == flt->centeridx) {    /* save center for later! */
  879                         shadowed = TRUE;
  880                         continue;
  881                 }
  882                 anon = anons[lcv];
  883                 KASSERT(anon->an_lock == amap->am_lock);
  884                 if (anon->an_page &&
  885                     (anon->an_page->pg_flags & (PG_RELEASED|PG_BUSY)) == 0) {
  886                         uvm_lock_pageq();
  887                         uvm_pageactivate(anon->an_page);        /* reactivate */
  888                         uvm_unlock_pageq();
  889                         counters_inc(uvmexp_counters, flt_namap);
  890 
  891                         /*
  892                          * Since this isn't the page that's actually faulting,
  893                          * ignore pmap_enter() failures; it's not critical
  894                          * that we enter these right now.
  895                          */
  896                         (void) pmap_enter(ufi->orig_map->pmap, currva,
  897                             VM_PAGE_TO_PHYS(anon->an_page) | flt->pa_flags,
  898                             (anon->an_ref > 1) ?
  899                             (flt->enter_prot & ~PROT_WRITE) : flt->enter_prot,
  900                             PMAP_CANFAIL |
  901                              (VM_MAPENT_ISWIRED(ufi->entry) ? PMAP_WIRED : 0));
  902                 }
  903         }
  904         if (flt->npages > 1)
  905                 pmap_update(ufi->orig_map->pmap);
  906 
  907         return shadowed;
  908 }
  909 
  910 /*
  911  * uvm_fault_upper: handle upper fault.
  912  *
  913  *      1. acquire anon lock.
  914  *      2. get anon.  let uvmfault_anonget do the dirty work.
  915  *      3. if COW, promote data to new anon
  916  *      4. enter h/w mapping
  917  */
  918 int
  919 uvm_fault_upper(struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
  920    struct vm_anon **anons, vm_fault_t fault_type)
  921 {
  922         struct vm_amap *amap = ufi->entry->aref.ar_amap;
  923         struct vm_anon *oanon, *anon = anons[flt->centeridx];
  924         struct vm_page *pg = NULL;
  925         int error, ret;
  926 
  927         /* locked: maps(read), amap, anon */
  928         KASSERT(rw_write_held(amap->am_lock));
  929         KASSERT(anon->an_lock == amap->am_lock);
  930 
  931         /*
  932          * no matter if we have case 1A or case 1B we are going to need to
  933          * have the anon's memory resident.   ensure that now.
  934          */
  935         /*
  936          * let uvmfault_anonget do the dirty work.
  937          * if it fails (!OK) it will unlock everything for us.
  938          * if it succeeds, locks are still valid and locked.
  939          * also, if it is OK, then the anon's page is on the queues.
  940          * if the page is on loan from a uvm_object, then anonget will
  941          * lock that object for us if it does not fail.
  942          */
  943         error = uvmfault_anonget(ufi, amap, anon);
  944         switch (error) {
  945         case VM_PAGER_OK:
  946                 break;
  947 
  948         case VM_PAGER_REFAULT:
  949                 return ERESTART;
  950 
  951         case VM_PAGER_ERROR:
  952                 /*
  953                  * An error occurred while trying to bring in the
  954                  * page -- this is the only error we return right
  955                  * now.
  956                  */
  957                 return EACCES;  /* XXX */
  958         default:
  959 #ifdef DIAGNOSTIC
  960                 panic("uvm_fault: uvmfault_anonget -> %d", error);
  961 #else
  962                 return EACCES;
  963 #endif
  964         }
  965 
  966         KASSERT(rw_write_held(amap->am_lock));
  967         KASSERT(anon->an_lock == amap->am_lock);
  968 
  969         /*
  970          * if we are case 1B then we will need to allocate a new blank
  971          * anon to transfer the data into.   note that we have a lock
  972          * on anon, so no one can busy or release the page until we are done.
  973          * also note that the ref count can't drop to zero here because
  974          * it is > 1 and we are only dropping one ref.
  975          *
  976          * in the (hopefully very rare) case that we are out of RAM we
  977          * will unlock, wait for more RAM, and refault.
  978          *
  979          * if we are out of anon VM we wait for RAM to become available.
  980          */
  981 
  982         if ((flt->access_type & PROT_WRITE) != 0 && anon->an_ref > 1) {
  983                 counters_inc(uvmexp_counters, flt_acow);
  984                 oanon = anon;           /* oanon = old */
  985                 anon = uvm_analloc();
  986                 if (anon) {
  987                         anon->an_lock = amap->am_lock;
  988                         pg = uvm_pagealloc(NULL, 0, anon, 0);
  989                 }
  990 
  991                 /* check for out of RAM */
  992                 if (anon == NULL || pg == NULL) {
  993                         uvmfault_unlockall(ufi, amap, NULL);
  994                         if (anon == NULL)
  995                                 counters_inc(uvmexp_counters, flt_noanon);
  996                         else {
  997                                 anon->an_lock = NULL;
  998                                 anon->an_ref--;
  999                                 uvm_anfree(anon);
 1000                                 counters_inc(uvmexp_counters, flt_noram);
 1001                         }
 1002 
 1003                         if (uvm_swapisfull())
 1004                                 return ENOMEM;
 1005 
 1006                         /* out of RAM, wait for more */
 1007                         if (anon == NULL)
 1008                                 uvm_anwait();
 1009                         else
 1010                                 uvm_wait("flt_noram3");
 1011                         return ERESTART;
 1012                 }
 1013 
 1014                 /* got all resources, replace anon with nanon */
 1015                 uvm_pagecopy(oanon->an_page, pg);       /* pg now !PG_CLEAN */
 1016                 /* un-busy! new page */
 1017                 atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_FAKE);
 1018                 UVM_PAGE_OWN(pg, NULL);
 1019                 ret = amap_add(&ufi->entry->aref,
 1020                     ufi->orig_rvaddr - ufi->entry->start, anon, 1);
 1021                 KASSERT(ret == 0);
 1022 
 1023                 /* deref: can not drop to zero here by defn! */
 1024                 oanon->an_ref--;
 1025 
 1026 #if defined(MULTIPROCESSOR) && !defined(__HAVE_PMAP_MPSAFE_ENTER_COW)
 1027                 /*
 1028                  * If there are multiple threads, either uvm or the
 1029                  * pmap has to make sure no threads see the old RO
 1030                  * mapping once any have seen the new RW mapping.
 1031                  * uvm does it by inserting the new mapping RO and
 1032                  * letting it fault again.
 1033                  * This is only a problem on MP systems.
 1034                  */
 1035                 if (P_HASSIBLING(curproc)) {
 1036                         flt->enter_prot &= ~PROT_WRITE;
 1037                         flt->access_type &= ~PROT_WRITE;
 1038                 }
 1039 #endif
 1040 
 1041                 /*
  1042          * note: anon is _not_ locked, but we have the sole reference
  1043          * to it from the amap.
 1044                  * thus, no one can get at it until we are done with it.
 1045                  */
 1046         } else {
 1047                 counters_inc(uvmexp_counters, flt_anon);
 1048                 oanon = anon;
 1049                 pg = anon->an_page;
 1050                 if (anon->an_ref > 1)     /* disallow writes to ref > 1 anons */
 1051                         flt->enter_prot = flt->enter_prot & ~PROT_WRITE;
 1052         }
 1053 
 1054         /*
  1055          * now map the page in.
 1056          */
 1057         if (pmap_enter(ufi->orig_map->pmap, ufi->orig_rvaddr,
 1058             VM_PAGE_TO_PHYS(pg) | flt->pa_flags, flt->enter_prot,
 1059             flt->access_type | PMAP_CANFAIL | (flt->wired ? PMAP_WIRED : 0)) != 0) {
 1060                 /*
 1061                  * No need to undo what we did; we can simply think of
 1062                  * this as the pmap throwing away the mapping information.
 1063                  *
 1064                  * We do, however, have to go through the ReFault path,
 1065                  * as the map may change while we're asleep.
 1066                  */
 1067                 uvmfault_unlockall(ufi, amap, NULL);
 1068                 if (uvm_swapisfull()) {
 1069                         /* XXX instrumentation */
 1070                         return ENOMEM;
 1071                 }
 1072                 /* XXX instrumentation */
 1073                 uvm_wait("flt_pmfail1");
 1074                 return ERESTART;
 1075         }
 1076 
 1077         /*
 1078          * ... update the page queues.
 1079          */
 1080         uvm_lock_pageq();
 1081 
 1082         if (fault_type == VM_FAULT_WIRE) {
 1083                 uvm_pagewire(pg);
 1084                 /*
 1085                  * since the now-wired page cannot be paged out,
 1086                  * release its swap resources for others to use.
 1087                  * since an anon with no swap cannot be PG_CLEAN,
 1088                  * clear its clean flag now.
 1089                  */
 1090                 atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
 1091                 uvm_anon_dropswap(anon);
 1092         } else {
 1093                 /* activate it */
 1094                 uvm_pageactivate(pg);
 1095         }
 1096 
 1097         uvm_unlock_pageq();
 1098 
 1099         /*
 1100          * done case 1!  finish up by unlocking everything and returning success
 1101          */
 1102         uvmfault_unlockall(ufi, amap, NULL);
 1103         pmap_update(ufi->orig_map->pmap);
 1104         return 0;
 1105 }
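
Case 1B is what resolves a write after fork(): parent and child end up sharing the anon, and the first write by either side allocates a fresh anon and copies the page, so the other side keeps seeing the old data.  A userland sketch (illustrative only; error handling kept minimal):

#include <sys/mman.h>
#include <sys/wait.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	p[0] = 'A';			/* zero-fill fault creates the anon */

	pid_t pid = fork();		/* anon becomes shared between the two */
	if (pid == 0) {
		p[0] = 'B';		/* write fault: case 1B, child gets a new anon */
		_exit(0);
	}
	waitpid(pid, NULL, 0);
	printf("parent still sees: %c\n", p[0]);	/* prints 'A' */
	return 0;
}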
 1106 
 1107 /*
 1108  * uvm_fault_lower_lookup: look up on-memory uobj pages.
 1109  *
 1110  *      1. get on-memory pages.
 1111  *      2. if failed, give up (get only center page later).
 1112  *      3. if succeeded, enter h/w mapping of neighbor pages.
 1113  */
 1114 
 1115 struct vm_page *
 1116 uvm_fault_lower_lookup(
 1117         struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
 1118         struct vm_page **pages)
 1119 {
 1120         struct uvm_object *uobj = ufi->entry->object.uvm_obj;
 1121         struct vm_page *uobjpage = NULL;
 1122         int lcv, gotpages;
 1123         vaddr_t currva;
 1124 
 1125         rw_enter(uobj->vmobjlock, RW_WRITE);
 1126 
 1127         counters_inc(uvmexp_counters, flt_lget);
 1128         gotpages = flt->npages;
 1129         (void) uobj->pgops->pgo_get(uobj,
 1130             ufi->entry->offset + (flt->startva - ufi->entry->start),
 1131             pages, &gotpages, flt->centeridx,
 1132             flt->access_type & MASK(ufi->entry), ufi->entry->advice,
 1133             PGO_LOCKED);
 1134 
 1135         /*
 1136          * check for pages to map, if we got any
 1137          */
 1138         if (gotpages == 0) {
 1139                 return NULL;
 1140         }
 1141 
 1142         currva = flt->startva;
 1143         for (lcv = 0; lcv < flt->npages; lcv++, currva += PAGE_SIZE) {
 1144                 if (pages[lcv] == NULL ||
 1145                     pages[lcv] == PGO_DONTCARE)
 1146                         continue;
 1147 
 1148                 KASSERT((pages[lcv]->pg_flags & PG_RELEASED) == 0);
 1149 
 1150                 /*
 1151                  * if center page is resident and not
 1152                  * PG_BUSY, then pgo_get made it PG_BUSY
 1153                  * for us and gave us a handle to it.
 1154                  * remember this page as "uobjpage."
 1155                  * (for later use).
 1156                  */
 1157                 if (lcv == flt->centeridx) {
 1158                         uobjpage = pages[lcv];
 1159                         continue;
 1160                 }
 1161 
 1162                 /*
 1163                  * note: calling pgo_get with locked data
 1164                  * structures returns us pages which are
 1165                  * neither busy nor released, so we don't
 1166                  * need to check for this.   we can just
 1167                  * directly enter the page (after moving it
 1168                  * to the head of the active queue [useful?]).
 1169                  */
 1170 
 1171                 uvm_lock_pageq();
 1172                 uvm_pageactivate(pages[lcv]);   /* reactivate */
 1173                 uvm_unlock_pageq();
 1174                 counters_inc(uvmexp_counters, flt_nomap);
 1175 
 1176                 /*
 1177                  * Since this page isn't the page that's
 1178                  * actually faulting, ignore pmap_enter()
 1179                  * failures; it's not critical that we
 1180                  * enter these right now.
 1181                  */
 1182                 (void) pmap_enter(ufi->orig_map->pmap, currva,
 1183                     VM_PAGE_TO_PHYS(pages[lcv]) | flt->pa_flags,
 1184                     flt->enter_prot & MASK(ufi->entry),
 1185                     PMAP_CANFAIL |
 1186                      (flt->wired ? PMAP_WIRED : 0));
 1187 
 1188                 /*
 1189                  * NOTE: page can't be PG_WANTED because
 1190                  * we've held the lock the whole time
 1191                  * we've had the handle.
 1192                  */
 1193                 atomic_clearbits_int(&pages[lcv]->pg_flags, PG_BUSY);
 1194                 UVM_PAGE_OWN(pages[lcv], NULL);
 1195         }
 1196         pmap_update(ufi->orig_map->pmap);
 1197 
 1198         return uobjpage;
 1199 }
 1200 
 1201 /*
 1202  * uvm_fault_lower: handle lower fault.
 1203  *
 1204  */
 1205 int
 1206 uvm_fault_lower(struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
 1207    struct vm_page **pages, vm_fault_t fault_type)
 1208 {
 1209         struct vm_amap *amap = ufi->entry->aref.ar_amap;
 1210         struct uvm_object *uobj = ufi->entry->object.uvm_obj;
 1211         boolean_t promote, locked;
 1212         int result;
 1213         struct vm_page *uobjpage, *pg = NULL;
 1214         struct vm_anon *anon = NULL;
 1215         voff_t uoff;
 1216 
 1217         /*
 1218          * now, if the desired page is not shadowed by the amap and we have
 1219          * a backing object that does not have a special fault routine, then
 1220          * we ask (with pgo_get) the object for resident pages that we care
 1221          * about and attempt to map them in.  we do not let pgo_get block
 1222          * (PGO_LOCKED).
 1223          */
 1224         if (uobj == NULL) {
  1225                 /* zero fill; don't care about neighbor pages */
 1226                 uobjpage = NULL;
 1227         } else {
 1228                 uobjpage = uvm_fault_lower_lookup(ufi, flt, pages);
 1229         }
 1230 
 1231         /*
 1232          * note that at this point we are done with any front or back pages.
 1233          * we are now going to focus on the center page (i.e. the one we've
 1234          * faulted on).  if we have faulted on the bottom (uobj)
 1235          * layer [i.e. case 2] and the page was both present and available,
 1236          * then we've got a pointer to it as "uobjpage" and we've already
 1237          * made it BUSY.
 1238          */
 1239 
 1240         /*
 1241          * locked:
 1242          */
 1243         KASSERT(amap == NULL ||
 1244             rw_write_held(amap->am_lock));
 1245         KASSERT(uobj == NULL ||
 1246             rw_write_held(uobj->vmobjlock));
 1247 
 1248         /*
 1249          * note that uobjpage can not be PGO_DONTCARE at this point.  we now
 1250          * set uobjpage to PGO_DONTCARE if we are doing a zero fill.  if we
 1251          * have a backing object, check and see if we are going to promote
 1252          * the data up to an anon during the fault.
 1253          */
 1254         if (uobj == NULL) {
 1255                 uobjpage = PGO_DONTCARE;
 1256                 promote = TRUE;         /* always need anon here */
 1257         } else {
 1258                 KASSERT(uobjpage != PGO_DONTCARE);
 1259                 promote = (flt->access_type & PROT_WRITE) &&
 1260                      UVM_ET_ISCOPYONWRITE(ufi->entry);
 1261         }
 1262 
 1263         /*
 1264          * if uobjpage is not null then we do not need to do I/O to get the
 1265          * uobjpage.
 1266          *
 1267          * if uobjpage is null, then we need to ask the pager to
 1268          * get the data for us.   once we have the data, we need to reverify
 1269          * the state the world.   we are currently not holding any resources.
  1270          * the state of the world.   we are currently not holding any resources.
 1271         if (uobjpage) {
 1272                 /* update rusage counters */
 1273                 curproc->p_ru.ru_minflt++;
 1274         } else {
 1275                 int gotpages;
 1276 
 1277                 /* update rusage counters */
 1278                 curproc->p_ru.ru_majflt++;
 1279 
 1280                 uvmfault_unlockall(ufi, amap, NULL);
 1281 
 1282                 counters_inc(uvmexp_counters, flt_get);
 1283                 gotpages = 1;
 1284                 uoff = (ufi->orig_rvaddr - ufi->entry->start) + ufi->entry->offset;
 1285                 result = uobj->pgops->pgo_get(uobj, uoff, &uobjpage, &gotpages,
 1286                     0, flt->access_type & MASK(ufi->entry), ufi->entry->advice,
 1287                     PGO_SYNCIO);
 1288 
 1289                 /*
 1290                  * recover from I/O
 1291                  */
 1292                 if (result != VM_PAGER_OK) {
 1293                         KASSERT(result != VM_PAGER_PEND);
 1294 
 1295                         if (result == VM_PAGER_AGAIN) {
 1296                                 tsleep_nsec(&nowake, PVM, "fltagain2",
 1297                                     MSEC_TO_NSEC(5));
 1298                                 return ERESTART;
 1299                         }
 1300 
 1301                         if (!UVM_ET_ISNOFAULT(ufi->entry))
 1302                                 return (EIO);
 1303 
 1304                         uobjpage = PGO_DONTCARE;
 1305                         uobj = NULL;
 1306                         promote = TRUE;
 1307                 }
 1308 
 1309                 /* re-verify the state of the world.  */
 1310                 locked = uvmfault_relock(ufi);
 1311                 if (locked && amap != NULL)
 1312                         amap_lock(amap);
 1313 
 1314                 /* might be changed */
 1315                 if (uobjpage != PGO_DONTCARE) {
 1316                         uobj = uobjpage->uobject;
 1317                         rw_enter(uobj->vmobjlock, RW_WRITE);
 1318                 }
 1319 
 1320                 /*
 1321                  * Re-verify that the amap slot is still free.  If there
 1322                  * is a problem, we clean up.
 1323                  */
 1324                 if (locked && amap && amap_lookup(&ufi->entry->aref,
 1325                       ufi->orig_rvaddr - ufi->entry->start)) {
 1326                         if (locked)
 1327                                 uvmfault_unlockall(ufi, amap, NULL);
 1328                         locked = FALSE;
 1329                 }
 1330 
 1331                 /* didn't get the lock?   release the page and retry. */
 1332                 if (locked == FALSE && uobjpage != PGO_DONTCARE) {
 1333                         uvm_lock_pageq();
 1334                         /* make sure it is in queues */
 1335                         uvm_pageactivate(uobjpage);
 1336                         uvm_unlock_pageq();
 1337 
 1338                         if (uobjpage->pg_flags & PG_WANTED)
 1339                                 /* still holding object lock */
 1340                                 wakeup(uobjpage);
 1341                         atomic_clearbits_int(&uobjpage->pg_flags,
 1342                             PG_BUSY|PG_WANTED);
 1343                         UVM_PAGE_OWN(uobjpage, NULL);
 1344                 }
 1345 
 1346                 if (locked == FALSE) {
 1347                         if (uobjpage != PGO_DONTCARE)
 1348                                 rw_exit(uobj->vmobjlock);
 1349                         return ERESTART;
 1350                 }
 1351 
 1352                 /*
 1353                  * we have the data in uobjpage which is PG_BUSY
 1354                  */
 1355         }
 1356 
 1357         /*
 1358          * notes:
 1359          *  - at this point uobjpage can not be NULL
 1360          *  - at this point uobjpage could be PG_WANTED (handle later)
 1361          */
 1362         if (promote == FALSE) {
 1363                 /*
 1364                  * we are not promoting.   if the mapping is COW ensure that we
 1365                  * don't give more access than we should (e.g. when doing a read
 1366                  * fault on a COPYONWRITE mapping we want to map the COW page in
 1367                  * R/O even though the entry protection could be R/W).
 1368                  *
 1369                  * set "pg" to the page we want to map in (uobjpage, usually)
 1370                  */
 1371                 counters_inc(uvmexp_counters, flt_obj);
 1372                 if (UVM_ET_ISCOPYONWRITE(ufi->entry))
 1373                         flt->enter_prot &= ~PROT_WRITE;
 1374                 pg = uobjpage;          /* map in the actual object */
 1375 
 1376                 /* assert(uobjpage != PGO_DONTCARE) */
 1377 
 1378                 /*
 1379                  * we are faulting directly on the page.
 1380                  */
 1381         } else {
 1382                 /*
 1383                  * if we are going to promote the data to an anon we
 1384                  * allocate a blank anon here and plug it into our amap.
 1385                  */
 1386 #ifdef DIAGNOSTIC
 1387                 if (amap == NULL)
 1388                         panic("uvm_fault: want to promote data, but no anon");
 1389 #endif
 1390 
 1391                 anon = uvm_analloc();
 1392                 if (anon) {
 1393                         /*
 1394                          * In `Fill in data...' below, if
 1395                          * uobjpage == PGO_DONTCARE, we want
 1396                          * a zero'd, dirty page, so have
 1397                          * uvm_pagealloc() do that for us.
 1398                          */
 1399                         anon->an_lock = amap->am_lock;
 1400                         pg = uvm_pagealloc(NULL, 0, anon,
 1401                             (uobjpage == PGO_DONTCARE) ? UVM_PGA_ZERO : 0);
 1402                 }
 1403 
 1404                 /*
 1405                  * out of memory resources?
 1406                  */
 1407                 if (anon == NULL || pg == NULL) {
 1408                         /*
 1409                          * arg!  must unbusy our page and fail or sleep.
 1410                          */
 1411                         if (uobjpage != PGO_DONTCARE) {
 1412                                 uvm_lock_pageq();
 1413                                 uvm_pageactivate(uobjpage);
 1414                                 uvm_unlock_pageq();
 1415 
 1416                                 if (uobjpage->pg_flags & PG_WANTED)
 1417                                         wakeup(uobjpage);
 1418                                 atomic_clearbits_int(&uobjpage->pg_flags,
 1419                                     PG_BUSY|PG_WANTED);
 1420                                 UVM_PAGE_OWN(uobjpage, NULL);
 1421                         }
 1422 
 1423                         /* unlock and fail ... */
 1424                         uvmfault_unlockall(ufi, amap, uobj);
 1425                         if (anon == NULL)
 1426                                 counters_inc(uvmexp_counters, flt_noanon);
 1427                         else {
 1428                                 anon->an_lock = NULL;
 1429                                 anon->an_ref--;
 1430                                 uvm_anfree(anon);
 1431                                 counters_inc(uvmexp_counters, flt_noram);
 1432                         }
 1433 
 1434                         if (uvm_swapisfull())
 1435                                 return (ENOMEM);
 1436 
 1437                         /* out of RAM, wait for more */
 1438                         if (anon == NULL)
 1439                                 uvm_anwait();
 1440                         else
 1441                                 uvm_wait("flt_noram5");
 1442                         return ERESTART;
 1443                 }
 1444 
 1445                 /*
 1446                  * fill in the data
 1447                  */
 1448                 if (uobjpage != PGO_DONTCARE) {
 1449                         counters_inc(uvmexp_counters, flt_prcopy);
 1450                         /* copy page [pg now dirty] */
 1451                         uvm_pagecopy(uobjpage, pg);
 1452 
 1453                         /*
 1454                          * promote to shared amap?  make sure all sharing
 1455                          * procs see it
 1456                          */
 1457                         if ((amap_flags(amap) & AMAP_SHARED) != 0) {
 1458                                 pmap_page_protect(uobjpage, PROT_NONE);
 1459                         }
 1460 
 1461                         /* dispose of uobjpage. drop handle to uobj as well. */
 1462                         if (uobjpage->pg_flags & PG_WANTED)
 1463                                 wakeup(uobjpage);
 1464                         atomic_clearbits_int(&uobjpage->pg_flags,
 1465                             PG_BUSY|PG_WANTED);
 1466                         UVM_PAGE_OWN(uobjpage, NULL);
 1467                         uvm_lock_pageq();
 1468                         uvm_pageactivate(uobjpage);
 1469                         uvm_unlock_pageq();
 1470                         rw_exit(uobj->vmobjlock);
 1471                         uobj = NULL;
 1472                 } else {
 1473                         counters_inc(uvmexp_counters, flt_przero);
 1474                         /*
 1475                          * Page is zero'd and marked dirty by uvm_pagealloc()
 1476                          * above.
 1477                          */
 1478                 }
 1479 
 1480                 if (amap_add(&ufi->entry->aref,
 1481                     ufi->orig_rvaddr - ufi->entry->start, anon, 0)) {
 1482                         uvmfault_unlockall(ufi, amap, uobj);
 1483                         uvm_anfree(anon);
 1484                         counters_inc(uvmexp_counters, flt_noamap);
 1485 
 1486                         if (uvm_swapisfull())
 1487                                 return (ENOMEM);
 1488 
 1489                         amap_populate(&ufi->entry->aref,
 1490                             ufi->orig_rvaddr - ufi->entry->start);
 1491                         return ERESTART;
 1492                 }
 1493         }
 1494 
 1495         /* note: pg is either the uobjpage or the new page in the new anon */
 1496         /*
 1497          * all resources are present.   we can now map the page in and then
 1498          * release our resources.
 1499          */
 1500         if (amap == NULL)
 1501                 KASSERT(anon == NULL);
 1502         else {
 1503                 KASSERT(rw_write_held(amap->am_lock));
 1504                 KASSERT(anon == NULL || anon->an_lock == amap->am_lock);
 1505         }
 1506         if (pmap_enter(ufi->orig_map->pmap, ufi->orig_rvaddr,
 1507             VM_PAGE_TO_PHYS(pg) | flt->pa_flags, flt->enter_prot,
 1508             flt->access_type | PMAP_CANFAIL | (flt->wired ? PMAP_WIRED : 0)) != 0) {
 1509                 /*
 1510                  * No need to undo what we did; we can simply think of
 1511                  * this as the pmap throwing away the mapping information.
 1512                  *
 1513                  * We do, however, have to go through the ReFault path,
 1514                  * as the map may change while we're asleep.
 1515                  */
 1516                 if (pg->pg_flags & PG_WANTED)
 1517                         wakeup(pg);
 1518 
 1519                 atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_FAKE|PG_WANTED);
 1520                 UVM_PAGE_OWN(pg, NULL);
 1521                 uvmfault_unlockall(ufi, amap, uobj);
 1522                 if (uvm_swapisfull()) {
 1523                         /* XXX instrumentation */
 1524                         return (ENOMEM);
 1525                 }
 1526                 /* XXX instrumentation */
 1527                 uvm_wait("flt_pmfail2");
 1528                 return ERESTART;
 1529         }
 1530 
 1531         if (fault_type == VM_FAULT_WIRE) {
 1532                 uvm_lock_pageq();
 1533                 uvm_pagewire(pg);
 1534                 uvm_unlock_pageq();
 1535                 if (pg->pg_flags & PQ_AOBJ) {
 1536                         /*
 1537                          * since the now-wired page cannot be paged out,
 1538                          * release its swap resources for others to use.
 1539                          * since an aobj page with no swap cannot be clean,
 1540                          * mark it dirty now.
 1541                          *
 1542                          * use pg->uobject here.  if the page is from a
 1543                          * tmpfs vnode, the pages are backed by its UAO and
 1544                          * not the vnode.
 1545                          */
 1546                         KASSERT(uobj != NULL);
 1547                         KASSERT(uobj->vmobjlock == pg->uobject->vmobjlock);
 1548                         atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
 1549                         uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
 1550                 }
 1551         } else {
 1552                 /* activate it */
 1553                 uvm_lock_pageq();
 1554                 uvm_pageactivate(pg);
 1555                 uvm_unlock_pageq();
 1556         }
 1557 
 1558         if (pg->pg_flags & PG_WANTED)
 1559                 wakeup(pg);
 1560 
 1561         atomic_clearbits_int(&pg->pg_flags, PG_BUSY|PG_FAKE|PG_WANTED);
 1562         UVM_PAGE_OWN(pg, NULL);
 1563         uvmfault_unlockall(ufi, amap, uobj);
 1564         pmap_update(ufi->orig_map->pmap);
 1565 
 1566         return (0);
 1567 }
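/*
 * Editorial note on the return-value convention used above (inferred
 * from this file only): 0 means the page has been entered into the
 * pmap and every lock has been dropped; ERESTART means all locks and
 * resources have been released and the whole fault must be redone from
 * the top; EIO and ENOMEM are handed back to the code that took the
 * fault.  A dispatching caller is therefore expected to loop on
 * ERESTART, roughly:
 *
 *	do {
 *		error = ... look up the map, handle the fault ...;
 *	} while (error == ERESTART);
 */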
 1568 
 1569 
 1570 /*
 1571  * uvm_fault_wire: wire down a range of virtual addresses in a map.
 1572  *
 1573  * => map may be read-locked by caller, but MUST NOT be write-locked.
 1574  * => if map is read-locked, any operations which may cause map to
 1575  *      be write-locked in uvm_fault() must be taken care of by
 1576  *      the caller.  See uvm_map_pageable().
 1577  */
 1578 int
 1579 uvm_fault_wire(vm_map_t map, vaddr_t start, vaddr_t end, vm_prot_t access_type)
 1580 {
 1581         vaddr_t va;
 1582         int rv;
 1583 
 1584         /*
 1585          * now fault it in a page at a time.   if the fault fails then we have
 1586          * to undo what we have done.   note that in uvm_fault PROT_NONE
 1587          * is replaced with the max protection if fault_type is VM_FAULT_WIRE.
 1588          */
 1589         for (va = start ; va < end ; va += PAGE_SIZE) {
 1590                 rv = uvm_fault(map, va, VM_FAULT_WIRE, access_type);
 1591                 if (rv) {
 1592                         if (va != start) {
 1593                                 uvm_fault_unwire(map, start, va);
 1594                         }
 1595                         return (rv);
 1596                 }
 1597         }
 1598 
 1599         return (0);
 1600 }
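/*
 * Illustrative usage sketch (not part of the original file; the range
 * names are hypothetical).  Because uvm_fault_wire() unwires whatever
 * it managed to wire before failing, a caller needs no partial cleanup:
 *
 *	rv = uvm_fault_wire(map, buf_start, buf_end,
 *	    PROT_READ | PROT_WRITE);
 *	if (rv)
 *		return (rv);            nothing is left wired on failure
 *	... use the wired range ...
 *	uvm_fault_unwire(map, buf_start, buf_end);
 */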
 1601 
 1602 /*
 1603  * uvm_fault_unwire(): unwire range of virtual space.
 1604  */
 1605 void
 1606 uvm_fault_unwire(vm_map_t map, vaddr_t start, vaddr_t end)
 1607 {
 1608 
 1609         vm_map_lock_read(map);
 1610         uvm_fault_unwire_locked(map, start, end);
 1611         vm_map_unlock_read(map);
 1612 }
 1613 
 1614 /*
 1615  * uvm_fault_unwire_locked(): the guts of uvm_fault_unwire().
 1616  *
 1617  * => map must be at least read-locked.
 1618  */
 1619 void
 1620 uvm_fault_unwire_locked(vm_map_t map, vaddr_t start, vaddr_t end)
 1621 {
 1622         vm_map_entry_t entry, oentry = NULL, next;
 1623         pmap_t pmap = vm_map_pmap(map);
 1624         vaddr_t va;
 1625         paddr_t pa;
 1626         struct vm_page *pg;
 1627 
 1628         KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
 1629         vm_map_assert_anylock(map);
 1630 
 1631         /*
 1632          * we assume that the area we are unwiring has actually been wired
 1633          * in the first place.   this means that we should be able to extract
 1634          * the PAs from the pmap.
 1635          */
 1636 
 1637         /*
 1638          * find the beginning map entry for the region.
 1639          */
 1640         KASSERT(start >= vm_map_min(map) && end <= vm_map_max(map));
 1641         if (uvm_map_lookup_entry(map, start, &entry) == FALSE)
 1642                 panic("uvm_fault_unwire_locked: address not in map");
 1643 
 1644         for (va = start; va < end ; va += PAGE_SIZE) {
 1645                 if (pmap_extract(pmap, va, &pa) == FALSE)
 1646                         continue;
 1647 
 1648                 /*
 1649                  * find the map entry for the current address.
 1650                  */
 1651                 KASSERT(va >= entry->start);
 1652                 while (entry && va >= entry->end) {
 1653                         next = RBT_NEXT(uvm_map_addr, entry);
 1654                         entry = next;
 1655                 }
 1656 
 1657                 if (entry == NULL)
 1658                         return;
 1659                 if (va < entry->start)
 1660                         continue;
 1661 
 1662                 /*
 1663                  * lock it.
 1664                  */
 1665                 if (entry != oentry) {
 1666                         if (oentry != NULL) {
 1667                                 uvm_map_unlock_entry(oentry);
 1668                         }
 1669                         uvm_map_lock_entry(entry);
 1670                         oentry = entry;
 1671                 }
 1672 
 1673                 /*
 1674                  * if the entry is no longer wired, tell the pmap.
 1675                  */
 1676                 if (VM_MAPENT_ISWIRED(entry) == 0)
 1677                         pmap_unwire(pmap, va);
 1678 
 1679                 pg = PHYS_TO_VM_PAGE(pa);
 1680                 if (pg) {
 1681                         uvm_lock_pageq();
 1682                         uvm_pageunwire(pg);
 1683                         uvm_unlock_pageq();
 1684                 }
 1685         }
 1686 
 1687         if (oentry != NULL) {
 1688                 uvm_map_unlock_entry(oentry);
 1689         }
 1690 }
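/*
 * Editorial note on the loop above: addresses without a physical
 * translation (pmap_extract() failing) and addresses that fall into
 * gaps between map entries are skipped rather than treated as errors,
 * and the page queues are locked per page around uvm_pageunwire().
 * The "oentry" bookkeeping keeps at most one entry locked through
 * uvm_map_lock_entry() at any time as the walk crosses entry
 * boundaries.
 */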
 1691 
 1692 /*
 1693  * uvmfault_unlockmaps: unlock the maps
 1694  */
 1695 void
 1696 uvmfault_unlockmaps(struct uvm_faultinfo *ufi, boolean_t write_locked)
 1697 {
 1698         /*
 1699          * ufi can be NULL when this isn't really a fault,
 1700          * but merely paging in anon data.
 1701          */
 1702         if (ufi == NULL) {
 1703                 return;
 1704         }
 1705 
 1706         uvmfault_update_stats(ufi);
 1707         if (write_locked) {
 1708                 vm_map_unlock(ufi->map);
 1709         } else {
 1710                 vm_map_unlock_read(ufi->map);
 1711         }
 1712 }
 1713 
 1714 /*
 1715  * uvmfault_unlockall: unlock everything passed in.
 1716  *
 1717  * => maps must be read-locked (not write-locked).
 1718  */
 1719 void
 1720 uvmfault_unlockall(struct uvm_faultinfo *ufi, struct vm_amap *amap,
 1721     struct uvm_object *uobj)
 1722 {
 1723         if (uobj)
 1724                 rw_exit(uobj->vmobjlock);
 1725         if (amap != NULL)
 1726                 amap_unlock(amap);
 1727         uvmfault_unlockmaps(ufi, FALSE);
 1728 }
 1729 
 1730 /*
 1731  * uvmfault_lookup: lookup a virtual address in a map
 1732  *
 1733  * => caller must provide a uvm_faultinfo structure with the IN
 1734  *      params properly filled in
 1735  * => we will lookup the map entry (handling submaps) as we go
 1736  * => if the lookup is a success we will return with the maps locked
 1737  * => if "write_lock" is TRUE, we write_lock the map, otherwise we only
 1738  *      get a read lock.
 1739  * => note that submaps can only appear in the kernel and they are
 1740  *      required to use the same virtual addresses as the map they
 1741  *      are referenced by (thus address translation between the main
 1742  *      map and the submap is unnecessary).
 1743  */
 1744 
 1745 boolean_t
 1746 uvmfault_lookup(struct uvm_faultinfo *ufi, boolean_t write_lock)
 1747 {
 1748         vm_map_t tmpmap;
 1749 
 1750         /*
 1751          * init ufi values for lookup.
 1752          */
 1753         ufi->map = ufi->orig_map;
 1754         ufi->size = ufi->orig_size;
 1755 
 1756         /*
 1757          * keep going down levels until we are done.   note that there can
 1758          * only be two levels so we won't loop very long.
 1759          */
 1760         while (1) {
 1761                 if (ufi->orig_rvaddr < ufi->map->min_offset ||
 1762                     ufi->orig_rvaddr >= ufi->map->max_offset)
 1763                         return FALSE;
 1764 
 1765                 /* lock map */
 1766                 if (write_lock) {
 1767                         vm_map_lock(ufi->map);
 1768                 } else {
 1769                         vm_map_lock_read(ufi->map);
 1770                 }
 1771 
 1772                 /* lookup */
 1773                 if (!uvm_map_lookup_entry(ufi->map, ufi->orig_rvaddr,
 1774                     &ufi->entry)) {
 1775                         uvmfault_unlockmaps(ufi, write_lock);
 1776                         return FALSE;
 1777                 }
 1778 
 1779                 /* reduce size if necessary */
 1780                 if (ufi->entry->end - ufi->orig_rvaddr < ufi->size)
 1781                         ufi->size = ufi->entry->end - ufi->orig_rvaddr;
 1782 
 1783                 /*
 1784                  * submap?    replace map with the submap and lookup again.
 1785                  * note: VAs in submaps must match VAs in main map.
 1786                  */
 1787                 if (UVM_ET_ISSUBMAP(ufi->entry)) {
 1788                         tmpmap = ufi->entry->object.sub_map;
 1789                         uvmfault_unlockmaps(ufi, write_lock);
 1790                         ufi->map = tmpmap;
 1791                         continue;
 1792                 }
 1793 
 1794                 /*
 1795                  * got it!
 1796                  */
 1797                 ufi->mapv = ufi->map->timestamp;
 1798                 return TRUE;
 1799 
 1800         }       /* while loop */
 1801 
 1802         /*NOTREACHED*/
 1803 }
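/*
 * Illustrative sketch (not part of the original file; the local
 * variable names are hypothetical).  The IN parameters mentioned above
 * are the orig_* fields of struct uvm_faultinfo, so a minimal caller
 * looks roughly like:
 *
 *	struct uvm_faultinfo ufi;
 *
 *	ufi.orig_map = map;
 *	ufi.orig_rvaddr = trunc_page(vaddr);
 *	ufi.orig_size = PAGE_SIZE;
 *	if (uvmfault_lookup(&ufi, FALSE) == FALSE)
 *		return (EFAULT);        address not mapped
 *	... examine ufi.entry with ufi.map read-locked ...
 *	uvmfault_unlockmaps(&ufi, FALSE);
 */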
 1804 
 1805 /*
 1806  * uvmfault_relock: attempt to relock the same version of the map
 1807  *
 1808  * => fault data structures should be unlocked before calling.
 1809  * => if a success (TRUE) maps will be locked after call.
 1810  */
 1811 boolean_t
 1812 uvmfault_relock(struct uvm_faultinfo *ufi)
 1813 {
 1814         /*
 1815          * ufi can be NULL when this isn't really a fault,
 1816          * but merely paging in anon data.
 1817          */
 1818         if (ufi == NULL) {
 1819                 return TRUE;
 1820         }
 1821 
 1822         counters_inc(uvmexp_counters, flt_relck);
 1823 
 1824         /*
 1825          * relock map.   fail if version mismatch (in which case nothing
 1826          * gets locked).
 1827          */
 1828         vm_map_lock_read(ufi->map);
 1829         if (ufi->mapv != ufi->map->timestamp) {
 1830                 vm_map_unlock_read(ufi->map);
 1831                 return FALSE;
 1832         }
 1833 
 1834         counters_inc(uvmexp_counters, flt_relckok);
 1835         return TRUE;            /* got it! */
 1836 }
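/*
 * Illustrative sketch (not part of the original file) of the pattern
 * the fault code follows around a blocking operation: drop every lock,
 * do the slow work, then try to come back to the same version of the
 * map, restarting the fault if that fails:
 *
 *	uvmfault_unlockall(ufi, amap, uobj);
 *	... sleep, do pager I/O, or allocate memory ...
 *	if (uvmfault_relock(ufi) == FALSE)
 *		return ERESTART;        map changed; redo the fault
 *	if (amap != NULL)
 *		amap_lock(amap);
 */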
