The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/uvm/uvm_loan.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*      $NetBSD: uvm_loan.c,v 1.62 2006/11/01 10:18:27 yamt Exp $       */
    2 
    3 /*
    4  *
    5  * Copyright (c) 1997 Charles D. Cranor and Washington University.
    6  * All rights reserved.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  * 3. All advertising materials mentioning features or use of this software
   17  *    must display the following acknowledgement:
   18  *      This product includes software developed by Charles D. Cranor and
   19  *      Washington University.
   20  * 4. The name of the author may not be used to endorse or promote products
   21  *    derived from this software without specific prior written permission.
   22  *
   23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   24  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   25  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   26  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   27  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   28  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   29  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   30  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   31  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   32  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   33  *
   34  * from: Id: uvm_loan.c,v 1.1.6.4 1998/02/06 05:08:43 chs Exp
   35  */
   36 
   37 /*
   38  * uvm_loan.c: page loanout handler
   39  */
   40 
   41 #include <sys/cdefs.h>
   42 __KERNEL_RCSID(0, "$NetBSD: uvm_loan.c,v 1.62 2006/11/01 10:18:27 yamt Exp $");
   43 
   44 #include <sys/param.h>
   45 #include <sys/systm.h>
   46 #include <sys/kernel.h>
   47 #include <sys/proc.h>
   48 #include <sys/malloc.h>
   49 #include <sys/mman.h>
   50 
   51 #include <uvm/uvm.h>
   52 
   53 /*
   54  * "loaned" pages are pages which are (read-only, copy-on-write) loaned
   55  * from the VM system to other parts of the kernel.   this allows page
   56  * copying to be avoided (e.g. you can loan pages from objs/anons to
   57  * the mbuf system).
   58  *
   59  * there are 3 types of loans possible:
   60  *  O->K  uvm_object page to wired kernel page (e.g. mbuf data area)
   61  *  A->K  anon page to wired kernel page (e.g. mbuf data area)
   62  *  O->A  uvm_object to anon loan (e.g. vnode page to an anon)
   63  * note that it is possible to have an O page loaned to both an A and K
   64  * at the same time.
   65  *
   66  * loans are tracked by pg->loan_count.  an O->A page will have both
   67  * a uvm_object and a vm_anon, but PQ_ANON will not be set.   this sort
   68  * of page is considered "owned" by the uvm_object (not the anon).
   69  *
   70  * each loan of a page to the kernel bumps the pg->wire_count.  the
   71  * kernel mappings for these pages will be read-only and wired.  since
   72  * the page will also be wired, it will not be a candidate for pageout,
   73  * and thus will never be pmap_page_protect()'d with VM_PROT_NONE.  a
   74  * write fault in the kernel to one of these pages will not cause
   75  * copy-on-write.  instead, the page fault is considered fatal.  this
   76  * is because the kernel mapping will have no way to look up the
   77  * object/anon which the page is owned by.  this is a good side-effect,
   78  * since a kernel write to a loaned page is an error.
   79  *
   80  * owners that want to free their pages and discover that they are
   81  * loaned out simply "disown" them (the page becomes an orphan).  these
   82  * pages should be freed when the last loan is dropped.   in some cases
   83  * an anon may "adopt" an orphaned page.
   84  *
   85  * locking: to read pg->loan_count either the owner or the page queues
   86  * must be locked.   to modify pg->loan_count, both the owner of the page
   87  * and the PQs must be locked.   pg->flags is (as always) locked by
   88  * the owner of the page.
   89  *
   90  * note that locking from the "loaned" side is tricky since the object
   91  * getting the loaned page has no reference to the page's owner and thus
   92  * the owner could "die" at any time.   in order to prevent the owner
   93  * from dying the page queues should be locked.   this forces us to sometimes
   94  * use "try" locking.
   95  *
   96  * loans are typically broken by the following events:
   97  *  1. user-level write fault to a loaned page
   98  *  2. pageout of clean+inactive O->A loaned page
   99  *  3. owner frees page (e.g. pager flush)
  100  *
  101  * note that loaning a page causes all mappings of the page to become
  102  * read-only (via pmap_page_protect).   this could have an unexpected
  103  * effect on normal "wired" pages if one is not careful (XXX).
  104  */
  105 
  106 /*
  107  * local prototypes
  108  */
  109 
  110 static int      uvm_loananon(struct uvm_faultinfo *, void ***,
  111                              int, struct vm_anon *);
  112 static int      uvm_loanuobj(struct uvm_faultinfo *, void ***,
  113                              int, vaddr_t);
  114 static int      uvm_loanzero(struct uvm_faultinfo *, void ***, int);
  115 static void     uvm_unloananon(struct vm_anon **, int);
  116 static void     uvm_unloanpage(struct vm_page **, int);
  117 static int      uvm_loanpage(struct vm_page **, int);
  118 
  119 
  120 /*
  121  * inlines
  122  */
  123 
  124 /*
  125  * uvm_loanentry: loan out pages in a map entry (helper fn for uvm_loan())
  126  *
  127  * => "ufi" is the result of a successful map lookup (meaning that
  128  *      on entry the map is locked by the caller)
  129  * => we may unlock and then relock the map if needed (for I/O)
  130  * => we put our output result in "output"
  131  * => we always return with the map unlocked
  132  * => possible return values:
  133  *      -1 == error, map is unlocked
  134  *       0 == map relock error (try again!), map is unlocked
  135  *      >0 == number of pages we loaned, map is unlocked
  136  *
  137  * NOTE: We can live with this being an inline, because it is only called
  138  * from one place.
  139  */
  140 
  141 static inline int
  142 uvm_loanentry(struct uvm_faultinfo *ufi, void ***output, int flags)
  143 {
  144         vaddr_t curaddr = ufi->orig_rvaddr;
  145         vsize_t togo = ufi->size;
  146         struct vm_aref *aref = &ufi->entry->aref;
  147         struct uvm_object *uobj = ufi->entry->object.uvm_obj;
  148         struct vm_anon *anon;
  149         int rv, result = 0;     /* result = # of pages loaned so far */
  150 
  151         UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);
  152 
  153         /*
  154          * lock us the rest of the way down (we unlock before return)
  155          */
  156         if (aref->ar_amap)
  157                 amap_lock(aref->ar_amap);
  158 
  159         /*
  160          * loop until done
  161          */
  162         while (togo) {
  163 
  164                 /*
  165                  * find the page we want.   check the anon layer first.
  166                  */
  167 
  168                 if (aref->ar_amap) {
  169                         anon = amap_lookup(aref, curaddr - ufi->entry->start);
  170                 } else {
  171                         anon = NULL;
  172                 }
  173 
  174                 /* locked: map, amap, uobj */
  175                 if (anon) {
  176                         rv = uvm_loananon(ufi, output, flags, anon);
  177                 } else if (uobj) {
  178                         rv = uvm_loanuobj(ufi, output, flags, curaddr);
  179                 } else if (UVM_ET_ISCOPYONWRITE(ufi->entry)) {
  180                         rv = uvm_loanzero(ufi, output, flags);
  181                 } else {
                        /* no anon, no object, and not copy-on-write:
                         * there is nothing to loan from here */
  182                         uvmfault_unlockall(ufi, aref->ar_amap, uobj, NULL);
  183                         rv = -1;
  184                 }
  185                 /* locked: if (rv > 0) => map, amap, uobj  [o.w. unlocked] */
  186                 LOCK_ASSERT(rv > 0 || aref->ar_amap == NULL ||
  187                     !simple_lock_held(&aref->ar_amap->am_l));
  188                 LOCK_ASSERT(rv > 0 || uobj == NULL ||
  189                     !simple_lock_held(&uobj->vmobjlock));
  190 
  191                 /* total failure */
  192                 if (rv < 0) {
  193                         UVMHIST_LOG(loanhist, "failure %d", rv, 0,0,0);
  194                         return (-1);
  195                 }
  196 
  197                 /* relock failed, need to do another lookup */
  198                 if (rv == 0) {
  199                         UVMHIST_LOG(loanhist, "relock failure %d", result
  200                             ,0,0,0);
  201                         return (result);
  202                 }
  203 
  204                 /*
  205                  * got it... advance to next page
  206                  */
  207 
  208                 result++;
  209                 togo -= PAGE_SIZE;
  210                 curaddr += PAGE_SIZE;
  211         }
  212 
  213         /*
  214          * unlock what we locked, unlock the maps and return
  215          */
  216 
  217         if (aref->ar_amap)
  218                 amap_unlock(aref->ar_amap);
  219         uvmfault_unlockmaps(ufi, FALSE);
  220         UVMHIST_LOG(loanhist, "done %d", result, 0,0,0);
  221         return (result);
  222 }
  223 
  224 /*
  225  * normal functions
  226  */
  227 
  228 /*
  229  * uvm_loan: loan pages in a map out to anons or to the kernel
  230  *
  231  * => map should be unlocked
  232  * => start and len should be multiples of PAGE_SIZE
  233  * => result is either an array of anon's or vm_pages (depending on flags)
  234  * => flag values: UVM_LOAN_TOANON - loan to anons
  235  *                 UVM_LOAN_TOPAGE - loan to wired kernel page
  236  *    one and only one of these flags must be set!
  237  * => returns 0 (success), or an appropriate error number
  238  */
  239 
  240 int
  241 uvm_loan(struct vm_map *map, vaddr_t start, vsize_t len, void *v, int flags)
  242 {
  243         struct uvm_faultinfo ufi;
  244         void **result, **output;
  245         int rv, error;
  246 
  247         UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);
  248 
  249         /*
  250          * ensure that one and only one of the flags is set
  251          */
  252 
  253         KASSERT(((flags & UVM_LOAN_TOANON) == 0) ^
  254                 ((flags & UVM_LOAN_TOPAGE) == 0));
  255         KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
  256 
  257         /*
  258          * "output" is a pointer to the current place to put the loaned page.
  259          */
  260 
  261         result = v;
  262         output = &result[0];    /* start at the beginning ... */
  263 
  264         /*
  265          * while we've got pages to do
  266          */
  267 
  268         while (len > 0) {
  269 
  270                 /*
  271                  * fill in params for a call to uvmfault_lookup
  272                  */
  273 
  274                 ufi.orig_map = map;
  275                 ufi.orig_rvaddr = start;
  276                 ufi.orig_size = len;
  277 
  278                 /*
  279                  * do the lookup, the only time this will fail is if we hit on
  280                  * an unmapped region (an error)
  281                  */
  282 
  283                 if (!uvmfault_lookup(&ufi, FALSE)) {
  284                         error = ENOENT;
  285                         goto fail;
  286                 }
  287 
  288                 /*
  289                  * map now locked.  now do the loanout...
  290                  */
  291 
  292                 rv = uvm_loanentry(&ufi, &output, flags);
  293                 if (rv < 0) {
  294                         /* all unlocked due to error */
  295                         error = EINVAL;
  296                         goto fail;
  297                 }
  298 
  299                 /*
  300                  * done!  the map is unlocked.  advance, if possible.
  301                  *
  302                  * XXXCDC: could be recoded to hold the map lock with
  303                  *         smarter code (but it only happens on map entry
  304                  *         boundaries, so it isn't that bad).
  305                  */
  306 
                /*
                 * rv == 0 means uvm_loanentry hit a relock failure before
                 * loaning any page this round; loop again and redo the
                 * lookup from the same (start, len).
                 */
  307                 if (rv) {
  308                         rv <<= PAGE_SHIFT;      /* pages -> bytes */
  309                         len -= rv;
  310                         start += rv;
  311                 }
  312         }
  313         UVMHIST_LOG(loanhist, "success", 0,0,0,0);
  314         return 0;
  315 
  316 fail:
  317         /*
  318          * failed to complete loans.  drop any loans and return failure code.
  319          * map is already unlocked.
  320          */
  321 
  322         if (output - result) {  /* output - result = # of loans to undo */
  323                 if (flags & UVM_LOAN_TOANON) {
  324                         uvm_unloananon((struct vm_anon **)result,
  325                             output - result);
  326                 } else {
  327                         uvm_unloanpage((struct vm_page **)result,
  328                             output - result);
  329                 }
  330         }
  331         UVMHIST_LOG(loanhist, "error %d", error,0,0,0);
  332         return (error);
  333 }
  334 
  335 /*
  336  * uvm_loananon: loan a page from an anon out
  337  *
  338  * => called with map, amap, uobj locked
  339  * => return value:
  340  *      -1 = fatal error, everything is unlocked, abort.
  341  *       0 = lookup in ufi went stale, everything unlocked, relookup and
  342  *              try again
  343  *       1 = got it, everything still locked
  344  */
  345 
  346 int
  347 uvm_loananon(struct uvm_faultinfo *ufi, void ***output, int flags,
  348     struct vm_anon *anon)
  349 {
  350         struct vm_page *pg;
  351         int error;
  352 
  353         UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);
  354 
  355         /*
  356          * if we are loaning to "another" anon then it is easy, we just
  357          * bump the reference count on the current anon and return a
  358          * pointer to it (it becomes copy-on-write shared).
  359          */
  360 
  361         if (flags & UVM_LOAN_TOANON) {
  362                 simple_lock(&anon->an_lock);
  363                 pg = anon->an_page;
  364                 if (pg && (pg->pqflags & PQ_ANON) != 0 && anon->an_ref == 1) {
                        /* wired pages cannot be loaned; fail the whole loan */
  365                         if (pg->wire_count > 0) {
  366                                 UVMHIST_LOG(loanhist, "->A wired %p", pg,0,0,0);
  367                                 uvmfault_unlockall(ufi,
  368                                     ufi->entry->aref.ar_amap,
  369                                     ufi->entry->object.uvm_obj, anon);
  370                                 return (-1);
  371                         }
                        /* write-protect so future writes fault (COW share) */
  372                         pmap_page_protect(pg, VM_PROT_READ);
  373                 }
  374                 anon->an_ref++;
  375                 **output = anon;
  376                 (*output)++;
  377                 simple_unlock(&anon->an_lock);
  378                 UVMHIST_LOG(loanhist, "->A done", 0,0,0,0);
  379                 return (1);
  380         }
  381 
  382         /*
  383          * we are loaning to a kernel-page.   we need to get the page
  384          * resident so we can wire it.   uvmfault_anonget will handle
  385          * this for us.
  386          */
  387 
  388         simple_lock(&anon->an_lock);
  389         error = uvmfault_anonget(ufi, ufi->entry->aref.ar_amap, anon);
  390 
  391         /*
  392          * if we were unable to get the anon, then uvmfault_anonget has
  393          * unlocked everything and returned an error code.
  394          */
  395 
  396         if (error) {
  397                 UVMHIST_LOG(loanhist, "error %d", error,0,0,0);
  398 
  399                 /* need to refault (i.e. refresh our lookup) ? */
  400                 if (error == ERESTART) {
  401                         return (0);
  402                 }
  403 
  404                 /* "try again"?   sleep a bit and retry ... */
  405                 if (error == EAGAIN) {
  406                         tsleep(&lbolt, PVM, "loanagain", 0);
  407                         return (0);
  408                 }
  409 
  410                 /* otherwise flag it as an error */
  411                 return (-1);
  412         }
  413 
  414         /*
  415          * we have the page and its owner locked: do the loan now.
  416          */
  417 
  418         pg = anon->an_page;
  419         uvm_lock_pageq();
  420         if (pg->wire_count > 0) {
  421                 uvm_unlock_pageq();
  422                 UVMHIST_LOG(loanhist, "->K wired %p", pg,0,0,0);
  423                 KASSERT(pg->uobject == NULL);
  424                 uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
  425                     NULL, anon);
  426                 return (-1);
  427         }
        /* first loan of this page: revoke write access in all mappings */
  428         if (pg->loan_count == 0) {
  429                 pmap_page_protect(pg, VM_PROT_READ);
  430         }
  431         pg->loan_count++;
  432         uvm_pagedequeue(pg);
  433         uvm_unlock_pageq();
  434         **output = pg;
  435         (*output)++;
  436 
  437         /* unlock anon and return success */
  438         if (pg->uobject)
  439                 simple_unlock(&pg->uobject->vmobjlock);
  440         simple_unlock(&anon->an_lock);
  441         UVMHIST_LOG(loanhist, "->K done", 0,0,0,0);
  442         return (1);
  443 }
  444 
  445 /*
  446  * uvm_loanpage: loan out pages to kernel (->K)
  447  *
  448  * => pages should be object-owned and the object should be locked.
  449  * => in the case of error, the object might be unlocked and relocked.
  450  * => caller should busy the pages beforehand.
  451  * => pages will be unbusied.
  452  * => fail with EBUSY if meet a wired page.
  453  */
  454 static int
  455 uvm_loanpage(struct vm_page **pgpp, int npages)
  456 {
  457         int i;
  458         int error = 0;
  459 
  460         UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);
  461 
  462         for (i = 0; i < npages; i++) {
  463                 struct vm_page *pg = pgpp[i];
  464 
                /* all pages must belong to the same (locked) object
                 * and be busied by the caller */
  465                 KASSERT(pg->uobject != NULL);
  466                 KASSERT(pg->uobject == pgpp[0]->uobject);
  467                 KASSERT(!(pg->flags & (PG_RELEASED|PG_PAGEOUT)));
  468                 LOCK_ASSERT(simple_lock_held(&pg->uobject->vmobjlock));
  469                 KASSERT(pg->flags & PG_BUSY);
  470 
  471                 uvm_lock_pageq();
  472                 if (pg->wire_count > 0) {
  473                         uvm_unlock_pageq();
  474                         UVMHIST_LOG(loanhist, "wired %p", pg,0,0,0);
  475                         error = EBUSY;
  476                         break;
  477                 }
                /* first loan: write-protect the page's mappings */
  478                 if (pg->loan_count == 0) {
  479                         pmap_page_protect(pg, VM_PROT_READ);
  480                 }
  481                 pg->loan_count++;
  482                 uvm_pagedequeue(pg);
  483                 uvm_unlock_pageq();
  484         }
  485 
  486         uvm_page_unbusy(pgpp, npages);
  487 
  488         if (error) {
  489                 /*
  490                  * backout what we've done
  491                  */
  492                 struct simplelock *slock = &pgpp[0]->uobject->vmobjlock;
  493 
  494                 simple_unlock(slock);
  495                 uvm_unloan(pgpp, i, UVM_LOAN_TOPAGE);   /* i = # loaned before failure */
  496                 simple_lock(slock);
  497         }
  498 
  499         UVMHIST_LOG(loanhist, "done %d", error,0,0,0);
  500         return error;
  501 }
  502 
  503 /*
  504  * XXX UBC temp limit
  505  * number of pages to get at once.
  506  * should be <= MAX_READ_AHEAD in genfs_vnops.c
  507  */
  508 #define UVM_LOAN_GET_CHUNK      16
  509 
  510 /*
  511  * uvm_loanuobjpages: loan pages from a uobj out (O->K)
  512  *
  513  * => uobj shouldn't be locked.  (we'll lock it)
  514  * => fail with EBUSY if we meet a wired page.
  515  */
  516 int
  517 uvm_loanuobjpages(struct uvm_object *uobj, voff_t pgoff, int orignpages,
  518     struct vm_page **origpgpp)
  519 {
  520         int ndone; /* # of pages loaned out */
  521         struct vm_page **pgpp;
  522         int error;
  523         int i;
  524         struct simplelock *slock;
  525 
  526         pgpp = origpgpp;
        /* fetch and loan pages in chunks of at most UVM_LOAN_GET_CHUNK */
  527         for (ndone = 0; ndone < orignpages; ) {
  528                 int npages;
  529                 /* npendloan: # of pages busied but not loaned out yet. */
  530                 int npendloan = 0xdead; /* XXX gcc */
  531 reget:
  532                 npages = MIN(UVM_LOAN_GET_CHUNK, orignpages - ndone);
  533                 simple_lock(&uobj->vmobjlock);
  534                 error = (*uobj->pgops->pgo_get)(uobj,
  535                     pgoff + (ndone << PAGE_SHIFT), pgpp, &npages, 0,
  536                     VM_PROT_READ, 0, PGO_SYNCIO);
  537                 if (error == EAGAIN) {
  538                         tsleep(&lbolt, PVM, "nfsread", 0);
  539                         continue;
  540                 }
  541                 if (error)
  542                         goto fail;
  543 
  544                 KASSERT(npages > 0);
  545 
  546                 /* loan and unbusy pages */
  547                 slock = NULL;
  548                 for (i = 0; i < npages; i++) {
  549                         struct simplelock *nextslock; /* slock for next page */
  550                         struct vm_page *pg = *pgpp;
  551 
  552                         /* XXX assuming that the page is owned by uobj */
  553                         KASSERT(pg->uobject != NULL);
  554                         nextslock = &pg->uobject->vmobjlock;
  555 
                        /*
                         * owner changed from the previous page: flush the
                         * pending batch under the old lock before switching.
                         */
  556                         if (slock != nextslock) {
  557                                 if (slock) {
  558                                         KASSERT(npendloan > 0);
  559                                         error = uvm_loanpage(pgpp - npendloan,
  560                                             npendloan);
  561                                         simple_unlock(slock);
  562                                         if (error)
  563                                                 goto fail;
  564                                         ndone += npendloan;
  565                                         KASSERT(origpgpp + ndone == pgpp);
  566                                 }
  567                                 slock = nextslock;
  568                                 npendloan = 0;
  569                                 simple_lock(slock);
  570                         }
  571 
  572                         if ((pg->flags & PG_RELEASED) != 0) {
  573                                 /*
  574                                  * release pages and try again.
  575                                  */
  576                                 simple_unlock(slock);
  577                                 for (; i < npages; i++) {
  578                                         pg = pgpp[i];
  579                                         slock = &pg->uobject->vmobjlock;
  580 
  581                                         simple_lock(slock);
  582                                         uvm_lock_pageq();
  583                                         uvm_page_unbusy(&pg, 1);
  584                                         uvm_unlock_pageq();
  585                                         simple_unlock(slock);
  586                                 }
  587                                 goto reget;
  588                         }
  589 
  590                         npendloan++;
  591                         pgpp++;
  592                         KASSERT(origpgpp + ndone + npendloan == pgpp);
  593                 }
                /* flush the final pending batch of this chunk */
  594                 KASSERT(slock != NULL);
  595                 KASSERT(npendloan > 0);
  596                 error = uvm_loanpage(pgpp - npendloan, npendloan);
  597                 simple_unlock(slock);
  598                 if (error)
  599                         goto fail;
  600                 ndone += npendloan;
  601                 KASSERT(origpgpp + ndone == pgpp);
  602         }
  603 
  604         return 0;
  605 
  606 fail:
        /* unwind: drop the ndone loans already made before returning */
  607         uvm_unloan(origpgpp, ndone, UVM_LOAN_TOPAGE);
  608 
  609         return error;
  610 }
  611 
  612 /*
  613  * uvm_loanuobj: loan a page from a uobj out
  614  *
  615  * => called with map, amap, uobj locked
  616  * => return value:
  617  *      -1 = fatal error, everything is unlocked, abort.
  618  *       0 = lookup in ufi went stale, everything unlocked, relookup and
  619  *              try again
  620  *       1 = got it, everything still locked
  621  */
  622 
  623 static int
  624 uvm_loanuobj(struct uvm_faultinfo *ufi, void ***output, int flags, vaddr_t va)
  625 {
  626         struct vm_amap *amap = ufi->entry->aref.ar_amap;
  627         struct uvm_object *uobj = ufi->entry->object.uvm_obj;
  628         struct vm_page *pg;
  629         struct vm_anon *anon;
  630         int error, npages;
  631         boolean_t locked;
  632 
  633         UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);
  634 
  635         /*
  636          * first we must make sure the page is resident.
  637          *
  638          * XXXCDC: duplicate code with uvm_fault().
  639          */
  640 
  641         simple_lock(&uobj->vmobjlock);
  642         if (uobj->pgops->pgo_get) {     /* try locked pgo_get */
  643                 npages = 1;
  644                 pg = NULL;
  645                 error = (*uobj->pgops->pgo_get)(uobj,
  646                     va - ufi->entry->start + ufi->entry->offset,
  647                     &pg, &npages, 0, VM_PROT_READ, MADV_NORMAL, PGO_LOCKED);
  648         } else {
  649                 error = EIO;            /* must have pgo_get op */
  650         }
  651 
  652         /*
  653          * check the result of the locked pgo_get.  if there is a problem,
  654          * then we fail the loan.
  655          */
  656 
  657         if (error && error != EBUSY) {
  658                 uvmfault_unlockall(ufi, amap, uobj, NULL);
  659                 return (-1);
  660         }
  661 
  662         /*
  663          * if we need to unlock for I/O, do so now.
  664          */
  665 
  666         if (error == EBUSY) {
  667                 uvmfault_unlockall(ufi, amap, NULL, NULL);
  668 
  669                 /* locked: uobj */
  670                 npages = 1;
  671                 error = (*uobj->pgops->pgo_get)(uobj,
  672                     va - ufi->entry->start + ufi->entry->offset,
  673                     &pg, &npages, 0, VM_PROT_READ, MADV_NORMAL, PGO_SYNCIO);
  674                 /* locked: <nothing> */
  675 
  676                 if (error) {
  677                         if (error == EAGAIN) {
  678                                 tsleep(&lbolt, PVM, "fltagain2", 0);
  679                                 return (0);
  680                         }
  681                         return (-1);
  682                 }
  683 
  684                 /*
  685                  * pgo_get was a success.   attempt to relock everything.
  686                  */
  687 
  688                 locked = uvmfault_relock(ufi);
  689                 if (locked && amap)
  690                         amap_lock(amap);
  691                 uobj = pg->uobject;
  692                 simple_lock(&uobj->vmobjlock);
  693 
  694                 /*
  695                  * verify that the page has not be released and re-verify
  696                  * that amap slot is still free.   if there is a problem we
  697                  * drop our lock (thus force a lookup refresh/retry).
  698                  */
  699 
  700                 if ((pg->flags & PG_RELEASED) != 0 ||
  701                     (locked && amap && amap_lookup(&ufi->entry->aref,
  702                     ufi->orig_rvaddr - ufi->entry->start))) {
  703                         if (locked)
  704                                 uvmfault_unlockall(ufi, amap, NULL, NULL);
  705                         locked = FALSE;
  706                 }
  707 
  708                 /*
  709                  * didn't get the lock?   release the page and retry.
  710                  */
  711 
  712                 if (locked == FALSE) {
  713                         if (pg->flags & PG_WANTED) {
  714                                 wakeup(pg);
  715                         }
  716                         if (pg->flags & PG_RELEASED) {
  717                                 uvm_lock_pageq();
  718                                 uvm_pagefree(pg);
  719                                 uvm_unlock_pageq();
  720                                 simple_unlock(&uobj->vmobjlock);
  721                                 return (0);
  722                         }
  723                         uvm_lock_pageq();
  724                         uvm_pageactivate(pg);
  725                         uvm_unlock_pageq();
  726                         pg->flags &= ~(PG_BUSY|PG_WANTED);
  727                         UVM_PAGE_OWN(pg, NULL);
  728                         simple_unlock(&uobj->vmobjlock);
  729                         return (0);
  730                 }
  731         }
  732 
  733         KASSERT(uobj == pg->uobject);
  734 
  735         /*
  736          * at this point we have the page we want ("pg") marked PG_BUSY for us
  737          * and we have all data structures locked.  do the loanout.  page can
  738          * not be PG_RELEASED (we caught this above).
  739          */
  740 
  741         if ((flags & UVM_LOAN_TOANON) == 0) {
  742                 if (uvm_loanpage(&pg, 1)) {
  743                         uvmfault_unlockall(ufi, amap, uobj, NULL);
  744                         return (-1);
  745                 }
  746                 simple_unlock(&uobj->vmobjlock);
  747                 **output = pg;
  748                 (*output)++;
  749                 return (1);
  750         }
  751 
  752         /*
  753          * must be a loan to an anon.   check to see if there is already
  754          * an anon associated with this page.  if so, then just return
  755          * a reference to this object.   the page should already be
  756          * mapped read-only because it is already on loan.
  757          */
  758 
  759         if (pg->uanon) {
  760                 anon = pg->uanon;
  761                 simple_lock(&anon->an_lock);
  762                 anon->an_ref++;
  763                 simple_unlock(&anon->an_lock);
  764                 if (pg->flags & PG_WANTED) {
  765                         wakeup(pg);
  766                 }
  767                 pg->flags &= ~(PG_WANTED|PG_BUSY);
  768                 UVM_PAGE_OWN(pg, NULL);
  769                 simple_unlock(&uobj->vmobjlock);
  770                 **output = anon;
  771                 (*output)++;
  772                 return (1);
  773         }
  774 
  775         /*
  776          * need to allocate a new anon
  777          */
  778 
  779         anon = uvm_analloc();
  780         if (anon == NULL) {
  781                 goto fail;
  782         }
  783         anon->an_page = pg;
  784         pg->uanon = anon;
  785         uvm_lock_pageq();
  786         if (pg->wire_count > 0) {
  787                 uvm_unlock_pageq();
  788                 UVMHIST_LOG(loanhist, "wired %p", pg,0,0,0);
  789                 pg->uanon = NULL;
  790                 anon->an_page = NULL;
  791                 anon->an_ref--;
  792                 simple_unlock(&anon->an_lock);
  793                 uvm_anfree(anon);
  794                 goto fail;
  795         }
  796         if (pg->loan_count == 0) {
  797                 pmap_page_protect(pg, VM_PROT_READ);
  798         }
  799         pg->loan_count++;
  800         uvm_pageactivate(pg);
  801         uvm_unlock_pageq();
  802         if (pg->flags & PG_WANTED) {
  803                 wakeup(pg);
  804         }
  805         pg->flags &= ~(PG_WANTED|PG_BUSY);
  806         UVM_PAGE_OWN(pg, NULL);
  807         simple_unlock(&uobj->vmobjlock);
  808         simple_unlock(&anon->an_lock);
  809         **output = anon;
  810         (*output)++;
  811         return (1);
  812 
  813 fail:
  814         UVMHIST_LOG(loanhist, "fail", 0,0,0,0);
  815         /*
  816          * unlock everything and bail out.
  817          */
  818         if (pg->flags & PG_WANTED) {
  819                 wakeup(pg);
  820         }
  821         pg->flags &= ~(PG_WANTED|PG_BUSY);
  822         UVM_PAGE_OWN(pg, NULL);
  823         uvmfault_unlockall(ufi, amap, uobj, NULL);
  824         return (-1);
  825 }
  826 
  827 /*
  828  * uvm_loanzero: loan a zero-fill page out
  829  *
  830  * => called with map, amap, uobj locked
  831  * => return value:
  832  *      -1 = fatal error, everything is unlocked, abort.
  833  *       0 = lookup in ufi went stale, everything unlocked, relookup and
  834  *              try again
  835  *       1 = got it, everything still locked
  836  */
  837 
/*
 * uvm_loanzero_object: dummy uvm_object that owns the single shared
 * zero-filled page handed out by uvm_loanzero() below.
 */
static struct uvm_object uvm_loanzero_object;

static int
uvm_loanzero(struct uvm_faultinfo *ufi, void ***output, int flags)
{
	struct vm_anon *anon;
	struct vm_page *pg;
	struct vm_amap *amap = ufi->entry->aref.ar_amap;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);
again:
	simple_lock(&uvm_loanzero_object.vmobjlock);

	/*
	 * first, get ahold of our single zero page.  it lives on
	 * uvm_loanzero_object's memq and is allocated lazily here.
	 */

	if (__predict_false((pg =
			     TAILQ_FIRST(&uvm_loanzero_object.memq)) == NULL)) {
		while ((pg = uvm_pagealloc(&uvm_loanzero_object, 0, NULL,
					   UVM_PGA_ZERO)) == NULL) {
			/*
			 * out of memory: unlock everything, wait for the
			 * pagedaemon, then revalidate the fault state.
			 */
			simple_unlock(&uvm_loanzero_object.vmobjlock);
			uvmfault_unlockall(ufi, amap, NULL, NULL);
			uvm_wait("loanzero");
			if (!uvmfault_relock(ufi)) {
				/* lookup went stale; caller must retry */
				return (0);
			}
			if (amap) {
				amap_lock(amap);
			}
			goto again;
		}

		/* got a zero'd page. */
		pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
		/* the shared zero page must never be written to */
		pg->flags |= PG_RDONLY;
		uvm_lock_pageq();
		uvm_pageactivate(pg);
		uvm_unlock_pageq();
		UVM_PAGE_OWN(pg, NULL);
	}

	if ((flags & UVM_LOAN_TOANON) == 0) {	/* loaning to kernel-page */
		/*
		 * O->K loan: bump loan_count and take the page off the
		 * paging queues so the pagedaemon leaves it alone.
		 */
		uvm_lock_pageq();
		pg->loan_count++;
		uvm_pagedequeue(pg);
		uvm_unlock_pageq();
		simple_unlock(&uvm_loanzero_object.vmobjlock);
		**output = pg;
		(*output)++;
		return (1);
	}

	/*
	 * loaning to an anon.  check to see if there is already an anon
	 * associated with this page.  if so, then just return a reference
	 * to this object.
	 */

	if (pg->uanon) {
		anon = pg->uanon;
		simple_lock(&anon->an_lock);
		anon->an_ref++;
		simple_unlock(&anon->an_lock);
		simple_unlock(&uvm_loanzero_object.vmobjlock);
		**output = anon;
		(*output)++;
		return (1);
	}

	/*
	 * need to allocate a new anon
	 */

	anon = uvm_analloc();
	if (anon == NULL) {
		/* out of swap causes us to fail */
		simple_unlock(&uvm_loanzero_object.vmobjlock);
		uvmfault_unlockall(ufi, amap, NULL, NULL);
		return (-1);
	}
	/*
	 * NOTE(review): the unlock of anon->an_lock below implies
	 * uvm_analloc() returns the anon locked -- confirm against
	 * the uvm_analloc() definition.
	 */
	anon->an_page = pg;
	pg->uanon = anon;
	uvm_lock_pageq();
	pg->loan_count++;
	uvm_pageactivate(pg);
	uvm_unlock_pageq();
	simple_unlock(&anon->an_lock);
	simple_unlock(&uvm_loanzero_object.vmobjlock);
	**output = anon;
	(*output)++;
	return (1);
}
  931 
  932 
  933 /*
  934  * uvm_unloananon: kill loans on anons (basically a normal ref drop)
  935  *
  936  * => we expect all our resources to be unlocked
  937  */
  938 
  939 static void
  940 uvm_unloananon(struct vm_anon **aloans, int nanons)
  941 {
  942         struct vm_anon *anon;
  943 
  944         while (nanons-- > 0) {
  945                 int refs;
  946 
  947                 anon = *aloans++;
  948                 simple_lock(&anon->an_lock);
  949                 refs = --anon->an_ref;
  950                 simple_unlock(&anon->an_lock);
  951 
  952                 if (refs == 0) {
  953                         uvm_anfree(anon);
  954                 }
  955         }
  956 }
  957 
  958 /*
  959  * uvm_unloanpage: kill loans on pages loaned out to the kernel
  960  *
  961  * => we expect all our resources to be unlocked
  962  */
  963 
static void
uvm_unloanpage(struct vm_page **ploans, int npages)
{
	struct vm_page *pg;
	struct simplelock *slock;

	uvm_lock_pageq();
	while (npages-- > 0) {
		pg = *ploans++;

		/*
		 * do a little dance to acquire the object or anon lock
		 * as appropriate.  we are locking in the wrong order
		 * (page queue lock is held, object/anon lock wanted),
		 * so we have to do a try-lock here.  on failure, drop
		 * and retake the page queue lock so the current lock
		 * holder can make progress, then re-examine the page's
		 * owner from scratch (it may have changed meanwhile).
		 */

		slock = NULL;
		while (pg->uobject != NULL || pg->uanon != NULL) {
			if (pg->uobject != NULL) {
				slock = &pg->uobject->vmobjlock;
			} else {
				slock = &pg->uanon->an_lock;
			}
			if (simple_lock_try(slock)) {
				break;
			}
			uvm_unlock_pageq();
			uvm_lock_pageq();
			slock = NULL;
		}

		/*
		 * drop our loan.  if page is owned by an anon but
		 * PQ_ANON is not set, the page was loaned to the anon
		 * from an object which dropped ownership, so resolve
		 * this by turning the anon's loan into real ownership
		 * (ie. decrement loan_count again and set PQ_ANON).
		 * after all this, if there are no loans left, put the
		 * page back on a paging queue (if the page is owned by
		 * an anon) or free it (if the page is now unowned).
		 */

		KASSERT(pg->loan_count > 0);
		pg->loan_count--;
		if (pg->uobject == NULL && pg->uanon != NULL &&
		    (pg->pqflags & PQ_ANON) == 0) {
			KASSERT(pg->loan_count > 0);
			pg->loan_count--;
			pg->pqflags |= PQ_ANON;
		}
		if (pg->loan_count == 0) {
			if (pg->uobject == NULL && pg->uanon == NULL) {
				/* unowned page: nobody else will free it */
				KASSERT((pg->flags & PG_BUSY) == 0);
				uvm_pagefree(pg);
			} else {
				uvm_pageactivate(pg);
			}
		} else if (pg->loan_count == 1 && pg->uobject != NULL &&
			   pg->uanon != NULL) {
			uvm_pageactivate(pg);
		}
		if (slock != NULL) {
			simple_unlock(slock);
		}
	}
	uvm_unlock_pageq();
}
 1031 
 1032 /*
 1033  * uvm_unloan: kill loans on pages or anons.
 1034  */
 1035 
 1036 void
 1037 uvm_unloan(void *v, int npages, int flags)
 1038 {
 1039         if (flags & UVM_LOAN_TOANON) {
 1040                 uvm_unloananon(v, npages);
 1041         } else {
 1042                 uvm_unloanpage(v, npages);
 1043         }
 1044 }
 1045 
 1046 /*
 1047  * Minimal pager for uvm_loanzero_object.  We need to provide a "put"
 1048  * method, because the page can end up on a paging queue, and the
 1049  * page daemon will want to call pgo_put when it encounters the page
 1050  * on the inactive list.
 1051  */
 1052 
 1053 static int
 1054 ulz_put(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
 1055 {
 1056         struct vm_page *pg;
 1057 
 1058         KDASSERT(uobj == &uvm_loanzero_object);
 1059 
 1060         /*
 1061          * Don't need to do any work here if we're not freeing pages.
 1062          */
 1063 
 1064         if ((flags & PGO_FREE) == 0) {
 1065                 simple_unlock(&uobj->vmobjlock);
 1066                 return 0;
 1067         }
 1068 
 1069         /*
 1070          * we don't actually want to ever free the uvm_loanzero_page, so
 1071          * just reactivate or dequeue it.
 1072          */
 1073 
 1074         pg = TAILQ_FIRST(&uobj->memq);
 1075         KASSERT(pg != NULL);
 1076         KASSERT(TAILQ_NEXT(pg, listq) == NULL);
 1077 
 1078         uvm_lock_pageq();
 1079         if (pg->uanon)
 1080                 uvm_pageactivate(pg);
 1081         else
 1082                 uvm_pagedequeue(pg);
 1083         uvm_unlock_pageq();
 1084 
 1085         simple_unlock(&uobj->vmobjlock);
 1086         return 0;
 1087 }
 1088 
/*
 * pager ops vector for uvm_loanzero_object.  only the "put" method is
 * implemented (the page daemon may call it for the zero page); all
 * other operations are intentionally NULL.
 */
static struct uvm_pagerops ulz_pager = {
	NULL,		/* init */
	NULL,		/* reference */
	NULL,		/* detach */
	NULL,		/* fault */
	NULL,		/* get */
	ulz_put,	/* put */
};
 1097 
 1098 /*
 1099  * uvm_loan_init(): initialize the uvm_loan() facility.
 1100  */
 1101 
 1102 void
 1103 uvm_loan_init(void)
 1104 {
 1105 
 1106         simple_lock_init(&uvm_loanzero_object.vmobjlock);
 1107         TAILQ_INIT(&uvm_loanzero_object.memq);
 1108         uvm_loanzero_object.pgops = &ulz_pager;
 1109 
 1110         UVMHIST_INIT(loanhist, 300);
 1111 }
 1112 
 1113 /*
 1114  * uvm_loanbreak: break loan on a uobj page
 1115  *
 1116  * => called with uobj locked
 1117  * => the page should be busy
 1118  * => return value:
 1119  *      newly allocated page if succeeded
 1120  */
struct vm_page *
uvm_loanbreak(struct vm_page *uobjpage)
{
	struct vm_page *pg;
#ifdef DIAGNOSTIC
	/* uobj is only referenced by the assertions below, which also
	 * compile away when DIAGNOSTIC is not defined. */
	struct uvm_object *uobj = uobjpage->uobject;
#endif

	KASSERT(uobj != NULL);
	LOCK_ASSERT(simple_lock_held(&uobj->vmobjlock));
	KASSERT(uobjpage->flags & PG_BUSY);

	/* alloc new un-owned page */
	pg = uvm_pagealloc(NULL, 0, NULL, 0);
	if (pg == NULL)
		return NULL;

	/*
	 * copy the data from the old page to the new
	 * one and clear the fake flags on the new page (keep it busy).
	 * force a reload of the old page by clearing it from all
	 * pmaps.
	 * transfer dirtiness of the old page to the new page.
	 * then lock the page queues to rename the pages.
	 */

	uvm_pagecopy(uobjpage, pg);	/* old -> new */
	pg->flags &= ~PG_FAKE;
	pmap_page_protect(uobjpage, VM_PROT_NONE);
	/* old page is clean iff PG_CLEAN is set and no pmap has
	 * modified it behind our back. */
	if ((uobjpage->flags & PG_CLEAN) != 0 && !pmap_clear_modify(uobjpage)) {
		pmap_clear_modify(pg);
		pg->flags |= PG_CLEAN;
	} else {
		/* uvm_pagecopy marked it dirty */
		KASSERT((pg->flags & PG_CLEAN) == 0);
		/* an object with a dirty page should be dirty. */
		KASSERT(!UVM_OBJ_IS_CLEAN(uobj));
	}
	if (uobjpage->flags & PG_WANTED)
		wakeup(uobjpage);
	/* uobj still locked */
	uobjpage->flags &= ~(PG_WANTED|PG_BUSY);
	UVM_PAGE_OWN(uobjpage, NULL);

	uvm_lock_pageq();

	/*
	 * replace uobjpage with new page.
	 */

	uvm_pagereplace(uobjpage, pg);

	/*
	 * if the page is no longer referenced by
	 * an anon (i.e. we are breaking an O->K
	 * loan), then remove it from any pageq's.
	 */
	if (uobjpage->uanon == NULL)
		uvm_pagedequeue(uobjpage);

	/*
	 * at this point we have absolutely no
	 * control over uobjpage
	 */

	/* install new page */
	uvm_pageactivate(pg);
	uvm_unlock_pageq();

	/*
	 * done!  loan is broken and "pg" is
	 * PG_BUSY.   it can now replace uobjpage.
	 */

	return pg;
}

Cache object: 02bb2397ef7e56fcc619d3999de2596c


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.