FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_reserv.c


    1 /*-
    2  * Copyright (c) 2002-2006 Rice University
    3  * Copyright (c) 2007-2008 Alan L. Cox <alc@cs.rice.edu>
    4  * All rights reserved.
    5  *
    6  * This software was developed for the FreeBSD Project by Alan L. Cox,
    7  * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
    8  *
    9  * Redistribution and use in source and binary forms, with or without
   10  * modification, are permitted provided that the following conditions
   11  * are met:
   12  * 1. Redistributions of source code must retain the above copyright
   13  *    notice, this list of conditions and the following disclaimer.
   14  * 2. Redistributions in binary form must reproduce the above copyright
   15  *    notice, this list of conditions and the following disclaimer in the
   16  *    documentation and/or other materials provided with the distribution.
   17  *
   18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   19  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   20  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   21  * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
   22  * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
   23  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
   24  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
   25  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
   26  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
   28  * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   29  * POSSIBILITY OF SUCH DAMAGE.
   30  */
   31 
   32 /*
   33  *      Superpage reservation management module
   34  *
   35  * Any external functions defined by this module are only to be used by the
   36  * virtual memory system.
   37  */
   38 
   39 #include <sys/cdefs.h>
   40 __FBSDID("$FreeBSD: releng/10.0/sys/vm/vm_reserv.c 255626 2013-09-17 07:35:26Z kib $");
   41 
   42 #include "opt_vm.h"
   43 
   44 #include <sys/param.h>
   45 #include <sys/kernel.h>
   46 #include <sys/lock.h>
   47 #include <sys/malloc.h>
   48 #include <sys/mutex.h>
   49 #include <sys/queue.h>
   50 #include <sys/rwlock.h>
   51 #include <sys/sbuf.h>
   52 #include <sys/sysctl.h>
   53 #include <sys/systm.h>
   54 
   55 #include <vm/vm.h>
   56 #include <vm/vm_param.h>
   57 #include <vm/vm_object.h>
   58 #include <vm/vm_page.h>
   59 #include <vm/vm_phys.h>
   60 #include <vm/vm_radix.h>
   61 #include <vm/vm_reserv.h>
   62 
   63 /*
   64  * The reservation system supports the speculative allocation of large physical
   65  * pages ("superpages").  Speculative allocation enables the fully-automatic
   66  * utilization of superpages by the virtual memory system.  In other words, no
   67  * programmatic directives are required to use superpages.
   68  */
   69 
   70 #if VM_NRESERVLEVEL > 0
   71 
   72 /*
   73  * The number of small pages that are contained in a level 0 reservation
   74  */
   75 #define VM_LEVEL_0_NPAGES       (1 << VM_LEVEL_0_ORDER)
   76 
   77 /*
   78  * The number of bits by which a physical address is shifted to obtain the
   79  * reservation number
   80  */
   81 #define VM_LEVEL_0_SHIFT        (VM_LEVEL_0_ORDER + PAGE_SHIFT)
   82 
   83 /*
   84  * The size of a level 0 reservation in bytes
   85  */
   86 #define VM_LEVEL_0_SIZE         (1 << VM_LEVEL_0_SHIFT)
   87 
   88 /*
   89  * Computes the index of the small page underlying the given (object, pindex)
   90  * within the reservation's array of small pages.
   91  */
   92 #define VM_RESERV_INDEX(object, pindex) \
   93     (((object)->pg_color + (pindex)) & (VM_LEVEL_0_NPAGES - 1))
   94 
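/*
 * Illustrative sketch (not part of the original file): assuming amd64-like
 * values of VM_LEVEL_0_ORDER == 9 and PAGE_SHIFT == 12, the macros above
 * yield 512 small pages per level 0 reservation, a 21-bit shift, and a
 * 2 MB reservation size.  With a hypothetical pg_color of 3, the page at
 * pindex 700 maps to index (3 + 700) & 511 == 191 within its reservation.
 * The EX_* names below are made up for the example and are not compiled.
 */
#if 0
#define EX_LEVEL_0_ORDER        9                               /* assumed */
#define EX_PAGE_SHIFT           12                              /* assumed */
#define EX_NPAGES               (1 << EX_LEVEL_0_ORDER)         /* 512 pages */
#define EX_SHIFT                (EX_LEVEL_0_ORDER + EX_PAGE_SHIFT)   /* 21 */
#define EX_SIZE                 (1 << EX_SHIFT)                 /* 2 MB */
#define EX_INDEX(color, pindex) (((color) + (pindex)) & (EX_NPAGES - 1))
/* EX_INDEX(3, 700) == 191 */
#endif
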
   95 /*
   96  * The reservation structure
   97  *
   98  * A reservation structure is constructed whenever a large physical page is
   99  * speculatively allocated to an object.  The reservation provides the small
  100  * physical pages for the range [pindex, pindex + VM_LEVEL_0_NPAGES) of offsets
  101  * within that object.  The reservation's "popcnt" tracks the number of these
  102  * small physical pages that are in use at any given time.  When and if the
  103  * reservation is not fully utilized, it appears in the queue of partially-
  104  * populated reservations.  The reservation always appears on the containing
  105  * object's list of reservations.
  106  *
  107  * A partially-populated reservation can be broken and reclaimed at any time.
  108  */
  109 struct vm_reserv {
  110         TAILQ_ENTRY(vm_reserv) partpopq;
  111         LIST_ENTRY(vm_reserv) objq;
  112         vm_object_t     object;                 /* containing object */
  113         vm_pindex_t     pindex;                 /* offset within object */
  114         vm_page_t       pages;                  /* first page of a superpage */
  115         int             popcnt;                 /* # of pages in use */
  116         char            inpartpopq;
  117 };
  118 
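/*
 * Illustrative sketch (not part of the original file): between calls to the
 * routines below, the fields above encode three states.  The hypothetical
 * helper only restates the invariants that vm_reserv_populate() and
 * vm_reserv_depopulate() maintain; it is not compiled.
 */
#if 0
static void
ex_reserv_check_state(vm_reserv_t rv)
{

        if (rv->object == NULL) {
                /* Free (or invalid): no owner, no pages in use, not queued. */
                KASSERT(rv->popcnt == 0 && !rv->inpartpopq, ("free reserv"));
        } else if (rv->popcnt == VM_LEVEL_0_NPAGES) {
                /* Fully populated: never on the partially-populated queue. */
                KASSERT(!rv->inpartpopq, ("full reserv on partpopq"));
        } else {
                /* Partially populated: queued and eligible for reclamation. */
                KASSERT(rv->popcnt > 0 && rv->inpartpopq, ("partpop reserv"));
        }
}
#endif
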
  119 /*
  120  * The reservation array
  121  *
   122  * This array is analogous in function to vm_page_array.  It differs in the
   123  * respect that it may contain a greater number of reservation
  124  * structures than there are (physical) superpages.  These "invalid"
  125  * reservation structures exist to trade-off space for time in the
  126  * implementation of vm_reserv_from_page().  Invalid reservation structures are
  127  * distinguishable from "valid" reservation structures by inspecting the
  128  * reservation's "pages" field.  Invalid reservation structures have a NULL
  129  * "pages" field.
  130  *
  131  * vm_reserv_from_page() maps a small (physical) page to an element of this
  132  * array by computing a physical reservation number from the page's physical
  133  * address.  The physical reservation number is used as the array index.
  134  *
  135  * An "active" reservation is a valid reservation structure that has a non-NULL
  136  * "object" field and a non-zero "popcnt" field.  In other words, every active
  137  * reservation belongs to a particular object.  Moreover, every active
  138  * reservation has an entry in the containing object's list of reservations.  
  139  */
  140 static vm_reserv_t vm_reserv_array;
  141 
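/*
 * Illustrative sketch (not part of the original file): with an assumed 2 MB
 * level 0 reservation (VM_LEVEL_0_SHIFT == 21), a small page at physical
 * address 0x12345000 maps to reservation number 0x12345000 >> 21 == 0x91,
 * i.e., vm_reserv_array[0x91].  The hypothetical helper below shows how the
 * NULL "pages" convention distinguishes invalid entries; it is not compiled.
 */
#if 0
static __inline boolean_t
ex_reserv_entry_is_valid(vm_paddr_t pa)
{

        /* Invalid entries have a NULL "pages" field. */
        return (vm_reserv_array[pa >> VM_LEVEL_0_SHIFT].pages != NULL);
}
#endif
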
  142 /*
  143  * The partially-populated reservation queue
  144  *
  145  * This queue enables the fast recovery of an unused cached or free small page
  146  * from a partially-populated reservation.  The reservation at the head of
  147  * this queue is the least-recently-changed, partially-populated reservation.
  148  *
  149  * Access to this queue is synchronized by the free page queue lock.
  150  */
  151 static TAILQ_HEAD(, vm_reserv) vm_rvq_partpop =
  152                             TAILQ_HEAD_INITIALIZER(vm_rvq_partpop);
  153 
  154 static SYSCTL_NODE(_vm, OID_AUTO, reserv, CTLFLAG_RD, 0, "Reservation Info");
  155 
  156 static long vm_reserv_broken;
  157 SYSCTL_LONG(_vm_reserv, OID_AUTO, broken, CTLFLAG_RD,
  158     &vm_reserv_broken, 0, "Cumulative number of broken reservations");
  159 
  160 static long vm_reserv_freed;
  161 SYSCTL_LONG(_vm_reserv, OID_AUTO, freed, CTLFLAG_RD,
  162     &vm_reserv_freed, 0, "Cumulative number of freed reservations");
  163 
  164 static int sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS);
  165 
  166 SYSCTL_OID(_vm_reserv, OID_AUTO, partpopq, CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
  167     sysctl_vm_reserv_partpopq, "A", "Partially-populated reservation queues");
  168 
  169 static long vm_reserv_reclaimed;
  170 SYSCTL_LONG(_vm_reserv, OID_AUTO, reclaimed, CTLFLAG_RD,
  171     &vm_reserv_reclaimed, 0, "Cumulative number of reclaimed reservations");
  172 
  173 static void             vm_reserv_depopulate(vm_reserv_t rv);
  174 static vm_reserv_t      vm_reserv_from_page(vm_page_t m);
  175 static boolean_t        vm_reserv_has_pindex(vm_reserv_t rv,
  176                             vm_pindex_t pindex);
  177 static void             vm_reserv_populate(vm_reserv_t rv);
  178 static void             vm_reserv_reclaim(vm_reserv_t rv);
  179 
  180 /*
  181  * Describes the current state of the partially-populated reservation queue.
  182  */
  183 static int
  184 sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS)
  185 {
  186         struct sbuf sbuf;
  187         vm_reserv_t rv;
  188         int counter, error, level, unused_pages;
  189 
  190         error = sysctl_wire_old_buffer(req, 0);
  191         if (error != 0)
  192                 return (error);
  193         sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
  194         sbuf_printf(&sbuf, "\nLEVEL     SIZE  NUMBER\n\n");
  195         for (level = -1; level <= VM_NRESERVLEVEL - 2; level++) {
  196                 counter = 0;
  197                 unused_pages = 0;
  198                 mtx_lock(&vm_page_queue_free_mtx);
  199                 TAILQ_FOREACH(rv, &vm_rvq_partpop/*[level]*/, partpopq) {
  200                         counter++;
  201                         unused_pages += VM_LEVEL_0_NPAGES - rv->popcnt;
  202                 }
  203                 mtx_unlock(&vm_page_queue_free_mtx);
  204                 sbuf_printf(&sbuf, "%5d: %6dK, %6d\n", level,
  205                     unused_pages * ((int)PAGE_SIZE / 1024), counter);
  206         }
  207         error = sbuf_finish(&sbuf);
  208         sbuf_delete(&sbuf);
  209         return (error);
  210 }
  211 
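/*
 * Illustrative sketch (not part of the original file): the handler above
 * backs the read-only string sysctl "vm.reserv.partpopq".  A userland
 * program could read it as sketched below, or simply run
 * "sysctl vm.reserv.partpopq"; this is a separate program, not kernel code.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
        char *buf;
        size_t len = 0;

        /* First call reports the required buffer length. */
        if (sysctlbyname("vm.reserv.partpopq", NULL, &len, NULL, 0) != 0)
                return (1);
        if ((buf = malloc(len)) == NULL ||
            sysctlbyname("vm.reserv.partpopq", buf, &len, NULL, 0) != 0)
                return (1);
        printf("%s", buf);
        free(buf);
        return (0);
}
#endif
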
  212 /*
  213  * Reduces the given reservation's population count.  If the population count
  214  * becomes zero, the reservation is destroyed.  Additionally, moves the
  215  * reservation to the tail of the partially-populated reservations queue if the
  216  * population count is non-zero.
  217  *
  218  * The free page queue lock must be held.
  219  */
  220 static void
  221 vm_reserv_depopulate(vm_reserv_t rv)
  222 {
  223 
  224         mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
  225         KASSERT(rv->object != NULL,
  226             ("vm_reserv_depopulate: reserv %p is free", rv));
  227         KASSERT(rv->popcnt > 0,
  228             ("vm_reserv_depopulate: reserv %p's popcnt is corrupted", rv));
  229         if (rv->inpartpopq) {
  230                 TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
  231                 rv->inpartpopq = FALSE;
  232         }
  233         rv->popcnt--;
  234         if (rv->popcnt == 0) {
  235                 LIST_REMOVE(rv, objq);
  236                 rv->object = NULL;
  237                 vm_phys_free_pages(rv->pages, VM_LEVEL_0_ORDER);
  238                 vm_reserv_freed++;
  239         } else {
  240                 rv->inpartpopq = TRUE;
  241                 TAILQ_INSERT_TAIL(&vm_rvq_partpop, rv, partpopq);
  242         }
  243 }
  244 
  245 /*
  246  * Returns the reservation to which the given page might belong.
  247  */
  248 static __inline vm_reserv_t
  249 vm_reserv_from_page(vm_page_t m)
  250 {
  251 
  252         return (&vm_reserv_array[VM_PAGE_TO_PHYS(m) >> VM_LEVEL_0_SHIFT]);
  253 }
  254 
  255 /*
  256  * Returns TRUE if the given reservation contains the given page index and
  257  * FALSE otherwise.
  258  */
  259 static __inline boolean_t
  260 vm_reserv_has_pindex(vm_reserv_t rv, vm_pindex_t pindex)
  261 {
  262 
  263         return (((pindex - rv->pindex) & ~(VM_LEVEL_0_NPAGES - 1)) == 0);
  264 }
  265 
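/*
 * Illustrative sketch (not part of the original file): the mask test above
 * checks that "pindex" lies in [rv->pindex, rv->pindex + VM_LEVEL_0_NPAGES).
 * Assuming 512 pages per reservation, rv->pindex == 1024 and pindex == 1200
 * gives (1200 - 1024) & ~511 == 0 (inside), while pindex == 1600 gives
 * (1600 - 1024) & ~511 == 512 (outside); a smaller pindex wraps around as an
 * unsigned difference and also fails.  The hypothetical equivalent below is
 * not compiled.
 */
#if 0
static __inline boolean_t
ex_reserv_has_pindex(vm_reserv_t rv, vm_pindex_t pindex)
{

        /* More explicit form of the single mask-and-compare above. */
        return (pindex >= rv->pindex &&
            pindex < rv->pindex + VM_LEVEL_0_NPAGES);
}
#endif
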
  266 /*
  267  * Increases the given reservation's population count.  Moves the reservation
  268  * to the tail of the partially-populated reservation queue.
  269  *
  270  * The free page queue must be locked.
  271  */
  272 static void
  273 vm_reserv_populate(vm_reserv_t rv)
  274 {
  275 
  276         mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
  277         KASSERT(rv->object != NULL,
  278             ("vm_reserv_populate: reserv %p is free", rv));
  279         KASSERT(rv->popcnt < VM_LEVEL_0_NPAGES,
  280             ("vm_reserv_populate: reserv %p is already full", rv));
  281         if (rv->inpartpopq) {
  282                 TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
  283                 rv->inpartpopq = FALSE;
  284         }
  285         rv->popcnt++;
  286         if (rv->popcnt < VM_LEVEL_0_NPAGES) {
  287                 rv->inpartpopq = TRUE;
  288                 TAILQ_INSERT_TAIL(&vm_rvq_partpop, rv, partpopq);
  289         }
  290 }
  291 
  292 /*
  293  * Allocates a contiguous set of physical pages of the given size "npages"
  294  * from an existing or newly-created reservation.  All of the physical pages
  295  * must be at or above the given physical address "low" and below the given
  296  * physical address "high".  The given value "alignment" determines the
  297  * alignment of the first physical page in the set.  If the given value
  298  * "boundary" is non-zero, then the set of physical pages cannot cross any
  299  * physical address boundary that is a multiple of that value.  Both
  300  * "alignment" and "boundary" must be a power of two.
  301  *
  302  * The object and free page queue must be locked.
  303  */
  304 vm_page_t
  305 vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex, u_long npages,
  306     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
  307 {
  308         vm_paddr_t pa, size;
  309         vm_page_t m, m_ret, mpred, msucc;
  310         vm_pindex_t first, leftcap, rightcap;
  311         vm_reserv_t rv;
  312         u_long allocpages, maxpages, minpages;
  313         int i, index, n;
  314 
  315         mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
  316         VM_OBJECT_ASSERT_WLOCKED(object);
  317         KASSERT(npages != 0, ("vm_reserv_alloc_contig: npages is 0"));
  318 
  319         /*
  320          * Is a reservation fundamentally impossible?
  321          */
  322         if (pindex < VM_RESERV_INDEX(object, pindex) ||
  323             pindex + npages > object->size)
  324                 return (NULL);
  325 
  326         /*
  327          * All reservations of a particular size have the same alignment.
  328          * Assuming that the first page is allocated from a reservation, the
  329          * least significant bits of its physical address can be determined
  330          * from its offset from the beginning of the reservation and the size
  331          * of the reservation.
  332          *
  333          * Could the specified index within a reservation of the smallest
  334          * possible size satisfy the alignment and boundary requirements?
  335          */
  336         pa = VM_RESERV_INDEX(object, pindex) << PAGE_SHIFT;
  337         if ((pa & (alignment - 1)) != 0)
  338                 return (NULL);
  339         size = npages << PAGE_SHIFT;
  340         if (((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
  341                 return (NULL);
  342 
  343         /*
  344          * Look for an existing reservation.
  345          */
  346         mpred = vm_radix_lookup_le(&object->rtree, pindex);
  347         if (mpred != NULL) {
  348                 KASSERT(mpred->pindex < pindex,
  349                     ("vm_reserv_alloc_contig: pindex already allocated"));
  350                 rv = vm_reserv_from_page(mpred);
  351                 if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
  352                         goto found;
  353                 msucc = TAILQ_NEXT(mpred, listq);
  354         } else
  355                 msucc = TAILQ_FIRST(&object->memq);
  356         if (msucc != NULL) {
  357                 KASSERT(msucc->pindex > pindex,
   358                     ("vm_reserv_alloc_contig: pindex already allocated"));
  359                 rv = vm_reserv_from_page(msucc);
  360                 if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
  361                         goto found;
  362         }
  363 
  364         /*
  365          * Could at least one reservation fit between the first index to the
  366          * left that can be used and the first index to the right that cannot
  367          * be used?
  368          */
  369         first = pindex - VM_RESERV_INDEX(object, pindex);
  370         if (mpred != NULL) {
  371                 if ((rv = vm_reserv_from_page(mpred))->object != object)
  372                         leftcap = mpred->pindex + 1;
  373                 else
  374                         leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
  375                 if (leftcap > first)
  376                         return (NULL);
  377         }
  378         minpages = VM_RESERV_INDEX(object, pindex) + npages;
  379         maxpages = roundup2(minpages, VM_LEVEL_0_NPAGES);
  380         allocpages = maxpages;
  381         if (msucc != NULL) {
  382                 if ((rv = vm_reserv_from_page(msucc))->object != object)
  383                         rightcap = msucc->pindex;
  384                 else
  385                         rightcap = rv->pindex;
  386                 if (first + maxpages > rightcap) {
  387                         if (maxpages == VM_LEVEL_0_NPAGES)
  388                                 return (NULL);
  389                         allocpages = minpages;
  390                 }
  391         }
  392 
  393         /*
  394          * Would the last new reservation extend past the end of the object?
  395          */
  396         if (first + maxpages > object->size) {
  397                 /*
  398                  * Don't allocate the last new reservation if the object is a
  399                  * vnode or backed by another object that is a vnode. 
  400                  */
  401                 if (object->type == OBJT_VNODE ||
  402                     (object->backing_object != NULL &&
  403                     object->backing_object->type == OBJT_VNODE)) {
  404                         if (maxpages == VM_LEVEL_0_NPAGES)
  405                                 return (NULL);
  406                         allocpages = minpages;
  407                 }
  408                 /* Speculate that the object may grow. */
  409         }
  410 
  411         /*
  412          * Allocate and populate the new reservations.  The alignment and
  413          * boundary specified for this allocation may be different from the
  414          * alignment and boundary specified for the requested pages.  For
  415          * instance, the specified index may not be the first page within the
  416          * first new reservation.
  417          */
  418         m = vm_phys_alloc_contig(allocpages, low, high, ulmax(alignment,
  419             VM_LEVEL_0_SIZE), boundary > VM_LEVEL_0_SIZE ? boundary : 0);
  420         if (m == NULL)
  421                 return (NULL);
  422         m_ret = NULL;
  423         index = VM_RESERV_INDEX(object, pindex);
  424         do {
  425                 rv = vm_reserv_from_page(m);
  426                 KASSERT(rv->pages == m,
  427                     ("vm_reserv_alloc_contig: reserv %p's pages is corrupted",
  428                     rv));
  429                 KASSERT(rv->object == NULL,
  430                     ("vm_reserv_alloc_contig: reserv %p isn't free", rv));
  431                 LIST_INSERT_HEAD(&object->rvq, rv, objq);
  432                 rv->object = object;
  433                 rv->pindex = first;
  434                 KASSERT(rv->popcnt == 0,
  435                     ("vm_reserv_alloc_contig: reserv %p's popcnt is corrupted",
  436                     rv));
  437                 KASSERT(!rv->inpartpopq,
  438                     ("vm_reserv_alloc_contig: reserv %p's inpartpopq is TRUE",
  439                     rv));
  440                 n = ulmin(VM_LEVEL_0_NPAGES - index, npages);
  441                 for (i = 0; i < n; i++)
  442                         vm_reserv_populate(rv);
  443                 npages -= n;
  444                 if (m_ret == NULL) {
  445                         m_ret = &rv->pages[index];
  446                         index = 0;
  447                 }
  448                 m += VM_LEVEL_0_NPAGES;
  449                 first += VM_LEVEL_0_NPAGES;
  450                 allocpages -= VM_LEVEL_0_NPAGES;
  451         } while (allocpages > 0);
  452         return (m_ret);
  453 
  454         /*
  455          * Found a matching reservation.
  456          */
  457 found:
  458         index = VM_RESERV_INDEX(object, pindex);
  459         /* Does the allocation fit within the reservation? */
  460         if (index + npages > VM_LEVEL_0_NPAGES)
  461                 return (NULL);
  462         m = &rv->pages[index];
  463         pa = VM_PAGE_TO_PHYS(m);
  464         if (pa < low || pa + size > high || (pa & (alignment - 1)) != 0 ||
  465             ((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
  466                 return (NULL);
  467         /* Handle vm_page_rename(m, new_object, ...). */
  468         for (i = 0; i < npages; i++)
  469                 if ((rv->pages[index + i].flags & (PG_CACHED | PG_FREE)) == 0)
  470                         return (NULL);
  471         for (i = 0; i < npages; i++)
  472                 vm_reserv_populate(rv);
  473         return (m);
  474 }
  475 
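/*
 * Illustrative sketch (not part of the original file): the boundary test
 * used above, ((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0, is nonzero
 * exactly when the first and last bytes of the run fall in different
 * "boundary"-sized blocks.  With a hypothetical pa == 0x1f000, size ==
 * 0x2000 and boundary == 0x10000, the last byte is at 0x20fff, so the run
 * straddles the 64 KB mark at 0x20000 and is rejected; the same run
 * starting at pa == 0x20000 passes.  The hypothetical helper below is not
 * compiled.
 */
#if 0
static __inline int
ex_crosses_boundary(vm_paddr_t pa, vm_paddr_t size, vm_paddr_t boundary)
{

        /* "boundary" is zero (no restriction) or a power of two. */
        return (((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0);
}
#endif
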
  476 /*
  477  * Allocates a page from an existing or newly-created reservation.
  478  *
  479  * The page "mpred" must immediately precede the offset "pindex" within the
  480  * specified object.
  481  *
  482  * The object and free page queue must be locked.
  483  */
  484 vm_page_t
  485 vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex, vm_page_t mpred)
  486 {
  487         vm_page_t m, msucc;
  488         vm_pindex_t first, leftcap, rightcap;
  489         vm_reserv_t rv;
  490 
  491         mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
  492         VM_OBJECT_ASSERT_WLOCKED(object);
  493 
  494         /*
  495          * Is a reservation fundamentally impossible?
  496          */
  497         if (pindex < VM_RESERV_INDEX(object, pindex) ||
  498             pindex >= object->size)
  499                 return (NULL);
  500 
  501         /*
  502          * Look for an existing reservation.
  503          */
  504         if (mpred != NULL) {
  505                 KASSERT(mpred->object == object,
  506                     ("vm_reserv_alloc_page: object doesn't contain mpred"));
  507                 KASSERT(mpred->pindex < pindex,
  508                     ("vm_reserv_alloc_page: mpred doesn't precede pindex"));
  509                 rv = vm_reserv_from_page(mpred);
  510                 if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
  511                         goto found;
  512                 msucc = TAILQ_NEXT(mpred, listq);
  513         } else
  514                 msucc = TAILQ_FIRST(&object->memq);
  515         if (msucc != NULL) {
  516                 KASSERT(msucc->pindex > pindex,
  517                     ("vm_reserv_alloc_page: msucc doesn't succeed pindex"));
  518                 rv = vm_reserv_from_page(msucc);
  519                 if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
  520                         goto found;
  521         }
  522 
  523         /*
  524          * Could a reservation fit between the first index to the left that
  525          * can be used and the first index to the right that cannot be used?
  526          */
  527         first = pindex - VM_RESERV_INDEX(object, pindex);
  528         if (mpred != NULL) {
  529                 if ((rv = vm_reserv_from_page(mpred))->object != object)
  530                         leftcap = mpred->pindex + 1;
  531                 else
  532                         leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
  533                 if (leftcap > first)
  534                         return (NULL);
  535         }
  536         if (msucc != NULL) {
  537                 if ((rv = vm_reserv_from_page(msucc))->object != object)
  538                         rightcap = msucc->pindex;
  539                 else
  540                         rightcap = rv->pindex;
  541                 if (first + VM_LEVEL_0_NPAGES > rightcap)
  542                         return (NULL);
  543         }
  544 
  545         /*
  546          * Would a new reservation extend past the end of the object? 
  547          */
  548         if (first + VM_LEVEL_0_NPAGES > object->size) {
  549                 /*
  550                  * Don't allocate a new reservation if the object is a vnode or
  551                  * backed by another object that is a vnode. 
  552                  */
  553                 if (object->type == OBJT_VNODE ||
  554                     (object->backing_object != NULL &&
  555                     object->backing_object->type == OBJT_VNODE))
  556                         return (NULL);
  557                 /* Speculate that the object may grow. */
  558         }
  559 
  560         /*
  561          * Allocate and populate the new reservation.
  562          */
  563         m = vm_phys_alloc_pages(VM_FREEPOOL_DEFAULT, VM_LEVEL_0_ORDER);
  564         if (m == NULL)
  565                 return (NULL);
  566         rv = vm_reserv_from_page(m);
  567         KASSERT(rv->pages == m,
  568             ("vm_reserv_alloc_page: reserv %p's pages is corrupted", rv));
  569         KASSERT(rv->object == NULL,
  570             ("vm_reserv_alloc_page: reserv %p isn't free", rv));
  571         LIST_INSERT_HEAD(&object->rvq, rv, objq);
  572         rv->object = object;
  573         rv->pindex = first;
  574         KASSERT(rv->popcnt == 0,
  575             ("vm_reserv_alloc_page: reserv %p's popcnt is corrupted", rv));
  576         KASSERT(!rv->inpartpopq,
  577             ("vm_reserv_alloc_page: reserv %p's inpartpopq is TRUE", rv));
  578         vm_reserv_populate(rv);
  579         return (&rv->pages[VM_RESERV_INDEX(object, pindex)]);
  580 
  581         /*
  582          * Found a matching reservation.
  583          */
  584 found:
  585         m = &rv->pages[VM_RESERV_INDEX(object, pindex)];
  586         /* Handle vm_page_rename(m, new_object, ...). */
  587         if ((m->flags & (PG_CACHED | PG_FREE)) == 0)
  588                 return (NULL);
  589         vm_reserv_populate(rv);
  590         return (m);
  591 }
  592 
  593 /*
  594  * Breaks all reservations belonging to the given object.
  595  */
  596 void
  597 vm_reserv_break_all(vm_object_t object)
  598 {
  599         vm_reserv_t rv;
  600         int i;
  601 
  602         mtx_lock(&vm_page_queue_free_mtx);
  603         while ((rv = LIST_FIRST(&object->rvq)) != NULL) {
  604                 KASSERT(rv->object == object,
  605                     ("vm_reserv_break_all: reserv %p is corrupted", rv));
  606                 if (rv->inpartpopq) {
  607                         TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
  608                         rv->inpartpopq = FALSE;
  609                 }
  610                 LIST_REMOVE(rv, objq);
  611                 rv->object = NULL;
  612                 for (i = 0; i < VM_LEVEL_0_NPAGES; i++) {
  613                         if ((rv->pages[i].flags & (PG_CACHED | PG_FREE)) != 0)
  614                                 vm_phys_free_pages(&rv->pages[i], 0);
  615                         else
  616                                 rv->popcnt--;
  617                 }
  618                 KASSERT(rv->popcnt == 0,
  619                     ("vm_reserv_break_all: reserv %p's popcnt is corrupted",
  620                     rv));
  621                 vm_reserv_broken++;
  622         }
  623         mtx_unlock(&vm_page_queue_free_mtx);
  624 }
  625 
  626 /*
  627  * Frees the given page if it belongs to a reservation.  Returns TRUE if the
  628  * page is freed and FALSE otherwise.
  629  *
  630  * The free page queue lock must be held.
  631  */
  632 boolean_t
  633 vm_reserv_free_page(vm_page_t m)
  634 {
  635         vm_reserv_t rv;
  636 
  637         mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
  638         rv = vm_reserv_from_page(m);
  639         if (rv->object == NULL)
  640                 return (FALSE);
  641         if ((m->flags & PG_CACHED) != 0 && m->pool != VM_FREEPOOL_CACHE)
  642                 vm_phys_set_pool(VM_FREEPOOL_CACHE, rv->pages,
  643                     VM_LEVEL_0_ORDER);
  644         vm_reserv_depopulate(rv);
  645         return (TRUE);
  646 }
  647 
  648 /*
  649  * Initializes the reservation management system.  Specifically, initializes
  650  * the reservation array.
  651  *
  652  * Requires that vm_page_array and first_page are initialized!
  653  */
  654 void
  655 vm_reserv_init(void)
  656 {
  657         vm_paddr_t paddr;
  658         int i;
  659 
  660         /*
  661          * Initialize the reservation array.  Specifically, initialize the
  662          * "pages" field for every element that has an underlying superpage.
  663          */
  664         for (i = 0; phys_avail[i + 1] != 0; i += 2) {
  665                 paddr = roundup2(phys_avail[i], VM_LEVEL_0_SIZE);
  666                 while (paddr + VM_LEVEL_0_SIZE <= phys_avail[i + 1]) {
  667                         vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT].pages =
  668                             PHYS_TO_VM_PAGE(paddr);
  669                         paddr += VM_LEVEL_0_SIZE;
  670                 }
  671         }
  672 }
  673 
  674 /*
  675  * Returns a reservation level if the given page belongs to a fully-populated
  676  * reservation and -1 otherwise.
  677  */
  678 int
  679 vm_reserv_level_iffullpop(vm_page_t m)
  680 {
  681         vm_reserv_t rv;
  682 
  683         rv = vm_reserv_from_page(m);
  684         return (rv->popcnt == VM_LEVEL_0_NPAGES ? 0 : -1);
  685 }
  686 
  687 /*
  688  * Prepare for the reactivation of a cached page.
  689  *
  690  * First, suppose that the given page "m" was allocated individually, i.e., not
  691  * as part of a reservation, and cached.  Then, suppose a reservation
  692  * containing "m" is allocated by the same object.  Although "m" and the
  693  * reservation belong to the same object, "m"'s pindex may not match the
  694  * reservation's.
  695  *
  696  * The free page queue must be locked.
  697  */
  698 boolean_t
  699 vm_reserv_reactivate_page(vm_page_t m)
  700 {
  701         vm_reserv_t rv;
  702         int i, m_index;
  703 
  704         mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
  705         rv = vm_reserv_from_page(m);
  706         if (rv->object == NULL)
  707                 return (FALSE);
  708         KASSERT((m->flags & PG_CACHED) != 0,
   709             ("vm_reserv_reactivate_page: page %p is not cached", m));
  710         if (m->object == rv->object &&
  711             m->pindex - rv->pindex == VM_RESERV_INDEX(m->object, m->pindex))
  712                 vm_reserv_populate(rv);
  713         else {
  714                 KASSERT(rv->inpartpopq,
   715                     ("vm_reserv_reactivate_page: reserv %p's inpartpopq is FALSE",
  716                     rv));
  717                 TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
  718                 rv->inpartpopq = FALSE;
  719                 LIST_REMOVE(rv, objq);
  720                 rv->object = NULL;
  721                 /* Don't vm_phys_free_pages(m, 0). */
  722                 m_index = m - rv->pages;
  723                 for (i = 0; i < m_index; i++) {
  724                         if ((rv->pages[i].flags & (PG_CACHED | PG_FREE)) != 0)
  725                                 vm_phys_free_pages(&rv->pages[i], 0);
  726                         else
  727                                 rv->popcnt--;
  728                 }
  729                 for (i++; i < VM_LEVEL_0_NPAGES; i++) {
  730                         if ((rv->pages[i].flags & (PG_CACHED | PG_FREE)) != 0)
  731                                 vm_phys_free_pages(&rv->pages[i], 0);
  732                         else
  733                                 rv->popcnt--;
  734                 }
  735                 KASSERT(rv->popcnt == 0,
   736             ("vm_reserv_reactivate_page: reserv %p's popcnt is corrupted",
  737                     rv));
  738                 vm_reserv_broken++;
  739         }
  740         return (TRUE);
  741 }
  742 
  743 /*
  744  * Breaks the given partially-populated reservation, releasing its cached and
  745  * free pages to the physical memory allocator.
  746  *
  747  * The free page queue lock must be held.
  748  */
  749 static void
  750 vm_reserv_reclaim(vm_reserv_t rv)
  751 {
  752         int i;
  753 
  754         mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
  755         KASSERT(rv->inpartpopq,
  756             ("vm_reserv_reclaim: reserv %p's inpartpopq is corrupted", rv));
  757         TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
  758         rv->inpartpopq = FALSE;
  759         KASSERT(rv->object != NULL,
  760             ("vm_reserv_reclaim: reserv %p is free", rv));
  761         LIST_REMOVE(rv, objq);
  762         rv->object = NULL;
  763         for (i = 0; i < VM_LEVEL_0_NPAGES; i++) {
  764                 if ((rv->pages[i].flags & (PG_CACHED | PG_FREE)) != 0)
  765                         vm_phys_free_pages(&rv->pages[i], 0);
  766                 else
  767                         rv->popcnt--;
  768         }
  769         KASSERT(rv->popcnt == 0,
  770             ("vm_reserv_reclaim: reserv %p's popcnt is corrupted", rv));
  771         vm_reserv_reclaimed++;
  772 }
  773 
  774 /*
  775  * Breaks the reservation at the head of the partially-populated reservation
  776  * queue, releasing its cached and free pages to the physical memory
  777  * allocator.  Returns TRUE if a reservation is broken and FALSE otherwise.
  778  *
  779  * The free page queue lock must be held.
  780  */
  781 boolean_t
  782 vm_reserv_reclaim_inactive(void)
  783 {
  784         vm_reserv_t rv;
  785 
  786         mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
  787         if ((rv = TAILQ_FIRST(&vm_rvq_partpop)) != NULL) {
  788                 vm_reserv_reclaim(rv);
  789                 return (TRUE);
  790         }
  791         return (FALSE);
  792 }
  793 
  794 /*
  795  * Searches the partially-populated reservation queue for the least recently
  796  * active reservation with unused pages, i.e., cached or free, that satisfy the
  797  * given request for contiguous physical memory.  If a satisfactory reservation
  798  * is found, it is broken.  Returns TRUE if a reservation is broken and FALSE
  799  * otherwise.
  800  *
  801  * The free page queue lock must be held.
  802  */
  803 boolean_t
  804 vm_reserv_reclaim_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
  805     u_long alignment, vm_paddr_t boundary)
  806 {
  807         vm_paddr_t pa, pa_length, size;
  808         vm_reserv_t rv;
  809         int i;
  810 
  811         mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
  812         if (npages > VM_LEVEL_0_NPAGES - 1)
  813                 return (FALSE);
  814         size = npages << PAGE_SHIFT;
  815         TAILQ_FOREACH(rv, &vm_rvq_partpop, partpopq) {
  816                 pa = VM_PAGE_TO_PHYS(&rv->pages[VM_LEVEL_0_NPAGES - 1]);
  817                 if (pa + PAGE_SIZE - size < low) {
  818                         /* this entire reservation is too low; go to next */
  819                         continue;
  820                 }
  821                 pa_length = 0;
  822                 for (i = 0; i < VM_LEVEL_0_NPAGES; i++)
  823                         if ((rv->pages[i].flags & (PG_CACHED | PG_FREE)) != 0) {
  824                                 pa_length += PAGE_SIZE;
  825                                 if (pa_length == PAGE_SIZE) {
  826                                         pa = VM_PAGE_TO_PHYS(&rv->pages[i]);
  827                                         if (pa + size > high) {
  828                                                 /* skip to next reservation */
  829                                                 break;
  830                                         } else if (pa < low ||
  831                                             (pa & (alignment - 1)) != 0 ||
  832                                             ((pa ^ (pa + size - 1)) &
  833                                             ~(boundary - 1)) != 0)
  834                                                 pa_length = 0;
  835                                 }
  836                                 if (pa_length >= size) {
  837                                         vm_reserv_reclaim(rv);
  838                                         return (TRUE);
  839                                 }
  840                         } else
  841                                 pa_length = 0;
  842         }
  843         return (FALSE);
  844 }
  845 
  846 /*
  847  * Transfers the reservation underlying the given page to a new object.
  848  *
  849  * The object must be locked.
  850  */
  851 void
  852 vm_reserv_rename(vm_page_t m, vm_object_t new_object, vm_object_t old_object,
  853     vm_pindex_t old_object_offset)
  854 {
  855         vm_reserv_t rv;
  856 
  857         VM_OBJECT_ASSERT_WLOCKED(new_object);
  858         rv = vm_reserv_from_page(m);
  859         if (rv->object == old_object) {
  860                 mtx_lock(&vm_page_queue_free_mtx);
  861                 if (rv->object == old_object) {
  862                         LIST_REMOVE(rv, objq);
  863                         LIST_INSERT_HEAD(&new_object->rvq, rv, objq);
  864                         rv->object = new_object;
  865                         rv->pindex -= old_object_offset;
  866                 }
  867                 mtx_unlock(&vm_page_queue_free_mtx);
  868         }
  869 }
  870 
  871 /*
  872  * Allocates the virtual and physical memory required by the reservation
  873  * management system's data structures, in particular, the reservation array.
  874  */
  875 vm_paddr_t
  876 vm_reserv_startup(vm_offset_t *vaddr, vm_paddr_t end, vm_paddr_t high_water)
  877 {
  878         vm_paddr_t new_end;
  879         size_t size;
  880 
  881         /*
  882          * Calculate the size (in bytes) of the reservation array.  Round up
  883          * from "high_water" because every small page is mapped to an element
  884          * in the reservation array based on its physical address.  Thus, the
  885          * number of elements in the reservation array can be greater than the
  886          * number of superpages. 
  887          */
  888         size = howmany(high_water, VM_LEVEL_0_SIZE) * sizeof(struct vm_reserv);
  889 
  890         /*
  891          * Allocate and map the physical memory for the reservation array.  The
  892          * next available virtual address is returned by reference.
  893          */
  894         new_end = end - round_page(size);
  895         vm_reserv_array = (void *)(uintptr_t)pmap_map(vaddr, new_end, end,
  896             VM_PROT_READ | VM_PROT_WRITE);
  897         bzero(vm_reserv_array, size);
  898 
  899         /*
  900          * Return the next available physical address.
  901          */
  902         return (new_end);
  903 }
  904 
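/*
 * Illustrative sketch (not part of the original file): howmany(x, y) is
 * ((x + y - 1) / y), so with an assumed 2 MB level 0 reservation and a
 * hypothetical high_water of 4 GB, vm_reserv_startup() sizes the array at
 * 2048 entries before multiplying by sizeof(struct vm_reserv) and rounding
 * up to whole pages.  The hypothetical helper below is not compiled.
 */
#if 0
static size_t
ex_reserv_array_entries(vm_paddr_t high_water)
{

        /* howmany(4ULL << 30, 2 * 1024 * 1024) == 2048. */
        return (howmany(high_water, 2 * 1024 * 1024));
}
#endif
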
  905 #endif  /* VM_NRESERVLEVEL > 0 */
