FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_reserv.c


    1 /*-
    2  * Copyright (c) 2002-2006 Rice University
    3  * Copyright (c) 2007-2011 Alan L. Cox <alc@cs.rice.edu>
    4  * All rights reserved.
    5  *
    6  * This software was developed for the FreeBSD Project by Alan L. Cox,
    7  * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
    8  *
    9  * Redistribution and use in source and binary forms, with or without
   10  * modification, are permitted provided that the following conditions
   11  * are met:
   12  * 1. Redistributions of source code must retain the above copyright
   13  *    notice, this list of conditions and the following disclaimer.
   14  * 2. Redistributions in binary form must reproduce the above copyright
   15  *    notice, this list of conditions and the following disclaimer in the
   16  *    documentation and/or other materials provided with the distribution.
   17  *
   18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   19  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   20  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   21  * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
   22  * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
   23  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
   24  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
   25  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
   26  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
   28  * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   29  * POSSIBILITY OF SUCH DAMAGE.
   30  */
   31 
   32 /*
   33  *      Superpage reservation management module
   34  *
   35  * Any external functions defined by this module are only to be used by the
   36  * virtual memory system.
   37  */
   38 
   39 #include <sys/cdefs.h>
   40 __FBSDID("$FreeBSD$");
   41 
   42 #include "opt_vm.h"
   43 
   44 #include <sys/param.h>
   45 #include <sys/kernel.h>
   46 #include <sys/lock.h>
   47 #include <sys/malloc.h>
   48 #include <sys/mutex.h>
   49 #include <sys/queue.h>
   50 #include <sys/rwlock.h>
   51 #include <sys/sbuf.h>
   52 #include <sys/sysctl.h>
   53 #include <sys/systm.h>
   54 #include <sys/vmmeter.h>
   55 
   56 #include <vm/vm.h>
   57 #include <vm/vm_param.h>
   58 #include <vm/vm_object.h>
   59 #include <vm/vm_page.h>
   60 #include <vm/vm_phys.h>
   61 #include <vm/vm_radix.h>
   62 #include <vm/vm_reserv.h>
   63 
   64 /*
   65  * The reservation system supports the speculative allocation of large physical
   66  * pages ("superpages").  Speculative allocation enables the fully automatic
   67  * utilization of superpages by the virtual memory system.  In other words, no
   68  * programmatic directives are required to use superpages.
   69  */
   70 
   71 #if VM_NRESERVLEVEL > 0
   72 
   73 /*
   74  * The number of small pages that are contained in a level 0 reservation
   75  */
   76 #define VM_LEVEL_0_NPAGES       (1 << VM_LEVEL_0_ORDER)
   77 
   78 /*
   79  * The number of bits by which a physical address is shifted to obtain the
   80  * reservation number
   81  */
   82 #define VM_LEVEL_0_SHIFT        (VM_LEVEL_0_ORDER + PAGE_SHIFT)
   83 
   84 /*
   85  * The size of a level 0 reservation in bytes
   86  */
   87 #define VM_LEVEL_0_SIZE         (1 << VM_LEVEL_0_SHIFT)
   88 
   89 /*
   90  * Computes the index of the small page underlying the given (object, pindex)
   91  * within the reservation's array of small pages.
   92  */
   93 #define VM_RESERV_INDEX(object, pindex) \
   94     (((object)->pg_color + (pindex)) & (VM_LEVEL_0_NPAGES - 1))
   95 
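For orientation, here is a hedged worked example of the macros above, assuming an amd64-style configuration (PAGE_SHIFT == 12, VM_LEVEL_0_ORDER == 9); the concrete numbers are illustrative and not part of this file:

       /*
        * Illustrative expansion (assuming PAGE_SHIFT == 12, VM_LEVEL_0_ORDER == 9):
        *
        *   VM_LEVEL_0_NPAGES == 1 << 9       == 512 small pages per reservation
        *   VM_LEVEL_0_SHIFT  == 9 + 12       == 21
        *   VM_LEVEL_0_SIZE   == 1 << 21      == 2 MB
        *
        * For an object with pg_color == 0, VM_RESERV_INDEX(object, pindex) is
        * simply pindex & 511, i.e., the offset of the small page within its
        * 512-page reservation.
        */
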
   96 /*
   97  * The size of a population map entry
   98  */
   99 typedef u_long          popmap_t;
  100 
  101 /*
  102  * The number of bits in a population map entry
  103  */
  104 #define NBPOPMAP        (NBBY * sizeof(popmap_t))
  105 
  106 /*
  107  * The number of population map entries in a reservation
  108  */
  109 #define NPOPMAP         howmany(VM_LEVEL_0_NPAGES, NBPOPMAP)
  110 
  111 /*
  112  * Clear a bit in the population map.
  113  */
  114 static __inline void
  115 popmap_clear(popmap_t popmap[], int i)
  116 {
  117 
  118         popmap[i / NBPOPMAP] &= ~(1UL << (i % NBPOPMAP));
  119 }
  120 
  121 /*
  122  * Set a bit in the population map.
  123  */
  124 static __inline void
  125 popmap_set(popmap_t popmap[], int i)
  126 {
  127 
  128         popmap[i / NBPOPMAP] |= 1UL << (i % NBPOPMAP);
  129 }
  130 
  131 /*
  132  * Is a bit in the population map clear?
  133  */
  134 static __inline boolean_t
  135 popmap_is_clear(popmap_t popmap[], int i)
  136 {
  137 
  138         return ((popmap[i / NBPOPMAP] & (1UL << (i % NBPOPMAP))) == 0);
  139 }
  140 
  141 /*
  142  * Is a bit in the population map set?
  143  */
  144 static __inline boolean_t
  145 popmap_is_set(popmap_t popmap[], int i)
  146 {
  147 
  148         return ((popmap[i / NBPOPMAP] & (1UL << (i % NBPOPMAP))) != 0);
  149 }
  150 
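The four helpers above treat the population map as a flat bit vector indexed by page offset. The following is a minimal standalone sketch of the same word/bit arithmetic, written as user-space C for clarity and assuming 64-bit popmap_t words (CHAR_BIT stands in for the kernel's NBBY); it is not part of this file:

       #include <assert.h>
       #include <limits.h>

       typedef unsigned long popmap_t;
       #define NBPOPMAP        (CHAR_BIT * sizeof(popmap_t))

       int
       main(void)
       {
               popmap_t popmap[8] = { 0 };

               /* Bit 70 lives in word 70 / 64 == 1 at position 70 % 64 == 6. */
               popmap[70 / NBPOPMAP] |= 1UL << (70 % NBPOPMAP);
               assert(popmap[1] == (1UL << 6));
               popmap[70 / NBPOPMAP] &= ~(1UL << (70 % NBPOPMAP));
               assert(popmap[1] == 0);
               return (0);
       }
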
  151 /*
  152  * The reservation structure
  153  *
  154  * A reservation structure is constructed whenever a large physical page is
  155  * speculatively allocated to an object.  The reservation provides the small
  156  * physical pages for the range [pindex, pindex + VM_LEVEL_0_NPAGES) of offsets
  157  * within that object.  The reservation's "popcnt" tracks the number of these
  158  * small physical pages that are in use at any given time.  When and if the
  159  * reservation is not fully utilized, it appears in the queue of partially
  160  * populated reservations.  The reservation always appears on the containing
  161  * object's list of reservations.
  162  *
  163  * A partially populated reservation can be broken and reclaimed at any time.
  164  */
  165 struct vm_reserv {
  166         TAILQ_ENTRY(vm_reserv) partpopq;
  167         LIST_ENTRY(vm_reserv) objq;
  168         vm_object_t     object;                 /* containing object */
  169         vm_pindex_t     pindex;                 /* offset within object */
  170         vm_page_t       pages;                  /* first page of a superpage */
  171         int             popcnt;                 /* # of pages in use */
  172         char            inpartpopq;
  173         popmap_t        popmap[NPOPMAP];        /* bit vector of used pages */
  174 };
  175 
  176 /*
  177  * The reservation array
  178  *
   179  * This array is analogous in function to vm_page_array.  It differs in the
  180  * respect that it may contain a greater number of useful reservation
  181  * structures than there are (physical) superpages.  These "invalid"
  182  * reservation structures exist to trade-off space for time in the
  183  * implementation of vm_reserv_from_page().  Invalid reservation structures are
  184  * distinguishable from "valid" reservation structures by inspecting the
  185  * reservation's "pages" field.  Invalid reservation structures have a NULL
  186  * "pages" field.
  187  *
  188  * vm_reserv_from_page() maps a small (physical) page to an element of this
  189  * array by computing a physical reservation number from the page's physical
  190  * address.  The physical reservation number is used as the array index.
  191  *
  192  * An "active" reservation is a valid reservation structure that has a non-NULL
  193  * "object" field and a non-zero "popcnt" field.  In other words, every active
  194  * reservation belongs to a particular object.  Moreover, every active
  195  * reservation has an entry in the containing object's list of reservations.  
  196  */
  197 static vm_reserv_t vm_reserv_array;
  198 
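As a hedged illustration of the mapping described above (again assuming 2 MB reservations, i.e. VM_LEVEL_0_SHIFT == 21):

       /*
        * Example (assuming VM_LEVEL_0_SHIFT == 21): a page at physical address
        * 0x43210000 has reservation number 0x43210000 >> 21 == 0x219, so
        * vm_reserv_from_page() returns &vm_reserv_array[0x219].  Every 4 KB
        * page within the same 2 MB-aligned region maps to that same element,
        * whether or not a valid reservation ("pages" != NULL) backs the region.
        */
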
  199 /*
  200  * The partially populated reservation queue
  201  *
  202  * This queue enables the fast recovery of an unused free small page from a
  203  * partially populated reservation.  The reservation at the head of this queue
  204  * is the least recently changed, partially populated reservation.
  205  *
  206  * Access to this queue is synchronized by the free page queue lock.
  207  */
  208 static TAILQ_HEAD(, vm_reserv) vm_rvq_partpop =
  209                             TAILQ_HEAD_INITIALIZER(vm_rvq_partpop);
  210 
  211 static SYSCTL_NODE(_vm, OID_AUTO, reserv, CTLFLAG_RD, 0, "Reservation Info");
  212 
  213 static long vm_reserv_broken;
  214 SYSCTL_LONG(_vm_reserv, OID_AUTO, broken, CTLFLAG_RD,
  215     &vm_reserv_broken, 0, "Cumulative number of broken reservations");
  216 
  217 static long vm_reserv_freed;
  218 SYSCTL_LONG(_vm_reserv, OID_AUTO, freed, CTLFLAG_RD,
  219     &vm_reserv_freed, 0, "Cumulative number of freed reservations");
  220 
  221 static int sysctl_vm_reserv_fullpop(SYSCTL_HANDLER_ARGS);
  222 
  223 SYSCTL_PROC(_vm_reserv, OID_AUTO, fullpop, CTLTYPE_INT | CTLFLAG_RD, NULL, 0,
  224     sysctl_vm_reserv_fullpop, "I", "Current number of full reservations");
  225 
  226 static int sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS);
  227 
  228 SYSCTL_OID(_vm_reserv, OID_AUTO, partpopq, CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
  229     sysctl_vm_reserv_partpopq, "A", "Partially populated reservation queues");
  230 
  231 static long vm_reserv_reclaimed;
  232 SYSCTL_LONG(_vm_reserv, OID_AUTO, reclaimed, CTLFLAG_RD,
  233     &vm_reserv_reclaimed, 0, "Cumulative number of reclaimed reservations");
  234 
  235 static void             vm_reserv_break(vm_reserv_t rv);
  236 static void             vm_reserv_depopulate(vm_reserv_t rv, int index);
  237 static vm_reserv_t      vm_reserv_from_page(vm_page_t m);
  238 static boolean_t        vm_reserv_has_pindex(vm_reserv_t rv,
  239                             vm_pindex_t pindex);
  240 static void             vm_reserv_populate(vm_reserv_t rv, int index);
  241 static void             vm_reserv_reclaim(vm_reserv_t rv);
  242 
  243 /*
  244  * Returns the current number of full reservations.
  245  *
  246  * Since the number of full reservations is computed without acquiring the
  247  * free page queue lock, the returned value may be inexact.
  248  */
  249 static int
  250 sysctl_vm_reserv_fullpop(SYSCTL_HANDLER_ARGS)
  251 {
  252         vm_paddr_t paddr;
  253         struct vm_phys_seg *seg;
  254         vm_reserv_t rv;
  255         int fullpop, segind;
  256 
  257         fullpop = 0;
  258         for (segind = 0; segind < vm_phys_nsegs; segind++) {
  259                 seg = &vm_phys_segs[segind];
  260                 paddr = roundup2(seg->start, VM_LEVEL_0_SIZE);
  261                 while (paddr + VM_LEVEL_0_SIZE > paddr && paddr +
  262                     VM_LEVEL_0_SIZE <= seg->end) {
  263                         rv = &vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT];
  264                         fullpop += rv->popcnt == VM_LEVEL_0_NPAGES;
  265                         paddr += VM_LEVEL_0_SIZE;
  266                 }
  267         }
  268         return (sysctl_handle_int(oidp, &fullpop, 0, req));
  269 }
  270 
  271 /*
  272  * Describes the current state of the partially populated reservation queue.
  273  */
  274 static int
  275 sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS)
  276 {
  277         struct sbuf sbuf;
  278         vm_reserv_t rv;
  279         int counter, error, level, unused_pages;
  280 
  281         error = sysctl_wire_old_buffer(req, 0);
  282         if (error != 0)
  283                 return (error);
  284         sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
  285         sbuf_printf(&sbuf, "\nLEVEL     SIZE  NUMBER\n\n");
  286         for (level = -1; level <= VM_NRESERVLEVEL - 2; level++) {
  287                 counter = 0;
  288                 unused_pages = 0;
  289                 mtx_lock(&vm_page_queue_free_mtx);
  290                 TAILQ_FOREACH(rv, &vm_rvq_partpop/*[level]*/, partpopq) {
  291                         counter++;
  292                         unused_pages += VM_LEVEL_0_NPAGES - rv->popcnt;
  293                 }
  294                 mtx_unlock(&vm_page_queue_free_mtx);
  295                 sbuf_printf(&sbuf, "%5d: %6dK, %6d\n", level,
  296                     unused_pages * ((int)PAGE_SIZE / 1024), counter);
  297         }
  298         error = sbuf_finish(&sbuf);
  299         sbuf_delete(&sbuf);
  300         return (error);
  301 }
  302 
  303 /*
  304  * Reduces the given reservation's population count.  If the population count
  305  * becomes zero, the reservation is destroyed.  Additionally, moves the
  306  * reservation to the tail of the partially populated reservation queue if the
  307  * population count is non-zero.
  308  *
  309  * The free page queue lock must be held.
  310  */
  311 static void
  312 vm_reserv_depopulate(vm_reserv_t rv, int index)
  313 {
  314 
  315         mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
  316         KASSERT(rv->object != NULL,
  317             ("vm_reserv_depopulate: reserv %p is free", rv));
  318         KASSERT(popmap_is_set(rv->popmap, index),
  319             ("vm_reserv_depopulate: reserv %p's popmap[%d] is clear", rv,
  320             index));
  321         KASSERT(rv->popcnt > 0,
  322             ("vm_reserv_depopulate: reserv %p's popcnt is corrupted", rv));
  323         if (rv->inpartpopq) {
  324                 TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
  325                 rv->inpartpopq = FALSE;
  326         } else {
  327                 KASSERT(rv->pages->psind == 1,
  328                     ("vm_reserv_depopulate: reserv %p is already demoted",
  329                     rv));
  330                 rv->pages->psind = 0;
  331         }
  332         popmap_clear(rv->popmap, index);
  333         rv->popcnt--;
  334         if (rv->popcnt == 0) {
  335                 LIST_REMOVE(rv, objq);
  336                 rv->object = NULL;
  337                 vm_phys_free_pages(rv->pages, VM_LEVEL_0_ORDER);
  338                 vm_reserv_freed++;
  339         } else {
  340                 rv->inpartpopq = TRUE;
  341                 TAILQ_INSERT_TAIL(&vm_rvq_partpop, rv, partpopq);
  342         }
  343 }
  344 
  345 /*
  346  * Returns the reservation to which the given page might belong.
  347  */
  348 static __inline vm_reserv_t
  349 vm_reserv_from_page(vm_page_t m)
  350 {
  351 
  352         return (&vm_reserv_array[VM_PAGE_TO_PHYS(m) >> VM_LEVEL_0_SHIFT]);
  353 }
  354 
  355 /*
  356  * Returns TRUE if the given reservation contains the given page index and
  357  * FALSE otherwise.
  358  */
  359 static __inline boolean_t
  360 vm_reserv_has_pindex(vm_reserv_t rv, vm_pindex_t pindex)
  361 {
  362 
  363         return (((pindex - rv->pindex) & ~(VM_LEVEL_0_NPAGES - 1)) == 0);
  364 }
  365 
  366 /*
  367  * Increases the given reservation's population count.  Moves the reservation
  368  * to the tail of the partially populated reservation queue.
  369  *
  370  * The free page queue must be locked.
  371  */
  372 static void
  373 vm_reserv_populate(vm_reserv_t rv, int index)
  374 {
  375 
  376         mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
  377         KASSERT(rv->object != NULL,
  378             ("vm_reserv_populate: reserv %p is free", rv));
  379         KASSERT(popmap_is_clear(rv->popmap, index),
  380             ("vm_reserv_populate: reserv %p's popmap[%d] is set", rv,
  381             index));
  382         KASSERT(rv->popcnt < VM_LEVEL_0_NPAGES,
  383             ("vm_reserv_populate: reserv %p is already full", rv));
  384         KASSERT(rv->pages->psind == 0,
  385             ("vm_reserv_populate: reserv %p is already promoted", rv));
  386         if (rv->inpartpopq) {
  387                 TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
  388                 rv->inpartpopq = FALSE;
  389         }
  390         popmap_set(rv->popmap, index);
  391         rv->popcnt++;
  392         if (rv->popcnt < VM_LEVEL_0_NPAGES) {
  393                 rv->inpartpopq = TRUE;
  394                 TAILQ_INSERT_TAIL(&vm_rvq_partpop, rv, partpopq);
  395         } else
  396                 rv->pages->psind = 1;
  397 }
  398 
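Taken together, vm_reserv_populate() and vm_reserv_depopulate() implement a small lifecycle for each reservation; the following summary is an editorial sketch, not code from this file:

       /*
        * Illustrative lifecycle:
        *
        *   vm_reserv_populate(rv, i);     popcnt 0 -> 1; rv joins vm_rvq_partpop
        *   ...                            popcnt reaches VM_LEVEL_0_NPAGES; rv
        *                                  leaves the queue; rv->pages->psind = 1
        *   vm_reserv_depopulate(rv, i);   psind demoted to 0; rv rejoins queue
        *   ...                            popcnt reaches 0; the superpage is
        *                                  returned via vm_phys_free_pages() and
        *                                  the reservation becomes free again
        */
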
  399 /*
  400  * Allocates a contiguous set of physical pages of the given size "npages"
  401  * from existing or newly created reservations.  All of the physical pages
  402  * must be at or above the given physical address "low" and below the given
  403  * physical address "high".  The given value "alignment" determines the
  404  * alignment of the first physical page in the set.  If the given value
  405  * "boundary" is non-zero, then the set of physical pages cannot cross any
  406  * physical address boundary that is a multiple of that value.  Both
  407  * "alignment" and "boundary" must be a power of two.
  408  *
  409  * The page "mpred" must immediately precede the offset "pindex" within the
  410  * specified object.
  411  *
  412  * The object and free page queue must be locked.
  413  */
  414 vm_page_t
  415 vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex, u_long npages,
  416     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
  417     vm_page_t mpred)
  418 {
  419         vm_paddr_t pa, size;
  420         vm_page_t m, m_ret, msucc;
  421         vm_pindex_t first, leftcap, rightcap;
  422         vm_reserv_t rv;
  423         u_long allocpages, maxpages, minpages;
  424         int i, index, n;
  425 
  426         mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
  427         VM_OBJECT_ASSERT_WLOCKED(object);
  428         KASSERT(npages != 0, ("vm_reserv_alloc_contig: npages is 0"));
  429 
  430         /*
  431          * Is a reservation fundamentally impossible?
  432          */
  433         if (pindex < VM_RESERV_INDEX(object, pindex) ||
  434             pindex + npages > object->size)
  435                 return (NULL);
  436 
  437         /*
  438          * All reservations of a particular size have the same alignment.
  439          * Assuming that the first page is allocated from a reservation, the
  440          * least significant bits of its physical address can be determined
  441          * from its offset from the beginning of the reservation and the size
  442          * of the reservation.
  443          *
  444          * Could the specified index within a reservation of the smallest
  445          * possible size satisfy the alignment and boundary requirements?
  446          */
  447         pa = VM_RESERV_INDEX(object, pindex) << PAGE_SHIFT;
  448         if ((pa & (alignment - 1)) != 0)
  449                 return (NULL);
  450         size = npages << PAGE_SHIFT;
  451         if (((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
  452                 return (NULL);
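               /*
                * Editorial example (assuming 4 KB pages): for npages == 4 and
                * boundary == 64 KB, size == 16 KB.  If VM_RESERV_INDEX() were
                * 15, then pa == 0xf000 and pa + size - 1 == 0x12fff; their XOR
                * has bits set at or above bit 16, so the run would cross a
                * 64 KB boundary and NULL is returned.  For an index of 12,
                * pa == 0xc000 and pa + size - 1 == 0xffff lie within a single
                * 64 KB-aligned block, so the check passes.
                */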
  453 
  454         /*
  455          * Look for an existing reservation.
  456          */
  457         if (mpred != NULL) {
  458                 KASSERT(mpred->object == object,
  459                     ("vm_reserv_alloc_contig: object doesn't contain mpred"));
  460                 KASSERT(mpred->pindex < pindex,
  461                     ("vm_reserv_alloc_contig: mpred doesn't precede pindex"));
  462                 rv = vm_reserv_from_page(mpred);
  463                 if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
  464                         goto found;
  465                 msucc = TAILQ_NEXT(mpred, listq);
  466         } else
  467                 msucc = TAILQ_FIRST(&object->memq);
  468         if (msucc != NULL) {
  469                 KASSERT(msucc->pindex > pindex,
  470                     ("vm_reserv_alloc_contig: msucc doesn't succeed pindex"));
  471                 rv = vm_reserv_from_page(msucc);
  472                 if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
  473                         goto found;
  474         }
  475 
  476         /*
  477          * Could at least one reservation fit between the first index to the
  478          * left that can be used ("leftcap") and the first index to the right
  479          * that cannot be used ("rightcap")?
  480          */
  481         first = pindex - VM_RESERV_INDEX(object, pindex);
  482         if (mpred != NULL) {
  483                 if ((rv = vm_reserv_from_page(mpred))->object != object)
  484                         leftcap = mpred->pindex + 1;
  485                 else
  486                         leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
  487                 if (leftcap > first)
  488                         return (NULL);
  489         }
  490         minpages = VM_RESERV_INDEX(object, pindex) + npages;
  491         maxpages = roundup2(minpages, VM_LEVEL_0_NPAGES);
  492         allocpages = maxpages;
  493         if (msucc != NULL) {
  494                 if ((rv = vm_reserv_from_page(msucc))->object != object)
  495                         rightcap = msucc->pindex;
  496                 else
  497                         rightcap = rv->pindex;
  498                 if (first + maxpages > rightcap) {
  499                         if (maxpages == VM_LEVEL_0_NPAGES)
  500                                 return (NULL);
  501 
  502                         /*
  503                          * At least one reservation will fit between "leftcap"
  504                          * and "rightcap".  However, a reservation for the
  505                          * last of the requested pages will not fit.  Reduce
  506                          * the size of the upcoming allocation accordingly.
  507                          */
  508                         allocpages = minpages;
  509                 }
  510         }
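               /*
                * Editorial example (illustrative values): with
                * VM_LEVEL_0_NPAGES == 512, pg_color == 0, pindex == 1000, and
                * npages == 20, "first" is 1000 - (1000 & 511) == 512, minpages
                * is 508, and maxpages is 512.  The reservation can only be
                * created if leftcap <= 512 and rightcap >= 1024; otherwise,
                * because maxpages equals VM_LEVEL_0_NPAGES, NULL is returned
                * above.
                */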
  511 
  512         /*
  513          * Would the last new reservation extend past the end of the object?
  514          */
  515         if (first + maxpages > object->size) {
  516                 /*
  517                  * Don't allocate the last new reservation if the object is a
  518                  * vnode or backed by another object that is a vnode. 
  519                  */
  520                 if (object->type == OBJT_VNODE ||
  521                     (object->backing_object != NULL &&
  522                     object->backing_object->type == OBJT_VNODE)) {
  523                         if (maxpages == VM_LEVEL_0_NPAGES)
  524                                 return (NULL);
  525                         allocpages = minpages;
  526                 }
  527                 /* Speculate that the object may grow. */
  528         }
  529 
  530         /*
  531          * Allocate the physical pages.  The alignment and boundary specified
  532          * for this allocation may be different from the alignment and
  533          * boundary specified for the requested pages.  For instance, the
  534          * specified index may not be the first page within the first new
  535          * reservation.
  536          */
  537         m = vm_phys_alloc_contig(allocpages, low, high, ulmax(alignment,
  538             VM_LEVEL_0_SIZE), boundary > VM_LEVEL_0_SIZE ? boundary : 0);
  539         if (m == NULL)
  540                 return (NULL);
  541 
  542         /*
  543          * The allocated physical pages always begin at a reservation
  544          * boundary, but they do not always end at a reservation boundary.
  545          * Initialize every reservation that is completely covered by the
  546          * allocated physical pages.
  547          */
  548         m_ret = NULL;
  549         index = VM_RESERV_INDEX(object, pindex);
  550         do {
  551                 rv = vm_reserv_from_page(m);
  552                 KASSERT(rv->pages == m,
  553                     ("vm_reserv_alloc_contig: reserv %p's pages is corrupted",
  554                     rv));
  555                 KASSERT(rv->object == NULL,
  556                     ("vm_reserv_alloc_contig: reserv %p isn't free", rv));
  557                 LIST_INSERT_HEAD(&object->rvq, rv, objq);
  558                 rv->object = object;
  559                 rv->pindex = first;
  560                 KASSERT(rv->popcnt == 0,
  561                     ("vm_reserv_alloc_contig: reserv %p's popcnt is corrupted",
  562                     rv));
  563                 KASSERT(!rv->inpartpopq,
  564                     ("vm_reserv_alloc_contig: reserv %p's inpartpopq is TRUE",
  565                     rv));
  566                 for (i = 0; i < NPOPMAP; i++)
  567                         KASSERT(rv->popmap[i] == 0,
  568                     ("vm_reserv_alloc_contig: reserv %p's popmap is corrupted",
  569                             rv));
  570                 n = ulmin(VM_LEVEL_0_NPAGES - index, npages);
  571                 for (i = 0; i < n; i++)
  572                         vm_reserv_populate(rv, index + i);
  573                 npages -= n;
  574                 if (m_ret == NULL) {
  575                         m_ret = &rv->pages[index];
  576                         index = 0;
  577                 }
  578                 m += VM_LEVEL_0_NPAGES;
  579                 first += VM_LEVEL_0_NPAGES;
  580                 allocpages -= VM_LEVEL_0_NPAGES;
  581         } while (allocpages >= VM_LEVEL_0_NPAGES);
  582         return (m_ret);
  583 
  584         /*
  585          * Found a matching reservation.
  586          */
  587 found:
  588         index = VM_RESERV_INDEX(object, pindex);
  589         /* Does the allocation fit within the reservation? */
  590         if (index + npages > VM_LEVEL_0_NPAGES)
  591                 return (NULL);
  592         m = &rv->pages[index];
  593         pa = VM_PAGE_TO_PHYS(m);
  594         if (pa < low || pa + size > high || (pa & (alignment - 1)) != 0 ||
  595             ((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
  596                 return (NULL);
  597         /* Handle vm_page_rename(m, new_object, ...). */
  598         for (i = 0; i < npages; i++)
  599                 if (popmap_is_set(rv->popmap, index + i))
  600                         return (NULL);
  601         for (i = 0; i < npages; i++)
  602                 vm_reserv_populate(rv, index + i);
  603         return (m);
  604 }
  605 
  606 /*
  607  * Allocates a page from an existing or newly created reservation.
  608  *
  609  * The page "mpred" must immediately precede the offset "pindex" within the
  610  * specified object.
  611  *
  612  * The object and free page queue must be locked.
  613  */
  614 vm_page_t
  615 vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex, vm_page_t mpred)
  616 {
  617         vm_page_t m, msucc;
  618         vm_pindex_t first, leftcap, rightcap;
  619         vm_reserv_t rv;
  620         int i, index;
  621 
  622         mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
  623         VM_OBJECT_ASSERT_WLOCKED(object);
  624 
  625         /*
  626          * Is a reservation fundamentally impossible?
  627          */
  628         if (pindex < VM_RESERV_INDEX(object, pindex) ||
  629             pindex >= object->size)
  630                 return (NULL);
  631 
  632         /*
  633          * Look for an existing reservation.
  634          */
  635         if (mpred != NULL) {
  636                 KASSERT(mpred->object == object,
  637                     ("vm_reserv_alloc_page: object doesn't contain mpred"));
  638                 KASSERT(mpred->pindex < pindex,
  639                     ("vm_reserv_alloc_page: mpred doesn't precede pindex"));
  640                 rv = vm_reserv_from_page(mpred);
  641                 if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
  642                         goto found;
  643                 msucc = TAILQ_NEXT(mpred, listq);
  644         } else
  645                 msucc = TAILQ_FIRST(&object->memq);
  646         if (msucc != NULL) {
  647                 KASSERT(msucc->pindex > pindex,
  648                     ("vm_reserv_alloc_page: msucc doesn't succeed pindex"));
  649                 rv = vm_reserv_from_page(msucc);
  650                 if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
  651                         goto found;
  652         }
  653 
  654         /*
  655          * Could a reservation fit between the first index to the left that
  656          * can be used and the first index to the right that cannot be used?
  657          */
  658         first = pindex - VM_RESERV_INDEX(object, pindex);
  659         if (mpred != NULL) {
  660                 if ((rv = vm_reserv_from_page(mpred))->object != object)
  661                         leftcap = mpred->pindex + 1;
  662                 else
  663                         leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
  664                 if (leftcap > first)
  665                         return (NULL);
  666         }
  667         if (msucc != NULL) {
  668                 if ((rv = vm_reserv_from_page(msucc))->object != object)
  669                         rightcap = msucc->pindex;
  670                 else
  671                         rightcap = rv->pindex;
  672                 if (first + VM_LEVEL_0_NPAGES > rightcap)
  673                         return (NULL);
  674         }
  675 
  676         /*
  677          * Would a new reservation extend past the end of the object? 
  678          */
  679         if (first + VM_LEVEL_0_NPAGES > object->size) {
  680                 /*
  681                  * Don't allocate a new reservation if the object is a vnode or
  682                  * backed by another object that is a vnode. 
  683                  */
  684                 if (object->type == OBJT_VNODE ||
  685                     (object->backing_object != NULL &&
  686                     object->backing_object->type == OBJT_VNODE))
  687                         return (NULL);
  688                 /* Speculate that the object may grow. */
  689         }
  690 
  691         /*
  692          * Allocate and populate the new reservation.
  693          */
  694         m = vm_phys_alloc_pages(VM_FREEPOOL_DEFAULT, VM_LEVEL_0_ORDER);
  695         if (m == NULL)
  696                 return (NULL);
  697         rv = vm_reserv_from_page(m);
  698         KASSERT(rv->pages == m,
  699             ("vm_reserv_alloc_page: reserv %p's pages is corrupted", rv));
  700         KASSERT(rv->object == NULL,
  701             ("vm_reserv_alloc_page: reserv %p isn't free", rv));
  702         LIST_INSERT_HEAD(&object->rvq, rv, objq);
  703         rv->object = object;
  704         rv->pindex = first;
  705         KASSERT(rv->popcnt == 0,
  706             ("vm_reserv_alloc_page: reserv %p's popcnt is corrupted", rv));
  707         KASSERT(!rv->inpartpopq,
  708             ("vm_reserv_alloc_page: reserv %p's inpartpopq is TRUE", rv));
  709         for (i = 0; i < NPOPMAP; i++)
  710                 KASSERT(rv->popmap[i] == 0,
  711                     ("vm_reserv_alloc_page: reserv %p's popmap is corrupted",
  712                     rv));
  713         index = VM_RESERV_INDEX(object, pindex);
  714         vm_reserv_populate(rv, index);
  715         return (&rv->pages[index]);
  716 
  717         /*
  718          * Found a matching reservation.
  719          */
  720 found:
  721         index = VM_RESERV_INDEX(object, pindex);
  722         m = &rv->pages[index];
  723         /* Handle vm_page_rename(m, new_object, ...). */
  724         if (popmap_is_set(rv->popmap, index))
  725                 return (NULL);
  726         vm_reserv_populate(rv, index);
  727         return (m);
  728 }
  729 
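For context, vm_reserv_alloc_page() is meant to be invoked by the page allocator with the object write-locked and the free page queue mutex held. The sketch below is a hypothetical caller, written only to show the expected locking; the function name and the fallback policy are illustrative and not taken from FreeBSD:

       /* Hypothetical caller sketch; the object is assumed write-locked. */
       static vm_page_t
       example_alloc(vm_object_t object, vm_pindex_t pindex, vm_page_t mpred)
       {
               vm_page_t m;

               mtx_lock(&vm_page_queue_free_mtx);
               m = vm_reserv_alloc_page(object, pindex, mpred);
               if (m == NULL) {
                       /* Fall back to a single order-0 physical page. */
                       m = vm_phys_alloc_pages(VM_FREEPOOL_DEFAULT, 0);
               }
               mtx_unlock(&vm_page_queue_free_mtx);
               return (m);
       }
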
  730 /*
  731  * Breaks the given reservation.  All free pages in the reservation
  732  * are returned to the physical memory allocator.  The reservation's
  733  * population count and map are reset to their initial state.
  734  *
  735  * The given reservation must not be in the partially populated reservation
  736  * queue.  The free page queue lock must be held.
  737  */
  738 static void
  739 vm_reserv_break(vm_reserv_t rv)
  740 {
  741         int begin_zeroes, hi, i, lo;
  742 
  743         mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
  744         KASSERT(rv->object != NULL,
  745             ("vm_reserv_break: reserv %p is free", rv));
  746         KASSERT(!rv->inpartpopq,
  747             ("vm_reserv_break: reserv %p's inpartpopq is TRUE", rv));
  748         LIST_REMOVE(rv, objq);
  749         rv->object = NULL;
  750         rv->pages->psind = 0;
  751         i = hi = 0;
  752         do {
  753                 /* Find the next 0 bit.  Any previous 0 bits are < "hi". */
  754                 lo = ffsl(~(((1UL << hi) - 1) | rv->popmap[i]));
  755                 if (lo == 0) {
  756                         /* Redundantly clears bits < "hi". */
  757                         rv->popmap[i] = 0;
  758                         rv->popcnt -= NBPOPMAP - hi;
  759                         while (++i < NPOPMAP) {
  760                                 lo = ffsl(~rv->popmap[i]);
  761                                 if (lo == 0) {
  762                                         rv->popmap[i] = 0;
  763                                         rv->popcnt -= NBPOPMAP;
  764                                 } else
  765                                         break;
  766                         }
  767                         if (i == NPOPMAP)
  768                                 break;
  769                         hi = 0;
  770                 }
  771                 KASSERT(lo > 0, ("vm_reserv_break: lo is %d", lo));
  772                 /* Convert from ffsl() to ordinary bit numbering. */
  773                 lo--;
  774                 if (lo > 0) {
  775                         /* Redundantly clears bits < "hi". */
  776                         rv->popmap[i] &= ~((1UL << lo) - 1);
  777                         rv->popcnt -= lo - hi;
  778                 }
  779                 begin_zeroes = NBPOPMAP * i + lo;
  780                 /* Find the next 1 bit. */
  781                 do
  782                         hi = ffsl(rv->popmap[i]);
  783                 while (hi == 0 && ++i < NPOPMAP);
  784                 if (i != NPOPMAP)
  785                         /* Convert from ffsl() to ordinary bit numbering. */
  786                         hi--;
  787                 vm_phys_free_contig(&rv->pages[begin_zeroes], NBPOPMAP * i +
  788                     hi - begin_zeroes);
  789         } while (i < NPOPMAP);
  790         KASSERT(rv->popcnt == 0,
  791             ("vm_reserv_break: reserv %p's popcnt is corrupted", rv));
  792         vm_reserv_broken++;
  793 }
  794 
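The loop above frees each maximal run of clear (unused) popmap bits with a single call to vm_phys_free_contig(). A hedged worked example of one iteration, assuming 64-bit popmap words:

       /*
        * Editorial example (assuming NBPOPMAP == 64): if popmap[0] has bits
        * 0-2 set, bits 3-9 clear, and bit 10 set, the first iteration finds
        * lo == 3 (the first clear bit), then hi == 10 (the next set bit), and
        * calls
        *
        *         vm_phys_free_contig(&rv->pages[3], 10 - 3);
        *
        * freeing the seven-page run of unused pages at once.  popcnt is
        * decremented by the length of each run of set bits that is skipped,
        * so the KASSERT after the loop can insist that it reaches exactly
        * zero.
        */
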
  795 /*
  796  * Breaks all reservations belonging to the given object.
  797  */
  798 void
  799 vm_reserv_break_all(vm_object_t object)
  800 {
  801         vm_reserv_t rv;
  802 
  803         mtx_lock(&vm_page_queue_free_mtx);
  804         while ((rv = LIST_FIRST(&object->rvq)) != NULL) {
  805                 KASSERT(rv->object == object,
  806                     ("vm_reserv_break_all: reserv %p is corrupted", rv));
  807                 if (rv->inpartpopq) {
  808                         TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
  809                         rv->inpartpopq = FALSE;
  810                 }
  811                 vm_reserv_break(rv);
  812         }
  813         mtx_unlock(&vm_page_queue_free_mtx);
  814 }
  815 
  816 /*
  817  * Frees the given page if it belongs to a reservation.  Returns TRUE if the
  818  * page is freed and FALSE otherwise.
  819  *
  820  * The free page queue lock must be held.
  821  */
  822 boolean_t
  823 vm_reserv_free_page(vm_page_t m)
  824 {
  825         vm_reserv_t rv;
  826 
  827         mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
  828         rv = vm_reserv_from_page(m);
  829         if (rv->object == NULL)
  830                 return (FALSE);
  831         vm_reserv_depopulate(rv, m - rv->pages);
  832         return (TRUE);
  833 }
  834 
  835 /*
  836  * Initializes the reservation management system.  Specifically, initializes
  837  * the reservation array.
  838  *
  839  * Requires that vm_page_array and first_page are initialized!
  840  */
  841 void
  842 vm_reserv_init(void)
  843 {
  844         vm_paddr_t paddr;
  845         struct vm_phys_seg *seg;
  846         int segind;
  847 
  848         /*
  849          * Initialize the reservation array.  Specifically, initialize the
  850          * "pages" field for every element that has an underlying superpage.
  851          */
  852         for (segind = 0; segind < vm_phys_nsegs; segind++) {
  853                 seg = &vm_phys_segs[segind];
  854                 paddr = roundup2(seg->start, VM_LEVEL_0_SIZE);
  855                 while (paddr + VM_LEVEL_0_SIZE > paddr && paddr +
  856                     VM_LEVEL_0_SIZE <= seg->end) {
  857                         vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT].pages =
  858                             PHYS_TO_VM_PAGE(paddr);
  859                         paddr += VM_LEVEL_0_SIZE;
  860                 }
  861         }
  862 }
  863 
  864 /*
  865  * Returns true if the given page belongs to a reservation and that page is
  866  * free.  Otherwise, returns false.
  867  */
  868 bool
  869 vm_reserv_is_page_free(vm_page_t m)
  870 {
  871         vm_reserv_t rv;
  872 
  873         mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
  874         rv = vm_reserv_from_page(m);
  875         if (rv->object == NULL)
  876                 return (false);
  877         return (popmap_is_clear(rv->popmap, m - rv->pages));
  878 }
  879 
  880 /*
  881  * If the given page belongs to a reservation, returns the level of that
  882  * reservation.  Otherwise, returns -1.
  883  */
  884 int
  885 vm_reserv_level(vm_page_t m)
  886 {
  887         vm_reserv_t rv;
  888 
  889         rv = vm_reserv_from_page(m);
  890         return (rv->object != NULL ? 0 : -1);
  891 }
  892 
  893 /*
  894  * Returns a reservation level if the given page belongs to a fully populated
  895  * reservation and -1 otherwise.
  896  */
  897 int
  898 vm_reserv_level_iffullpop(vm_page_t m)
  899 {
  900         vm_reserv_t rv;
  901 
  902         rv = vm_reserv_from_page(m);
  903         return (rv->popcnt == VM_LEVEL_0_NPAGES ? 0 : -1);
  904 }
  905 
  906 /*
  907  * Breaks the given partially populated reservation, releasing its free pages
  908  * to the physical memory allocator.
  909  *
  910  * The free page queue lock must be held.
  911  */
  912 static void
  913 vm_reserv_reclaim(vm_reserv_t rv)
  914 {
  915 
  916         mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
  917         KASSERT(rv->inpartpopq,
  918             ("vm_reserv_reclaim: reserv %p's inpartpopq is FALSE", rv));
  919         TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
  920         rv->inpartpopq = FALSE;
  921         vm_reserv_break(rv);
  922         vm_reserv_reclaimed++;
  923 }
  924 
  925 /*
  926  * Breaks the reservation at the head of the partially populated reservation
  927  * queue, releasing its free pages to the physical memory allocator.  Returns
  928  * TRUE if a reservation is broken and FALSE otherwise.
  929  *
  930  * The free page queue lock must be held.
  931  */
  932 boolean_t
  933 vm_reserv_reclaim_inactive(void)
  934 {
  935         vm_reserv_t rv;
  936 
  937         mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
  938         if ((rv = TAILQ_FIRST(&vm_rvq_partpop)) != NULL) {
  939                 vm_reserv_reclaim(rv);
  940                 return (TRUE);
  941         }
  942         return (FALSE);
  943 }
  944 
  945 /*
  946  * Searches the partially populated reservation queue for the least recently
  947  * changed reservation with free pages that satisfy the given request for
  948  * contiguous physical memory.  If a satisfactory reservation is found, it is
  949  * broken.  Returns TRUE if a reservation is broken and FALSE otherwise.
  950  *
  951  * The free page queue lock must be held.
  952  */
  953 boolean_t
  954 vm_reserv_reclaim_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
  955     u_long alignment, vm_paddr_t boundary)
  956 {
  957         vm_paddr_t pa, size;
  958         vm_reserv_t rv;
  959         int hi, i, lo, low_index, next_free;
  960 
  961         mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
  962         if (npages > VM_LEVEL_0_NPAGES - 1)
  963                 return (FALSE);
  964         size = npages << PAGE_SHIFT;
  965         TAILQ_FOREACH(rv, &vm_rvq_partpop, partpopq) {
  966                 pa = VM_PAGE_TO_PHYS(&rv->pages[VM_LEVEL_0_NPAGES - 1]);
  967                 if (pa + PAGE_SIZE - size < low) {
  968                         /* This entire reservation is too low; go to next. */
  969                         continue;
  970                 }
  971                 pa = VM_PAGE_TO_PHYS(&rv->pages[0]);
  972                 if (pa + size > high) {
  973                         /* This entire reservation is too high; go to next. */
  974                         continue;
  975                 }
  976                 if (pa < low) {
  977                         /* Start the search for free pages at "low". */
  978                         low_index = (low + PAGE_MASK - pa) >> PAGE_SHIFT;
  979                         i = low_index / NBPOPMAP;
  980                         hi = low_index % NBPOPMAP;
  981                 } else
  982                         i = hi = 0;
  983                 do {
  984                         /* Find the next free page. */
  985                         lo = ffsl(~(((1UL << hi) - 1) | rv->popmap[i]));
  986                         while (lo == 0 && ++i < NPOPMAP)
  987                                 lo = ffsl(~rv->popmap[i]);
  988                         if (i == NPOPMAP)
  989                                 break;
  990                         /* Convert from ffsl() to ordinary bit numbering. */
  991                         lo--;
  992                         next_free = NBPOPMAP * i + lo;
  993                         pa = VM_PAGE_TO_PHYS(&rv->pages[next_free]);
  994                         KASSERT(pa >= low,
  995                             ("vm_reserv_reclaim_contig: pa is too low"));
  996                         if (pa + size > high) {
  997                                 /* The rest of this reservation is too high. */
  998                                 break;
  999                         } else if ((pa & (alignment - 1)) != 0 ||
 1000                             ((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0) {
 1001                                 /*
 1002                                  * The current page doesn't meet the alignment
 1003                                  * and/or boundary requirements.  Continue
 1004                                  * searching this reservation until the rest
 1005                                  * of its free pages are either excluded or
 1006                                  * exhausted.
 1007                                  */
 1008                                 hi = lo + 1;
 1009                                 if (hi >= NBPOPMAP) {
 1010                                         hi = 0;
 1011                                         i++;
 1012                                 }
 1013                                 continue;
 1014                         }
 1015                         /* Find the next used page. */
 1016                         hi = ffsl(rv->popmap[i] & ~((1UL << lo) - 1));
 1017                         while (hi == 0 && ++i < NPOPMAP) {
 1018                                 if ((NBPOPMAP * i - next_free) * PAGE_SIZE >=
 1019                                     size) {
 1020                                         vm_reserv_reclaim(rv);
 1021                                         return (TRUE);
 1022                                 }
 1023                                 hi = ffsl(rv->popmap[i]);
 1024                         }
 1025                         /* Convert from ffsl() to ordinary bit numbering. */
 1026                         if (i != NPOPMAP)
 1027                                 hi--;
 1028                         if ((NBPOPMAP * i + hi - next_free) * PAGE_SIZE >=
 1029                             size) {
 1030                                 vm_reserv_reclaim(rv);
 1031                                 return (TRUE);
 1032                         }
 1033                 } while (i < NPOPMAP);
 1034         }
 1035         return (FALSE);
 1036 }
 1037 
 1038 /*
 1039  * Transfers the reservation underlying the given page to a new object.
 1040  *
 1041  * The object must be locked.
 1042  */
 1043 void
 1044 vm_reserv_rename(vm_page_t m, vm_object_t new_object, vm_object_t old_object,
 1045     vm_pindex_t old_object_offset)
 1046 {
 1047         vm_reserv_t rv;
 1048 
 1049         VM_OBJECT_ASSERT_WLOCKED(new_object);
 1050         rv = vm_reserv_from_page(m);
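               /*
                * Editorial note: the unlocked test below is only an
                * optimization to avoid taking the mutex for unrelated pages;
                * the association is re-checked under the free page queue
                * mutex, which is what actually synchronizes rv->object.
                */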
 1051         if (rv->object == old_object) {
 1052                 mtx_lock(&vm_page_queue_free_mtx);
 1053                 if (rv->object == old_object) {
 1054                         LIST_REMOVE(rv, objq);
 1055                         LIST_INSERT_HEAD(&new_object->rvq, rv, objq);
 1056                         rv->object = new_object;
 1057                         rv->pindex -= old_object_offset;
 1058                 }
 1059                 mtx_unlock(&vm_page_queue_free_mtx);
 1060         }
 1061 }
 1062 
 1063 /*
 1064  * Returns the size (in bytes) of a reservation of the specified level.
 1065  */
 1066 int
 1067 vm_reserv_size(int level)
 1068 {
 1069 
 1070         switch (level) {
 1071         case 0:
 1072                 return (VM_LEVEL_0_SIZE);
 1073         case -1:
 1074                 return (PAGE_SIZE);
 1075         default:
 1076                 return (0);
 1077         }
 1078 }
 1079 
 1080 /*
 1081  * Allocates the virtual and physical memory required by the reservation
 1082  * management system's data structures, in particular, the reservation array.
 1083  */
 1084 vm_paddr_t
 1085 vm_reserv_startup(vm_offset_t *vaddr, vm_paddr_t end, vm_paddr_t high_water)
 1086 {
 1087         vm_paddr_t new_end;
 1088         size_t size;
 1089 
 1090         /*
 1091          * Calculate the size (in bytes) of the reservation array.  Round up
 1092          * from "high_water" because every small page is mapped to an element
 1093          * in the reservation array based on its physical address.  Thus, the
 1094          * number of elements in the reservation array can be greater than the
 1095          * number of superpages. 
 1096          */
 1097         size = howmany(high_water, VM_LEVEL_0_SIZE) * sizeof(struct vm_reserv);
 1098 
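               /*
                * Editorial example: assuming 2 MB reservations and a struct
                * vm_reserv of roughly 128 bytes on a 64-bit machine, a
                * high_water of 16 GB yields 8192 array elements, i.e. about
                * 1 MB of physical memory permanently set aside below.
                */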
 1099         /*
 1100          * Allocate and map the physical memory for the reservation array.  The
 1101          * next available virtual address is returned by reference.
 1102          */
 1103         new_end = end - round_page(size);
 1104         vm_reserv_array = (void *)(uintptr_t)pmap_map(vaddr, new_end, end,
 1105             VM_PROT_READ | VM_PROT_WRITE);
 1106         bzero(vm_reserv_array, size);
 1107 
 1108         /*
 1109          * Return the next available physical address.
 1110          */
 1111         return (new_end);
 1112 }
 1113 
 1114 /*
 1115  * Returns the superpage containing the given page.
 1116  */
 1117 vm_page_t
 1118 vm_reserv_to_superpage(vm_page_t m)
 1119 {
 1120         vm_reserv_t rv;
 1121 
 1122         VM_OBJECT_ASSERT_LOCKED(m->object);
 1123         rv = vm_reserv_from_page(m);
 1124         return (rv->object == m->object && rv->popcnt == VM_LEVEL_0_NPAGES ?
 1125             rv->pages : NULL);
 1126 }
 1127 
 1128 #endif  /* VM_NRESERVLEVEL > 0 */

This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.